/*	$OpenBSD: if_bridge.c,v 1.349 2021/01/28 20:06:38 mvs Exp $	*/

/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "bpfilter.h"
#include "gif.h"
#include "pf.h"
#include "carp.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_llc.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip_icmp.h>

#ifdef IPSEC
#include <netinet/ip_ipsp.h>
#include <net/if_enc.h>
#endif

#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif

#if NPF > 0
#include <net/pfvar.h>
#define	BRIDGE_IN	PF_IN
#define	BRIDGE_OUT	PF_OUT
#else
#define	BRIDGE_IN	0
#define	BRIDGE_OUT	1
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NCARP > 0
#include <netinet/ip_carp.h>
#endif

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <net/if_bridge.h>

/*
 * Maximum number of addresses to cache
 */
#ifndef	BRIDGE_RTABLE_MAX
#define	BRIDGE_RTABLE_MAX	100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically
 */
#ifndef	BRIDGE_RTABLE_TIMEOUT
#define	BRIDGE_RTABLE_TIMEOUT	240
#endif

void	bridgeattach(int);
int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
void	bridge_ifdetach(void *);
void	bridge_spandetach(void *);
int	bridge_ifremove(struct bridge_iflist *);
void	bridge_spanremove(struct bridge_iflist *);
struct mbuf *
	bridge_input(struct ifnet *, struct mbuf *, void *);
void	bridge_process(struct ifnet *, struct mbuf *);
void	bridgeintr_frame(struct ifnet *, struct ifnet *, struct mbuf *);
void	bridge_bifgetstp(struct bridge_softc *, struct bridge_iflist *,
	    struct ifbreq *);
void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
	    struct ether_header *, struct mbuf *);
int	bridge_localbroadcast(struct ifnet *, struct ether_header *,
	    struct mbuf *);
void	bridge_span(struct ifnet *, struct mbuf *);
void	bridge_stop(struct bridge_softc *);
void	bridge_init(struct bridge_softc *);
int	bridge_bifconf(struct bridge_softc *, struct ifbifconf *);
int	bridge_blocknonip(struct ether_header *, struct mbuf *);
void	bridge_ifinput(struct ifnet *, struct mbuf *);
int	bridge_dummy_output(struct ifnet *, struct mbuf *, struct sockaddr *,
	    struct rtentry *);
void	bridge_send_icmp_err(struct ifnet *, struct ether_header *,
	    struct mbuf *, int, struct llc *, int, int, int);
int	bridge_ifenqueue(struct ifnet *, struct ifnet *, struct mbuf *);
struct mbuf *bridge_ip(struct ifnet *, int, struct ifnet *,
	    struct ether_header *, struct mbuf *);
#ifdef IPSEC
int	bridge_ipsec(struct ifnet *, struct ether_header *, int, struct llc *,
	    int, int, int, struct mbuf *);
#endif
int	bridge_clone_create(struct if_clone *, int);
int	bridge_clone_destroy(struct ifnet *);

#define	ETHERADDR_IS_IP_MCAST(a) \
	/* struct etheraddr *a;	*/				\
	((a)->ether_addr_octet[0] == 0x01 &&			\
	 (a)->ether_addr_octet[1] == 0x00 &&			\
	 (a)->ether_addr_octet[2] == 0x5e)

struct niqueue bridgeintrq = NIQUEUE_INITIALIZER(1024, NETISR_BRIDGE);

struct if_clone bridge_cloner =
    IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);

const struct ether_brport bridge_brport = {
	bridge_input,
	NULL,
};

void
bridgeattach(int n)
{
	if_clone_attach(&bridge_cloner);
}

int
bridge_clone_create(struct if_clone *ifc, int unit)
{
	struct bridge_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	sc->sc_stp = bstp_create();
	if (!sc->sc_stp) {
		free(sc, M_DEVBUF, sizeof *sc);
		return (ENOMEM);
	}

	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	timeout_set(&sc->sc_brtimeout, bridge_rtage, sc);
	SMR_SLIST_INIT(&sc->sc_iflist);
	SMR_SLIST_INIT(&sc->sc_spanlist);
	mtx_init(&sc->sc_mtx, IPL_MPFLOOR);
	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++)
		LIST_INIT(&sc->sc_rts[i]);
	arc4random_buf(&sc->sc_hashkey, sizeof(sc->sc_hashkey));
	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "%s%d", ifc->ifc_name,
	    unit);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_ioctl = bridge_ioctl;
	ifp->if_output = bridge_dummy_output;
	ifp->if_xflags = IFXF_CLONED;
	ifp->if_start = NULL;
	ifp->if_type = IFT_BRIDGE;
	ifp->if_hdrlen = ETHER_HDR_LEN;

	if_attach(ifp);
	if_alloc_sadl(ifp);

#if NBPFILTER > 0
	bpfattach(&sc->sc_if.if_bpf, ifp,
	    DLT_EN10MB, ETHER_HDR_LEN);
#endif

	return (0);
}

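/*
 * bridge(4) has no output path of its own; any packet handed directly
 * to the pseudo-interface is discarded.
 */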
int
bridge_dummy_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct rtentry *rt)
{
	m_freem(m);
	return (EAFNOSUPPORT);
}

int
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;

	/*
	 * bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_ASSERT_UNLOCKED();

	bridge_stop(sc);
	bridge_rtflush(sc, IFBF_FLUSHALL);
	while ((bif = SMR_SLIST_FIRST_LOCKED(&sc->sc_iflist)) != NULL)
		bridge_ifremove(bif);
	while ((bif = SMR_SLIST_FIRST_LOCKED(&sc->sc_spanlist)) != NULL)
		bridge_spanremove(bif);

	bstp_destroy(sc->sc_stp);

	if_detach(ifp);

	free(sc, M_DEVBUF, sizeof *sc);
	return (0);
}

int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
	struct ifbreq *req = (struct ifbreq *)data;
	struct ifbropreq *brop = (struct ifbropreq *)data;
	struct ifnet *ifs;
	struct bridge_iflist *bif;
	struct bstp_port *bp;
	struct bstp_state *bs = sc->sc_stp;
	int error = 0;

	/*
	 * bridge(4) data structures aren't protected by the NET_LOCK().
	 * Ideally it shouldn't be taken before calling `ifp->if_ioctl'
	 * but we aren't there yet.
	 */
	NET_UNLOCK();

	switch (cmd) {
	case SIOCBRDGADD:
	/* bridge(4) does not distinguish between routing/forwarding ports */
	case SIOCBRDGADDL:
		if ((error = suser(curproc)) != 0)
			break;

		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {			/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_type != IFT_ETHER) {
			if_put(ifs);
			error = EINVAL;
			break;
		}
		if (ifs->if_bridgeidx != 0) {
			if (ifs->if_bridgeidx == ifp->if_index)
				error = EEXIST;
			else
				error = EBUSY;
			if_put(ifs);
			break;
		}

		error = ether_brport_isset(ifs);
		if (error != 0) {
			if_put(ifs);
			break;
		}

		/* If it's in the span list, it can't be a member. */
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if (bif != NULL) {
			if_put(ifs);
			error = EBUSY;
			break;
		}

		bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (bif == NULL) {
			if_put(ifs);
			error = ENOMEM;
			break;
		}

		NET_LOCK();
		error = ifpromisc(ifs, 1);
		NET_UNLOCK();
		if (error != 0) {
			if_put(ifs);
			free(bif, M_DEVBUF, sizeof(*bif));
			break;
		}

		/*
		 * XXX If the NET_LOCK() or ifpromisc() calls above
		 * had to sleep, then something else could have come
		 * along and taken over ifs while the kernel lock was
		 * released.
		 */

		bif->bridge_sc = sc;
		bif->ifp = ifs;
		bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
		SIMPLEQ_INIT(&bif->bif_brlin);
		SIMPLEQ_INIT(&bif->bif_brlout);
		ifs->if_bridgeidx = ifp->if_index;
		task_set(&bif->bif_dtask, bridge_ifdetach, bif);
		if_detachhook_add(ifs, &bif->bif_dtask);
		ether_brport_set(bif->ifp, &bridge_brport);
		SMR_SLIST_INSERT_HEAD_LOCKED(&sc->sc_iflist, bif, bif_next);
		break;
	case SIOCBRDGDEL:
		if ((error = suser(curproc)) != 0)
			break;
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		bridge_ifremove(bif);
		break;
	case SIOCBRDGIFS:
		error = bridge_bifconf(sc, (struct ifbifconf *)data);
		break;
	case SIOCBRDGADDS:
		if ((error = suser(curproc)) != 0)
			break;
		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {			/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_type != IFT_ETHER) {
			if_put(ifs);
			error = EINVAL;
			break;
		}
		if (ifs->if_bridgeidx != 0) {
			if (ifs->if_bridgeidx == ifp->if_index)
				error = EEXIST;
			else
				error = EBUSY;
			if_put(ifs);
			break;
		}
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if (bif != NULL) {
			if_put(ifs);
			error = EEXIST;
			break;
		}
		bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
		if (bif == NULL) {
			if_put(ifs);
			error = ENOMEM;
			break;
		}
		bif->bridge_sc = sc;
		bif->ifp = ifs;
		bif->bif_flags = IFBIF_SPAN;
		SIMPLEQ_INIT(&bif->bif_brlin);
		SIMPLEQ_INIT(&bif->bif_brlout);
		task_set(&bif->bif_dtask, bridge_spandetach, bif);
		if_detachhook_add(ifs, &bif->bif_dtask);
		SMR_SLIST_INSERT_HEAD_LOCKED(&sc->sc_spanlist, bif, bif_next);
		break;
	case SIOCBRDGDELS:
		if ((error = suser(curproc)) != 0)
			break;
		ifs = if_unit(req->ifbr_ifsname);
		if (ifs == NULL) {
			error = ENOENT;
			break;
		}
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
			if (bif->ifp == ifs)
				break;
		}
		if_put(ifs);
		if (bif == NULL) {
			error = ESRCH;
			break;
		}
		bridge_spanremove(bif);
		break;
	case SIOCBRDGGIFFLGS:
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		req->ifbr_ifsflags = bif->bif_flags;
		req->ifbr_portno = bif->ifp->if_index & 0xfff;
		req->ifbr_protected = bif->bif_protected;
		if (bif->bif_flags & IFBIF_STP)
			bridge_bifgetstp(sc, bif, req);
		break;
	case SIOCBRDGSIFFLGS:
		if (req->ifbr_ifsflags & IFBIF_RO_MASK) {
			error = EINVAL;
			break;
		}
		if ((error = suser(curproc)) != 0)
			break;
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		if (req->ifbr_ifsflags & IFBIF_STP) {
			if ((bif->bif_flags & IFBIF_STP) == 0) {
				/* Enable STP */
				if ((bif->bif_stp = bstp_add(sc->sc_stp,
				    bif->ifp)) == NULL) {
					error = ENOMEM;
					break;
				}
			} else {
				/* Update STP flags */
				bstp_ifsflags(bif->bif_stp, req->ifbr_ifsflags);
			}
		} else if (bif->bif_flags & IFBIF_STP) {
			bstp_delete(bif->bif_stp);
			bif->bif_stp = NULL;
		}
		bif->bif_flags = req->ifbr_ifsflags;
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == IFF_UP)
			bridge_init(sc);

		if ((ifp->if_flags & IFF_UP) == 0)
			bridge_stop(sc);

		break;
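	/* Report bridge-wide spanning tree parameters. */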
	case SIOCBRDGGPARAM:
		if ((bp = bs->bs_root_port) == NULL)
			brop->ifbop_root_port = 0;
		else
			brop->ifbop_root_port = bp->bp_ifindex;
		brop->ifbop_maxage = bs->bs_bridge_max_age >> 8;
		brop->ifbop_hellotime = bs->bs_bridge_htime >> 8;
		brop->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
		brop->ifbop_holdcount = bs->bs_txholdcount;
		brop->ifbop_priority = bs->bs_bridge_priority;
		brop->ifbop_protocol = bs->bs_protover;
		brop->ifbop_root_bridge = bs->bs_root_pv.pv_root_id;
		brop->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
		brop->ifbop_root_port = bs->bs_root_pv.pv_port_id;
		brop->ifbop_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
		brop->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
		brop->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
		break;
	case SIOCBRDGSIFPROT:
		error = bridge_findbif(sc, req->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		bif->bif_protected = req->ifbr_protected;
		break;
	case SIOCBRDGRTS:
	case SIOCBRDGGCACHE:
	case SIOCBRDGGPRI:
	case SIOCBRDGGMA:
	case SIOCBRDGGHT:
	case SIOCBRDGGFD:
	case SIOCBRDGGTO:
	case SIOCBRDGGRL:
		break;
	case SIOCBRDGFLUSH:
	case SIOCBRDGSADDR:
	case SIOCBRDGDADDR:
	case SIOCBRDGSCACHE:
	case SIOCBRDGSTO:
	case SIOCBRDGARL:
	case SIOCBRDGFRL:
	case SIOCBRDGSPRI:
	case SIOCBRDGSFD:
	case SIOCBRDGSMA:
	case SIOCBRDGSHT:
	case SIOCBRDGSTXHC:
	case SIOCBRDGSPROTO:
	case SIOCBRDGSIFPRIO:
	case SIOCBRDGSIFCOST:
		error = suser(curproc);
		break;
	default:
		error = ENOTTY;
		break;
	}

	if (!error)
		error = bridgectl_ioctl(ifp, cmd, data);

	if (!error)
		error = bstp_ioctl(ifp, cmd, data);

	NET_LOCK();
	return (error);
}

/* Detach an interface from a bridge. */
int
bridge_ifremove(struct bridge_iflist *bif)
{
	struct bridge_softc *sc = bif->bridge_sc;
	int error;

	SMR_SLIST_REMOVE_LOCKED(&sc->sc_iflist, bif, bridge_iflist, bif_next);
	if_detachhook_del(bif->ifp, &bif->bif_dtask);
	ether_brport_clr(bif->ifp);

	smr_barrier();

	if (bif->bif_flags & IFBIF_STP) {
		bstp_delete(bif->bif_stp);
		bif->bif_stp = NULL;
	}

	bif->ifp->if_bridgeidx = 0;
	NET_LOCK();
	error = ifpromisc(bif->ifp, 0);
	NET_UNLOCK();

	bridge_rtdelete(sc, bif->ifp, 0);
	bridge_flushrule(bif);

	if_put(bif->ifp);
	bif->ifp = NULL;
	free(bif, M_DEVBUF, sizeof(*bif));

	return (error);
}

void
bridge_spanremove(struct bridge_iflist *bif)
{
	struct bridge_softc *sc = bif->bridge_sc;

	SMR_SLIST_REMOVE_LOCKED(&sc->sc_spanlist, bif, bridge_iflist, bif_next);
	if_detachhook_del(bif->ifp, &bif->bif_dtask);

	smr_barrier();

	if_put(bif->ifp);
	bif->ifp = NULL;
	free(bif, M_DEVBUF, sizeof(*bif));
}

void
bridge_ifdetach(void *xbif)
{
	struct bridge_iflist *bif = xbif;

	/*
	 * bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_UNLOCK();
	bridge_ifremove(bif);
	NET_LOCK();
}

void
bridge_spandetach(void *xbif)
{
	struct bridge_iflist *bif = xbif;

	/*
	 * bridge(4) detach hook doesn't need the NET_LOCK(); worse, the
	 * use of smr_barrier() while holding the lock might lead to a
	 * deadlock situation.
	 */
	NET_UNLOCK();
	bridge_spanremove(bif);
	NET_LOCK();
}

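/*
 * Fill in the spanning tree related fields of an ifbreq for a port
 * that has STP enabled.
 */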
void
bridge_bifgetstp(struct bridge_softc *sc, struct bridge_iflist *bif,
    struct ifbreq *breq)
{
	struct bstp_state *bs = sc->sc_stp;
	struct bstp_port *bp = bif->bif_stp;

	breq->ifbr_state = bstp_getstate(bs, bp);
	breq->ifbr_priority = bp->bp_priority;
	breq->ifbr_path_cost = bp->bp_path_cost;
	breq->ifbr_proto = bp->bp_protover;
	breq->ifbr_role = bp->bp_role;
	breq->ifbr_stpflags = bp->bp_flags;
	breq->ifbr_fwd_trans = bp->bp_forward_transitions;
	breq->ifbr_root_bridge = bs->bs_root_pv.pv_root_id;
	breq->ifbr_root_cost = bs->bs_root_pv.pv_cost;
	breq->ifbr_root_port = bs->bs_root_pv.pv_port_id;
	breq->ifbr_desg_bridge = bs->bs_root_pv.pv_dbridge_id;
	breq->ifbr_desg_port = bs->bs_root_pv.pv_dport_id;

	/* Copy STP state options as flags */
	if (bp->bp_operedge)
		breq->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
		breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
	if (bp->bp_ptp_link)
		breq->ifbr_ifsflags |= IFBIF_BSTP_PTP;
	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
		breq->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
}

int
bridge_bifconf(struct bridge_softc *sc, struct ifbifconf *bifc)
{
	struct bridge_iflist *bif;
	u_int32_t total = 0, i = 0;
	int error = 0;
	struct ifbreq *breq, *breqs = NULL;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next)
		total++;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next)
		total++;

	if (bifc->ifbic_len == 0) {
		i = total;
		goto done;
	}

	breqs = mallocarray(total, sizeof(*breqs), M_TEMP, M_NOWAIT|M_ZERO);
	if (breqs == NULL)
		goto done;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bifc->ifbic_len < (i + 1) * sizeof(*breqs))
			break;
		breq = &breqs[i];
		strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(breq->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		breq->ifbr_ifsflags = bif->bif_flags;
		breq->ifbr_portno = bif->ifp->if_index & 0xfff;
		breq->ifbr_protected = bif->bif_protected;
		if (bif->bif_flags & IFBIF_STP)
			bridge_bifgetstp(sc, bif, breq);
		i++;
	}
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_spanlist, bif_next) {
		if (bifc->ifbic_len < (i + 1) * sizeof(*breqs))
			break;
		breq = &breqs[i];
		strlcpy(breq->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(breq->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		breq->ifbr_ifsflags = bif->bif_flags | IFBIF_SPAN;
		breq->ifbr_portno = bif->ifp->if_index & 0xfff;
		i++;
	}

	error = copyout(breqs, bifc->ifbic_req, i * sizeof(*breqs));
done:
	free(breqs, M_TEMP, total * sizeof(*breq));
	bifc->ifbic_len = i * sizeof(*breq);
	return (error);
}

int
bridge_findbif(struct bridge_softc *sc, const char *name,
    struct bridge_iflist **rbif)
{
	struct ifnet *ifp;
	struct bridge_iflist *bif;
	int error = 0;

	KERNEL_ASSERT_LOCKED();

	if ((ifp = if_unit(name)) == NULL)
		return (ENOENT);

	if (ifp->if_bridgeidx != sc->sc_if.if_index) {
		error = ESRCH;
		goto put;
	}

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bif->ifp == ifp)
			break;
	}

	if (bif == NULL) {
		error = ENOENT;
		goto put;
	}

	*rbif = bif;
put:
	if_put(ifp);

	return (error);
}

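/*
 * Look up the bridge member descriptor for `ifp'.  Returns NULL if the
 * interface is not attached to a bridge.
 */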
struct bridge_iflist *
bridge_getbif(struct ifnet *ifp)
{
	struct bridge_iflist *bif;
	struct bridge_softc *sc;
	struct ifnet *bifp;

	KERNEL_ASSERT_LOCKED();

	bifp = if_get(ifp->if_bridgeidx);
	if (bifp == NULL)
		return (NULL);

	sc = bifp->if_softc;
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		if (bif->ifp == ifp)
			break;
	}

	if_put(bifp);

	return (bif);
}

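/*
 * Bring the bridge up: enable spanning tree and start the timer that
 * ages out dynamically learned addresses.
 */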
void
bridge_init(struct bridge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	bstp_enable(sc->sc_stp, ifp->if_index);

	if (sc->sc_brttimeout != 0)
		timeout_add_sec(&sc->sc_brtimeout, sc->sc_brttimeout);

	SET(ifp->if_flags, IFF_RUNNING);
}

/*
 * Stop the bridge and flush the dynamically learned addresses.
 */
void
bridge_stop(struct bridge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	CLR(ifp->if_flags, IFF_RUNNING);

	bstp_disable(sc->sc_stp);

	timeout_del_barrier(&sc->sc_brtimeout);

	bridge_rtflush(sc, IFBF_FLUSHDYN);
}

/*
 * Send output from the bridge.  The mbuf has the ethernet header
 * already attached.  We must enqueue or free the mbuf before exiting.
 */
int
bridge_enqueue(struct ifnet *ifp, struct mbuf *m)
{
	struct ifnet *brifp;
	struct ether_header *eh;
	struct ifnet *dst_if = NULL;
	unsigned int dst_ifidx = 0;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif
	int error = 0;

	if (m->m_len < sizeof(*eh)) {
		m = m_pullup(m, sizeof(*eh));
		if (m == NULL)
			return (ENOBUFS);
	}

	/* ifp must be a member interface of the bridge. */
	brifp = if_get(ifp->if_bridgeidx);
	if (brifp == NULL) {
		m_freem(m);
		return (EINVAL);
	}

	/*
	 * If bridge is down, but original output interface is up,
	 * go ahead and send out that interface.  Otherwise the packet
	 * is dropped below.
	 */
	if (!ISSET(brifp->if_flags, IFF_RUNNING)) {
		/* Loop prevention. */
		m->m_flags |= M_PROTO1;
		error = if_enqueue(ifp, m);
		if_put(brifp);
		return (error);
	}

#if NBPFILTER > 0
	if_bpf = brifp->if_bpf;
	if (if_bpf)
		bpf_mtap(if_bpf, m, BPF_DIRECTION_OUT);
#endif
	ifp->if_opackets++;
	ifp->if_obytes += m->m_pkthdr.len;

	bridge_span(brifp, m);

	eh = mtod(m, struct ether_header *);
	if (!ETHER_IS_MULTICAST(eh->ether_dhost)) {
		struct ether_addr *dst;

		dst = (struct ether_addr *)&eh->ether_dhost[0];
		dst_ifidx = bridge_rtlookup(brifp, dst, m);
	}

	/*
	 * If the packet is a broadcast or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (dst_ifidx == 0) {
		struct bridge_softc *sc = brifp->if_softc;
		struct bridge_iflist *bif;
		struct mbuf *mc;

		smr_read_enter();
		SMR_SLIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			dst_if = bif->ifp;
			if ((dst_if->if_flags & IFF_RUNNING) == 0)
				continue;

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp &&
			    (bif->bif_flags & IFBIF_STP) &&
			    (bif->bif_state == BSTP_IFSTATE_DISCARDING))
				continue;
			if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
			    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
				continue;

			if (bridge_filterrule(&bif->bif_brlout, eh, m) ==
			    BRL_ACTION_BLOCK)
				continue;

			mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
			if (mc == NULL) {
				brifp->if_oerrors++;
				continue;
			}

			error = bridge_ifenqueue(brifp, dst_if, mc);
			if (error)
				continue;
		}
		smr_read_leave();
		m_freem(m);
		goto out;
	}

	dst_if = if_get(dst_ifidx);
	if ((dst_if == NULL) || !ISSET(dst_if->if_flags, IFF_RUNNING)) {
		m_freem(m);
		if_put(dst_if);
		error = ENETDOWN;
		goto out;
	}

	bridge_ifenqueue(brifp, dst_if, m);
	if_put(dst_if);
out:
	if_put(brifp);
	return (error);
}

/*
 * Dequeue frames from the bridge input queue and process them.
 */
void
bridgeintr(void)
{
	struct mbuf_list ml;
	struct mbuf *m;
	struct ifnet *ifp;

	niq_delist(&bridgeintrq, &ml);
	if (ml_empty(&ml))
		return;

	KERNEL_LOCK();
	while ((m = ml_dequeue(&ml)) != NULL) {

		ifp = if_get(m->m_pkthdr.ph_ifidx);
		if (ifp == NULL) {
			m_freem(m);
			continue;
		}

		bridge_process(ifp, m);

		if_put(ifp);
	}
	KERNEL_UNLOCK();
}

/*
 * Process a single frame.  Frame must be freed or queued before returning.
 */
void
bridgeintr_frame(struct ifnet *brifp, struct ifnet *src_if, struct mbuf *m)
{
	struct bridge_softc *sc = brifp->if_softc;
	struct ifnet *dst_if = NULL;
	struct bridge_iflist *bif;
	struct ether_addr *dst, *src;
	struct ether_header eh;
	unsigned int dst_ifidx;
	u_int32_t protected;
	int len;

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	bif = bridge_getbif(src_if);
	KASSERT(bif != NULL);

	m_copydata(m, 0, ETHER_HDR_LEN, (caddr_t)&eh);
	dst = (struct ether_addr *)&eh.ether_dhost[0];
	src = (struct ether_addr *)&eh.ether_shost[0];

	/*
	 * If interface is learning, and if source address
	 * is not broadcast or multicast, record its address.
	 */
	if ((bif->bif_flags & IFBIF_LEARNING) &&
	    !ETHER_IS_MULTICAST(eh.ether_shost) &&
	    !ETHER_IS_ANYADDR(eh.ether_shost))
		bridge_rtupdate(sc, src, src_if, 0, IFBAF_DYNAMIC, m);

	if ((bif->bif_flags & IFBIF_STP) &&
	    (bif->bif_state == BSTP_IFSTATE_LEARNING)) {
		m_freem(m);
		return;
	}

	/*
	 * At this point, the port either doesn't participate in STP or
	 * it's in the forwarding state.
	 */

	/*
	 * If packet is unicast, destined for someone on "this"
	 * side of the bridge, drop it.
	 */
	if (!ETHER_IS_MULTICAST(eh.ether_dhost)) {
		dst_ifidx = bridge_rtlookup(brifp, dst, NULL);
		if (dst_ifidx == src_if->if_index) {
			m_freem(m);
			return;
		}
	} else {
		if (ETHER_IS_BROADCAST(eh.ether_dhost))
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
	}

	/*
	 * Multicast packets get handled a little differently:
	 * If interface is:
	 *	-link0,-link1	(default) Forward all multicast
	 *			as broadcast.
	 *	-link0,link1	Drop non-IP multicast, forward
	 *			as broadcast IP multicast.
	 *	link0,-link1	Drop IP multicast, forward as
	 *			broadcast non-IP multicast.
	 *	link0,link1	Drop all multicast.
	 */
	if (m->m_flags & M_MCAST) {
		if ((sc->sc_if.if_flags &
		    (IFF_LINK0 | IFF_LINK1)) ==
		    (IFF_LINK0 | IFF_LINK1)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK0 &&
		    ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
		if (sc->sc_if.if_flags & IFF_LINK1 &&
		    !ETHERADDR_IS_IP_MCAST(dst)) {
			m_freem(m);
			return;
		}
	}

	if (bif->bif_flags & IFBIF_BLOCKNONIP && bridge_blocknonip(&eh, m)) {
		m_freem(m);
		return;
	}

	if (bridge_filterrule(&bif->bif_brlin, &eh, m) == BRL_ACTION_BLOCK) {
		m_freem(m);
		return;
	}
	m = bridge_ip(&sc->sc_if, BRIDGE_IN, src_if, &eh, m);
	if (m == NULL)
		return;
	/*
	 * If the packet is a multicast or broadcast OR if we don't
	 * know any better, forward it to all interfaces.
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) || dst_ifidx == 0) {
		sc->sc_if.if_imcasts++;
		bridge_broadcast(sc, src_if, &eh, m);
		return;
	}
	protected = bif->bif_protected;

	dst_if = if_get(dst_ifidx);
	if (dst_if == NULL)
		goto bad;

	/*
	 * At this point, we're dealing with a unicast frame going to a
	 * different interface.
	 */
	if (!ISSET(dst_if->if_flags, IFF_RUNNING))
		goto bad;
	bif = bridge_getbif(dst_if);
	if ((bif == NULL) || ((bif->bif_flags & IFBIF_STP) &&
	    (bif->bif_state == BSTP_IFSTATE_DISCARDING)))
		goto bad;
	/*
	 * Do not transmit if both ports are part of the same protected
	 * domain.
	 */
	if (protected != 0 && (protected & bif->bif_protected))
		goto bad;
	if (bridge_filterrule(&bif->bif_brlout, &eh, m) == BRL_ACTION_BLOCK)
		goto bad;
	m = bridge_ip(&sc->sc_if, BRIDGE_OUT, dst_if, &eh, m);
	if (m == NULL)
		goto bad;

	len = m->m_pkthdr.len;
#if NVLAN > 0
	if ((m->m_flags & M_VLANTAG) &&
	    (dst_if->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
		len += ETHER_VLAN_ENCAP_LEN;
#endif
	if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
		bridge_fragment(&sc->sc_if, dst_if, &eh, m);
	else {
		bridge_ifenqueue(&sc->sc_if, dst_if, m);
	}
	m = NULL;
bad:
	if_put(dst_if);
	m_freem(m);
}

/*
 * Return 1 if `ena' belongs to `ifp', 0 otherwise.
 */
int
bridge_ourether(struct ifnet *ifp, uint8_t *ena)
{
	struct arpcom *ac = (struct arpcom *)ifp;

	if (memcmp(ac->ac_enaddr, ena, ETHER_ADDR_LEN) == 0)
		return (1);

#if NCARP > 0
	if (carp_ourether(ifp, ena))
		return (1);
#endif

	return (0);
}

/*
 * Receive input from an interface.  Queue the packet for bridging if it's
 * not for us, and schedule an interrupt.
 */
struct mbuf *
bridge_input(struct ifnet *ifp, struct mbuf *m, void *null)
{
	KASSERT(m->m_flags & M_PKTHDR);

	if (m->m_flags & M_PROTO1) {
		m->m_flags &= ~M_PROTO1;
		return (m);
	}

	niq_enqueue(&bridgeintrq, m);

	return (NULL);
}

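/*
 * Run the bridging logic for one frame received on member interface `ifp'.
 * Called from bridgeintr() with the kernel lock held.
 */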
void
bridge_process(struct ifnet *ifp, struct mbuf *m)
{
	struct ifnet *brifp;
	struct bridge_softc *sc;
	struct bridge_iflist *bif = NULL, *bif0 = NULL;
	struct ether_header *eh;
	struct mbuf *mc;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	KERNEL_ASSERT_LOCKED();

	brifp = if_get(ifp->if_bridgeidx);
	if ((brifp == NULL) || !ISSET(brifp->if_flags, IFF_RUNNING))
		goto reenqueue;

	if (m->m_pkthdr.len < sizeof(*eh))
		goto bad;

#if NVLAN > 0
	/*
	 * If the underlying interface removed the VLAN header itself,
	 * add it back.
	 */
	if (ISSET(m->m_flags, M_VLANTAG)) {
		m = vlan_inject(m, ETHERTYPE_VLAN, m->m_pkthdr.ether_vtag);
		if (m == NULL)
			goto bad;
	}
#endif

#if NBPFILTER > 0
	if_bpf = brifp->if_bpf;
	if (if_bpf)
		bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_IN);
#endif

	eh = mtod(m, struct ether_header *);

	sc = brifp->if_softc;
	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		struct arpcom *ac = (struct arpcom *)bif->ifp;
		if (memcmp(ac->ac_enaddr, eh->ether_shost, ETHER_ADDR_LEN) == 0)
			goto bad;
		if (bif->ifp == ifp)
			bif0 = bif;
	}
	if (bif0 == NULL)
		goto reenqueue;

	bridge_span(brifp, m);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		/*
		 * Reserved destination MAC addresses (01:80:C2:00:00:0x)
		 * should not be forwarded to bridge members according to
		 * section 7.12.6 of the 802.1D-2004 specification.  The
		 * STP destination address (as stored in bstp_etheraddr)
		 * is the first of these.
		 */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN - 1) == 0) {
			if (eh->ether_dhost[ETHER_ADDR_LEN - 1] == 0) {
				/* STP traffic */
				m = bstp_input(sc->sc_stp, bif0->bif_stp, eh,
				    m);
				if (m == NULL)
					goto bad;
			} else if (eh->ether_dhost[ETHER_ADDR_LEN - 1] <= 0xf)
				goto bad;
		}

		/*
		 * No need to process frames for ifs in the discarding state
		 */
		if ((bif0->bif_flags & IFBIF_STP) &&
		    (bif0->bif_state == BSTP_IFSTATE_DISCARDING))
			goto reenqueue;

		mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
		if (mc == NULL)
			goto reenqueue;

		bridge_ifinput(ifp, mc);

		bridgeintr_frame(brifp, ifp, m);
		if_put(brifp);
		return;
	}

	/*
	 * Unicast, make sure it's not for us.
	 */
	if (bridge_ourether(bif0->ifp, eh->ether_dhost)) {
		bif = bif0;
	} else {
		SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
			if (bif->ifp == ifp)
				continue;
			if (bridge_ourether(bif->ifp, eh->ether_dhost))
				break;
		}
	}
	if (bif != NULL) {
		if (bif0->bif_flags & IFBIF_LEARNING)
			bridge_rtupdate(sc,
			    (struct ether_addr *)&eh->ether_shost,
			    ifp, 0, IFBAF_DYNAMIC, m);
		if (bridge_filterrule(&bif0->bif_brlin, eh, m) ==
		    BRL_ACTION_BLOCK) {
			goto bad;
		}

		/* Count for the bridge */
		brifp->if_ipackets++;
		brifp->if_ibytes += m->m_pkthdr.len;

		ifp = bif->ifp;
		goto reenqueue;
	}

	bridgeintr_frame(brifp, ifp, m);
	if_put(brifp);
	return;

reenqueue:
	bridge_ifinput(ifp, m);
	m = NULL;
bad:
	m_freem(m);
	if_put(brifp);
}

/*
 * Send a frame to all interfaces that are members of the bridge
 * (except the one it came in on).
 */
void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int len, used = 0;
	u_int32_t protected;

	bif = bridge_getbif(ifp);
	KASSERT(bif != NULL);
	protected = bif->bif_protected;

	SMR_SLIST_FOREACH_LOCKED(bif, &sc->sc_iflist, bif_next) {
		dst_if = bif->ifp;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		if ((bif->bif_flags & IFBIF_STP) &&
		    (bif->bif_state == BSTP_IFSTATE_DISCARDING))
			continue;

		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST | M_MCAST)) == 0)
			continue;

		/* Drop non-IP frames if the appropriate flag is set. */
		if (bif->bif_flags & IFBIF_BLOCKNONIP &&
		    bridge_blocknonip(eh, m))
			continue;

		/*
		 * Do not transmit if both ports are part of the same
		 * protected domain.
		 */
		if (protected != 0 && (protected & bif->bif_protected))
			continue;

		if (bridge_filterrule(&bif->bif_brlout, eh, m) ==
		    BRL_ACTION_BLOCK)
			continue;

		/*
		 * Don't retransmit the packet on the interface it was
		 * received on.
		 */
		if (dst_if->if_index == ifp->if_index)
			continue;

		if (bridge_localbroadcast(dst_if, eh, m))
			sc->sc_if.if_oerrors++;

		/* If last one, reuse the passed-in mbuf */
		if (SMR_SLIST_NEXT_LOCKED(bif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
			if (mc == NULL) {
				sc->sc_if.if_oerrors++;
				continue;
			}
		}

		mc = bridge_ip(&sc->sc_if, BRIDGE_OUT, dst_if, eh, mc);
		if (mc == NULL)
			continue;

		len = mc->m_pkthdr.len;
#if NVLAN > 0
		if ((mc->m_flags & M_VLANTAG) &&
		    (dst_if->if_capabilities & IFCAP_VLAN_HWTAGGING) == 0)
			len += ETHER_VLAN_ENCAP_LEN;
#endif
		if ((len - ETHER_HDR_LEN) > dst_if->if_mtu)
			bridge_fragment(&sc->sc_if, dst_if, eh, mc);
		else {
			bridge_ifenqueue(&sc->sc_if, dst_if, mc);
		}
	}

	if (!used)
		m_freem(m);
}

int
bridge_localbroadcast(struct ifnet *ifp, struct ether_header *eh,
    struct mbuf *m)
{
	struct mbuf *m1;
	u_int16_t etype;

	/*
	 * Quick optimisation: don't send packets up the stack if no
	 * corresponding address has been specified.
	 */
	etype = ntohs(eh->ether_type);
	if (!(m->m_flags & M_VLANTAG) && etype == ETHERTYPE_IP) {
		struct ifaddr *ifa;
		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
			if (ifa->ifa_addr->sa_family == AF_INET)
				break;
		}
		if (ifa == NULL)
			return (0);
	}

	m1 = m_dup_pkt(m, ETHER_ALIGN, M_NOWAIT);
	if (m1 == NULL)
		return (1);

#if NPF > 0
	pf_pkt_addr_changed(m1);
#endif	/* NPF */

	bridge_ifinput(ifp, m1);

	return (0);
}

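/*
 * Deliver a copy of the frame to every span port of the bridge.
 */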
void
bridge_span(struct ifnet *brifp, struct mbuf *m)
{
	struct bridge_softc *sc = brifp->if_softc;
	struct bridge_iflist *bif;
	struct ifnet *ifp;
	struct mbuf *mc;
	int error;

	smr_read_enter();
	SMR_SLIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		ifp = bif->ifp;

		if ((ifp->if_flags & IFF_RUNNING) == 0)
			continue;

		mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
		if (mc == NULL) {
			brifp->if_oerrors++;
			continue;
		}

		error = bridge_ifenqueue(brifp, ifp, mc);
		if (error)
			continue;
	}
	smr_read_leave();
}

/*
 * Block non-IP frames:
 * Returns 0 if the frame is IP, and 1 if it should be dropped.
 */
int
bridge_blocknonip(struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	u_int16_t etype;

	if (m->m_pkthdr.len < ETHER_HDR_LEN)
		return (1);

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		return (1);
#endif

	etype = ntohs(eh->ether_type);
	switch (etype) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		return (0);
	}

	if (etype > ETHERMTU)
		return (1);

	if (m->m_pkthdr.len <
	    (ETHER_HDR_LEN + LLC_SNAPFRAMELEN))
		return (1);

	m_copydata(m, ETHER_HDR_LEN, LLC_SNAPFRAMELEN,
	    (caddr_t)&llc);

	etype = ntohs(llc.llc_snap.ether_type);
	if (llc.llc_dsap == LLC_SNAP_LSAP &&
	    llc.llc_ssap == LLC_SNAP_LSAP &&
	    llc.llc_control == LLC_UI &&
	    llc.llc_snap.org_code[0] == 0 &&
	    llc.llc_snap.org_code[1] == 0 &&
	    llc.llc_snap.org_code[2] == 0 &&
	    (etype == ETHERTYPE_ARP || etype == ETHERTYPE_REVARP ||
	    etype == ETHERTYPE_IP || etype == ETHERTYPE_IPV6)) {
		return (0);
	}

	return (1);
}

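/*
 * Hand the frame to the IPsec stack.  Returns 1 if the packet has been
 * consumed (processed or dropped), 0 if the caller should continue
 * forwarding it.
 */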
#ifdef IPSEC
int
bridge_ipsec(struct ifnet *ifp, struct ether_header *eh, int hassnap,
    struct llc *llc, int dir, int af, int hlen, struct mbuf *m)
{
	union sockaddr_union dst;
	struct tdb *tdb;
	u_int32_t spi;
	u_int16_t cpi;
	int error, off;
	u_int8_t proto = 0;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */
#if NPF > 0
	struct ifnet *encif;
#endif

	if (dir == BRIDGE_IN) {
		switch (af) {
		case AF_INET:
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				goto skiplookup;

			ip = mtod(m, struct ip *);
			proto = ip->ip_p;
			off = offsetof(struct ip, ip_p);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET;
			dst.sin.sin_len = sizeof(struct sockaddr_in);
			m_copydata(m, offsetof(struct ip, ip_dst),
			    sizeof(struct in_addr),
			    (caddr_t)&dst.sin.sin_addr);

			break;
#ifdef INET6
		case AF_INET6:
			if (m->m_pkthdr.len - hlen < 2 * sizeof(u_int32_t))
				goto skiplookup;

			ip6 = mtod(m, struct ip6_hdr *);

			/* XXX We should chase down the header chain */
			proto = ip6->ip6_nxt;
			off = offsetof(struct ip6_hdr, ip6_nxt);

			if (proto != IPPROTO_ESP && proto != IPPROTO_AH &&
			    proto != IPPROTO_IPCOMP)
				goto skiplookup;

			bzero(&dst, sizeof(union sockaddr_union));
			dst.sa.sa_family = AF_INET6;
			dst.sin6.sin6_len = sizeof(struct sockaddr_in6);
			m_copydata(m, offsetof(struct ip6_hdr, ip6_nxt),
			    sizeof(struct in6_addr),
			    (caddr_t)&dst.sin6.sin6_addr);

			break;
#endif /* INET6 */
		default:
			return (0);
		}

		switch (proto) {
		case IPPROTO_ESP:
			m_copydata(m, hlen, sizeof(u_int32_t), (caddr_t)&spi);
			break;
		case IPPROTO_AH:
			m_copydata(m, hlen + sizeof(u_int32_t),
			    sizeof(u_int32_t), (caddr_t)&spi);
			break;
		case IPPROTO_IPCOMP:
			m_copydata(m, hlen + sizeof(u_int16_t),
			    sizeof(u_int16_t), (caddr_t)&cpi);
			spi = ntohl(htons(cpi));
			break;
		}

		NET_ASSERT_LOCKED();

		tdb = gettdb(ifp->if_rdomain, spi, &dst, proto);
		if (tdb != NULL && (tdb->tdb_flags & TDBF_INVALID) == 0 &&
		    tdb->tdb_xform != NULL) {
			if (tdb->tdb_first_use == 0) {
				tdb->tdb_first_use = gettime();
				if (tdb->tdb_flags & TDBF_FIRSTUSE)
					timeout_add_sec(&tdb->tdb_first_tmo,
					    tdb->tdb_exp_first_use);
				if (tdb->tdb_flags & TDBF_SOFT_FIRSTUSE)
					timeout_add_sec(&tdb->tdb_sfirst_tmo,
					    tdb->tdb_soft_first_use);
			}

			(*(tdb->tdb_xform->xf_input))(m, tdb, hlen, off);
			return (1);
		} else {
 skiplookup:
			/* XXX do an input policy lookup */
			return (0);
		}
	} else { /* Outgoing from the bridge. */
		tdb = ipsp_spd_lookup(m, af, hlen, &error,
		    IPSP_DIRECTION_OUT, NULL, NULL, 0);
		if (tdb != NULL) {
			/*
			 * We don't need to do loop detection, the
			 * bridge will do that for us.
			 */
#if NPF > 0
			if ((encif = enc_getif(tdb->tdb_rdomain,
			    tdb->tdb_tap)) == NULL ||
			    pf_test(af, dir, encif, &m) != PF_PASS) {
				m_freem(m);
				return (1);
			}
			if (m == NULL)
				return (1);
			else if (af == AF_INET)
				in_proto_cksum_out(m, encif);
#ifdef INET6
			else if (af == AF_INET6)
				in6_proto_cksum_out(m, encif);
#endif /* INET6 */
#endif /* NPF */

			ip = mtod(m, struct ip *);
			if ((af == AF_INET) &&
			    ip_mtudisc && (ip->ip_off & htons(IP_DF)) &&
			    tdb->tdb_mtu && ntohs(ip->ip_len) > tdb->tdb_mtu &&
			    tdb->tdb_mtutimeout > gettime())
				bridge_send_icmp_err(ifp, eh, m,
				    hassnap, llc, tdb->tdb_mtu,
				    ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
			else
				error = ipsp_process_packet(m, tdb, af, 0);
			return (1);
		} else
			return (0);
	}

	return (0);
}
#endif /* IPSEC */

/*
 * Filter IP packets by peeking into the ethernet frame.  This violates
 * the ISO model, but allows us to act as an IP filter at the data link
 * layer.  As a result, most of this code will look familiar to those
 * who've read net/if_ethersubr.c and netinet/ip_input.c.
 */
struct mbuf *
bridge_ip(struct ifnet *brifp, int dir, struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *m)
{
	struct llc llc;
	int hassnap = 0;
	struct ip *ip;
	int hlen;
	u_int16_t etype;

#if NVLAN > 0
	if (m->m_flags & M_VLANTAG)
		return (m);
#endif

	etype = ntohs(eh->ether_type);

	if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			return (m);

		m_copydata(m, ETHER_HDR_LEN,
		    LLC_SNAPFRAMELEN, (caddr_t)&llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2])
			return (m);

		etype = ntohs(llc.llc_snap.ether_type);
		if (etype != ETHERTYPE_IP && etype != ETHERTYPE_IPV6)
			return (m);
		hassnap = 1;
	}

	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	switch (etype) {

	case ETHERTYPE_IP:
		if (m->m_pkthdr.len < sizeof(struct ip))
			goto dropit;

		/* Copy minimal header, and drop invalids */
		if (m->m_len < sizeof(struct ip) &&
		    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
			ipstat_inc(ips_toosmall);
			return (NULL);
		}
		ip = mtod(m, struct ip *);

		if (ip->ip_v != IPVERSION) {
			ipstat_inc(ips_badvers);
			goto dropit;
		}

		hlen = ip->ip_hl << 2;	/* get whole header length */
		if (hlen < sizeof(struct ip)) {
			ipstat_inc(ips_badhlen);
			goto dropit;
		}

		if (hlen > m->m_len) {
			if ((m = m_pullup(m, hlen)) == NULL) {
				ipstat_inc(ips_badhlen);
				return (NULL);
			}
			ip = mtod(m, struct ip *);
		}

		if ((m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_OK) == 0) {
			if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_IN_BAD) {
				ipstat_inc(ips_badsum);
				goto dropit;
			}

			ipstat_inc(ips_inswcsum);
			if (in_cksum(m, hlen) != 0) {
				ipstat_inc(ips_badsum);
				goto dropit;
			}
		}

		if (ntohs(ip->ip_len) < hlen)
			goto dropit;

		if (m->m_pkthdr.len < ntohs(ip->ip_len))
			goto dropit;
		if (m->m_pkthdr.len > ntohs(ip->ip_len)) {
			if (m->m_len == m->m_pkthdr.len) {
				m->m_len = ntohs(ip->ip_len);
				m->m_pkthdr.len = ntohs(ip->ip_len);
			} else
				m_adj(m, ntohs(ip->ip_len) - m->m_pkthdr.len);
		}

#ifdef IPSEC
		if ((brifp->if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(ifp, eh, hassnap, &llc, dir, AF_INET, hlen, m))
			return (NULL);
#endif /* IPSEC */
#if NPF > 0
		/* Finally, we get to filter the packet! */
		if (pf_test(AF_INET, dir, ifp, &m) != PF_PASS)
			goto dropit;
		if (m == NULL)
			goto dropit;
#endif /* NPF > 0 */

		/* Rebuild the IP header */
		if (m->m_len < hlen && ((m = m_pullup(m, hlen)) == NULL))
			return (NULL);
		if (m->m_len < sizeof(struct ip))
			goto dropit;
		in_proto_cksum_out(m, ifp);
		ip = mtod(m, struct ip *);
		ip->ip_sum = 0;
		if (0 && (ifp->if_capabilities & IFCAP_CSUM_IPv4))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_OUT;
		else {
			ipstat_inc(ips_outswcsum);
			ip->ip_sum = in_cksum(m, hlen);
		}

#if NPF > 0
		if (dir == BRIDGE_IN &&
		    m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			m_resethdr(m);
			m->m_pkthdr.ph_ifidx = ifp->if_index;
			m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
			ipv4_input(ifp, m);
			return (NULL);
		}
#endif /* NPF > 0 */

		break;

#ifdef INET6
	case ETHERTYPE_IPV6: {
		struct ip6_hdr *ip6;

		if (m->m_len < sizeof(struct ip6_hdr)) {
			if ((m = m_pullup(m, sizeof(struct ip6_hdr)))
			    == NULL) {
				ip6stat_inc(ip6s_toosmall);
				return (NULL);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);

		if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
			ip6stat_inc(ip6s_badvers);
			goto dropit;
		}

#ifdef IPSEC
		hlen = sizeof(struct ip6_hdr);

		if ((brifp->if_flags & IFF_LINK2) == IFF_LINK2 &&
		    bridge_ipsec(ifp, eh, hassnap, &llc, dir, AF_INET6, hlen,
		    m))
			return (NULL);
#endif /* IPSEC */

#if NPF > 0
		if (pf_test(AF_INET6, dir, ifp, &m) != PF_PASS)
			goto dropit;
		if (m == NULL)
			return (NULL);
#endif /* NPF > 0 */
		in6_proto_cksum_out(m, ifp);

#if NPF > 0
		if (dir == BRIDGE_IN &&
		    m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) {
			m_resethdr(m);
			m->m_pkthdr.ph_ifidx = ifp->if_index;
			m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
			ipv6_input(ifp, m);
			return (NULL);
		}
#endif /* NPF > 0 */

		break;
	}
#endif /* INET6 */

	default:
		goto dropit;
		break;
	}

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(&llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	return (m);

dropit:
	m_freem(m);
	return (NULL);
}

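/*
 * Fragment an IPv4 frame that does not fit the MTU of the destination
 * interface, or bounce an ICMP "fragmentation needed" error if IP_DF
 * is set.
 */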
void
bridge_fragment(struct ifnet *brifp, struct ifnet *ifp, struct ether_header *eh,
    struct mbuf *m)
{
	struct llc llc;
	struct mbuf *m0;
	int error = 0;
	int hassnap = 0;
	u_int16_t etype;
	struct ip *ip;

	etype = ntohs(eh->ether_type);
#if NVLAN > 0
	if ((m->m_flags & M_VLANTAG) || etype == ETHERTYPE_VLAN ||
	    etype == ETHERTYPE_QINQ) {
		int len = m->m_pkthdr.len;

		if (m->m_flags & M_VLANTAG)
			len += ETHER_VLAN_ENCAP_LEN;
		if ((ifp->if_capabilities & IFCAP_VLAN_MTU) &&
		    (len - sizeof(struct ether_vlan_header) <= ifp->if_mtu)) {
			bridge_ifenqueue(brifp, ifp, m);
			return;
		}
		goto dropit;
	}
#endif
	if (etype != ETHERTYPE_IP) {
		if (etype > ETHERMTU ||
		    m->m_pkthdr.len < (LLC_SNAPFRAMELEN +
		    ETHER_HDR_LEN))
			goto dropit;

		m_copydata(m, ETHER_HDR_LEN,
		    LLC_SNAPFRAMELEN, (caddr_t)&llc);

		if (llc.llc_dsap != LLC_SNAP_LSAP ||
		    llc.llc_ssap != LLC_SNAP_LSAP ||
		    llc.llc_control != LLC_UI ||
		    llc.llc_snap.org_code[0] ||
		    llc.llc_snap.org_code[1] ||
		    llc.llc_snap.org_code[2] ||
		    llc.llc_snap.ether_type != htons(ETHERTYPE_IP))
			goto dropit;

		hassnap = 1;
	}

	m_adj(m, ETHER_HDR_LEN);
	if (hassnap)
		m_adj(m, LLC_SNAPFRAMELEN);

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto dropit;
	ip = mtod(m, struct ip *);

	/* Respect IP_DF, return an ICMP_UNREACH_NEEDFRAG. */
	if (ip->ip_off & htons(IP_DF)) {
		bridge_send_icmp_err(ifp, eh, m, hassnap, &llc,
		    ifp->if_mtu, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG);
		return;
	}

	error = ip_fragment(m, ifp, ifp->if_mtu);
	if (error) {
		m = NULL;
		goto dropit;
	}

	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (error == 0) {
			if (hassnap) {
				M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
				if (m == NULL) {
					error = ENOBUFS;
					continue;
				}
				bcopy(&llc, mtod(m, caddr_t),
				    LLC_SNAPFRAMELEN);
			}
			M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
			if (m == NULL) {
				error = ENOBUFS;
				continue;
			}
			bcopy(eh, mtod(m, caddr_t), sizeof(*eh));
			error = bridge_ifenqueue(brifp, ifp, m);
			if (error) {
				continue;
			}
		} else
			m_freem(m);
	}

	if (error == 0)
		ipstat_inc(ips_fragmented);

	return;
 dropit:
	m_freem(m);
}

int
bridge_ifenqueue(struct ifnet *brifp, struct ifnet *ifp, struct mbuf *m)
{
	int error, len;

	/* Loop prevention. */
	m->m_flags |= M_PROTO1;

	len = m->m_pkthdr.len;

	error = if_enqueue(ifp, m);
	if (error) {
		brifp->if_oerrors++;
		return (error);
	}

	brifp->if_opackets++;
	brifp->if_obytes += len;

	return (0);
}

void
bridge_ifinput(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();

	m->m_flags |= M_PROTO1;

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);
}

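/*
 * Build an ICMP error (e.g. "fragmentation needed") in reply to frame `n'
 * and bridge it back towards the original sender.
 */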
void
bridge_send_icmp_err(struct ifnet *ifp,
    struct ether_header *eh, struct mbuf *n, int hassnap, struct llc *llc,
    int mtu, int type, int code)
{
	struct ip *ip;
	struct icmp *icp;
	struct in_addr t;
	struct mbuf *m, *n2;
	int hlen;
	u_int8_t ether_tmp[ETHER_ADDR_LEN];

	n2 = m_copym(n, 0, M_COPYALL, M_DONTWAIT);
	if (!n2) {
		m_freem(n);
		return;
	}
	m = icmp_do_error(n, type, code, 0, mtu);
	if (m == NULL) {
		m_freem(n2);
		return;
	}

	n = n2;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;
	t = ip->ip_dst;
	ip->ip_dst = ip->ip_src;
	ip->ip_src = t;

	m->m_data += hlen;
	m->m_len -= hlen;
	icp = mtod(m, struct icmp *);
	icp->icmp_cksum = 0;
	icp->icmp_cksum = in_cksum(m, ntohs(ip->ip_len) - hlen);
	m->m_data -= hlen;
	m->m_len += hlen;

	ip->ip_v = IPVERSION;
	ip->ip_off &= htons(IP_DF);
	ip->ip_id = htons(ip_randomid());
	ip->ip_ttl = MAXTTL;
	ip->ip_sum = 0;
	ip->ip_sum = in_cksum(m, hlen);

	/* Swap ethernet addresses */
	bcopy(&eh->ether_dhost, &ether_tmp, sizeof(ether_tmp));
	bcopy(&eh->ether_shost, &eh->ether_dhost, sizeof(ether_tmp));
	bcopy(&ether_tmp, &eh->ether_shost, sizeof(ether_tmp));

	/* Reattach SNAP header */
	if (hassnap) {
		M_PREPEND(m, LLC_SNAPFRAMELEN, M_DONTWAIT);
		if (m == NULL)
			goto dropit;
		bcopy(llc, mtod(m, caddr_t), LLC_SNAPFRAMELEN);
	}

	/* Reattach ethernet header */
	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		goto dropit;
	bcopy(eh, mtod(m, caddr_t), sizeof(*eh));

	bridge_enqueue(ifp, m);
	m_freem(n);
	return;

 dropit:
	m_freem(n);
}