xref: /dragonfly/sys/net/bridge/if_bridge.c (revision efbafed1)
1 /*
2  * Copyright 2001 Wasabi Systems, Inc.
3  * All rights reserved.
4  *
5  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed for the NetBSD Project by
18  *	Wasabi Systems, Inc.
19  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /*
37  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by Jason L. Wright
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67  * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68  * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80  *
81  *
82  * The bridge's route information is duplicated to each CPU:
83  *
84  *      CPU0          CPU1          CPU2          CPU3
85  * +-----------+ +-----------+ +-----------+ +-----------+
86  * |  rtnode   | |  rtnode   | |  rtnode   | |  rtnode   |
87  * |           | |           | |           | |           |
88  * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
89  * +-----------+ +-----------+ +-----------+ +-----------+
90  *       |         |                     |         |
91  *       |         |                     |         |
92  *       |         |     +----------+    |         |
93  *       |         |     |  rtinfo  |    |         |
94  *       |         +---->|          |<---+         |
95  *       |               |  flags   |              |
96  *       +-------------->|  timeout |<-------------+
97  *                       |  dst_ifp |
98  *                       +----------+
99  *
100  * We choose to put timeout and dst_ifp into the shared part, so updating
101  * them is cheaper than using message forwarding.  Also, there is no need
102  * for a spinlock to protect the updates: timeout and dst_ifp are not
103  * related, and the order in which the fields are updated does not matter.
104  * The cache pollution caused by the shared part should not be heavy: in a
105  * stable setup, dst_ifp will probably not change during the rtnode's
106  * lifetime, while timeout is refreshed once per second; most of the time,
107  * timeout and dst_ifp are only read.
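 *
 * As a rough sketch of the layout described above (the field and type
 * names here are made up for illustration; they are not the actual
 * definitions in if_bridgevar.h):
 *
 *	struct rtinfo_sketch {			(shared by all cpus)
 *		struct ifnet	*dst_ifp;
 *		unsigned long	timeout;
 *		int		flags;
 *	};
 *
 *	struct rtnode_sketch {			(one instance per cpu)
 *		uint8_t			dst_eaddr[ETHER_ADDR_LEN];
 *		struct rtinfo_sketch	*info;
 *	};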
108  *
109  *
110  * Bridge route information installation on bridge_input path:
111  *
112  *      CPU0           CPU1         CPU2          CPU3
113  *
114  *                                 netisr2
115  *                                    |
116  *                                alloc nmsg
117  *                    snd nmsg        |
118  *                    w/o rtinfo      |
119  *     netisr0<-----------------------+
120  *        |                           :
121  *    lookup dst                      :
122  *   rtnode exists?(Y)free nmsg       :
123  *        |(N)                        :
124  *        |                           :
125  *  alloc rtinfo                      :
126  *  alloc rtnode                      :
127  * install rtnode                     :
128  *        |                           :
129  *        +---------->netisr1         :
130  *        : fwd nmsg     |            :
131  *        : w/ rtinfo    |            :
132  *        :              |            :
133  *        :              |            :
134  *                  alloc rtnode      :
135  *                (w/ nmsg's rtinfo)  :
136  *                 install rtnode     :
137  *                       |            :
138  *                       +----------->|
139  *                       : fwd nmsg   |
140  *                       : w/ rtinfo  |
141  *                       :            |
142  *                       :     same as netisr1
143  *                                    |
144  *                                    +---------->netisr3
145  *                                    : fwd nmsg     |
146  *                                    : w/ rtinfo    |
147  *                                    :              |
148  *                                    :       same as netisr1
149  *                                               free nmsg
150  *                                                   :
151  *                                                   :
152  *
153  * The netmsgs forwarded between the netisrs are allocated with
154  * (M_WAITOK|M_NULLOK), so the allocation will not fail in most cases
155  * (route information is too precious not to be installed :).  Since
156  * multiple netisrs may try to install route information for the same
157  * dst eaddr, we look up the route information before installing it.
158  * However, this lookup only needs to be performed on netisr0, which is
159  * the starting point of the route information installation process.
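 *
 * In pseudo-C, the per-netisr installation step diagrammed above looks
 * roughly like the following (the helper names are made up; the real
 * code is bridge_rtinstall_handler() further down in this file):
 *
 *	handle_install(nmsg)
 *	{
 *		if (nmsg->rtinfo == NULL) {	(we are netisr0)
 *			if (rtnode_lookup(dst) != NULL) {
 *				free_nmsg(nmsg);
 *				return;
 *			}
 *			nmsg->rtinfo = alloc_rtinfo();
 *		}
 *		install_rtnode(dst, nmsg->rtinfo);
 *		if (this is the last netisr)
 *			free_nmsg(nmsg);
 *		else
 *			forward(nmsg, next netisr);
 *	}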
160  *
161  *
162  * Bridge route information deleting/flushing:
163  *
164  *  CPU0            CPU1              CPU2             CPU3
165  *
166  * netisr0
167  *    |
168  *  find suitable rtnodes,
169  *  mark their rtinfo dead
170  *    |
171  *    | domsg <-------------------------------------------+
172  *    : delete rtnodes                                    | replymsg
173  *    : w/ dead rtinfo                                    |
174  *    :                                                   |
175  *    :  fwdmsg             fwdmsg            fwdmsg      |
176  *    :----------> netisr1 --------> netisr2 --------> netisr3
177  *              delete rtnodes    delete rtnodes    delete rtnodes
178  *              w/ dead rtinfo    w/ dead rtinfo    w/ dead rtinfo
179  *                                                 free dead rtinfos
180  *
181  * All deleting/flushing operations are serialized by netisr0, so each
182  * operation only reaps the route information marked dead by itself.
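 *
 * The same flow in compressed pseudo-C (the helper names are made up;
 * it loosely corresponds to bridge_rtreap() and bridge_rtreap_handler()
 * declared below):
 *
 *	reap(sc)			(runs on netisr0)
 *	{
 *		mark_matching_rtinfos_dead(sc);
 *		domsg(netisr0, reap_handler);	(wait for the reply)
 *	}
 *
 *	reap_handler(nmsg)		(runs on each netisr in turn)
 *	{
 *		delete_rtnodes_with_dead_rtinfo();
 *		if (this is the last netisr) {
 *			free_dead_rtinfos();
 *			replymsg(nmsg);
 *		} else {
 *			fwdmsg(nmsg, next netisr);
 *		}
 *	}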
183  *
184  *
185  * Bridge route information adding/deleting/flushing:
186  * Since all operations are serialized by the fixed message flow between
187  * netisrs, it is not possible to create corrupted per-cpu route
188  * information.
189  *
190  *
191  *
192  * XXX This no longer applies.
193  * Percpu member interface list iteration with blocking operation:
194  * Since one bridge can only delete one member interface at a time, and
195  * the deleted member interface is only freed after netmsg_service_sync(),
196  * the following approach is used to make sure that even if a member
197  * interface is ripped from the percpu list during a blocking operation,
198  * the iteration can still keep going:
199  *
200  * TAILQ_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
201  *     blocking operation;
202  *     blocking operation;
203  *     ...
204  *     ...
205  *     if (nbif != NULL && !nbif->bif_onlist) {
206  *         KKASSERT(bif->bif_onlist);
207  *         nbif = TAILQ_NEXT(bif, bif_next);
208  *     }
209  * }
210  *
211  * As mentioned above, only one member interface can be unlinked from the
212  * percpu member interface list at a time, so either bif or nbif may be off
213  * the list, but _not_ both.  To keep the iteration going, we don't care
214  * about bif, only about nbif.  Since a removed member interface will only
215  * be freed after we finish our work, it is safe to access any field of an
216  * unlinked bif (here bif_onlist).  If nbif is no longer on the list, then
217  * bif must still be on the list, so we advance nbif past bif and keep going.
218  */
219 
220 #include "opt_inet.h"
221 #include "opt_inet6.h"
222 
223 #include <sys/param.h>
224 #include <sys/mbuf.h>
225 #include <sys/malloc.h>
226 #include <sys/protosw.h>
227 #include <sys/systm.h>
228 #include <sys/time.h>
229 #include <sys/socket.h> /* for net/if.h */
230 #include <sys/sockio.h>
231 #include <sys/ctype.h>  /* string functions */
232 #include <sys/kernel.h>
233 #include <sys/random.h>
234 #include <sys/sysctl.h>
235 #include <sys/module.h>
236 #include <sys/proc.h>
237 #include <sys/priv.h>
238 #include <sys/lock.h>
239 #include <sys/thread.h>
240 #include <sys/thread2.h>
241 #include <sys/mpipe.h>
242 
243 #include <net/bpf.h>
244 #include <net/if.h>
245 #include <net/if_dl.h>
246 #include <net/if_types.h>
247 #include <net/if_var.h>
248 #include <net/pfil.h>
249 #include <net/ifq_var.h>
250 #include <net/if_clone.h>
251 
252 #include <netinet/in.h> /* for struct arpcom */
253 #include <netinet/in_systm.h>
254 #include <netinet/in_var.h>
255 #include <netinet/ip.h>
256 #include <netinet/ip_var.h>
257 #ifdef INET6
258 #include <netinet/ip6.h>
259 #include <netinet6/ip6_var.h>
260 #endif
261 #include <netinet/if_ether.h> /* for struct arpcom */
262 #include <net/bridge/if_bridgevar.h>
263 #include <net/if_llc.h>
264 #include <net/netmsg2.h>
265 #include <net/netisr2.h>
266 
267 #include <net/route.h>
268 #include <sys/in_cksum.h>
269 
270 /*
271  * Size of the route hash table.  Must be a power of two.
272  */
273 #ifndef BRIDGE_RTHASH_SIZE
274 #define	BRIDGE_RTHASH_SIZE		1024
275 #endif
276 
277 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
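
/*
 * How the mask is meant to be used (a simplified sketch; the actual
 * hash computation is not shown in this excerpt):
 *
 *	idx = hash_of(dst_eaddr) & BRIDGE_RTHASH_MASK;
 *
 * which is why BRIDGE_RTHASH_SIZE must be a power of two.
 */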
278 
279 /*
280  * Maximum number of addresses to cache.
281  */
282 #ifndef BRIDGE_RTABLE_MAX
283 #define	BRIDGE_RTABLE_MAX		4096
284 #endif
285 
286 /*
287  * Spanning tree defaults.
288  */
289 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
290 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
291 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
292 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
293 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
294 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
295 #define	BSTP_DEFAULT_PATH_COST		55
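
/*
 * The spanning tree timing values above appear to be stored in units
 * of 1/256 second (hence the "* 256"); the ioctl handlers below
 * convert to and from whole seconds with "<< 8" / ">> 8", e.g.:
 *
 *	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
 */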
296 
297 /*
298  * Timeout (in seconds) for entries learned dynamically.
299  */
300 #ifndef BRIDGE_RTABLE_TIMEOUT
301 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
302 #endif
303 
304 /*
305  * Number of seconds between walks of the route list.
306  */
307 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
308 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
309 #endif
310 
311 /*
312  * List of capabilities to mask on the member interface.
313  */
314 #define	BRIDGE_IFCAPS_MASK		(IFCAP_TXCSUM | IFCAP_TSO)
315 
316 typedef int	(*bridge_ctl_t)(struct bridge_softc *, void *);
317 
318 struct netmsg_brctl {
319 	struct netmsg_base	base;
320 	bridge_ctl_t		bc_func;
321 	struct bridge_softc	*bc_sc;
322 	void			*bc_arg;
323 };
324 
325 struct netmsg_brsaddr {
326 	struct netmsg_base	base;
327 	struct bridge_softc	*br_softc;
328 	struct ifnet		*br_dst_if;
329 	struct bridge_rtinfo	*br_rtinfo;
330 	int			br_setflags;
331 	uint8_t			br_dst[ETHER_ADDR_LEN];
332 	uint8_t			br_flags;
333 };
334 
335 struct netmsg_braddbif {
336 	struct netmsg_base	base;
337 	struct bridge_softc	*br_softc;
338 	struct bridge_ifinfo	*br_bif_info;
339 	struct ifnet		*br_bif_ifp;
340 };
341 
342 struct netmsg_brdelbif {
343 	struct netmsg_base	base;
344 	struct bridge_softc	*br_softc;
345 	struct bridge_ifinfo	*br_bif_info;
346 	struct bridge_iflist_head *br_bif_list;
347 };
348 
349 struct netmsg_brsflags {
350 	struct netmsg_base	base;
351 	struct bridge_softc	*br_softc;
352 	struct bridge_ifinfo	*br_bif_info;
353 	uint32_t		br_bif_flags;
354 };
355 
356 eventhandler_tag	bridge_detach_cookie = NULL;
357 
358 extern	struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
359 extern	int (*bridge_output_p)(struct ifnet *, struct mbuf *);
360 extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
361 extern  struct ifnet *(*bridge_interface_p)(void *if_bridge);
362 
363 static int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
364 
365 static int	bridge_clone_create(struct if_clone *, int, caddr_t, caddr_t);
366 static int	bridge_clone_destroy(struct ifnet *);
367 
368 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
369 static void	bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
370 static void	bridge_ifdetach(void *, struct ifnet *);
371 static void	bridge_init(void *);
372 static int	bridge_from_us(struct bridge_softc *, struct ether_header *);
373 static void	bridge_stop(struct ifnet *);
374 static void	bridge_start(struct ifnet *, struct ifaltq_subque *);
375 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
376 static int	bridge_output(struct ifnet *, struct mbuf *);
377 static struct ifnet *bridge_interface(void *if_bridge);
378 
379 static void	bridge_forward(struct bridge_softc *, struct mbuf *m);
380 
381 static void	bridge_timer_handler(netmsg_t);
382 static void	bridge_timer(void *);
383 
384 static void	bridge_start_bcast(struct bridge_softc *, struct mbuf *);
385 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
386 		    struct mbuf *);
387 static void	bridge_span(struct bridge_softc *, struct mbuf *);
388 
389 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
390 		    struct ifnet *, uint8_t);
391 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
392 static void	bridge_rtreap(struct bridge_softc *);
393 static void	bridge_rtreap_async(struct bridge_softc *);
394 static void	bridge_rttrim(struct bridge_softc *);
395 static int	bridge_rtage_finddead(struct bridge_softc *);
396 static void	bridge_rtage(struct bridge_softc *);
397 static void	bridge_rtflush(struct bridge_softc *, int);
398 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
399 static int	bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
400 		    struct ifnet *, uint8_t);
401 static void	bridge_rtmsg_sync(struct bridge_softc *sc);
402 static void	bridge_rtreap_handler(netmsg_t);
403 static void	bridge_rtinstall_handler(netmsg_t);
404 static int	bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
405 		    struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
406 
407 static void	bridge_rtable_init(struct bridge_softc *);
408 static void	bridge_rtable_fini(struct bridge_softc *);
409 
410 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
411 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
412 		    const uint8_t *);
413 static void	bridge_rtnode_insert(struct bridge_softc *,
414 		    struct bridge_rtnode *);
415 static void	bridge_rtnode_destroy(struct bridge_softc *,
416 		    struct bridge_rtnode *);
417 
418 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
419 		    const char *name);
420 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
421 		    struct ifnet *ifp);
422 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
423 		    struct bridge_ifinfo *);
424 static void	bridge_delete_member(struct bridge_softc *,
425 		    struct bridge_iflist *, int);
426 static void	bridge_delete_span(struct bridge_softc *,
427 		    struct bridge_iflist *);
428 
429 static int	bridge_control(struct bridge_softc *, u_long,
430 			       bridge_ctl_t, void *);
431 static int	bridge_ioctl_init(struct bridge_softc *, void *);
432 static int	bridge_ioctl_stop(struct bridge_softc *, void *);
433 static int	bridge_ioctl_add(struct bridge_softc *, void *);
434 static int	bridge_ioctl_del(struct bridge_softc *, void *);
435 static void	bridge_ioctl_fillflags(struct bridge_softc *sc,
436 				struct bridge_iflist *bif, struct ifbreq *req);
437 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
438 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
439 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
440 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
441 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
442 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
443 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
444 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
445 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
446 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
447 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
448 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
449 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
450 static int	bridge_ioctl_reinit(struct bridge_softc *, void *);
451 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
452 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
453 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
454 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
455 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
456 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
457 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
458 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
459 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
460 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
461 static int	bridge_ioctl_sifbondwght(struct bridge_softc *, void *);
462 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
463 		    int);
464 static int	bridge_ip_checkbasic(struct mbuf **mp);
465 #ifdef INET6
466 static int	bridge_ip6_checkbasic(struct mbuf **mp);
467 #endif /* INET6 */
468 static int	bridge_fragment(struct ifnet *, struct mbuf *,
469 		    struct ether_header *, int, struct llc *);
470 static void	bridge_enqueue_handler(netmsg_t);
471 static void	bridge_handoff(struct bridge_softc *, struct ifnet *,
472 		    struct mbuf *, int);
473 
474 static void	bridge_del_bif_handler(netmsg_t);
475 static void	bridge_add_bif_handler(netmsg_t);
476 static void	bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
477 		    struct bridge_iflist_head *);
478 static void	bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
479 		    struct ifnet *);
480 
481 SYSCTL_DECL(_net_link);
482 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
483 
484 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
485 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
486 static int pfil_member = 1; /* run pfil hooks on the member interface */
487 static int bridge_debug;
488 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
489     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
490 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
491     &pfil_bridge, 0, "Packet filter on the bridge interface");
492 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
493     &pfil_member, 0, "Packet filter on the member interface");
494 SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW,
495     &bridge_debug, 0, "Bridge debug mode");
496 
497 struct bridge_control_arg {
498 	union {
499 		struct ifbreq ifbreq;
500 		struct ifbifconf ifbifconf;
501 		struct ifbareq ifbareq;
502 		struct ifbaconf ifbaconf;
503 		struct ifbrparam ifbrparam;
504 	} bca_u;
505 	int	bca_len;
506 	void	*bca_uptr;
507 	void	*bca_kptr;
508 };
509 
510 struct bridge_control {
511 	bridge_ctl_t	bc_func;
512 	int		bc_argsize;
513 	int		bc_flags;
514 };
515 
516 #define	BC_F_COPYIN		0x01	/* copy arguments in */
517 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
518 #define	BC_F_SUSER		0x04	/* do super-user check */
519 
520 const struct bridge_control bridge_control_table[] = {
521 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
522 	  BC_F_COPYIN|BC_F_SUSER },
523 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
524 	  BC_F_COPYIN|BC_F_SUSER },
525 
526 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
527 	  BC_F_COPYIN|BC_F_COPYOUT },
528 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
529 	  BC_F_COPYIN|BC_F_SUSER },
530 
531 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
532 	  BC_F_COPYIN|BC_F_SUSER },
533 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
534 	  BC_F_COPYOUT },
535 
536 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
537 	  BC_F_COPYIN|BC_F_COPYOUT },
538 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
539 	  BC_F_COPYIN|BC_F_COPYOUT },
540 
541 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
542 	  BC_F_COPYIN|BC_F_SUSER },
543 
544 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
545 	  BC_F_COPYIN|BC_F_SUSER },
546 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
547 	  BC_F_COPYOUT },
548 
549 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
550 	  BC_F_COPYIN|BC_F_SUSER },
551 
552 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
553 	  BC_F_COPYIN|BC_F_SUSER },
554 
555 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
556 	  BC_F_COPYOUT },
557 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
558 	  BC_F_COPYIN|BC_F_SUSER },
559 
560 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
561 	  BC_F_COPYOUT },
562 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
563 	  BC_F_COPYIN|BC_F_SUSER },
564 
565 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
566 	  BC_F_COPYOUT },
567 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
568 	  BC_F_COPYIN|BC_F_SUSER },
569 
570 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
571 	  BC_F_COPYOUT },
572 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
573 	  BC_F_COPYIN|BC_F_SUSER },
574 
575 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
576 	  BC_F_COPYIN|BC_F_SUSER },
577 
578 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
579 	  BC_F_COPYIN|BC_F_SUSER },
580 
581 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
582 	  BC_F_COPYIN|BC_F_SUSER },
583 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
584 	  BC_F_COPYIN|BC_F_SUSER },
585 
586 	{ bridge_ioctl_sifbondwght,	sizeof(struct ifbreq),
587 	  BC_F_COPYIN|BC_F_SUSER },
588 
589 };
590 static const int bridge_control_table_size = NELEM(bridge_control_table);
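
/*
 * A user-space sketch of how this table is driven (illustrative only,
 * not taken from ifconfig(8); "s" is assumed to be any open socket
 * descriptor): ifd_cmd indexes bridge_control_table[] and ifd_len
 * must match bc_argsize exactly, as checked in bridge_ioctl() below.
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = 0;			(index of bridge_ioctl_add)
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 */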
591 
592 LIST_HEAD(, bridge_softc) bridge_list;
593 
594 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
595 				bridge_clone_create,
596 				bridge_clone_destroy, 0, IF_MAXUNIT);
597 
598 static int
599 bridge_modevent(module_t mod, int type, void *data)
600 {
601 	switch (type) {
602 	case MOD_LOAD:
603 		LIST_INIT(&bridge_list);
604 		if_clone_attach(&bridge_cloner);
605 		bridge_input_p = bridge_input;
606 		bridge_output_p = bridge_output;
607 		bridge_interface_p = bridge_interface;
608 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
609 		    ifnet_detach_event, bridge_ifdetach, NULL,
610 		    EVENTHANDLER_PRI_ANY);
611 #if 0 /* notyet */
612 		bstp_linkstate_p = bstp_linkstate;
613 #endif
614 		break;
615 	case MOD_UNLOAD:
616 		if (!LIST_EMPTY(&bridge_list))
617 			return (EBUSY);
618 		EVENTHANDLER_DEREGISTER(ifnet_detach_event,
619 		    bridge_detach_cookie);
620 		if_clone_detach(&bridge_cloner);
621 		bridge_input_p = NULL;
622 		bridge_output_p = NULL;
623 		bridge_interface_p = NULL;
624 #if 0 /* notyet */
625 		bstp_linkstate_p = NULL;
626 #endif
627 		break;
628 	default:
629 		return (EOPNOTSUPP);
630 	}
631 	return (0);
632 }
633 
634 static moduledata_t bridge_mod = {
635 	"if_bridge",
636 	bridge_modevent,
637 	0
638 };
639 
640 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
641 
642 /*#define BRIDGE_DEBUG_ENABLE*/
643 #ifdef BRIDGE_DEBUG_ENABLE
644 
645 static void
646 BRIDGE_DEBUG(const char *str, struct ifnet *src_if, struct ifnet *dst_if,
647              struct mbuf *m)
648 {
649 	if ((bridge_debug & 2) == 0)
650 		return;
651 
652 	if (str)
653 		kprintf("%s", str);
654 	if (src_if)
655 		kprintf(" src={%s,%s%d}",
656 			src_if->if_xname, src_if->if_dname, src_if->if_dunit);
657 	if (dst_if)
658 		kprintf(" dst={%s,%s%d}",
659 			dst_if->if_xname, dst_if->if_dname, dst_if->if_dunit);
660 	if (m) {
661 		struct ether_header *eh;
662 		struct ip *ip;
663 
664 		eh = mtod(m, struct ether_header *);
665 
666 		kprintf(" %02x:%02x:%02x:%02x:%02x:%02x "
667 			"%02x:%02x:%02x:%02x:%02x:%02x type %04x ",
668 			eh->ether_dhost[0],
669 			eh->ether_dhost[1],
670 			eh->ether_dhost[2],
671 			eh->ether_dhost[3],
672 			eh->ether_dhost[4],
673 			eh->ether_dhost[5],
674 			eh->ether_shost[0],
675 			eh->ether_shost[1],
676 			eh->ether_shost[2],
677 			eh->ether_shost[3],
678 			eh->ether_shost[4],
679 			eh->ether_shost[5],
680 			eh->ether_type);
681 		ip = (void *)(eh + 1);
682 		kprintf("%u.%u.%u.%u -> %u.%u.%u.%u",
683 			(uint8_t)(ip->ip_src.s_addr >> 24),
684 			(uint8_t)(ip->ip_src.s_addr >> 16),
685 			(uint8_t)(ip->ip_src.s_addr >> 8),
686 			(uint8_t)(ip->ip_src.s_addr),
687 			(uint8_t)(ip->ip_dst.s_addr >> 24),
688 			(uint8_t)(ip->ip_dst.s_addr >> 16),
689 			(uint8_t)(ip->ip_dst.s_addr >> 8),
690 			(uint8_t)(ip->ip_dst.s_addr));
691 		kprintf("\n");
692 	}
693 }
694 
695 #else
696 
697 #define BRIDGE_DEBUG(ctl, sif, dif, m)
698 
699 #endif
700 
701 /*
702  * bridge_clone_create:
703  *
704  *	Create a new bridge instance.
705  */
706 static int
707 bridge_clone_create(struct if_clone *ifc, int unit,
708 		    caddr_t params __unused, caddr_t data __unused)
709 {
710 	struct bridge_softc *sc;
711 	struct ifnet *ifp;
712 	u_char eaddr[6];
713 	int cpu, rnd;
714 
715 	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
716 	ifp = sc->sc_ifp = &sc->sc_if;
717 
718 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
719 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
720 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
721 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
722 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
723 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
724 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
725 
726 	/* Initialize our routing table. */
727 	bridge_rtable_init(sc);
728 
729 	callout_init_mp(&sc->sc_brcallout);
730 	netmsg_init(&sc->sc_brtimemsg, NULL, &netisr_adone_rport,
731 		    MSGF_DROPABLE, bridge_timer_handler);
732 	sc->sc_brtimemsg.lmsg.u.ms_resultp = sc;
733 
734 	callout_init_mp(&sc->sc_bstpcallout);
735 	netmsg_init(&sc->sc_bstptimemsg, NULL, &netisr_adone_rport,
736 		    MSGF_DROPABLE, bstp_tick_handler);
737 	sc->sc_bstptimemsg.lmsg.u.ms_resultp = sc;
738 
739 	/* Initialize per-cpu member iface lists */
740 	sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * netisr_ncpus,
741 				 M_DEVBUF, M_WAITOK);
742 	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
743 		TAILQ_INIT(&sc->sc_iflists[cpu]);
744 
745 	TAILQ_INIT(&sc->sc_spanlist);
746 
747 	ifp->if_softc = sc;
748 	if_initname(ifp, ifc->ifc_name, unit);
749 	ifp->if_mtu = ETHERMTU;
750 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_ISBRIDGE;
751 	ifp->if_ioctl = bridge_ioctl;
752 	ifp->if_start = bridge_start;
753 	ifp->if_init = bridge_init;
754 	ifp->if_type = IFT_ETHER;
755 	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
756 	ifq_set_ready(&ifp->if_snd);
757 	ifp->if_hdrlen = ETHER_HDR_LEN;
758 
759 	/*
760 	 * Generate a random ethernet address; the multicast bit is cleared
761 	 * and the locally administered bit is set below.
762 	 */
763 	rnd = karc4random();
764 	bcopy(&rnd, &eaddr[0], 4); /* bytes 0-3 (ETHER_ADDR_LEN == 6) */
765 	rnd = karc4random();
766 	bcopy(&rnd, &eaddr[2], 4); /* bytes 2-5 (ETHER_ADDR_LEN == 6) */
767 
768 	eaddr[0] &= ~1;	/* clear multicast bit */
769 	eaddr[0] |= 2;	/* set the LAA bit */
770 
771 	ether_ifattach(ifp, eaddr, NULL);
772 	/* Now undo some of the damage... */
773 	ifp->if_baudrate = 0;
774 	/*ifp->if_type = IFT_BRIDGE;*/
775 
776 	crit_enter();	/* XXX MP */
777 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
778 	crit_exit();
779 
780 	return (0);
781 }
782 
783 static void
784 bridge_delete_dispatch(netmsg_t msg)
785 {
786 	struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
787 	struct ifnet *bifp = sc->sc_ifp;
788 	struct bridge_iflist *bif;
789 
790 	ifnet_serialize_all(bifp);
791 
792 	while ((bif = TAILQ_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
793 		bridge_delete_member(sc, bif, 0);
794 
795 	while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL)
796 		bridge_delete_span(sc, bif);
797 
798 	ifnet_deserialize_all(bifp);
799 
800 	lwkt_replymsg(&msg->lmsg, 0);
801 }
802 
803 /*
804  * bridge_clone_destroy:
805  *
806  *	Destroy a bridge instance.
807  */
808 static int
809 bridge_clone_destroy(struct ifnet *ifp)
810 {
811 	struct bridge_softc *sc = ifp->if_softc;
812 	struct netmsg_base msg;
813 
814 	ifnet_serialize_all(ifp);
815 
816 	bridge_stop(ifp);
817 	ifp->if_flags &= ~IFF_UP;
818 
819 	ifnet_deserialize_all(ifp);
820 
821 	netmsg_init(&msg, NULL, &curthread->td_msgport,
822 		    0, bridge_delete_dispatch);
823 	msg.lmsg.u.ms_resultp = sc;
824 	lwkt_domsg(BRIDGE_CFGPORT, &msg.lmsg, 0);
825 
826 	crit_enter();	/* XXX MP */
827 	LIST_REMOVE(sc, sc_list);
828 	crit_exit();
829 
830 	ether_ifdetach(ifp);
831 
832 	/* Tear down the routing table. */
833 	bridge_rtable_fini(sc);
834 
835 	/* Free per-cpu member iface lists */
836 	kfree(sc->sc_iflists, M_DEVBUF);
837 
838 	kfree(sc, M_DEVBUF);
839 
840 	return 0;
841 }
842 
843 /*
844  * bridge_ioctl:
845  *
846  *	Handle a control request from the operator.
847  */
848 static int
849 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
850 {
851 	struct bridge_softc *sc = ifp->if_softc;
852 	struct bridge_control_arg args;
853 	struct ifdrv *ifd = (struct ifdrv *) data;
854 	const struct bridge_control *bc;
855 	int error = 0;
856 
857 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
858 
859 	switch (cmd) {
860 	case SIOCADDMULTI:
861 	case SIOCDELMULTI:
862 		break;
863 
864 	case SIOCGDRVSPEC:
865 	case SIOCSDRVSPEC:
866 		if (ifd->ifd_cmd >= bridge_control_table_size) {
867 			error = EINVAL;
868 			break;
869 		}
870 		bc = &bridge_control_table[ifd->ifd_cmd];
871 
872 		if (cmd == SIOCGDRVSPEC &&
873 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
874 			error = EINVAL;
875 			break;
876 		} else if (cmd == SIOCSDRVSPEC &&
877 			   (bc->bc_flags & BC_F_COPYOUT)) {
878 			error = EINVAL;
879 			break;
880 		}
881 
882 		if (bc->bc_flags & BC_F_SUSER) {
883 			error = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY);
884 			if (error)
885 				break;
886 		}
887 
888 		if (ifd->ifd_len != bc->bc_argsize ||
889 		    ifd->ifd_len > sizeof(args.bca_u)) {
890 			error = EINVAL;
891 			break;
892 		}
893 
894 		memset(&args, 0, sizeof(args));
895 		if (bc->bc_flags & BC_F_COPYIN) {
896 			error = copyin(ifd->ifd_data, &args.bca_u,
897 				       ifd->ifd_len);
898 			if (error)
899 				break;
900 		}
901 
902 		error = bridge_control(sc, cmd, bc->bc_func, &args);
903 		if (error) {
904 			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
905 			break;
906 		}
907 
908 		if (bc->bc_flags & BC_F_COPYOUT) {
909 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
910 			if (args.bca_len != 0) {
911 				KKASSERT(args.bca_kptr != NULL);
912 				if (!error) {
913 					error = copyout(args.bca_kptr,
914 						args.bca_uptr, args.bca_len);
915 				}
916 				kfree(args.bca_kptr, M_TEMP);
917 			} else {
918 				KKASSERT(args.bca_kptr == NULL);
919 			}
920 		} else {
921 			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
922 		}
923 		break;
924 
925 	case SIOCSIFFLAGS:
926 		if (!(ifp->if_flags & IFF_UP) &&
927 		    (ifp->if_flags & IFF_RUNNING)) {
928 			/*
929 			 * If interface is marked down and it is running,
930 			 * then stop it.
931 			 */
932 			bridge_stop(ifp);
933 		} else if ((ifp->if_flags & IFF_UP) &&
934 		    !(ifp->if_flags & IFF_RUNNING)) {
935 			/*
936 			 * If interface is marked up and it is stopped, then
937 			 * start it.
938 			 */
939 			ifp->if_init(sc);
940 		}
941 
942 		/*
943 		 * If we are running and the link flag state has changed,
944 		 * we have to reinitialize as well.
945 		 */
946 		if ((ifp->if_flags & IFF_RUNNING) &&
947 		    (ifp->if_flags & (IFF_LINK0|IFF_LINK1|IFF_LINK2)) !=
948 		    sc->sc_copy_flags) {
949 			sc->sc_copy_flags = ifp->if_flags &
950 					(IFF_LINK0|IFF_LINK1|IFF_LINK2);
951 			bridge_control(sc, 0, bridge_ioctl_reinit, NULL);
952 		}
953 
954 		break;
955 
956 	case SIOCSIFMTU:
957 		/* Do not allow the MTU to be changed on the bridge */
958 		error = EINVAL;
959 		break;
960 
961 	default:
962 		error = ether_ioctl(ifp, cmd, data);
963 		break;
964 	}
965 	return (error);
966 }
967 
968 /*
969  * bridge_mutecaps:
970  *
971  *	Clear or restore unwanted capabilities on the member interface
972  *	Clear or restore unwanted capabilities on the member interface.
973 static void
974 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
975 {
976 	struct ifreq ifr;
977 
978 	if (ifp->if_ioctl == NULL)
979 		return;
980 
981 	bzero(&ifr, sizeof(ifr));
982 	ifr.ifr_reqcap = ifp->if_capenable;
983 
984 	if (mute) {
985 		/* mask off and save capabilities */
986 		bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
987 		if (bif_info->bifi_mutecap != 0)
988 			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
989 	} else {
990 		/* restore muted capabilities */
991 		ifr.ifr_reqcap |= bif_info->bifi_mutecap;
992 	}
993 
994 	if (bif_info->bifi_mutecap != 0) {
995 		ifnet_serialize_all(ifp);
996 		ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
997 		ifnet_deserialize_all(ifp);
998 	}
999 }
1000 
1001 /*
1002  * bridge_lookup_member:
1003  *
1004  *	Lookup a bridge member interface.
1005  */
1006 static struct bridge_iflist *
1007 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1008 {
1009 	struct bridge_iflist *bif;
1010 
1011 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1012 		if (strcmp(bif->bif_ifp->if_xname, name) == 0)
1013 			return (bif);
1014 	}
1015 	return (NULL);
1016 }
1017 
1018 /*
1019  * bridge_lookup_member_if:
1020  *
1021  *	Lookup a bridge member interface by ifnet*.
1022  */
1023 static struct bridge_iflist *
1024 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1025 {
1026 	struct bridge_iflist *bif;
1027 
1028 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1029 		if (bif->bif_ifp == member_ifp)
1030 			return (bif);
1031 	}
1032 	return (NULL);
1033 }
1034 
1035 /*
1036  * bridge_lookup_member_ifinfo:
1037  *
1038  *	Lookup a bridge member interface by bridge_ifinfo.
1039  */
1040 static struct bridge_iflist *
1041 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
1042 			    struct bridge_ifinfo *bif_info)
1043 {
1044 	struct bridge_iflist *bif;
1045 
1046 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1047 		if (bif->bif_info == bif_info)
1048 			return (bif);
1049 	}
1050 	return (NULL);
1051 }
1052 
1053 /*
1054  * bridge_delete_member:
1055  *
1056  *	Delete the specified member interface.
1057  */
1058 static void
1059 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1060     int gone)
1061 {
1062 	struct ifnet *ifs = bif->bif_ifp;
1063 	struct ifnet *bifp = sc->sc_ifp;
1064 	struct bridge_ifinfo *bif_info = bif->bif_info;
1065 	struct bridge_iflist_head saved_bifs;
1066 
1067 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
1068 	KKASSERT(bif_info != NULL);
1069 
1070 	ifs->if_bridge = NULL;
1071 
1072 	/*
1073 	 * Release the bridge interface's serializer:
1074 	 * - To avoid a possible deadlock.
1075 	 * - Various sync operations will block the current thread.
1076 	 */
1077 	ifnet_deserialize_all(bifp);
1078 
1079 	if (!gone) {
1080 		switch (ifs->if_type) {
1081 		case IFT_ETHER:
1082 		case IFT_L2VLAN:
1083 			/*
1084 			 * Take the interface out of promiscuous mode.
1085 			 */
1086 			ifpromisc(ifs, 0);
1087 			bridge_mutecaps(bif_info, ifs, 0);
1088 			break;
1089 
1090 		case IFT_GIF:
1091 			break;
1092 
1093 		default:
1094 			panic("bridge_delete_member: impossible");
1095 			break;
1096 		}
1097 	}
1098 
1099 	/*
1100 	 * Remove bifs from percpu linked list.
1101 	 *
1102 	 * Removed bifs are not freed immediately; instead,
1103 	 * they are saved in saved_bifs.  They will be freed
1104 	 * once we are sure that no one is accessing them,
1105 	 * i.e. after the following netmsg_service_sync().
1106 	 */
1107 	TAILQ_INIT(&saved_bifs);
1108 	bridge_del_bif(sc, bif_info, &saved_bifs);
1109 
1110 	/*
1111 	 * Make sure that all protocol threads:
1112 	 * o  see that 'ifs' if_bridge has been changed
1113 	 * o  know that bif is removed from the percpu linked list
1114 	 */
1115 	netmsg_service_sync();
1116 
1117 	/*
1118 	 * Free the removed bifs
1119 	 */
1120 	KKASSERT(!TAILQ_EMPTY(&saved_bifs));
1121 	while ((bif = TAILQ_FIRST(&saved_bifs)) != NULL) {
1122 		TAILQ_REMOVE(&saved_bifs, bif, bif_next);
1123 		kfree(bif, M_DEVBUF);
1124 	}
1125 
1126 	/* See the comment in bridge_ioctl_stop() */
1127 	bridge_rtmsg_sync(sc);
1128 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1129 
1130 	ifnet_serialize_all(bifp);
1131 
1132 	if (bifp->if_flags & IFF_RUNNING)
1133 		bstp_initialization(sc);
1134 
1135 	/*
1136 	 * Free the bif_info after bstp_initialization(), so that
1137 	 * bridge_softc.sc_root_port will not reference a dangling
1138 	 * pointer.
1139 	 */
1140 	kfree(bif_info, M_DEVBUF);
1141 }
1142 
1143 /*
1144  * bridge_delete_span:
1145  *
1146  *	Delete the specified span interface.
1147  */
1148 static void
1149 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1150 {
1151 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1152 	    ("%s: not a span interface", __func__));
1153 
1154 	TAILQ_REMOVE(&sc->sc_iflists[mycpuid], bif, bif_next);
1155 	kfree(bif, M_DEVBUF);
1156 }
1157 
1158 static int
1159 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1160 {
1161 	struct ifnet *ifp = sc->sc_ifp;
1162 
1163 	if (ifp->if_flags & IFF_RUNNING)
1164 		return 0;
1165 
1166 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1167 	    bridge_timer, sc);
1168 
1169 	ifp->if_flags |= IFF_RUNNING;
1170 	bstp_initialization(sc);
1171 	return 0;
1172 }
1173 
1174 static int
1175 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1176 {
1177 	struct ifnet *ifp = sc->sc_ifp;
1178 
1179 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1180 		return 0;
1181 
1182 	callout_stop(&sc->sc_brcallout);
1183 
1184 	crit_enter();
1185 	lwkt_dropmsg(&sc->sc_brtimemsg.lmsg);
1186 	crit_exit();
1187 
1188 	bstp_stop(sc);
1189 
1190 	ifp->if_flags &= ~IFF_RUNNING;
1191 
1192 	ifnet_deserialize_all(ifp);
1193 
1194 	/* Let everyone know that we are stopped */
1195 	netmsg_service_sync();
1196 
1197 	/*
1198 	 * Sync the ifnetX msgports in the order we forward rtnode
1199 	 * installation messages.  This is used to make sure that
1200 	 * all rtnode installation messages sent by bridge_rtupdate()
1201 	 * during the above netmsg_service_sync() are flushed.
1202 	 */
1203 	bridge_rtmsg_sync(sc);
1204 	bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);
1205 
1206 	ifnet_serialize_all(ifp);
1207 	return 0;
1208 }
1209 
1210 static int
1211 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1212 {
1213 	struct ifbreq *req = arg;
1214 	struct bridge_iflist *bif;
1215 	struct bridge_ifinfo *bif_info;
1216 	struct ifnet *ifs, *bifp;
1217 	int error = 0;
1218 
1219 	bifp = sc->sc_ifp;
1220 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
1221 
1222 	ifs = ifunit_netisr(req->ifbr_ifsname);
1223 	if (ifs == NULL)
1224 		return (ENOENT);
1225 
1226 	/* If it's in the span list, it can't be a member. */
1227 	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1228 		if (ifs == bif->bif_ifp)
1229 			return (EBUSY);
1230 
1231 	/* Allow the first Ethernet member to define the MTU */
1232 	if (ifs->if_type != IFT_GIF) {
1233 		if (TAILQ_EMPTY(&sc->sc_iflists[mycpuid])) {
1234 			bifp->if_mtu = ifs->if_mtu;
1235 		} else if (bifp->if_mtu != ifs->if_mtu) {
1236 			if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1237 			return (EINVAL);
1238 		}
1239 	}
1240 
1241 	if (ifs->if_bridge == sc)
1242 		return (EEXIST);
1243 
1244 	if (ifs->if_bridge != NULL)
1245 		return (EBUSY);
1246 
1247 	bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1248 	bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1249 	bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1250 	bif_info->bifi_ifp = ifs;
1251 	bif_info->bifi_bond_weight = 1;
1252 
1253 	/*
1254 	 * Release the bridge interface's serializer:
1255 	 * - To avoid a possible deadlock.
1256 	 * - Various sync operations will block the current thread.
1257 	 */
1258 	ifnet_deserialize_all(bifp);
1259 
1260 	switch (ifs->if_type) {
1261 	case IFT_ETHER:
1262 	case IFT_L2VLAN:
1263 		/*
1264 		 * Place the interface into promiscuous mode.
1265 		 */
1266 		error = ifpromisc(ifs, 1);
1267 		if (error) {
1268 			ifnet_serialize_all(bifp);
1269 			goto out;
1270 		}
1271 		bridge_mutecaps(bif_info, ifs, 1);
1272 		break;
1273 
1274 	case IFT_GIF: /* :^) */
1275 		break;
1276 
1277 	default:
1278 		error = EINVAL;
1279 		ifnet_serialize_all(bifp);
1280 		goto out;
1281 	}
1282 
1283 	/*
1284 	 * Add bifs to percpu linked lists
1285 	 */
1286 	bridge_add_bif(sc, bif_info, ifs);
1287 
1288 	ifnet_serialize_all(bifp);
1289 
1290 	if (bifp->if_flags & IFF_RUNNING)
1291 		bstp_initialization(sc);
1292 	else
1293 		bstp_stop(sc);
1294 
1295 	/*
1296 	 * Everything has been set up, so let the member interface
1297 	 * deliver packets to this bridge on its input/output path.
1298 	 */
1299 	ifs->if_bridge = sc;
1300 out:
1301 	if (error) {
1302 		if (bif_info != NULL)
1303 			kfree(bif_info, M_DEVBUF);
1304 	}
1305 	return (error);
1306 }
1307 
1308 static int
1309 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1310 {
1311 	struct ifbreq *req = arg;
1312 	struct bridge_iflist *bif;
1313 
1314 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1315 	if (bif == NULL)
1316 		return (ENOENT);
1317 
1318 	bridge_delete_member(sc, bif, 0);
1319 
1320 	return (0);
1321 }
1322 
1323 static int
1324 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1325 {
1326 	struct ifbreq *req = arg;
1327 	struct bridge_iflist *bif;
1328 
1329 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1330 	if (bif == NULL)
1331 		return (ENOENT);
1332 	bridge_ioctl_fillflags(sc, bif, req);
1333 	return (0);
1334 }
1335 
1336 static void
1337 bridge_ioctl_fillflags(struct bridge_softc *sc, struct bridge_iflist *bif,
1338 		       struct ifbreq *req)
1339 {
1340 	req->ifbr_ifsflags = bif->bif_flags;
1341 	req->ifbr_state = bif->bif_state;
1342 	req->ifbr_priority = bif->bif_priority;
1343 	req->ifbr_path_cost = bif->bif_path_cost;
1344 	req->ifbr_bond_weight = bif->bif_bond_weight;
1345 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1346 	if (bif->bif_flags & IFBIF_STP) {
1347 		req->ifbr_peer_root = bif->bif_peer_root;
1348 		req->ifbr_peer_bridge = bif->bif_peer_bridge;
1349 		req->ifbr_peer_cost = bif->bif_peer_cost;
1350 		req->ifbr_peer_port = bif->bif_peer_port;
1351 		if (bstp_supersedes_port_info(sc, bif)) {
1352 			req->ifbr_designated_root = bif->bif_peer_root;
1353 			req->ifbr_designated_bridge = bif->bif_peer_bridge;
1354 			req->ifbr_designated_cost = bif->bif_peer_cost;
1355 			req->ifbr_designated_port = bif->bif_peer_port;
1356 		} else {
1357 			req->ifbr_designated_root = sc->sc_bridge_id;
1358 			req->ifbr_designated_bridge = sc->sc_bridge_id;
1359 			req->ifbr_designated_cost = bif->bif_path_cost +
1360 						    bif->bif_peer_cost;
1361 			req->ifbr_designated_port = bif->bif_port_id;
1362 		}
1363 	} else {
1364 		req->ifbr_peer_root = 0;
1365 		req->ifbr_peer_bridge = 0;
1366 		req->ifbr_peer_cost = 0;
1367 		req->ifbr_peer_port = 0;
1368 		req->ifbr_designated_root = 0;
1369 		req->ifbr_designated_bridge = 0;
1370 		req->ifbr_designated_cost = 0;
1371 		req->ifbr_designated_port = 0;
1372 	}
1373 }
1374 
1375 static int
1376 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1377 {
1378 	struct ifbreq *req = arg;
1379 	struct bridge_iflist *bif;
1380 	struct ifnet *bifp = sc->sc_ifp;
1381 
1382 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1383 	if (bif == NULL)
1384 		return (ENOENT);
1385 
1386 	if (req->ifbr_ifsflags & IFBIF_SPAN) {
1387 		/* SPAN is readonly */
1388 		return (EINVAL);
1389 	}
1390 
1391 	if (req->ifbr_ifsflags & IFBIF_STP) {
1392 		switch (bif->bif_ifp->if_type) {
1393 		case IFT_ETHER:
1394 			/* These can do spanning tree. */
1395 			break;
1396 
1397 		default:
1398 			/* Nothing else can. */
1399 			return (EINVAL);
1400 		}
1401 	}
1402 
1403 	bif->bif_flags = (bif->bif_flags & IFBIF_KEEPMASK) |
1404 			 (req->ifbr_ifsflags & ~IFBIF_KEEPMASK);
1405 	if (bifp->if_flags & IFF_RUNNING)
1406 		bstp_initialization(sc);
1407 
1408 	return (0);
1409 }
1410 
1411 static int
1412 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1413 {
1414 	struct ifbrparam *param = arg;
1415 	struct ifnet *ifp = sc->sc_ifp;
1416 
1417 	sc->sc_brtmax = param->ifbrp_csize;
1418 
1419 	ifnet_deserialize_all(ifp);
1420 	bridge_rttrim(sc);
1421 	ifnet_serialize_all(ifp);
1422 
1423 	return (0);
1424 }
1425 
1426 static int
1427 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1428 {
1429 	struct ifbrparam *param = arg;
1430 
1431 	param->ifbrp_csize = sc->sc_brtmax;
1432 
1433 	return (0);
1434 }
1435 
1436 static int
1437 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1438 {
1439 	struct bridge_control_arg *bc_arg = arg;
1440 	struct ifbifconf *bifc = arg;
1441 	struct bridge_iflist *bif;
1442 	struct ifbreq *breq;
1443 	int count, len;
1444 
1445 	count = 0;
1446 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1447 		count++;
1448 	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1449 		count++;
1450 
1451 	if (bifc->ifbic_len == 0) {
1452 		bifc->ifbic_len = sizeof(*breq) * count;
1453 		return 0;
1454 	} else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1455 		bifc->ifbic_len = 0;
1456 		return 0;
1457 	}
1458 
1459 	len = min(bifc->ifbic_len, sizeof(*breq) * count);
1460 	KKASSERT(len >= sizeof(*breq));
1461 
1462 	breq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1463 	if (breq == NULL) {
1464 		bifc->ifbic_len = 0;
1465 		return ENOMEM;
1466 	}
1467 	bc_arg->bca_kptr = breq;
1468 
1469 	count = 0;
1470 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1471 		if (len < sizeof(*breq))
1472 			break;
1473 
1474 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1475 			sizeof(breq->ifbr_ifsname));
1476 		bridge_ioctl_fillflags(sc, bif, breq);
1477 		breq++;
1478 		count++;
1479 		len -= sizeof(*breq);
1480 	}
1481 	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1482 		if (len < sizeof(*breq))
1483 			break;
1484 
1485 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1486 			sizeof(breq->ifbr_ifsname));
1487 		breq->ifbr_ifsflags = bif->bif_flags;
1488 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1489 		breq++;
1490 		count++;
1491 		len -= sizeof(*breq);
1492 	}
1493 
1494 	bifc->ifbic_len = sizeof(*breq) * count;
1495 	KKASSERT(bifc->ifbic_len > 0);
1496 
1497 	bc_arg->bca_len = bifc->ifbic_len;
1498 	bc_arg->bca_uptr = bifc->ifbic_req;
1499 	return 0;
1500 }
1501 
1502 static int
1503 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1504 {
1505 	struct bridge_control_arg *bc_arg = arg;
1506 	struct ifbaconf *bac = arg;
1507 	struct bridge_rtnode *brt;
1508 	struct ifbareq *bareq;
1509 	int count, len;
1510 
1511 	count = 0;
1512 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
1513 		count++;
1514 
1515 	if (bac->ifbac_len == 0) {
1516 		bac->ifbac_len = sizeof(*bareq) * count;
1517 		return 0;
1518 	} else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1519 		bac->ifbac_len = 0;
1520 		return 0;
1521 	}
1522 
1523 	len = min(bac->ifbac_len, sizeof(*bareq) * count);
1524 	KKASSERT(len >= sizeof(*bareq));
1525 
1526 	bareq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1527 	if (bareq == NULL) {
1528 		bac->ifbac_len = 0;
1529 		return ENOMEM;
1530 	}
1531 	bc_arg->bca_kptr = bareq;
1532 
1533 	count = 0;
1534 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1535 		struct bridge_rtinfo *bri = brt->brt_info;
1536 		time_t expire;
1537 
1538 		if (len < sizeof(*bareq))
1539 			break;
1540 
1541 		strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1542 			sizeof(bareq->ifba_ifsname));
1543 		memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1544 		expire = bri->bri_expire;
1545 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1546 		    time_uptime < expire)
1547 			bareq->ifba_expire = expire - time_uptime;
1548 		else
1549 			bareq->ifba_expire = 0;
1550 		bareq->ifba_flags = bri->bri_flags;
1551 		bareq++;
1552 		count++;
1553 		len -= sizeof(*bareq);
1554 	}
1555 
1556 	bac->ifbac_len = sizeof(*bareq) * count;
1557 	KKASSERT(bac->ifbac_len > 0);
1558 
1559 	bc_arg->bca_len = bac->ifbac_len;
1560 	bc_arg->bca_uptr = bac->ifbac_req;
1561 	return 0;
1562 }
1563 
1564 static int
1565 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1566 {
1567 	struct ifbareq *req = arg;
1568 	struct bridge_iflist *bif;
1569 	struct ifnet *ifp = sc->sc_ifp;
1570 	int error;
1571 
1572 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1573 
1574 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1575 	if (bif == NULL)
1576 		return (ENOENT);
1577 
1578 	ifnet_deserialize_all(ifp);
1579 	error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1580 			       req->ifba_flags);
1581 	ifnet_serialize_all(ifp);
1582 	return (error);
1583 }
1584 
1585 static int
1586 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1587 {
1588 	struct ifbrparam *param = arg;
1589 
1590 	sc->sc_brttimeout = param->ifbrp_ctime;
1591 
1592 	return (0);
1593 }
1594 
1595 static int
1596 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1597 {
1598 	struct ifbrparam *param = arg;
1599 
1600 	param->ifbrp_ctime = sc->sc_brttimeout;
1601 
1602 	return (0);
1603 }
1604 
1605 static int
1606 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1607 {
1608 	struct ifbareq *req = arg;
1609 	struct ifnet *ifp = sc->sc_ifp;
1610 	int error;
1611 
1612 	ifnet_deserialize_all(ifp);
1613 	error = bridge_rtdaddr(sc, req->ifba_dst);
1614 	ifnet_serialize_all(ifp);
1615 	return error;
1616 }
1617 
1618 static int
1619 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1620 {
1621 	struct ifbreq *req = arg;
1622 	struct ifnet *ifp = sc->sc_ifp;
1623 
1624 	ifnet_deserialize_all(ifp);
1625 	bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1626 	ifnet_serialize_all(ifp);
1627 
1628 	return (0);
1629 }
1630 
1631 static int
1632 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1633 {
1634 	struct ifbrparam *param = arg;
1635 
1636 	param->ifbrp_prio = sc->sc_bridge_priority;
1637 
1638 	return (0);
1639 }
1640 
1641 static int
1642 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1643 {
1644 	struct ifbrparam *param = arg;
1645 
1646 	sc->sc_bridge_priority = param->ifbrp_prio;
1647 
1648 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1649 		bstp_initialization(sc);
1650 
1651 	return (0);
1652 }
1653 
1654 static int
1655 bridge_ioctl_reinit(struct bridge_softc *sc, void *arg __unused)
1656 {
1657 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1658 		bstp_initialization(sc);
1659 	return (0);
1660 }
1661 
1662 static int
1663 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1664 {
1665 	struct ifbrparam *param = arg;
1666 
1667 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1668 
1669 	return (0);
1670 }
1671 
1672 static int
1673 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1674 {
1675 	struct ifbrparam *param = arg;
1676 
1677 	if (param->ifbrp_hellotime == 0)
1678 		return (EINVAL);
1679 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1680 
1681 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1682 		bstp_initialization(sc);
1683 
1684 	return (0);
1685 }
1686 
1687 static int
1688 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1689 {
1690 	struct ifbrparam *param = arg;
1691 
1692 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1693 
1694 	return (0);
1695 }
1696 
1697 static int
1698 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1699 {
1700 	struct ifbrparam *param = arg;
1701 
1702 	if (param->ifbrp_fwddelay == 0)
1703 		return (EINVAL);
1704 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1705 
1706 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1707 		bstp_initialization(sc);
1708 
1709 	return (0);
1710 }
1711 
1712 static int
1713 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1714 {
1715 	struct ifbrparam *param = arg;
1716 
1717 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1718 
1719 	return (0);
1720 }
1721 
1722 static int
1723 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1724 {
1725 	struct ifbrparam *param = arg;
1726 
1727 	if (param->ifbrp_maxage == 0)
1728 		return (EINVAL);
1729 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1730 
1731 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1732 		bstp_initialization(sc);
1733 
1734 	return (0);
1735 }
1736 
1737 static int
1738 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1739 {
1740 	struct ifbreq *req = arg;
1741 	struct bridge_iflist *bif;
1742 
1743 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1744 	if (bif == NULL)
1745 		return (ENOENT);
1746 
1747 	bif->bif_priority = req->ifbr_priority;
1748 
1749 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1750 		bstp_initialization(sc);
1751 
1752 	return (0);
1753 }
1754 
1755 static int
1756 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1757 {
1758 	struct ifbreq *req = arg;
1759 	struct bridge_iflist *bif;
1760 
1761 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1762 	if (bif == NULL)
1763 		return (ENOENT);
1764 
1765 	bif->bif_path_cost = req->ifbr_path_cost;
1766 
1767 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1768 		bstp_initialization(sc);
1769 
1770 	return (0);
1771 }
1772 
1773 static int
1774 bridge_ioctl_sifbondwght(struct bridge_softc *sc, void *arg)
1775 {
1776 	struct ifbreq *req = arg;
1777 	struct bridge_iflist *bif;
1778 
1779 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1780 	if (bif == NULL)
1781 		return (ENOENT);
1782 
1783 	bif->bif_bond_weight = req->ifbr_bond_weight;
1784 
1785 	/* no reinit needed */
1786 
1787 	return (0);
1788 }
1789 
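/*
 * bridge_ioctl_addspan:
 *
 *	Add a span port to the bridge.  The interface must not already
 *	be a span port or a bridge member, and must be an Ethernet-like
 *	interface (IFT_ETHER, IFT_GIF or IFT_L2VLAN).
 */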
1790 static int
1791 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1792 {
1793 	struct ifbreq *req = arg;
1794 	struct bridge_iflist *bif;
1795 	struct ifnet *ifs;
1796 	struct bridge_ifinfo *bif_info;
1797 
1798 	ifs = ifunit_netisr(req->ifbr_ifsname);
1799 	if (ifs == NULL)
1800 		return (ENOENT);
1801 
1802 	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1803 		if (ifs == bif->bif_ifp)
1804 			return (EBUSY);
1805 
1806 	if (ifs->if_bridge != NULL)
1807 		return (EBUSY);
1808 
1809 	switch (ifs->if_type) {
1810 	case IFT_ETHER:
1811 	case IFT_GIF:
1812 	case IFT_L2VLAN:
1813 		break;
1814 
1815 	default:
1816 		return (EINVAL);
1817 	}
1818 
1819 	/*
1820 	 * bif_info is needed for bif_flags
1821 	 */
1822 	bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1823 	bif_info->bifi_ifp = ifs;
1824 
1825 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1826 	bif->bif_ifp = ifs;
1827 	bif->bif_info = bif_info;
1828 	bif->bif_flags = IFBIF_SPAN;
1829 	/* NOTE: beyond backing bif_flags, a span bif does not use bridge_ifinfo */
1830 
1831 	TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1832 
1833 	sc->sc_span = 1;
1834 
1835 	return (0);
1836 }
1837 
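/*
 * bridge_ioctl_delspan:
 *
 *	Remove a span port from the bridge; clear sc_span once the
 *	span list becomes empty.
 */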
1838 static int
1839 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1840 {
1841 	struct ifbreq *req = arg;
1842 	struct bridge_iflist *bif;
1843 	struct ifnet *ifs;
1844 
1845 	ifs = ifunit_netisr(req->ifbr_ifsname);
1846 	if (ifs == NULL)
1847 		return (ENOENT);
1848 
1849 	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1850 		if (ifs == bif->bif_ifp)
1851 			break;
1852 
1853 	if (bif == NULL)
1854 		return (ENOENT);
1855 
1856 	bridge_delete_span(sc, bif);
1857 
1858 	if (TAILQ_EMPTY(&sc->sc_spanlist))
1859 		sc->sc_span = 0;
1860 
1861 	return (0);
1862 }
1863 
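/*
 * bridge_ifdetach_dispatch:
 *
 *	Netmsg handler, run on the config port, which removes a
 *	detaching interface from its bridge or from any bridge's
 *	span list.
 */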
1864 static void
1865 bridge_ifdetach_dispatch(netmsg_t msg)
1866 {
1867 	struct ifnet *ifp, *bifp;
1868 	struct bridge_softc *sc;
1869 	struct bridge_iflist *bif;
1870 
1871 	ifp = msg->lmsg.u.ms_resultp;
1872 	sc = ifp->if_bridge;
1873 
1874 	/* Check if the interface is a bridge member */
1875 	if (sc != NULL) {
1876 		bifp = sc->sc_ifp;
1877 
1878 		ifnet_serialize_all(bifp);
1879 
1880 		bif = bridge_lookup_member_if(sc, ifp);
1881 		if (bif != NULL) {
1882 			bridge_delete_member(sc, bif, 1);
1883 		} else {
1884 			/* XXX Why would bif be NULL here? */
1885 		}
1886 
1887 		ifnet_deserialize_all(bifp);
1888 		goto reply;
1889 	}
1890 
1891 	crit_enter();	/* XXX MP */
1892 
1893 	/* Check if the interface is a span port */
1894 	LIST_FOREACH(sc, &bridge_list, sc_list) {
1895 		bifp = sc->sc_ifp;
1896 
1897 		ifnet_serialize_all(bifp);
1898 
1899 		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
1900 			if (ifp == bif->bif_ifp) {
1901 				bridge_delete_span(sc, bif);
1902 				break;
1903 			}
1904 
1905 		ifnet_deserialize_all(bifp);
1906 	}
1907 
1908 	crit_exit();
1909 
1910 reply:
1911 	lwkt_replymsg(&msg->lmsg, 0);
1912 }
1913 
1914 /*
1915  * bridge_ifdetach:
1916  *
1917  *	Detach an interface from a bridge.  Called when a member
1918  *	interface is detaching.
1919  */
1920 static void
1921 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1922 {
1923 	struct netmsg_base msg;
1924 
1925 	netmsg_init(&msg, NULL, &curthread->td_msgport,
1926 		    0, bridge_ifdetach_dispatch);
1927 	msg.lmsg.u.ms_resultp = ifp;
1928 
1929 	lwkt_domsg(BRIDGE_CFGPORT, &msg.lmsg, 0);
1930 }
1931 
1932 /*
1933  * bridge_init:
1934  *
1935  *	Initialize a bridge interface.
1936  */
1937 static void
1938 bridge_init(void *xsc)
1939 {
1940 	bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1941 }
1942 
1943 /*
1944  * bridge_stop:
1945  *
1946  *	Stop the bridge interface.
1947  */
1948 static void
1949 bridge_stop(struct ifnet *ifp)
1950 {
1951 	bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1952 }
1953 
1954 /*
1955  * Returns TRUE if the packet is being sent 'from us'... from our bridge
1956  * interface or from any member of our bridge interface.  This is used
1957  * later on to force the MAC to be the MAC of our bridge interface.
1958  */
1959 static int
1960 bridge_from_us(struct bridge_softc *sc, struct ether_header *eh)
1961 {
1962 	struct bridge_iflist *bif;
1963 
1964 	if (memcmp(eh->ether_shost, IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN) == 0)
1965 		return (1);
1966 
1967 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1968 		if (memcmp(eh->ether_shost, IF_LLADDR(bif->bif_ifp),
1969 			   ETHER_ADDR_LEN) == 0)
1970 	        {
1971 			return (1);
1972 		}
1973 	}
1974 	return (0);
1975 }
1976 
1977 /*
1978  * bridge_enqueue:
1979  *
1980  *	Enqueue a packet on a bridge member interface.
1981  *
1982  */
1983 void
1984 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1985 {
1986 	struct netmsg_packet *nmp;
1987 
1988 	mbuftrackid(m, 64);
1989 
1990 	nmp = &m->m_hdr.mh_netmsg;
1991 	netmsg_init(&nmp->base, NULL, &netisr_apanic_rport,
1992 		    0, bridge_enqueue_handler);
1993 	nmp->nm_packet = m;
1994 	nmp->base.lmsg.u.ms_resultp = dst_ifp;
1995 
1996 	lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), &nmp->base.lmsg);
1997 }
1998 
1999 /*
2000  * After looking up dst_if in our forwarding table we still have to
2001  * deal with channel bonding.  Find the best interface in the bonding set.
2002  */
2003 static struct ifnet *
2004 bridge_select_unicast(struct bridge_softc *sc, struct ifnet *dst_if,
2005 		      int from_blocking, struct mbuf *m)
2006 {
2007 	struct bridge_iflist *bif, *alt_bif, *nbif;
2008 	int alt_priority, alt_count;
2009 	uint8_t alt_state;
2010 
2011 	/*
2012 	 * Unicast, kinda replicates the output side of bridge_output().
2013 	 *
2014 	 * Even though this is a uni-cast packet we may have to select
2015 	 * an interface from a bonding set.
2016 	 */
2017 	bif = bridge_lookup_member_if(sc, dst_if);
2018 	if (bif == NULL) {
2019 		/* Not a member of the bridge (anymore?) */
2020 		return NULL;
2021 	}
2022 
2023 	/*
2024 	 * Send directly if the interface is not part of the spanning
2025 	 * tree.
2026 	 */
2027 	if ((bif->bif_flags & IFBIF_STP) == 0) {
2028 		goto sendunicast;
2029 	}
2030 
2031 	/*
2032 	 * If STP is enabled on the target we are an equal opportunity
2033 	 * employer and do not necessarily output to dst_if.  Instead
2034 	 * scan available links with the same MAC as the current dst_if
2035 	 * and choose the best one.
2036 	 *
2037 	 * We also need to do this because arp or other cached entries might
2038 	 * be tagged to the wrong port after a fail-over.  We don't want to
2039 	 * route packets to dead ports when perfectly good ones exist.
2040 	 *
2041 	 * If LINK2 is set on the bridge, any interfaces in the same bonding
2042 	 * set as dst_if with the same priority will be round-robined.  If
2043 	 * different priorities, only the highest priority is chosen.  In
2044 	 * this case links in a STP FORWARDING or BONDED state are allowed
2045 	 * for unicast packets.
2046 	 */
2047 	alt_state = BSTP_IFSTATE_LEARNING;
2048 	alt_bif = NULL;
2049 	alt_priority = 0;
2050 	alt_count = 0;
2051 
2052 	TAILQ_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2053 		/*
2054 		 * Ignore interfaces not in the same bonding set as dst_if
2055 		 * or which are not running.
2056 		 */
2057 		if (memcmp(IF_LLADDR(bif->bif_ifp), IF_LLADDR(dst_if),
2058 			   ETHER_ADDR_LEN) != 0)
2059 		{
2060 			continue;
2061 		}
2062 
2063 		if ((bif->bif_ifp->if_flags & IFF_RUNNING) == 0)
2064 			continue;
2065 
2066 		/*
2067 		 * NOTE: We allow transmissions through a BLOCKING or
2068 		 *	 LEARNING interface only as a last resort; both are
2069 		 *	 DISALLOWED if the packet arrived on a BLOCKING interface.
2070 		 *
2071 		 * NOTE: If we send a packet through a learning
2072 		 *	 interface the receiving end (if also in
2073 		 *	 LEARNING) will throw it away, so this is
2074 		 *	 the ultimate last resort.
2075 		 */
2076 		switch(bif->bif_state) {
2077 		case BSTP_IFSTATE_LEARNING:
2078 			if (from_blocking == 0 &&
2079 			    alt_state == BSTP_IFSTATE_LEARNING &&
2080 			    bif->bif_priority > alt_priority)
2081 			{
2082 				alt_priority = bif->bif_priority;
2083 				alt_bif = bif;
2084 			}
2085 			break;
2086 		case BSTP_IFSTATE_BLOCKING:
2087 			if (from_blocking == 0 &&
2088 			    (alt_state == BSTP_IFSTATE_LEARNING ||
2089 			     (alt_state == BSTP_IFSTATE_BLOCKING &&
2090 			      bif->bif_priority > alt_priority)))
2091 			{
2092 				alt_state = BSTP_IFSTATE_BLOCKING;
2093 				alt_priority = bif->bif_priority;
2094 				alt_bif = bif;
2095 			}
2096 			break;
2097 		case BSTP_IFSTATE_L1BLOCKING:
2098 		case BSTP_IFSTATE_LISTENING:
2099 		case BSTP_IFSTATE_DISABLED:
2100 			break;
2101 		default:
2102 			/*
2103 			 * Select the best interface in the FORWARDING
2104 			 * set (or BONDING, but there shouldn't be any
2105 			 * when LINK2 is not set).
2106 			 */
2107 			if (alt_state != BSTP_IFSTATE_BONDED ||
2108 			    bif->bif_priority > alt_priority)
2109 			{
2110 				alt_state = BSTP_IFSTATE_BONDED;
2111 				alt_priority = bif->bif_priority;
2112 				alt_bif = bif;
2113 				alt_count = 0;
2114 			} else if (alt_state == BSTP_IFSTATE_BONDED &&
2115 				   bif->bif_priority == alt_priority)
2116 			{
2117 				/*
2118 				 * Round-robin
2119 				 */
2120 				++alt_count;
2121 			}
2122 			break;
2123 		}
2124 	}
2125 
2126 	/*
2127 	 * If bonding is enabled (LINK2) and there were multiple interfaces
2128 	 * at the selected priority level, count packets and switch the
2129 	 * output interface.
2130 	 *
2131 	 * XXX need to use the Toeplitz hash or something like that instead
2132 	 * of a dumb packet round-robin.
2133 	 */
2134 	if (alt_count && (sc->sc_ifp->if_flags & IFF_LINK2)) {
2135 		if (++alt_bif->bif_bond_count >= alt_bif->bif_bond_weight) {
2136 			alt_bif->bif_bond_count = 0;
2137 			TAILQ_REMOVE(&sc->sc_iflists[mycpuid],
2138 				     alt_bif, bif_next);
2139 			TAILQ_INSERT_TAIL(
2140 				     &sc->sc_iflists[mycpuid],
2141 				     alt_bif, bif_next);
2142 		}
2143 	}
2144 
2145 	/*
2146 	 * After the loop, alt_bif is the member we selected.  alt_bif can
2147 	 * be NULL.
2148 	 */
2149 	if (alt_bif)
2150 		dst_if = alt_bif->bif_ifp;
2151 
2152 sendunicast:
2153 	/*
2154 	 * At this point, we're dealing with a unicast frame
2155 	 * going to a different interface.
2156 	 */
2157 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
2158 		dst_if = NULL;
2159 	return (dst_if);
2160 }
2161 
2162 
2163 /*
2164  * bridge_output
2165  *
2166  * Issue locally originated (not forwarded) packet to the bridge.  ifp
2167  * is the nominal interface the system route table is trying to send
2168  * it to, but we get here because that interface is part of the bridge
2169  * so really the packet is being sent to the whole bridge.
2170  *
2171  * The mbuf has the Ethernet header already attached.  We must
2172  * enqueue or free the mbuf before returning.
2173  */
2174 static int
2175 bridge_output(struct ifnet *ifp, struct mbuf *m)
2176 {
2177 	struct bridge_softc *sc = ifp->if_bridge;
2178 	struct bridge_iflist *bif, *nbif;
2179 	struct ether_header *eh;
2180 	struct ifnet *dst_if, *alt_if, *bifp;
2181 	int from_us;
2182 	int alt_priority;
2183 	uint8_t alt_state;
2184 	struct mbuf *mc;
2185 	int used;
2186 	int found;
2187 
2188 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2189 	ASSERT_NETISR_NCPUS(mycpuid);
2190 	mbuftrackid(m, 65);
2191 
2192 	/*
2193 	 * Make sure that we are still a member of a bridge interface.
2194 	 */
2195 	if (sc == NULL) {
2196 		m_freem(m);
2197 		return (0);
2198 	}
2199 	bifp = sc->sc_ifp;
2200 
2201 	/*
2202 	 * Acquire header
2203 	 */
2204 	if (m->m_len < ETHER_HDR_LEN) {
2205 		m = m_pullup(m, ETHER_HDR_LEN);
2206 		if (m == NULL) {
2207 			IFNET_STAT_INC(bifp, oerrors, 1);
2208 			return (0);
2209 		}
2210 	}
2211 	eh = mtod(m, struct ether_header *);
2212 	from_us = bridge_from_us(sc, eh);
2213 
2214 	/*
2215 	 * If bridge is down, but the original output interface is up,
2216 	 * go ahead and send out that interface.  Otherwise, the packet
2217 	 * is dropped below.
2218 	 */
2219 	if ((bifp->if_flags & IFF_RUNNING) == 0) {
2220 		dst_if = ifp;
2221 		goto sendunicast;
2222 	}
2223 
2224 	/*
2225 	 * If the packet is a broadcast or multicast, or we don't know a better
2226 	 * way to get there, send to all interfaces except the originating one.
2227 	 */
2228 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
2229 		dst_if = NULL;
2230 	else
2231 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2232 
2233 	if (dst_if)
2234 		goto sendunicast;
2235 
2236 	used = 0;
2237 	found = 0;
2238 
2239 	if (sc->sc_span)
2240 		bridge_span(sc, m);
2241 
2242 	alt_if = NULL;
2243 	alt_priority = 0;
2244 	alt_state = BSTP_IFSTATE_LEARNING;
2245 
2246 	TAILQ_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2247 		dst_if = bif->bif_ifp;
2248 
2249 		/*
2250 		 * Ignore interfaces marked down
2251 		 *
2252 		 * NOTE: Since the packet originated on this machine, the
2253 		 *	 original interface the system tried to send it to
2254 		 *	 (ifp), which is part of the bridge, is not treated
2255 		 *	 specially versus other interfaces on the bridge.
2256 		 */
2257 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2258 			continue;
2259 
2260 		/*
2261 		 * Issue to all FORWARDING STP-enabled interfaces and
2262 		 * to all non-STP interfaces.  Keep track of a possible
2263 		 * backup to a BONDED, BLOCKING, or LEARNING interface
2264 		 * (in that priority) in case no other interfaces are found.
2265 		 */
2266 		if (bif->bif_flags & IFBIF_STP) {
2267 			switch (bif->bif_state) {
2268 			case BSTP_IFSTATE_BONDED:
2269 				if (alt_state != BSTP_IFSTATE_BONDED ||
2270 				    bif->bif_priority > alt_priority)
2271 				{
2272 					alt_state = BSTP_IFSTATE_BONDED;
2273 					alt_priority = bif->bif_priority;
2274 					alt_if = bif->bif_ifp;
2275 				}
2276 				continue;
2277 			case BSTP_IFSTATE_BLOCKING:
2278 				if (alt_state == BSTP_IFSTATE_LEARNING ||
2279 				    (alt_state == BSTP_IFSTATE_BLOCKING &&
2280 				     bif->bif_priority > alt_priority))
2281 				{
2282 					alt_state = BSTP_IFSTATE_BLOCKING;
2283 					alt_priority = bif->bif_priority;
2284 					alt_if = bif->bif_ifp;
2285 				}
2286 				continue;
2287 			case BSTP_IFSTATE_LEARNING:
2288 				if (alt_state == BSTP_IFSTATE_LEARNING &&
2289 				    bif->bif_priority > alt_priority)
2290 				{
2291 					alt_priority = bif->bif_priority;
2292 					alt_if = bif->bif_ifp;
2293 				}
2294 				continue;
2295 			case BSTP_IFSTATE_L1BLOCKING:
2296 			case BSTP_IFSTATE_LISTENING:
2297 			case BSTP_IFSTATE_DISABLED:
2298 				/*
2299 				 * Ignore interfaces in these states
2300 				 */
2301 				continue;
2302 			default:
2303 				/* FORWARDING */
2304 				break;
2305 			}
2306 		}
2307 
2308 		/*
2309 		 * Copy the packet to dstif
2310 		 */
2311 		KKASSERT(used == 0);
2312 		if (TAILQ_NEXT(bif, bif_next) == NULL) {
2313 			used = 1;
2314 			mc = m;
2315 		} else {
2316 			mc = m_copypacket(m, M_NOWAIT);
2317 			if (mc == NULL) {
2318 				IFNET_STAT_INC(bifp, oerrors, 1);
2319 				continue;
2320 			}
2321 		}
2322 
2323 		/*
2324 		 * If the packet is 'from' us override ether_shost.
2325 		 */
2326 		bridge_handoff(sc, dst_if, mc, from_us);
2327 		found = 1;
2328 
2329 		if (nbif != NULL && !nbif->bif_onlist) {
2330 			KKASSERT(bif->bif_onlist);
2331 			nbif = TAILQ_NEXT(bif, bif_next);
2332 		}
2333 	}
2334 
2335 	/*
2336 	 * If we couldn't find anything use the backup interface
2337 	 * if we have one.
2338 	 */
2339 	if (found == 0 && alt_if) {
2340 		KKASSERT(used == 0);
2341 		mc = m;
2342 		used = 1;
2343 		bridge_handoff(sc, alt_if, mc, from_us);
2344 	}
2345 
2346 	if (used == 0)
2347 		m_freem(m);
2348 	return (0);
2349 
2350 	/*
2351 	 * Unicast
2352 	 */
2353 sendunicast:
2354 	dst_if = bridge_select_unicast(sc, dst_if, 0, m);
2355 
2356 	if (sc->sc_span)
2357 		bridge_span(sc, m);
2358 	if (dst_if == NULL)
2359 		m_freem(m);
2360 	else
2361 		bridge_handoff(sc, dst_if, m, from_us);
2362 	return (0);
2363 }
2364 
2365 /*
2366  * Returns the bridge interface associated with an ifc.
2367  * Pass ifp->if_bridge (must not be NULL).  Used by the ARP
2368  * code to supply the bridge for the is-at info, making
2369  * the bridge responsible for matching local addresses.
2370  *
2371  * Without this the ARP code will supply bridge member interfaces
2372  * for the is-at, which makes it difficult for the bridge to fail over
2373  * interfaces (among other things).
2374  */
2375 static struct ifnet *
2376 bridge_interface(void *if_bridge)
2377 {
2378 	struct bridge_softc *sc = if_bridge;
2379 	return (sc->sc_ifp);
2380 }
2381 
2382 /*
2383  * bridge_start:
2384  *
2385  *	Start output on a bridge.
2386  */
2387 static void
2388 bridge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2389 {
2390 	struct bridge_softc *sc = ifp->if_softc;
2391 
2392 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2393 	ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);
2394 	ASSERT_NETISR_NCPUS(mycpuid);
2395 
2396 	ifsq_set_oactive(ifsq);
2397 	for (;;) {
2398 		struct ifnet *dst_if = NULL;
2399 		struct ether_header *eh;
2400 		struct mbuf *m;
2401 
2402 		m = ifsq_dequeue(ifsq);
2403 		if (m == NULL)
2404 			break;
2405 		mbuftrackid(m, 75);
2406 
2407 		if (m->m_len < sizeof(*eh)) {
2408 			m = m_pullup(m, sizeof(*eh));
2409 			if (m == NULL) {
2410 				IFNET_STAT_INC(ifp, oerrors, 1);
2411 				continue;
2412 			}
2413 		}
2414 		eh = mtod(m, struct ether_header *);
2415 
2416 		BPF_MTAP(ifp, m);
2417 		IFNET_STAT_INC(ifp, opackets, 1);
2418 
2419 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
2420 			dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2421 
2422 		/*
2423 		 * Multicast or broadcast
2424 		 */
2425 		if (dst_if == NULL) {
2426 			bridge_start_bcast(sc, m);
2427 			continue;
2428 		}
2429 
2430 		/*
2431 		 * Unicast
2432 		 */
2433 		dst_if = bridge_select_unicast(sc, dst_if, 0, m);
2434 
2435 		if (dst_if == NULL)
2436 			m_freem(m);
2437 		else
2438 			bridge_enqueue(dst_if, m);
2439 	}
2440 	ifsq_clr_oactive(ifsq);
2441 }
2442 
2443 /*
2444  * bridge_forward:
2445  *
2446  *	Forward packets received on a bridge interface via the input
2447  *	path.
2448  *
2449  *	This implements the forwarding function of the bridge.
2450  */
2451 static void
2452 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2453 {
2454 	struct bridge_iflist *bif;
2455 	struct ifnet *src_if, *dst_if, *ifp;
2456 	struct ether_header *eh;
2457 	int from_blocking;
2458 
2459 	mbuftrackid(m, 66);
2460 	src_if = m->m_pkthdr.rcvif;
2461 	ifp = sc->sc_ifp;
2462 
2463 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2464 
2465 	/*
2466 	 * packet coming in on the bridge is also going out on the bridge,
2467 	 * but ether code won't adjust output stats for the bridge because
2468 	 * we are changing the interface to something else.
2469 	 */
2470 	IFNET_STAT_INC(ifp, opackets, 1);
2471 	IFNET_STAT_INC(ifp, obytes, m->m_pkthdr.len);
2472 
2473 	/*
2474 	 * Look up the bridge_iflist.
2475 	 */
2476 	bif = bridge_lookup_member_if(sc, src_if);
2477 	if (bif == NULL) {
2478 		/* Interface is not a bridge member (anymore?) */
2479 		m_freem(m);
2480 		return;
2481 	}
2482 
2483 	/*
2484 	 * In spanning tree mode receiving a packet from an interface
2485 	 * in a BLOCKING state is allowed, it could be a member of last
2486 	 * resort from the sender's point of view, but forwarding it is
2487 	 * not allowed.
2488 	 *
2489 	 * The sender's spanning tree will eventually sync up and the
2490 	 * sender will go into a BLOCKING state too (but this still may be
2491 	 * an interface of last resort during state changes).
2492 	 */
2493 	if (bif->bif_flags & IFBIF_STP) {
2494 		switch (bif->bif_state) {
2495 		case BSTP_IFSTATE_L1BLOCKING:
2496 		case BSTP_IFSTATE_LISTENING:
2497 		case BSTP_IFSTATE_DISABLED:
2498 			m_freem(m);
2499 			return;
2500 		default:
2501 			/* learning, blocking, bonded, forwarding */
2502 			break;
2503 		}
2504 		from_blocking = (bif->bif_state == BSTP_IFSTATE_BLOCKING);
2505 	} else {
2506 		from_blocking = 0;
2507 	}
2508 
2509 	eh = mtod(m, struct ether_header *);
2510 
2511 	/*
2512 	 * If the interface is learning, and the source
2513 	 * address is valid and not multicast, record
2514 	 * the address.
2515 	 */
2516 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2517 	    from_blocking == 0 &&
2518 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2519 	    (eh->ether_shost[0] == 0 &&
2520 	     eh->ether_shost[1] == 0 &&
2521 	     eh->ether_shost[2] == 0 &&
2522 	     eh->ether_shost[3] == 0 &&
2523 	     eh->ether_shost[4] == 0 &&
2524 	     eh->ether_shost[5] == 0) == 0)
2525 	{
2526 		bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2527 	}
2528 
2529 	/*
2530 	 * Don't forward from an interface in the listening or learning
2531 	 * state.  That is, in the learning state we learn information
2532 	 * but we throw away the packets.
2533 	 *
2534 	 * We let through packets on interfaces in the blocking state.
2535 	 * The blocking state is applicable to the send side, not the
2536 	 * receive side.
2537 	 */
2538 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
2539 	    (bif->bif_state == BSTP_IFSTATE_LISTENING ||
2540 	     bif->bif_state == BSTP_IFSTATE_LEARNING)) {
2541 		m_freem(m);
2542 		return;
2543 	}
2544 
2545 	/*
2546 	 * At this point, the port either doesn't participate
2547 	 * in spanning tree or it is in the forwarding state.
2548 	 */
2549 
2550 	/*
2551 	 * If the packet is unicast, destined for someone on
2552 	 * "this" side of the bridge, drop it.
2553 	 *
2554 	 * src_if implies the entire bonding set so we have to compare MAC
2555 	 * addresses and not just if pointers.
2556 	 */
2557 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2558 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2559 		if (dst_if && memcmp(IF_LLADDR(src_if), IF_LLADDR(dst_if),
2560 				     ETHER_ADDR_LEN) == 0) {
2561 			m_freem(m);
2562 			return;
2563 		}
2564 	} else {
2565 		/* ...forward it to all interfaces. */
2566 		IFNET_STAT_INC(ifp, imcasts, 1);
2567 		dst_if = NULL;
2568 	}
2569 
2570 	/*
2571 	 * Broadcast if we do not have forwarding information.  However, if
2572 	 * we received the packet on a blocking interface we do not do this
2573 	 * (unless you really want to blow up your network).
2574 	 */
2575 	if (dst_if == NULL) {
2576 		if (from_blocking)
2577 			m_freem(m);
2578 		else
2579 			bridge_broadcast(sc, src_if, m);
2580 		return;
2581 	}
2582 
2583 	dst_if = bridge_select_unicast(sc, dst_if, from_blocking, m);
2584 
2585 	if (dst_if == NULL) {
2586 		m_freem(m);
2587 		return;
2588 	}
2589 
2590 	if (inet_pfil_hook.ph_hashooks > 0
2591 #ifdef INET6
2592 	    || inet6_pfil_hook.ph_hashooks > 0
2593 #endif
2594 	    ) {
2595 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2596 			return;
2597 		if (m == NULL)
2598 			return;
2599 
2600 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2601 			return;
2602 		if (m == NULL)
2603 			return;
2604 	}
2605 	bridge_handoff(sc, dst_if, m, 0);
2606 }
2607 
2608 /*
2609  * bridge_input:
2610  *
2611  *	Receive input from a member interface.  Queue the packet for
2612  *	bridging if it is not for us.
2613  */
2614 static struct mbuf *
2615 bridge_input(struct ifnet *ifp, struct mbuf *m)
2616 {
2617 	struct bridge_softc *sc = ifp->if_bridge;
2618 	struct bridge_iflist *bif;
2619 	struct ifnet *bifp, *new_ifp;
2620 	struct ether_header *eh;
2621 	struct mbuf *mc, *mc2;
2622 
2623 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2624 	ASSERT_NETISR_NCPUS(mycpuid);
2625 	mbuftrackid(m, 67);
2626 
2627 	/*
2628 	 * Make sure that we are still a member of a bridge interface.
2629 	 */
2630 	if (sc == NULL)
2631 		return m;
2632 
2633 	new_ifp = NULL;
2634 	bifp = sc->sc_ifp;
2635 
2636 	if ((bifp->if_flags & IFF_RUNNING) == 0)
2637 		goto out;
2638 
2639 	/*
2640 	 * Implement support for bridge monitoring.  If this flag has been
2641 	 * set on this interface, discard the packet once we push it through
2642 	 * the bpf(4) machinery, but before we do, increment various counters
2643 	 * associated with this bridge.
2644 	 */
2645 	if (bifp->if_flags & IFF_MONITOR) {
2646 		/*
2647 		 * Change input interface to this bridge
2648 		 *
2649 		 * Update bridge's ifnet statistics
2650 		 */
2651 		m->m_pkthdr.rcvif = bifp;
2652 
2653 		BPF_MTAP(bifp, m);
2654 		IFNET_STAT_INC(bifp, ipackets, 1);
2655 		IFNET_STAT_INC(bifp, ibytes, m->m_pkthdr.len);
2656 		if (m->m_flags & (M_MCAST | M_BCAST))
2657 			IFNET_STAT_INC(bifp, imcasts, 1);
2658 
2659 		m_freem(m);
2660 		m = NULL;
2661 		goto out;
2662 	}
2663 
2664 	/*
2665 	 * Handle the ether_header
2666 	 *
2667 	 * In all cases if the packet is destined for us via our MAC
2668 	 * we must clear BRIDGE_MBUF_TAGGED to ensure that we don't
2669 	 * repeat the source MAC out the same interface.
2670 	 *
2671 	 * This first test against our bridge MAC is the fast-path.
2672 	 *
2673 	 * NOTE!  The bridge interface can serve as an endpoint for
2674 	 *	  communication but normally there are no IPs associated
2675 	 *	  with it so you cannot route through it.  Instead what
2676 	 *	  you do is point your default route *THROUGH* the bridge
2677 	 *	  to the actual default router for one of the bridged spaces.
2678 	 *
2679 	 *	  Another possibility is to put all your IP specifications
2680 	 *	  on the bridge instead of on the individual interfaces.  If
2681 	 *	  you do this it should be possible to use the bridge as an
2682 	 *	  end point and route (rather than switch) through it using
2683 	 *	  the default route or ipfw forwarding rules.
2684 	 */
2685 
2686 	/*
2687 	 * Acquire header
2688 	 */
2689 	if (m->m_len < ETHER_HDR_LEN) {
2690 		m = m_pullup(m, ETHER_HDR_LEN);
2691 		if (m == NULL)
2692 			goto out;
2693 	}
2694 	eh = mtod(m, struct ether_header *);
2695 	m->m_pkthdr.fw_flags |= BRIDGE_MBUF_TAGGED;
2696 	bcopy(eh->ether_shost, m->m_pkthdr.ether_br_shost, ETHER_ADDR_LEN);
2697 
2698 	if ((bridge_debug & 1) &&
2699 	    (ntohs(eh->ether_type) == ETHERTYPE_ARP ||
2700 	    ntohs(eh->ether_type) == ETHERTYPE_REVARP)) {
2701 		kprintf("%02x:%02x:%02x:%02x:%02x:%02x "
2702 			"%02x:%02x:%02x:%02x:%02x:%02x type %04x "
2703 			"lla %02x:%02x:%02x:%02x:%02x:%02x\n",
2704 			eh->ether_dhost[0],
2705 			eh->ether_dhost[1],
2706 			eh->ether_dhost[2],
2707 			eh->ether_dhost[3],
2708 			eh->ether_dhost[4],
2709 			eh->ether_dhost[5],
2710 			eh->ether_shost[0],
2711 			eh->ether_shost[1],
2712 			eh->ether_shost[2],
2713 			eh->ether_shost[3],
2714 			eh->ether_shost[4],
2715 			eh->ether_shost[5],
2716 			eh->ether_type,
2717 			((u_char *)IF_LLADDR(bifp))[0],
2718 			((u_char *)IF_LLADDR(bifp))[1],
2719 			((u_char *)IF_LLADDR(bifp))[2],
2720 			((u_char *)IF_LLADDR(bifp))[3],
2721 			((u_char *)IF_LLADDR(bifp))[4],
2722 			((u_char *)IF_LLADDR(bifp))[5]
2723 		);
2724 	}
2725 
2726 	/*
2727 	 * If the packet is for us, set the packets source as the
2728 	 * bridge, and return the packet back to ifnet.if_input for
2729 	 * local processing.
2730 	 */
2731 	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2732 		/*
2733 		 * We must still record the source interface in our
2734 		 * addr cache, otherwise our bridge won't know where
2735 		 * to send responses and will broadcast them.
2736 		 */
2737 		bif = bridge_lookup_member_if(sc, ifp);
2738 		if ((bif->bif_flags & IFBIF_LEARNING) &&
2739 		    ((bif->bif_flags & IFBIF_STP) == 0 ||
2740 		     bif->bif_state != BSTP_IFSTATE_BLOCKING))
2741 		{
2742 			bridge_rtupdate(sc, eh->ether_shost,
2743 					ifp, IFBAF_DYNAMIC);
2744 		}
2745 
2746 		/*
2747 		 * Perform pfil hooks.
2748 		 */
2749 		m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
2750 		KASSERT(bifp->if_bridge == NULL,
2751 			("loop created in bridge_input"));
2752 		if (pfil_member != 0) {
2753 			if (inet_pfil_hook.ph_hashooks > 0
2754 #ifdef INET6
2755 			    || inet6_pfil_hook.ph_hashooks > 0
2756 #endif
2757 			) {
2758 				if (bridge_pfil(&m, NULL, ifp, PFIL_IN) != 0)
2759 					goto out;
2760 				if (m == NULL)
2761 					goto out;
2762 			}
2763 		}
2764 
2765 		/*
2766 		 * Set new_ifp and skip to the end.  This will trigger code
2767 		 * to reinput the packet and run it into our stack.
2768 		 */
2769 		new_ifp = bifp;
2770 		goto out;
2771 	}
2772 
2773 	/*
2774 	 * Tap all packets arriving on the bridge, whether they are
2775 	 * local destinations or not.  In is in.
2776 	 *
2777 	 * Update bridge's ifnet statistics
2778 	 */
2779 	BPF_MTAP(bifp, m);
2780 	IFNET_STAT_INC(bifp, ipackets, 1);
2781 	IFNET_STAT_INC(bifp, ibytes, m->m_pkthdr.len);
2782 	if (m->m_flags & (M_MCAST | M_BCAST))
2783 		IFNET_STAT_INC(bifp, imcasts, 1);
2784 
2785 	bif = bridge_lookup_member_if(sc, ifp);
2786 	if (bif == NULL)
2787 		goto out;
2788 
2789 	if (sc->sc_span)
2790 		bridge_span(sc, m);
2791 
2792 	if (m->m_flags & (M_BCAST | M_MCAST)) {
2793 		/*
2794 		 * Tap off 802.1D packets; they do not get forwarded.
2795 		 */
2796 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2797 			    ETHER_ADDR_LEN) == 0) {
2798 			ifnet_serialize_all(bifp);
2799 			bstp_input(sc, bif, m);
2800 			ifnet_deserialize_all(bifp);
2801 
2802 			/* m is freed by bstp_input */
2803 			m = NULL;
2804 			goto out;
2805 		}
2806 
2807 		/*
2808 		 * Other than 802.1D packets, ignore packets if the
2809 		 * interface is not in a good state.
2810 		 *
2811 		 * NOTE: Broadcast/mcast packets received on a blocking or
2812 		 *	 learning interface are allowed for local processing.
2813 		 *
2814 		 *	 The sending side of a blocked port will stop
2815 		 *	 transmitting when a better alternative is found.
2816 		 *	 However, later on we will disallow the forwarding
2817 		 *	 of bcast/mcast packets over a blocking interface.
2818 		 */
2819 		if (bif->bif_flags & IFBIF_STP) {
2820 			switch (bif->bif_state) {
2821 			case BSTP_IFSTATE_L1BLOCKING:
2822 			case BSTP_IFSTATE_LISTENING:
2823 			case BSTP_IFSTATE_DISABLED:
2824 				goto out;
2825 			default:
2826 				/* blocking, learning, bonded, forwarding */
2827 				break;
2828 			}
2829 		}
2830 
2831 		/*
2832 		 * Make a deep copy of the packet and enqueue the copy
2833 		 * for bridge processing; return the original packet for
2834 		 * local processing.
2835 		 */
2836 		mc = m_dup(m, M_NOWAIT);
2837 		if (mc == NULL)
2838 			goto out;
2839 
2840 		/*
2841 		 * It's just too dangerous to allow bcast/mcast over a
2842 		 * blocked interface, eventually the network will sort
2843 		 * itself out and a better path will be found.
2844 		 */
2845 		if ((bif->bif_flags & IFBIF_STP) == 0 ||
2846 		    bif->bif_state != BSTP_IFSTATE_BLOCKING)
2847 		{
2848 			bridge_forward(sc, mc);
2849 		}
2850 
2851 		/*
2852 		 * Reinject the mbuf as arriving on the bridge so we have a
2853 		 * chance at claiming multicast packets. We can not loop back
2854 		 * here from ether_input as a bridge is never a member of a
2855 		 * bridge.
2856 		 */
2857 		KASSERT(bifp->if_bridge == NULL,
2858 			("loop created in bridge_input"));
2859 		mc2 = m_dup(m, M_NOWAIT);
2860 #ifdef notyet
2861 		if (mc2 != NULL) {
2862 			/* Keep the layer3 header aligned */
2863 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2864 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2865 		}
2866 #endif
2867 		if (mc2 != NULL) {
2868 			/*
2869 			 * Don't tap to bpf(4) again; we have already done
2870 			 * the tapping.
2871 			 *
2872 			 * Leave m_pkthdr.rcvif alone, so ARP replies are
2873 			 * processed as coming in on the correct interface.
2874 			 *
2875 			 * Clear the bridge flag for local processing in
2876 			 * case the packet gets routed.
2877 			 */
2878 			mc2->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
2879 			ether_reinput_oncpu(bifp, mc2, 0);
2880 		}
2881 
2882 		/* Return the original packet for local processing. */
2883 		goto out;
2884 	}
2885 
2886 	/*
2887 	 * Input of a unicast packet.  We have to allow unicast packets
2888 	 * input from links in the BLOCKING state as this might be an
2889 	 * interface of last resort.
2890 	 *
2891 	 * NOTE: We explicitly ignore normal packets received on a link
2892 	 *	 in the BLOCKING state.  The point of being in that state
2893 	 *	 is to avoid getting duplicate packets.
2894 	 *
2895 	 *	 HOWEVER, if LINK2 is set the normal spanning tree code
2896 	 *	 will mark an interface BLOCKING to avoid multi-cast/broadcast
2897 	 *	 loops.  Unicast packets CAN still loop if we allow the
2898 	 *	 case (hence we only do it in LINK2), but it isn't quite as
2899 	 *	 bad as a broadcast packet looping.
2900 	 */
2901 	if (bif->bif_flags & IFBIF_STP) {
2902 		switch (bif->bif_state) {
2903 		case BSTP_IFSTATE_L1BLOCKING:
2904 		case BSTP_IFSTATE_LISTENING:
2905 		case BSTP_IFSTATE_DISABLED:
2906 			goto out;
2907 		default:
2908 			/* blocking, bonded, forwarding, learning */
2909 			break;
2910 		}
2911 	}
2912 
2913 	/*
2914 	 * Unicast.  Make sure it's not for us.
2915 	 *
2916 	 * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2917 	 * is followed by breaking out of the loop.
2918 	 */
2919 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2920 		if (bif->bif_ifp->if_type != IFT_ETHER)
2921 			continue;
2922 
2923 		/*
2924 		 * It is destined for an interface linked to the bridge.
2925 		 * We want the bridge itself to take care of link level
2926 		 * forwarding to member interfaces so reinput on the bridge.
2927 		 * i.e. if you ping an IP on a target interface associated
2928 		 * with the bridge, the arp is-at response should indicate
2929 		 * the bridge MAC.
2930 		 *
2931 		 * Only update our addr list when learning if the port
2932 		 * is not in a blocking state.  If it is we still allow
2933 		 * the packet but we do not try to learn from it.
2934 		 */
2935 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2936 			   ETHER_ADDR_LEN) == 0) {
2937 			if (bif->bif_ifp != ifp) {
2938 				/* XXX loop prevention */
2939 				m->m_flags |= M_ETHER_BRIDGED;
2940 			}
2941 			if ((bif->bif_flags & IFBIF_LEARNING) &&
2942 			    ((bif->bif_flags & IFBIF_STP) == 0 ||
2943 			     bif->bif_state != BSTP_IFSTATE_BLOCKING)) {
2944 				bridge_rtupdate(sc, eh->ether_shost,
2945 						ifp, IFBAF_DYNAMIC);
2946 			}
2947 			new_ifp = bifp; /* not bif->bif_ifp */
2948 			m->m_pkthdr.fw_flags &= ~BRIDGE_MBUF_TAGGED;
2949 			goto out;
2950 		}
2951 
2952 		/*
2953 		 * Ignore received packets that were sent by us.
2954 		 */
2955 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2956 			   ETHER_ADDR_LEN) == 0) {
2957 			m_freem(m);
2958 			m = NULL;
2959 			goto out;
2960 		}
2961 	}
2962 
2963 	/*
2964 	 * It isn't for us.
2965 	 *
2966 	 * Perform the bridge forwarding function, but disallow bridging
2967 	 * to interfaces in the blocking state if the packet came in on
2968 	 * an interface in the blocking state.
2969 	 *
2970 	 * (bridge_forward also updates the addr cache).
2971 	 */
2972 	bridge_forward(sc, m);
2973 	m = NULL;
2974 
2975 	/*
2976 	 * ether_reinput_oncpu() will reprocess rcvif as
2977 	 * coming from new_ifp (since we do not specify
2978 	 * REINPUT_KEEPRCVIF).
2979 	 */
2980 out:
2981 	if (new_ifp != NULL) {
2982 		/*
2983 		 * Clear the bridge flag for local processing in
2984 		 * case the packet gets routed.
2985 		 */
2986 		ether_reinput_oncpu(new_ifp, m, REINPUT_RUNBPF);
2987 		m = NULL;
2988 	}
2989 	return (m);
2990 }
2991 
2992 /*
2993  * bridge_start_bcast:
2994  *
2995  *	Broadcast the packet sent from bridge to all member
2996  *	interfaces.
2997  *	This is a simplified version of bridge_broadcast(), however,
2998  *	this function expects caller to hold bridge's serializer.
2999  */
3000 static void
3001 bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
3002 {
3003 	struct bridge_iflist *bif;
3004 	struct mbuf *mc;
3005 	struct ifnet *dst_if, *alt_if, *bifp;
3006 	int used = 0;
3007 	int found = 0;
3008 	int alt_priority;
3009 
3010 	mbuftrackid(m, 68);
3011 	bifp = sc->sc_ifp;
3012 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
3013 
3014 	/*
3015 	 * The following loop is MPSAFE; nothing is blocking
3016 	 * in the loop body.
3017 	 *
3018 	 * NOTE: We transmit through a member in the BLOCKING state only
3019 	 *	 as a last resort.
3020 	 */
3021 	alt_if = NULL;
3022 	alt_priority = 0;
3023 
3024 	TAILQ_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
3025 		dst_if = bif->bif_ifp;
3026 
3027 		if (bif->bif_flags & IFBIF_STP) {
3028 			switch (bif->bif_state) {
3029 			case BSTP_IFSTATE_BLOCKING:
3030 				if (bif->bif_priority > alt_priority) {
3031 					alt_priority = bif->bif_priority;
3032 					alt_if = bif->bif_ifp;
3033 				}
3034 				/* fall through */
3035 			case BSTP_IFSTATE_L1BLOCKING:
3036 			case BSTP_IFSTATE_DISABLED:
3037 				continue;
3038 			default:
3039 				/* listening, learning, bonded, forwarding */
3040 				break;
3041 			}
3042 		}
3043 
3044 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
3045 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
3046 			continue;
3047 
3048 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
3049 			continue;
3050 
3051 		if (TAILQ_NEXT(bif, bif_next) == NULL) {
3052 			mc = m;
3053 			used = 1;
3054 		} else {
3055 			mc = m_copypacket(m, M_NOWAIT);
3056 			if (mc == NULL) {
3057 				IFNET_STAT_INC(bifp, oerrors, 1);
3058 				continue;
3059 			}
3060 		}
3061 		found = 1;
3062 		bridge_enqueue(dst_if, mc);
3063 	}
3064 
3065 	if (found == 0 && alt_if) {
3066 		KKASSERT(used == 0);
3067 		mc = m;
3068 		used = 1;
3069 		bridge_enqueue(alt_if, mc);
3070 	}
3071 
3072 	if (used == 0)
3073 		m_freem(m);
3074 }
3075 
3076 /*
3077  * bridge_broadcast:
3078  *
3079  * Send a frame to all interfaces that are members of the bridge, except
3080  * for the one on which the packet arrived.
3081  */
3082 static void
3083 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, struct mbuf *m)
3084 {
3085 	struct bridge_iflist *bif, *nbif;
3086 	struct ether_header *eh;
3087 	struct mbuf *mc;
3088 	struct ifnet *dst_if, *alt_if, *bifp;
3089 	int used;
3090 	int found;
3091 	int alt_priority;
3092 	int from_us;
3093 	uint8_t alt_state;
3094 
3095 	mbuftrackid(m, 69);
3096 	bifp = sc->sc_ifp;
3097 	ASSERT_IFNET_NOT_SERIALIZED_ALL(bifp);
3098 
3099 	eh = mtod(m, struct ether_header *);
3100 	from_us = bridge_from_us(sc, eh);
3101 
3102 	if (inet_pfil_hook.ph_hashooks > 0
3103 #ifdef INET6
3104 	    || inet6_pfil_hook.ph_hashooks > 0
3105 #endif
3106 	    )
3107 	{
3108 		if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
3109 			return;
3110 		if (m == NULL)
3111 			return;
3112 
3113 		/* Filter on the bridge interface before broadcasting */
3114 		if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
3115 			return;
3116 		if (m == NULL)
3117 			return;
3118 	}
3119 
3120 	alt_state = BSTP_IFSTATE_LEARNING;
3121 	alt_if = NULL;
3122 	alt_priority = 0;
3123 	found = 0;
3124 	used = 0;
3125 
3126 	TAILQ_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
3127 		dst_if = bif->bif_ifp;
3128 
3129 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
3130 			continue;
3131 
3132 		/*
3133 		 * Don't bounce the packet out the same interface it came
3134 		 * in on.  We have to test MAC addresses because a packet
3135 		 * can come in on a bonded interface and we don't want it to
3136 		 * be echoed out the forwarding interface for the same bonding
3137 		 * set.
3138 		 */
3139 		if (src_if &&
3140 		    memcmp(IF_LLADDR(src_if), IF_LLADDR(dst_if),
3141 			   ETHER_ADDR_LEN) == 0)
3142 	        {
3143 			continue;
3144 		}
3145 
3146 		/*
3147 		 * Generally speaking we only broadcast through forwarding
3148 		 * interfaces.  If no interfaces are available we select
3149 		 * a BONDED, BLOCKING, or LEARNING interface to forward
3150 		 * through.
3151 		 */
3152 		if (bif->bif_flags & IFBIF_STP) {
3153 			switch (bif->bif_state) {
3154 			case BSTP_IFSTATE_LEARNING:
3155 				if (alt_state == BSTP_IFSTATE_LEARNING &&
3156 				    bif->bif_priority > alt_priority)
3157 				{
3158 					alt_priority = bif->bif_priority;
3159 					alt_if = bif->bif_ifp;
3160 				}
3161 				continue;
3162 			case BSTP_IFSTATE_BLOCKING:
3163 				if (alt_state == BSTP_IFSTATE_LEARNING ||
3164 				    (alt_state == BSTP_IFSTATE_BLOCKING &&
3165 				     bif->bif_priority > alt_priority))
3166 				{
3167 					alt_state = BSTP_IFSTATE_BLOCKING;
3168 					alt_priority = bif->bif_priority;
3169 					alt_if = bif->bif_ifp;
3170 				}
3171 				continue;
3172 			case BSTP_IFSTATE_BONDED:
3173 				if (alt_state != BSTP_IFSTATE_BONDED ||
3174 				    bif->bif_priority > alt_priority)
3175 				{
3176 					alt_state = BSTP_IFSTATE_BONDED;
3177 					alt_priority = bif->bif_priority;
3178 					alt_if = bif->bif_ifp;
3179 				}
3180 				continue;
3181 			case BSTP_IFSTATE_L1BLOCKING:
3182 			case BSTP_IFSTATE_DISABLED:
3183 			case BSTP_IFSTATE_LISTENING:
3184 				continue;
3185 			default:
3186 				/* forwarding */
3187 				break;
3188 			}
3189 		}
3190 
3191 		/*
3192 		 * FORWARDING
3193 		 */
3194 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
3195 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
3196 		{
3197 			continue;
3198 		}
3199 
3200 		/*
3201 		 * Last interface in list?
3202 		 */
3203 		if (TAILQ_NEXT(bif, bif_next) == NULL) {
3204 			mc = m;
3205 			used = 1;
3206 		} else {
3207 			mc = m_copypacket(m, M_NOWAIT);
3208 			if (mc == NULL) {
3209 				IFNET_STAT_INC(sc->sc_ifp, oerrors, 1);
3210 				continue;
3211 			}
3212 		}
3213 		found = 1;
3214 
3215 		/*
3216 		 * Filter on the output interface.  Pass a NULL bridge
3217 		 * interface pointer so we do not redundantly filter on
3218 		 * the bridge for each interface we broadcast on.
3219 		 */
3220 		if (inet_pfil_hook.ph_hashooks > 0
3221 #ifdef INET6
3222 		    || inet6_pfil_hook.ph_hashooks > 0
3223 #endif
3224 		    )
3225 		{
3226 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
3227 				continue;
3228 			if (mc == NULL)
3229 				continue;
3230 		}
3231 		bridge_handoff(sc, dst_if, mc, from_us);
3232 
3233 		if (nbif != NULL && !nbif->bif_onlist) {
3234 			KKASSERT(bif->bif_onlist);
3235 			nbif = TAILQ_NEXT(bif, bif_next);
3236 		}
3237 	}
3238 
3239 	if (found == 0 && alt_if) {
3240 		KKASSERT(used == 0);
3241 		mc = m;
3242 		used = 1;
3243 		bridge_enqueue(alt_if, mc);
3244 	}
3245 
3246 	if (used == 0)
3247 		m_freem(m);
3248 }
3249 
3250 /*
3251  * bridge_span:
3252  *
3253  *	Duplicate a packet out one or more interfaces that are in span
3254  *	mode; the original mbuf is left unmodified.
3255  */
3256 static void
3257 bridge_span(struct bridge_softc *sc, struct mbuf *m)
3258 {
3259 	struct bridge_iflist *bif;
3260 	struct ifnet *dst_if, *bifp;
3261 	struct mbuf *mc;
3262 
3263 	mbuftrackid(m, 70);
3264 	bifp = sc->sc_ifp;
3265 	ifnet_serialize_all(bifp);
3266 
3267 	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
3268 		dst_if = bif->bif_ifp;
3269 
3270 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
3271 			continue;
3272 
3273 		mc = m_copypacket(m, M_NOWAIT);
3274 		if (mc == NULL) {
3275 			IFNET_STAT_INC(sc->sc_ifp, oerrors, 1);
3276 			continue;
3277 		}
3278 		bridge_enqueue(dst_if, mc);
3279 	}
3280 
3281 	ifnet_deserialize_all(bifp);
3282 }
3283 
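/*
 * bridge_rtmsg_sync_handler:
 *
 *	Per-cpu handler which simply forwards the message to the next
 *	netisr cpu; used by bridge_rtmsg_sync() as a barrier.
 */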
3284 static void
3285 bridge_rtmsg_sync_handler(netmsg_t msg)
3286 {
3287 	netisr_forwardmsg(&msg->base, mycpuid + 1);
3288 }
3289 
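/*
 * bridge_rtmsg_sync:
 *
 *	Synchronize with the routing table netmsgs by circulating a
 *	dummy message through all netisr cpus.
 */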
3290 static void
3291 bridge_rtmsg_sync(struct bridge_softc *sc)
3292 {
3293 	struct netmsg_base msg;
3294 
3295 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3296 
3297 	/* XXX use netmsg_service_sync */
3298 	netmsg_init(&msg, NULL, &curthread->td_msgport,
3299 		    0, bridge_rtmsg_sync_handler);
3300 	netisr_domsg(&msg, 0);
3301 }
3302 
3303 static __inline void
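/*
 * bridge_rtinfo_update:
 *
 *	Refresh a routing entry's shared rtinfo: re-point a dynamic
 *	entry at dst_if, extend its expiration time, and overwrite
 *	the flags when setflags is requested.
 */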
3304 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
3305 		     int setflags, uint8_t flags, uint32_t timeo)
3306 {
3307 	if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3308 	    bri->bri_ifp != dst_if)
3309 		bri->bri_ifp = dst_if;
3310 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3311 	    bri->bri_expire != time_uptime + timeo)
3312 		bri->bri_expire = time_uptime + timeo;
3313 	if (setflags)
3314 		bri->bri_flags = flags;
3315 }
3316 
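/*
 * bridge_rtinstall_oncpu:
 *
 *	Install a routing entry for 'dst' in the current cpu's table.
 *	On CPU0 the shared rtinfo is allocated and the brtmax limit is
 *	enforced; the other cpus reuse the rtinfo passed in via bri0.
 */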
3317 static int
3318 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
3319 		       struct ifnet *dst_if, int setflags, uint8_t flags,
3320 		       struct bridge_rtinfo **bri0)
3321 {
3322 	struct bridge_rtnode *brt;
3323 	struct bridge_rtinfo *bri;
3324 
3325 	if (mycpuid == 0) {
3326 		brt = bridge_rtnode_lookup(sc, dst);
3327 		if (brt != NULL) {
3328 			/*
3329 			 * rtnode for 'dst' already exists.  We inform the
3330 			 * caller about this by leaving bri0 as NULL.  The
3331 			 * caller will terminate the installation upon getting
3332 			 * NULL bri0.  However, we still need to update the
3333 			 * rtinfo.
3334 			 */
3335 			KKASSERT(*bri0 == NULL);
3336 
3337 			/* Update rtinfo */
3338 			bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
3339 					     flags, sc->sc_brttimeout);
3340 			return 0;
3341 		}
3342 
3343 		/*
3344 		 * We only need to check brtcnt on CPU0; if the limit
3345 		 * would be exceeded, ENOSPC is returned and the caller
3346 		 * knows to terminate the installation.
3347 		 */
3348 		if (sc->sc_brtcnt >= sc->sc_brtmax)
3349 			return ENOSPC;
3350 
3351 		KKASSERT(*bri0 == NULL);
3352 		bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
3353 				  M_WAITOK | M_ZERO);
3354 		*bri0 = bri;
3355 
3356 		/* Setup rtinfo */
3357 		bri->bri_flags = IFBAF_DYNAMIC;
3358 		bridge_rtinfo_update(bri, dst_if, setflags, flags,
3359 				     sc->sc_brttimeout);
3360 	} else {
3361 		bri = *bri0;
3362 		KKASSERT(bri != NULL);
3363 	}
3364 
3365 	brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
3366 		      M_WAITOK | M_ZERO);
3367 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
3368 	brt->brt_info = bri;
3369 	atomic_add_int(&bri->bri_refs, 1);
3370 
3371 	bridge_rtnode_insert(sc, brt);
3372 	return 0;
3373 }
3374 
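/*
 * bridge_rtinstall_handler:
 *
 *	Netmsg handler which installs a routing entry on each netisr
 *	cpu in turn, replying early on error or when the entry
 *	already exists.
 */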
3375 static void
3376 bridge_rtinstall_handler(netmsg_t msg)
3377 {
3378 	struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)msg;
3379 	int error;
3380 
3381 	error = bridge_rtinstall_oncpu(brmsg->br_softc,
3382 				       brmsg->br_dst, brmsg->br_dst_if,
3383 				       brmsg->br_setflags, brmsg->br_flags,
3384 				       &brmsg->br_rtinfo);
3385 	if (error) {
3386 		KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
3387 		netisr_replymsg(&brmsg->base, error);
3388 		return;
3389 	} else if (brmsg->br_rtinfo == NULL) {
3390 		/* rtnode already exists for 'dst' */
3391 		KKASSERT(mycpuid == 0);
3392 		netisr_replymsg(&brmsg->base, 0);
3393 		return;
3394 	}
3395 	netisr_forwardmsg(&brmsg->base, mycpuid + 1);
3396 }
3397 
3398 /*
3399  * bridge_rtupdate:
3400  *
3401  *	Add/Update a bridge routing entry.
3402  */
3403 static int
3404 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
3405 		struct ifnet *dst_if, uint8_t flags)
3406 {
3407 	struct bridge_rtnode *brt;
3408 
3409 	/*
3410 	 * A route for this destination might already exist.  If so,
3411 	 * update it; otherwise create a new one.
3412 	 */
3413 	if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
3414 		struct netmsg_brsaddr *brmsg;
3415 
3416 		if (sc->sc_brtcnt >= sc->sc_brtmax)
3417 			return ENOSPC;
3418 
3419 		brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
3420 		if (brmsg == NULL)
3421 			return ENOMEM;
3422 
3423 		netmsg_init(&brmsg->base, NULL, &netisr_afree_rport,
3424 			    0, bridge_rtinstall_handler);
3425 		memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
3426 		brmsg->br_dst_if = dst_if;
3427 		brmsg->br_flags = flags;
3428 		brmsg->br_setflags = 0;
3429 		brmsg->br_softc = sc;
3430 		brmsg->br_rtinfo = NULL;
3431 
3432 		netisr_sendmsg(&brmsg->base, 0);
3433 		return 0;
3434 	}
3435 	bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
3436 			     sc->sc_brttimeout);
3437 	return 0;
3438 }
3439 
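/*
 * bridge_rtsaddr:
 *
 *	Add or update an address entry, setting its flags explicitly;
 *	runs synchronously across all netisr cpus.
 */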
3440 static int
3441 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
3442 	       struct ifnet *dst_if, uint8_t flags)
3443 {
3444 	struct netmsg_brsaddr brmsg;
3445 
3446 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3447 
3448 	netmsg_init(&brmsg.base, NULL, &curthread->td_msgport,
3449 		    0, bridge_rtinstall_handler);
3450 	memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
3451 	brmsg.br_dst_if = dst_if;
3452 	brmsg.br_flags = flags;
3453 	brmsg.br_setflags = 1;
3454 	brmsg.br_softc = sc;
3455 	brmsg.br_rtinfo = NULL;
3456 
3457 	return netisr_domsg(&brmsg.base, 0);
3458 }
3459 
3460 /*
3461  * bridge_rtlookup:
3462  *
3463  *	Lookup the destination interface for an address.
3464  */
3465 static struct ifnet *
3466 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
3467 {
3468 	struct bridge_rtnode *brt;
3469 
3470 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
3471 		return NULL;
3472 	return brt->brt_info->bri_ifp;
3473 }
3474 
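/*
 * bridge_rtreap_handler:
 *
 *	Per-cpu handler which destroys all rtnodes whose rtinfo has
 *	been marked dead, then forwards the message to the next cpu.
 */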
3475 static void
3476 bridge_rtreap_handler(netmsg_t msg)
3477 {
3478 	struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
3479 	struct bridge_rtnode *brt, *nbrt;
3480 
3481 	LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
3482 		if (brt->brt_info->bri_dead)
3483 			bridge_rtnode_destroy(sc, brt);
3484 	}
3485 	netisr_forwardmsg(&msg->base, mycpuid + 1);
3486 }
3487 
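/*
 * bridge_rtreap:
 *
 *	Synchronously reap dead routing entries on all netisr cpus.
 */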
3488 static void
3489 bridge_rtreap(struct bridge_softc *sc)
3490 {
3491 	struct netmsg_base msg;
3492 
3493 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3494 
3495 	netmsg_init(&msg, NULL, &curthread->td_msgport,
3496 		    0, bridge_rtreap_handler);
3497 	msg.lmsg.u.ms_resultp = sc;
3498 
3499 	netisr_domsg(&msg, 0);
3500 }
3501 
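/*
 * bridge_rtreap_async:
 *
 *	Asynchronous version of bridge_rtreap(); the reap message is
 *	auto-freed when it is replied to.
 */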
3502 static void
3503 bridge_rtreap_async(struct bridge_softc *sc)
3504 {
3505 	struct netmsg_base *msg;
3506 
3507 	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK);
3508 
3509 	netmsg_init(msg, NULL, &netisr_afree_rport,
3510 		    0, bridge_rtreap_handler);
3511 	msg->lmsg.u.ms_resultp = sc;
3512 
3513 	netisr_sendmsg(msg, 0);
3514 }
3515 
3516 /*
3517  * bridge_rttrim:
3518  *
3519  *	Trim the routing table so that we have a number
3520  *	of routing entries less than or equal to the
3521  *	maximum number.
3522  */
3523 static void
3524 bridge_rttrim(struct bridge_softc *sc)
3525 {
3526 	struct bridge_rtnode *brt;
3527 	int dead;
3528 
3529 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3530 
3531 	/* Make sure we actually need to do this. */
3532 	if (sc->sc_brtcnt <= sc->sc_brtmax)
3533 		return;
3534 
3535 	/*
3536 	 * Find out how many rtnodes are dead
3537 	 */
3538 	dead = bridge_rtage_finddead(sc);
3539 	KKASSERT(dead <= sc->sc_brtcnt);
3540 
3541 	if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
3542 		/* Enough dead rtnodes are found */
3543 		bridge_rtreap(sc);
3544 		return;
3545 	}
3546 
3547 	/*
3548 	 * Kill some dynamic rtnodes to meet the brtmax
3549 	 */
3550 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3551 		struct bridge_rtinfo *bri = brt->brt_info;
3552 
3553 		if (bri->bri_dead) {
3554 			/*
3555 			 * We have counted this rtnode in
3556 			 * bridge_rtage_finddead()
3557 			 */
3558 			continue;
3559 		}
3560 
3561 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3562 			bri->bri_dead = 1;
3563 			++dead;
3564 			KKASSERT(dead <= sc->sc_brtcnt);
3565 
3566 			if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
3567 				/* Enough rtnodes are collected */
3568 				break;
3569 			}
3570 		}
3571 	}
3572 	if (dead)
3573 		bridge_rtreap(sc);
3574 }
3575 
3576 /*
3577  * bridge_timer:
3578  *
3579  *	Aging timer for the bridge.
3580  */
3581 static void
3582 bridge_timer(void *arg)
3583 {
3584 	struct bridge_softc *sc = arg;
3585 	struct netmsg_base *msg;
3586 
3587 	KKASSERT(mycpuid == BRIDGE_CFGCPU);
3588 
3589 	crit_enter();
3590 
3591 	if (callout_pending(&sc->sc_brcallout) ||
3592 	    !callout_active(&sc->sc_brcallout)) {
3593 		crit_exit();
3594 		return;
3595 	}
3596 	callout_deactivate(&sc->sc_brcallout);
3597 
3598 	msg = &sc->sc_brtimemsg;
3599 	KKASSERT(msg->lmsg.ms_flags & MSGF_DONE);
3600 	lwkt_sendmsg_oncpu(BRIDGE_CFGPORT, &msg->lmsg);
3601 
3602 	crit_exit();
3603 }
3604 
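/*
 * bridge_timer_handler:
 *
 *	Runs on the config port: reply to the timer message right away,
 *	age the routing table, and reschedule the callout while the
 *	bridge is running.
 */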
3605 static void
3606 bridge_timer_handler(netmsg_t msg)
3607 {
3608 	struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
3609 
3610 	KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
3611 
3612 	crit_enter();
3613 	/* Reply ASAP */
3614 	lwkt_replymsg(&msg->lmsg, 0);
3615 	crit_exit();
3616 
3617 	bridge_rtage(sc);
3618 	if (sc->sc_ifp->if_flags & IFF_RUNNING) {
3619 		callout_reset(&sc->sc_brcallout,
3620 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
3621 	}
3622 }
3623 
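/*
 * bridge_rtage_finddead:
 *
 *	Mark expired dynamic routing entries dead and return how many
 *	were found.
 */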
3624 static int
3625 bridge_rtage_finddead(struct bridge_softc *sc)
3626 {
3627 	struct bridge_rtnode *brt;
3628 	int dead = 0;
3629 
3630 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3631 		struct bridge_rtinfo *bri = brt->brt_info;
3632 
3633 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3634 		    time_uptime >= bri->bri_expire) {
3635 			bri->bri_dead = 1;
3636 			++dead;
3637 			KKASSERT(dead <= sc->sc_brtcnt);
3638 		}
3639 	}
3640 	return dead;
3641 }
3642 
3643 /*
3644  * bridge_rtage:
3645  *
3646  *	Perform an aging cycle.
3647  */
3648 static void
3649 bridge_rtage(struct bridge_softc *sc)
3650 {
3651 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3652 
3653 	if (bridge_rtage_finddead(sc))
3654 		bridge_rtreap(sc);
3655 }
3656 
3657 /*
3658  * bridge_rtflush:
3659  *
3660  *	Remove all dynamic addresses from the bridge.
3661  */
3662 static void
3663 bridge_rtflush(struct bridge_softc *sc, int bf)
3664 {
3665 	struct bridge_rtnode *brt;
3666 	int reap;
3667 
3668 	reap = 0;
3669 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3670 		struct bridge_rtinfo *bri = brt->brt_info;
3671 
3672 		if ((bf & IFBF_FLUSHALL) ||
3673 		    (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3674 			bri->bri_dead = 1;
3675 			reap = 1;
3676 		}
3677 	}
3678 	if (reap) {
3679 		if (bf & IFBF_FLUSHSYNC)
3680 			bridge_rtreap(sc);
3681 		else
3682 			bridge_rtreap_async(sc);
3683 	}
3684 }
3685 
3686 /*
3687  * bridge_rtdaddr:
3688  *
3689  *	Remove an address from the table.
3690  */
3691 static int
3692 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
3693 {
3694 	struct bridge_rtnode *brt;
3695 
3696 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3697 
3698 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
3699 		return (ENOENT);
3700 
3701 	/* TODO: add a cheaper delete operation */
3702 	brt->brt_info->bri_dead = 1;
3703 	bridge_rtreap(sc);
3704 	return (0);
3705 }
3706 
3707 /*
3708  * bridge_rtdelete:
3709  *
3710  *	Delete routes to a specific member interface.
3711  */
3712 void
3713 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
3714 {
3715 	struct bridge_rtnode *brt;
3716 	int reap;
3717 
3718 	reap = 0;
3719 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
3720 		struct bridge_rtinfo *bri = brt->brt_info;
3721 
3722 		if (bri->bri_ifp == ifp &&
3723 		    ((bf & IFBF_FLUSHALL) ||
3724 		     (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
3725 			bri->bri_dead = 1;
3726 			reap = 1;
3727 		}
3728 	}
3729 	if (reap) {
3730 		if (bf & IFBF_FLUSHSYNC)
3731 			bridge_rtreap(sc);
3732 		else
3733 			bridge_rtreap_async(sc);
3734 	}
3735 }
3736 
3737 /*
3738  * bridge_rtable_init:
3739  *
3740  *	Initialize the route table for this bridge.
3741  */
3742 static void
3743 bridge_rtable_init(struct bridge_softc *sc)
3744 {
3745 	int cpu;
3746 
3747 	/*
3748 	 * Initialize per-cpu hash tables
3749 	 */
3750 	sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * netisr_ncpus,
3751 				 M_DEVBUF, M_WAITOK);
3752 	for (cpu = 0; cpu < netisr_ncpus; ++cpu) {
3753 		int i;
3754 
3755 		sc->sc_rthashs[cpu] =
3756 		kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
3757 			M_DEVBUF, M_WAITOK);
3758 
3759 		for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3760 			LIST_INIT(&sc->sc_rthashs[cpu][i]);
3761 	}
3762 	sc->sc_rthash_key = karc4random();
3763 
3764 	/*
3765 	 * Initialize per-cpu lists
3766 	 */
3767 	sc->sc_rtlists =
3768 	    kmalloc(sizeof(struct bridge_rtnode_head) * netisr_ncpus,
3769 	    M_DEVBUF, M_WAITOK);
3770 	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
3771 		LIST_INIT(&sc->sc_rtlists[cpu]);
3772 }
3773 
3774 /*
3775  * bridge_rtable_fini:
3776  *
3777  *	Deconstruct the route table for this bridge.
3778  */
3779 static void
3780 bridge_rtable_fini(struct bridge_softc *sc)
3781 {
3782 	int cpu;
3783 
3784 	/*
3785 	 * Free per-cpu hash tables
3786 	 */
3787 	for (cpu = 0; cpu < netisr_ncpus; ++cpu)
3788 		kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3789 	kfree(sc->sc_rthashs, M_DEVBUF);
3790 
3791 	/*
3792 	 * Free per-cpu lists
3793 	 */
3794 	kfree(sc->sc_rtlists, M_DEVBUF);
3795 }
3796 
3797 /*
3798  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3799  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3800  */
3801 #define	mix(a, b, c)							\
3802 do {									\
3803 	a -= b; a -= c; a ^= (c >> 13);					\
3804 	b -= c; b -= a; b ^= (a << 8);					\
3805 	c -= a; c -= b; c ^= (b >> 13);					\
3806 	a -= b; a -= c; a ^= (c >> 12);					\
3807 	b -= c; b -= a; b ^= (a << 16);					\
3808 	c -= a; c -= b; c ^= (b >> 5);					\
3809 	a -= b; a -= c; a ^= (c >> 3);					\
3810 	b -= c; b -= a; b ^= (a << 10);					\
3811 	c -= a; c -= b; c ^= (b >> 15);					\
3812 } while (/*CONSTCOND*/0)
3813 
3814 static __inline uint32_t
3815 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3816 {
3817 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3818 
3819 	b += addr[5] << 8;
3820 	b += addr[4];
3821 	a += addr[3] << 24;
3822 	a += addr[2] << 16;
3823 	a += addr[1] << 8;
3824 	a += addr[0];
3825 
3826 	mix(a, b, c);
3827 
3828 	return (c & BRIDGE_RTHASH_MASK);
3829 }
3830 
3831 #undef mix
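
/*
 * Illustrative sketch (not compiled): a standalone userland rendition of
 * bridge_rthash() above.  The key is normally karc4random(); the fixed
 * key, the example MAC and the 1024-bucket table size below are assumed
 * purely for demonstration and are not taken from this driver.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_RTHASH_SIZE	1024			/* assumed bucket count */
#define EX_RTHASH_MASK	(EX_RTHASH_SIZE - 1)

static uint32_t
example_rthash(const uint8_t addr[6], uint32_t key)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;

	/* pack the 6-byte MAC into a and b, as bridge_rthash() does */
	b += (uint32_t)addr[5] << 8;
	b += addr[4];
	a += (uint32_t)addr[3] << 24;
	a += (uint32_t)addr[2] << 16;
	a += (uint32_t)addr[1] << 8;
	a += addr[0];

	/* the same mixing rounds as the mix() macro */
	a -= b; a -= c; a ^= (c >> 13);
	b -= c; b -= a; b ^= (a << 8);
	c -= a; c -= b; c ^= (b >> 13);
	a -= b; a -= c; a ^= (c >> 12);
	b -= c; b -= a; b ^= (a << 16);
	c -= a; c -= b; c ^= (b >> 5);
	a -= b; a -= c; a ^= (c >> 3);
	b -= c; b -= a; b ^= (a << 10);
	c -= a; c -= b; c ^= (b >> 15);

	return (c & EX_RTHASH_MASK);
}

int
main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("bucket %u\n", (unsigned)example_rthash(mac, 0xdeadbeef));
	return (0);
}
#endif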
3832 
3833 static int
3834 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3835 {
3836 	int i, d;
3837 
3838 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3839 		d = ((int)a[i]) - ((int)b[i]);
3840 	}
3841 
3842 	return (d);
3843 }
3844 
3845 /*
3846  * bridge_rtnode_lookup:
3847  *
3848  *	Look up a bridge route node for the specified destination.
3849  */
3850 static struct bridge_rtnode *
3851 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
3852 {
3853 	struct bridge_rtnode *brt;
3854 	uint32_t hash;
3855 	int dir;
3856 
3857 	hash = bridge_rthash(sc, addr);
3858 	LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3859 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3860 		if (dir == 0)
3861 			return (brt);
3862 		if (dir > 0)
3863 			return (NULL);
3864 	}
3865 
3866 	return (NULL);
3867 }
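
/*
 * The early NULL return above relies on bridge_rtnode_insert() keeping
 * each hash chain sorted in descending address order (as compared by
 * bridge_rtnode_addr_cmp()): once the search key compares greater than
 * the current entry it cannot appear later in the chain.
 */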
3868 
3869 /*
3870  * bridge_rtnode_insert:
3871  *
3872  *	Insert the specified bridge node into the route table.
3873  *	Caller has to make sure that rtnode does not exist.
3874  */
3875 static void
3876 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3877 {
3878 	struct bridge_rtnode *lbrt;
3879 	uint32_t hash;
3880 	int dir;
3881 
3882 	hash = bridge_rthash(sc, brt->brt_addr);
3883 
3884 	lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
3885 	if (lbrt == NULL) {
3886 		LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash],
3887 				  brt, brt_hash);
3888 		goto out;
3889 	}
3890 
3891 	do {
3892 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3893 		KASSERT(dir != 0, ("rtnode already exists"));
3894 
3895 		if (dir > 0) {
3896 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3897 			goto out;
3898 		}
3899 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3900 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3901 			goto out;
3902 		}
3903 		lbrt = LIST_NEXT(lbrt, brt_hash);
3904 	} while (lbrt != NULL);
3905 
3906 	panic("no suitable position found for rtnode");
3907 out:
3908 	LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3909 	if (mycpuid == 0) {
3910 		/*
3911 		 * Update the brtcnt.
3912 		 * We only need to do it once and we do it on CPU0.
3913 		 */
3914 		sc->sc_brtcnt++;
3915 	}
3916 }
3917 
3918 /*
3919  * bridge_rtnode_destroy:
3920  *
3921  *	Destroy a bridge rtnode.
3922  */
3923 static void
3924 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3925 {
3926 	struct bridge_rtinfo *bri;
3927 
3928 	LIST_REMOVE(brt, brt_hash);
3929 	LIST_REMOVE(brt, brt_list);
3930 
3931 	bri = brt->brt_info;
3932 
3933 	/*
3934 	 * The bri_dead flag can be set asynchronously and catch some gc's
3935 	 * in the middle, so don't free bri until all references have
3936 	 * actually gone away.
3937 	 */
3938 	if (atomic_fetchadd_int(&bri->bri_refs, -1) == 1) {
3939 		/* Free rtinfo associated with rtnode on the last cpu */
3940 		kfree(bri, M_DEVBUF);
3941 		brt->brt_info = NULL;	/* safety */
3942 	}
3943 	kfree(brt, M_DEVBUF);
3944 
3945 	if (mycpuid == 0) {
3946 		/* Update brtcnt only on CPU0 */
3947 		sc->sc_brtcnt--;
3948 	}
3949 }
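
/*
 * brt_info is shared by the per-cpu copies of an rtnode; bri_refs counts
 * the rtnodes still pointing at it, so only the cpu that drops the last
 * reference actually frees it above.
 */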
3950 
3951 static __inline int
3952 bridge_post_pfil(struct mbuf *m)
3953 {
3954 	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3955 		return EOPNOTSUPP;
3956 
3957 	/* Not yet */
3958 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3959 		return EOPNOTSUPP;
3960 
3961 	return 0;
3962 }
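
/*
 * The two fw_flags above indicate that the filter asked for an ipfw-style
 * forward or a dummynet action; neither is handled for bridged traffic
 * yet, so bridge_pfil() treats the packet as rejected.
 */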
3963 
3964 /*
3965  * Send bridge packets through pfil if they are one of the types pfil can deal
3966  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3967  * question.)  If bifp or ifp is NULL, packet filtering is skipped for
3968  * that interface.
3969  */
3970 static int
3971 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3972 {
3973 	int snap, error, i, hlen;
3974 	struct ether_header *eh1, eh2;
3975 	struct ip *ip;
3976 	struct llc llc1;
3977 	u_int16_t ether_type;
3978 
3979 	snap = 0;
3980 	error = -1;	/* Default error if not error == 0 */
3981 
3982 	if (pfil_bridge == 0 && pfil_member == 0)
3983 		return (0); /* filtering is disabled */
3984 
3985 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3986 	if ((*mp)->m_len < i) {
3987 		*mp = m_pullup(*mp, i);
3988 		if (*mp == NULL) {
3989 			kprintf("%s: m_pullup failed\n", __func__);
3990 			return (-1);
3991 		}
3992 	}
3993 
3994 	eh1 = mtod(*mp, struct ether_header *);
3995 	ether_type = ntohs(eh1->ether_type);
3996 
3997 	/*
3998 	 * Check for SNAP/LLC.
3999 	 */
4000 	if (ether_type < ETHERMTU) {
4001 		struct llc *llc2 = (struct llc *)(eh1 + 1);
4002 
4003 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
4004 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
4005 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
4006 		    llc2->llc_control == LLC_UI) {
4007 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
4008 			snap = 1;
4009 		}
4010 	}
4011 
4012 	/*
4013 	 * If we're trying to filter bridge traffic, don't look at anything
4014 	 * other than IP and ARP traffic.  If the filter doesn't understand
4015 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
4016 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
4017 	 * but of course we don't have an AppleTalk filter to begin with.
4018 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
4019 	 * ARP traffic.)
4020 	 */
4021 	switch (ether_type) {
4022 	case ETHERTYPE_ARP:
4023 	case ETHERTYPE_REVARP:
4024 		return (0); /* Automatically pass */
4025 
4026 	case ETHERTYPE_IP:
4027 #ifdef INET6
4028 	case ETHERTYPE_IPV6:
4029 #endif /* INET6 */
4030 		break;
4031 
4032 	default:
4033 		/*
4034 		 * Check to see if the user wants to pass non-IP
4035 		 * packets; these are not checked by pfil(9) and would
4036 		 * pass unconditionally, so the default is to drop them.
4037 		 */
4038 		if (pfil_onlyip)
4039 			goto bad;
4040 	}
4041 
4042 	/* Strip off the Ethernet header and keep a copy. */
4043 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
4044 	m_adj(*mp, ETHER_HDR_LEN);
4045 
4046 	/* Strip off snap header, if present */
4047 	if (snap) {
4048 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
4049 		m_adj(*mp, sizeof(struct llc));
4050 	}
4051 
4052 	/*
4053 	 * Check the IP header for alignment and errors
4054 	 */
4055 	if (dir == PFIL_IN) {
4056 		switch (ether_type) {
4057 		case ETHERTYPE_IP:
4058 			error = bridge_ip_checkbasic(mp);
4059 			break;
4060 #ifdef INET6
4061 		case ETHERTYPE_IPV6:
4062 			error = bridge_ip6_checkbasic(mp);
4063 			break;
4064 #endif /* INET6 */
4065 		default:
4066 			error = 0;
4067 		}
4068 		if (error)
4069 			goto bad;
4070 	}
4071 
4072 	error = 0;
4073 
4074 	/*
4075 	 * Run the packet through pfil
4076 	 */
4077 	switch (ether_type) {
4078 	case ETHERTYPE_IP:
4079 		/*
4080 		 * before calling the firewall, swap fields the same as
4081 		 * IP does. here we assume the header is contiguous
4082 		 */
4083 		ip = mtod(*mp, struct ip *);
4084 
4085 		ip->ip_len = ntohs(ip->ip_len);
4086 		ip->ip_off = ntohs(ip->ip_off);
4087 
4088 		/*
4089 		 * Run pfil on the member interface and the bridge, both can
4090 		 * be skipped by clearing pfil_member or pfil_bridge.
4091 		 *
4092 		 * Keep the order:
4093 		 *   in_if -> bridge_if -> out_if
4094 		 */
4095 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
4096 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
4097 			if (*mp == NULL || error != 0) /* filter may consume */
4098 				break;
4099 			error = bridge_post_pfil(*mp);
4100 			if (error)
4101 				break;
4102 		}
4103 
4104 		if (pfil_member && ifp != NULL) {
4105 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
4106 			if (*mp == NULL || error != 0) /* filter may consume */
4107 				break;
4108 			error = bridge_post_pfil(*mp);
4109 			if (error)
4110 				break;
4111 		}
4112 
4113 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
4114 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
4115 			if (*mp == NULL || error != 0) /* filter may consume */
4116 				break;
4117 			error = bridge_post_pfil(*mp);
4118 			if (error)
4119 				break;
4120 		}
4121 
4122 		/* check if we need to fragment the packet */
4123 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
4124 			i = (*mp)->m_pkthdr.len;
4125 			if (i > ifp->if_mtu) {
4126 				error = bridge_fragment(ifp, *mp, &eh2, snap,
4127 					    &llc1);
4128 				return (error);
4129 			}
4130 		}
4131 
4132 		/* Recalculate the ip checksum and restore byte ordering */
4133 		ip = mtod(*mp, struct ip *);
4134 		hlen = ip->ip_hl << 2;
4135 		if (hlen < sizeof(struct ip))
4136 			goto bad;
4137 		if (hlen > (*mp)->m_len) {
4138 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
4139 				goto bad;
4140 			ip = mtod(*mp, struct ip *);
4141 			if (ip == NULL)
4142 				goto bad;
4143 		}
4144 		ip->ip_len = htons(ip->ip_len);
4145 		ip->ip_off = htons(ip->ip_off);
4146 		ip->ip_sum = 0;
4147 		if (hlen == sizeof(struct ip))
4148 			ip->ip_sum = in_cksum_hdr(ip);
4149 		else
4150 			ip->ip_sum = in_cksum(*mp, hlen);
4151 
4152 		break;
4153 #ifdef INET6
4154 	case ETHERTYPE_IPV6:
4155 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
4156 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
4157 					dir);
4158 
4159 		if (*mp == NULL || error != 0) /* filter may consume */
4160 			break;
4161 
4162 		if (pfil_member && ifp != NULL)
4163 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
4164 					dir);
4165 
4166 		if (*mp == NULL || error != 0) /* filter may consume */
4167 			break;
4168 
4169 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
4170 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
4171 					dir);
4172 		break;
4173 #endif
4174 	default:
4175 		error = 0;
4176 		break;
4177 	}
4178 
4179 	if (*mp == NULL)
4180 		return (error);
4181 	if (error != 0)
4182 		goto bad;
4183 
4184 	error = -1;
4185 
4186 	/*
4187 	 * Finally, put everything back the way it was and return
4188 	 */
4189 	if (snap) {
4190 		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
4191 		if (*mp == NULL)
4192 			return (error);
4193 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
4194 	}
4195 
4196 	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
4197 	if (*mp == NULL)
4198 		return (error);
4199 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4200 
4201 	return (0);
4202 
4203 bad:
4204 	m_freem(*mp);
4205 	*mp = NULL;
4206 	return (error);
4207 }
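
/*
 * Hypothetical call-site sketch (not from this driver): how a forwarding
 * path might run a frame through bridge_pfil() on its way from one member
 * to another.  'src_if' and 'dst_if' are illustrative names only.
 */
#if 0
	/* inbound pass: the receiving member, then the bridge itself */
	if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0 || m == NULL)
		return;				/* rejected or consumed */

	/* outbound pass: the bridge, then the transmitting member */
	if (bridge_pfil(&m, bifp, dst_if, PFIL_OUT) != 0 || m == NULL)
		return;				/* rejected or consumed */
#endif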
4208 
4209 /*
4210  * Perform basic checks on header size since
4211  * pfil assumes ip_input has already processed
4212  * the header for it.  Cut-and-pasted from ip_input.c.
4213  * Given how simple the IPv6 version is,
4214  * does the IPv4 version really need to be
4215  * this complicated?
4216  *
4217  * XXX Should we update ipstat here, or not?
4218  * XXX Right now we update ipstat but not
4219  * XXX csum_counter.
4220  */
4221 static int
4222 bridge_ip_checkbasic(struct mbuf **mp)
4223 {
4224 	struct mbuf *m = *mp;
4225 	struct ip *ip;
4226 	int len, hlen;
4227 	u_short sum;
4228 
4229 	if (*mp == NULL)
4230 		return (-1);
4231 #if 0 /* notyet */
4232 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4233 		if ((m = m_copyup(m, sizeof(struct ip),
4234 			(max_linkhdr + 3) & ~3)) == NULL) {
4235 			/* XXXJRT new stat, please */
4236 			ipstat.ips_toosmall++;
4237 			goto bad;
4238 		}
4239 	} else
4240 #endif
4241 #ifndef __predict_false
4242 #define __predict_false(x) x
4243 #endif
4244 	 if (__predict_false(m->m_len < sizeof (struct ip))) {
4245 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
4246 			ipstat.ips_toosmall++;
4247 			goto bad;
4248 		}
4249 	}
4250 	ip = mtod(m, struct ip *);
4251 	if (ip == NULL) goto bad;
4252 
4253 	if (ip->ip_v != IPVERSION) {
4254 		ipstat.ips_badvers++;
4255 		goto bad;
4256 	}
4257 	hlen = ip->ip_hl << 2;
4258 	if (hlen < sizeof(struct ip)) { /* minimum header length */
4259 		ipstat.ips_badhlen++;
4260 		goto bad;
4261 	}
4262 	if (hlen > m->m_len) {
4263 		if ((m = m_pullup(m, hlen)) == NULL) {
4264 			ipstat.ips_badhlen++;
4265 			goto bad;
4266 		}
4267 		ip = mtod(m, struct ip *);
4268 		if (ip == NULL) goto bad;
4269 	}
4270 
4271 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
4272 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
4273 	} else {
4274 		if (hlen == sizeof(struct ip)) {
4275 			sum = in_cksum_hdr(ip);
4276 		} else {
4277 			sum = in_cksum(m, hlen);
4278 		}
4279 	}
4280 	if (sum) {
4281 		ipstat.ips_badsum++;
4282 		goto bad;
4283 	}
4284 
4285 	/* Retrieve the packet length. */
4286 	len = ntohs(ip->ip_len);
4287 
4288 	/*
4289 	 * Check for additional length bogosity
4290 	 */
4291 	if (len < hlen) {
4292 		ipstat.ips_badlen++;
4293 		goto bad;
4294 	}
4295 
4296 	/*
4297 	 * Check that the amount of data in the buffers
4298 	 * is at least as much as the IP header would have us expect.
4299 	 * Drop packet if shorter than we expect.
4300 	 */
4301 	if (m->m_pkthdr.len < len) {
4302 		ipstat.ips_tooshort++;
4303 		goto bad;
4304 	}
4305 
4306 	/* Checks out, proceed */
4307 	*mp = m;
4308 	return (0);
4309 
4310 bad:
4311 	*mp = m;
4312 	return (-1);
4313 }
4314 
4315 #ifdef INET6
4316 /*
4317  * Same as above, but for IPv6.
4318  * Cut-and-pasted from ip6_input.c.
4319  * XXX Should we update ip6stat, or not?
4320  */
4321 static int
4322 bridge_ip6_checkbasic(struct mbuf **mp)
4323 {
4324 	struct mbuf *m = *mp;
4325 	struct ip6_hdr *ip6;
4326 
4327 	/*
4328 	 * If the IPv6 header is not aligned, slurp it up into a new
4329 	 * mbuf with space for link headers, in the event we forward
4330 	 * it.  Otherwise, if it is aligned, make sure the entire base
4331 	 * IPv6 header is in the first mbuf of the chain.
4332 	 */
4333 #if 0 /* notyet */
4334 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4335 		struct ifnet *inifp = m->m_pkthdr.rcvif;
4336 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
4337 			    (max_linkhdr + 3) & ~3)) == NULL) {
4338 			/* XXXJRT new stat, please */
4339 			ip6stat.ip6s_toosmall++;
4340 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4341 			goto bad;
4342 		}
4343 	} else
4344 #endif
4345 	if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
4346 		struct ifnet *inifp = m->m_pkthdr.rcvif;
4347 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
4348 			ip6stat.ip6s_toosmall++;
4349 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4350 			goto bad;
4351 		}
4352 	}
4353 
4354 	ip6 = mtod(m, struct ip6_hdr *);
4355 
4356 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
4357 		ip6stat.ip6s_badvers++;
4358 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
4359 		goto bad;
4360 	}
4361 
4362 	/* Checks out, proceed */
4363 	*mp = m;
4364 	return (0);
4365 
4366 bad:
4367 	*mp = m;
4368 	return (-1);
4369 }
4370 #endif /* INET6 */
4371 
4372 /*
4373  * bridge_fragment:
4374  *
4375  *	Return a fragmented mbuf chain.
4376  */
4377 static int
4378 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
4379     int snap, struct llc *llc)
4380 {
4381 	struct mbuf *m0;
4382 	struct ip *ip;
4383 	int error = -1;
4384 
4385 	if (m->m_len < sizeof(struct ip) &&
4386 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
4387 		goto out;
4388 	ip = mtod(m, struct ip *);
4389 
4390 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
4391 		    CSUM_DELAY_IP);
4392 	if (error)
4393 		goto out;
4394 
4395 	/* walk the chain and re-add the Ethernet header */
4396 	for (m0 = m; m0; m0 = m0->m_nextpkt) {
4397 		if (error == 0) {
4398 			if (snap) {
4399 				M_PREPEND(m0, sizeof(struct llc), M_NOWAIT);
4400 				if (m0 == NULL) {
4401 					error = ENOBUFS;
4402 					continue;
4403 				}
4404 				bcopy(llc, mtod(m0, caddr_t),
4405 				    sizeof(struct llc));
4406 			}
4407 			M_PREPEND(m0, ETHER_HDR_LEN, M_NOWAIT);
4408 			if (m0 == NULL) {
4409 				error = ENOBUFS;
4410 				continue;
4411 			}
4412 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
4413 		} else
4414 			m_freem(m);
4415 	}
4416 
4417 	if (error == 0)
4418 		ipstat.ips_fragmented++;
4419 
4420 	return (error);
4421 
4422 out:
4423 	if (m != NULL)
4424 		m_freem(m);
4425 	return (error);
4426 }
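
/*
 * ip_fragment() links the resulting fragments through m_nextpkt; the loop
 * above prepends the saved Ethernet (and optional SNAP) header to each
 * fragment, and bridge_handoff() below walks the same m_nextpkt chain when
 * transmitting.
 */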
4427 
4428 static void
4429 bridge_enqueue_handler(netmsg_t msg)
4430 {
4431 	struct netmsg_packet *nmp;
4432 	struct ifnet *dst_ifp;
4433 	struct mbuf *m;
4434 
4435 	nmp = &msg->packet;
4436 	m = nmp->nm_packet;
4437 	dst_ifp = nmp->base.lmsg.u.ms_resultp;
4438 	mbuftrackid(m, 71);
4439 
4440 	bridge_handoff(dst_ifp->if_bridge, dst_ifp, m, 1);
4441 }
4442 
4443 static void
4444 bridge_handoff(struct bridge_softc *sc, struct ifnet *dst_ifp,
4445 	       struct mbuf *m, int from_us)
4446 {
4447 	struct mbuf *m0;
4448 	struct ifnet *bifp;
4449 
4450 	bifp = sc->sc_ifp;
4451 	mbuftrackid(m, 72);
4452 
4453 	/* We may be sending a fragment so traverse the mbuf */
4454 	for (; m; m = m0) {
4455 		struct altq_pktattr pktattr;
4456 
4457 		m0 = m->m_nextpkt;
4458 		m->m_nextpkt = NULL;
4459 
4460 		/*
4461 		 * If being sent from our host override ether_shost
4462 		 * with the bridge MAC.  This is mandatory for ARP
4463 		 * so things don't get confused.  In particular we
4464 		 * don't want ARPs to get associated with link interfaces
4465 		 * under the bridge which might or might not stay valid.
4466 		 *
4467 		 * Also override ether_shost when relaying a packet out
4468 		 * the same interface it came in on, due to multi-homed
4469 		 * addresses & default routes, otherwise switches will
4470 		 * get very confused.
4471 		 *
4472 		 * Otherwise, if we are in transparent mode (IFF_LINK0),
4473 		 * the source recorded at bridge ingress is restored.
4474 		if (from_us || m->m_pkthdr.rcvif == dst_ifp) {
4475 			m_copyback(m,
4476 				   offsetof(struct ether_header, ether_shost),
4477 				   ETHER_ADDR_LEN, IF_LLADDR(sc->sc_ifp));
4478 		} else if ((bifp->if_flags & IFF_LINK0) &&
4479 			   (m->m_pkthdr.fw_flags & BRIDGE_MBUF_TAGGED)) {
4480 			m_copyback(m,
4481 				   offsetof(struct ether_header, ether_shost),
4482 				   ETHER_ADDR_LEN,
4483 				   m->m_pkthdr.ether_br_shost);
4484 		} /* else retain shost */
4485 
4486 		if (ifq_is_enabled(&dst_ifp->if_snd))
4487 			altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
4488 
4489 		ifq_dispatch(dst_ifp, m, &pktattr);
4490 	}
4491 }
4492 
4493 static void
4494 bridge_control_dispatch(netmsg_t msg)
4495 {
4496 	struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)msg;
4497 	struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
4498 	int error;
4499 
4500 	ifnet_serialize_all(bifp);
4501 	error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
4502 	ifnet_deserialize_all(bifp);
4503 
4504 	lwkt_replymsg(&bc_msg->base.lmsg, error);
4505 }
4506 
4507 static int
4508 bridge_control(struct bridge_softc *sc, u_long cmd,
4509 	       bridge_ctl_t bc_func, void *bc_arg)
4510 {
4511 	struct ifnet *bifp = sc->sc_ifp;
4512 	struct netmsg_brctl bc_msg;
4513 	int error;
4514 
4515 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
4516 
4517 	bzero(&bc_msg, sizeof(bc_msg));
4518 
4519 	netmsg_init(&bc_msg.base, NULL, &curthread->td_msgport,
4520 		    0, bridge_control_dispatch);
4521 	bc_msg.bc_func = bc_func;
4522 	bc_msg.bc_sc = sc;
4523 	bc_msg.bc_arg = bc_arg;
4524 
4525 	ifnet_deserialize_all(bifp);
4526 	error = lwkt_domsg(BRIDGE_CFGPORT, &bc_msg.base.lmsg, 0);
4527 	ifnet_serialize_all(bifp);
4528 	return error;
4529 }
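
/*
 * All reconfiguration is funneled through a single message port
 * (BRIDGE_CFGPORT): bridge_control() drops the bridge serializers, sends
 * a netmsg_brctl carrying bc_func/bc_arg, and bridge_control_dispatch()
 * above runs the handler with the serializers re-acquired before replying.
 *
 * Hypothetical caller sketch (the handler and argument names below are
 * illustrative, not from this driver):
 */
#if 0
	error = bridge_control(sc, cmd, bridge_ioctl_example, &example_args);
#endif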
4530 
4531 static void
4532 bridge_add_bif_handler(netmsg_t msg)
4533 {
4534 	struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)msg;
4535 	struct bridge_softc *sc;
4536 	struct bridge_iflist *bif;
4537 
4538 	sc = amsg->br_softc;
4539 
4540 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
4541 	bif->bif_ifp = amsg->br_bif_ifp;
4542 	bif->bif_onlist = 1;
4543 	bif->bif_info = amsg->br_bif_info;
4544 
4545 	/*
4546 	 * Default flags; bif_flags runs through bif_info (shared by all cpus).
4547 	 */
4548 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
4549 
4550 	TAILQ_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
4551 
4552 	netisr_forwardmsg(&amsg->base, mycpuid + 1);
4553 }
4554 
4555 static void
4556 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
4557 	       struct ifnet *ifp)
4558 {
4559 	struct netmsg_braddbif amsg;
4560 
4561 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
4562 
4563 	netmsg_init(&amsg.base, NULL, &curthread->td_msgport,
4564 		    0, bridge_add_bif_handler);
4565 	amsg.br_softc = sc;
4566 	amsg.br_bif_info = bif_info;
4567 	amsg.br_bif_ifp = ifp;
4568 
4569 	netisr_domsg(&amsg.base, 0);
4570 }
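
/*
 * netisr_domsg() starts bridge_add_bif_handler() on cpu0 and each
 * invocation forwards the message to the next cpu, so an identical
 * bridge_iflist entry is linked onto every cpu's sc_iflists before the
 * call returns.
 */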
4571 
4572 static void
4573 bridge_del_bif_handler(netmsg_t msg)
4574 {
4575 	struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)msg;
4576 	struct bridge_softc *sc;
4577 	struct bridge_iflist *bif;
4578 
4579 	sc = dmsg->br_softc;
4580 
4581 	/*
4582 	 * Locate the bif associated with the br_bif_info
4583 	 * on the current CPU
4584 	 */
4585 	bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
4586 	KKASSERT(bif != NULL && bif->bif_onlist);
4587 
4588 	/* Remove the bif from the current CPU's iflist */
4589 	bif->bif_onlist = 0;
4590 	TAILQ_REMOVE(&sc->sc_iflists[mycpuid], bif, bif_next);
4591 
4592 	/* Save the removed bif for later freeing */
4593 	TAILQ_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
4594 
4595 	netisr_forwardmsg(&dmsg->base, mycpuid + 1);
4596 }
4597 
4598 static void
4599 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
4600 	       struct bridge_iflist_head *saved_bifs)
4601 {
4602 	struct netmsg_brdelbif dmsg;
4603 
4604 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
4605 
4606 	netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
4607 		    0, bridge_del_bif_handler);
4608 	dmsg.br_softc = sc;
4609 	dmsg.br_bif_info = bif_info;
4610 	dmsg.br_bif_list = saved_bifs;
4611 
4612 	netisr_domsg(&dmsg.base, 0);
4613 }
4614