1 /*
2  * Copyright 2001 Wasabi Systems, Inc.
3  * All rights reserved.
4  *
5  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed for the NetBSD Project by
18  *	Wasabi Systems, Inc.
19  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /*
37  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by Jason L. Wright
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67  * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68  * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69  * $DragonFly: src/sys/net/bridge/if_bridge.c,v 1.60 2008/11/26 12:49:43 sephe Exp $
70  */
71 
72 /*
73  * Network interface bridge support.
74  *
75  * TODO:
76  *
 *	- Currently only supports Ethernet-like interfaces (Ethernet,
 *	  802.11, VLANs on Ethernet, etc.).  Figure out a nice way
 *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
 *	  consider heterogeneous bridges).
81  *
82  *
 * The bridge's route information is duplicated on each CPU:
84  *
85  *      CPU0          CPU1          CPU2          CPU3
86  * +-----------+ +-----------+ +-----------+ +-----------+
87  * |  rtnode   | |  rtnode   | |  rtnode   | |  rtnode   |
88  * |           | |           | |           | |           |
89  * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
90  * +-----------+ +-----------+ +-----------+ +-----------+
91  *       |         |                     |         |
92  *       |         |                     |         |
93  *       |         |     +----------+    |         |
94  *       |         |     |  rtinfo  |    |         |
95  *       |         +---->|          |<---+         |
96  *       |               |  flags   |              |
97  *       +-------------->|  timeout |<-------------+
98  *                       |  dst_ifp |
99  *                       +----------+
100  *
 * We choose to put timeout and dst_ifp into the shared part, so
 * updating them is cheaper than using message forwarding.  There is
 * also no need for a spinlock to protect the updates: timeout and
 * dst_ifp are unrelated, and the order in which each field is updated
 * does not matter.  The cache pollution caused by the shared part
 * should not be heavy: in a stable setup dst_ifp will probably not
 * change during the rtnode's lifetime, while timeout is refreshed once
 * per second; most of the time timeout and dst_ifp are only read.
109  *
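 * For reference, a simplified sketch of the two structures (the
 * authoritative definitions live in if_bridgevar.h; only the fields
 * discussed above are shown and the layout here is illustrative):
 *
 *	struct bridge_rtinfo {			(shared part, one instance)
 *		struct ifnet	*bri_ifp;	(dst_ifp above)
 *		unsigned long	bri_expire;	(timeout above)
 *		uint8_t		bri_flags;
 *	};
 *
 *	struct bridge_rtnode {			(duplicated on each cpu)
 *		uint8_t			brt_addr[ETHER_ADDR_LEN];
 *		struct bridge_rtinfo	*brt_info;	(shared part above)
 *	};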
110  *
111  * Bridge route information installation on bridge_input path:
112  *
113  *      CPU0           CPU1         CPU2          CPU3
114  *
115  *                               tcp_thread2
116  *                                    |
117  *                                alloc nmsg
118  *                    snd nmsg        |
119  *                    w/o rtinfo      |
120  *      ifnet0<-----------------------+
121  *        |                           :
122  *    lookup dst                      :
123  *   rtnode exists?(Y)free nmsg       :
124  *        |(N)                        :
125  *        |
126  *  alloc rtinfo
127  *  alloc rtnode
128  * install rtnode
129  *        |
130  *        +---------->ifnet1
131  *        : fwd nmsg    |
132  *        : w/ rtinfo   |
133  *        :             |
134  *        :             |
135  *                 alloc rtnode
136  *               (w/ nmsg's rtinfo)
137  *                install rtnode
138  *                      |
139  *                      +---------->ifnet2
140  *                      : fwd nmsg    |
141  *                      : w/ rtinfo   |
142  *                      :             |
143  *                      :         same as ifnet1
144  *                                    |
145  *                                    +---------->ifnet3
146  *                                    : fwd nmsg    |
147  *                                    : w/ rtinfo   |
148  *                                    :             |
149  *                                    :         same as ifnet1
150  *                                               free nmsg
151  *                                                  :
152  *                                                  :
153  *
 * The netmsgs forwarded between the protocol threads and the ifnet
 * threads are allocated with (M_WAITOK|M_NULLOK), so the allocation
 * will not fail in most cases (route information is too precious not
 * to be installed :).  Since multiple threads may try to install route
 * information for the same dst eaddr, we look the route information up
 * in ifnet0.  This lookup only needs to be performed in ifnet0, which
 * is the starting point of the route information installation process.
161  *
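 * In rough outline, each ifnet thread's install handler behaves as
 * sketched below (a simplified paraphrase of bridge_rtinstall_handler()
 * and bridge_rtinstall_oncpu(); error paths and flag handling omitted):
 *
 *	if the nmsg carries no rtinfo (i.e. we are ifnet0):
 *		look up dst eaddr in this cpu's rtnode table;
 *		if an rtnode already exists, free the nmsg and return;
 *		allocate an rtinfo;
 *	allocate an rtnode, point it at the rtinfo, install it;
 *	if this is the last ifnet thread:
 *		free the nmsg;
 *	else:
 *		forward the nmsg (now carrying the rtinfo) to the next
 *		ifnet thread;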
162  *
163  * Bridge route information deleting/flushing:
164  *
165  *  CPU0            CPU1             CPU2             CPU3
166  *
167  * netisr0
168  *   |
169  * find suitable rtnodes,
170  * mark their rtinfo dead
171  *   |
172  *   | domsg <------------------------------------------+
173  *   |                                                  | replymsg
174  *   |                                                  |
175  *   V     fwdmsg           fwdmsg           fwdmsg     |
176  * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
177  * delete rtnodes   delete rtnodes   delete rtnodes   delete rtnodes
178  * w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo
179  *                                                    free dead rtinfos
180  *
181  * All deleting/flushing operations are serialized by netisr0, so each
182  * operation only reaps the route information marked dead by itself.
183  *
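 * In other words (a simplified view of the reaping path; the real work
 * is done by bridge_rtreap() and bridge_rtreap_handler()):
 *
 *	netisr0:  mark the rtinfo of every matching rtnode dead,
 *	          then domsg the reap message to ifnet0;
 *	ifnetN:   delete the local rtnodes whose rtinfo is dead,
 *	          then forward the message to ifnet(N+1); the last
 *	          ifnet thread frees the dead rtinfos and replies.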
184  *
185  * Bridge route information adding/deleting/flushing:
 * Since all operations are serialized by the fixed message flow
 * between the ifnet threads, it is not possible to create corrupted
 * per-cpu route information.
189  *
190  *
191  *
192  * Percpu member interface list iteration with blocking operation:
 * Since a bridge can only delete one member interface at a time, and
 * the deleted member interface is not freed until netmsg_service_sync()
 * has completed, the following scheme is used to make sure that the
 * iteration can keep going even if a member interface is ripped from
 * the percpu list during a blocking operation:
198  *
199  * LIST_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
200  *     blocking operation;
201  *     blocking operation;
202  *     ...
203  *     ...
204  *     if (nbif != NULL && !nbif->bif_onlist) {
205  *         KKASSERT(bif->bif_onlist);
206  *         nbif = LIST_NEXT(bif, bif_next);
207  *     }
208  * }
209  *
 * As mentioned above, only one member interface can be unlinked from
 * the percpu member interface list at a time, so either bif or nbif
 * may be off the list, but _not_ both.  To keep the iteration going we
 * only care about nbif, not bif.  Since a removed member interface is
 * only freed after we finish our work, it is safe to access any field
 * of an unlinked bif (here bif_onlist).  If nbif is no longer on the
 * list, then bif must still be on it, so we advance nbif to the
 * element after bif and keep going.
217  */
218 
219 #include "opt_inet.h"
220 #include "opt_inet6.h"
221 
222 #include <sys/param.h>
223 #include <sys/mbuf.h>
224 #include <sys/malloc.h>
225 #include <sys/protosw.h>
226 #include <sys/systm.h>
227 #include <sys/time.h>
228 #include <sys/socket.h> /* for net/if.h */
229 #include <sys/sockio.h>
230 #include <sys/ctype.h>  /* string functions */
231 #include <sys/kernel.h>
232 #include <sys/random.h>
233 #include <sys/sysctl.h>
234 #include <sys/module.h>
235 #include <sys/proc.h>
236 #include <sys/priv.h>
237 #include <sys/lock.h>
238 #include <sys/thread.h>
239 #include <sys/thread2.h>
240 #include <sys/mpipe.h>
241 
242 #include <net/bpf.h>
243 #include <net/if.h>
244 #include <net/if_dl.h>
245 #include <net/if_types.h>
246 #include <net/if_var.h>
247 #include <net/pfil.h>
248 #include <net/ifq_var.h>
249 #include <net/if_clone.h>
250 
251 #include <netinet/in.h> /* for struct arpcom */
252 #include <netinet/in_systm.h>
253 #include <netinet/in_var.h>
254 #include <netinet/ip.h>
255 #include <netinet/ip_var.h>
256 #ifdef INET6
257 #include <netinet/ip6.h>
258 #include <netinet6/ip6_var.h>
259 #endif
260 #include <netinet/if_ether.h> /* for struct arpcom */
261 #include <net/bridge/if_bridgevar.h>
262 #include <net/if_llc.h>
263 #include <net/netmsg2.h>
264 
265 #include <net/route.h>
266 #include <sys/in_cksum.h>
267 
268 /*
269  * Size of the route hash table.  Must be a power of two.
270  */
271 #ifndef BRIDGE_RTHASH_SIZE
272 #define	BRIDGE_RTHASH_SIZE		1024
273 #endif
274 
275 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
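
/*
 * Since BRIDGE_RTHASH_SIZE is a power of two, a bucket index can be
 * derived from an address hash with a mask instead of a modulo,
 * conceptually:
 *
 *	idx = hash_of(dst_eaddr) & BRIDGE_RTHASH_MASK;
 *
 * (The actual hash computation lives with the route table code; the
 * expression above only illustrates why the size must be a power of
 * two.)
 */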
276 
277 /*
278  * Maximum number of addresses to cache.
279  */
280 #ifndef BRIDGE_RTABLE_MAX
281 #define	BRIDGE_RTABLE_MAX		100
282 #endif
283 
284 /*
285  * Spanning tree defaults.
286  */
287 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
288 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
289 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
290 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
291 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
292 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
293 #define	BSTP_DEFAULT_PATH_COST		55
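
/*
 * These spanning tree timer values are kept in units of 1/256 of a
 * second (hence the "* 256" above); the ioctl handlers below convert
 * to and from whole seconds by shifting by 8, e.g.:
 *
 *	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
 *	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
 */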
294 
295 /*
296  * Timeout (in seconds) for entries learned dynamically.
297  */
298 #ifndef BRIDGE_RTABLE_TIMEOUT
299 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
300 #endif
301 
302 /*
303  * Number of seconds between walks of the route list.
304  */
305 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
306 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
307 #endif
308 
309 /*
310  * List of capabilities to mask on the member interface.
311  */
312 #define	BRIDGE_IFCAPS_MASK		IFCAP_TXCSUM
313 
314 typedef int	(*bridge_ctl_t)(struct bridge_softc *, void *);
315 
316 struct netmsg_brctl {
317 	struct netmsg_base	base;
318 	bridge_ctl_t		bc_func;
319 	struct bridge_softc	*bc_sc;
320 	void			*bc_arg;
321 };
322 
323 struct netmsg_brsaddr {
324 	struct netmsg_base	base;
325 	struct bridge_softc	*br_softc;
326 	struct ifnet		*br_dst_if;
327 	struct bridge_rtinfo	*br_rtinfo;
328 	int			br_setflags;
329 	uint8_t			br_dst[ETHER_ADDR_LEN];
330 	uint8_t			br_flags;
331 };
332 
333 struct netmsg_braddbif {
334 	struct netmsg_base	base;
335 	struct bridge_softc	*br_softc;
336 	struct bridge_ifinfo	*br_bif_info;
337 	struct ifnet		*br_bif_ifp;
338 };
339 
340 struct netmsg_brdelbif {
341 	struct netmsg_base	base;
342 	struct bridge_softc	*br_softc;
343 	struct bridge_ifinfo	*br_bif_info;
344 	struct bridge_iflist_head *br_bif_list;
345 };
346 
347 struct netmsg_brsflags {
348 	struct netmsg_base	base;
349 	struct bridge_softc	*br_softc;
350 	struct bridge_ifinfo	*br_bif_info;
351 	uint32_t		br_bif_flags;
352 };
353 
354 eventhandler_tag	bridge_detach_cookie = NULL;
355 
356 extern	struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
357 extern	int (*bridge_output_p)(struct ifnet *, struct mbuf *);
358 extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
359 
360 static int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
361 
362 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
363 static int	bridge_clone_destroy(struct ifnet *);
364 
365 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
366 static void	bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
367 static void	bridge_ifdetach(void *, struct ifnet *);
368 static void	bridge_init(void *);
369 static void	bridge_stop(struct ifnet *);
370 static void	bridge_start(struct ifnet *);
371 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
372 static int	bridge_output(struct ifnet *, struct mbuf *);
373 
374 static void	bridge_forward(struct bridge_softc *, struct mbuf *m);
375 
376 static void	bridge_timer_handler(netmsg_t);
377 static void	bridge_timer(void *);
378 
379 static void	bridge_start_bcast(struct bridge_softc *, struct mbuf *);
380 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
381 		    struct mbuf *);
382 static void	bridge_span(struct bridge_softc *, struct mbuf *);
383 
384 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
385 		    struct ifnet *, uint8_t);
386 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
387 static void	bridge_rtreap(struct bridge_softc *);
388 static void	bridge_rtreap_async(struct bridge_softc *);
389 static void	bridge_rttrim(struct bridge_softc *);
390 static int	bridge_rtage_finddead(struct bridge_softc *);
391 static void	bridge_rtage(struct bridge_softc *);
392 static void	bridge_rtflush(struct bridge_softc *, int);
393 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
394 static int	bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
395 		    struct ifnet *, uint8_t);
396 static void	bridge_rtmsg_sync(struct bridge_softc *sc);
397 static void	bridge_rtreap_handler(netmsg_t);
398 static void	bridge_rtinstall_handler(netmsg_t);
399 static int	bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
400 		    struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
401 
402 static void	bridge_rtable_init(struct bridge_softc *);
403 static void	bridge_rtable_fini(struct bridge_softc *);
404 
405 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
406 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
407 		    const uint8_t *);
408 static void	bridge_rtnode_insert(struct bridge_softc *,
409 		    struct bridge_rtnode *);
410 static void	bridge_rtnode_destroy(struct bridge_softc *,
411 		    struct bridge_rtnode *);
412 
413 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
414 		    const char *name);
415 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
416 		    struct ifnet *ifp);
417 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
418 		    struct bridge_ifinfo *);
419 static void	bridge_delete_member(struct bridge_softc *,
420 		    struct bridge_iflist *, int);
421 static void	bridge_delete_span(struct bridge_softc *,
422 		    struct bridge_iflist *);
423 
424 static int	bridge_control(struct bridge_softc *, u_long,
425 			       bridge_ctl_t, void *);
426 static int	bridge_ioctl_init(struct bridge_softc *, void *);
427 static int	bridge_ioctl_stop(struct bridge_softc *, void *);
428 static int	bridge_ioctl_add(struct bridge_softc *, void *);
429 static int	bridge_ioctl_del(struct bridge_softc *, void *);
430 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
431 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
432 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
433 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
434 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
435 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
436 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
437 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
438 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
439 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
440 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
441 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
442 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
443 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
444 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
445 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
446 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
447 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
448 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
449 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
450 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
451 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
452 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
453 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
454 		    int);
455 static int	bridge_ip_checkbasic(struct mbuf **mp);
456 #ifdef INET6
457 static int	bridge_ip6_checkbasic(struct mbuf **mp);
458 #endif /* INET6 */
459 static int	bridge_fragment(struct ifnet *, struct mbuf *,
460 		    struct ether_header *, int, struct llc *);
461 static void	bridge_enqueue_handler(netmsg_t);
462 static void	bridge_handoff(struct ifnet *, struct mbuf *);
463 
464 static void	bridge_del_bif_handler(netmsg_t);
465 static void	bridge_add_bif_handler(netmsg_t);
466 static void	bridge_set_bifflags_handler(netmsg_t);
467 static void	bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
468 		    struct bridge_iflist_head *);
469 static void	bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
470 		    struct ifnet *);
471 static void	bridge_set_bifflags(struct bridge_softc *,
472 		    struct bridge_ifinfo *, uint32_t);
473 
474 SYSCTL_DECL(_net_link);
475 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
476 
477 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
478 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
479 static int pfil_member = 1; /* run pfil hooks on the member interface */
480 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
481     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
482 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
483     &pfil_bridge, 0, "Packet filter on the bridge interface");
484 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
485     &pfil_member, 0, "Packet filter on the member interface");
486 
487 struct bridge_control_arg {
488 	union {
489 		struct ifbreq ifbreq;
490 		struct ifbifconf ifbifconf;
491 		struct ifbareq ifbareq;
492 		struct ifbaconf ifbaconf;
493 		struct ifbrparam ifbrparam;
494 	} bca_u;
495 	int	bca_len;
496 	void	*bca_uptr;
497 	void	*bca_kptr;
498 };
499 
500 struct bridge_control {
501 	bridge_ctl_t	bc_func;
502 	int		bc_argsize;
503 	int		bc_flags;
504 };
505 
506 #define	BC_F_COPYIN		0x01	/* copy arguments in */
507 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
508 #define	BC_F_SUSER		0x04	/* do super-user check */
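
/*
 * bridge_ioctl() reaches the handlers below through SIOCGDRVSPEC and
 * SIOCSDRVSPEC: userland passes a struct ifdrv whose ifd_cmd indexes
 * bridge_control_table[] and whose ifd_data/ifd_len describe the
 * argument structure.  A hypothetical userland sketch for adding a
 * member (interface names and the socket descriptor are illustrative
 * only):
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = 0;		(index of bridge_ioctl_add below)
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 */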
509 
510 const struct bridge_control bridge_control_table[] = {
511 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
512 	  BC_F_COPYIN|BC_F_SUSER },
513 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
514 	  BC_F_COPYIN|BC_F_SUSER },
515 
516 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
517 	  BC_F_COPYIN|BC_F_COPYOUT },
518 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
519 	  BC_F_COPYIN|BC_F_SUSER },
520 
521 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
522 	  BC_F_COPYIN|BC_F_SUSER },
523 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
524 	  BC_F_COPYOUT },
525 
526 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
527 	  BC_F_COPYIN|BC_F_COPYOUT },
528 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
529 	  BC_F_COPYIN|BC_F_COPYOUT },
530 
531 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
532 	  BC_F_COPYIN|BC_F_SUSER },
533 
534 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
535 	  BC_F_COPYIN|BC_F_SUSER },
536 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
537 	  BC_F_COPYOUT },
538 
539 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
540 	  BC_F_COPYIN|BC_F_SUSER },
541 
542 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
543 	  BC_F_COPYIN|BC_F_SUSER },
544 
545 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
546 	  BC_F_COPYOUT },
547 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
548 	  BC_F_COPYIN|BC_F_SUSER },
549 
550 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
551 	  BC_F_COPYOUT },
552 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
553 	  BC_F_COPYIN|BC_F_SUSER },
554 
555 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
556 	  BC_F_COPYOUT },
557 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
558 	  BC_F_COPYIN|BC_F_SUSER },
559 
560 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
561 	  BC_F_COPYOUT },
562 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
563 	  BC_F_COPYIN|BC_F_SUSER },
564 
565 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
566 	  BC_F_COPYIN|BC_F_SUSER },
567 
568 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
569 	  BC_F_COPYIN|BC_F_SUSER },
570 
571 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
572 	  BC_F_COPYIN|BC_F_SUSER },
573 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
574 	  BC_F_COPYIN|BC_F_SUSER },
575 };
576 static const int bridge_control_table_size =
577     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
578 
579 LIST_HEAD(, bridge_softc) bridge_list;
580 
581 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
582 				bridge_clone_create,
583 				bridge_clone_destroy, 0, IF_MAXUNIT);
584 
585 static int
586 bridge_modevent(module_t mod, int type, void *data)
587 {
588 	switch (type) {
589 	case MOD_LOAD:
590 		LIST_INIT(&bridge_list);
591 		if_clone_attach(&bridge_cloner);
592 		bridge_input_p = bridge_input;
593 		bridge_output_p = bridge_output;
594 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
595 		    ifnet_detach_event, bridge_ifdetach, NULL,
596 		    EVENTHANDLER_PRI_ANY);
597 #if notyet
598 		bstp_linkstate_p = bstp_linkstate;
599 #endif
600 		break;
601 	case MOD_UNLOAD:
602 		if (!LIST_EMPTY(&bridge_list))
603 			return (EBUSY);
604 		EVENTHANDLER_DEREGISTER(ifnet_detach_event,
605 		    bridge_detach_cookie);
606 		if_clone_detach(&bridge_cloner);
607 		bridge_input_p = NULL;
608 		bridge_output_p = NULL;
609 #if notyet
610 		bstp_linkstate_p = NULL;
611 #endif
612 		break;
613 	default:
614 		return (EOPNOTSUPP);
615 	}
616 	return (0);
617 }
618 
619 static moduledata_t bridge_mod = {
620 	"if_bridge",
621 	bridge_modevent,
622 	0
623 };
624 
625 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
626 
627 
628 /*
629  * bridge_clone_create:
630  *
631  *	Create a new bridge instance.
632  */
633 static int
634 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t param __unused)
635 {
636 	struct bridge_softc *sc;
637 	struct ifnet *ifp;
638 	u_char eaddr[6];
639 	int cpu, rnd;
640 
641 	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
642 	ifp = sc->sc_ifp = &sc->sc_if;
643 
644 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
645 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
646 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
647 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
648 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
649 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
650 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
651 
652 	/* Initialize our routing table. */
653 	bridge_rtable_init(sc);
654 
655 	callout_init(&sc->sc_brcallout);
656 	netmsg_init(&sc->sc_brtimemsg, NULL, &netisr_adone_rport,
657 		    MSGF_DROPABLE, bridge_timer_handler);
658 	sc->sc_brtimemsg.lmsg.u.ms_resultp = sc;
659 
660 	callout_init(&sc->sc_bstpcallout);
661 	netmsg_init(&sc->sc_bstptimemsg, NULL, &netisr_adone_rport,
662 		    MSGF_DROPABLE, bstp_tick_handler);
663 	sc->sc_bstptimemsg.lmsg.u.ms_resultp = sc;
664 
665 	/* Initialize per-cpu member iface lists */
666 	sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
667 				 M_DEVBUF, M_WAITOK);
668 	for (cpu = 0; cpu < ncpus; ++cpu)
669 		LIST_INIT(&sc->sc_iflists[cpu]);
670 
671 	LIST_INIT(&sc->sc_spanlist);
672 
673 	ifp->if_softc = sc;
674 	if_initname(ifp, ifc->ifc_name, unit);
675 	ifp->if_mtu = ETHERMTU;
676 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
677 	ifp->if_ioctl = bridge_ioctl;
678 	ifp->if_start = bridge_start;
679 	ifp->if_init = bridge_init;
680 	ifp->if_type = IFT_BRIDGE;
681 	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
682 	ifq_set_ready(&ifp->if_snd);
683 	ifp->if_hdrlen = ETHER_HDR_LEN;
684 
685 	/*
	 * Generate a random Ethernet address and mark it as a locally
	 * administered, unicast address (multicast bit cleared, LAA bit set).
688 	 */
689 	rnd = karc4random();
690 	bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
691 	rnd = karc4random();
692 	bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */
693 
694 	eaddr[0] &= ~1;	/* clear multicast bit */
695 	eaddr[0] |= 2;	/* set the LAA bit */
696 
697 	ether_ifattach(ifp, eaddr, NULL);
698 	/* Now undo some of the damage... */
699 	ifp->if_baudrate = 0;
700 	ifp->if_type = IFT_BRIDGE;
701 
702 	crit_enter();	/* XXX MP */
703 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
704 	crit_exit();
705 
706 	return (0);
707 }
708 
709 static void
710 bridge_delete_dispatch(netmsg_t msg)
711 {
712 	struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
713 	struct ifnet *bifp = sc->sc_ifp;
714 	struct bridge_iflist *bif;
715 
716 	ifnet_serialize_all(bifp);
717 
718 	while ((bif = LIST_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
719 		bridge_delete_member(sc, bif, 0);
720 
721 	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL)
722 		bridge_delete_span(sc, bif);
723 
724 	ifnet_deserialize_all(bifp);
725 
726 	lwkt_replymsg(&msg->lmsg, 0);
727 }
728 
729 /*
730  * bridge_clone_destroy:
731  *
732  *	Destroy a bridge instance.
733  */
734 static int
735 bridge_clone_destroy(struct ifnet *ifp)
736 {
737 	struct bridge_softc *sc = ifp->if_softc;
738 	struct netmsg_base msg;
739 
740 	ifnet_serialize_all(ifp);
741 
742 	bridge_stop(ifp);
743 	ifp->if_flags &= ~IFF_UP;
744 
745 	ifnet_deserialize_all(ifp);
746 
747 	netmsg_init(&msg, NULL, &curthread->td_msgport,
748 		    0, bridge_delete_dispatch);
749 	msg.lmsg.u.ms_resultp = sc;
750 	lwkt_domsg(BRIDGE_CFGPORT, &msg.lmsg, 0);
751 
752 	crit_enter();	/* XXX MP */
753 	LIST_REMOVE(sc, sc_list);
754 	crit_exit();
755 
756 	ether_ifdetach(ifp);
757 
758 	/* Tear down the routing table. */
759 	bridge_rtable_fini(sc);
760 
761 	/* Free per-cpu member iface lists */
762 	kfree(sc->sc_iflists, M_DEVBUF);
763 
764 	kfree(sc, M_DEVBUF);
765 
766 	return 0;
767 }
768 
769 /*
770  * bridge_ioctl:
771  *
772  *	Handle a control request from the operator.
773  */
774 static int
775 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
776 {
777 	struct bridge_softc *sc = ifp->if_softc;
778 	struct bridge_control_arg args;
779 	struct ifdrv *ifd = (struct ifdrv *) data;
780 	const struct bridge_control *bc;
781 	int error = 0;
782 
783 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
784 
785 	switch (cmd) {
786 	case SIOCADDMULTI:
787 	case SIOCDELMULTI:
788 		break;
789 
790 	case SIOCGDRVSPEC:
791 	case SIOCSDRVSPEC:
792 		if (ifd->ifd_cmd >= bridge_control_table_size) {
793 			error = EINVAL;
794 			break;
795 		}
796 		bc = &bridge_control_table[ifd->ifd_cmd];
797 
798 		if (cmd == SIOCGDRVSPEC &&
799 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
800 			error = EINVAL;
801 			break;
802 		} else if (cmd == SIOCSDRVSPEC &&
803 			   (bc->bc_flags & BC_F_COPYOUT)) {
804 			error = EINVAL;
805 			break;
806 		}
807 
808 		if (bc->bc_flags & BC_F_SUSER) {
809 			error = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY);
810 			if (error)
811 				break;
812 		}
813 
814 		if (ifd->ifd_len != bc->bc_argsize ||
815 		    ifd->ifd_len > sizeof(args.bca_u)) {
816 			error = EINVAL;
817 			break;
818 		}
819 
820 		memset(&args, 0, sizeof(args));
821 		if (bc->bc_flags & BC_F_COPYIN) {
822 			error = copyin(ifd->ifd_data, &args.bca_u,
823 				       ifd->ifd_len);
824 			if (error)
825 				break;
826 		}
827 
828 		error = bridge_control(sc, cmd, bc->bc_func, &args);
829 		if (error) {
830 			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
831 			break;
832 		}
833 
834 		if (bc->bc_flags & BC_F_COPYOUT) {
835 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
836 			if (args.bca_len != 0) {
837 				KKASSERT(args.bca_kptr != NULL);
838 				if (!error) {
839 					error = copyout(args.bca_kptr,
840 						args.bca_uptr, args.bca_len);
841 				}
842 				kfree(args.bca_kptr, M_TEMP);
843 			} else {
844 				KKASSERT(args.bca_kptr == NULL);
845 			}
846 		} else {
847 			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
848 		}
849 		break;
850 
851 	case SIOCSIFFLAGS:
852 		if (!(ifp->if_flags & IFF_UP) &&
853 		    (ifp->if_flags & IFF_RUNNING)) {
854 			/*
855 			 * If interface is marked down and it is running,
856 			 * then stop it.
857 			 */
858 			bridge_stop(ifp);
859 		} else if ((ifp->if_flags & IFF_UP) &&
860 		    !(ifp->if_flags & IFF_RUNNING)) {
861 			/*
862 			 * If interface is marked up and it is stopped, then
863 			 * start it.
864 			 */
865 			ifp->if_init(sc);
866 		}
867 		break;
868 
869 	case SIOCSIFMTU:
870 		/* Do not allow the MTU to be changed on the bridge */
871 		error = EINVAL;
872 		break;
873 
874 	default:
875 		error = ether_ioctl(ifp, cmd, data);
876 		break;
877 	}
878 	return (error);
879 }
880 
881 /*
882  * bridge_mutecaps:
883  *
884  *	Clear or restore unwanted capabilities on the member interface
885  */
886 static void
887 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
888 {
889 	struct ifreq ifr;
890 	int error;
891 
892 	if (ifp->if_ioctl == NULL)
893 		return;
894 
895 	bzero(&ifr, sizeof(ifr));
896 	ifr.ifr_reqcap = ifp->if_capenable;
897 
898 	if (mute) {
899 		/* mask off and save capabilities */
900 		bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
901 		if (bif_info->bifi_mutecap != 0)
902 			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
903 	} else {
904 		/* restore muted capabilities */
905 		ifr.ifr_reqcap |= bif_info->bifi_mutecap;
906 	}
907 
908 	if (bif_info->bifi_mutecap != 0) {
909 		ifnet_serialize_all(ifp);
910 		error = ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
911 		ifnet_deserialize_all(ifp);
912 	}
913 }
914 
915 /*
916  * bridge_lookup_member:
917  *
 *	Look up a bridge member interface.
919  */
920 static struct bridge_iflist *
921 bridge_lookup_member(struct bridge_softc *sc, const char *name)
922 {
923 	struct bridge_iflist *bif;
924 
925 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
926 		if (strcmp(bif->bif_ifp->if_xname, name) == 0)
927 			return (bif);
928 	}
929 	return (NULL);
930 }
931 
932 /*
933  * bridge_lookup_member_if:
934  *
 *	Look up a bridge member interface by ifnet*.
936  */
937 static struct bridge_iflist *
938 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
939 {
940 	struct bridge_iflist *bif;
941 
942 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
943 		if (bif->bif_ifp == member_ifp)
944 			return (bif);
945 	}
946 	return (NULL);
947 }
948 
949 /*
950  * bridge_lookup_member_ifinfo:
951  *
 *	Look up a bridge member interface by bridge_ifinfo.
953  */
954 static struct bridge_iflist *
955 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
956 			    struct bridge_ifinfo *bif_info)
957 {
958 	struct bridge_iflist *bif;
959 
960 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
961 		if (bif->bif_info == bif_info)
962 			return (bif);
963 	}
964 	return (NULL);
965 }
966 
967 /*
968  * bridge_delete_member:
969  *
970  *	Delete the specified member interface.
971  */
972 static void
973 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
974     int gone)
975 {
976 	struct ifnet *ifs = bif->bif_ifp;
977 	struct ifnet *bifp = sc->sc_ifp;
978 	struct bridge_ifinfo *bif_info = bif->bif_info;
979 	struct bridge_iflist_head saved_bifs;
980 
981 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
982 	KKASSERT(bif_info != NULL);
983 
984 	ifs->if_bridge = NULL;
985 
986 	/*
	 * Release the bridge interface's serializer:
	 * - to avoid a possible deadlock;
	 * - the various sync operations below will block the current thread.
990 	 */
991 	ifnet_deserialize_all(bifp);
992 
993 	if (!gone) {
994 		switch (ifs->if_type) {
995 		case IFT_ETHER:
996 		case IFT_L2VLAN:
997 			/*
998 			 * Take the interface out of promiscuous mode.
999 			 */
1000 			ifpromisc(ifs, 0);
1001 			bridge_mutecaps(bif_info, ifs, 0);
1002 			break;
1003 
1004 		case IFT_GIF:
1005 			break;
1006 
1007 		default:
1008 			panic("bridge_delete_member: impossible");
1009 			break;
1010 		}
1011 	}
1012 
1013 	/*
	 * Remove the bifs from the percpu linked lists.
	 *
	 * Removed bifs are not freed immediately; instead,
	 * they are saved in saved_bifs.  They will be freed
	 * after we make sure that no one is accessing them,
	 * i.e. after the following netmsg_service_sync().
1020 	 */
1021 	LIST_INIT(&saved_bifs);
1022 	bridge_del_bif(sc, bif_info, &saved_bifs);
1023 
1024 	/*
1025 	 * Make sure that all protocol threads:
	 * o  see that 'ifs' if_bridge has been changed
	 * o  know that bif has been removed from the percpu linked list
1028 	 */
1029 	netmsg_service_sync();
1030 
1031 	/*
1032 	 * Free the removed bifs
1033 	 */
1034 	KKASSERT(!LIST_EMPTY(&saved_bifs));
1035 	while ((bif = LIST_FIRST(&saved_bifs)) != NULL) {
1036 		LIST_REMOVE(bif, bif_next);
1037 		kfree(bif, M_DEVBUF);
1038 	}
1039 
1040 	/* See the comment in bridge_ioctl_stop() */
1041 	bridge_rtmsg_sync(sc);
1042 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1043 
1044 	ifnet_serialize_all(bifp);
1045 
1046 	if (bifp->if_flags & IFF_RUNNING)
1047 		bstp_initialization(sc);
1048 
1049 	/*
1050 	 * Free the bif_info after bstp_initialization(), so that
1051 	 * bridge_softc.sc_root_port will not reference a dangling
1052 	 * pointer.
1053 	 */
1054 	kfree(bif_info, M_DEVBUF);
1055 }
1056 
1057 /*
1058  * bridge_delete_span:
1059  *
1060  *	Delete the specified span interface.
1061  */
1062 static void
1063 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1064 {
1065 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1066 	    ("%s: not a span interface", __func__));
1067 
1068 	LIST_REMOVE(bif, bif_next);
1069 	kfree(bif, M_DEVBUF);
1070 }
1071 
1072 static int
1073 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1074 {
1075 	struct ifnet *ifp = sc->sc_ifp;
1076 
1077 	if (ifp->if_flags & IFF_RUNNING)
1078 		return 0;
1079 
1080 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1081 	    bridge_timer, sc);
1082 
1083 	ifp->if_flags |= IFF_RUNNING;
1084 	bstp_initialization(sc);
1085 	return 0;
1086 }
1087 
1088 static int
1089 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1090 {
1091 	struct ifnet *ifp = sc->sc_ifp;
1092 	struct lwkt_msg *lmsg;
1093 
1094 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1095 		return 0;
1096 
1097 	callout_stop(&sc->sc_brcallout);
1098 
1099 	crit_enter();
1100 	lmsg = &sc->sc_brtimemsg.lmsg;
1101 	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
1102 		/* Pending to be processed; drop it */
1103 		lwkt_dropmsg(lmsg);
1104 	}
1105 	crit_exit();
1106 
1107 	bstp_stop(sc);
1108 
1109 	ifp->if_flags &= ~IFF_RUNNING;
1110 
1111 	ifnet_deserialize_all(ifp);
1112 
1113 	/* Let everyone know that we are stopped */
1114 	netmsg_service_sync();
1115 
1116 	/*
	 * Sync the ifnetX msgports in the same order in which we forward
	 * the rtnode installation messages.  This makes sure that all
	 * rtnode installation messages sent by bridge_rtupdate() during
	 * the above netmsg_service_sync() have been flushed.
1121 	 */
1122 	bridge_rtmsg_sync(sc);
1123 	bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);
1124 
1125 	ifnet_serialize_all(ifp);
1126 	return 0;
1127 }
1128 
1129 static int
1130 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1131 {
1132 	struct ifbreq *req = arg;
1133 	struct bridge_iflist *bif;
1134 	struct bridge_ifinfo *bif_info;
1135 	struct ifnet *ifs, *bifp;
1136 	int error = 0;
1137 
1138 	bifp = sc->sc_ifp;
1139 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
1140 
1141 	ifs = ifunit(req->ifbr_ifsname);
1142 	if (ifs == NULL)
1143 		return (ENOENT);
1144 
1145 	/* If it's in the span list, it can't be a member. */
1146 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1147 		if (ifs == bif->bif_ifp)
1148 			return (EBUSY);
1149 
1150 	/* Allow the first Ethernet member to define the MTU */
1151 	if (ifs->if_type != IFT_GIF) {
1152 		if (LIST_EMPTY(&sc->sc_iflists[mycpuid])) {
1153 			bifp->if_mtu = ifs->if_mtu;
1154 		} else if (bifp->if_mtu != ifs->if_mtu) {
1155 			if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1156 			return (EINVAL);
1157 		}
1158 	}
1159 
1160 	if (ifs->if_bridge == sc)
1161 		return (EEXIST);
1162 
1163 	if (ifs->if_bridge != NULL)
1164 		return (EBUSY);
1165 
1166 	bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1167 	bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1168 	bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1169 	bif_info->bifi_ifp = ifs;
1170 
1171 	/*
	 * Release the bridge interface's serializer:
	 * - to avoid a possible deadlock;
	 * - the various sync operations below will block the current thread.
1175 	 */
1176 	ifnet_deserialize_all(bifp);
1177 
1178 	switch (ifs->if_type) {
1179 	case IFT_ETHER:
1180 	case IFT_L2VLAN:
1181 		/*
1182 		 * Place the interface into promiscuous mode.
1183 		 */
1184 		error = ifpromisc(ifs, 1);
1185 		if (error) {
1186 			ifnet_serialize_all(bifp);
1187 			goto out;
1188 		}
1189 		bridge_mutecaps(bif_info, ifs, 1);
1190 		break;
1191 
1192 	case IFT_GIF: /* :^) */
1193 		break;
1194 
1195 	default:
1196 		error = EINVAL;
1197 		ifnet_serialize_all(bifp);
1198 		goto out;
1199 	}
1200 
1201 	/*
1202 	 * Add bifs to percpu linked lists
1203 	 */
1204 	bridge_add_bif(sc, bif_info, ifs);
1205 
1206 	ifnet_serialize_all(bifp);
1207 
1208 	if (bifp->if_flags & IFF_RUNNING)
1209 		bstp_initialization(sc);
1210 	else
1211 		bstp_stop(sc);
1212 
1213 	/*
	 * Everything has been set up, so let the member interface
1215 	 * deliver packets to this bridge on its input/output path.
1216 	 */
1217 	ifs->if_bridge = sc;
1218 out:
1219 	if (error) {
1220 		if (bif_info != NULL)
1221 			kfree(bif_info, M_DEVBUF);
1222 	}
1223 	return (error);
1224 }
1225 
1226 static int
1227 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1228 {
1229 	struct ifbreq *req = arg;
1230 	struct bridge_iflist *bif;
1231 
1232 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1233 	if (bif == NULL)
1234 		return (ENOENT);
1235 
1236 	bridge_delete_member(sc, bif, 0);
1237 
1238 	return (0);
1239 }
1240 
1241 static int
1242 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1243 {
1244 	struct ifbreq *req = arg;
1245 	struct bridge_iflist *bif;
1246 
1247 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1248 	if (bif == NULL)
1249 		return (ENOENT);
1250 
1251 	req->ifbr_ifsflags = bif->bif_flags;
1252 	req->ifbr_state = bif->bif_state;
1253 	req->ifbr_priority = bif->bif_priority;
1254 	req->ifbr_path_cost = bif->bif_path_cost;
1255 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1256 
1257 	return (0);
1258 }
1259 
1260 static int
1261 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1262 {
1263 	struct ifbreq *req = arg;
1264 	struct bridge_iflist *bif;
1265 	struct ifnet *bifp = sc->sc_ifp;
1266 
1267 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1268 	if (bif == NULL)
1269 		return (ENOENT);
1270 
1271 	if (req->ifbr_ifsflags & IFBIF_SPAN) {
1272 		/* SPAN is readonly */
1273 		return (EINVAL);
1274 	}
1275 
1276 	if (req->ifbr_ifsflags & IFBIF_STP) {
1277 		switch (bif->bif_ifp->if_type) {
1278 		case IFT_ETHER:
1279 			/* These can do spanning tree. */
1280 			break;
1281 
1282 		default:
1283 			/* Nothing else can. */
1284 			return (EINVAL);
1285 		}
1286 	}
1287 
1288 	ifnet_deserialize_all(bifp);
1289 	bridge_set_bifflags(sc, bif->bif_info, req->ifbr_ifsflags);
1290 	ifnet_serialize_all(bifp);
1291 
1292 	if (bifp->if_flags & IFF_RUNNING)
1293 		bstp_initialization(sc);
1294 
1295 	return (0);
1296 }
1297 
1298 static int
1299 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1300 {
1301 	struct ifbrparam *param = arg;
1302 	struct ifnet *ifp = sc->sc_ifp;
1303 
1304 	sc->sc_brtmax = param->ifbrp_csize;
1305 
1306 	ifnet_deserialize_all(ifp);
1307 	bridge_rttrim(sc);
1308 	ifnet_serialize_all(ifp);
1309 
1310 	return (0);
1311 }
1312 
1313 static int
1314 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1315 {
1316 	struct ifbrparam *param = arg;
1317 
1318 	param->ifbrp_csize = sc->sc_brtmax;
1319 
1320 	return (0);
1321 }
1322 
1323 static int
1324 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1325 {
1326 	struct bridge_control_arg *bc_arg = arg;
1327 	struct ifbifconf *bifc = arg;
1328 	struct bridge_iflist *bif;
1329 	struct ifbreq *breq;
1330 	int count, len;
1331 
1332 	count = 0;
1333 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1334 		count++;
1335 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1336 		count++;
1337 
1338 	if (bifc->ifbic_len == 0) {
1339 		bifc->ifbic_len = sizeof(*breq) * count;
1340 		return 0;
1341 	} else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1342 		bifc->ifbic_len = 0;
1343 		return 0;
1344 	}
1345 
1346 	len = min(bifc->ifbic_len, sizeof(*breq) * count);
1347 	KKASSERT(len >= sizeof(*breq));
1348 
1349 	breq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1350 	if (breq == NULL) {
1351 		bifc->ifbic_len = 0;
1352 		return ENOMEM;
1353 	}
1354 	bc_arg->bca_kptr = breq;
1355 
1356 	count = 0;
1357 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1358 		if (len < sizeof(*breq))
1359 			break;
1360 
1361 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1362 			sizeof(breq->ifbr_ifsname));
1363 		breq->ifbr_ifsflags = bif->bif_flags;
1364 		breq->ifbr_state = bif->bif_state;
1365 		breq->ifbr_priority = bif->bif_priority;
1366 		breq->ifbr_path_cost = bif->bif_path_cost;
1367 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1368 		breq++;
1369 		count++;
1370 		len -= sizeof(*breq);
1371 	}
1372 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1373 		if (len < sizeof(*breq))
1374 			break;
1375 
1376 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1377 			sizeof(breq->ifbr_ifsname));
1378 		breq->ifbr_ifsflags = bif->bif_flags;
1379 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1380 		breq++;
1381 		count++;
1382 		len -= sizeof(*breq);
1383 	}
1384 
1385 	bifc->ifbic_len = sizeof(*breq) * count;
1386 	KKASSERT(bifc->ifbic_len > 0);
1387 
1388 	bc_arg->bca_len = bifc->ifbic_len;
1389 	bc_arg->bca_uptr = bifc->ifbic_req;
1390 	return 0;
1391 }
1392 
1393 static int
1394 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1395 {
1396 	struct bridge_control_arg *bc_arg = arg;
1397 	struct ifbaconf *bac = arg;
1398 	struct bridge_rtnode *brt;
1399 	struct ifbareq *bareq;
1400 	int count, len;
1401 
1402 	count = 0;
1403 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
1404 		count++;
1405 
1406 	if (bac->ifbac_len == 0) {
1407 		bac->ifbac_len = sizeof(*bareq) * count;
1408 		return 0;
1409 	} else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1410 		bac->ifbac_len = 0;
1411 		return 0;
1412 	}
1413 
1414 	len = min(bac->ifbac_len, sizeof(*bareq) * count);
1415 	KKASSERT(len >= sizeof(*bareq));
1416 
1417 	bareq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1418 	if (bareq == NULL) {
1419 		bac->ifbac_len = 0;
1420 		return ENOMEM;
1421 	}
1422 	bc_arg->bca_kptr = bareq;
1423 
1424 	count = 0;
1425 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1426 		struct bridge_rtinfo *bri = brt->brt_info;
1427 		unsigned long expire;
1428 
1429 		if (len < sizeof(*bareq))
1430 			break;
1431 
1432 		strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1433 			sizeof(bareq->ifba_ifsname));
1434 		memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1435 		expire = bri->bri_expire;
1436 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1437 		    time_second < expire)
1438 			bareq->ifba_expire = expire - time_second;
1439 		else
1440 			bareq->ifba_expire = 0;
1441 		bareq->ifba_flags = bri->bri_flags;
1442 		bareq++;
1443 		count++;
1444 		len -= sizeof(*bareq);
1445 	}
1446 
1447 	bac->ifbac_len = sizeof(*bareq) * count;
1448 	KKASSERT(bac->ifbac_len > 0);
1449 
1450 	bc_arg->bca_len = bac->ifbac_len;
1451 	bc_arg->bca_uptr = bac->ifbac_req;
1452 	return 0;
1453 }
1454 
1455 static int
1456 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1457 {
1458 	struct ifbareq *req = arg;
1459 	struct bridge_iflist *bif;
1460 	struct ifnet *ifp = sc->sc_ifp;
1461 	int error;
1462 
1463 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1464 
1465 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1466 	if (bif == NULL)
1467 		return (ENOENT);
1468 
1469 	ifnet_deserialize_all(ifp);
1470 	error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1471 			       req->ifba_flags);
1472 	ifnet_serialize_all(ifp);
1473 	return (error);
1474 }
1475 
1476 static int
1477 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1478 {
1479 	struct ifbrparam *param = arg;
1480 
1481 	sc->sc_brttimeout = param->ifbrp_ctime;
1482 
1483 	return (0);
1484 }
1485 
1486 static int
1487 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1488 {
1489 	struct ifbrparam *param = arg;
1490 
1491 	param->ifbrp_ctime = sc->sc_brttimeout;
1492 
1493 	return (0);
1494 }
1495 
1496 static int
1497 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1498 {
1499 	struct ifbareq *req = arg;
1500 	struct ifnet *ifp = sc->sc_ifp;
1501 	int error;
1502 
1503 	ifnet_deserialize_all(ifp);
1504 	error = bridge_rtdaddr(sc, req->ifba_dst);
1505 	ifnet_serialize_all(ifp);
1506 	return error;
1507 }
1508 
1509 static int
1510 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1511 {
1512 	struct ifbreq *req = arg;
1513 	struct ifnet *ifp = sc->sc_ifp;
1514 
1515 	ifnet_deserialize_all(ifp);
1516 	bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1517 	ifnet_serialize_all(ifp);
1518 
1519 	return (0);
1520 }
1521 
1522 static int
1523 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1524 {
1525 	struct ifbrparam *param = arg;
1526 
1527 	param->ifbrp_prio = sc->sc_bridge_priority;
1528 
1529 	return (0);
1530 }
1531 
1532 static int
1533 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1534 {
1535 	struct ifbrparam *param = arg;
1536 
1537 	sc->sc_bridge_priority = param->ifbrp_prio;
1538 
1539 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1540 		bstp_initialization(sc);
1541 
1542 	return (0);
1543 }
1544 
1545 static int
1546 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1547 {
1548 	struct ifbrparam *param = arg;
1549 
1550 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1551 
1552 	return (0);
1553 }
1554 
1555 static int
1556 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1557 {
1558 	struct ifbrparam *param = arg;
1559 
1560 	if (param->ifbrp_hellotime == 0)
1561 		return (EINVAL);
1562 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1563 
1564 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1565 		bstp_initialization(sc);
1566 
1567 	return (0);
1568 }
1569 
1570 static int
1571 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1572 {
1573 	struct ifbrparam *param = arg;
1574 
1575 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1576 
1577 	return (0);
1578 }
1579 
1580 static int
1581 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1582 {
1583 	struct ifbrparam *param = arg;
1584 
1585 	if (param->ifbrp_fwddelay == 0)
1586 		return (EINVAL);
1587 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1588 
1589 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1590 		bstp_initialization(sc);
1591 
1592 	return (0);
1593 }
1594 
1595 static int
1596 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1597 {
1598 	struct ifbrparam *param = arg;
1599 
1600 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1601 
1602 	return (0);
1603 }
1604 
1605 static int
1606 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1607 {
1608 	struct ifbrparam *param = arg;
1609 
1610 	if (param->ifbrp_maxage == 0)
1611 		return (EINVAL);
1612 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1613 
1614 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1615 		bstp_initialization(sc);
1616 
1617 	return (0);
1618 }
1619 
1620 static int
1621 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1622 {
1623 	struct ifbreq *req = arg;
1624 	struct bridge_iflist *bif;
1625 
1626 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1627 	if (bif == NULL)
1628 		return (ENOENT);
1629 
1630 	bif->bif_priority = req->ifbr_priority;
1631 
1632 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1633 		bstp_initialization(sc);
1634 
1635 	return (0);
1636 }
1637 
1638 static int
1639 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1640 {
1641 	struct ifbreq *req = arg;
1642 	struct bridge_iflist *bif;
1643 
1644 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1645 	if (bif == NULL)
1646 		return (ENOENT);
1647 
1648 	bif->bif_path_cost = req->ifbr_path_cost;
1649 
1650 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1651 		bstp_initialization(sc);
1652 
1653 	return (0);
1654 }
1655 
1656 static int
1657 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1658 {
1659 	struct ifbreq *req = arg;
1660 	struct bridge_iflist *bif;
1661 	struct ifnet *ifs;
1662 
1663 	ifs = ifunit(req->ifbr_ifsname);
1664 	if (ifs == NULL)
1665 		return (ENOENT);
1666 
1667 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1668 		if (ifs == bif->bif_ifp)
1669 			return (EBUSY);
1670 
1671 	if (ifs->if_bridge != NULL)
1672 		return (EBUSY);
1673 
1674 	switch (ifs->if_type) {
1675 	case IFT_ETHER:
1676 	case IFT_GIF:
1677 	case IFT_L2VLAN:
1678 		break;
1679 
1680 	default:
1681 		return (EINVAL);
1682 	}
1683 
1684 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1685 	bif->bif_ifp = ifs;
1686 	bif->bif_flags = IFBIF_SPAN;
1687 	/* NOTE: span bif does not need bridge_ifinfo */
1688 
1689 	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1690 
1691 	sc->sc_span = 1;
1692 
1693 	return (0);
1694 }
1695 
1696 static int
1697 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1698 {
1699 	struct ifbreq *req = arg;
1700 	struct bridge_iflist *bif;
1701 	struct ifnet *ifs;
1702 
1703 	ifs = ifunit(req->ifbr_ifsname);
1704 	if (ifs == NULL)
1705 		return (ENOENT);
1706 
1707 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1708 		if (ifs == bif->bif_ifp)
1709 			break;
1710 
1711 	if (bif == NULL)
1712 		return (ENOENT);
1713 
1714 	bridge_delete_span(sc, bif);
1715 
1716 	if (LIST_EMPTY(&sc->sc_spanlist))
1717 		sc->sc_span = 0;
1718 
1719 	return (0);
1720 }
1721 
1722 static void
1723 bridge_ifdetach_dispatch(netmsg_t msg)
1724 {
1725 	struct ifnet *ifp, *bifp;
1726 	struct bridge_softc *sc;
1727 	struct bridge_iflist *bif;
1728 
1729 	ifp = msg->lmsg.u.ms_resultp;
1730 	sc = ifp->if_bridge;
1731 
1732 	/* Check if the interface is a bridge member */
1733 	if (sc != NULL) {
1734 		bifp = sc->sc_ifp;
1735 
1736 		ifnet_serialize_all(bifp);
1737 
1738 		bif = bridge_lookup_member_if(sc, ifp);
1739 		if (bif != NULL) {
1740 			bridge_delete_member(sc, bif, 1);
1741 		} else {
			/* XXX Why would bif be NULL? */
1743 		}
1744 
1745 		ifnet_deserialize_all(bifp);
1746 		goto reply;
1747 	}
1748 
1749 	crit_enter();	/* XXX MP */
1750 
1751 	/* Check if the interface is a span port */
1752 	LIST_FOREACH(sc, &bridge_list, sc_list) {
1753 		bifp = sc->sc_ifp;
1754 
1755 		ifnet_serialize_all(bifp);
1756 
1757 		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1758 			if (ifp == bif->bif_ifp) {
1759 				bridge_delete_span(sc, bif);
1760 				break;
1761 			}
1762 
1763 		ifnet_deserialize_all(bifp);
1764 	}
1765 
1766 	crit_exit();
1767 
1768 reply:
1769 	lwkt_replymsg(&msg->lmsg, 0);
1770 }
1771 
1772 /*
1773  * bridge_ifdetach:
1774  *
1775  *	Detach an interface from a bridge.  Called when a member
1776  *	interface is detaching.
1777  */
1778 static void
1779 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1780 {
1781 	struct netmsg_base msg;
1782 
1783 	netmsg_init(&msg, NULL, &curthread->td_msgport,
1784 		    0, bridge_ifdetach_dispatch);
1785 	msg.lmsg.u.ms_resultp = ifp;
1786 
1787 	lwkt_domsg(BRIDGE_CFGPORT, &msg.lmsg, 0);
1788 }
1789 
1790 /*
1791  * bridge_init:
1792  *
1793  *	Initialize a bridge interface.
1794  */
1795 static void
1796 bridge_init(void *xsc)
1797 {
1798 	bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1799 }
1800 
1801 /*
1802  * bridge_stop:
1803  *
1804  *	Stop the bridge interface.
1805  */
1806 static void
1807 bridge_stop(struct ifnet *ifp)
1808 {
1809 	bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1810 }
1811 
1812 /*
1813  * bridge_enqueue:
1814  *
1815  *	Enqueue a packet on a bridge member interface.
1816  *
1817  */
1818 void
1819 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1820 {
1821 	struct netmsg_packet *nmp;
1822 
1823 	nmp = &m->m_hdr.mh_netmsg;
1824 	netmsg_init(&nmp->base, NULL, &netisr_apanic_rport,
1825 		    0, bridge_enqueue_handler);
1826 	nmp->nm_packet = m;
1827 	nmp->base.lmsg.u.ms_resultp = dst_ifp;
1828 
1829 	lwkt_sendmsg(ifnet_portfn(mycpu->gd_cpuid), &nmp->base.lmsg);
1830 }
1831 
1832 /*
1833  * bridge_output:
1834  *
1835  *	Send output from a bridge member interface.  This
1836  *	performs the bridging function for locally originated
1837  *	packets.
1838  *
1839  *	The mbuf has the Ethernet header already attached.  We must
1840  *	enqueue or free the mbuf before returning.
1841  */
1842 static int
1843 bridge_output(struct ifnet *ifp, struct mbuf *m)
1844 {
1845 	struct bridge_softc *sc = ifp->if_bridge;
1846 	struct ether_header *eh;
1847 	struct ifnet *dst_if, *bifp;
1848 
1849 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
1850 
1851 	/*
1852 	 * Make sure that we are still a member of a bridge interface.
1853 	 */
1854 	if (sc == NULL) {
1855 		m_freem(m);
1856 		return (0);
1857 	}
1858 	bifp = sc->sc_ifp;
1859 
1860 	if (m->m_len < ETHER_HDR_LEN) {
1861 		m = m_pullup(m, ETHER_HDR_LEN);
1862 		if (m == NULL)
1863 			return (0);
1864 	}
1865 	eh = mtod(m, struct ether_header *);
1866 
1867 	/*
1868 	 * If bridge is down, but the original output interface is up,
1869 	 * go ahead and send out that interface.  Otherwise, the packet
1870 	 * is dropped below.
1871 	 */
1872 	if ((bifp->if_flags & IFF_RUNNING) == 0) {
1873 		dst_if = ifp;
1874 		goto sendunicast;
1875 	}
1876 
1877 	/*
1878 	 * If the packet is a multicast, or we don't know a better way to
1879 	 * get there, send to all interfaces.
1880 	 */
1881 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1882 		dst_if = NULL;
1883 	else
1884 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1885 	if (dst_if == NULL) {
1886 		struct bridge_iflist *bif, *nbif;
1887 		struct mbuf *mc;
1888 		int used = 0;
1889 
1890 		if (sc->sc_span)
1891 			bridge_span(sc, m);
1892 
1893 		LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
1894 				     bif_next, nbif) {
1895 			dst_if = bif->bif_ifp;
1896 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1897 				continue;
1898 
1899 			/*
1900 			 * If this is not the original output interface,
1901 			 * and the interface is participating in spanning
1902 			 * tree, make sure the port is in a state that
1903 			 * allows forwarding.
1904 			 */
1905 			if (dst_if != ifp &&
1906 			    (bif->bif_flags & IFBIF_STP) != 0) {
1907 				switch (bif->bif_state) {
1908 				case BSTP_IFSTATE_BLOCKING:
1909 				case BSTP_IFSTATE_LISTENING:
1910 				case BSTP_IFSTATE_DISABLED:
1911 					continue;
1912 				}
1913 			}
1914 
1915 			if (LIST_NEXT(bif, bif_next) == NULL) {
1916 				used = 1;
1917 				mc = m;
1918 			} else {
1919 				mc = m_copypacket(m, MB_DONTWAIT);
1920 				if (mc == NULL) {
1921 					bifp->if_oerrors++;
1922 					continue;
1923 				}
1924 			}
1925 			bridge_handoff(dst_if, mc);
1926 
1927 			if (nbif != NULL && !nbif->bif_onlist) {
1928 				KKASSERT(bif->bif_onlist);
1929 				nbif = LIST_NEXT(bif, bif_next);
1930 			}
1931 		}
1932 		if (used == 0)
1933 			m_freem(m);
1934 		return (0);
1935 	}
1936 
1937 sendunicast:
1938 	/*
1939 	 * XXX Spanning tree consideration here?
1940 	 */
1941 	if (sc->sc_span)
1942 		bridge_span(sc, m);
1943 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
1944 		m_freem(m);
1945 	else
1946 		bridge_handoff(dst_if, m);
1947 	return (0);
1948 }
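
/*
 * Added commentary (not part of the original code): in the broadcast loop
 * above, a member that sits at the tail of the per-CPU list is handed the
 * original mbuf ("used" is set) while every other member gets a copy made
 * with m_copypacket(); if the original was never handed off, it is freed
 * once the loop finishes.  The same pattern appears in bridge_start_bcast()
 * and bridge_broadcast() below.
 */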
1949 
1950 /*
1951  * bridge_start:
1952  *
1953  *	Start output on a bridge.
1954  *
1955  */
1956 static void
1957 bridge_start(struct ifnet *ifp)
1958 {
1959 	struct bridge_softc *sc = ifp->if_softc;
1960 
1961 	ASSERT_IFNET_SERIALIZED_TX(ifp);
1962 
1963 	ifp->if_flags |= IFF_OACTIVE;
1964 	for (;;) {
1965 		struct ifnet *dst_if = NULL;
1966 		struct ether_header *eh;
1967 		struct mbuf *m;
1968 
1969 		m = ifq_dequeue(&ifp->if_snd, NULL);
1970 		if (m == NULL)
1971 			break;
1972 
1973 		if (m->m_len < sizeof(*eh)) {
1974 			m = m_pullup(m, sizeof(*eh));
1975 			if (m == NULL) {
1976 				ifp->if_oerrors++;
1977 				continue;
1978 			}
1979 		}
1980 		eh = mtod(m, struct ether_header *);
1981 
1982 		BPF_MTAP(ifp, m);
1983 		ifp->if_opackets++;
1984 
1985 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
1986 			dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1987 
1988 		if (dst_if == NULL)
1989 			bridge_start_bcast(sc, m);
1990 		else
1991 			bridge_enqueue(dst_if, m);
1992 	}
1993 	ifp->if_flags &= ~IFF_OACTIVE;
1994 }
1995 
1996 /*
1997  * bridge_forward:
1998  *
1999  *	The forwarding function of the bridge.
2000  */
2001 static void
2002 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2003 {
2004 	struct bridge_iflist *bif;
2005 	struct ifnet *src_if, *dst_if, *ifp;
2006 	struct ether_header *eh;
2007 
2008 	src_if = m->m_pkthdr.rcvif;
2009 	ifp = sc->sc_ifp;
2010 
2011 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2012 
2013 	ifp->if_ipackets++;
2014 	ifp->if_ibytes += m->m_pkthdr.len;
2015 
2016 	/*
2017 	 * Look up the bridge_iflist.
2018 	 */
2019 	bif = bridge_lookup_member_if(sc, src_if);
2020 	if (bif == NULL) {
2021 		/* Interface is not a bridge member (anymore?) */
2022 		m_freem(m);
2023 		return;
2024 	}
2025 
2026 	if (bif->bif_flags & IFBIF_STP) {
2027 		switch (bif->bif_state) {
2028 		case BSTP_IFSTATE_BLOCKING:
2029 		case BSTP_IFSTATE_LISTENING:
2030 		case BSTP_IFSTATE_DISABLED:
2031 			m_freem(m);
2032 			return;
2033 		}
2034 	}
2035 
2036 	eh = mtod(m, struct ether_header *);
2037 
2038 	/*
2039 	 * If the interface is learning, and the source
2040 	 * address is valid and not multicast, record
2041 	 * the address.
2042 	 */
2043 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2044 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2045 	    (eh->ether_shost[0] == 0 &&
2046 	     eh->ether_shost[1] == 0 &&
2047 	     eh->ether_shost[2] == 0 &&
2048 	     eh->ether_shost[3] == 0 &&
2049 	     eh->ether_shost[4] == 0 &&
2050 	     eh->ether_shost[5] == 0) == 0)
2051 		bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2052 
2053 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
2054 	    bif->bif_state == BSTP_IFSTATE_LEARNING) {
2055 		m_freem(m);
2056 		return;
2057 	}
2058 
2059 	/*
2060 	 * At this point, the port either doesn't participate
2061 	 * in spanning tree or it is in the forwarding state.
2062 	 */
2063 
2064 	/*
2065 	 * If the packet is unicast, destined for someone on
2066 	 * "this" side of the bridge, drop it.
2067 	 */
2068 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2069 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2070 		if (src_if == dst_if) {
2071 			m_freem(m);
2072 			return;
2073 		}
2074 	} else {
2075 		/* ...forward it to all interfaces. */
2076 		ifp->if_imcasts++;
2077 		dst_if = NULL;
2078 	}
2079 
2080 	if (dst_if == NULL) {
2081 		bridge_broadcast(sc, src_if, m);
2082 		return;
2083 	}
2084 
2085 	/*
2086 	 * At this point, we're dealing with a unicast frame
2087 	 * going to a different interface.
2088 	 */
2089 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
2090 		m_freem(m);
2091 		return;
2092 	}
2093 	bif = bridge_lookup_member_if(sc, dst_if);
2094 	if (bif == NULL) {
2095 		/* Not a member of the bridge (anymore?) */
2096 		m_freem(m);
2097 		return;
2098 	}
2099 
2100 	if (bif->bif_flags & IFBIF_STP) {
2101 		switch (bif->bif_state) {
2102 		case BSTP_IFSTATE_DISABLED:
2103 		case BSTP_IFSTATE_BLOCKING:
2104 			m_freem(m);
2105 			return;
2106 		}
2107 	}
2108 
2109 	if (inet_pfil_hook.ph_hashooks > 0
2110 #ifdef INET6
2111 	    || inet6_pfil_hook.ph_hashooks > 0
2112 #endif
2113 	    ) {
2114 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2115 			return;
2116 		if (m == NULL)
2117 			return;
2118 
2119 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2120 			return;
2121 		if (m == NULL)
2122 			return;
2123 	}
2124 	bridge_handoff(dst_if, m);
2125 }
2126 
2127 /*
2128  * bridge_input:
2129  *
2130  *	Receive input from a member interface.  Queue the packet for
2131  *	bridging if it is not for us.
2132  */
2133 static struct mbuf *
2134 bridge_input(struct ifnet *ifp, struct mbuf *m)
2135 {
2136 	struct bridge_softc *sc = ifp->if_bridge;
2137 	struct bridge_iflist *bif;
2138 	struct ifnet *bifp, *new_ifp;
2139 	struct ether_header *eh;
2140 	struct mbuf *mc, *mc2;
2141 
2142 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2143 
2144 	/*
2145 	 * Make sure that we are still a member of a bridge interface.
2146 	 */
2147 	if (sc == NULL)
2148 		return m;
2149 
2150 	new_ifp = NULL;
2151 	bifp = sc->sc_ifp;
2152 
2153 	if ((bifp->if_flags & IFF_RUNNING) == 0)
2154 		goto out;
2155 
2156 	/*
2157 	 * Implement support for bridge monitoring.  If this flag has been
2158 	 * set on this interface, discard the packet once we push it through
2159 	 * the bpf(4) machinery, but before we do, increment various counters
2160 	 * associated with this bridge.
2161 	 */
2162 	if (bifp->if_flags & IFF_MONITOR) {
2163 		/* Change input interface to this bridge */
2164 		m->m_pkthdr.rcvif = bifp;
2165 
2166 		BPF_MTAP(bifp, m);
2167 
2168 		/* Update bridge's ifnet statistics */
2169 		bifp->if_ipackets++;
2170 		bifp->if_ibytes += m->m_pkthdr.len;
2171 		if (m->m_flags & (M_MCAST | M_BCAST))
2172 			bifp->if_imcasts++;
2173 
2174 		m_freem(m);
2175 		m = NULL;
2176 		goto out;
2177 	}
2178 
2179 	eh = mtod(m, struct ether_header *);
2180 
2181 	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2182 		/*
2183 		 * If the packet is for us, set the packet's source to the
2184 		 * bridge and return the packet to ifnet.if_input for
2185 		 * local processing.
2186 		 */
2187 		KASSERT(bifp->if_bridge == NULL,
2188 			("loop created in bridge_input"));
2189 		if (pfil_member != 0) {
2190 			if (inet_pfil_hook.ph_hashooks > 0
2191 #ifdef INET6
2192 			    || inet6_pfil_hook.ph_hashooks > 0
2193 #endif
2194 			) {
2195 				if (bridge_pfil(&m, NULL, ifp, PFIL_IN) != 0)
2196 					goto out;
2197 				if (m == NULL)
2198 					goto out;
2199 			}
2200 		}
2201 		new_ifp = bifp;
2202 		goto out;
2203 	}
2204 
2205 	/*
2206 	 * Tap all packets arriving on the bridge, whether or not
2207 	 * they are local destinations; input is input.
2208 	 */
2209 	BPF_MTAP(bifp, m);
2210 
2211 	bif = bridge_lookup_member_if(sc, ifp);
2212 	if (bif == NULL)
2213 		goto out;
2214 
2215 	if (sc->sc_span)
2216 		bridge_span(sc, m);
2217 
2218 	if (m->m_flags & (M_BCAST | M_MCAST)) {
2219 		/* Tap off 802.1D packets; they do not get forwarded. */
2220 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2221 		    ETHER_ADDR_LEN) == 0) {
2222 			ifnet_serialize_all(bifp);
2223 			bstp_input(sc, bif, m);
2224 			ifnet_deserialize_all(bifp);
2225 
2226 			/* m is freed by bstp_input */
2227 			m = NULL;
2228 			goto out;
2229 		}
2230 
2231 		if (bif->bif_flags & IFBIF_STP) {
2232 			switch (bif->bif_state) {
2233 			case BSTP_IFSTATE_BLOCKING:
2234 			case BSTP_IFSTATE_LISTENING:
2235 			case BSTP_IFSTATE_DISABLED:
2236 				goto out;
2237 			}
2238 		}
2239 
2240 		/*
2241 		 * Make a deep copy of the packet and enqueue the copy
2242 		 * for bridge processing; return the original packet for
2243 		 * local processing.
2244 		 */
2245 		mc = m_dup(m, MB_DONTWAIT);
2246 		if (mc == NULL)
2247 			goto out;
2248 
2249 		bridge_forward(sc, mc);
2250 
2251 		/*
2252 		 * Reinject the mbuf as arriving on the bridge so we have a
2253 		 * chance at claiming multicast packets.  We cannot loop back
2254 		 * here from ether_input as a bridge is never a member of a
2255 		 * bridge.
2256 		 */
2257 		KASSERT(bifp->if_bridge == NULL,
2258 			("loop created in bridge_input"));
2259 		mc2 = m_dup(m, MB_DONTWAIT);
2260 #ifdef notyet
2261 		if (mc2 != NULL) {
2262 			/* Keep the layer3 header aligned */
2263 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2264 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2265 		}
2266 #endif
2267 		if (mc2 != NULL) {
2268 			/*
2269 			 * Don't tap to bpf(4) again; we have
2270 			 * already done the tapping.
2271 			 */
2272 			ether_reinput_oncpu(bifp, mc2, 0);
2273 		}
2274 
2275 		/* Return the original packet for local processing. */
2276 		goto out;
2277 	}
2278 
2279 	if (bif->bif_flags & IFBIF_STP) {
2280 		switch (bif->bif_state) {
2281 		case BSTP_IFSTATE_BLOCKING:
2282 		case BSTP_IFSTATE_LISTENING:
2283 		case BSTP_IFSTATE_DISABLED:
2284 			goto out;
2285 		}
2286 	}
2287 
2288 	/*
2289 	 * Unicast.  Make sure it's not for us.
2290 	 *
2291 	 * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2292 	 * is followed by breaking out of the loop.
2293 	 */
2294 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2295 		if (bif->bif_ifp->if_type != IFT_ETHER)
2296 			continue;
2297 
2298 		/* It is destined for us. */
2299 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2300 		    ETHER_ADDR_LEN) == 0) {
2301 			if (bif->bif_ifp != ifp) {
2302 				/* XXX loop prevention */
2303 				m->m_flags |= M_ETHER_BRIDGED;
2304 				new_ifp = bif->bif_ifp;
2305 			}
2306 			if (bif->bif_flags & IFBIF_LEARNING) {
2307 				bridge_rtupdate(sc, eh->ether_shost,
2308 						ifp, IFBAF_DYNAMIC);
2309 			}
2310 			goto out;
2311 		}
2312 
2313 		/* We just received a packet that we sent out. */
2314 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2315 		    ETHER_ADDR_LEN) == 0) {
2316 			m_freem(m);
2317 			m = NULL;
2318 			goto out;
2319 		}
2320 	}
2321 
2322 	/* Perform the bridge forwarding function. */
2323 	bridge_forward(sc, m);
2324 	m = NULL;
2325 out:
2326 	if (new_ifp != NULL) {
2327 		ether_reinput_oncpu(new_ifp, m, 1);
2328 		m = NULL;
2329 	}
2330 	return (m);
2331 }
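
/*
 * Added commentary (not part of the original code): bridge_input() has
 * three possible outcomes for the ethernet input path.  The original mbuf
 * is returned unchanged for normal local processing, NULL is returned
 * because the packet was consumed (forwarded, handed to bstp_input(),
 * filtered or freed), or NULL is returned after the packet was re-injected
 * on another interface with ether_reinput_oncpu() (the new_ifp case at the
 * "out" label above).
 */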
2332 
2333 /*
2334  * bridge_start_bcast:
2335  *
2336  *	Broadcast the packet sent from bridge to all member
2337  *	interfaces.
2338  *	This is a simplified version of bridge_broadcast(); unlike that
2339  *	function, it expects the caller to hold the bridge's serializer.
2340  */
2341 static void
2342 bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
2343 {
2344 	struct bridge_iflist *bif;
2345 	struct mbuf *mc;
2346 	struct ifnet *dst_if, *bifp;
2347 	int used = 0;
2348 
2349 	bifp = sc->sc_ifp;
2350 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
2351 
2352 	/*
2353 	 * Following loop is MPSAFE; nothing is blocking
2354 	 * in the loop body.
2355 	 */
2356 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2357 		dst_if = bif->bif_ifp;
2358 
2359 		if (bif->bif_flags & IFBIF_STP) {
2360 			switch (bif->bif_state) {
2361 			case BSTP_IFSTATE_BLOCKING:
2362 			case BSTP_IFSTATE_DISABLED:
2363 				continue;
2364 			}
2365 		}
2366 
2367 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2368 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2369 			continue;
2370 
2371 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2372 			continue;
2373 
2374 		if (LIST_NEXT(bif, bif_next) == NULL) {
2375 			mc = m;
2376 			used = 1;
2377 		} else {
2378 			mc = m_copypacket(m, MB_DONTWAIT);
2379 			if (mc == NULL) {
2380 				bifp->if_oerrors++;
2381 				continue;
2382 			}
2383 		}
2384 		bridge_enqueue(dst_if, mc);
2385 	}
2386 	if (used == 0)
2387 		m_freem(m);
2388 }
2389 
2390 /*
2391  * bridge_broadcast:
2392  *
2393  *	Send a frame to all interfaces that are members of
2394  *	the bridge, except for the one on which the packet
2395  *	arrived.
2396  */
2397 static void
2398 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2399     struct mbuf *m)
2400 {
2401 	struct bridge_iflist *bif, *nbif;
2402 	struct mbuf *mc;
2403 	struct ifnet *dst_if, *bifp;
2404 	int used = 0;
2405 
2406 	bifp = sc->sc_ifp;
2407 	ASSERT_IFNET_NOT_SERIALIZED_ALL(bifp);
2408 
2409 	if (inet_pfil_hook.ph_hashooks > 0
2410 #ifdef INET6
2411 	    || inet6_pfil_hook.ph_hashooks > 0
2412 #endif
2413 	    ) {
2414 		if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
2415 			return;
2416 		if (m == NULL)
2417 			return;
2418 
2419 		/* Filter on the bridge interface before broadcasting */
2420 		if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
2421 			return;
2422 		if (m == NULL)
2423 			return;
2424 	}
2425 
2426 	LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2427 		dst_if = bif->bif_ifp;
2428 		if (dst_if == src_if)
2429 			continue;
2430 
2431 		if (bif->bif_flags & IFBIF_STP) {
2432 			switch (bif->bif_state) {
2433 			case BSTP_IFSTATE_BLOCKING:
2434 			case BSTP_IFSTATE_DISABLED:
2435 				continue;
2436 			}
2437 		}
2438 
2439 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2440 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2441 			continue;
2442 
2443 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2444 			continue;
2445 
2446 		if (LIST_NEXT(bif, bif_next) == NULL) {
2447 			mc = m;
2448 			used = 1;
2449 		} else {
2450 			mc = m_copypacket(m, MB_DONTWAIT);
2451 			if (mc == NULL) {
2452 				sc->sc_ifp->if_oerrors++;
2453 				continue;
2454 			}
2455 		}
2456 
2457 		/*
2458 		 * Filter on the output interface.  Pass a NULL bridge
2459 		 * interface pointer so we do not redundantly filter on
2460 		 * the bridge for each interface we broadcast on.
2461 		 */
2462 		if (inet_pfil_hook.ph_hashooks > 0
2463 #ifdef INET6
2464 		    || inet6_pfil_hook.ph_hashooks > 0
2465 #endif
2466 		    ) {
2467 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2468 				continue;
2469 			if (mc == NULL)
2470 				continue;
2471 		}
2472 		bridge_handoff(dst_if, mc);
2473 
2474 		if (nbif != NULL && !nbif->bif_onlist) {
2475 			KKASSERT(bif->bif_onlist);
2476 			nbif = LIST_NEXT(bif, bif_next);
2477 		}
2478 	}
2479 	if (used == 0)
2480 		m_freem(m);
2481 }
2482 
2483 /*
2484  * bridge_span:
2485  *
2486  *	Duplicate a packet out one or more interfaces that are in span
2487  *	mode; the original mbuf is left unmodified.
2488  */
2489 static void
2490 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2491 {
2492 	struct bridge_iflist *bif;
2493 	struct ifnet *dst_if, *bifp;
2494 	struct mbuf *mc;
2495 
2496 	bifp = sc->sc_ifp;
2497 	ifnet_serialize_all(bifp);
2498 
2499 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2500 		dst_if = bif->bif_ifp;
2501 
2502 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2503 			continue;
2504 
2505 		mc = m_copypacket(m, MB_DONTWAIT);
2506 		if (mc == NULL) {
2507 			sc->sc_ifp->if_oerrors++;
2508 			continue;
2509 		}
2510 		bridge_enqueue(dst_if, mc);
2511 	}
2512 
2513 	ifnet_deserialize_all(bifp);
2514 }
2515 
2516 static void
2517 bridge_rtmsg_sync_handler(netmsg_t msg)
2518 {
2519 	ifnet_forwardmsg(&msg->lmsg, mycpuid + 1);
2520 }
2521 
2522 static void
2523 bridge_rtmsg_sync(struct bridge_softc *sc)
2524 {
2525 	struct netmsg_base msg;
2526 
2527 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2528 
2529 	netmsg_init(&msg, NULL, &curthread->td_msgport,
2530 		    0, bridge_rtmsg_sync_handler);
2531 	ifnet_domsg(&msg.lmsg, 0);
2532 }
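
/*
 * Illustrative sketch (not part of the original code): the handler/domsg
 * pair above is the per-CPU message circulation pattern used throughout
 * this file.  A request is started with ifnet_domsg() (synchronous) or
 * ifnet_sendmsg() (asynchronous); each CPU's handler does its local work
 * and passes the message along with ifnet_forwardmsg(&msg->lmsg,
 * mycpuid + 1), and the forward from the last CPU turns into the reply to
 * the originator.  A handler therefore typically looks like the
 * hypothetical example below (example_handler is not a real function):
 *
 *	static void
 *	example_handler(netmsg_t msg)
 *	{
 *		// operate on per-CPU state, e.g. sc_iflists[mycpuid]
 *		ifnet_forwardmsg(&msg->lmsg, mycpuid + 1);
 *	}
 */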
2533 
2534 static __inline void
2535 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
2536 		     int setflags, uint8_t flags, uint32_t timeo)
2537 {
2538 	if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2539 	    bri->bri_ifp != dst_if)
2540 		bri->bri_ifp = dst_if;
2541 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2542 	    bri->bri_expire != time_second + timeo)
2543 		bri->bri_expire = time_second + timeo;
2544 	if (setflags)
2545 		bri->bri_flags = flags;
2546 }
2547 
2548 static int
2549 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
2550 		       struct ifnet *dst_if, int setflags, uint8_t flags,
2551 		       struct bridge_rtinfo **bri0)
2552 {
2553 	struct bridge_rtnode *brt;
2554 	struct bridge_rtinfo *bri;
2555 
2556 	if (mycpuid == 0) {
2557 		brt = bridge_rtnode_lookup(sc, dst);
2558 		if (brt != NULL) {
2559 			/*
2560 			 * rtnode for 'dst' already exists.  We inform the
2561 			 * caller about this by leaving bri0 as NULL.  The
2562 			 * caller will terminate the installation upon getting
2563 			 * NULL bri0.  However, we still need to update the
2564 			 * rtinfo.
2565 			 */
2566 			KKASSERT(*bri0 == NULL);
2567 
2568 			/* Update rtinfo */
2569 			bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
2570 					     flags, sc->sc_brttimeout);
2571 			return 0;
2572 		}
2573 
2574 		/*
2575 		 * We only need to check brtcnt on CPU0, since if the limit
2576 		 * would be exceeded, ENOSPC is returned.  The caller knows
2577 		 * this and will terminate the installation.
2578 		 */
2579 		if (sc->sc_brtcnt >= sc->sc_brtmax)
2580 			return ENOSPC;
2581 
2582 		KKASSERT(*bri0 == NULL);
2583 		bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
2584 				  M_WAITOK | M_ZERO);
2585 		*bri0 = bri;
2586 
2587 		/* Setup rtinfo */
2588 		bri->bri_flags = IFBAF_DYNAMIC;
2589 		bridge_rtinfo_update(bri, dst_if, setflags, flags,
2590 				     sc->sc_brttimeout);
2591 	} else {
2592 		bri = *bri0;
2593 		KKASSERT(bri != NULL);
2594 	}
2595 
2596 	brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
2597 		      M_WAITOK | M_ZERO);
2598 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2599 	brt->brt_info = bri;
2600 
2601 	bridge_rtnode_insert(sc, brt);
2602 	return 0;
2603 }
2604 
2605 static void
2606 bridge_rtinstall_handler(netmsg_t msg)
2607 {
2608 	struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)msg;
2609 	int error;
2610 
2611 	error = bridge_rtinstall_oncpu(brmsg->br_softc,
2612 				       brmsg->br_dst, brmsg->br_dst_if,
2613 				       brmsg->br_setflags, brmsg->br_flags,
2614 				       &brmsg->br_rtinfo);
2615 	if (error) {
2616 		KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
2617 		lwkt_replymsg(&brmsg->base.lmsg, error);
2618 		return;
2619 	} else if (brmsg->br_rtinfo == NULL) {
2620 		/* rtnode already exists for 'dst' */
2621 		KKASSERT(mycpuid == 0);
2622 		lwkt_replymsg(&brmsg->base.lmsg, 0);
2623 		return;
2624 	}
2625 	ifnet_forwardmsg(&brmsg->base.lmsg, mycpuid + 1);
2626 }
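
/*
 * Added commentary (not part of the original code): installation of a
 * routing entry is replicated across all CPUs.  CPU0 either updates an
 * existing entry, rejects the request with ENOSPC, or allocates the shared
 * bridge_rtinfo; every CPU (CPU0 included) then allocates its own
 * bridge_rtnode pointing at that shared rtinfo and links it into its
 * per-CPU hash table and list before the message moves on to the next CPU.
 */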
2627 
2628 /*
2629  * bridge_rtupdate:
2630  *
2631  *	Add/Update a bridge routing entry.
2632  */
2633 static int
2634 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2635 		struct ifnet *dst_if, uint8_t flags)
2636 {
2637 	struct bridge_rtnode *brt;
2638 
2639 	/*
2640 	 * A route for this destination might already exist.  If so,
2641 	 * update it, otherwise create a new one.
2642 	 */
2643 	if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
2644 		struct netmsg_brsaddr *brmsg;
2645 
2646 		if (sc->sc_brtcnt >= sc->sc_brtmax)
2647 			return ENOSPC;
2648 
2649 		brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
2650 		if (brmsg == NULL)
2651 			return ENOMEM;
2652 
2653 		netmsg_init(&brmsg->base, NULL, &netisr_afree_rport,
2654 			    0, bridge_rtinstall_handler);
2655 		memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
2656 		brmsg->br_dst_if = dst_if;
2657 		brmsg->br_flags = flags;
2658 		brmsg->br_setflags = 0;
2659 		brmsg->br_softc = sc;
2660 		brmsg->br_rtinfo = NULL;
2661 
2662 		ifnet_sendmsg(&brmsg->base.lmsg, 0);
2663 		return 0;
2664 	}
2665 	bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
2666 			     sc->sc_brttimeout);
2667 	return 0;
2668 }
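
/*
 * Added note (not part of the original code): bridge_rtupdate() is called
 * from the forwarding path, so instead of waiting for the per-CPU
 * installation to finish it fires the message asynchronously.  The
 * message's reply port is netisr_afree_rport, which simply frees the
 * message once the circulation completes, so no cleanup is required here.
 */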
2669 
2670 static int
2671 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
2672 	       struct ifnet *dst_if, uint8_t flags)
2673 {
2674 	struct netmsg_brsaddr brmsg;
2675 
2676 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2677 
2678 	netmsg_init(&brmsg.base, NULL, &curthread->td_msgport,
2679 		    0, bridge_rtinstall_handler);
2680 	memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
2681 	brmsg.br_dst_if = dst_if;
2682 	brmsg.br_flags = flags;
2683 	brmsg.br_setflags = 1;
2684 	brmsg.br_softc = sc;
2685 	brmsg.br_rtinfo = NULL;
2686 
2687 	return ifnet_domsg(&brmsg.base.lmsg, 0);
2688 }
2689 
2690 /*
2691  * bridge_rtlookup:
2692  *
2693  *	Lookup the destination interface for an address.
2694  */
2695 static struct ifnet *
2696 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2697 {
2698 	struct bridge_rtnode *brt;
2699 
2700 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2701 		return NULL;
2702 	return brt->brt_info->bri_ifp;
2703 }
2704 
2705 static void
2706 bridge_rtreap_handler(netmsg_t msg)
2707 {
2708 	struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
2709 	struct bridge_rtnode *brt, *nbrt;
2710 
2711 	LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
2712 		if (brt->brt_info->bri_dead)
2713 			bridge_rtnode_destroy(sc, brt);
2714 	}
2715 	ifnet_forwardmsg(&msg->lmsg, mycpuid + 1);
2716 }
2717 
2718 static void
2719 bridge_rtreap(struct bridge_softc *sc)
2720 {
2721 	struct netmsg_base msg;
2722 
2723 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2724 
2725 	netmsg_init(&msg, NULL, &curthread->td_msgport,
2726 		    0, bridge_rtreap_handler);
2727 	msg.lmsg.u.ms_resultp = sc;
2728 
2729 	ifnet_domsg(&msg.lmsg, 0);
2730 }
2731 
2732 static void
2733 bridge_rtreap_async(struct bridge_softc *sc)
2734 {
2735 	struct netmsg_base *msg;
2736 
2737 	msg = kmalloc(sizeof(*msg), M_LWKTMSG, M_WAITOK);
2738 
2739 	netmsg_init(msg, NULL, &netisr_afree_rport,
2740 		    0, bridge_rtreap_handler);
2741 	msg->lmsg.u.ms_resultp = sc;
2742 
2743 	ifnet_sendmsg(&msg->lmsg, 0);
2744 }
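
/*
 * Added note (not part of the original code): bridge_rtreap() circulates
 * the reap message synchronously with ifnet_domsg() and therefore may only
 * be used where blocking is acceptable, while bridge_rtreap_async()
 * heap-allocates the message, lets netisr_afree_rport dispose of it and
 * returns immediately; bridge_rtflush() and bridge_rtdelete() pick between
 * the two based on IFBF_FLUSHSYNC.
 */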
2745 
2746 /*
2747  * bridge_rttrim:
2748  *
2749  *	Trim the routing table so that the number of
2750  *	routing entries is less than or equal to the
2751  *	maximum number.
2752  */
2753 static void
2754 bridge_rttrim(struct bridge_softc *sc)
2755 {
2756 	struct bridge_rtnode *brt;
2757 	int dead;
2758 
2759 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2760 
2761 	/* Make sure we actually need to do this. */
2762 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2763 		return;
2764 
2765 	/*
2766 	 * Find out how many rtnodes are dead
2767 	 */
2768 	dead = bridge_rtage_finddead(sc);
2769 	KKASSERT(dead <= sc->sc_brtcnt);
2770 
2771 	if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2772 		/* Enough dead rtnodes are found */
2773 		bridge_rtreap(sc);
2774 		return;
2775 	}
2776 
2777 	/*
2778 	 * Kill some dynamic rtnodes to meet the brtmax
2779 	 */
2780 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2781 		struct bridge_rtinfo *bri = brt->brt_info;
2782 
2783 		if (bri->bri_dead) {
2784 			/*
2785 			 * We have counted this rtnode in
2786 			 * bridge_rtage_finddead()
2787 			 */
2788 			continue;
2789 		}
2790 
2791 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2792 			bri->bri_dead = 1;
2793 			++dead;
2794 			KKASSERT(dead <= sc->sc_brtcnt);
2795 
2796 			if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2797 				/* Enough rtnodes are collected */
2798 				break;
2799 			}
2800 		}
2801 	}
2802 	if (dead)
2803 		bridge_rtreap(sc);
2804 }
2805 
2806 /*
2807  * bridge_timer:
2808  *
2809  *	Aging timer for the bridge.
2810  */
2811 static void
2812 bridge_timer(void *arg)
2813 {
2814 	struct bridge_softc *sc = arg;
2815 	struct netmsg_base *msg;
2816 
2817 	KKASSERT(mycpuid == BRIDGE_CFGCPU);
2818 
2819 	crit_enter();
2820 
2821 	if (callout_pending(&sc->sc_brcallout) ||
2822 	    !callout_active(&sc->sc_brcallout)) {
2823 		crit_exit();
2824 		return;
2825 	}
2826 	callout_deactivate(&sc->sc_brcallout);
2827 
2828 	msg = &sc->sc_brtimemsg;
2829 	KKASSERT(msg->lmsg.ms_flags & MSGF_DONE);
2830 	lwkt_sendmsg(BRIDGE_CFGPORT, &msg->lmsg);
2831 
2832 	crit_exit();
2833 }
2834 
2835 static void
2836 bridge_timer_handler(netmsg_t msg)
2837 {
2838 	struct bridge_softc *sc = msg->lmsg.u.ms_resultp;
2839 
2840 	KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
2841 
2842 	crit_enter();
2843 	/* Reply ASAP */
2844 	lwkt_replymsg(&msg->lmsg, 0);
2845 	crit_exit();
2846 
2847 	bridge_rtage(sc);
2848 	if (sc->sc_ifp->if_flags & IFF_RUNNING) {
2849 		callout_reset(&sc->sc_brcallout,
2850 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2851 	}
2852 }
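
/*
 * Added note (not part of the original code): the aging callout itself
 * (bridge_timer() above) only bounces sc_brtimemsg over to BRIDGE_CFGPORT.
 * The actual work, bridge_rtage() plus re-arming the callout, is done here
 * in the message handler, so aging always runs in the configuration thread
 * rather than inside the callout's critical section.
 */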
2853 
2854 static int
2855 bridge_rtage_finddead(struct bridge_softc *sc)
2856 {
2857 	struct bridge_rtnode *brt;
2858 	int dead = 0;
2859 
2860 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2861 		struct bridge_rtinfo *bri = brt->brt_info;
2862 
2863 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2864 		    time_second >= bri->bri_expire) {
2865 			bri->bri_dead = 1;
2866 			++dead;
2867 			KKASSERT(dead <= sc->sc_brtcnt);
2868 		}
2869 	}
2870 	return dead;
2871 }
2872 
2873 /*
2874  * bridge_rtage:
2875  *
2876  *	Perform an aging cycle.
2877  */
2878 static void
2879 bridge_rtage(struct bridge_softc *sc)
2880 {
2881 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2882 
2883 	if (bridge_rtage_finddead(sc))
2884 		bridge_rtreap(sc);
2885 }
2886 
2887 /*
2888  * bridge_rtflush:
2889  *
2890  *	Remove all dynamic addresses from the bridge.
2891  */
2892 static void
2893 bridge_rtflush(struct bridge_softc *sc, int bf)
2894 {
2895 	struct bridge_rtnode *brt;
2896 	int reap;
2897 
2898 	reap = 0;
2899 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2900 		struct bridge_rtinfo *bri = brt->brt_info;
2901 
2902 		if ((bf & IFBF_FLUSHALL) ||
2903 		    (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2904 			bri->bri_dead = 1;
2905 			reap = 1;
2906 		}
2907 	}
2908 	if (reap) {
2909 		if (bf & IFBF_FLUSHSYNC)
2910 			bridge_rtreap(sc);
2911 		else
2912 			bridge_rtreap_async(sc);
2913 	}
2914 }
2915 
2916 /*
2917  * bridge_rtdaddr:
2918  *
2919  *	Remove an address from the table.
2920  */
2921 static int
2922 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2923 {
2924 	struct bridge_rtnode *brt;
2925 
2926 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2927 
2928 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2929 		return (ENOENT);
2930 
2931 	/* TODO: add a cheaper delete operation */
2932 	brt->brt_info->bri_dead = 1;
2933 	bridge_rtreap(sc);
2934 	return (0);
2935 }
2936 
2937 /*
2938  * bridge_rtdelete:
2939  *
2940  *	Delete routes to a specific member interface.
2941  */
2942 void
2943 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
2944 {
2945 	struct bridge_rtnode *brt;
2946 	int reap;
2947 
2948 	reap = 0;
2949 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2950 		struct bridge_rtinfo *bri = brt->brt_info;
2951 
2952 		if (bri->bri_ifp == ifp &&
2953 		    ((bf & IFBF_FLUSHALL) ||
2954 		     (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
2955 			bri->bri_dead = 1;
2956 			reap = 1;
2957 		}
2958 	}
2959 	if (reap) {
2960 		if (bf & IFBF_FLUSHSYNC)
2961 			bridge_rtreap(sc);
2962 		else
2963 			bridge_rtreap_async(sc);
2964 	}
2965 }
2966 
2967 /*
2968  * bridge_rtable_init:
2969  *
2970  *	Initialize the route table for this bridge.
2971  */
2972 static void
2973 bridge_rtable_init(struct bridge_softc *sc)
2974 {
2975 	int cpu;
2976 
2977 	/*
2978 	 * Initialize per-cpu hash tables
2979 	 */
2980 	sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
2981 				 M_DEVBUF, M_WAITOK);
2982 	for (cpu = 0; cpu < ncpus; ++cpu) {
2983 		int i;
2984 
2985 		sc->sc_rthashs[cpu] =
2986 		kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
2987 			M_DEVBUF, M_WAITOK);
2988 
2989 		for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2990 			LIST_INIT(&sc->sc_rthashs[cpu][i]);
2991 	}
2992 	sc->sc_rthash_key = karc4random();
2993 
2994 	/*
2995 	 * Initialize per-cpu lists
2996 	 */
2997 	sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
2998 				 M_DEVBUF, M_WAITOK);
2999 	for (cpu = 0; cpu < ncpus; ++cpu)
3000 		LIST_INIT(&sc->sc_rtlists[cpu]);
3001 }
3002 
3003 /*
3004  * bridge_rtable_fini:
3005  *
3006  *	Deconstruct the route table for this bridge.
3007  */
3008 static void
3009 bridge_rtable_fini(struct bridge_softc *sc)
3010 {
3011 	int cpu;
3012 
3013 	/*
3014 	 * Free per-cpu hash tables
3015 	 */
3016 	for (cpu = 0; cpu < ncpus; ++cpu)
3017 		kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3018 	kfree(sc->sc_rthashs, M_DEVBUF);
3019 
3020 	/*
3021 	 * Free per-cpu lists
3022 	 */
3023 	kfree(sc->sc_rtlists, M_DEVBUF);
3024 }
3025 
3026 /*
3027  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3028  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3029  */
3030 #define	mix(a, b, c)							\
3031 do {									\
3032 	a -= b; a -= c; a ^= (c >> 13);					\
3033 	b -= c; b -= a; b ^= (a << 8);					\
3034 	c -= a; c -= b; c ^= (b >> 13);					\
3035 	a -= b; a -= c; a ^= (c >> 12);					\
3036 	b -= c; b -= a; b ^= (a << 16);					\
3037 	c -= a; c -= b; c ^= (b >> 5);					\
3038 	a -= b; a -= c; a ^= (c >> 3);					\
3039 	b -= c; b -= a; b ^= (a << 10);					\
3040 	c -= a; c -= b; c ^= (b >> 15);					\
3041 } while (/*CONSTCOND*/0)
3042 
3043 static __inline uint32_t
3044 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3045 {
3046 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3047 
3048 	b += addr[5] << 8;
3049 	b += addr[4];
3050 	a += addr[3] << 24;
3051 	a += addr[2] << 16;
3052 	a += addr[1] << 8;
3053 	a += addr[0];
3054 
3055 	mix(a, b, c);
3056 
3057 	return (c & BRIDGE_RTHASH_MASK);
3058 }
3059 
3060 #undef mix
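
/*
 * Illustrative sketch (not part of the original code): lookup and insert
 * both derive the per-CPU hash bucket the same way, e.g.
 *
 *	hash = bridge_rthash(sc, addr);
 *	LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash)
 *		...
 *
 * and the chain within a bucket is kept sorted by Ethernet address
 * (bridge_rtnode_insert() maintains the order), which is what lets
 * bridge_rtnode_lookup() stop as soon as the comparison shows the address
 * can no longer appear further down the chain.
 */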
3061 
3062 static int
3063 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3064 {
3065 	int i, d;
3066 
3067 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3068 		d = ((int)a[i]) - ((int)b[i]);
3069 	}
3070 
3071 	return (d);
3072 }
3073 
3074 /*
3075  * bridge_rtnode_lookup:
3076  *
3077  *	Look up a bridge route node for the specified destination.
3078  */
3079 static struct bridge_rtnode *
3080 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
3081 {
3082 	struct bridge_rtnode *brt;
3083 	uint32_t hash;
3084 	int dir;
3085 
3086 	hash = bridge_rthash(sc, addr);
3087 	LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3088 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3089 		if (dir == 0)
3090 			return (brt);
3091 		if (dir > 0)
3092 			return (NULL);
3093 	}
3094 
3095 	return (NULL);
3096 }
3097 
3098 /*
3099  * bridge_rtnode_insert:
3100  *
3101  *	Insert the specified bridge node into the route table.
3102  *	The caller must ensure that the rtnode does not already exist.
3103  */
3104 static void
3105 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3106 {
3107 	struct bridge_rtnode *lbrt;
3108 	uint32_t hash;
3109 	int dir;
3110 
3111 	hash = bridge_rthash(sc, brt->brt_addr);
3112 
3113 	lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
3114 	if (lbrt == NULL) {
3115 		LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash], brt, brt_hash);
3116 		goto out;
3117 	}
3118 
3119 	do {
3120 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3121 		KASSERT(dir != 0, ("rtnode already exists\n"));
3122 
3123 		if (dir > 0) {
3124 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3125 			goto out;
3126 		}
3127 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3128 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3129 			goto out;
3130 		}
3131 		lbrt = LIST_NEXT(lbrt, brt_hash);
3132 	} while (lbrt != NULL);
3133 
3134 	panic("no suitable position found for rtnode\n");
3135 out:
3136 	LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3137 	if (mycpuid == 0) {
3138 		/*
3139 		 * Update the brtcnt.
3140 		 * We only need to do it once and we do it on CPU0.
3141 		 */
3142 		sc->sc_brtcnt++;
3143 	}
3144 }
3145 
3146 /*
3147  * bridge_rtnode_destroy:
3148  *
3149  *	Destroy a bridge rtnode.
3150  */
3151 static void
3152 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3153 {
3154 	LIST_REMOVE(brt, brt_hash);
3155 	LIST_REMOVE(brt, brt_list);
3156 
3157 	if (mycpuid + 1 == ncpus) {
3158 		/* Free rtinfo associated with rtnode on the last cpu */
3159 		kfree(brt->brt_info, M_DEVBUF);
3160 	}
3161 	kfree(brt, M_DEVBUF);
3162 
3163 	if (mycpuid == 0) {
3164 		/* Update brtcnt only on CPU0 */
3165 		sc->sc_brtcnt--;
3166 	}
3167 }
3168 
3169 static __inline int
3170 bridge_post_pfil(struct mbuf *m)
3171 {
3172 	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3173 		return EOPNOTSUPP;
3174 
3175 	/* Not yet */
3176 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3177 		return EOPNOTSUPP;
3178 
3179 	return 0;
3180 }
3181 
3182 /*
3183  * Send bridge packets through pfil if they are one of the types pfil can deal
3184  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3185  * question.) If bifp or ifp is NULL then packet filtering is skipped for
3186  * that interface.
3187  */
3188 static int
3189 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3190 {
3191 	int snap, error, i, hlen;
3192 	struct ether_header *eh1, eh2;
3193 	struct ip *ip;
3194 	struct llc llc1;
3195 	u_int16_t ether_type;
3196 
3197 	snap = 0;
3198 	error = -1;	/* Default to an error unless explicitly set to 0 */
3199 
3200 	if (pfil_bridge == 0 && pfil_member == 0)
3201 		return (0); /* filtering is disabled */
3202 
3203 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3204 	if ((*mp)->m_len < i) {
3205 		*mp = m_pullup(*mp, i);
3206 		if (*mp == NULL) {
3207 			kprintf("%s: m_pullup failed\n", __func__);
3208 			return (-1);
3209 		}
3210 	}
3211 
3212 	eh1 = mtod(*mp, struct ether_header *);
3213 	ether_type = ntohs(eh1->ether_type);
3214 
3215 	/*
3216 	 * Check for SNAP/LLC.
3217 	 */
3218 	if (ether_type < ETHERMTU) {
3219 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3220 
3221 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3222 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3223 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3224 		    llc2->llc_control == LLC_UI) {
3225 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3226 			snap = 1;
3227 		}
3228 	}
3229 
3230 	/*
3231 	 * If we're trying to filter bridge traffic, don't look at anything
3232 	 * other than IP and ARP traffic.  If the filter doesn't understand
3233 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3234 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3235 	 * but of course we don't have an AppleTalk filter to begin with.
3236 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3237 	 * ARP traffic.)
3238 	 */
3239 	switch (ether_type) {
3240 	case ETHERTYPE_ARP:
3241 	case ETHERTYPE_REVARP:
3242 		return (0); /* Automatically pass */
3243 
3244 	case ETHERTYPE_IP:
3245 #ifdef INET6
3246 	case ETHERTYPE_IPV6:
3247 #endif /* INET6 */
3248 		break;
3249 
3250 	default:
3251 		/*
3252 		 * Check to see if the user wants to pass non-ip
3253 		 * packets, these will not be checked by pfil(9)
3254 		 * and passed unconditionally so the default is to drop.
3255 		 */
3256 		if (pfil_onlyip)
3257 			goto bad;
3258 	}
3259 
3260 	/* Strip off the Ethernet header and keep a copy. */
3261 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3262 	m_adj(*mp, ETHER_HDR_LEN);
3263 
3264 	/* Strip off snap header, if present */
3265 	if (snap) {
3266 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3267 		m_adj(*mp, sizeof(struct llc));
3268 	}
3269 
3270 	/*
3271 	 * Check the IP header for alignment and errors
3272 	 */
3273 	if (dir == PFIL_IN) {
3274 		switch (ether_type) {
3275 		case ETHERTYPE_IP:
3276 			error = bridge_ip_checkbasic(mp);
3277 			break;
3278 #ifdef INET6
3279 		case ETHERTYPE_IPV6:
3280 			error = bridge_ip6_checkbasic(mp);
3281 			break;
3282 #endif /* INET6 */
3283 		default:
3284 			error = 0;
3285 		}
3286 		if (error)
3287 			goto bad;
3288 	}
3289 
3290 	error = 0;
3291 
3292 	/*
3293 	 * Run the packet through pfil
3294 	 */
3295 	switch (ether_type) {
3296 	case ETHERTYPE_IP:
3297 		/*
3298 		 * Before calling the firewall, swap fields the same way
3299 		 * ip_input() does.  Here we assume the header is contiguous.
3300 		 */
3301 		ip = mtod(*mp, struct ip *);
3302 
3303 		ip->ip_len = ntohs(ip->ip_len);
3304 		ip->ip_off = ntohs(ip->ip_off);
3305 
3306 		/*
3307 		 * Run pfil on the member interface and the bridge, both can
3308 		 * be skipped by clearing pfil_member or pfil_bridge.
3309 		 *
3310 		 * Keep the order:
3311 		 *   in_if -> bridge_if -> out_if
3312 		 */
3313 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3314 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3315 			if (*mp == NULL || error != 0) /* filter may consume */
3316 				break;
3317 			error = bridge_post_pfil(*mp);
3318 			if (error)
3319 				break;
3320 		}
3321 
3322 		if (pfil_member && ifp != NULL) {
3323 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3324 			if (*mp == NULL || error != 0) /* filter may consume */
3325 				break;
3326 			error = bridge_post_pfil(*mp);
3327 			if (error)
3328 				break;
3329 		}
3330 
3331 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3332 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3333 			if (*mp == NULL || error != 0) /* filter may consume */
3334 				break;
3335 			error = bridge_post_pfil(*mp);
3336 			if (error)
3337 				break;
3338 		}
3339 
3340 		/* check if we need to fragment the packet */
3341 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3342 			i = (*mp)->m_pkthdr.len;
3343 			if (i > ifp->if_mtu) {
3344 				error = bridge_fragment(ifp, *mp, &eh2, snap,
3345 					    &llc1);
3346 				return (error);
3347 			}
3348 		}
3349 
3350 		/* Recalculate the ip checksum and restore byte ordering */
3351 		ip = mtod(*mp, struct ip *);
3352 		hlen = ip->ip_hl << 2;
3353 		if (hlen < sizeof(struct ip))
3354 			goto bad;
3355 		if (hlen > (*mp)->m_len) {
3356 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3357 				goto bad;
3358 			ip = mtod(*mp, struct ip *);
3359 			if (ip == NULL)
3360 				goto bad;
3361 		}
3362 		ip->ip_len = htons(ip->ip_len);
3363 		ip->ip_off = htons(ip->ip_off);
3364 		ip->ip_sum = 0;
3365 		if (hlen == sizeof(struct ip))
3366 			ip->ip_sum = in_cksum_hdr(ip);
3367 		else
3368 			ip->ip_sum = in_cksum(*mp, hlen);
3369 
3370 		break;
3371 #ifdef INET6
3372 	case ETHERTYPE_IPV6:
3373 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3374 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3375 					dir);
3376 
3377 		if (*mp == NULL || error != 0) /* filter may consume */
3378 			break;
3379 
3380 		if (pfil_member && ifp != NULL)
3381 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3382 					dir);
3383 
3384 		if (*mp == NULL || error != 0) /* filter may consume */
3385 			break;
3386 
3387 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3388 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3389 					dir);
3390 		break;
3391 #endif
3392 	default:
3393 		error = 0;
3394 		break;
3395 	}
3396 
3397 	if (*mp == NULL)
3398 		return (error);
3399 	if (error != 0)
3400 		goto bad;
3401 
3402 	error = -1;
3403 
3404 	/*
3405 	 * Finally, put everything back the way it was and return
3406 	 */
3407 	if (snap) {
3408 		M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
3409 		if (*mp == NULL)
3410 			return (error);
3411 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3412 	}
3413 
3414 	M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
3415 	if (*mp == NULL)
3416 		return (error);
3417 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3418 
3419 	return (0);
3420 
3421 bad:
3422 	m_freem(*mp);
3423 	*mp = NULL;
3424 	return (error);
3425 }
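
/*
 * Added note (not part of the original code): forwarding runs this filter
 * twice per packet, first with dir == PFIL_IN and the receiving member as
 * 'ifp', then with dir == PFIL_OUT and the outgoing member (see
 * bridge_forward() and bridge_broadcast()).  Passing a NULL 'ifp' or
 * 'bifp' suppresses the member or bridge pass respectively, which
 * bridge_broadcast() uses to avoid filtering on the bridge once per
 * member.
 */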
3426 
3427 /*
3428  * Perform basic checks on header size since
3429  * pfil assumes ip_input has already processed
3430  * it for it.  Cut-and-pasted from ip_input.c.
3431  * Given how simple the IPv6 version is,
3432  * does the IPv4 version really need to be
3433  * this complicated?
3434  *
3435  * XXX Should we update ipstat here, or not?
3436  * XXX Right now we update ipstat but not
3437  * XXX csum_counter.
3438  */
3439 static int
3440 bridge_ip_checkbasic(struct mbuf **mp)
3441 {
3442 	struct mbuf *m = *mp;
3443 	struct ip *ip;
3444 	int len, hlen;
3445 	u_short sum;
3446 
3447 	if (*mp == NULL)
3448 		return (-1);
3449 #if notyet
3450 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3451 		if ((m = m_copyup(m, sizeof(struct ip),
3452 			(max_linkhdr + 3) & ~3)) == NULL) {
3453 			/* XXXJRT new stat, please */
3454 			ipstat.ips_toosmall++;
3455 			goto bad;
3456 		}
3457 	} else
3458 #endif
3459 #ifndef __predict_false
3460 #define __predict_false(x) x
3461 #endif
3462 	 if (__predict_false(m->m_len < sizeof (struct ip))) {
3463 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3464 			ipstat.ips_toosmall++;
3465 			goto bad;
3466 		}
3467 	}
3468 	ip = mtod(m, struct ip *);
3469 	if (ip == NULL) goto bad;
3470 
3471 	if (ip->ip_v != IPVERSION) {
3472 		ipstat.ips_badvers++;
3473 		goto bad;
3474 	}
3475 	hlen = ip->ip_hl << 2;
3476 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3477 		ipstat.ips_badhlen++;
3478 		goto bad;
3479 	}
3480 	if (hlen > m->m_len) {
3481 		if ((m = m_pullup(m, hlen)) == NULL) {
3482 			ipstat.ips_badhlen++;
3483 			goto bad;
3484 		}
3485 		ip = mtod(m, struct ip *);
3486 		if (ip == NULL) goto bad;
3487 	}
3488 
3489 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3490 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3491 	} else {
3492 		if (hlen == sizeof(struct ip)) {
3493 			sum = in_cksum_hdr(ip);
3494 		} else {
3495 			sum = in_cksum(m, hlen);
3496 		}
3497 	}
3498 	if (sum) {
3499 		ipstat.ips_badsum++;
3500 		goto bad;
3501 	}
3502 
3503 	/* Retrieve the packet length. */
3504 	len = ntohs(ip->ip_len);
3505 
3506 	/*
3507 	 * Check for additional length bogosity
3508 	 */
3509 	if (len < hlen) {
3510 		ipstat.ips_badlen++;
3511 		goto bad;
3512 	}
3513 
3514 	/*
3515 	 * Check that the amount of data in the buffers
3516 	 * is at least as much as the IP header would have us expect.
3517 	 * Drop packet if shorter than we expect.
3518 	 */
3519 	if (m->m_pkthdr.len < len) {
3520 		ipstat.ips_tooshort++;
3521 		goto bad;
3522 	}
3523 
3524 	/* Checks out, proceed */
3525 	*mp = m;
3526 	return (0);
3527 
3528 bad:
3529 	*mp = m;
3530 	return (-1);
3531 }
3532 
3533 #ifdef INET6
3534 /*
3535  * Same as above, but for IPv6.
3536  * Cut-and-pasted from ip6_input.c.
3537  * XXX Should we update ip6stat, or not?
3538  */
3539 static int
3540 bridge_ip6_checkbasic(struct mbuf **mp)
3541 {
3542 	struct mbuf *m = *mp;
3543 	struct ip6_hdr *ip6;
3544 
3545 	/*
3546 	 * If the IPv6 header is not aligned, slurp it up into a new
3547 	 * mbuf with space for link headers, in the event we forward
3548 	 * it.  Otherwise, if it is aligned, make sure the entire base
3549 	 * IPv6 header is in the first mbuf of the chain.
3550 	 */
3551 #if notyet
3552 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3553 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3554 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3555 			    (max_linkhdr + 3) & ~3)) == NULL) {
3556 			/* XXXJRT new stat, please */
3557 			ip6stat.ip6s_toosmall++;
3558 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3559 			goto bad;
3560 		}
3561 	} else
3562 #endif
3563 	if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3564 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3565 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3566 			ip6stat.ip6s_toosmall++;
3567 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3568 			goto bad;
3569 		}
3570 	}
3571 
3572 	ip6 = mtod(m, struct ip6_hdr *);
3573 
3574 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3575 		ip6stat.ip6s_badvers++;
3576 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3577 		goto bad;
3578 	}
3579 
3580 	/* Checks out, proceed */
3581 	*mp = m;
3582 	return (0);
3583 
3584 bad:
3585 	*mp = m;
3586 	return (-1);
3587 }
3588 #endif /* INET6 */
3589 
3590 /*
3591  * bridge_fragment:
3592  *
3593  *	Return a fragmented mbuf chain.
3594  */
3595 static int
3596 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3597     int snap, struct llc *llc)
3598 {
3599 	struct mbuf *m0;
3600 	struct ip *ip;
3601 	int error = -1;
3602 
3603 	if (m->m_len < sizeof(struct ip) &&
3604 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3605 		goto out;
3606 	ip = mtod(m, struct ip *);
3607 
3608 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3609 		    CSUM_DELAY_IP);
3610 	if (error)
3611 		goto out;
3612 
3613 	/* walk the chain and re-add the Ethernet header */
3614 	for (m0 = m; m0; m0 = m0->m_nextpkt) {
3615 		if (error == 0) {
3616 			if (snap) {
3617 				M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
3618 				if (m0 == NULL) {
3619 					error = ENOBUFS;
3620 					continue;
3621 				}
3622 				bcopy(llc, mtod(m0, caddr_t),
3623 				    sizeof(struct llc));
3624 			}
3625 			M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
3626 			if (m0 == NULL) {
3627 				error = ENOBUFS;
3628 				continue;
3629 			}
3630 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3631 		} else
3632 			m_freem(m);
3633 	}
3634 
3635 	if (error == 0)
3636 		ipstat.ips_fragmented++;
3637 
3638 	return (error);
3639 
3640 out:
3641 	if (m != NULL)
3642 		m_freem(m);
3643 	return (error);
3644 }
3645 
3646 static void
3647 bridge_enqueue_handler(netmsg_t msg)
3648 {
3649 	struct netmsg_packet *nmp;
3650 	struct ifnet *dst_ifp;
3651 	struct mbuf *m;
3652 
3653 	nmp = &msg->packet;
3654 	m = nmp->nm_packet;
3655 	dst_ifp = nmp->base.lmsg.u.ms_resultp;
3656 
3657 	bridge_handoff(dst_ifp, m);
3658 }
3659 
3660 static void
3661 bridge_handoff(struct ifnet *dst_ifp, struct mbuf *m)
3662 {
3663 	struct mbuf *m0;
3664 
3665 	/* We may be sending a fragment so traverse the mbuf */
3666 	for (; m; m = m0) {
3667 		struct altq_pktattr pktattr;
3668 
3669 		m0 = m->m_nextpkt;
3670 		m->m_nextpkt = NULL;
3671 
3672 		if (ifq_is_enabled(&dst_ifp->if_snd))
3673 			altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
3674 
3675 		ifq_dispatch(dst_ifp, m, &pktattr);
3676 	}
3677 }
3678 
3679 static void
3680 bridge_control_dispatch(netmsg_t msg)
3681 {
3682 	struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)msg;
3683 	struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
3684 	int error;
3685 
3686 	ifnet_serialize_all(bifp);
3687 	error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
3688 	ifnet_deserialize_all(bifp);
3689 
3690 	lwkt_replymsg(&bc_msg->base.lmsg, error);
3691 }
3692 
3693 static int
3694 bridge_control(struct bridge_softc *sc, u_long cmd,
3695 	       bridge_ctl_t bc_func, void *bc_arg)
3696 {
3697 	struct ifnet *bifp = sc->sc_ifp;
3698 	struct netmsg_brctl bc_msg;
3699 	int error;
3700 
3701 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
3702 
3703 	bzero(&bc_msg, sizeof(bc_msg));
3704 
3705 	netmsg_init(&bc_msg.base, NULL, &curthread->td_msgport,
3706 		    0, bridge_control_dispatch);
3707 	bc_msg.bc_func = bc_func;
3708 	bc_msg.bc_sc = sc;
3709 	bc_msg.bc_arg = bc_arg;
3710 
3711 	ifnet_deserialize_all(bifp);
3712 	error = lwkt_domsg(BRIDGE_CFGPORT, &bc_msg.base.lmsg, 0);
3713 	ifnet_serialize_all(bifp);
3714 	return error;
3715 }
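
/*
 * Added note (not part of the original code): bridge_control() is entered
 * with the bridge's ifnet serializer held (the ioctl path), but the actual
 * configuration runs in the BRIDGE_CFGPORT thread.  The serializer is
 * therefore dropped around lwkt_domsg(); bridge_control_dispatch() takes
 * it again around bc_func(), and the caller's serialization is restored
 * before returning.
 */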
3716 
3717 static void
3718 bridge_add_bif_handler(netmsg_t msg)
3719 {
3720 	struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)msg;
3721 	struct bridge_softc *sc;
3722 	struct bridge_iflist *bif;
3723 
3724 	sc = amsg->br_softc;
3725 
3726 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3727 	bif->bif_ifp = amsg->br_bif_ifp;
3728 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
3729 	bif->bif_onlist = 1;
3730 	bif->bif_info = amsg->br_bif_info;
3731 
3732 	LIST_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
3733 
3734 	ifnet_forwardmsg(&amsg->base.lmsg, mycpuid + 1);
3735 }
3736 
3737 static void
3738 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3739 	       struct ifnet *ifp)
3740 {
3741 	struct netmsg_braddbif amsg;
3742 
3743 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3744 
3745 	netmsg_init(&amsg.base, NULL, &curthread->td_msgport,
3746 		    0, bridge_add_bif_handler);
3747 	amsg.br_softc = sc;
3748 	amsg.br_bif_info = bif_info;
3749 	amsg.br_bif_ifp = ifp;
3750 
3751 	ifnet_domsg(&amsg.base.lmsg, 0);
3752 }
3753 
3754 static void
3755 bridge_del_bif_handler(netmsg_t msg)
3756 {
3757 	struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)msg;
3758 	struct bridge_softc *sc;
3759 	struct bridge_iflist *bif;
3760 
3761 	sc = dmsg->br_softc;
3762 
3763 	/*
3764 	 * Locate the bif associated with the br_bif_info
3765 	 * on the current CPU
3766 	 */
3767 	bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
3768 	KKASSERT(bif != NULL && bif->bif_onlist);
3769 
3770 	/* Remove the bif from the current CPU's iflist */
3771 	bif->bif_onlist = 0;
3772 	LIST_REMOVE(bif, bif_next);
3773 
3774 	/* Save the removed bif for later freeing */
3775 	LIST_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
3776 
3777 	ifnet_forwardmsg(&dmsg->base.lmsg, mycpuid + 1);
3778 }
3779 
3780 static void
3781 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3782 	       struct bridge_iflist_head *saved_bifs)
3783 {
3784 	struct netmsg_brdelbif dmsg;
3785 
3786 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3787 
3788 	netmsg_init(&dmsg.base, NULL, &curthread->td_msgport,
3789 		    0, bridge_del_bif_handler);
3790 	dmsg.br_softc = sc;
3791 	dmsg.br_bif_info = bif_info;
3792 	dmsg.br_bif_list = saved_bifs;
3793 
3794 	ifnet_domsg(&dmsg.base.lmsg, 0);
3795 }
3796 
3797 static void
3798 bridge_set_bifflags_handler(netmsg_t msg)
3799 {
3800 	struct netmsg_brsflags *smsg = (struct netmsg_brsflags *)msg;
3801 	struct bridge_softc *sc;
3802 	struct bridge_iflist *bif;
3803 
3804 	sc = smsg->br_softc;
3805 
3806 	/*
3807 	 * Locate the bif associated with the br_bif_info
3808 	 * on the current CPU
3809 	 */
3810 	bif = bridge_lookup_member_ifinfo(sc, smsg->br_bif_info);
3811 	KKASSERT(bif != NULL && bif->bif_onlist);
3812 
3813 	bif->bif_flags = smsg->br_bif_flags;
3814 
3815 	ifnet_forwardmsg(&smsg->base.lmsg, mycpuid + 1);
3816 }
3817 
3818 static void
3819 bridge_set_bifflags(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3820 		    uint32_t bif_flags)
3821 {
3822 	struct netmsg_brsflags smsg;
3823 
3824 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3825 
3826 	netmsg_init(&smsg.base, NULL, &curthread->td_msgport,
3827 		    0, bridge_set_bifflags_handler);
3828 	smsg.br_softc = sc;
3829 	smsg.br_bif_info = bif_info;
3830 	smsg.br_bif_flags = bif_flags;
3831 
3832 	ifnet_domsg(&smsg.base.lmsg, 0);
3833 }
3834