xref: /dragonfly/sys/net/bridge/if_bridge.c (revision c89a6c1b)
1 /*
2  * Copyright 2001 Wasabi Systems, Inc.
3  * All rights reserved.
4  *
5  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed for the NetBSD Project by
18  *	Wasabi Systems, Inc.
19  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /*
37  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by Jason L. Wright
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67  * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68  * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69  * $DragonFly: src/sys/net/bridge/if_bridge.c,v 1.60 2008/11/26 12:49:43 sephe Exp $
70  */
71 
72 /*
73  * Network interface bridge support.
74  *
75  * TODO:
76  *
77  *	- Currently only supports Ethernet-like interfaces (Ethernet,
78  *	  802.11, VLANs on Ethernet, etc.).  Figure out a nice way
79  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
80  *	  consider heterogeneous bridges).
81  *
82  *
83  * The bridge's route information is duplicated to each CPU:
84  *
85  *      CPU0          CPU1          CPU2          CPU3
86  * +-----------+ +-----------+ +-----------+ +-----------+
87  * |  rtnode   | |  rtnode   | |  rtnode   | |  rtnode   |
88  * |           | |           | |           | |           |
89  * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
90  * +-----------+ +-----------+ +-----------+ +-----------+
91  *       |         |                     |         |
92  *       |         |                     |         |
93  *       |         |     +----------+    |         |
94  *       |         |     |  rtinfo  |    |         |
95  *       |         +---->|          |<---+         |
96  *       |               |  flags   |              |
97  *       +-------------->|  timeout |<-------------+
98  *                       |  dst_ifp |
99  *                       +----------+
100  *
101  * We choose to put timeout and dst_ifp into the shared part, so updating
102  * them is cheaper than using message forwarding.  Also there is no need
103  * to use a spinlock to protect the updates: timeout and dst_ifp are not
104  * related, and the order in which each field is updated does not matter.
105  * The cache pollution caused by the shared part should not be heavy: in a
106  * stable setup dst_ifp will probably not change during the rtnode's life
107  * time, while timeout is refreshed once per second; most of the time both
108  * timeout and dst_ifp are only read.
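 *
 * A simplified sketch of that layout (illustrative only -- the real
 * declarations live in net/bridge/if_bridgevar.h and carry more fields):
 *
 *	struct bridge_rtinfo {			(shared part)
 *		struct ifnet	*bri_ifp;	(dst_ifp)
 *		unsigned long	bri_expire;	(timeout)
 *		uint8_t		bri_flags;
 *	};
 *
 *	struct bridge_rtnode {			(duplicated on each cpu)
 *		uint8_t			brt_addr[ETHER_ADDR_LEN];
 *		struct bridge_rtinfo	*brt_info;	(-> shared part)
 *	};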
109  *
110  *
111  * Bridge route information installation on bridge_input path:
112  *
113  *      CPU0           CPU1         CPU2          CPU3
114  *
115  *                               tcp_thread2
116  *                                    |
117  *                                alloc nmsg
118  *                    snd nmsg        |
119  *                    w/o rtinfo      |
120  *      ifnet0<-----------------------+
121  *        |                           :
122  *    lookup dst                      :
123  *   rtnode exists?(Y)free nmsg       :
124  *        |(N)                        :
125  *        |
126  *  alloc rtinfo
127  *  alloc rtnode
128  * install rtnode
129  *        |
130  *        +---------->ifnet1
131  *        : fwd nmsg    |
132  *        : w/ rtinfo   |
133  *        :             |
134  *        :             |
135  *                 alloc rtnode
136  *               (w/ nmsg's rtinfo)
137  *                install rtnode
138  *                      |
139  *                      +---------->ifnet2
140  *                      : fwd nmsg    |
141  *                      : w/ rtinfo   |
142  *                      :             |
143  *                      :         same as ifnet1
144  *                                    |
145  *                                    +---------->ifnet3
146  *                                    : fwd nmsg    |
147  *                                    : w/ rtinfo   |
148  *                                    :             |
149  *                                    :         same as ifnet1
150  *                                               free nmsg
151  *                                                  :
152  *                                                  :
153  *
154  * The netmsgs forwarded between the protocol threads and the ifnet
155  * threads are allocated with (M_WAITOK|M_NULLOK), so the allocation
156  * will not fail in most cases (route information is too precious not to
157  * be installed :).  Since multiple threads may try to install route
158  * information for the same dst eaddr, we look up existing route
159  * information in ifnet0.  However, this lookup only needs to be done on
160  * ifnet0, which is the starting point of the installation process.
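 *
 * In rough pseudo-code each per-cpu handler behaves as follows (this is a
 * restatement of the diagram above, not the actual code; cf.
 * bridge_rtinstall_handler() and bridge_rtinstall_oncpu() declared below):
 *
 *	handler on ifnetN (cpu N):
 *		if (N == 0 and an rtnode for the dst eaddr already exists)
 *			free the nmsg and stop;
 *		if (N == 0)
 *			allocate the shared rtinfo;
 *		allocate and install this cpu's rtnode, pointing at the rtinfo;
 *		if (N is the last cpu)
 *			free the nmsg;
 *		else
 *			forward the nmsg (now carrying the rtinfo) to ifnetN+1;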
161  *
162  *
163  * Bridge route information deleting/flushing:
164  *
165  *  CPU0            CPU1             CPU2             CPU3
166  *
167  * netisr0
168  *   |
169  * find suitable rtnodes,
170  * mark their rtinfo dead
171  *   |
172  *   | domsg <------------------------------------------+
173  *   |                                                  | replymsg
174  *   |                                                  |
175  *   V     fwdmsg           fwdmsg           fwdmsg     |
176  * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
177  * delete rtnodes   delete rtnodes   delete rtnodes   delete rtnodes
178  * w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo
179  *                                                    free dead rtinfos
180  *
181  * All deleting/flushing operations are serialized by netisr0, so each
182  * operation only reaps the route information marked dead by itself.
183  *
184  *
185  * Bridge route information adding/deleting/flushing:
186  * Since all operations are serialized by the fixed message flow between
187  * the ifnet threads, it is not possible to create corrupted per-cpu route
188  * information.
189  *
190  *
191  *
192  * Percpu member interface list iteration with blocking operation:
193  * Since a bridge can only delete one member interface at a time and the
194  * deleted member interface is not freed until netmsg_service_sync() has
195  * been performed, the following scheme is used to make sure that the
196  * iteration can keep going even if a member interface is ripped from the
197  * percpu list during the blocking operation:
198  *
199  * LIST_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
200  *     blocking operation;
201  *     blocking operation;
202  *     ...
203  *     ...
204  *     if (nbif != NULL && !nbif->bif_onlist) {
205  *         KKASSERT(bif->bif_onlist);
206  *         nbif = LIST_NEXT(bif, bif_next);
207  *     }
208  * }
209  *
210  * As mentioned above, only one member interface can be unlinked from the
211  * percpu member interface list at a time, so either bif or nbif may be off
212  * the list, but _not_ both.  To keep the iteration going we only care about
213  * nbif, not bif.  Since a removed member interface is only freed after we
214  * finish our work, it is safe to access any field of an unlinked bif (here
215  * bif_onlist).  If nbif is no longer on the list, then bif must still be on
216  * the list, so we change nbif to the next element of bif and keep going.
217  */
218 
219 #include "opt_inet.h"
220 #include "opt_inet6.h"
221 
222 #include <sys/param.h>
223 #include <sys/mbuf.h>
224 #include <sys/malloc.h>
225 #include <sys/protosw.h>
226 #include <sys/systm.h>
227 #include <sys/time.h>
228 #include <sys/socket.h> /* for net/if.h */
229 #include <sys/sockio.h>
230 #include <sys/ctype.h>  /* string functions */
231 #include <sys/kernel.h>
232 #include <sys/random.h>
233 #include <sys/sysctl.h>
234 #include <sys/module.h>
235 #include <sys/proc.h>
236 #include <sys/priv.h>
237 #include <sys/lock.h>
238 #include <sys/thread.h>
239 #include <sys/thread2.h>
240 #include <sys/mpipe.h>
241 
242 #include <net/bpf.h>
243 #include <net/if.h>
244 #include <net/if_dl.h>
245 #include <net/if_types.h>
246 #include <net/if_var.h>
247 #include <net/pfil.h>
248 #include <net/ifq_var.h>
249 #include <net/if_clone.h>
250 
251 #include <netinet/in.h> /* for struct arpcom */
252 #include <netinet/in_systm.h>
253 #include <netinet/in_var.h>
254 #include <netinet/ip.h>
255 #include <netinet/ip_var.h>
256 #ifdef INET6
257 #include <netinet/ip6.h>
258 #include <netinet6/ip6_var.h>
259 #endif
260 #include <netinet/if_ether.h> /* for struct arpcom */
261 #include <net/bridge/if_bridgevar.h>
262 #include <net/if_llc.h>
263 #include <net/netmsg2.h>
264 
265 #include <net/route.h>
266 #include <sys/in_cksum.h>
267 
268 /*
269  * Size of the route hash table.  Must be a power of two.
270  */
271 #ifndef BRIDGE_RTHASH_SIZE
272 #define	BRIDGE_RTHASH_SIZE		1024
273 #endif
274 
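/*
 * Being a power of two lets a hash value be turned into a table index
 * with a cheap "hash & BRIDGE_RTHASH_MASK" instead of a modulo operation.
 */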
275 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
276 
277 /*
278  * Maximum number of addresses to cache.
279  */
280 #ifndef BRIDGE_RTABLE_MAX
281 #define	BRIDGE_RTABLE_MAX		100
282 #endif
283 
284 /*
285  * Spanning tree defaults.
286  */
287 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
288 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
289 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
290 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
291 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
292 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
293 #define	BSTP_DEFAULT_PATH_COST		55
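
/*
 * Note: the max-age, hello-time, forward-delay and hold-time values above
 * are stored in units of 1/256 of a second (seconds * 256); the ioctl
 * handlers below convert to and from whole seconds with ">> 8" / "<< 8".
 */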
294 
295 /*
296  * Timeout (in seconds) for entries learned dynamically.
297  */
298 #ifndef BRIDGE_RTABLE_TIMEOUT
299 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
300 #endif
301 
302 /*
303  * Number of seconds between walks of the route list.
304  */
305 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
306 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
307 #endif
308 
309 /*
310  * List of capabilities to mask on the member interface.
311  */
312 #define	BRIDGE_IFCAPS_MASK		IFCAP_TXCSUM
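
/*
 * IFCAP_TXCSUM is muted while an interface is a bridge member (see
 * bridge_mutecaps() below): a frame that relies on hardware transmit
 * checksumming could be forwarded out a different member that cannot
 * finish the checksum, so members must hand over fully checksummed frames.
 */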
313 
314 typedef int	(*bridge_ctl_t)(struct bridge_softc *, void *);
315 
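/*
 * Wraps a bridge_ctl_t handler together with its softc and argument so
 * that bridge_control() can run the request from a netmsg handler instead
 * of directly in the caller's context.
 */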
316 struct netmsg_brctl {
317 	struct netmsg		bc_nmsg;
318 	bridge_ctl_t		bc_func;
319 	struct bridge_softc	*bc_sc;
320 	void			*bc_arg;
321 };
322 
323 struct netmsg_brsaddr {
324 	struct netmsg		br_nmsg;
325 	struct bridge_softc	*br_softc;
326 	struct ifnet		*br_dst_if;
327 	struct bridge_rtinfo	*br_rtinfo;
328 	int			br_setflags;
329 	uint8_t			br_dst[ETHER_ADDR_LEN];
330 	uint8_t			br_flags;
331 };
332 
333 struct netmsg_braddbif {
334 	struct netmsg		br_nmsg;
335 	struct bridge_softc	*br_softc;
336 	struct bridge_ifinfo	*br_bif_info;
337 	struct ifnet		*br_bif_ifp;
338 };
339 
340 struct netmsg_brdelbif {
341 	struct netmsg		br_nmsg;
342 	struct bridge_softc	*br_softc;
343 	struct bridge_ifinfo	*br_bif_info;
344 	struct bridge_iflist_head *br_bif_list;
345 };
346 
347 struct netmsg_brsflags {
348 	struct netmsg		br_nmsg;
349 	struct bridge_softc	*br_softc;
350 	struct bridge_ifinfo	*br_bif_info;
351 	uint32_t		br_bif_flags;
352 };
353 
354 eventhandler_tag	bridge_detach_cookie = NULL;
355 
356 extern	struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
357 extern	int (*bridge_output_p)(struct ifnet *, struct mbuf *);
358 extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
359 
360 static int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
361 
362 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
363 static void	bridge_clone_destroy(struct ifnet *);
364 
365 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
366 static void	bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
367 static void	bridge_ifdetach(void *, struct ifnet *);
368 static void	bridge_init(void *);
369 static void	bridge_stop(struct ifnet *);
370 static void	bridge_start(struct ifnet *);
371 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
372 static int	bridge_output(struct ifnet *, struct mbuf *);
373 
374 static void	bridge_forward(struct bridge_softc *, struct mbuf *m);
375 
376 static void	bridge_timer_handler(struct netmsg *);
377 static void	bridge_timer(void *);
378 
379 static void	bridge_start_bcast(struct bridge_softc *, struct mbuf *);
380 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
381 		    struct mbuf *);
382 static void	bridge_span(struct bridge_softc *, struct mbuf *);
383 
384 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
385 		    struct ifnet *, uint8_t);
386 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
387 static void	bridge_rtreap(struct bridge_softc *);
388 static void	bridge_rtreap_async(struct bridge_softc *);
389 static void	bridge_rttrim(struct bridge_softc *);
390 static int	bridge_rtage_finddead(struct bridge_softc *);
391 static void	bridge_rtage(struct bridge_softc *);
392 static void	bridge_rtflush(struct bridge_softc *, int);
393 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
394 static int	bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
395 		    struct ifnet *, uint8_t);
396 static void	bridge_rtmsg_sync(struct bridge_softc *sc);
397 static void	bridge_rtreap_handler(struct netmsg *);
398 static void	bridge_rtinstall_handler(struct netmsg *);
399 static int	bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
400 		    struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
401 
402 static void	bridge_rtable_init(struct bridge_softc *);
403 static void	bridge_rtable_fini(struct bridge_softc *);
404 
405 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
406 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
407 		    const uint8_t *);
408 static void	bridge_rtnode_insert(struct bridge_softc *,
409 		    struct bridge_rtnode *);
410 static void	bridge_rtnode_destroy(struct bridge_softc *,
411 		    struct bridge_rtnode *);
412 
413 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
414 		    const char *name);
415 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
416 		    struct ifnet *ifp);
417 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
418 		    struct bridge_ifinfo *);
419 static void	bridge_delete_member(struct bridge_softc *,
420 		    struct bridge_iflist *, int);
421 static void	bridge_delete_span(struct bridge_softc *,
422 		    struct bridge_iflist *);
423 
424 static int	bridge_control(struct bridge_softc *, u_long,
425 			       bridge_ctl_t, void *);
426 static int	bridge_ioctl_init(struct bridge_softc *, void *);
427 static int	bridge_ioctl_stop(struct bridge_softc *, void *);
428 static int	bridge_ioctl_add(struct bridge_softc *, void *);
429 static int	bridge_ioctl_del(struct bridge_softc *, void *);
430 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
431 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
432 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
433 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
434 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
435 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
436 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
437 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
438 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
439 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
440 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
441 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
442 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
443 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
444 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
445 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
446 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
447 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
448 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
449 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
450 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
451 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
452 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
453 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
454 		    int);
455 static int	bridge_ip_checkbasic(struct mbuf **mp);
456 #ifdef INET6
457 static int	bridge_ip6_checkbasic(struct mbuf **mp);
458 #endif /* INET6 */
459 static int	bridge_fragment(struct ifnet *, struct mbuf *,
460 		    struct ether_header *, int, struct llc *);
461 static void	bridge_enqueue_handler(struct netmsg *);
462 static void	bridge_handoff(struct ifnet *, struct mbuf *);
463 
464 static void	bridge_del_bif_handler(struct netmsg *);
465 static void	bridge_add_bif_handler(struct netmsg *);
466 static void	bridge_set_bifflags_handler(struct netmsg *);
467 static void	bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
468 		    struct bridge_iflist_head *);
469 static void	bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
470 		    struct ifnet *);
471 static void	bridge_set_bifflags(struct bridge_softc *,
472 		    struct bridge_ifinfo *, uint32_t);
473 
474 SYSCTL_DECL(_net_link);
475 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
476 
477 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
478 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
479 static int pfil_member = 1; /* run pfil hooks on the member interface */
480 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
481     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
482 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
483     &pfil_bridge, 0, "Packet filter on the bridge interface");
484 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
485     &pfil_member, 0, "Packet filter on the member interface");
486 
487 struct bridge_control_arg {
488 	union {
489 		struct ifbreq ifbreq;
490 		struct ifbifconf ifbifconf;
491 		struct ifbareq ifbareq;
492 		struct ifbaconf ifbaconf;
493 		struct ifbrparam ifbrparam;
494 	} bca_u;
495 	int	bca_len;
496 	void	*bca_uptr;
497 	void	*bca_kptr;
498 };
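
/*
 * bca_u holds the fixed-size ioctl argument copied in by bridge_ioctl().
 * Handlers returning variable-length data (e.g. bridge_ioctl_gifs() and
 * bridge_ioctl_rts()) allocate a kernel buffer, point bca_kptr at it and
 * describe the userland destination with bca_uptr/bca_len; bridge_ioctl()
 * then performs the final copyout and frees bca_kptr.
 */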
499 
500 struct bridge_control {
501 	bridge_ctl_t	bc_func;
502 	int		bc_argsize;
503 	int		bc_flags;
504 };
505 
506 #define	BC_F_COPYIN		0x01	/* copy arguments in */
507 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
508 #define	BC_F_SUSER		0x04	/* do super-user check */
509 
510 const struct bridge_control bridge_control_table[] = {
511 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
512 	  BC_F_COPYIN|BC_F_SUSER },
513 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
514 	  BC_F_COPYIN|BC_F_SUSER },
515 
516 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
517 	  BC_F_COPYIN|BC_F_COPYOUT },
518 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
519 	  BC_F_COPYIN|BC_F_SUSER },
520 
521 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
522 	  BC_F_COPYIN|BC_F_SUSER },
523 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
524 	  BC_F_COPYOUT },
525 
526 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
527 	  BC_F_COPYIN|BC_F_COPYOUT },
528 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
529 	  BC_F_COPYIN|BC_F_COPYOUT },
530 
531 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
532 	  BC_F_COPYIN|BC_F_SUSER },
533 
534 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
535 	  BC_F_COPYIN|BC_F_SUSER },
536 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
537 	  BC_F_COPYOUT },
538 
539 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
540 	  BC_F_COPYIN|BC_F_SUSER },
541 
542 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
543 	  BC_F_COPYIN|BC_F_SUSER },
544 
545 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
546 	  BC_F_COPYOUT },
547 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
548 	  BC_F_COPYIN|BC_F_SUSER },
549 
550 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
551 	  BC_F_COPYOUT },
552 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
553 	  BC_F_COPYIN|BC_F_SUSER },
554 
555 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
556 	  BC_F_COPYOUT },
557 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
558 	  BC_F_COPYIN|BC_F_SUSER },
559 
560 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
561 	  BC_F_COPYOUT },
562 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
563 	  BC_F_COPYIN|BC_F_SUSER },
564 
565 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
566 	  BC_F_COPYIN|BC_F_SUSER },
567 
568 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
569 	  BC_F_COPYIN|BC_F_SUSER },
570 
571 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
572 	  BC_F_COPYIN|BC_F_SUSER },
573 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
574 	  BC_F_COPYIN|BC_F_SUSER },
575 };
576 static const int bridge_control_table_size =
577     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
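
/*
 * Userland reaches the table above via SIOCGDRVSPEC/SIOCSDRVSPEC, with
 * ifd_cmd used as the index into bridge_control_table.  A hedged sketch
 * of adding a member interface (assumes the BRDG* command constants in
 * net/bridge/if_bridgevar.h follow the table order, so BRDGADD selects
 * bridge_ioctl_add(); "sock" is an open AF_INET socket and the example
 * interface names are arbitrary; needs <sys/sockio.h> and <net/if.h>):
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	if (ioctl(sock, SIOCSDRVSPEC, &ifd) < 0)
 *		err(1, "SIOCSDRVSPEC");
 */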
578 
579 LIST_HEAD(, bridge_softc) bridge_list;
580 
581 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
582 				bridge_clone_create,
583 				bridge_clone_destroy, 0, IF_MAXUNIT);
584 
585 static int
586 bridge_modevent(module_t mod, int type, void *data)
587 {
588 	switch (type) {
589 	case MOD_LOAD:
590 		LIST_INIT(&bridge_list);
591 		if_clone_attach(&bridge_cloner);
592 		bridge_input_p = bridge_input;
593 		bridge_output_p = bridge_output;
594 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
595 		    ifnet_detach_event, bridge_ifdetach, NULL,
596 		    EVENTHANDLER_PRI_ANY);
597 #if notyet
598 		bstp_linkstate_p = bstp_linkstate;
599 #endif
600 		break;
601 	case MOD_UNLOAD:
602 		if (!LIST_EMPTY(&bridge_list))
603 			return (EBUSY);
604 		EVENTHANDLER_DEREGISTER(ifnet_detach_event,
605 		    bridge_detach_cookie);
606 		if_clone_detach(&bridge_cloner);
607 		bridge_input_p = NULL;
608 		bridge_output_p = NULL;
609 #if notyet
610 		bstp_linkstate_p = NULL;
611 #endif
612 		break;
613 	default:
614 		return (EOPNOTSUPP);
615 	}
616 	return (0);
617 }
618 
619 static moduledata_t bridge_mod = {
620 	"if_bridge",
621 	bridge_modevent,
622 	0
623 };
624 
625 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
626 
627 
628 /*
629  * bridge_clone_create:
630  *
631  *	Create a new bridge instance.
632  */
633 static int
634 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t param __unused)
635 {
636 	struct bridge_softc *sc;
637 	struct ifnet *ifp;
638 	u_char eaddr[6];
639 	int cpu, rnd;
640 
641 	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
642 	ifp = sc->sc_ifp = &sc->sc_if;
643 
644 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
645 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
646 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
647 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
648 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
649 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
650 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
651 
652 	/* Initialize our routing table. */
653 	bridge_rtable_init(sc);
654 
655 	callout_init(&sc->sc_brcallout);
656 	netmsg_init(&sc->sc_brtimemsg, NULL, &netisr_adone_rport,
657 		    MSGF_DROPABLE, bridge_timer_handler);
658 	sc->sc_brtimemsg.nm_lmsg.u.ms_resultp = sc;
659 
660 	callout_init(&sc->sc_bstpcallout);
661 	netmsg_init(&sc->sc_bstptimemsg, NULL, &netisr_adone_rport,
662 		    MSGF_DROPABLE, bstp_tick_handler);
663 	sc->sc_bstptimemsg.nm_lmsg.u.ms_resultp = sc;
664 
665 	/* Initialize per-cpu member iface lists */
666 	sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
667 				 M_DEVBUF, M_WAITOK);
668 	for (cpu = 0; cpu < ncpus; ++cpu)
669 		LIST_INIT(&sc->sc_iflists[cpu]);
670 
671 	LIST_INIT(&sc->sc_spanlist);
672 
673 	ifp->if_softc = sc;
674 	if_initname(ifp, ifc->ifc_name, unit);
675 	ifp->if_mtu = ETHERMTU;
676 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
677 	ifp->if_ioctl = bridge_ioctl;
678 	ifp->if_start = bridge_start;
679 	ifp->if_init = bridge_init;
680 	ifp->if_type = IFT_BRIDGE;
681 	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
682 	ifq_set_ready(&ifp->if_snd);
683 	ifp->if_hdrlen = ETHER_HDR_LEN;
684 
685 	/*
686 	 * Generate a random Ethernet address and mark it as a locally
687 	 * administered, unicast address.
688 	 */
689 	rnd = karc4random();
690 	bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
691 	rnd = karc4random();
692 	bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */
693 
694 	eaddr[0] &= ~1;	/* clear multicast bit */
695 	eaddr[0] |= 2;	/* set the LAA bit */
696 
697 	ether_ifattach(ifp, eaddr, NULL);
698 	/* Now undo some of the damage... */
699 	ifp->if_baudrate = 0;
700 	ifp->if_type = IFT_BRIDGE;
701 
702 	crit_enter();	/* XXX MP */
703 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
704 	crit_exit();
705 
706 	return (0);
707 }
708 
709 static void
710 bridge_delete_dispatch(struct netmsg *nmsg)
711 {
712 	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
713 	struct bridge_softc *sc = lmsg->u.ms_resultp;
714 	struct ifnet *bifp = sc->sc_ifp;
715 	struct bridge_iflist *bif;
716 
717 	ifnet_serialize_all(bifp);
718 
719 	while ((bif = LIST_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
720 		bridge_delete_member(sc, bif, 0);
721 
722 	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL)
723 		bridge_delete_span(sc, bif);
724 
725 	ifnet_deserialize_all(bifp);
726 
727 	lwkt_replymsg(lmsg, 0);
728 }
729 
730 /*
731  * bridge_clone_destroy:
732  *
733  *	Destroy a bridge instance.
734  */
735 static void
736 bridge_clone_destroy(struct ifnet *ifp)
737 {
738 	struct bridge_softc *sc = ifp->if_softc;
739 	struct lwkt_msg *lmsg;
740 	struct netmsg nmsg;
741 
742 	ifnet_serialize_all(ifp);
743 
744 	bridge_stop(ifp);
745 	ifp->if_flags &= ~IFF_UP;
746 
747 	ifnet_deserialize_all(ifp);
748 
749 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
750 		    0, bridge_delete_dispatch);
751 	lmsg = &nmsg.nm_lmsg;
752 	lmsg->u.ms_resultp = sc;
753 	lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
754 
755 	crit_enter();	/* XXX MP */
756 	LIST_REMOVE(sc, sc_list);
757 	crit_exit();
758 
759 	ether_ifdetach(ifp);
760 
761 	/* Tear down the routing table. */
762 	bridge_rtable_fini(sc);
763 
764 	/* Free per-cpu member iface lists */
765 	kfree(sc->sc_iflists, M_DEVBUF);
766 
767 	kfree(sc, M_DEVBUF);
768 }
769 
770 /*
771  * bridge_ioctl:
772  *
773  *	Handle a control request from the operator.
774  */
775 static int
776 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
777 {
778 	struct bridge_softc *sc = ifp->if_softc;
779 	struct bridge_control_arg args;
780 	struct ifdrv *ifd = (struct ifdrv *) data;
781 	const struct bridge_control *bc;
782 	int error = 0;
783 
784 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
785 
786 	switch (cmd) {
787 	case SIOCADDMULTI:
788 	case SIOCDELMULTI:
789 		break;
790 
791 	case SIOCGDRVSPEC:
792 	case SIOCSDRVSPEC:
793 		if (ifd->ifd_cmd >= bridge_control_table_size) {
794 			error = EINVAL;
795 			break;
796 		}
797 		bc = &bridge_control_table[ifd->ifd_cmd];
798 
799 		if (cmd == SIOCGDRVSPEC &&
800 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
801 			error = EINVAL;
802 			break;
803 		} else if (cmd == SIOCSDRVSPEC &&
804 			   (bc->bc_flags & BC_F_COPYOUT)) {
805 			error = EINVAL;
806 			break;
807 		}
808 
809 		if (bc->bc_flags & BC_F_SUSER) {
810 			error = priv_check_cred(cr, PRIV_ROOT, NULL_CRED_OKAY);
811 			if (error)
812 				break;
813 		}
814 
815 		if (ifd->ifd_len != bc->bc_argsize ||
816 		    ifd->ifd_len > sizeof(args.bca_u)) {
817 			error = EINVAL;
818 			break;
819 		}
820 
821 		memset(&args, 0, sizeof(args));
822 		if (bc->bc_flags & BC_F_COPYIN) {
823 			error = copyin(ifd->ifd_data, &args.bca_u,
824 				       ifd->ifd_len);
825 			if (error)
826 				break;
827 		}
828 
829 		error = bridge_control(sc, cmd, bc->bc_func, &args);
830 		if (error) {
831 			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
832 			break;
833 		}
834 
835 		if (bc->bc_flags & BC_F_COPYOUT) {
836 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
837 			if (args.bca_len != 0) {
838 				KKASSERT(args.bca_kptr != NULL);
839 				if (!error) {
840 					error = copyout(args.bca_kptr,
841 						args.bca_uptr, args.bca_len);
842 				}
843 				kfree(args.bca_kptr, M_TEMP);
844 			} else {
845 				KKASSERT(args.bca_kptr == NULL);
846 			}
847 		} else {
848 			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
849 		}
850 		break;
851 
852 	case SIOCSIFFLAGS:
853 		if (!(ifp->if_flags & IFF_UP) &&
854 		    (ifp->if_flags & IFF_RUNNING)) {
855 			/*
856 			 * If interface is marked down and it is running,
857 			 * then stop it.
858 			 */
859 			bridge_stop(ifp);
860 		} else if ((ifp->if_flags & IFF_UP) &&
861 		    !(ifp->if_flags & IFF_RUNNING)) {
862 			/*
863 			 * If interface is marked up and it is stopped, then
864 			 * start it.
865 			 */
866 			ifp->if_init(sc);
867 		}
868 		break;
869 
870 	case SIOCSIFMTU:
871 		/* Do not allow the MTU to be changed on the bridge */
872 		error = EINVAL;
873 		break;
874 
875 	default:
876 		error = ether_ioctl(ifp, cmd, data);
877 		break;
878 	}
879 	return (error);
880 }
881 
882 /*
883  * bridge_mutecaps:
884  *
885  *	Clear or restore unwanted capabilities on the member interface
886  */
887 static void
888 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
889 {
890 	struct ifreq ifr;
891 	int error;
892 
893 	if (ifp->if_ioctl == NULL)
894 		return;
895 
896 	bzero(&ifr, sizeof(ifr));
897 	ifr.ifr_reqcap = ifp->if_capenable;
898 
899 	if (mute) {
900 		/* mask off and save capabilities */
901 		bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
902 		if (bif_info->bifi_mutecap != 0)
903 			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
904 	} else {
905 		/* restore muted capabilities */
906 		ifr.ifr_reqcap |= bif_info->bifi_mutecap;
907 	}
908 
909 	if (bif_info->bifi_mutecap != 0) {
910 		ifnet_serialize_all(ifp);
911 		error = ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
912 		ifnet_deserialize_all(ifp);
913 	}
914 }
915 
916 /*
917  * bridge_lookup_member:
918  *
919  *	Lookup a bridge member interface.
920  */
921 static struct bridge_iflist *
922 bridge_lookup_member(struct bridge_softc *sc, const char *name)
923 {
924 	struct bridge_iflist *bif;
925 
926 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
927 		if (strcmp(bif->bif_ifp->if_xname, name) == 0)
928 			return (bif);
929 	}
930 	return (NULL);
931 }
932 
933 /*
934  * bridge_lookup_member_if:
935  *
936  *	Lookup a bridge member interface by ifnet*.
937  */
938 static struct bridge_iflist *
939 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
940 {
941 	struct bridge_iflist *bif;
942 
943 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
944 		if (bif->bif_ifp == member_ifp)
945 			return (bif);
946 	}
947 	return (NULL);
948 }
949 
950 /*
951  * bridge_lookup_member_ifinfo:
952  *
953  *	Lookup a bridge member interface by bridge_ifinfo.
954  */
955 static struct bridge_iflist *
956 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
957 			    struct bridge_ifinfo *bif_info)
958 {
959 	struct bridge_iflist *bif;
960 
961 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
962 		if (bif->bif_info == bif_info)
963 			return (bif);
964 	}
965 	return (NULL);
966 }
967 
968 /*
969  * bridge_delete_member:
970  *
971  *	Delete the specified member interface.
972  */
973 static void
974 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
975     int gone)
976 {
977 	struct ifnet *ifs = bif->bif_ifp;
978 	struct ifnet *bifp = sc->sc_ifp;
979 	struct bridge_ifinfo *bif_info = bif->bif_info;
980 	struct bridge_iflist_head saved_bifs;
981 
982 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
983 	KKASSERT(bif_info != NULL);
984 
985 	ifs->if_bridge = NULL;
986 
987 	/*
988 	 * Release the bridge interface's serializer:
989 	 * - To avoid a possible deadlock.
990 	 * - The various sync operations below will block the current thread.
991 	 */
992 	ifnet_deserialize_all(bifp);
993 
994 	if (!gone) {
995 		switch (ifs->if_type) {
996 		case IFT_ETHER:
997 		case IFT_L2VLAN:
998 			/*
999 			 * Take the interface out of promiscuous mode.
1000 			 */
1001 			ifpromisc(ifs, 0);
1002 			bridge_mutecaps(bif_info, ifs, 0);
1003 			break;
1004 
1005 		case IFT_GIF:
1006 			break;
1007 
1008 		default:
1009 			panic("bridge_delete_member: impossible");
1010 			break;
1011 		}
1012 	}
1013 
1014 	/*
1015 	 * Remove the bifs from the percpu linked lists.
1016 	 *
1017 	 * Removed bifs are not freed immediately; instead,
1018 	 * they are saved in saved_bifs.  They will be freed
1019 	 * after we make sure that no one is accessing them,
1020 	 * i.e. after the following netmsg_service_sync().
1021 	 */
1022 	LIST_INIT(&saved_bifs);
1023 	bridge_del_bif(sc, bif_info, &saved_bifs);
1024 
1025 	/*
1026 	 * Make sure that all protocol threads:
1027 	 * o  see that 'ifs' if_bridge has been changed
1028 	 * o  know that bif has been removed from the percpu linked list
1029 	 */
1030 	netmsg_service_sync();
1031 
1032 	/*
1033 	 * Free the removed bifs
1034 	 */
1035 	KKASSERT(!LIST_EMPTY(&saved_bifs));
1036 	while ((bif = LIST_FIRST(&saved_bifs)) != NULL) {
1037 		LIST_REMOVE(bif, bif_next);
1038 		kfree(bif, M_DEVBUF);
1039 	}
1040 
1041 	/* See the comment in bridge_ioctl_stop() */
1042 	bridge_rtmsg_sync(sc);
1043 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1044 
1045 	ifnet_serialize_all(bifp);
1046 
1047 	if (bifp->if_flags & IFF_RUNNING)
1048 		bstp_initialization(sc);
1049 
1050 	/*
1051 	 * Free the bif_info after bstp_initialization(), so that
1052 	 * bridge_softc.sc_root_port will not reference a dangling
1053 	 * pointer.
1054 	 */
1055 	kfree(bif_info, M_DEVBUF);
1056 }
1057 
1058 /*
1059  * bridge_delete_span:
1060  *
1061  *	Delete the specified span interface.
1062  */
1063 static void
1064 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1065 {
1066 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1067 	    ("%s: not a span interface", __func__));
1068 
1069 	LIST_REMOVE(bif, bif_next);
1070 	kfree(bif, M_DEVBUF);
1071 }
1072 
1073 static int
1074 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1075 {
1076 	struct ifnet *ifp = sc->sc_ifp;
1077 
1078 	if (ifp->if_flags & IFF_RUNNING)
1079 		return 0;
1080 
1081 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1082 	    bridge_timer, sc);
1083 
1084 	ifp->if_flags |= IFF_RUNNING;
1085 	bstp_initialization(sc);
1086 	return 0;
1087 }
1088 
1089 static int
1090 bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
1091 {
1092 	struct ifnet *ifp = sc->sc_ifp;
1093 	struct lwkt_msg *lmsg;
1094 
1095 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1096 		return 0;
1097 
1098 	callout_stop(&sc->sc_brcallout);
1099 
1100 	crit_enter();
1101 	lmsg = &sc->sc_brtimemsg.nm_lmsg;
1102 	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
1103 		/* Pending to be processed; drop it */
1104 		lwkt_dropmsg(lmsg);
1105 	}
1106 	crit_exit();
1107 
1108 	bstp_stop(sc);
1109 
1110 	ifp->if_flags &= ~IFF_RUNNING;
1111 
1112 	ifnet_deserialize_all(ifp);
1113 
1114 	/* Let everyone know that we are stopped */
1115 	netmsg_service_sync();
1116 
1117 	/*
1118 	 * Sync the ifnetX msgports in the order in which rtnode
1119 	 * installation messages are forwarded.  This makes sure that
1120 	 * all rtnode installation messages sent by bridge_rtupdate()
1121 	 * during the above netmsg_service_sync() are flushed.
1122 	 */
1123 	bridge_rtmsg_sync(sc);
1124 	bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);
1125 
1126 	ifnet_serialize_all(ifp);
1127 	return 0;
1128 }
1129 
1130 static int
1131 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1132 {
1133 	struct ifbreq *req = arg;
1134 	struct bridge_iflist *bif;
1135 	struct bridge_ifinfo *bif_info;
1136 	struct ifnet *ifs, *bifp;
1137 	int error = 0;
1138 
1139 	bifp = sc->sc_ifp;
1140 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
1141 
1142 	ifs = ifunit(req->ifbr_ifsname);
1143 	if (ifs == NULL)
1144 		return (ENOENT);
1145 
1146 	/* If it's in the span list, it can't be a member. */
1147 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1148 		if (ifs == bif->bif_ifp)
1149 			return (EBUSY);
1150 
1151 	/* Allow the first Ethernet member to define the MTU */
1152 	if (ifs->if_type != IFT_GIF) {
1153 		if (LIST_EMPTY(&sc->sc_iflists[mycpuid])) {
1154 			bifp->if_mtu = ifs->if_mtu;
1155 		} else if (bifp->if_mtu != ifs->if_mtu) {
1156 			if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
1157 			return (EINVAL);
1158 		}
1159 	}
1160 
1161 	if (ifs->if_bridge == sc)
1162 		return (EEXIST);
1163 
1164 	if (ifs->if_bridge != NULL)
1165 		return (EBUSY);
1166 
1167 	bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
1168 	bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
1169 	bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
1170 	bif_info->bifi_ifp = ifs;
1171 
1172 	/*
1173 	 * Release the bridge interface's serializer:
1174 	 * - To avoid a possible deadlock.
1175 	 * - The various sync operations below will block the current thread.
1176 	 */
1177 	ifnet_deserialize_all(bifp);
1178 
1179 	switch (ifs->if_type) {
1180 	case IFT_ETHER:
1181 	case IFT_L2VLAN:
1182 		/*
1183 		 * Place the interface into promiscuous mode.
1184 		 */
1185 		error = ifpromisc(ifs, 1);
1186 		if (error) {
1187 			ifnet_serialize_all(bifp);
1188 			goto out;
1189 		}
1190 		bridge_mutecaps(bif_info, ifs, 1);
1191 		break;
1192 
1193 	case IFT_GIF: /* :^) */
1194 		break;
1195 
1196 	default:
1197 		error = EINVAL;
1198 		ifnet_serialize_all(bifp);
1199 		goto out;
1200 	}
1201 
1202 	/*
1203 	 * Add bifs to percpu linked lists
1204 	 */
1205 	bridge_add_bif(sc, bif_info, ifs);
1206 
1207 	ifnet_serialize_all(bifp);
1208 
1209 	if (bifp->if_flags & IFF_RUNNING)
1210 		bstp_initialization(sc);
1211 	else
1212 		bstp_stop(sc);
1213 
1214 	/*
1215 	 * Everything has been set up, so let the member interface
1216 	 * deliver packets to this bridge on its input/output path.
1217 	 */
1218 	ifs->if_bridge = sc;
1219 out:
1220 	if (error) {
1221 		if (bif_info != NULL)
1222 			kfree(bif_info, M_DEVBUF);
1223 	}
1224 	return (error);
1225 }
1226 
1227 static int
1228 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1229 {
1230 	struct ifbreq *req = arg;
1231 	struct bridge_iflist *bif;
1232 
1233 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1234 	if (bif == NULL)
1235 		return (ENOENT);
1236 
1237 	bridge_delete_member(sc, bif, 0);
1238 
1239 	return (0);
1240 }
1241 
1242 static int
1243 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1244 {
1245 	struct ifbreq *req = arg;
1246 	struct bridge_iflist *bif;
1247 
1248 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1249 	if (bif == NULL)
1250 		return (ENOENT);
1251 
1252 	req->ifbr_ifsflags = bif->bif_flags;
1253 	req->ifbr_state = bif->bif_state;
1254 	req->ifbr_priority = bif->bif_priority;
1255 	req->ifbr_path_cost = bif->bif_path_cost;
1256 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1257 
1258 	return (0);
1259 }
1260 
1261 static int
1262 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1263 {
1264 	struct ifbreq *req = arg;
1265 	struct bridge_iflist *bif;
1266 	struct ifnet *bifp = sc->sc_ifp;
1267 
1268 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1269 	if (bif == NULL)
1270 		return (ENOENT);
1271 
1272 	if (req->ifbr_ifsflags & IFBIF_SPAN) {
1273 		/* SPAN is readonly */
1274 		return (EINVAL);
1275 	}
1276 
1277 	if (req->ifbr_ifsflags & IFBIF_STP) {
1278 		switch (bif->bif_ifp->if_type) {
1279 		case IFT_ETHER:
1280 			/* These can do spanning tree. */
1281 			break;
1282 
1283 		default:
1284 			/* Nothing else can. */
1285 			return (EINVAL);
1286 		}
1287 	}
1288 
1289 	ifnet_deserialize_all(bifp);
1290 	bridge_set_bifflags(sc, bif->bif_info, req->ifbr_ifsflags);
1291 	ifnet_serialize_all(bifp);
1292 
1293 	if (bifp->if_flags & IFF_RUNNING)
1294 		bstp_initialization(sc);
1295 
1296 	return (0);
1297 }
1298 
1299 static int
1300 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1301 {
1302 	struct ifbrparam *param = arg;
1303 	struct ifnet *ifp = sc->sc_ifp;
1304 
1305 	sc->sc_brtmax = param->ifbrp_csize;
1306 
1307 	ifnet_deserialize_all(ifp);
1308 	bridge_rttrim(sc);
1309 	ifnet_serialize_all(ifp);
1310 
1311 	return (0);
1312 }
1313 
1314 static int
1315 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1316 {
1317 	struct ifbrparam *param = arg;
1318 
1319 	param->ifbrp_csize = sc->sc_brtmax;
1320 
1321 	return (0);
1322 }
1323 
1324 static int
1325 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1326 {
1327 	struct bridge_control_arg *bc_arg = arg;
1328 	struct ifbifconf *bifc = arg;
1329 	struct bridge_iflist *bif;
1330 	struct ifbreq *breq;
1331 	int count, len;
1332 
1333 	count = 0;
1334 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
1335 		count++;
1336 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1337 		count++;
1338 
1339 	if (bifc->ifbic_len == 0) {
1340 		bifc->ifbic_len = sizeof(*breq) * count;
1341 		return 0;
1342 	} else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
1343 		bifc->ifbic_len = 0;
1344 		return 0;
1345 	}
1346 
1347 	len = min(bifc->ifbic_len, sizeof(*breq) * count);
1348 	KKASSERT(len >= sizeof(*breq));
1349 
1350 	breq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1351 	if (breq == NULL) {
1352 		bifc->ifbic_len = 0;
1353 		return ENOMEM;
1354 	}
1355 	bc_arg->bca_kptr = breq;
1356 
1357 	count = 0;
1358 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
1359 		if (len < sizeof(*breq))
1360 			break;
1361 
1362 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1363 			sizeof(breq->ifbr_ifsname));
1364 		breq->ifbr_ifsflags = bif->bif_flags;
1365 		breq->ifbr_state = bif->bif_state;
1366 		breq->ifbr_priority = bif->bif_priority;
1367 		breq->ifbr_path_cost = bif->bif_path_cost;
1368 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1369 		breq++;
1370 		count++;
1371 		len -= sizeof(*breq);
1372 	}
1373 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1374 		if (len < sizeof(*breq))
1375 			break;
1376 
1377 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1378 			sizeof(breq->ifbr_ifsname));
1379 		breq->ifbr_ifsflags = bif->bif_flags;
1380 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1381 		breq++;
1382 		count++;
1383 		len -= sizeof(*breq);
1384 	}
1385 
1386 	bifc->ifbic_len = sizeof(*breq) * count;
1387 	KKASSERT(bifc->ifbic_len > 0);
1388 
1389 	bc_arg->bca_len = bifc->ifbic_len;
1390 	bc_arg->bca_uptr = bifc->ifbic_req;
1391 	return 0;
1392 }
1393 
1394 static int
1395 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1396 {
1397 	struct bridge_control_arg *bc_arg = arg;
1398 	struct ifbaconf *bac = arg;
1399 	struct bridge_rtnode *brt;
1400 	struct ifbareq *bareq;
1401 	int count, len;
1402 
1403 	count = 0;
1404 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
1405 		count++;
1406 
1407 	if (bac->ifbac_len == 0) {
1408 		bac->ifbac_len = sizeof(*bareq) * count;
1409 		return 0;
1410 	} else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
1411 		bac->ifbac_len = 0;
1412 		return 0;
1413 	}
1414 
1415 	len = min(bac->ifbac_len, sizeof(*bareq) * count);
1416 	KKASSERT(len >= sizeof(*bareq));
1417 
1418 	bareq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
1419 	if (bareq == NULL) {
1420 		bac->ifbac_len = 0;
1421 		return ENOMEM;
1422 	}
1423 	bc_arg->bca_kptr = bareq;
1424 
1425 	count = 0;
1426 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
1427 		struct bridge_rtinfo *bri = brt->brt_info;
1428 		unsigned long expire;
1429 
1430 		if (len < sizeof(*bareq))
1431 			break;
1432 
1433 		strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
1434 			sizeof(bareq->ifba_ifsname));
1435 		memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1436 		expire = bri->bri_expire;
1437 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1438 		    time_second < expire)
1439 			bareq->ifba_expire = expire - time_second;
1440 		else
1441 			bareq->ifba_expire = 0;
1442 		bareq->ifba_flags = bri->bri_flags;
1443 		bareq++;
1444 		count++;
1445 		len -= sizeof(*bareq);
1446 	}
1447 
1448 	bac->ifbac_len = sizeof(*bareq) * count;
1449 	KKASSERT(bac->ifbac_len > 0);
1450 
1451 	bc_arg->bca_len = bac->ifbac_len;
1452 	bc_arg->bca_uptr = bac->ifbac_req;
1453 	return 0;
1454 }
1455 
1456 static int
1457 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1458 {
1459 	struct ifbareq *req = arg;
1460 	struct bridge_iflist *bif;
1461 	struct ifnet *ifp = sc->sc_ifp;
1462 	int error;
1463 
1464 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1465 
1466 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1467 	if (bif == NULL)
1468 		return (ENOENT);
1469 
1470 	ifnet_deserialize_all(ifp);
1471 	error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1472 			       req->ifba_flags);
1473 	ifnet_serialize_all(ifp);
1474 	return (error);
1475 }
1476 
1477 static int
1478 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1479 {
1480 	struct ifbrparam *param = arg;
1481 
1482 	sc->sc_brttimeout = param->ifbrp_ctime;
1483 
1484 	return (0);
1485 }
1486 
1487 static int
1488 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1489 {
1490 	struct ifbrparam *param = arg;
1491 
1492 	param->ifbrp_ctime = sc->sc_brttimeout;
1493 
1494 	return (0);
1495 }
1496 
1497 static int
1498 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1499 {
1500 	struct ifbareq *req = arg;
1501 	struct ifnet *ifp = sc->sc_ifp;
1502 	int error;
1503 
1504 	ifnet_deserialize_all(ifp);
1505 	error = bridge_rtdaddr(sc, req->ifba_dst);
1506 	ifnet_serialize_all(ifp);
1507 	return error;
1508 }
1509 
1510 static int
1511 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1512 {
1513 	struct ifbreq *req = arg;
1514 	struct ifnet *ifp = sc->sc_ifp;
1515 
1516 	ifnet_deserialize_all(ifp);
1517 	bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1518 	ifnet_serialize_all(ifp);
1519 
1520 	return (0);
1521 }
1522 
1523 static int
1524 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1525 {
1526 	struct ifbrparam *param = arg;
1527 
1528 	param->ifbrp_prio = sc->sc_bridge_priority;
1529 
1530 	return (0);
1531 }
1532 
1533 static int
1534 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1535 {
1536 	struct ifbrparam *param = arg;
1537 
1538 	sc->sc_bridge_priority = param->ifbrp_prio;
1539 
1540 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1541 		bstp_initialization(sc);
1542 
1543 	return (0);
1544 }
1545 
1546 static int
1547 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1548 {
1549 	struct ifbrparam *param = arg;
1550 
1551 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1552 
1553 	return (0);
1554 }
1555 
1556 static int
1557 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1558 {
1559 	struct ifbrparam *param = arg;
1560 
1561 	if (param->ifbrp_hellotime == 0)
1562 		return (EINVAL);
1563 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1564 
1565 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1566 		bstp_initialization(sc);
1567 
1568 	return (0);
1569 }
1570 
1571 static int
1572 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1573 {
1574 	struct ifbrparam *param = arg;
1575 
1576 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1577 
1578 	return (0);
1579 }
1580 
1581 static int
1582 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1583 {
1584 	struct ifbrparam *param = arg;
1585 
1586 	if (param->ifbrp_fwddelay == 0)
1587 		return (EINVAL);
1588 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1589 
1590 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1591 		bstp_initialization(sc);
1592 
1593 	return (0);
1594 }
1595 
1596 static int
1597 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1598 {
1599 	struct ifbrparam *param = arg;
1600 
1601 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1602 
1603 	return (0);
1604 }
1605 
1606 static int
1607 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1608 {
1609 	struct ifbrparam *param = arg;
1610 
1611 	if (param->ifbrp_maxage == 0)
1612 		return (EINVAL);
1613 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1614 
1615 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1616 		bstp_initialization(sc);
1617 
1618 	return (0);
1619 }
1620 
1621 static int
1622 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1623 {
1624 	struct ifbreq *req = arg;
1625 	struct bridge_iflist *bif;
1626 
1627 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1628 	if (bif == NULL)
1629 		return (ENOENT);
1630 
1631 	bif->bif_priority = req->ifbr_priority;
1632 
1633 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1634 		bstp_initialization(sc);
1635 
1636 	return (0);
1637 }
1638 
1639 static int
1640 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1641 {
1642 	struct ifbreq *req = arg;
1643 	struct bridge_iflist *bif;
1644 
1645 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1646 	if (bif == NULL)
1647 		return (ENOENT);
1648 
1649 	bif->bif_path_cost = req->ifbr_path_cost;
1650 
1651 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1652 		bstp_initialization(sc);
1653 
1654 	return (0);
1655 }
1656 
1657 static int
1658 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1659 {
1660 	struct ifbreq *req = arg;
1661 	struct bridge_iflist *bif;
1662 	struct ifnet *ifs;
1663 
1664 	ifs = ifunit(req->ifbr_ifsname);
1665 	if (ifs == NULL)
1666 		return (ENOENT);
1667 
1668 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1669 		if (ifs == bif->bif_ifp)
1670 			return (EBUSY);
1671 
1672 	if (ifs->if_bridge != NULL)
1673 		return (EBUSY);
1674 
1675 	switch (ifs->if_type) {
1676 	case IFT_ETHER:
1677 	case IFT_GIF:
1678 	case IFT_L2VLAN:
1679 		break;
1680 
1681 	default:
1682 		return (EINVAL);
1683 	}
1684 
1685 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1686 	bif->bif_ifp = ifs;
1687 	bif->bif_flags = IFBIF_SPAN;
1688 	/* NOTE: span bif does not need bridge_ifinfo */
1689 
1690 	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1691 
1692 	sc->sc_span = 1;
1693 
1694 	return (0);
1695 }
1696 
1697 static int
1698 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1699 {
1700 	struct ifbreq *req = arg;
1701 	struct bridge_iflist *bif;
1702 	struct ifnet *ifs;
1703 
1704 	ifs = ifunit(req->ifbr_ifsname);
1705 	if (ifs == NULL)
1706 		return (ENOENT);
1707 
1708 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1709 		if (ifs == bif->bif_ifp)
1710 			break;
1711 
1712 	if (bif == NULL)
1713 		return (ENOENT);
1714 
1715 	bridge_delete_span(sc, bif);
1716 
1717 	if (LIST_EMPTY(&sc->sc_spanlist))
1718 		sc->sc_span = 0;
1719 
1720 	return (0);
1721 }
1722 
1723 static void
1724 bridge_ifdetach_dispatch(struct netmsg *nmsg)
1725 {
1726 	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
1727 	struct ifnet *ifp, *bifp;
1728 	struct bridge_softc *sc;
1729 	struct bridge_iflist *bif;
1730 
1731 	ifp = lmsg->u.ms_resultp;
1732 	sc = ifp->if_bridge;
1733 
1734 	/* Check if the interface is a bridge member */
1735 	if (sc != NULL) {
1736 		bifp = sc->sc_ifp;
1737 
1738 		ifnet_serialize_all(bifp);
1739 
1740 		bif = bridge_lookup_member_if(sc, ifp);
1741 		if (bif != NULL) {
1742 			bridge_delete_member(sc, bif, 1);
1743 		} else {
1744 			/* XXX Why would bif be NULL? */
1745 		}
1746 
1747 		ifnet_deserialize_all(bifp);
1748 		goto reply;
1749 	}
1750 
1751 	crit_enter();	/* XXX MP */
1752 
1753 	/* Check if the interface is a span port */
1754 	LIST_FOREACH(sc, &bridge_list, sc_list) {
1755 		bifp = sc->sc_ifp;
1756 
1757 		ifnet_serialize_all(bifp);
1758 
1759 		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1760 			if (ifp == bif->bif_ifp) {
1761 				bridge_delete_span(sc, bif);
1762 				break;
1763 			}
1764 
1765 		ifnet_deserialize_all(bifp);
1766 	}
1767 
1768 	crit_exit();
1769 
1770 reply:
1771 	lwkt_replymsg(lmsg, 0);
1772 }
1773 
1774 /*
1775  * bridge_ifdetach:
1776  *
1777  *	Detach an interface from a bridge.  Called when a member
1778  *	interface is detaching.
1779  */
1780 static void
1781 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1782 {
1783 	struct lwkt_msg *lmsg;
1784 	struct netmsg nmsg;
1785 
1786 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
1787 		    0, bridge_ifdetach_dispatch);
1788 	lmsg = &nmsg.nm_lmsg;
1789 	lmsg->u.ms_resultp = ifp;
1790 
1791 	lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
1792 }
1793 
1794 /*
1795  * bridge_init:
1796  *
1797  *	Initialize a bridge interface.
1798  */
1799 static void
1800 bridge_init(void *xsc)
1801 {
1802 	bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1803 }
1804 
1805 /*
1806  * bridge_stop:
1807  *
1808  *	Stop the bridge interface.
1809  */
1810 static void
1811 bridge_stop(struct ifnet *ifp)
1812 {
1813 	bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1814 }
1815 
1816 /*
1817  * bridge_enqueue:
1818  *
1819  *	Enqueue a packet on a bridge member interface.
1820  *
1821  */
1822 void
1823 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1824 {
1825 	struct netmsg_packet *nmp;
1826 
1827 	nmp = &m->m_hdr.mh_netmsg;
1828 	netmsg_init(&nmp->nm_netmsg, NULL, &netisr_apanic_rport,
1829 		    0, bridge_enqueue_handler);
1830 	nmp->nm_packet = m;
1831 	nmp->nm_netmsg.nm_lmsg.u.ms_resultp = dst_ifp;
1832 
1833 	lwkt_sendmsg(curnetport, &nmp->nm_netmsg.nm_lmsg);
1834 }
1835 
1836 /*
1837  * bridge_output:
1838  *
1839  *	Send output from a bridge member interface.  This
1840  *	performs the bridging function for locally originated
1841  *	packets.
1842  *
1843  *	The mbuf has the Ethernet header already attached.  We must
1844  *	enqueue or free the mbuf before returning.
1845  */
1846 static int
1847 bridge_output(struct ifnet *ifp, struct mbuf *m)
1848 {
1849 	struct bridge_softc *sc = ifp->if_bridge;
1850 	struct ether_header *eh;
1851 	struct ifnet *dst_if, *bifp;
1852 
1853 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
1854 
1855 	/*
1856 	 * Make sure that we are still a member of a bridge interface.
1857 	 */
1858 	if (sc == NULL) {
1859 		m_freem(m);
1860 		return (0);
1861 	}
1862 	bifp = sc->sc_ifp;
1863 
1864 	if (m->m_len < ETHER_HDR_LEN) {
1865 		m = m_pullup(m, ETHER_HDR_LEN);
1866 		if (m == NULL)
1867 			return (0);
1868 	}
1869 	eh = mtod(m, struct ether_header *);
1870 
1871 	/*
1872 	 * If bridge is down, but the original output interface is up,
1873 	 * go ahead and send out that interface.  Otherwise, the packet
1874 	 * is dropped below.
1875 	 */
1876 	if ((bifp->if_flags & IFF_RUNNING) == 0) {
1877 		dst_if = ifp;
1878 		goto sendunicast;
1879 	}
1880 
1881 	/*
1882 	 * If the packet is a multicast, or we don't know a better way to
1883 	 * get there, send to all interfaces.
1884 	 */
1885 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1886 		dst_if = NULL;
1887 	else
1888 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1889 	if (dst_if == NULL) {
1890 		struct bridge_iflist *bif, *nbif;
1891 		struct mbuf *mc;
1892 		int used = 0;
1893 
1894 		if (sc->sc_span)
1895 			bridge_span(sc, m);
1896 
1897 		LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
1898 				     bif_next, nbif) {
1899 			dst_if = bif->bif_ifp;
1900 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1901 				continue;
1902 
1903 			/*
1904 			 * If this is not the original output interface,
1905 			 * and the interface is participating in spanning
1906 			 * tree, make sure the port is in a state that
1907 			 * allows forwarding.
1908 			 */
1909 			if (dst_if != ifp &&
1910 			    (bif->bif_flags & IFBIF_STP) != 0) {
1911 				switch (bif->bif_state) {
1912 				case BSTP_IFSTATE_BLOCKING:
1913 				case BSTP_IFSTATE_LISTENING:
1914 				case BSTP_IFSTATE_DISABLED:
1915 					continue;
1916 				}
1917 			}
1918 
1919 			if (LIST_NEXT(bif, bif_next) == NULL) {
1920 				used = 1;
1921 				mc = m;
1922 			} else {
1923 				mc = m_copypacket(m, MB_DONTWAIT);
1924 				if (mc == NULL) {
1925 					bifp->if_oerrors++;
1926 					continue;
1927 				}
1928 			}
1929 			bridge_handoff(dst_if, mc);
1930 
1931 			if (nbif != NULL && !nbif->bif_onlist) {
1932 				KKASSERT(bif->bif_onlist);
1933 				nbif = LIST_NEXT(bif, bif_next);
1934 			}
1935 		}
1936 		if (used == 0)
1937 			m_freem(m);
1938 		return (0);
1939 	}
1940 
1941 sendunicast:
1942 	/*
1943 	 * XXX Spanning tree consideration here?
1944 	 */
1945 	if (sc->sc_span)
1946 		bridge_span(sc, m);
1947 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
1948 		m_freem(m);
1949 	else
1950 		bridge_handoff(dst_if, m);
1951 	return (0);
1952 }
1953 
1954 /*
1955  * bridge_start:
1956  *
1957  *	Start output on a bridge.
1958  *
1959  */
1960 static void
1961 bridge_start(struct ifnet *ifp)
1962 {
1963 	struct bridge_softc *sc = ifp->if_softc;
1964 
1965 	ASSERT_IFNET_SERIALIZED_TX(ifp);
1966 
1967 	ifp->if_flags |= IFF_OACTIVE;
1968 	for (;;) {
1969 		struct ifnet *dst_if = NULL;
1970 		struct ether_header *eh;
1971 		struct mbuf *m;
1972 
1973 		m = ifq_dequeue(&ifp->if_snd, NULL);
1974 		if (m == NULL)
1975 			break;
1976 
1977 		if (m->m_len < sizeof(*eh)) {
1978 			m = m_pullup(m, sizeof(*eh));
1979 			if (m == NULL) {
1980 				ifp->if_oerrors++;
1981 				continue;
1982 			}
1983 		}
1984 		eh = mtod(m, struct ether_header *);
1985 
1986 		BPF_MTAP(ifp, m);
1987 		ifp->if_opackets++;
1988 
1989 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
1990 			dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1991 
1992 		if (dst_if == NULL)
1993 			bridge_start_bcast(sc, m);
1994 		else
1995 			bridge_enqueue(dst_if, m);
1996 	}
1997 	ifp->if_flags &= ~IFF_OACTIVE;
1998 }
1999 
2000 /*
2001  * bridge_forward:
2002  *
2003  *	The forwarding function of the bridge.
2004  */
2005 static void
2006 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2007 {
2008 	struct bridge_iflist *bif;
2009 	struct ifnet *src_if, *dst_if, *ifp;
2010 	struct ether_header *eh;
2011 
2012 	src_if = m->m_pkthdr.rcvif;
2013 	ifp = sc->sc_ifp;
2014 
2015 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2016 
2017 	ifp->if_ipackets++;
2018 	ifp->if_ibytes += m->m_pkthdr.len;
2019 
2020 	/*
2021 	 * Look up the bridge_iflist.
2022 	 */
2023 	bif = bridge_lookup_member_if(sc, src_if);
2024 	if (bif == NULL) {
2025 		/* Interface is not a bridge member (anymore?) */
2026 		m_freem(m);
2027 		return;
2028 	}
2029 
2030 	if (bif->bif_flags & IFBIF_STP) {
2031 		switch (bif->bif_state) {
2032 		case BSTP_IFSTATE_BLOCKING:
2033 		case BSTP_IFSTATE_LISTENING:
2034 		case BSTP_IFSTATE_DISABLED:
2035 			m_freem(m);
2036 			return;
2037 		}
2038 	}
2039 
2040 	eh = mtod(m, struct ether_header *);
2041 
2042 	/*
2043 	 * If the interface is learning, and the source
2044 	 * address is valid and not multicast, record
2045 	 * the address.
2046 	 */
2047 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2048 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2049 	    (eh->ether_shost[0] == 0 &&
2050 	     eh->ether_shost[1] == 0 &&
2051 	     eh->ether_shost[2] == 0 &&
2052 	     eh->ether_shost[3] == 0 &&
2053 	     eh->ether_shost[4] == 0 &&
2054 	     eh->ether_shost[5] == 0) == 0)
2055 		bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2056 
2057 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
2058 	    bif->bif_state == BSTP_IFSTATE_LEARNING) {
2059 		m_freem(m);
2060 		return;
2061 	}
2062 
2063 	/*
2064 	 * At this point, the port either doesn't participate
2065 	 * in spanning tree or it is in the forwarding state.
2066 	 */
2067 
2068 	/*
2069 	 * If the packet is unicast, destined for someone on
2070 	 * "this" side of the bridge, drop it.
2071 	 */
2072 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2073 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2074 		if (src_if == dst_if) {
2075 			m_freem(m);
2076 			return;
2077 		}
2078 	} else {
2079 		/* ...forward it to all interfaces. */
2080 		ifp->if_imcasts++;
2081 		dst_if = NULL;
2082 	}
2083 
2084 	if (dst_if == NULL) {
2085 		bridge_broadcast(sc, src_if, m);
2086 		return;
2087 	}
2088 
2089 	/*
2090 	 * At this point, we're dealing with a unicast frame
2091 	 * going to a different interface.
2092 	 */
2093 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
2094 		m_freem(m);
2095 		return;
2096 	}
2097 	bif = bridge_lookup_member_if(sc, dst_if);
2098 	if (bif == NULL) {
2099 		/* Not a member of the bridge (anymore?) */
2100 		m_freem(m);
2101 		return;
2102 	}
2103 
2104 	if (bif->bif_flags & IFBIF_STP) {
2105 		switch (bif->bif_state) {
2106 		case BSTP_IFSTATE_DISABLED:
2107 		case BSTP_IFSTATE_BLOCKING:
2108 			m_freem(m);
2109 			return;
2110 		}
2111 	}
2112 
2113 	if (inet_pfil_hook.ph_hashooks > 0
2114 #ifdef INET6
2115 	    || inet6_pfil_hook.ph_hashooks > 0
2116 #endif
2117 	    ) {
2118 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2119 			return;
2120 		if (m == NULL)
2121 			return;
2122 
2123 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2124 			return;
2125 		if (m == NULL)
2126 			return;
2127 	}
2128 	bridge_handoff(dst_if, m);
2129 }
2130 
2131 /*
2132  * bridge_input:
2133  *
2134  *	Receive input from a member interface.  Queue the packet for
2135  *	bridging if it is not for us.
2136  */
2137 static struct mbuf *
2138 bridge_input(struct ifnet *ifp, struct mbuf *m)
2139 {
2140 	struct bridge_softc *sc = ifp->if_bridge;
2141 	struct bridge_iflist *bif;
2142 	struct ifnet *bifp, *new_ifp;
2143 	struct ether_header *eh;
2144 	struct mbuf *mc, *mc2;
2145 
2146 	ASSERT_IFNET_NOT_SERIALIZED_ALL(ifp);
2147 
2148 	/*
2149 	 * Make sure that we are still a member of a bridge interface.
2150 	 */
2151 	if (sc == NULL)
2152 		return m;
2153 
2154 	new_ifp = NULL;
2155 	bifp = sc->sc_ifp;
2156 
2157 	if ((bifp->if_flags & IFF_RUNNING) == 0)
2158 		goto out;
2159 
2160 	/*
2161 	 * Implement support for bridge monitoring.  If this flag has been
2162 	 * set on this interface, discard the packet once we push it through
2163 	 * the bpf(4) machinery, but before we do, increment various counters
2164 	 * associated with this bridge.
2165 	 */
2166 	if (bifp->if_flags & IFF_MONITOR) {
2167 		/* Change input interface to this bridge */
2168 		m->m_pkthdr.rcvif = bifp;
2169 
2170 		BPF_MTAP(bifp, m);
2171 
2172 		/* Update bridge's ifnet statistics */
2173 		bifp->if_ipackets++;
2174 		bifp->if_ibytes += m->m_pkthdr.len;
2175 		if (m->m_flags & (M_MCAST | M_BCAST))
2176 			bifp->if_imcasts++;
2177 
2178 		m_freem(m);
2179 		m = NULL;
2180 		goto out;
2181 	}
2182 
2183 	eh = mtod(m, struct ether_header *);
2184 
2185 	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
2186 		/*
2187 		 * If the packet is for us, set the packet's source as the
2188 		 * bridge, and return the packet back to ifnet.if_input for
2189 		 * local processing.
2190 		 */
2191 		KASSERT(bifp->if_bridge == NULL,
2192 			("loop created in bridge_input"));
2193 		new_ifp = bifp;
2194 		goto out;
2195 	}
2196 
2197 	/*
2198 	 * Tap all packets arriving on the bridge, whether they are
2199 	 * local destinations or not.  Inbound is inbound.
2200 	 */
2201 	BPF_MTAP(bifp, m);
2202 
2203 	bif = bridge_lookup_member_if(sc, ifp);
2204 	if (bif == NULL)
2205 		goto out;
2206 
2207 	if (sc->sc_span)
2208 		bridge_span(sc, m);
2209 
2210 	if (m->m_flags & (M_BCAST | M_MCAST)) {
2211 		/* Tap off 802.1D packets; they do not get forwarded. */
2212 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2213 		    ETHER_ADDR_LEN) == 0) {
2214 			ifnet_serialize_all(bifp);
2215 			bstp_input(sc, bif, m);
2216 			ifnet_deserialize_all(bifp);
2217 
2218 			/* m is freed by bstp_input */
2219 			m = NULL;
2220 			goto out;
2221 		}
2222 
2223 		if (bif->bif_flags & IFBIF_STP) {
2224 			switch (bif->bif_state) {
2225 			case BSTP_IFSTATE_BLOCKING:
2226 			case BSTP_IFSTATE_LISTENING:
2227 			case BSTP_IFSTATE_DISABLED:
2228 				goto out;
2229 			}
2230 		}
2231 
2232 		/*
2233 		 * Make a deep copy of the packet and enqueue the copy
2234 		 * for bridge processing; return the original packet for
2235 		 * local processing.
2236 		 */
2237 		mc = m_dup(m, MB_DONTWAIT);
2238 		if (mc == NULL)
2239 			goto out;
2240 
2241 		bridge_forward(sc, mc);
2242 
2243 		/*
2244 		 * Reinject the mbuf as arriving on the bridge so we have a
2245 		 * chance at claiming multicast packets. We cannot loop back
2246 		 * here from ether_input as a bridge is never a member of a
2247 		 * bridge.
2248 		 */
2249 		KASSERT(bifp->if_bridge == NULL,
2250 			("loop created in bridge_input"));
2251 		mc2 = m_dup(m, MB_DONTWAIT);
2252 #ifdef notyet
2253 		if (mc2 != NULL) {
2254 			/* Keep the layer3 header aligned */
2255 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2256 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2257 		}
2258 #endif
2259 		if (mc2 != NULL) {
2260 			/*
2261 			 * Don't tap to bpf(4) again; we have
2262 			 * already done the tapping.
2263 			 */
2264 			ether_reinput_oncpu(bifp, mc2, 0);
2265 		}
2266 
2267 		/* Return the original packet for local processing. */
2268 		goto out;
2269 	}
2270 
2271 	if (bif->bif_flags & IFBIF_STP) {
2272 		switch (bif->bif_state) {
2273 		case BSTP_IFSTATE_BLOCKING:
2274 		case BSTP_IFSTATE_LISTENING:
2275 		case BSTP_IFSTATE_DISABLED:
2276 			goto out;
2277 		}
2278 	}
2279 
2280 	/*
2281 	 * Unicast.  Make sure it's not for us.
2282 	 *
2283 	 * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
2284 	 * is immediately followed by leaving the loop.
2285 	 */
2286 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2287 		if (bif->bif_ifp->if_type != IFT_ETHER)
2288 			continue;
2289 
2290 		/* It is destined for us. */
2291 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2292 		    ETHER_ADDR_LEN) == 0) {
2293 			if (bif->bif_ifp != ifp) {
2294 				/* XXX loop prevention */
2295 				m->m_flags |= M_ETHER_BRIDGED;
2296 				new_ifp = bif->bif_ifp;
2297 			}
2298 			if (bif->bif_flags & IFBIF_LEARNING) {
2299 				bridge_rtupdate(sc, eh->ether_shost,
2300 						ifp, IFBAF_DYNAMIC);
2301 			}
2302 			goto out;
2303 		}
2304 
2305 		/* We just received a packet that we sent out. */
2306 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2307 		    ETHER_ADDR_LEN) == 0) {
2308 			m_freem(m);
2309 			m = NULL;
2310 			goto out;
2311 		}
2312 	}
2313 
2314 	/* Perform the bridge forwarding function. */
2315 	bridge_forward(sc, m);
2316 	m = NULL;
2317 out:
2318 	if (new_ifp != NULL) {
2319 		ether_reinput_oncpu(new_ifp, m, 1);
2320 		m = NULL;
2321 	}
2322 	return (m);
2323 }
2324 
2325 /*
2326  * bridge_start_bcast:
2327  *
2328  *	Broadcast the packet sent from bridge to all member
2329  *	interfaces.
2330  *	This is a simplified version of bridge_broadcast(); however,
2331  *	this function expects the caller to hold the bridge's serializer.
2332  */
2333 static void
2334 bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
2335 {
2336 	struct bridge_iflist *bif;
2337 	struct mbuf *mc;
2338 	struct ifnet *dst_if, *bifp;
2339 	int used = 0;
2340 
2341 	bifp = sc->sc_ifp;
2342 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
2343 
2344 	/*
2345 	 * The following loop is MPSAFE; nothing in the
2346 	 * loop body blocks.
2347 	 */
2348 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
2349 		dst_if = bif->bif_ifp;
2350 
2351 		if (bif->bif_flags & IFBIF_STP) {
2352 			switch (bif->bif_state) {
2353 			case BSTP_IFSTATE_BLOCKING:
2354 			case BSTP_IFSTATE_DISABLED:
2355 				continue;
2356 			}
2357 		}
2358 
2359 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2360 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2361 			continue;
2362 
2363 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2364 			continue;
2365 
2366 		if (LIST_NEXT(bif, bif_next) == NULL) {
2367 			mc = m;
2368 			used = 1;
2369 		} else {
2370 			mc = m_copypacket(m, MB_DONTWAIT);
2371 			if (mc == NULL) {
2372 				bifp->if_oerrors++;
2373 				continue;
2374 			}
2375 		}
2376 		bridge_enqueue(dst_if, mc);
2377 	}
2378 	if (used == 0)
2379 		m_freem(m);
2380 }
2381 
2382 /*
2383  * bridge_broadcast:
2384  *
2385  *	Send a frame to all interfaces that are members of
2386  *	the bridge, except for the one on which the packet
2387  *	arrived.
2388  */
2389 static void
2390 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2391     struct mbuf *m)
2392 {
2393 	struct bridge_iflist *bif, *nbif;
2394 	struct mbuf *mc;
2395 	struct ifnet *dst_if, *bifp;
2396 	int used = 0;
2397 
2398 	bifp = sc->sc_ifp;
2399 	ASSERT_IFNET_NOT_SERIALIZED_ALL(bifp);
2400 
2401 	if (inet_pfil_hook.ph_hashooks > 0
2402 #ifdef INET6
2403 	    || inet6_pfil_hook.ph_hashooks > 0
2404 #endif
2405 	    ) {
2406 		if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
2407 			return;
2408 		if (m == NULL)
2409 			return;
2410 
2411 		/* Filter on the bridge interface before broadcasting */
2412 		if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
2413 			return;
2414 		if (m == NULL)
2415 			return;
2416 	}
2417 
2418 	LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
2419 		dst_if = bif->bif_ifp;
2420 		if (dst_if == src_if)
2421 			continue;
2422 
2423 		if (bif->bif_flags & IFBIF_STP) {
2424 			switch (bif->bif_state) {
2425 			case BSTP_IFSTATE_BLOCKING:
2426 			case BSTP_IFSTATE_DISABLED:
2427 				continue;
2428 			}
2429 		}
2430 
2431 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2432 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2433 			continue;
2434 
2435 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2436 			continue;
2437 
2438 		if (LIST_NEXT(bif, bif_next) == NULL) {
2439 			mc = m;
2440 			used = 1;
2441 		} else {
2442 			mc = m_copypacket(m, MB_DONTWAIT);
2443 			if (mc == NULL) {
2444 				sc->sc_ifp->if_oerrors++;
2445 				continue;
2446 			}
2447 		}
2448 
2449 		/*
2450 		 * Filter on the output interface.  Pass a NULL bridge
2451 		 * interface pointer so we do not redundantly filter on
2452 		 * the bridge for each interface we broadcast on.
2453 		 */
2454 		if (inet_pfil_hook.ph_hashooks > 0
2455 #ifdef INET6
2456 		    || inet6_pfil_hook.ph_hashooks > 0
2457 #endif
2458 		    ) {
2459 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2460 				continue;
2461 			if (mc == NULL)
2462 				continue;
2463 		}
2464 		bridge_handoff(dst_if, mc);
2465 
2466 		if (nbif != NULL && !nbif->bif_onlist) {
2467 			KKASSERT(bif->bif_onlist);
2468 			nbif = LIST_NEXT(bif, bif_next);
2469 		}
2470 	}
2471 	if (used == 0)
2472 		m_freem(m);
2473 }
2474 
2475 /*
2476  * bridge_span:
2477  *
2478  *	Duplicate a packet out one or more interfaces that are in span mode;
2479  *	the original mbuf is left unmodified.
2480  */
2481 static void
2482 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2483 {
2484 	struct bridge_iflist *bif;
2485 	struct ifnet *dst_if, *bifp;
2486 	struct mbuf *mc;
2487 
2488 	bifp = sc->sc_ifp;
2489 	ifnet_serialize_all(bifp);
2490 
2491 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2492 		dst_if = bif->bif_ifp;
2493 
2494 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2495 			continue;
2496 
2497 		mc = m_copypacket(m, MB_DONTWAIT);
2498 		if (mc == NULL) {
2499 			sc->sc_ifp->if_oerrors++;
2500 			continue;
2501 		}
2502 		bridge_enqueue(dst_if, mc);
2503 	}
2504 
2505 	ifnet_deserialize_all(bifp);
2506 }
2507 
2508 static void
2509 bridge_rtmsg_sync_handler(struct netmsg *nmsg)
2510 {
2511 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2512 }
2513 
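/*
 * bridge_rtmsg_sync:
 *
 *	Circulate a no-op netmsg through every cpu's ifnet message
 *	port and wait for it to come back.  Because each port handles
 *	messages in order, this guarantees that routing table netmsgs
 *	sent earlier have been processed on all CPUs.
 */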
2514 static void
2515 bridge_rtmsg_sync(struct bridge_softc *sc)
2516 {
2517 	struct netmsg nmsg;
2518 
2519 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2520 
2521 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
2522 		    0, bridge_rtmsg_sync_handler);
2523 	ifnet_domsg(&nmsg.nm_lmsg, 0);
2524 }
2525 
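/*
 * bridge_rtinfo_update:
 *
 *	Refresh the shared rtinfo of a routing entry: repoint a dynamic
 *	entry at dst_if, refresh the expiration time when the entry is
 *	(still) dynamic, and overwrite the flags when setflags is set.
 */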
2526 static __inline void
2527 bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
2528 		     int setflags, uint8_t flags, uint32_t timeo)
2529 {
2530 	if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2531 	    bri->bri_ifp != dst_if)
2532 		bri->bri_ifp = dst_if;
2533 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2534 	    bri->bri_expire != time_second + timeo)
2535 		bri->bri_expire = time_second + timeo;
2536 	if (setflags)
2537 		bri->bri_flags = flags;
2538 }
2539 
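/*
 * bridge_rtinstall_oncpu:
 *
 *	Install the per-cpu rtnode for 'dst' and link it to the shared
 *	rtinfo.  On CPU0 the rtinfo is allocated here (or, if an entry
 *	already exists, it is simply updated and *bri0 is left NULL);
 *	the other CPUs reuse the rtinfo passed in through bri0.
 */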
2540 static int
2541 bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
2542 		       struct ifnet *dst_if, int setflags, uint8_t flags,
2543 		       struct bridge_rtinfo **bri0)
2544 {
2545 	struct bridge_rtnode *brt;
2546 	struct bridge_rtinfo *bri;
2547 
2548 	if (mycpuid == 0) {
2549 		brt = bridge_rtnode_lookup(sc, dst);
2550 		if (brt != NULL) {
2551 			/*
2552 			 * rtnode for 'dst' already exists.  We inform the
2553 			 * caller about this by leaving bri0 as NULL.  The
2554 			 * caller will terminate the installation upon getting
2555 			 * NULL bri0.  However, we still need to update the
2556 			 * rtinfo.
2557 			 */
2558 			KKASSERT(*bri0 == NULL);
2559 
2560 			/* Update rtinfo */
2561 			bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
2562 					     flags, sc->sc_brttimeout);
2563 			return 0;
2564 		}
2565 
2566 		/*
2567 		 * We only need to check brtcnt on CPU0; if the limit
2568 		 * would be exceeded, ENOSPC is returned.  The caller knows
2569 		 * this and will terminate the installation.
2570 		 */
2571 		if (sc->sc_brtcnt >= sc->sc_brtmax)
2572 			return ENOSPC;
2573 
2574 		KKASSERT(*bri0 == NULL);
2575 		bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
2576 				  M_WAITOK | M_ZERO);
2577 		*bri0 = bri;
2578 
2579 		/* Setup rtinfo */
2580 		bri->bri_flags = IFBAF_DYNAMIC;
2581 		bridge_rtinfo_update(bri, dst_if, setflags, flags,
2582 				     sc->sc_brttimeout);
2583 	} else {
2584 		bri = *bri0;
2585 		KKASSERT(bri != NULL);
2586 	}
2587 
2588 	brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
2589 		      M_WAITOK | M_ZERO);
2590 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2591 	brt->brt_info = bri;
2592 
2593 	bridge_rtnode_insert(sc, brt);
2594 	return 0;
2595 }
2596 
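/*
 * bridge_rtinstall_handler:
 *
 *	Netmsg handler that installs an rtnode on each cpu in turn.
 *	The message is replied to early on CPU0 when the routing table
 *	is full or the entry already exists; otherwise it is forwarded
 *	to the next cpu.
 */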
2597 static void
2598 bridge_rtinstall_handler(struct netmsg *nmsg)
2599 {
2600 	struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)nmsg;
2601 	int error;
2602 
2603 	error = bridge_rtinstall_oncpu(brmsg->br_softc,
2604 				       brmsg->br_dst, brmsg->br_dst_if,
2605 				       brmsg->br_setflags, brmsg->br_flags,
2606 				       &brmsg->br_rtinfo);
2607 	if (error) {
2608 		KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
2609 		lwkt_replymsg(&nmsg->nm_lmsg, error);
2610 		return;
2611 	} else if (brmsg->br_rtinfo == NULL) {
2612 		/* rtnode already exists for 'dst' */
2613 		KKASSERT(mycpuid == 0);
2614 		lwkt_replymsg(&nmsg->nm_lmsg, 0);
2615 		return;
2616 	}
2617 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2618 }
2619 
2620 /*
2621  * bridge_rtupdate:
2622  *
2623  *	Add/Update a bridge routing entry.
2624  */
2625 static int
2626 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2627 		struct ifnet *dst_if, uint8_t flags)
2628 {
2629 	struct bridge_rtnode *brt;
2630 
2631 	/*
2632 	 * A route for this destination might already exist.  If so,
2633 	 * update it, otherwise create a new one.
2634 	 */
2635 	if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
2636 		struct netmsg_brsaddr *brmsg;
2637 
2638 		if (sc->sc_brtcnt >= sc->sc_brtmax)
2639 			return ENOSPC;
2640 
2641 		brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
2642 		if (brmsg == NULL)
2643 			return ENOMEM;
2644 
2645 		netmsg_init(&brmsg->br_nmsg, NULL, &netisr_afree_rport,
2646 			    0, bridge_rtinstall_handler);
2647 		memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
2648 		brmsg->br_dst_if = dst_if;
2649 		brmsg->br_flags = flags;
2650 		brmsg->br_setflags = 0;
2651 		brmsg->br_softc = sc;
2652 		brmsg->br_rtinfo = NULL;
2653 
2654 		ifnet_sendmsg(&brmsg->br_nmsg.nm_lmsg, 0);
2655 		return 0;
2656 	}
2657 	bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
2658 			     sc->sc_brttimeout);
2659 	return 0;
2660 }
2661 
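/*
 * bridge_rtsaddr:
 *
 *	Install a routing entry for 'dst', forcing the given flags
 *	(setflags), and wait until it has been installed on all CPUs.
 */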
2662 static int
2663 bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
2664 	       struct ifnet *dst_if, uint8_t flags)
2665 {
2666 	struct netmsg_brsaddr brmsg;
2667 
2668 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2669 
2670 	netmsg_init(&brmsg.br_nmsg, NULL, &curthread->td_msgport,
2671 		    0, bridge_rtinstall_handler);
2672 	memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
2673 	brmsg.br_dst_if = dst_if;
2674 	brmsg.br_flags = flags;
2675 	brmsg.br_setflags = 1;
2676 	brmsg.br_softc = sc;
2677 	brmsg.br_rtinfo = NULL;
2678 
2679 	return ifnet_domsg(&brmsg.br_nmsg.nm_lmsg, 0);
2680 }
2681 
2682 /*
2683  * bridge_rtlookup:
2684  *
2685  *	Lookup the destination interface for an address.
2686  */
2687 static struct ifnet *
2688 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2689 {
2690 	struct bridge_rtnode *brt;
2691 
2692 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2693 		return NULL;
2694 	return brt->brt_info->bri_ifp;
2695 }
2696 
2697 static void
2698 bridge_rtreap_handler(struct netmsg *nmsg)
2699 {
2700 	struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2701 	struct bridge_rtnode *brt, *nbrt;
2702 
2703 	LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
2704 		if (brt->brt_info->bri_dead)
2705 			bridge_rtnode_destroy(sc, brt);
2706 	}
2707 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
2708 }
2709 
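/*
 * bridge_rtreap:
 *
 *	Synchronously destroy all rtnodes whose rtinfo has been marked
 *	dead (bri_dead) on every cpu.
 */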
2710 static void
2711 bridge_rtreap(struct bridge_softc *sc)
2712 {
2713 	struct netmsg nmsg;
2714 
2715 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2716 
2717 	netmsg_init(&nmsg, NULL, &curthread->td_msgport,
2718 		    0, bridge_rtreap_handler);
2719 	nmsg.nm_lmsg.u.ms_resultp = sc;
2720 
2721 	ifnet_domsg(&nmsg.nm_lmsg, 0);
2722 }
2723 
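/*
 * bridge_rtreap_async:
 *
 *	Asynchronous version of bridge_rtreap(); the reap request is
 *	dispatched without waiting for it to complete.
 */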
2724 static void
2725 bridge_rtreap_async(struct bridge_softc *sc)
2726 {
2727 	struct netmsg *nmsg;
2728 
2729 	nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);
2730 
2731 	netmsg_init(nmsg, NULL, &netisr_afree_rport,
2732 		    0, bridge_rtreap_handler);
2733 	nmsg->nm_lmsg.u.ms_resultp = sc;
2734 
2735 	ifnet_sendmsg(&nmsg->nm_lmsg, 0);
2736 }
2737 
2738 /*
2739  * bridge_rttrim:
2740  *
2741  *	Trim the routing table so that the number of
2742  *	routing entries is less than or equal to the
2743  *	maximum.
2744  */
2745 static void
2746 bridge_rttrim(struct bridge_softc *sc)
2747 {
2748 	struct bridge_rtnode *brt;
2749 	int dead;
2750 
2751 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2752 
2753 	/* Make sure we actually need to do this. */
2754 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2755 		return;
2756 
2757 	/*
2758 	 * Find out how many rtnodes are dead
2759 	 */
2760 	dead = bridge_rtage_finddead(sc);
2761 	KKASSERT(dead <= sc->sc_brtcnt);
2762 
2763 	if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2764 		/* Enough dead rtnodes are found */
2765 		bridge_rtreap(sc);
2766 		return;
2767 	}
2768 
2769 	/*
2770 	 * Kill some dynamic rtnodes to meet the brtmax
2771 	 */
2772 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2773 		struct bridge_rtinfo *bri = brt->brt_info;
2774 
2775 		if (bri->bri_dead) {
2776 			/*
2777 			 * We have counted this rtnode in
2778 			 * bridge_rtage_finddead()
2779 			 */
2780 			continue;
2781 		}
2782 
2783 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2784 			bri->bri_dead = 1;
2785 			++dead;
2786 			KKASSERT(dead <= sc->sc_brtcnt);
2787 
2788 			if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
2789 				/* Enough rtnodes are collected */
2790 				break;
2791 			}
2792 		}
2793 	}
2794 	if (dead)
2795 		bridge_rtreap(sc);
2796 }
2797 
2798 /*
2799  * bridge_timer:
2800  *
2801  *	Aging timer for the bridge.
2802  */
2803 static void
2804 bridge_timer(void *arg)
2805 {
2806 	struct bridge_softc *sc = arg;
2807 	struct lwkt_msg *lmsg;
2808 
2809 	KKASSERT(mycpuid == BRIDGE_CFGCPU);
2810 
2811 	crit_enter();
2812 
2813 	if (callout_pending(&sc->sc_brcallout) ||
2814 	    !callout_active(&sc->sc_brcallout)) {
2815 		crit_exit();
2816 		return;
2817 	}
2818 	callout_deactivate(&sc->sc_brcallout);
2819 
2820 	lmsg = &sc->sc_brtimemsg.nm_lmsg;
2821 	KKASSERT(lmsg->ms_flags & MSGF_DONE);
2822 	lwkt_sendmsg(BRIDGE_CFGPORT, lmsg);
2823 
2824 	crit_exit();
2825 }
2826 
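/*
 * bridge_timer_handler:
 *
 *	Netmsg handler for the aging timer: run an aging cycle and,
 *	while the bridge is still running, re-arm the callout.
 */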
2827 static void
2828 bridge_timer_handler(struct netmsg *nmsg)
2829 {
2830 	struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
2831 
2832 	KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);
2833 
2834 	crit_enter();
2835 	/* Reply ASAP */
2836 	lwkt_replymsg(&nmsg->nm_lmsg, 0);
2837 	crit_exit();
2838 
2839 	bridge_rtage(sc);
2840 	if (sc->sc_ifp->if_flags & IFF_RUNNING) {
2841 		callout_reset(&sc->sc_brcallout,
2842 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2843 	}
2844 }
2845 
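/*
 * bridge_rtage_finddead:
 *
 *	Mark expired dynamic rtnodes as dead and return the number of
 *	entries marked.
 */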
2846 static int
2847 bridge_rtage_finddead(struct bridge_softc *sc)
2848 {
2849 	struct bridge_rtnode *brt;
2850 	int dead = 0;
2851 
2852 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2853 		struct bridge_rtinfo *bri = brt->brt_info;
2854 
2855 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2856 		    time_second >= bri->bri_expire) {
2857 			bri->bri_dead = 1;
2858 			++dead;
2859 			KKASSERT(dead <= sc->sc_brtcnt);
2860 		}
2861 	}
2862 	return dead;
2863 }
2864 
2865 /*
2866  * bridge_rtage:
2867  *
2868  *	Perform an aging cycle.
2869  */
2870 static void
2871 bridge_rtage(struct bridge_softc *sc)
2872 {
2873 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2874 
2875 	if (bridge_rtage_finddead(sc))
2876 		bridge_rtreap(sc);
2877 }
2878 
2879 /*
2880  * bridge_rtflush:
2881  *
2882  *	Remove all dynamic addresses from the bridge.
2883  */
2884 static void
2885 bridge_rtflush(struct bridge_softc *sc, int bf)
2886 {
2887 	struct bridge_rtnode *brt;
2888 	int reap;
2889 
2890 	reap = 0;
2891 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2892 		struct bridge_rtinfo *bri = brt->brt_info;
2893 
2894 		if ((bf & IFBF_FLUSHALL) ||
2895 		    (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2896 			bri->bri_dead = 1;
2897 			reap = 1;
2898 		}
2899 	}
2900 	if (reap) {
2901 		if (bf & IFBF_FLUSHSYNC)
2902 			bridge_rtreap(sc);
2903 		else
2904 			bridge_rtreap_async(sc);
2905 	}
2906 }
2907 
2908 /*
2909  * bridge_rtdaddr:
2910  *
2911  *	Remove an address from the table.
2912  */
2913 static int
2914 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2915 {
2916 	struct bridge_rtnode *brt;
2917 
2918 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
2919 
2920 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2921 		return (ENOENT);
2922 
2923 	/* TODO: add a cheaper delete operation */
2924 	brt->brt_info->bri_dead = 1;
2925 	bridge_rtreap(sc);
2926 	return (0);
2927 }
2928 
2929 /*
2930  * bridge_rtdelete:
2931  *
2932  *	Delete routes to a specific member interface.
2933  */
2934 void
2935 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
2936 {
2937 	struct bridge_rtnode *brt;
2938 	int reap;
2939 
2940 	reap = 0;
2941 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2942 		struct bridge_rtinfo *bri = brt->brt_info;
2943 
2944 		if (bri->bri_ifp == ifp &&
2945 		    ((bf & IFBF_FLUSHALL) ||
2946 		     (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
2947 			bri->bri_dead = 1;
2948 			reap = 1;
2949 		}
2950 	}
2951 	if (reap) {
2952 		if (bf & IFBF_FLUSHSYNC)
2953 			bridge_rtreap(sc);
2954 		else
2955 			bridge_rtreap_async(sc);
2956 	}
2957 }
2958 
2959 /*
2960  * bridge_rtable_init:
2961  *
2962  *	Initialize the route table for this bridge.
2963  */
2964 static void
2965 bridge_rtable_init(struct bridge_softc *sc)
2966 {
2967 	int cpu;
2968 
2969 	/*
2970 	 * Initialize per-cpu hash tables
2971 	 */
2972 	sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
2973 				 M_DEVBUF, M_WAITOK);
2974 	for (cpu = 0; cpu < ncpus; ++cpu) {
2975 		int i;
2976 
2977 		sc->sc_rthashs[cpu] =
2978 		kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
2979 			M_DEVBUF, M_WAITOK);
2980 
2981 		for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2982 			LIST_INIT(&sc->sc_rthashs[cpu][i]);
2983 	}
2984 	sc->sc_rthash_key = karc4random();
2985 
2986 	/*
2987 	 * Initialize per-cpu lists
2988 	 */
2989 	sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
2990 				 M_DEVBUF, M_WAITOK);
2991 	for (cpu = 0; cpu < ncpus; ++cpu)
2992 		LIST_INIT(&sc->sc_rtlists[cpu]);
2993 }
2994 
2995 /*
2996  * bridge_rtable_fini:
2997  *
2998  *	Deconstruct the route table for this bridge.
2999  */
3000 static void
3001 bridge_rtable_fini(struct bridge_softc *sc)
3002 {
3003 	int cpu;
3004 
3005 	/*
3006 	 * Free per-cpu hash tables
3007 	 */
3008 	for (cpu = 0; cpu < ncpus; ++cpu)
3009 		kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3010 	kfree(sc->sc_rthashs, M_DEVBUF);
3011 
3012 	/*
3013 	 * Free per-cpu lists
3014 	 */
3015 	kfree(sc->sc_rtlists, M_DEVBUF);
3016 }
3017 
3018 /*
3019  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3020  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3021  */
3022 #define	mix(a, b, c)							\
3023 do {									\
3024 	a -= b; a -= c; a ^= (c >> 13);					\
3025 	b -= c; b -= a; b ^= (a << 8);					\
3026 	c -= a; c -= b; c ^= (b >> 13);					\
3027 	a -= b; a -= c; a ^= (c >> 12);					\
3028 	b -= c; b -= a; b ^= (a << 16);					\
3029 	c -= a; c -= b; c ^= (b >> 5);					\
3030 	a -= b; a -= c; a ^= (c >> 3);					\
3031 	b -= c; b -= a; b ^= (a << 10);					\
3032 	c -= a; c -= b; c ^= (b >> 15);					\
3033 } while (/*CONSTCOND*/0)
3034 
3035 static __inline uint32_t
3036 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3037 {
3038 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3039 
3040 	b += addr[5] << 8;
3041 	b += addr[4];
3042 	a += addr[3] << 24;
3043 	a += addr[2] << 16;
3044 	a += addr[1] << 8;
3045 	a += addr[0];
3046 
3047 	mix(a, b, c);
3048 
3049 	return (c & BRIDGE_RTHASH_MASK);
3050 }
3051 
3052 #undef mix
3053 
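/*
 * bridge_rtnode_addr_cmp:
 *
 *	Compare two Ethernet addresses byte by byte, memcmp() style.
 *	The hash chains are kept sorted on this ordering so lookups
 *	can terminate early.
 */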
3054 static int
3055 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3056 {
3057 	int i, d;
3058 
3059 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3060 		d = ((int)a[i]) - ((int)b[i]);
3061 	}
3062 
3063 	return (d);
3064 }
3065 
3066 /*
3067  * bridge_rtnode_lookup:
3068  *
3069  *	Look up a bridge route node for the specified destination.
3070  */
3071 static struct bridge_rtnode *
3072 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
3073 {
3074 	struct bridge_rtnode *brt;
3075 	uint32_t hash;
3076 	int dir;
3077 
3078 	hash = bridge_rthash(sc, addr);
3079 	LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
3080 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3081 		if (dir == 0)
3082 			return (brt);
3083 		if (dir > 0)
3084 			return (NULL);
3085 	}
3086 
3087 	return (NULL);
3088 }
3089 
3090 /*
3091  * bridge_rtnode_insert:
3092  *
3093  *	Insert the specified bridge node into the route table.
3094  *	Caller has to make sure that rtnode does not exist.
3095  */
3096 static void
3097 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3098 {
3099 	struct bridge_rtnode *lbrt;
3100 	uint32_t hash;
3101 	int dir;
3102 
3103 	hash = bridge_rthash(sc, brt->brt_addr);
3104 
3105 	lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
3106 	if (lbrt == NULL) {
3107 		LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash], brt, brt_hash);
3108 		goto out;
3109 	}
3110 
3111 	do {
3112 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3113 		KASSERT(dir != 0, ("rtnode already exists\n"));
3114 
3115 		if (dir > 0) {
3116 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3117 			goto out;
3118 		}
3119 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
3120 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3121 			goto out;
3122 		}
3123 		lbrt = LIST_NEXT(lbrt, brt_hash);
3124 	} while (lbrt != NULL);
3125 
3126 	panic("no suitable position found for rtnode\n");
3127 out:
3128 	LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
3129 	if (mycpuid == 0) {
3130 		/*
3131 		 * Update the brtcnt.
3132 		 * We only need to do it once and we do it on CPU0.
3133 		 */
3134 		sc->sc_brtcnt++;
3135 	}
3136 }
3137 
3138 /*
3139  * bridge_rtnode_destroy:
3140  *
3141  *	Destroy a bridge rtnode.
3142  */
3143 static void
3144 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3145 {
3146 	LIST_REMOVE(brt, brt_hash);
3147 	LIST_REMOVE(brt, brt_list);
3148 
3149 	if (mycpuid + 1 == ncpus) {
3150 		/* Free rtinfo associated with rtnode on the last cpu */
3151 		kfree(brt->brt_info, M_DEVBUF);
3152 	}
3153 	kfree(brt, M_DEVBUF);
3154 
3155 	if (mycpuid == 0) {
3156 		/* Update brtcnt only on CPU0 */
3157 		sc->sc_brtcnt--;
3158 	}
3159 }
3160 
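/*
 * bridge_post_pfil:
 *
 *	Reject packets that a packet filter has tagged for IP
 *	forwarding or dummynet processing; neither is supported on
 *	the bridging path yet.
 */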
3161 static __inline int
3162 bridge_post_pfil(struct mbuf *m)
3163 {
3164 	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3165 		return EOPNOTSUPP;
3166 
3167 	/* Not yet */
3168 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3169 		return EOPNOTSUPP;
3170 
3171 	return 0;
3172 }
3173 
3174 /*
3175  * Send bridge packets through pfil if they are one of the types pfil can deal
3176  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3177  * question.) If bifp or ifp is NULL then packet filtering is skipped for
3178  * that interface.
3179  */
3180 static int
3181 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3182 {
3183 	int snap, error, i, hlen;
3184 	struct ether_header *eh1, eh2;
3185 	struct ip *ip;
3186 	struct llc llc1;
3187 	u_int16_t ether_type;
3188 
3189 	snap = 0;
3190 	error = -1;	/* Default error unless explicitly set below */
3191 
3192 	if (pfil_bridge == 0 && pfil_member == 0)
3193 		return (0); /* filtering is disabled */
3194 
3195 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3196 	if ((*mp)->m_len < i) {
3197 		*mp = m_pullup(*mp, i);
3198 		if (*mp == NULL) {
3199 			kprintf("%s: m_pullup failed\n", __func__);
3200 			return (-1);
3201 		}
3202 	}
3203 
3204 	eh1 = mtod(*mp, struct ether_header *);
3205 	ether_type = ntohs(eh1->ether_type);
3206 
3207 	/*
3208 	 * Check for SNAP/LLC.
3209 	 */
3210 	if (ether_type < ETHERMTU) {
3211 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3212 
3213 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3214 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3215 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3216 		    llc2->llc_control == LLC_UI) {
3217 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3218 			snap = 1;
3219 		}
3220 	}
3221 
3222 	/*
3223 	 * If we're trying to filter bridge traffic, don't look at anything
3224 	 * other than IP and ARP traffic.  If the filter doesn't understand
3225 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3226 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3227 	 * but of course we don't have an AppleTalk filter to begin with.
3228 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3229 	 * ARP traffic.)
3230 	 */
3231 	switch (ether_type) {
3232 	case ETHERTYPE_ARP:
3233 	case ETHERTYPE_REVARP:
3234 		return (0); /* Automatically pass */
3235 
3236 	case ETHERTYPE_IP:
3237 #ifdef INET6
3238 	case ETHERTYPE_IPV6:
3239 #endif /* INET6 */
3240 		break;
3241 
3242 	default:
3243 		/*
3244 		 * Check to see if the user wants to pass non-IP
3245 		 * packets; these will not be checked by pfil(9)
3246 		 * and would be passed unconditionally, so the default is to drop.
3247 		 */
3248 		if (pfil_onlyip)
3249 			goto bad;
3250 	}
3251 
3252 	/* Strip off the Ethernet header and keep a copy. */
3253 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3254 	m_adj(*mp, ETHER_HDR_LEN);
3255 
3256 	/* Strip off snap header, if present */
3257 	if (snap) {
3258 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3259 		m_adj(*mp, sizeof(struct llc));
3260 	}
3261 
3262 	/*
3263 	 * Check the IP header for alignment and errors
3264 	 */
3265 	if (dir == PFIL_IN) {
3266 		switch (ether_type) {
3267 		case ETHERTYPE_IP:
3268 			error = bridge_ip_checkbasic(mp);
3269 			break;
3270 #ifdef INET6
3271 		case ETHERTYPE_IPV6:
3272 			error = bridge_ip6_checkbasic(mp);
3273 			break;
3274 #endif /* INET6 */
3275 		default:
3276 			error = 0;
3277 		}
3278 		if (error)
3279 			goto bad;
3280 	}
3281 
3282 	error = 0;
3283 
3284 	/*
3285 	 * Run the packet through pfil
3286 	 */
3287 	switch (ether_type) {
3288 	case ETHERTYPE_IP:
3289 		/*
3290 		 * Before calling the firewall, swap fields the same way
3291 		 * the IP layer does.  Here we assume the header is contiguous.
3292 		 */
3293 		ip = mtod(*mp, struct ip *);
3294 
3295 		ip->ip_len = ntohs(ip->ip_len);
3296 		ip->ip_off = ntohs(ip->ip_off);
3297 
3298 		/*
3299 		 * Run pfil on the member interface and the bridge; both can
3300 		 * be skipped by clearing pfil_member or pfil_bridge.
3301 		 *
3302 		 * Keep the order:
3303 		 *   in_if -> bridge_if -> out_if
3304 		 */
3305 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3306 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3307 			if (*mp == NULL || error != 0) /* filter may consume */
3308 				break;
3309 			error = bridge_post_pfil(*mp);
3310 			if (error)
3311 				break;
3312 		}
3313 
3314 		if (pfil_member && ifp != NULL) {
3315 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3316 			if (*mp == NULL || error != 0) /* filter may consume */
3317 				break;
3318 			error = bridge_post_pfil(*mp);
3319 			if (error)
3320 				break;
3321 		}
3322 
3323 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3324 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3325 			if (*mp == NULL || error != 0) /* filter may consume */
3326 				break;
3327 			error = bridge_post_pfil(*mp);
3328 			if (error)
3329 				break;
3330 		}
3331 
3332 		/* check if we need to fragment the packet */
3333 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3334 			i = (*mp)->m_pkthdr.len;
3335 			if (i > ifp->if_mtu) {
3336 				error = bridge_fragment(ifp, *mp, &eh2, snap,
3337 					    &llc1);
3338 				return (error);
3339 			}
3340 		}
3341 
3342 		/* Recalculate the ip checksum and restore byte ordering */
3343 		ip = mtod(*mp, struct ip *);
3344 		hlen = ip->ip_hl << 2;
3345 		if (hlen < sizeof(struct ip))
3346 			goto bad;
3347 		if (hlen > (*mp)->m_len) {
3348 			if ((*mp = m_pullup(*mp, hlen)) == 0)
3349 				goto bad;
3350 			ip = mtod(*mp, struct ip *);
3351 			if (ip == NULL)
3352 				goto bad;
3353 		}
3354 		ip->ip_len = htons(ip->ip_len);
3355 		ip->ip_off = htons(ip->ip_off);
3356 		ip->ip_sum = 0;
3357 		if (hlen == sizeof(struct ip))
3358 			ip->ip_sum = in_cksum_hdr(ip);
3359 		else
3360 			ip->ip_sum = in_cksum(*mp, hlen);
3361 
3362 		break;
3363 #ifdef INET6
3364 	case ETHERTYPE_IPV6:
3365 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3366 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3367 					dir);
3368 
3369 		if (*mp == NULL || error != 0) /* filter may consume */
3370 			break;
3371 
3372 		if (pfil_member && ifp != NULL)
3373 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3374 					dir);
3375 
3376 		if (*mp == NULL || error != 0) /* filter may consume */
3377 			break;
3378 
3379 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3380 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3381 					dir);
3382 		break;
3383 #endif
3384 	default:
3385 		error = 0;
3386 		break;
3387 	}
3388 
3389 	if (*mp == NULL)
3390 		return (error);
3391 	if (error != 0)
3392 		goto bad;
3393 
3394 	error = -1;
3395 
3396 	/*
3397 	 * Finally, put everything back the way it was and return
3398 	 */
3399 	if (snap) {
3400 		M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
3401 		if (*mp == NULL)
3402 			return (error);
3403 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3404 	}
3405 
3406 	M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
3407 	if (*mp == NULL)
3408 		return (error);
3409 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3410 
3411 	return (0);
3412 
3413 bad:
3414 	m_freem(*mp);
3415 	*mp = NULL;
3416 	return (error);
3417 }
3418 
3419 /*
3420  * Perform basic checks on header size since
3421  * Perform basic checks on the IP header size since
3422  * pfil assumes ip_input has already done them
3423  * for it.  Cut-and-pasted from ip_input.c.
3424  * does the IPv4 version really need to be
3425  * this complicated?
3426  *
3427  * XXX Should we update ipstat here, or not?
3428  * XXX Right now we update ipstat but not
3429  * XXX csum_counter.
3430  */
3431 static int
3432 bridge_ip_checkbasic(struct mbuf **mp)
3433 {
3434 	struct mbuf *m = *mp;
3435 	struct ip *ip;
3436 	int len, hlen;
3437 	u_short sum;
3438 
3439 	if (*mp == NULL)
3440 		return (-1);
3441 #if notyet
3442 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3443 		if ((m = m_copyup(m, sizeof(struct ip),
3444 			(max_linkhdr + 3) & ~3)) == NULL) {
3445 			/* XXXJRT new stat, please */
3446 			ipstat.ips_toosmall++;
3447 			goto bad;
3448 		}
3449 	} else
3450 #endif
3451 #ifndef __predict_false
3452 #define __predict_false(x) x
3453 #endif
3454 	 if (__predict_false(m->m_len < sizeof (struct ip))) {
3455 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3456 			ipstat.ips_toosmall++;
3457 			goto bad;
3458 		}
3459 	}
3460 	ip = mtod(m, struct ip *);
3461 	if (ip == NULL) goto bad;
3462 
3463 	if (ip->ip_v != IPVERSION) {
3464 		ipstat.ips_badvers++;
3465 		goto bad;
3466 	}
3467 	hlen = ip->ip_hl << 2;
3468 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3469 		ipstat.ips_badhlen++;
3470 		goto bad;
3471 	}
3472 	if (hlen > m->m_len) {
3473 		if ((m = m_pullup(m, hlen)) == 0) {
3474 			ipstat.ips_badhlen++;
3475 			goto bad;
3476 		}
3477 		ip = mtod(m, struct ip *);
3478 		if (ip == NULL) goto bad;
3479 	}
3480 
3481 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3482 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3483 	} else {
3484 		if (hlen == sizeof(struct ip)) {
3485 			sum = in_cksum_hdr(ip);
3486 		} else {
3487 			sum = in_cksum(m, hlen);
3488 		}
3489 	}
3490 	if (sum) {
3491 		ipstat.ips_badsum++;
3492 		goto bad;
3493 	}
3494 
3495 	/* Retrieve the packet length. */
3496 	len = ntohs(ip->ip_len);
3497 
3498 	/*
3499 	 * Check for additional length bogosity
3500 	 */
3501 	if (len < hlen) {
3502 		ipstat.ips_badlen++;
3503 		goto bad;
3504 	}
3505 
3506 	/*
3507 	 * Check that the amount of data in the buffers
3508 	 * is at least as much as the IP header would have us expect.
3509 	 * Drop packet if shorter than we expect.
3510 	 */
3511 	if (m->m_pkthdr.len < len) {
3512 		ipstat.ips_tooshort++;
3513 		goto bad;
3514 	}
3515 
3516 	/* Checks out, proceed */
3517 	*mp = m;
3518 	return (0);
3519 
3520 bad:
3521 	*mp = m;
3522 	return (-1);
3523 }
3524 
3525 #ifdef INET6
3526 /*
3527  * Same as above, but for IPv6.
3528  * Cut-and-pasted from ip6_input.c.
3529  * XXX Should we update ip6stat, or not?
3530  */
3531 static int
3532 bridge_ip6_checkbasic(struct mbuf **mp)
3533 {
3534 	struct mbuf *m = *mp;
3535 	struct ip6_hdr *ip6;
3536 
3537 	/*
3538 	 * If the IPv6 header is not aligned, slurp it up into a new
3539 	 * mbuf with space for link headers, in the event we forward
3540 	 * it.  Otherwise, if it is aligned, make sure the entire base
3541 	 * IPv6 header is in the first mbuf of the chain.
3542 	 */
3543 #if notyet
3544 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3545 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3546 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3547 			    (max_linkhdr + 3) & ~3)) == NULL) {
3548 			/* XXXJRT new stat, please */
3549 			ip6stat.ip6s_toosmall++;
3550 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3551 			goto bad;
3552 		}
3553 	} else
3554 #endif
3555 	if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3556 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3557 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3558 			ip6stat.ip6s_toosmall++;
3559 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3560 			goto bad;
3561 		}
3562 	}
3563 
3564 	ip6 = mtod(m, struct ip6_hdr *);
3565 
3566 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3567 		ip6stat.ip6s_badvers++;
3568 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3569 		goto bad;
3570 	}
3571 
3572 	/* Checks out, proceed */
3573 	*mp = m;
3574 	return (0);
3575 
3576 bad:
3577 	*mp = m;
3578 	return (-1);
3579 }
3580 #endif /* INET6 */
3581 
3582 /*
3583  * bridge_fragment:
3584  *
3585  *	Return a fragmented mbuf chain.
3586  */
3587 static int
3588 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3589     int snap, struct llc *llc)
3590 {
3591 	struct mbuf *m0;
3592 	struct ip *ip;
3593 	int error = -1;
3594 
3595 	if (m->m_len < sizeof(struct ip) &&
3596 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3597 		goto out;
3598 	ip = mtod(m, struct ip *);
3599 
3600 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3601 		    CSUM_DELAY_IP);
3602 	if (error)
3603 		goto out;
3604 
3605 	/* walk the chain and re-add the Ethernet header */
3606 	for (m0 = m; m0; m0 = m0->m_nextpkt) {
3607 		if (error == 0) {
3608 			if (snap) {
3609 				M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
3610 				if (m0 == NULL) {
3611 					error = ENOBUFS;
3612 					continue;
3613 				}
3614 				bcopy(llc, mtod(m0, caddr_t),
3615 				    sizeof(struct llc));
3616 			}
3617 			M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
3618 			if (m0 == NULL) {
3619 				error = ENOBUFS;
3620 				continue;
3621 			}
3622 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3623 		} else
3624 			m_freem(m);
3625 	}
3626 
3627 	if (error == 0)
3628 		ipstat.ips_fragmented++;
3629 
3630 	return (error);
3631 
3632 out:
3633 	if (m != NULL)
3634 		m_freem(m);
3635 	return (error);
3636 }
3637 
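/*
 * bridge_enqueue_handler:
 *
 *	Netmsg handler for bridge_enqueue(); hands the queued packet
 *	to the destination interface.
 */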
3638 static void
3639 bridge_enqueue_handler(struct netmsg *nmsg)
3640 {
3641 	struct netmsg_packet *nmp;
3642 	struct ifnet *dst_ifp;
3643 	struct mbuf *m;
3644 
3645 	nmp = (struct netmsg_packet *)nmsg;
3646 	m = nmp->nm_packet;
3647 	dst_ifp = nmp->nm_netmsg.nm_lmsg.u.ms_resultp;
3648 
3649 	bridge_handoff(dst_ifp, m);
3650 }
3651 
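/*
 * bridge_handoff:
 *
 *	Dispatch a packet (possibly a chain of fragments) onto the
 *	destination interface's send queue, classifying it for ALTQ
 *	when enabled.
 */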
3652 static void
3653 bridge_handoff(struct ifnet *dst_ifp, struct mbuf *m)
3654 {
3655 	struct mbuf *m0;
3656 
3657 	/* We may be sending a fragment so traverse the mbuf chain */
3658 	for (; m; m = m0) {
3659 		struct altq_pktattr pktattr;
3660 
3661 		m0 = m->m_nextpkt;
3662 		m->m_nextpkt = NULL;
3663 
3664 		if (ifq_is_enabled(&dst_ifp->if_snd))
3665 			altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
3666 
3667 		ifq_dispatch(dst_ifp, m, &pktattr);
3668 	}
3669 }
3670 
3671 static void
3672 bridge_control_dispatch(struct netmsg *nmsg)
3673 {
3674 	struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)nmsg;
3675 	struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
3676 	int error;
3677 
3678 	ifnet_serialize_all(bifp);
3679 	error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
3680 	ifnet_deserialize_all(bifp);
3681 
3682 	lwkt_replymsg(&nmsg->nm_lmsg, error);
3683 }
3684 
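/*
 * bridge_control:
 *
 *	Execute a bridge configuration callback in the BRIDGE_CFGPORT
 *	thread.  The bridge's serializer is released while the message
 *	is in flight; the dispatch handler re-acquires it around the
 *	callback.
 */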
3685 static int
3686 bridge_control(struct bridge_softc *sc, u_long cmd,
3687 	       bridge_ctl_t bc_func, void *bc_arg)
3688 {
3689 	struct ifnet *bifp = sc->sc_ifp;
3690 	struct netmsg_brctl bc_msg;
3691 	struct netmsg *nmsg;
3692 	int error;
3693 
3694 	ASSERT_IFNET_SERIALIZED_ALL(bifp);
3695 
3696 	bzero(&bc_msg, sizeof(bc_msg));
3697 	nmsg = &bc_msg.bc_nmsg;
3698 
3699 	netmsg_init(nmsg, NULL, &curthread->td_msgport,
3700 		    0, bridge_control_dispatch);
3701 	bc_msg.bc_func = bc_func;
3702 	bc_msg.bc_sc = sc;
3703 	bc_msg.bc_arg = bc_arg;
3704 
3705 	ifnet_deserialize_all(bifp);
3706 	error = lwkt_domsg(BRIDGE_CFGPORT, &nmsg->nm_lmsg, 0);
3707 	ifnet_serialize_all(bifp);
3708 	return error;
3709 }
3710 
3711 static void
3712 bridge_add_bif_handler(struct netmsg *nmsg)
3713 {
3714 	struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)nmsg;
3715 	struct bridge_softc *sc;
3716 	struct bridge_iflist *bif;
3717 
3718 	sc = amsg->br_softc;
3719 
3720 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3721 	bif->bif_ifp = amsg->br_bif_ifp;
3722 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
3723 	bif->bif_onlist = 1;
3724 	bif->bif_info = amsg->br_bif_info;
3725 
3726 	LIST_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
3727 
3728 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3729 }
3730 
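/*
 * bridge_add_bif:
 *
 *	Add a member interface to the bridge by installing a per-cpu
 *	bridge_iflist entry on every cpu, each pointing at the shared
 *	bif_info.
 */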
3731 static void
3732 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3733 	       struct ifnet *ifp)
3734 {
3735 	struct netmsg_braddbif amsg;
3736 
3737 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3738 
3739 	netmsg_init(&amsg.br_nmsg, NULL, &curthread->td_msgport,
3740 		    0, bridge_add_bif_handler);
3741 	amsg.br_softc = sc;
3742 	amsg.br_bif_info = bif_info;
3743 	amsg.br_bif_ifp = ifp;
3744 
3745 	ifnet_domsg(&amsg.br_nmsg.nm_lmsg, 0);
3746 }
3747 
3748 static void
3749 bridge_del_bif_handler(struct netmsg *nmsg)
3750 {
3751 	struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)nmsg;
3752 	struct bridge_softc *sc;
3753 	struct bridge_iflist *bif;
3754 
3755 	sc = dmsg->br_softc;
3756 
3757 	/*
3758 	 * Locate the bif associated with the br_bif_info
3759 	 * on the current CPU
3760 	 */
3761 	bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
3762 	KKASSERT(bif != NULL && bif->bif_onlist);
3763 
3764 	/* Remove the bif from the current CPU's iflist */
3765 	bif->bif_onlist = 0;
3766 	LIST_REMOVE(bif, bif_next);
3767 
3768 	/* Save the removed bif for later freeing */
3769 	LIST_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
3770 
3771 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3772 }
3773 
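/*
 * bridge_del_bif:
 *
 *	Remove a member interface from the bridge on every cpu.  The
 *	unlinked per-cpu bridge_iflist entries are collected on
 *	'saved_bifs' so the caller can free them afterwards.
 */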
3774 static void
3775 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3776 	       struct bridge_iflist_head *saved_bifs)
3777 {
3778 	struct netmsg_brdelbif dmsg;
3779 
3780 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3781 
3782 	netmsg_init(&dmsg.br_nmsg, NULL, &curthread->td_msgport,
3783 		    0, bridge_del_bif_handler);
3784 	dmsg.br_softc = sc;
3785 	dmsg.br_bif_info = bif_info;
3786 	dmsg.br_bif_list = saved_bifs;
3787 
3788 	ifnet_domsg(&dmsg.br_nmsg.nm_lmsg, 0);
3789 }
3790 
3791 static void
3792 bridge_set_bifflags_handler(struct netmsg *nmsg)
3793 {
3794 	struct netmsg_brsflags *smsg = (struct netmsg_brsflags *)nmsg;
3795 	struct bridge_softc *sc;
3796 	struct bridge_iflist *bif;
3797 
3798 	sc = smsg->br_softc;
3799 
3800 	/*
3801 	 * Locate the bif associated with the br_bif_info
3802 	 * on the current CPU
3803 	 */
3804 	bif = bridge_lookup_member_ifinfo(sc, smsg->br_bif_info);
3805 	KKASSERT(bif != NULL && bif->bif_onlist);
3806 
3807 	bif->bif_flags = smsg->br_bif_flags;
3808 
3809 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3810 }
3811 
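/*
 * bridge_set_bifflags:
 *
 *	Update a member interface's flags on every cpu.
 */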
3812 static void
3813 bridge_set_bifflags(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3814 		    uint32_t bif_flags)
3815 {
3816 	struct netmsg_brsflags smsg;
3817 
3818 	ASSERT_IFNET_NOT_SERIALIZED_ALL(sc->sc_ifp);
3819 
3820 	netmsg_init(&smsg.br_nmsg, NULL, &curthread->td_msgport,
3821 		    0, bridge_set_bifflags_handler);
3822 	smsg.br_softc = sc;
3823 	smsg.br_bif_info = bif_info;
3824 	smsg.br_bif_flags = bif_flags;
3825 
3826 	ifnet_domsg(&smsg.br_nmsg.nm_lmsg, 0);
3827 }
3828