xref: /dragonfly/sys/net/bridge/if_bridge.c (revision ce0e08e2)
1 /*
2  * Copyright 2001 Wasabi Systems, Inc.
3  * All rights reserved.
4  *
5  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed for the NetBSD Project by
18  *	Wasabi Systems, Inc.
19  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
25  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
26  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /*
37  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by Jason L. Wright
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
56  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
57  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
58  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
59  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
60  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
62  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
63  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64  * POSSIBILITY OF SUCH DAMAGE.
65  *
66  * $OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp $
67  * $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $
68  * $FreeBSD: src/sys/net/if_bridge.c,v 1.26 2005/10/13 23:05:55 thompsa Exp $
69  * $DragonFly: src/sys/net/bridge/if_bridge.c,v 1.60 2008/11/26 12:49:43 sephe Exp $
70  */
71 
72 /*
73  * Network interface bridge support.
74  *
75  * TODO:
76  *
77  *	- Currently only supports Ethernet-like interfaces (Ethernet,
78  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
79  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
80  *	  consider heterogeneous bridges).
81  *
82  *
83  * Bridge's route information is duplicated to each CPUs:
84  *
85  *      CPU0          CPU1          CPU2          CPU3
86  * +-----------+ +-----------+ +-----------+ +-----------+
87  * |  rtnode   | |  rtnode   | |  rtnode   | |  rtnode   |
88  * |           | |           | |           | |           |
89  * | dst eaddr | | dst eaddr | | dst eaddr | | dst eaddr |
90  * +-----------+ +-----------+ +-----------+ +-----------+
91  *       |         |                     |         |
92  *       |         |                     |         |
93  *       |         |     +----------+    |         |
94  *       |         |     |  rtinfo  |    |         |
95  *       |         +---->|          |<---+         |
96  *       |               |  flags   |              |
97  *       +-------------->|  timeout |<-------------+
98  *                       |  dst_ifp |
99  *                       +----------+
100  *
101  * We choose to put timeout and dst_ifp into shared part, so updating
102  * them will be cheaper than using message forwarding.  Also there is
103  * no need to use a spinlock to protect the updating: timeout and dst_ifp
104  * are unrelated, and each field's updating order has no importance.
105  * The cache pollution by the shared part should not be heavy: in a stable
106  * setup, dst_ifp will probably not be changed during the rtnode's lifetime,
107  * while timeout is refreshed once per second; most of the time, timeout
108  * and dst_ifp are read-only accessed.
109  *
110  *
111  * Bridge route information installation on bridge_input path:
112  *
113  *      CPU0           CPU1         CPU2          CPU3
114  *
115  *                               tcp_thread2
116  *                                    |
117  *                                alloc nmsg
118  *                    snd nmsg        |
119  *                    w/o rtinfo      |
120  *      ifnet0<-----------------------+
121  *        |                           :
122  *    lookup dst                      :
123  *   rtnode exists?(Y)free nmsg       :
124  *        |(N)                        :
125  *        |
126  *  alloc rtinfo
127  *  alloc rtnode
128  * install rtnode
129  *        |
130  *        +---------->ifnet1
131  *        : fwd nmsg    |
132  *        : w/ rtinfo   |
133  *        :             |
134  *        :             |
135  *                 alloc rtnode
136  *               (w/ nmsg's rtinfo)
137  *                install rtnode
138  *                      |
139  *                      +---------->ifnet2
140  *                      : fwd nmsg    |
141  *                      : w/ rtinfo   |
142  *                      :             |
143  *                      :         same as ifnet1
144  *                                    |
145  *                                    +---------->ifnet3
146  *                                    : fwd nmsg    |
147  *                                    : w/ rtinfo   |
148  *                                    :             |
149  *                                    :         same as ifnet1
150  *                                               free nmsg
151  *                                                  :
152  *                                                  :
153  *
154  * The netmsgs forwarded between protocol threads and ifnet threads are
155  * allocated with (M_WAITOK|M_NULLOK), so it will not fail under most
156  * cases (route information is too precious to be not installed :).
157  * Since multiple threads may try to install route information for the
158  * same dst eaddr, we look up route information in ifnet0.  However, this
159  * looking up only need to be performed on ifnet0, which is the start
160  * point of the route information installation process.
161  *
162  *
163  * Bridge route information deleting/flushing:
164  *
165  *  CPU0            CPU1             CPU2             CPU3
166  *
167  * netisr0
168  *   |
169  * find suitable rtnodes,
170  * mark their rtinfo dead
171  *   |
172  *   | domsg <------------------------------------------+
173  *   |                                                  | replymsg
174  *   |                                                  |
175  *   V     fwdmsg           fwdmsg           fwdmsg     |
176  * ifnet0 --------> ifnet1 --------> ifnet2 --------> ifnet3
177  * delete rtnodes   delete rtnodes   delete rtnodes   delete rtnodes
178  * w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo   w/ dead rtinfo
179  *                                                    free dead rtinfos
180  *
181  * All deleting/flushing operations are serialized by netisr0, so each
182  * operation only reaps the route information marked dead by itself.
183  *
184  *
185  * Bridge route information adding/deleting/flushing:
186  * Since all operations are serialized by the fixed message flow between
187  * ifnet threads, it is not possible to create corrupted per-cpu route
188  * information.
189  *
190  *
191  *
192  * Percpu member interface list iteration with blocking operation:
193  * Since one bridge could only delete one member interface at a time and
194  * the deleted member interface is not freed after netmsg_service_sync(),
195  * following way is used to make sure that even if the certain member
196  * interface is ripped from the percpu list during the blocking operation,
197  * the iteration still could keep going:
198  *
199  * LIST_FOREACH_MUTABLE(bif, sc->sc_iflists[mycpuid], bif_next, nbif) {
200  *     blocking operation;
201  *     blocking operation;
202  *     ...
203  *     ...
204  *     if (nbif != NULL && !nbif->bif_onlist) {
205  *         KKASSERT(bif->bif_onlist);
206  *         nbif = LIST_NEXT(bif, bif_next);
207  *     }
208  * }
209  *
210  * As mentioned above only one member interface could be unlinked from the
211  * percpu member interface list, so either bif or nbif may not be on the list,
212  * but _not_ both.  To keep the list iteration, we don't care about bif, but
213  * only nbif.  Since removed member interface will only be freed after we
214  * finish our work, it is safe to access any field in an unlinked bif (here
215  * bif_onlist).  If nbif is no longer on the list, then bif must be on the
216  * list, so we change nbif to the next element of bif and keep going.
217  */
218 
219 #include "opt_inet.h"
220 #include "opt_inet6.h"
221 
222 #include <sys/param.h>
223 #include <sys/mbuf.h>
224 #include <sys/malloc.h>
225 #include <sys/protosw.h>
226 #include <sys/systm.h>
227 #include <sys/time.h>
228 #include <sys/socket.h> /* for net/if.h */
229 #include <sys/sockio.h>
230 #include <sys/ctype.h>  /* string functions */
231 #include <sys/kernel.h>
232 #include <sys/random.h>
233 #include <sys/sysctl.h>
234 #include <sys/module.h>
235 #include <sys/proc.h>
236 #include <sys/lock.h>
237 #include <sys/thread.h>
238 #include <sys/thread2.h>
239 #include <sys/mpipe.h>
240 
241 #include <net/bpf.h>
242 #include <net/if.h>
243 #include <net/if_dl.h>
244 #include <net/if_types.h>
245 #include <net/if_var.h>
246 #include <net/pfil.h>
247 #include <net/ifq_var.h>
248 #include <net/if_clone.h>
249 
250 #include <netinet/in.h> /* for struct arpcom */
251 #include <netinet/in_systm.h>
252 #include <netinet/in_var.h>
253 #include <netinet/ip.h>
254 #include <netinet/ip_var.h>
255 #ifdef INET6
256 #include <netinet/ip6.h>
257 #include <netinet6/ip6_var.h>
258 #endif
259 #include <netinet/if_ether.h> /* for struct arpcom */
260 #include <net/bridge/if_bridgevar.h>
261 #include <net/if_llc.h>
262 #include <net/netmsg2.h>
263 
264 #include <net/route.h>
265 #include <sys/in_cksum.h>
266 
267 /*
268  * Size of the route hash table.  Must be a power of two.
269  */
270 #ifndef BRIDGE_RTHASH_SIZE
271 #define	BRIDGE_RTHASH_SIZE		1024
272 #endif
273 
274 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
275 
276 /*
277  * Maximum number of addresses to cache.
278  */
279 #ifndef BRIDGE_RTABLE_MAX
280 #define	BRIDGE_RTABLE_MAX		100
281 #endif
282 
283 /*
284  * Spanning tree defaults.
285  */
286 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
287 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
288 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
289 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
290 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
291 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
292 #define	BSTP_DEFAULT_PATH_COST		55
293 
294 /*
295  * Timeout (in seconds) for entries learned dynamically.
296  */
297 #ifndef BRIDGE_RTABLE_TIMEOUT
298 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
299 #endif
300 
301 /*
302  * Number of seconds between walks of the route list.
303  */
304 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
305 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
306 #endif
307 
308 /*
309  * List of capabilities to mask on the member interface.
310  */
311 #define	BRIDGE_IFCAPS_MASK		IFCAP_TXCSUM
312 
313 typedef int	(*bridge_ctl_t)(struct bridge_softc *, void *);
314 
315 struct netmsg_brctl {
316 	struct netmsg		bc_nmsg;
317 	bridge_ctl_t		bc_func;
318 	struct bridge_softc	*bc_sc;
319 	void			*bc_arg;
320 };
321 
322 struct netmsg_brsaddr {
323 	struct netmsg		br_nmsg;
324 	struct bridge_softc	*br_softc;
325 	struct ifnet		*br_dst_if;
326 	struct bridge_rtinfo	*br_rtinfo;
327 	int			br_setflags;
328 	uint8_t			br_dst[ETHER_ADDR_LEN];
329 	uint8_t			br_flags;
330 };
331 
332 struct netmsg_braddbif {
333 	struct netmsg		br_nmsg;
334 	struct bridge_softc	*br_softc;
335 	struct bridge_ifinfo	*br_bif_info;
336 	struct ifnet		*br_bif_ifp;
337 };
338 
339 struct netmsg_brdelbif {
340 	struct netmsg		br_nmsg;
341 	struct bridge_softc	*br_softc;
342 	struct bridge_ifinfo	*br_bif_info;
343 	struct bridge_iflist_head *br_bif_list;
344 };
345 
346 struct netmsg_brsflags {
347 	struct netmsg		br_nmsg;
348 	struct bridge_softc	*br_softc;
349 	struct bridge_ifinfo	*br_bif_info;
350 	uint32_t		br_bif_flags;
351 };
352 
353 eventhandler_tag	bridge_detach_cookie = NULL;
354 
355 extern	struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
356 extern	int (*bridge_output_p)(struct ifnet *, struct mbuf *);
357 extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
358 
359 static int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
360 
361 static int	bridge_clone_create(struct if_clone *, int);
362 static void	bridge_clone_destroy(struct ifnet *);
363 
364 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
365 static void	bridge_mutecaps(struct bridge_ifinfo *, struct ifnet *, int);
366 static void	bridge_ifdetach(void *, struct ifnet *);
367 static void	bridge_init(void *);
368 static void	bridge_stop(struct ifnet *);
369 static void	bridge_start(struct ifnet *);
370 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
371 static int	bridge_output(struct ifnet *, struct mbuf *);
372 
373 static void	bridge_forward(struct bridge_softc *, struct mbuf *m);
374 
375 static void	bridge_timer_handler(struct netmsg *);
376 static void	bridge_timer(void *);
377 
378 static void	bridge_start_bcast(struct bridge_softc *, struct mbuf *);
379 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
380 		    struct mbuf *);
381 static void	bridge_span(struct bridge_softc *, struct mbuf *);
382 
383 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
384 		    struct ifnet *, uint8_t);
385 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
386 static void	bridge_rtreap(struct bridge_softc *);
387 static void	bridge_rtreap_async(struct bridge_softc *);
388 static void	bridge_rttrim(struct bridge_softc *);
389 static int	bridge_rtage_finddead(struct bridge_softc *);
390 static void	bridge_rtage(struct bridge_softc *);
391 static void	bridge_rtflush(struct bridge_softc *, int);
392 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
393 static int	bridge_rtsaddr(struct bridge_softc *, const uint8_t *,
394 		    struct ifnet *, uint8_t);
395 static void	bridge_rtmsg_sync(struct bridge_softc *sc);
396 static void	bridge_rtreap_handler(struct netmsg *);
397 static void	bridge_rtinstall_handler(struct netmsg *);
398 static int	bridge_rtinstall_oncpu(struct bridge_softc *, const uint8_t *,
399 		    struct ifnet *, int, uint8_t, struct bridge_rtinfo **);
400 
401 static void	bridge_rtable_init(struct bridge_softc *);
402 static void	bridge_rtable_fini(struct bridge_softc *);
403 
404 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
405 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
406 		    const uint8_t *);
407 static void	bridge_rtnode_insert(struct bridge_softc *,
408 		    struct bridge_rtnode *);
409 static void	bridge_rtnode_destroy(struct bridge_softc *,
410 		    struct bridge_rtnode *);
411 
412 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
413 		    const char *name);
414 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
415 		    struct ifnet *ifp);
416 static struct bridge_iflist *bridge_lookup_member_ifinfo(struct bridge_softc *,
417 		    struct bridge_ifinfo *);
418 static void	bridge_delete_member(struct bridge_softc *,
419 		    struct bridge_iflist *, int);
420 static void	bridge_delete_span(struct bridge_softc *,
421 		    struct bridge_iflist *);
422 
423 static int	bridge_control(struct bridge_softc *, u_long,
424 			       bridge_ctl_t, void *);
425 static int	bridge_ioctl_init(struct bridge_softc *, void *);
426 static int	bridge_ioctl_stop(struct bridge_softc *, void *);
427 static int	bridge_ioctl_add(struct bridge_softc *, void *);
428 static int	bridge_ioctl_del(struct bridge_softc *, void *);
429 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
430 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
431 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
432 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
433 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
434 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
435 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
436 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
437 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
438 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
439 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
440 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
441 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
442 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
443 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
444 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
445 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
446 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
447 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
448 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
449 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
450 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
451 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
452 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
453 		    int);
454 static int	bridge_ip_checkbasic(struct mbuf **mp);
455 #ifdef INET6
456 static int	bridge_ip6_checkbasic(struct mbuf **mp);
457 #endif /* INET6 */
458 static int	bridge_fragment(struct ifnet *, struct mbuf *,
459 		    struct ether_header *, int, struct llc *);
460 static void	bridge_enqueue_handler(struct netmsg *);
461 static void	bridge_handoff(struct ifnet *, struct mbuf *);
462 
463 static void	bridge_del_bif_handler(struct netmsg *);
464 static void	bridge_add_bif_handler(struct netmsg *);
465 static void	bridge_set_bifflags_handler(struct netmsg *);
466 static void	bridge_del_bif(struct bridge_softc *, struct bridge_ifinfo *,
467 		    struct bridge_iflist_head *);
468 static void	bridge_add_bif(struct bridge_softc *, struct bridge_ifinfo *,
469 		    struct ifnet *);
470 static void	bridge_set_bifflags(struct bridge_softc *,
471 		    struct bridge_ifinfo *, uint32_t);
472 
473 SYSCTL_DECL(_net_link);
474 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
475 
476 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
477 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
478 static int pfil_member = 1; /* run pfil hooks on the member interface */
479 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
480     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
481 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
482     &pfil_bridge, 0, "Packet filter on the bridge interface");
483 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
484     &pfil_member, 0, "Packet filter on the member interface");
485 
486 struct bridge_control_arg {
487 	union {
488 		struct ifbreq ifbreq;
489 		struct ifbifconf ifbifconf;
490 		struct ifbareq ifbareq;
491 		struct ifbaconf ifbaconf;
492 		struct ifbrparam ifbrparam;
493 	} bca_u;
494 	int	bca_len;
495 	void	*bca_uptr;
496 	void	*bca_kptr;
497 };
498 
499 struct bridge_control {
500 	bridge_ctl_t	bc_func;
501 	int		bc_argsize;
502 	int		bc_flags;
503 };
504 
505 #define	BC_F_COPYIN		0x01	/* copy arguments in */
506 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
507 #define	BC_F_SUSER		0x04	/* do super-user check */
508 
509 const struct bridge_control bridge_control_table[] = {
510 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
511 	  BC_F_COPYIN|BC_F_SUSER },
512 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
513 	  BC_F_COPYIN|BC_F_SUSER },
514 
515 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
516 	  BC_F_COPYIN|BC_F_COPYOUT },
517 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
518 	  BC_F_COPYIN|BC_F_SUSER },
519 
520 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
521 	  BC_F_COPYIN|BC_F_SUSER },
522 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
523 	  BC_F_COPYOUT },
524 
525 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
526 	  BC_F_COPYIN|BC_F_COPYOUT },
527 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
528 	  BC_F_COPYIN|BC_F_COPYOUT },
529 
530 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
531 	  BC_F_COPYIN|BC_F_SUSER },
532 
533 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
534 	  BC_F_COPYIN|BC_F_SUSER },
535 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
536 	  BC_F_COPYOUT },
537 
538 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
539 	  BC_F_COPYIN|BC_F_SUSER },
540 
541 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
542 	  BC_F_COPYIN|BC_F_SUSER },
543 
544 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
545 	  BC_F_COPYOUT },
546 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
547 	  BC_F_COPYIN|BC_F_SUSER },
548 
549 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
550 	  BC_F_COPYOUT },
551 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
552 	  BC_F_COPYIN|BC_F_SUSER },
553 
554 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
555 	  BC_F_COPYOUT },
556 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
557 	  BC_F_COPYIN|BC_F_SUSER },
558 
559 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
560 	  BC_F_COPYOUT },
561 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
562 	  BC_F_COPYIN|BC_F_SUSER },
563 
564 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
565 	  BC_F_COPYIN|BC_F_SUSER },
566 
567 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
568 	  BC_F_COPYIN|BC_F_SUSER },
569 
570 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
571 	  BC_F_COPYIN|BC_F_SUSER },
572 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
573 	  BC_F_COPYIN|BC_F_SUSER },
574 };
575 static const int bridge_control_table_size =
576     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
577 
578 LIST_HEAD(, bridge_softc) bridge_list;
579 
580 struct if_clone bridge_cloner = IF_CLONE_INITIALIZER("bridge",
581 				bridge_clone_create,
582 				bridge_clone_destroy, 0, IF_MAXUNIT);
583 
584 static int
585 bridge_modevent(module_t mod, int type, void *data)
586 {
587 	switch (type) {
588 	case MOD_LOAD:
589 		LIST_INIT(&bridge_list);
590 		if_clone_attach(&bridge_cloner);
591 		bridge_input_p = bridge_input;
592 		bridge_output_p = bridge_output;
593 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
594 		    ifnet_detach_event, bridge_ifdetach, NULL,
595 		    EVENTHANDLER_PRI_ANY);
596 #if notyet
597 		bstp_linkstate_p = bstp_linkstate;
598 #endif
599 		break;
600 	case MOD_UNLOAD:
601 		if (!LIST_EMPTY(&bridge_list))
602 			return (EBUSY);
603 		EVENTHANDLER_DEREGISTER(ifnet_detach_event,
604 		    bridge_detach_cookie);
605 		if_clone_detach(&bridge_cloner);
606 		bridge_input_p = NULL;
607 		bridge_output_p = NULL;
608 #if notyet
609 		bstp_linkstate_p = NULL;
610 #endif
611 		break;
612 	default:
613 		return (EOPNOTSUPP);
614 	}
615 	return (0);
616 }
617 
/*
 * Module glue: run bridge_modevent() at module load/unload time.
 * Fields are name, event handler, private data (unused here).
 */
static moduledata_t bridge_mod = {
	"if_bridge",
	bridge_modevent,
	0
};

DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
625 
626 
627 /*
628  * bridge_clone_create:
629  *
630  *	Create a new bridge instance.
631  */
/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance.
 *
 *	Allocates and initializes the softc (route table, timers,
 *	per-cpu member lists), attaches the ifnet with a random
 *	locally-administered ethernet address, and links the new
 *	bridge onto the global bridge_list.  Returns 0.
 */
static int
bridge_clone_create(struct if_clone *ifc, int unit)
{
	struct bridge_softc *sc;
	struct ifnet *ifp;
	u_char eaddr[6];
	int cpu, rnd;

	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	ifp = sc->sc_ifp = &sc->sc_if;

	/* Route-table limits and spanning-tree protocol defaults. */
	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;

	/* Initialize our routing table. */
	bridge_rtable_init(sc);

	/*
	 * Route-ageing timer.  The netmsg is MSGF_DROPABLE so a
	 * queued-but-unprocessed tick can be discarded at teardown.
	 */
	callout_init(&sc->sc_brcallout);
	netmsg_init(&sc->sc_brtimemsg, &netisr_adone_rport,
		    MSGF_DROPABLE, bridge_timer_handler);
	sc->sc_brtimemsg.nm_lmsg.u.ms_resultp = sc;

	/* Spanning-tree tick timer, same scheme as above. */
	callout_init(&sc->sc_bstpcallout);
	netmsg_init(&sc->sc_bstptimemsg, &netisr_adone_rport,
		    MSGF_DROPABLE, bstp_tick_handler);
	sc->sc_bstptimemsg.nm_lmsg.u.ms_resultp = sc;

	/* Initialize per-cpu member iface lists */
	sc->sc_iflists = kmalloc(sizeof(*sc->sc_iflists) * ncpus,
				 M_DEVBUF, M_WAITOK);
	for (cpu = 0; cpu < ncpus; ++cpu)
		LIST_INIT(&sc->sc_iflists[cpu]);

	LIST_INIT(&sc->sc_spanlist);

	ifp->if_softc = sc;
	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_ioctl = bridge_ioctl;
	ifp->if_start = bridge_start;
	ifp->if_init = bridge_init;
	ifp->if_type = IFT_BRIDGE;
	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifq_set_ready(&ifp->if_snd);
	ifp->if_hdrlen = ETHER_HDR_LEN;

	/*
	 * Generate a random ethernet address.  The two overlapping
	 * 4-byte copies fill all ETHER_ADDR_LEN (6) bytes.
	 * NOTE(review): an earlier comment claimed the private
	 * AC:DE:48 OUI is used, but no OUI is installed here -- only
	 * the locally-administered bit is set below.
	 */
	rnd = karc4random();
	bcopy(&rnd, &eaddr[0], 4); /* ETHER_ADDR_LEN == 6 */
	rnd = karc4random();
	bcopy(&rnd, &eaddr[2], 4); /* ETHER_ADDR_LEN == 6 */

	eaddr[0] &= ~1;	/* clear multicast bit */
	eaddr[0] |= 2;	/* set the LAA bit */

	ether_ifattach(ifp, eaddr, NULL);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	/* restore IFT_BRIDGE, presumably overwritten by ether_ifattach() */
	ifp->if_type = IFT_BRIDGE;

	crit_enter();	/* XXX MP */
	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
	crit_exit();

	return (0);
}
708 
709 static void
710 bridge_delete_dispatch(struct netmsg *nmsg)
711 {
712 	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
713 	struct bridge_softc *sc = lmsg->u.ms_resultp;
714 	struct ifnet *bifp = sc->sc_ifp;
715 	struct bridge_iflist *bif;
716 
717 	lwkt_serialize_enter(bifp->if_serializer);
718 
719 	while ((bif = LIST_FIRST(&sc->sc_iflists[mycpuid])) != NULL)
720 		bridge_delete_member(sc, bif, 0);
721 
722 	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL)
723 		bridge_delete_span(sc, bif);
724 
725 	lwkt_serialize_exit(bifp->if_serializer);
726 
727 	lwkt_replymsg(lmsg, 0);
728 }
729 
730 /*
731  * bridge_clone_destroy:
732  *
733  *	Destroy a bridge instance.
734  */
/*
 * bridge_clone_destroy:
 *
 *	Destroy a bridge instance.
 *
 *	Stops the interface, synchronously deletes all member/span
 *	interfaces via a netmsg to BRIDGE_CFGPORT, unlinks the bridge
 *	from the global list, detaches the ifnet, and frees the route
 *	table, the per-cpu member lists, and the softc.
 */
static void
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct lwkt_msg *lmsg;
	struct netmsg nmsg;

	lwkt_serialize_enter(ifp->if_serializer);

	bridge_stop(ifp);
	ifp->if_flags &= ~IFF_UP;

	lwkt_serialize_exit(ifp->if_serializer);

	/*
	 * Member/span teardown runs in the config thread;
	 * lwkt_domsg() blocks until bridge_delete_dispatch() replies.
	 */
	netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_delete_dispatch);
	lmsg = &nmsg.nm_lmsg;
	lmsg->u.ms_resultp = sc;
	lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);

	crit_enter();	/* XXX MP */
	LIST_REMOVE(sc, sc_list);
	crit_exit();

	ether_ifdetach(ifp);

	/* Tear down the routing table. */
	bridge_rtable_fini(sc);

	/* Free per-cpu member iface lists */
	kfree(sc->sc_iflists, M_DEVBUF);

	kfree(sc, M_DEVBUF);
}
768 
769 /*
770  * bridge_ioctl:
771  *
772  *	Handle a control request from the operator.
773  */
/*
 * bridge_ioctl:
 *
 *	Handle a control request from the operator.
 *
 *	SIOC[GS]DRVSPEC sub-commands are demultiplexed through
 *	bridge_control_table[] using ifd->ifd_cmd as the index; the
 *	BC_F_* flags of the selected entry drive argument copyin,
 *	copyout, and the super-user check.
 */
static int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_control_arg args;
	struct ifdrv *ifd = (struct ifdrv *) data;
	const struct bridge_control *bc;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Accepted with no further processing. */
		break;

	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* Validate the sub-command index before the table lookup. */
		if (ifd->ifd_cmd >= bridge_control_table_size) {
			error = EINVAL;
			break;
		}
		bc = &bridge_control_table[ifd->ifd_cmd];

		/*
		 * "get" requests must map to a copyout-capable entry
		 * and "set" requests must not; otherwise the
		 * sub-command was issued through the wrong ioctl.
		 */
		if (cmd == SIOCGDRVSPEC &&
		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
			error = EINVAL;
			break;
		} else if (cmd == SIOCSDRVSPEC &&
			   (bc->bc_flags & BC_F_COPYOUT)) {
			error = EINVAL;
			break;
		}

		if (bc->bc_flags & BC_F_SUSER) {
			error = suser_cred(cr, NULL_CRED_OKAY);
			if (error)
				break;
		}

		/* The user buffer must match the entry's argument size. */
		if (ifd->ifd_len != bc->bc_argsize ||
		    ifd->ifd_len > sizeof(args.bca_u)) {
			error = EINVAL;
			break;
		}

		memset(&args, 0, sizeof(args));
		if (bc->bc_flags & BC_F_COPYIN) {
			error = copyin(ifd->ifd_data, &args.bca_u,
				       ifd->ifd_len);
			if (error)
				break;
		}

		error = bridge_control(sc, cmd, bc->bc_func, &args);
		if (error) {
			/* On error no extra result buffer may be left. */
			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
			break;
		}

		if (bc->bc_flags & BC_F_COPYOUT) {
			/* Fixed-size result first ... */
			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
			if (args.bca_len != 0) {
				/*
				 * ... then the variable-length buffer the
				 * handler allocated; free it regardless of
				 * whether the copyout succeeded.
				 */
				KKASSERT(args.bca_kptr != NULL);
				if (!error) {
					error = copyout(args.bca_kptr,
						args.bca_uptr, args.bca_len);
				}
				kfree(args.bca_kptr, M_TEMP);
			} else {
				KKASSERT(args.bca_kptr == NULL);
			}
		} else {
			KKASSERT(args.bca_len == 0 && args.bca_kptr == NULL);
		}
		break;

	case SIOCSIFFLAGS:
		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop it.
			 */
			bridge_stop(ifp);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_flags & IFF_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ifp->if_init(sc);
		}
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed on the bridge */
		error = EINVAL;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
880 
881 /*
882  * bridge_mutecaps:
883  *
884  *	Clear or restore unwanted capabilities on the member interface
885  */
886 static void
887 bridge_mutecaps(struct bridge_ifinfo *bif_info, struct ifnet *ifp, int mute)
888 {
889 	struct ifreq ifr;
890 	int error;
891 
892 	if (ifp->if_ioctl == NULL)
893 		return;
894 
895 	bzero(&ifr, sizeof(ifr));
896 	ifr.ifr_reqcap = ifp->if_capenable;
897 
898 	if (mute) {
899 		/* mask off and save capabilities */
900 		bif_info->bifi_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
901 		if (bif_info->bifi_mutecap != 0)
902 			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
903 	} else {
904 		/* restore muted capabilities */
905 		ifr.ifr_reqcap |= bif_info->bifi_mutecap;
906 	}
907 
908 	if (bif_info->bifi_mutecap != 0) {
909 		lwkt_serialize_enter(ifp->if_serializer);
910 		error = ifp->if_ioctl(ifp, SIOCSIFCAP, (caddr_t)&ifr, NULL);
911 		lwkt_serialize_exit(ifp->if_serializer);
912 	}
913 }
914 
915 /*
916  * bridge_lookup_member:
917  *
918  *	Lookup a bridge member interface.
919  */
920 static struct bridge_iflist *
921 bridge_lookup_member(struct bridge_softc *sc, const char *name)
922 {
923 	struct bridge_iflist *bif;
924 
925 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
926 		if (strcmp(bif->bif_ifp->if_xname, name) == 0)
927 			return (bif);
928 	}
929 	return (NULL);
930 }
931 
932 /*
933  * bridge_lookup_member_if:
934  *
935  *	Lookup a bridge member interface by ifnet*.
936  */
937 static struct bridge_iflist *
938 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
939 {
940 	struct bridge_iflist *bif;
941 
942 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
943 		if (bif->bif_ifp == member_ifp)
944 			return (bif);
945 	}
946 	return (NULL);
947 }
948 
949 /*
950  * bridge_lookup_member_ifinfo:
951  *
952  *	Lookup a bridge member interface by bridge_ifinfo.
953  */
954 static struct bridge_iflist *
955 bridge_lookup_member_ifinfo(struct bridge_softc *sc,
956 			    struct bridge_ifinfo *bif_info)
957 {
958 	struct bridge_iflist *bif;
959 
960 	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
961 		if (bif->bif_info == bif_info)
962 			return (bif);
963 	}
964 	return (NULL);
965 }
966 
967 /*
968  * bridge_delete_member:
969  *
970  *	Delete the specified member interface.
971  */
972 static void
973 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
974     int gone)
975 {
976 	struct ifnet *ifs = bif->bif_ifp;
977 	struct ifnet *bifp = sc->sc_ifp;
978 	struct bridge_ifinfo *bif_info = bif->bif_info;
979 	struct bridge_iflist_head saved_bifs;
980 
981 	ASSERT_SERIALIZED(bifp->if_serializer);
982 	KKASSERT(bif_info != NULL);
983 
984 	ifs->if_bridge = NULL;
985 
986 	/*
987 	 * Release bridge interface's serializer:
988 	 * - To avoid possible dead lock.
989 	 * - Various sync operation will block the current thread.
990 	 */
991 	lwkt_serialize_exit(bifp->if_serializer);
992 
993 	if (!gone) {
994 		switch (ifs->if_type) {
995 		case IFT_ETHER:
996 		case IFT_L2VLAN:
997 			/*
998 			 * Take the interface out of promiscuous mode.
999 			 */
1000 			ifpromisc(ifs, 0);
1001 			bridge_mutecaps(bif_info, ifs, 0);
1002 			break;
1003 
1004 		case IFT_GIF:
1005 			break;
1006 
1007 		default:
1008 			panic("bridge_delete_member: impossible");
1009 			break;
1010 		}
1011 	}
1012 
1013 	/*
1014 	 * Remove bifs from percpu linked list.
1015 	 *
1016 	 * Removed bifs are not freed immediately, instead,
1017 	 * they are saved in saved_bifs.  They will be freed
1018 	 * after we make sure that no one is accessing them,
1019 	 * i.e. after following netmsg_service_sync()
1020 	 */
1021 	LIST_INIT(&saved_bifs);
1022 	bridge_del_bif(sc, bif_info, &saved_bifs);
1023 
1024 	/*
1025 	 * Make sure that all protocol threads:
1026 	 * o  see 'ifs' if_bridge is changed
1027 	 * o  know that bif is removed from the percpu linked list
1028 	 */
1029 	netmsg_service_sync();
1030 
1031 	/*
1032 	 * Free the removed bifs
1033 	 */
1034 	KKASSERT(!LIST_EMPTY(&saved_bifs));
1035 	while ((bif = LIST_FIRST(&saved_bifs)) != NULL) {
1036 		LIST_REMOVE(bif, bif_next);
1037 		kfree(bif, M_DEVBUF);
1038 	}
1039 
1040 	/* See the comment in bridge_ioctl_stop() */
1041 	bridge_rtmsg_sync(sc);
1042 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL | IFBF_FLUSHSYNC);
1043 
1044 	lwkt_serialize_enter(bifp->if_serializer);
1045 
1046 	if (bifp->if_flags & IFF_RUNNING)
1047 		bstp_initialization(sc);
1048 
1049 	/*
1050 	 * Free the bif_info after bstp_initialization(), so that
1051 	 * bridge_softc.sc_root_port will not reference a dangling
1052 	 * pointer.
1053 	 */
1054 	kfree(bif_info, M_DEVBUF);
1055 }
1056 
1057 /*
1058  * bridge_delete_span:
1059  *
1060  *	Delete the specified span interface.
1061  */
1062 static void
1063 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1064 {
1065 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1066 	    ("%s: not a span interface", __func__));
1067 
1068 	LIST_REMOVE(bif, bif_next);
1069 	kfree(bif, M_DEVBUF);
1070 }
1071 
1072 static int
1073 bridge_ioctl_init(struct bridge_softc *sc, void *arg __unused)
1074 {
1075 	struct ifnet *ifp = sc->sc_ifp;
1076 
1077 	if (ifp->if_flags & IFF_RUNNING)
1078 		return 0;
1079 
1080 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1081 	    bridge_timer, sc);
1082 
1083 	ifp->if_flags |= IFF_RUNNING;
1084 	bstp_initialization(sc);
1085 	return 0;
1086 }
1087 
/*
 * bridge_ioctl_stop:
 *
 *	Bring the bridge down: cancel timers, drop any pending timer
 *	message, stop STP and flush dynamic routes.
 */
static int
bridge_ioctl_stop(struct bridge_softc *sc, void *arg __unused)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lwkt_msg *lmsg;

	/* Nothing to do if the bridge is already down. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Stop the periodic address-cache pruning timer. */
	callout_stop(&sc->sc_brcallout);

	crit_enter();
	lmsg = &sc->sc_brtimemsg.nm_lmsg;
	if ((lmsg->ms_flags & MSGF_DONE) == 0) {
		/* Pending to be processed; drop it */
		lwkt_dropmsg(lmsg);
	}
	crit_exit();

	bstp_stop(sc);

	ifp->if_flags &= ~IFF_RUNNING;

	/*
	 * The sync operations below block; release the serializer
	 * (held by our caller) across them.
	 */
	lwkt_serialize_exit(ifp->if_serializer);

	/* Let everyone know that we are stopped */
	netmsg_service_sync();

	/*
	 * Sync ifnetX msgports in the order we forward rtnode
	 * installation message.  This is used to make sure that
	 * all rtnode installation messages sent by bridge_rtupdate()
	 * during above netmsg_service_sync() are flushed.
	 */
	bridge_rtmsg_sync(sc);
	bridge_rtflush(sc, IFBF_FLUSHDYN | IFBF_FLUSHSYNC);

	lwkt_serialize_enter(ifp->if_serializer);
	return 0;
}
1128 
/*
 * bridge_ioctl_add:
 *
 *	Add the named interface as a member of this bridge.
 */
static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
	struct bridge_ifinfo *bif_info;
	struct ifnet *ifs, *bifp;
	int error = 0;

	bifp = sc->sc_ifp;
	ASSERT_SERIALIZED(bifp->if_serializer);

	/* Locate the interface to be added by name. */
	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	/* If it's in the span list, it can't be a member. */
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	/*
	 * Allow the first Ethernet member to define the MTU.
	 * NOTE(review): the bridge MTU is updated before the
	 * membership checks below; if the add fails later the new
	 * MTU remains in place — confirm this is intended.
	 */
	if (ifs->if_type != IFT_GIF) {
		if (LIST_EMPTY(&sc->sc_iflists[mycpuid])) {
			bifp->if_mtu = ifs->if_mtu;
		} else if (bifp->if_mtu != ifs->if_mtu) {
			if_printf(bifp, "invalid MTU for %s\n", ifs->if_xname);
			return (EINVAL);
		}
	}

	/* Already a member of this bridge? */
	if (ifs->if_bridge == sc)
		return (EEXIST);

	/* A member of some other bridge? */
	if (ifs->if_bridge != NULL)
		return (EBUSY);

	bif_info = kmalloc(sizeof(*bif_info), M_DEVBUF, M_WAITOK | M_ZERO);
	bif_info->bifi_priority = BSTP_DEFAULT_PORT_PRIORITY;
	bif_info->bifi_path_cost = BSTP_DEFAULT_PATH_COST;
	bif_info->bifi_ifp = ifs;

	/*
	 * Release bridge interface's serializer:
	 * - To avoid possible dead lock.
	 * - Various sync operation will block the current thread.
	 */
	lwkt_serialize_exit(bifp->if_serializer);

	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_L2VLAN:
		/*
		 * Place the interface into promiscuous mode.
		 */
		error = ifpromisc(ifs, 1);
		if (error) {
			lwkt_serialize_enter(bifp->if_serializer);
			goto out;
		}
		/* Mask off capabilities the bridge cannot handle. */
		bridge_mutecaps(bif_info, ifs, 1);
		break;

	case IFT_GIF: /* :^) */
		break;

	default:
		error = EINVAL;
		lwkt_serialize_enter(bifp->if_serializer);
		goto out;
	}

	/*
	 * Add bifs to percpu linked lists
	 */
	bridge_add_bif(sc, bif_info, ifs);

	lwkt_serialize_enter(bifp->if_serializer);

	if (bifp->if_flags & IFF_RUNNING)
		bstp_initialization(sc);
	else
		bstp_stop(sc);

	/*
	 * Everything has been setup, so let the member interface
	 * deliver packets to this bridge on its input/output path.
	 */
	ifs->if_bridge = sc;
out:
	if (error) {
		/* bif_info was never published; safe to free here. */
		if (bif_info != NULL)
			kfree(bif_info, M_DEVBUF);
	}
	return (error);
}
1225 
1226 static int
1227 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1228 {
1229 	struct ifbreq *req = arg;
1230 	struct bridge_iflist *bif;
1231 
1232 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1233 	if (bif == NULL)
1234 		return (ENOENT);
1235 
1236 	bridge_delete_member(sc, bif, 0);
1237 
1238 	return (0);
1239 }
1240 
1241 static int
1242 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1243 {
1244 	struct ifbreq *req = arg;
1245 	struct bridge_iflist *bif;
1246 
1247 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1248 	if (bif == NULL)
1249 		return (ENOENT);
1250 
1251 	req->ifbr_ifsflags = bif->bif_flags;
1252 	req->ifbr_state = bif->bif_state;
1253 	req->ifbr_priority = bif->bif_priority;
1254 	req->ifbr_path_cost = bif->bif_path_cost;
1255 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1256 
1257 	return (0);
1258 }
1259 
/*
 * bridge_ioctl_sifflags:
 *
 *	Set a member interface's flags (e.g. IFBIF_STP, IFBIF_LEARNING).
 */
static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
	struct ifnet *bifp = sc->sc_ifp;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	if (req->ifbr_ifsflags & IFBIF_SPAN) {
		/* SPAN is readonly */
		return (EINVAL);
	}

	/* Only Ethernet interfaces may participate in spanning tree. */
	if (req->ifbr_ifsflags & IFBIF_STP) {
		switch (bif->bif_ifp->if_type) {
		case IFT_ETHER:
			/* These can do spanning tree. */
			break;

		default:
			/* Nothing else can. */
			return (EINVAL);
		}
	}

	/*
	 * bridge_set_bifflags() blocks while propagating the flags;
	 * release the bridge serializer around it.
	 */
	lwkt_serialize_exit(bifp->if_serializer);
	bridge_set_bifflags(sc, bif->bif_info, req->ifbr_ifsflags);
	lwkt_serialize_enter(bifp->if_serializer);

	/* Flag changes can affect STP; recompute if running. */
	if (bifp->if_flags & IFF_RUNNING)
		bstp_initialization(sc);

	return (0);
}
1297 
1298 static int
1299 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1300 {
1301 	struct ifbrparam *param = arg;
1302 	struct ifnet *ifp = sc->sc_ifp;
1303 
1304 	sc->sc_brtmax = param->ifbrp_csize;
1305 
1306 	lwkt_serialize_exit(ifp->if_serializer);
1307 	bridge_rttrim(sc);
1308 	lwkt_serialize_enter(ifp->if_serializer);
1309 
1310 	return (0);
1311 }
1312 
1313 static int
1314 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1315 {
1316 	struct ifbrparam *param = arg;
1317 
1318 	param->ifbrp_csize = sc->sc_brtmax;
1319 
1320 	return (0);
1321 }
1322 
/*
 * bridge_ioctl_gifs:
 *
 *	Report the list of member and span interfaces to userland.
 *	With ifbic_len == 0 the caller is only probing for the
 *	required buffer size.
 */
static int
bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
{
	struct bridge_control_arg *bc_arg = arg;
	struct ifbifconf *bifc = arg;
	struct bridge_iflist *bif;
	struct ifbreq *breq;
	int count, len;

	/* Count members plus span ports to size the reply. */
	count = 0;
	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next)
		count++;
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		count++;

	if (bifc->ifbic_len == 0) {
		/* Size probe: report the space needed, copy nothing. */
		bifc->ifbic_len = sizeof(*breq) * count;
		return 0;
	} else if (count == 0 || bifc->ifbic_len < sizeof(*breq)) {
		/* Nothing to report, or no room for even one entry. */
		bifc->ifbic_len = 0;
		return 0;
	}

	len = min(bifc->ifbic_len, sizeof(*breq) * count);
	KKASSERT(len >= sizeof(*breq));

	/*
	 * M_NULLOK: the length is user-influenced; fail with ENOMEM
	 * instead of panicking on an oversized allocation.
	 */
	breq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
	if (breq == NULL) {
		bifc->ifbic_len = 0;
		return ENOMEM;
	}
	/* Ownership of the buffer passes to the ioctl copyout path. */
	bc_arg->bca_kptr = breq;

	/* Fill entries for regular members ... */
	count = 0;
	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
		if (len < sizeof(*breq))
			break;

		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
			sizeof(breq->ifbr_ifsname));
		breq->ifbr_ifsflags = bif->bif_flags;
		breq->ifbr_state = bif->bif_state;
		breq->ifbr_priority = bif->bif_priority;
		breq->ifbr_path_cost = bif->bif_path_cost;
		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
		breq++;
		count++;
		len -= sizeof(*breq);
	}
	/* ... and for span ports (no STP state to report). */
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		if (len < sizeof(*breq))
			break;

		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
			sizeof(breq->ifbr_ifsname));
		breq->ifbr_ifsflags = bif->bif_flags;
		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
		breq++;
		count++;
		len -= sizeof(*breq);
	}

	/* Report how many bytes will actually be copied out. */
	bifc->ifbic_len = sizeof(*breq) * count;
	KKASSERT(bifc->ifbic_len > 0);

	bc_arg->bca_len = bifc->ifbic_len;
	bc_arg->bca_uptr = bifc->ifbic_req;
	return 0;
}
1392 
/*
 * bridge_ioctl_rts:
 *
 *	Report the learned address (routing) table to userland.
 *	With ifbac_len == 0 the caller is only probing for the
 *	required buffer size.
 */
static int
bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
{
	struct bridge_control_arg *bc_arg = arg;
	struct ifbaconf *bac = arg;
	struct bridge_rtnode *brt;
	struct ifbareq *bareq;
	int count, len;

	/* Count this CPU's route entries to size the reply. */
	count = 0;
	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list)
		count++;

	if (bac->ifbac_len == 0) {
		/* Size probe: report the space needed, copy nothing. */
		bac->ifbac_len = sizeof(*bareq) * count;
		return 0;
	} else if (count == 0 || bac->ifbac_len < sizeof(*bareq)) {
		bac->ifbac_len = 0;
		return 0;
	}

	len = min(bac->ifbac_len, sizeof(*bareq) * count);
	KKASSERT(len >= sizeof(*bareq));

	/*
	 * M_NULLOK: the length is user-influenced; fail with ENOMEM
	 * instead of panicking on an oversized allocation.
	 */
	bareq = kmalloc(len, M_TEMP, M_WAITOK | M_NULLOK | M_ZERO);
	if (bareq == NULL) {
		bac->ifbac_len = 0;
		return ENOMEM;
	}
	/* Ownership of the buffer passes to the ioctl copyout path. */
	bc_arg->bca_kptr = bareq;

	count = 0;
	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
		struct bridge_rtinfo *bri = brt->brt_info;
		unsigned long expire;

		if (len < sizeof(*bareq))
			break;

		strlcpy(bareq->ifba_ifsname, bri->bri_ifp->if_xname,
			sizeof(bareq->ifba_ifsname));
		memcpy(bareq->ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
		/* Report remaining lifetime for dynamic entries only. */
		expire = bri->bri_expire;
		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
		    time_second < expire)
			bareq->ifba_expire = expire - time_second;
		else
			bareq->ifba_expire = 0;
		bareq->ifba_flags = bri->bri_flags;
		bareq++;
		count++;
		len -= sizeof(*bareq);
	}

	bac->ifbac_len = sizeof(*bareq) * count;
	KKASSERT(bac->ifbac_len > 0);

	bc_arg->bca_len = bac->ifbac_len;
	bc_arg->bca_uptr = bac->ifbac_req;
	return 0;
}
1454 
1455 static int
1456 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1457 {
1458 	struct ifbareq *req = arg;
1459 	struct bridge_iflist *bif;
1460 	struct ifnet *ifp = sc->sc_ifp;
1461 	int error;
1462 
1463 	ASSERT_SERIALIZED(ifp->if_serializer);
1464 
1465 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1466 	if (bif == NULL)
1467 		return (ENOENT);
1468 
1469 	lwkt_serialize_exit(ifp->if_serializer);
1470 	error = bridge_rtsaddr(sc, req->ifba_dst, bif->bif_ifp,
1471 			       req->ifba_flags);
1472 	lwkt_serialize_enter(ifp->if_serializer);
1473 	return (error);
1474 }
1475 
1476 static int
1477 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1478 {
1479 	struct ifbrparam *param = arg;
1480 
1481 	sc->sc_brttimeout = param->ifbrp_ctime;
1482 
1483 	return (0);
1484 }
1485 
1486 static int
1487 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1488 {
1489 	struct ifbrparam *param = arg;
1490 
1491 	param->ifbrp_ctime = sc->sc_brttimeout;
1492 
1493 	return (0);
1494 }
1495 
1496 static int
1497 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1498 {
1499 	struct ifbareq *req = arg;
1500 	struct ifnet *ifp = sc->sc_ifp;
1501 	int error;
1502 
1503 	lwkt_serialize_exit(ifp->if_serializer);
1504 	error = bridge_rtdaddr(sc, req->ifba_dst);
1505 	lwkt_serialize_enter(ifp->if_serializer);
1506 	return error;
1507 }
1508 
1509 static int
1510 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1511 {
1512 	struct ifbreq *req = arg;
1513 	struct ifnet *ifp = sc->sc_ifp;
1514 
1515 	lwkt_serialize_exit(ifp->if_serializer);
1516 	bridge_rtflush(sc, req->ifbr_ifsflags | IFBF_FLUSHSYNC);
1517 	lwkt_serialize_enter(ifp->if_serializer);
1518 
1519 	return (0);
1520 }
1521 
1522 static int
1523 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1524 {
1525 	struct ifbrparam *param = arg;
1526 
1527 	param->ifbrp_prio = sc->sc_bridge_priority;
1528 
1529 	return (0);
1530 }
1531 
1532 static int
1533 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1534 {
1535 	struct ifbrparam *param = arg;
1536 
1537 	sc->sc_bridge_priority = param->ifbrp_prio;
1538 
1539 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1540 		bstp_initialization(sc);
1541 
1542 	return (0);
1543 }
1544 
1545 static int
1546 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1547 {
1548 	struct ifbrparam *param = arg;
1549 
1550 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1551 
1552 	return (0);
1553 }
1554 
1555 static int
1556 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1557 {
1558 	struct ifbrparam *param = arg;
1559 
1560 	if (param->ifbrp_hellotime == 0)
1561 		return (EINVAL);
1562 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1563 
1564 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1565 		bstp_initialization(sc);
1566 
1567 	return (0);
1568 }
1569 
1570 static int
1571 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1572 {
1573 	struct ifbrparam *param = arg;
1574 
1575 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1576 
1577 	return (0);
1578 }
1579 
1580 static int
1581 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1582 {
1583 	struct ifbrparam *param = arg;
1584 
1585 	if (param->ifbrp_fwddelay == 0)
1586 		return (EINVAL);
1587 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1588 
1589 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1590 		bstp_initialization(sc);
1591 
1592 	return (0);
1593 }
1594 
1595 static int
1596 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1597 {
1598 	struct ifbrparam *param = arg;
1599 
1600 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1601 
1602 	return (0);
1603 }
1604 
1605 static int
1606 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1607 {
1608 	struct ifbrparam *param = arg;
1609 
1610 	if (param->ifbrp_maxage == 0)
1611 		return (EINVAL);
1612 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1613 
1614 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1615 		bstp_initialization(sc);
1616 
1617 	return (0);
1618 }
1619 
1620 static int
1621 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1622 {
1623 	struct ifbreq *req = arg;
1624 	struct bridge_iflist *bif;
1625 
1626 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1627 	if (bif == NULL)
1628 		return (ENOENT);
1629 
1630 	bif->bif_priority = req->ifbr_priority;
1631 
1632 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1633 		bstp_initialization(sc);
1634 
1635 	return (0);
1636 }
1637 
1638 static int
1639 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1640 {
1641 	struct ifbreq *req = arg;
1642 	struct bridge_iflist *bif;
1643 
1644 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1645 	if (bif == NULL)
1646 		return (ENOENT);
1647 
1648 	bif->bif_path_cost = req->ifbr_path_cost;
1649 
1650 	if (sc->sc_ifp->if_flags & IFF_RUNNING)
1651 		bstp_initialization(sc);
1652 
1653 	return (0);
1654 }
1655 
1656 static int
1657 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1658 {
1659 	struct ifbreq *req = arg;
1660 	struct bridge_iflist *bif;
1661 	struct ifnet *ifs;
1662 
1663 	ifs = ifunit(req->ifbr_ifsname);
1664 	if (ifs == NULL)
1665 		return (ENOENT);
1666 
1667 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1668 		if (ifs == bif->bif_ifp)
1669 			return (EBUSY);
1670 
1671 	if (ifs->if_bridge != NULL)
1672 		return (EBUSY);
1673 
1674 	switch (ifs->if_type) {
1675 	case IFT_ETHER:
1676 	case IFT_GIF:
1677 	case IFT_L2VLAN:
1678 		break;
1679 
1680 	default:
1681 		return (EINVAL);
1682 	}
1683 
1684 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
1685 	bif->bif_ifp = ifs;
1686 	bif->bif_flags = IFBIF_SPAN;
1687 	/* NOTE: span bif does not need bridge_ifinfo */
1688 
1689 	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1690 
1691 	sc->sc_span = 1;
1692 
1693 	return (0);
1694 }
1695 
1696 static int
1697 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1698 {
1699 	struct ifbreq *req = arg;
1700 	struct bridge_iflist *bif;
1701 	struct ifnet *ifs;
1702 
1703 	ifs = ifunit(req->ifbr_ifsname);
1704 	if (ifs == NULL)
1705 		return (ENOENT);
1706 
1707 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1708 		if (ifs == bif->bif_ifp)
1709 			break;
1710 
1711 	if (bif == NULL)
1712 		return (ENOENT);
1713 
1714 	bridge_delete_span(sc, bif);
1715 
1716 	if (LIST_EMPTY(&sc->sc_spanlist))
1717 		sc->sc_span = 0;
1718 
1719 	return (0);
1720 }
1721 
/*
 * bridge_ifdetach_dispatch:
 *
 *	Runs on the bridge configuration port on behalf of
 *	bridge_ifdetach(): remove the detaching interface from its
 *	bridge, or from any bridge's span list.
 */
static void
bridge_ifdetach_dispatch(struct netmsg *nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->nm_lmsg;
	struct ifnet *ifp, *bifp;
	struct bridge_softc *sc;
	struct bridge_iflist *bif;

	/* The detaching interface is passed in the message payload. */
	ifp = lmsg->u.ms_resultp;
	sc = ifp->if_bridge;

	/* Check if the interface is a bridge member */
	if (sc != NULL) {
		bifp = sc->sc_ifp;

		lwkt_serialize_enter(bifp->if_serializer);

		bif = bridge_lookup_member_if(sc, ifp);
		if (bif != NULL) {
			/* 'gone' is 1: do not touch the dying ifnet. */
			bridge_delete_member(sc, bif, 1);
		} else {
			/* XXX Why bif will be NULL? */
		}

		lwkt_serialize_exit(bifp->if_serializer);
		goto reply;
	}

	crit_enter();	/* XXX MP */

	/* Check if the interface is a span port */
	LIST_FOREACH(sc, &bridge_list, sc_list) {
		bifp = sc->sc_ifp;

		lwkt_serialize_enter(bifp->if_serializer);

		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
			if (ifp == bif->bif_ifp) {
				bridge_delete_span(sc, bif);
				break;
			}

		lwkt_serialize_exit(bifp->if_serializer);
	}

	crit_exit();

reply:
	lwkt_replymsg(lmsg, 0);
}
1772 
1773 /*
1774  * bridge_ifdetach:
1775  *
1776  *	Detach an interface from a bridge.  Called when a member
1777  *	interface is detaching.
1778  */
1779 static void
1780 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1781 {
1782 	struct lwkt_msg *lmsg;
1783 	struct netmsg nmsg;
1784 
1785 	netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_ifdetach_dispatch);
1786 	lmsg = &nmsg.nm_lmsg;
1787 	lmsg->u.ms_resultp = ifp;
1788 
1789 	lwkt_domsg(BRIDGE_CFGPORT, lmsg, 0);
1790 }
1791 
1792 /*
1793  * bridge_init:
1794  *
1795  *	Initialize a bridge interface.
1796  */
1797 static void
1798 bridge_init(void *xsc)
1799 {
1800 	bridge_control(xsc, SIOCSIFFLAGS, bridge_ioctl_init, NULL);
1801 }
1802 
1803 /*
1804  * bridge_stop:
1805  *
1806  *	Stop the bridge interface.
1807  */
1808 static void
1809 bridge_stop(struct ifnet *ifp)
1810 {
1811 	bridge_control(ifp->if_softc, SIOCSIFFLAGS, bridge_ioctl_stop, NULL);
1812 }
1813 
1814 /*
1815  * bridge_enqueue:
1816  *
1817  *	Enqueue a packet on a bridge member interface.
1818  *
1819  */
1820 void
1821 bridge_enqueue(struct ifnet *dst_ifp, struct mbuf *m)
1822 {
1823 	struct netmsg_packet *nmp;
1824 
1825 	nmp = &m->m_hdr.mh_netmsg;
1826 	netmsg_init(&nmp->nm_netmsg, &netisr_apanic_rport, 0,
1827 		    bridge_enqueue_handler);
1828 	nmp->nm_packet = m;
1829 	nmp->nm_netmsg.nm_lmsg.u.ms_resultp = dst_ifp;
1830 
1831 	lwkt_sendmsg(curnetport, &nmp->nm_netmsg.nm_lmsg);
1832 }
1833 
1834 /*
1835  * bridge_output:
1836  *
1837  *	Send output from a bridge member interface.  This
1838  *	performs the bridging function for locally originated
1839  *	packets.
1840  *
1841  *	The mbuf has the Ethernet header already attached.  We must
1842  *	enqueue or free the mbuf before returning.
1843  */
1844 static int
1845 bridge_output(struct ifnet *ifp, struct mbuf *m)
1846 {
1847 	struct bridge_softc *sc = ifp->if_bridge;
1848 	struct ether_header *eh;
1849 	struct ifnet *dst_if, *bifp;
1850 
1851 	ASSERT_NOT_SERIALIZED(ifp->if_serializer);
1852 
1853 	/*
1854 	 * Make sure that we are still a member of a bridge interface.
1855 	 */
1856 	if (sc == NULL) {
1857 		m_freem(m);
1858 		return (0);
1859 	}
1860 	bifp = sc->sc_ifp;
1861 
1862 	if (m->m_len < ETHER_HDR_LEN) {
1863 		m = m_pullup(m, ETHER_HDR_LEN);
1864 		if (m == NULL)
1865 			return (0);
1866 	}
1867 	eh = mtod(m, struct ether_header *);
1868 
1869 	/*
1870 	 * If bridge is down, but the original output interface is up,
1871 	 * go ahead and send out that interface.  Otherwise, the packet
1872 	 * is dropped below.
1873 	 */
1874 	if ((bifp->if_flags & IFF_RUNNING) == 0) {
1875 		dst_if = ifp;
1876 		goto sendunicast;
1877 	}
1878 
1879 	/*
1880 	 * If the packet is a multicast, or we don't know a better way to
1881 	 * get there, send to all interfaces.
1882 	 */
1883 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1884 		dst_if = NULL;
1885 	else
1886 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1887 	if (dst_if == NULL) {
1888 		struct bridge_iflist *bif, *nbif;
1889 		struct mbuf *mc;
1890 		int used = 0;
1891 
1892 		if (sc->sc_span)
1893 			bridge_span(sc, m);
1894 
1895 		LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid],
1896 				     bif_next, nbif) {
1897 			dst_if = bif->bif_ifp;
1898 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1899 				continue;
1900 
1901 			/*
1902 			 * If this is not the original output interface,
1903 			 * and the interface is participating in spanning
1904 			 * tree, make sure the port is in a state that
1905 			 * allows forwarding.
1906 			 */
1907 			if (dst_if != ifp &&
1908 			    (bif->bif_flags & IFBIF_STP) != 0) {
1909 				switch (bif->bif_state) {
1910 				case BSTP_IFSTATE_BLOCKING:
1911 				case BSTP_IFSTATE_LISTENING:
1912 				case BSTP_IFSTATE_DISABLED:
1913 					continue;
1914 				}
1915 			}
1916 
1917 			if (LIST_NEXT(bif, bif_next) == NULL) {
1918 				used = 1;
1919 				mc = m;
1920 			} else {
1921 				mc = m_copypacket(m, MB_DONTWAIT);
1922 				if (mc == NULL) {
1923 					bifp->if_oerrors++;
1924 					continue;
1925 				}
1926 			}
1927 			bridge_handoff(dst_if, mc);
1928 
1929 			if (nbif != NULL && !nbif->bif_onlist) {
1930 				KKASSERT(bif->bif_onlist);
1931 				nbif = LIST_NEXT(bif, bif_next);
1932 			}
1933 		}
1934 		if (used == 0)
1935 			m_freem(m);
1936 		return (0);
1937 	}
1938 
1939 sendunicast:
1940 	/*
1941 	 * XXX Spanning tree consideration here?
1942 	 */
1943 	if (sc->sc_span)
1944 		bridge_span(sc, m);
1945 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
1946 		m_freem(m);
1947 	else
1948 		bridge_handoff(dst_if, m);
1949 	return (0);
1950 }
1951 
1952 /*
1953  * bridge_start:
1954  *
1955  *	Start output on a bridge.
1956  *
1957  */
1958 static void
1959 bridge_start(struct ifnet *ifp)
1960 {
1961 	struct bridge_softc *sc = ifp->if_softc;
1962 
1963 	ASSERT_SERIALIZED(ifp->if_serializer);
1964 
1965 	ifp->if_flags |= IFF_OACTIVE;
1966 	for (;;) {
1967 		struct ifnet *dst_if = NULL;
1968 		struct ether_header *eh;
1969 		struct mbuf *m;
1970 
1971 		m = ifq_dequeue(&ifp->if_snd, NULL);
1972 		if (m == NULL)
1973 			break;
1974 
1975 		if (m->m_len < sizeof(*eh)) {
1976 			m = m_pullup(m, sizeof(*eh));
1977 			if (m == NULL) {
1978 				ifp->if_oerrors++;
1979 				continue;
1980 			}
1981 		}
1982 		eh = mtod(m, struct ether_header *);
1983 
1984 		BPF_MTAP(ifp, m);
1985 		ifp->if_opackets++;
1986 
1987 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0)
1988 			dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1989 
1990 		if (dst_if == NULL)
1991 			bridge_start_bcast(sc, m);
1992 		else
1993 			bridge_enqueue(dst_if, m);
1994 	}
1995 	ifp->if_flags &= ~IFF_OACTIVE;
1996 }
1997 
1998 /*
1999  * bridge_forward:
2000  *
2001  *	The forwarding function of the bridge.
2002  */
2003 static void
2004 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
2005 {
2006 	struct bridge_iflist *bif;
2007 	struct ifnet *src_if, *dst_if, *ifp;
2008 	struct ether_header *eh;
2009 
2010 	src_if = m->m_pkthdr.rcvif;
2011 	ifp = sc->sc_ifp;
2012 
2013 	ASSERT_NOT_SERIALIZED(ifp->if_serializer);
2014 
2015 	ifp->if_ipackets++;
2016 	ifp->if_ibytes += m->m_pkthdr.len;
2017 
2018 	/*
2019 	 * Look up the bridge_iflist.
2020 	 */
2021 	bif = bridge_lookup_member_if(sc, src_if);
2022 	if (bif == NULL) {
2023 		/* Interface is not a bridge member (anymore?) */
2024 		m_freem(m);
2025 		return;
2026 	}
2027 
2028 	if (bif->bif_flags & IFBIF_STP) {
2029 		switch (bif->bif_state) {
2030 		case BSTP_IFSTATE_BLOCKING:
2031 		case BSTP_IFSTATE_LISTENING:
2032 		case BSTP_IFSTATE_DISABLED:
2033 			m_freem(m);
2034 			return;
2035 		}
2036 	}
2037 
2038 	eh = mtod(m, struct ether_header *);
2039 
2040 	/*
2041 	 * If the interface is learning, and the source
2042 	 * address is valid and not multicast, record
2043 	 * the address.
2044 	 */
2045 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
2046 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
2047 	    (eh->ether_shost[0] == 0 &&
2048 	     eh->ether_shost[1] == 0 &&
2049 	     eh->ether_shost[2] == 0 &&
2050 	     eh->ether_shost[3] == 0 &&
2051 	     eh->ether_shost[4] == 0 &&
2052 	     eh->ether_shost[5] == 0) == 0)
2053 		bridge_rtupdate(sc, eh->ether_shost, src_if, IFBAF_DYNAMIC);
2054 
2055 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
2056 	    bif->bif_state == BSTP_IFSTATE_LEARNING) {
2057 		m_freem(m);
2058 		return;
2059 	}
2060 
2061 	/*
2062 	 * At this point, the port either doesn't participate
2063 	 * in spanning tree or it is in the forwarding state.
2064 	 */
2065 
2066 	/*
2067 	 * If the packet is unicast, destined for someone on
2068 	 * "this" side of the bridge, drop it.
2069 	 */
2070 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2071 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
2072 		if (src_if == dst_if) {
2073 			m_freem(m);
2074 			return;
2075 		}
2076 	} else {
2077 		/* ...forward it to all interfaces. */
2078 		ifp->if_imcasts++;
2079 		dst_if = NULL;
2080 	}
2081 
2082 	if (dst_if == NULL) {
2083 		bridge_broadcast(sc, src_if, m);
2084 		return;
2085 	}
2086 
2087 	/*
2088 	 * At this point, we're dealing with a unicast frame
2089 	 * going to a different interface.
2090 	 */
2091 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
2092 		m_freem(m);
2093 		return;
2094 	}
2095 	bif = bridge_lookup_member_if(sc, dst_if);
2096 	if (bif == NULL) {
2097 		/* Not a member of the bridge (anymore?) */
2098 		m_freem(m);
2099 		return;
2100 	}
2101 
2102 	if (bif->bif_flags & IFBIF_STP) {
2103 		switch (bif->bif_state) {
2104 		case BSTP_IFSTATE_DISABLED:
2105 		case BSTP_IFSTATE_BLOCKING:
2106 			m_freem(m);
2107 			return;
2108 		}
2109 	}
2110 
2111 	if (inet_pfil_hook.ph_hashooks > 0
2112 #ifdef INET6
2113 	    || inet6_pfil_hook.ph_hashooks > 0
2114 #endif
2115 	    ) {
2116 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2117 			return;
2118 		if (m == NULL)
2119 			return;
2120 
2121 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2122 			return;
2123 		if (m == NULL)
2124 			return;
2125 	}
2126 	bridge_handoff(dst_if, m);
2127 }
2128 
2129 /*
2130  * bridge_input:
2131  *
2132  *	Receive input from a member interface.  Queue the packet for
2133  *	bridging if it is not for us.
2134  */
static struct mbuf *
bridge_input(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif;
	struct ifnet *bifp, *new_ifp;
	struct ether_header *eh;
	struct mbuf *mc, *mc2;

	/*
	 * Return contract: NULL means the mbuf was consumed (forwarded,
	 * reinjected or freed); non-NULL returns the mbuf to the caller
	 * for normal local input processing.
	 */
	ASSERT_NOT_SERIALIZED(ifp->if_serializer);

	/*
	 * Make sure that we are still a member of a bridge interface.
	 */
	if (sc == NULL)
		return m;

	new_ifp = NULL;
	bifp = sc->sc_ifp;

	/* Bridge interface is down; hand the packet back untouched. */
	if ((bifp->if_flags & IFF_RUNNING) == 0)
		goto out;

	/*
	 * Implement support for bridge monitoring.  If this flag has been
	 * set on this interface, discard the packet once we push it through
	 * the bpf(4) machinery, but before we do, increment various counters
	 * associated with this bridge.
	 */
	if (bifp->if_flags & IFF_MONITOR) {
		/* Change input interface to this bridge */
		m->m_pkthdr.rcvif = bifp;

		BPF_MTAP(bifp, m);

		/* Update bridge's ifnet statistics */
		bifp->if_ipackets++;
		bifp->if_ibytes += m->m_pkthdr.len;
		if (m->m_flags & (M_MCAST | M_BCAST))
			bifp->if_imcasts++;

		m_freem(m);
		m = NULL;
		goto out;
	}

	eh = mtod(m, struct ether_header *);

	m->m_flags &= ~M_PROTO1; /* XXX Hack - loop prevention */

	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
		/*
		 * If the packet is for us, set the packets source as the
		 * bridge, and return the packet back to ifnet.if_input for
		 * local processing.
		 */
		KASSERT(bifp->if_bridge == NULL,
			("loop created in bridge_input"));
		new_ifp = bifp;
		goto out;
	}

	/*
	 * Tap all packets arriving on the bridge, no matter if
	 * they are local destinations or not.  In is in.
	 */
	BPF_MTAP(bifp, m);

	bif = bridge_lookup_member_if(sc, ifp);
	if (bif == NULL)
		goto out;

	/* Duplicate the frame to any span ports before forwarding. */
	if (sc->sc_span)
		bridge_span(sc, m);

	if (m->m_flags & (M_BCAST | M_MCAST)) {
		/* Tap off 802.1D packets; they do not get forwarded. */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN) == 0) {
			lwkt_serialize_enter(bifp->if_serializer);
			bstp_input(sc, bif, m);
			lwkt_serialize_exit(bifp->if_serializer);

			/* m is freed by bstp_input */
			m = NULL;
			goto out;
		}

		/*
		 * Do not bridge through ports STP has cut off; the
		 * frame is still returned for local processing.
		 */
		if (bif->bif_flags & IFBIF_STP) {
			switch (bif->bif_state) {
			case BSTP_IFSTATE_BLOCKING:
			case BSTP_IFSTATE_LISTENING:
			case BSTP_IFSTATE_DISABLED:
				goto out;
			}
		}

		/*
		 * Make a deep copy of the packet and enqueue the copy
		 * for bridge processing; return the original packet for
		 * local processing.
		 */
		mc = m_dup(m, MB_DONTWAIT);
		if (mc == NULL)
			goto out;

		bridge_forward(sc, mc);

		/*
		 * Reinject the mbuf as arriving on the bridge so we have a
		 * chance at claiming multicast packets. We can not loop back
		 * here from ether_input as a bridge is never a member of a
		 * bridge.
		 */
		KASSERT(bifp->if_bridge == NULL,
			("loop created in bridge_input"));
		mc2 = m_dup(m, MB_DONTWAIT);
#ifdef notyet
		if (mc2 != NULL) {
			/* Keep the layer3 header aligned */
			int i = min(mc2->m_pkthdr.len, max_protohdr);
			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
		}
#endif
		if (mc2 != NULL) {
			/*
			 * Don't tap to bpf(4) again; we have
			 * already done the tapping.
			 */
			ether_reinput_oncpu(bifp, mc2, 0);
		}

		/* Return the original packet for local processing. */
		goto out;
	}

	if (bif->bif_flags & IFBIF_STP) {
		switch (bif->bif_state) {
		case BSTP_IFSTATE_BLOCKING:
		case BSTP_IFSTATE_LISTENING:
		case BSTP_IFSTATE_DISABLED:
			goto out;
		}
	}

	/*
	 * Unicast.  Make sure it's not for us.
	 *
	 * This loop is MPSAFE; the only blocking operation (bridge_rtupdate)
	 * is followed by breaking out of the loop.
	 */
	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
		if (bif->bif_ifp->if_type != IFT_ETHER)
			continue;

		/* It is destined for us. */
		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
		    ETHER_ADDR_LEN) == 0) {
			if (bif->bif_ifp != ifp) {
				/* XXX loop prevention */
				m->m_flags |= M_PROTO1;
				new_ifp = bif->bif_ifp;
			}
			if (bif->bif_flags & IFBIF_LEARNING) {
				bridge_rtupdate(sc, eh->ether_shost,
						ifp, IFBAF_DYNAMIC);
			}
			goto out;
		}

		/* We just received a packet that we sent out. */
		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
		    ETHER_ADDR_LEN) == 0) {
			m_freem(m);
			m = NULL;
			goto out;
		}
	}

	/* Perform the bridge forwarding function. */
	bridge_forward(sc, m);
	m = NULL;
out:
	if (new_ifp != NULL) {
		/* Hand the frame to the matched interface for local input. */
		ether_reinput_oncpu(new_ifp, m, 1);
		m = NULL;
	}
	return (m);
}
2324 
2325 /*
2326  * bridge_start_bcast:
2327  *
2328  *	Broadcast the packet sent from bridge to all member
2329  *	interfaces.
2330  *	This is a simplified version of bridge_broadcast(), however,
2331  *	this function expects caller to hold bridge's serializer.
2332  */
static void
bridge_start_bcast(struct bridge_softc *sc, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct mbuf *mc;
	struct ifnet *dst_if, *bifp;
	int used = 0;	/* set once the original mbuf has been handed off */

	bifp = sc->sc_ifp;
	ASSERT_SERIALIZED(bifp->if_serializer);

	/*
	 * Following loop is MPSAFE; nothing is blocking
	 * in the loop body.
	 */
	LIST_FOREACH(bif, &sc->sc_iflists[mycpuid], bif_next) {
		dst_if = bif->bif_ifp;

		/* Skip ports the spanning tree has cut off. */
		if (bif->bif_flags & IFBIF_STP) {
			switch (bif->bif_state) {
			case BSTP_IFSTATE_BLOCKING:
			case BSTP_IFSTATE_DISABLED:
				continue;
			}
		}

		/*
		 * Unicast frames are only flooded out DISCOVER ports;
		 * broadcast/multicast go out everywhere.
		 */
		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
			continue;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		/*
		 * The last member gets the original mbuf; every earlier
		 * member gets its own copy.
		 */
		if (LIST_NEXT(bif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_copypacket(m, MB_DONTWAIT);
			if (mc == NULL) {
				bifp->if_oerrors++;
				continue;
			}
		}
		bridge_enqueue(dst_if, mc);
	}
	/* Nobody consumed the original mbuf; free it here. */
	if (used == 0)
		m_freem(m);
}
2381 
2382 /*
2383  * bridge_broadcast:
2384  *
2385  *	Send a frame to all interfaces that are members of
2386  *	the bridge, except for the one on which the packet
2387  *	arrived.
2388  */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m)
{
	struct bridge_iflist *bif, *nbif;
	struct mbuf *mc;
	struct ifnet *dst_if, *bifp;
	int used = 0;	/* set once the original mbuf has been handed off */

	bifp = sc->sc_ifp;
	ASSERT_NOT_SERIALIZED(bifp->if_serializer);

	/*
	 * Run the input and bridge-level filters once, before the frame
	 * is replicated to the members.
	 */
	if (inet_pfil_hook.ph_hashooks > 0
#ifdef INET6
	    || inet6_pfil_hook.ph_hashooks > 0
#endif
	    ) {
		if (bridge_pfil(&m, bifp, src_if, PFIL_IN) != 0)
			return;
		if (m == NULL)
			return;

		/* Filter on the bridge interface before broadcasting */
		if (bridge_pfil(&m, bifp, NULL, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	LIST_FOREACH_MUTABLE(bif, &sc->sc_iflists[mycpuid], bif_next, nbif) {
		dst_if = bif->bif_ifp;
		/* Never reflect the frame back out its arrival port. */
		if (dst_if == src_if)
			continue;

		if (bif->bif_flags & IFBIF_STP) {
			switch (bif->bif_state) {
			case BSTP_IFSTATE_BLOCKING:
			case BSTP_IFSTATE_DISABLED:
				continue;
			}
		}

		/*
		 * Unicast frames are only flooded out DISCOVER ports;
		 * broadcast/multicast go out everywhere.
		 */
		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
			continue;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			continue;

		/*
		 * The last member gets the original mbuf; every earlier
		 * member gets its own copy.
		 */
		if (LIST_NEXT(bif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_copypacket(m, MB_DONTWAIT);
			if (mc == NULL) {
				sc->sc_ifp->if_oerrors++;
				continue;
			}
		}

		/*
		 * Filter on the output interface.  Pass a NULL bridge
		 * interface pointer so we do not redundantly filter on
		 * the bridge for each interface we broadcast on.
		 */
		if (inet_pfil_hook.ph_hashooks > 0
#ifdef INET6
		    || inet6_pfil_hook.ph_hashooks > 0
#endif
		    ) {
			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
				continue;
			if (mc == NULL)
				continue;
		}
		bridge_handoff(dst_if, mc);

		/*
		 * bridge_handoff() may block; if the saved next member
		 * was removed from the per-cpu list while we blocked,
		 * re-fetch it from the (still on-list) current member.
		 */
		if (nbif != NULL && !nbif->bif_onlist) {
			KKASSERT(bif->bif_onlist);
			nbif = LIST_NEXT(bif, bif_next);
		}
	}
	/* Nobody consumed the original mbuf; free it here. */
	if (used == 0)
		m_freem(m);
}
2474 
2475 /*
2476  * bridge_span:
2477  *
2478  *	Duplicate a packet out one or more interfaces that are in span mode,
2479  *	the original mbuf is unmodified.
2480  */
2481 static void
2482 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2483 {
2484 	struct bridge_iflist *bif;
2485 	struct ifnet *dst_if, *bifp;
2486 	struct mbuf *mc;
2487 
2488 	bifp = sc->sc_ifp;
2489 	lwkt_serialize_enter(bifp->if_serializer);
2490 
2491 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2492 		dst_if = bif->bif_ifp;
2493 
2494 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2495 			continue;
2496 
2497 		mc = m_copypacket(m, MB_DONTWAIT);
2498 		if (mc == NULL) {
2499 			sc->sc_ifp->if_oerrors++;
2500 			continue;
2501 		}
2502 		bridge_enqueue(dst_if, mc);
2503 	}
2504 
2505 	lwkt_serialize_exit(bifp->if_serializer);
2506 }
2507 
static void
bridge_rtmsg_sync_handler(struct netmsg *nmsg)
{
	/* No work; just pass the message along to the next cpu. */
	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
2513 
static void
bridge_rtmsg_sync(struct bridge_softc *sc)
{
	struct netmsg nmsg;

	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);

	/*
	 * Circulate a no-op message through every cpu's ifnet thread
	 * and block until it returns; acts as a barrier, presumably so
	 * previously sent per-cpu route messages have been processed
	 * before we proceed (NOTE(review): confirm against callers).
	 */
	netmsg_init(&nmsg, &curthread->td_msgport, 0,
		    bridge_rtmsg_sync_handler);
	ifnet_domsg(&nmsg.nm_lmsg, 0);
}
2525 
static __inline void
bridge_rtinfo_update(struct bridge_rtinfo *bri, struct ifnet *dst_if,
		     int setflags, uint8_t flags, uint32_t timeo)
{
	/*
	 * Refresh an rtinfo shared by all cpus' rtnodes.  Each store is
	 * guarded by a "did it change" test — presumably to avoid
	 * dirtying the shared cache line needlessly (NOTE(review):
	 * confirm motivation).  Only dynamic entries are re-pointed
	 * and re-armed.
	 */
	if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    bri->bri_ifp != dst_if)
		bri->bri_ifp = dst_if;
	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    bri->bri_expire != time_second + timeo)
		bri->bri_expire = time_second + timeo;
	/* 'setflags' forces a full overwrite of the flags byte. */
	if (setflags)
		bri->bri_flags = flags;
}
2539 
static int
bridge_rtinstall_oncpu(struct bridge_softc *sc, const uint8_t *dst,
		       struct ifnet *dst_if, int setflags, uint8_t flags,
		       struct bridge_rtinfo **bri0)
{
	struct bridge_rtnode *brt;
	struct bridge_rtinfo *bri;

	/*
	 * Install an rtnode for 'dst' on this cpu.  CPU0 performs the
	 * duplicate/limit checks and allocates the shared rtinfo, which
	 * is then carried to the other cpus via *bri0.
	 */
	if (mycpuid == 0) {
		brt = bridge_rtnode_lookup(sc, dst);
		if (brt != NULL) {
			/*
			 * rtnode for 'dst' already exists.  We inform the
			 * caller about this by leaving bri0 as NULL.  The
			 * caller will terminate the installation upon getting
			 * NULL bri0.  However, we still need to update the
			 * rtinfo.
			 */
			KKASSERT(*bri0 == NULL);

			/* Update rtinfo */
			bridge_rtinfo_update(brt->brt_info, dst_if, setflags,
					     flags, sc->sc_brttimeout);
			return 0;
		}

		/*
		 * We only need to check brtcnt on CPU0, since if limit
		 * is to be exceeded, ENOSPC is returned.  Caller knows
		 * this and will terminate the installation.
		 */
		if (sc->sc_brtcnt >= sc->sc_brtmax)
			return ENOSPC;

		KKASSERT(*bri0 == NULL);
		bri = kmalloc(sizeof(struct bridge_rtinfo), M_DEVBUF,
				  M_WAITOK | M_ZERO);
		*bri0 = bri;

		/* Setup rtinfo */
		bri->bri_flags = IFBAF_DYNAMIC;
		bridge_rtinfo_update(bri, dst_if, setflags, flags,
				     sc->sc_brttimeout);
	} else {
		/* Non-zero cpus reuse the rtinfo allocated on CPU0. */
		bri = *bri0;
		KKASSERT(bri != NULL);
	}

	/* Each cpu gets its own rtnode pointing at the shared rtinfo. */
	brt = kmalloc(sizeof(struct bridge_rtnode), M_DEVBUF,
		      M_WAITOK | M_ZERO);
	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
	brt->brt_info = bri;

	bridge_rtnode_insert(sc, brt);
	return 0;
}
2596 
static void
bridge_rtinstall_handler(struct netmsg *nmsg)
{
	struct netmsg_brsaddr *brmsg = (struct netmsg_brsaddr *)nmsg;
	int error;

	/*
	 * Per-cpu handler for route installation.  On error or if the
	 * rtnode already exists (both detectable only on CPU0) the
	 * message is replied immediately; otherwise it is forwarded to
	 * the next cpu in the chain.
	 */
	error = bridge_rtinstall_oncpu(brmsg->br_softc,
				       brmsg->br_dst, brmsg->br_dst_if,
				       brmsg->br_setflags, brmsg->br_flags,
				       &brmsg->br_rtinfo);
	if (error) {
		KKASSERT(mycpuid == 0 && brmsg->br_rtinfo == NULL);
		lwkt_replymsg(&nmsg->nm_lmsg, error);
		return;
	} else if (brmsg->br_rtinfo == NULL) {
		/* rtnode already exists for 'dst' */
		KKASSERT(mycpuid == 0);
		lwkt_replymsg(&nmsg->nm_lmsg, 0);
		return;
	}
	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
2619 
2620 /*
2621  * bridge_rtupdate:
2622  *
2623  *	Add/Update a bridge routing entry.
2624  */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
		struct ifnet *dst_if, uint8_t flags)
{
	struct bridge_rtnode *brt;

	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
		struct netmsg_brsaddr *brmsg;

		if (sc->sc_brtcnt >= sc->sc_brtmax)
			return ENOSPC;

		/* M_NULLOK: kmalloc may return NULL despite M_WAITOK. */
		brmsg = kmalloc(sizeof(*brmsg), M_LWKTMSG, M_WAITOK | M_NULLOK);
		if (brmsg == NULL)
			return ENOMEM;

		/*
		 * Fire-and-forget installation: the auto-free reply port
		 * reclaims the message after the last cpu handles it.
		 */
		netmsg_init(&brmsg->br_nmsg, &netisr_afree_rport, 0,
			    bridge_rtinstall_handler);
		memcpy(brmsg->br_dst, dst, ETHER_ADDR_LEN);
		brmsg->br_dst_if = dst_if;
		brmsg->br_flags = flags;
		brmsg->br_setflags = 0;
		brmsg->br_softc = sc;
		brmsg->br_rtinfo = NULL;

		ifnet_sendmsg(&brmsg->br_nmsg.nm_lmsg, 0);
		return 0;
	}
	/* Existing entry: just refresh the shared rtinfo in place. */
	bridge_rtinfo_update(brt->brt_info, dst_if, 0, flags,
			     sc->sc_brttimeout);
	return 0;
}
2661 
static int
bridge_rtsaddr(struct bridge_softc *sc, const uint8_t *dst,
	       struct ifnet *dst_if, uint8_t flags)
{
	struct netmsg_brsaddr brmsg;

	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);

	/*
	 * Synchronous variant of route installation; br_setflags = 1
	 * forces bri_flags to be overwritten with 'flags'.  Blocks
	 * until all cpus have processed the message.
	 */
	netmsg_init(&brmsg.br_nmsg, &curthread->td_msgport, 0,
		    bridge_rtinstall_handler);
	memcpy(brmsg.br_dst, dst, ETHER_ADDR_LEN);
	brmsg.br_dst_if = dst_if;
	brmsg.br_flags = flags;
	brmsg.br_setflags = 1;
	brmsg.br_softc = sc;
	brmsg.br_rtinfo = NULL;

	return ifnet_domsg(&brmsg.br_nmsg.nm_lmsg, 0);
}
2681 
2682 /*
2683  * bridge_rtlookup:
2684  *
2685  *	Lookup the destination interface for an address.
2686  */
2687 static struct ifnet *
2688 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2689 {
2690 	struct bridge_rtnode *brt;
2691 
2692 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2693 		return NULL;
2694 	return brt->brt_info->bri_ifp;
2695 }
2696 
static void
bridge_rtreap_handler(struct netmsg *nmsg)
{
	struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;
	struct bridge_rtnode *brt, *nbrt;

	/*
	 * Destroy all rtnodes marked dead on this cpu, then pass the
	 * message on so every cpu reaps its own list.
	 */
	LIST_FOREACH_MUTABLE(brt, &sc->sc_rtlists[mycpuid], brt_list, nbrt) {
		if (brt->brt_info->bri_dead)
			bridge_rtnode_destroy(sc, brt);
	}
	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
}
2709 
static void
bridge_rtreap(struct bridge_softc *sc)
{
	struct netmsg nmsg;

	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);

	/* Synchronously reap dead rtnodes on every cpu. */
	netmsg_init(&nmsg, &curthread->td_msgport, 0, bridge_rtreap_handler);
	nmsg.nm_lmsg.u.ms_resultp = sc;

	ifnet_domsg(&nmsg.nm_lmsg, 0);
}
2722 
static void
bridge_rtreap_async(struct bridge_softc *sc)
{
	struct netmsg *nmsg;

	/*
	 * Fire-and-forget reap: the message is heap-allocated and
	 * auto-freed by the netisr_afree_rport after the last cpu.
	 */
	nmsg = kmalloc(sizeof(*nmsg), M_LWKTMSG, M_WAITOK);

	netmsg_init(nmsg, &netisr_afree_rport, 0, bridge_rtreap_handler);
	nmsg->nm_lmsg.u.ms_resultp = sc;

	ifnet_sendmsg(&nmsg->nm_lmsg, 0);
}
2735 
2736 /*
2737  * bridge_rttrim:
2738  *
 *	Trim the routing table so that we have a number
2740  *	of routing entries less than or equal to the
2741  *	maximum number.
2742  */
static void
bridge_rttrim(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt;
	int dead;

	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/*
	 * Find out how many rtnodes are dead
	 */
	dead = bridge_rtage_finddead(sc);
	KKASSERT(dead <= sc->sc_brtcnt);

	if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
		/* Enough dead rtnodes are found */
		bridge_rtreap(sc);
		return;
	}

	/*
	 * Kill some dynamic rtnodes to meet the brtmax
	 */
	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
		struct bridge_rtinfo *bri = brt->brt_info;

		if (bri->bri_dead) {
			/*
			 * We have counted this rtnode in
			 * bridge_rtage_finddead()
			 */
			continue;
		}

		/* Only dynamic (learned) entries may be sacrificed. */
		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bri->bri_dead = 1;
			++dead;
			KKASSERT(dead <= sc->sc_brtcnt);

			if (sc->sc_brtcnt - dead <= sc->sc_brtmax) {
				/* Enough rtnodes are collected */
				break;
			}
		}
	}
	if (dead)
		bridge_rtreap(sc);
}
2795 
2796 /*
2797  * bridge_timer:
2798  *
2799  *	Aging timer for the bridge.
2800  */
static void
bridge_timer(void *arg)
{
	struct bridge_softc *sc = arg;
	struct lwkt_msg *lmsg;

	KKASSERT(mycpuid == BRIDGE_CFGCPU);

	crit_enter();

	/*
	 * Standard callout race checks: bail if the callout was
	 * re-armed or stopped while this handler was pending.
	 */
	if (callout_pending(&sc->sc_brcallout) ||
	    !callout_active(&sc->sc_brcallout)) {
		crit_exit();
		return;
	}
	callout_deactivate(&sc->sc_brcallout);

	/*
	 * Defer the actual aging work to the config port's thread;
	 * the message must be idle (MSGF_DONE) before re-sending.
	 */
	lmsg = &sc->sc_brtimemsg.nm_lmsg;
	KKASSERT(lmsg->ms_flags & MSGF_DONE);
	lwkt_sendmsg(BRIDGE_CFGPORT, lmsg);

	crit_exit();
}
2824 
static void
bridge_timer_handler(struct netmsg *nmsg)
{
	struct bridge_softc *sc = nmsg->nm_lmsg.u.ms_resultp;

	KKASSERT(&curthread->td_msgport == BRIDGE_CFGPORT);

	crit_enter();
	/* Reply ASAP so bridge_timer() can reuse the embedded message. */
	lwkt_replymsg(&nmsg->nm_lmsg, 0);
	crit_exit();

	/* Age out expired dynamic entries, then re-arm the callout. */
	bridge_rtage(sc);
	if (sc->sc_ifp->if_flags & IFF_RUNNING) {
		callout_reset(&sc->sc_brcallout,
		    bridge_rtable_prune_period * hz, bridge_timer, sc);
	}
}
2843 
2844 static int
2845 bridge_rtage_finddead(struct bridge_softc *sc)
2846 {
2847 	struct bridge_rtnode *brt;
2848 	int dead = 0;
2849 
2850 	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
2851 		struct bridge_rtinfo *bri = brt->brt_info;
2852 
2853 		if ((bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2854 		    time_second >= bri->bri_expire) {
2855 			bri->bri_dead = 1;
2856 			++dead;
2857 			KKASSERT(dead <= sc->sc_brtcnt);
2858 		}
2859 	}
2860 	return dead;
2861 }
2862 
2863 /*
2864  * bridge_rtage:
2865  *
2866  *	Perform an aging cycle.
2867  */
2868 static void
2869 bridge_rtage(struct bridge_softc *sc)
2870 {
2871 	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
2872 
2873 	if (bridge_rtage_finddead(sc))
2874 		bridge_rtreap(sc);
2875 }
2876 
2877 /*
2878  * bridge_rtflush:
2879  *
2880  *	Remove all dynamic addresses from the bridge.
2881  */
static void
bridge_rtflush(struct bridge_softc *sc, int bf)
{
	struct bridge_rtnode *brt;
	int reap;

	reap = 0;
	/*
	 * Mark entries dead: all of them with IFBF_FLUSHALL, otherwise
	 * only the dynamic (learned) ones.
	 */
	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
		struct bridge_rtinfo *bri = brt->brt_info;

		if ((bf & IFBF_FLUSHALL) ||
		    (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bri->bri_dead = 1;
			reap = 1;
		}
	}
	/* IFBF_FLUSHSYNC selects a blocking reap over a deferred one. */
	if (reap) {
		if (bf & IFBF_FLUSHSYNC)
			bridge_rtreap(sc);
		else
			bridge_rtreap_async(sc);
	}
}
2905 
2906 /*
2907  * bridge_rtdaddr:
2908  *
2909  *	Remove an address from the table.
2910  */
static int
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
{
	struct bridge_rtnode *brt;

	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);

	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
		return (ENOENT);

	/* TODO: add a cheaper delete operation */
	/* Mark dead, then run a full synchronous reap on all cpus. */
	brt->brt_info->bri_dead = 1;
	bridge_rtreap(sc);
	return (0);
}
2926 
2927 /*
2928  * bridge_rtdelete:
2929  *
 *	Delete routes to a specific member interface.
2931  */
void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int bf)
{
	struct bridge_rtnode *brt;
	int reap;

	reap = 0;
	/*
	 * Mark dead every entry pointing at 'ifp': all of them with
	 * IFBF_FLUSHALL, otherwise only the dynamic (learned) ones.
	 */
	LIST_FOREACH(brt, &sc->sc_rtlists[mycpuid], brt_list) {
		struct bridge_rtinfo *bri = brt->brt_info;

		if (bri->bri_ifp == ifp &&
		    ((bf & IFBF_FLUSHALL) ||
		     (bri->bri_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) {
			bri->bri_dead = 1;
			reap = 1;
		}
	}
	/* IFBF_FLUSHSYNC selects a blocking reap over a deferred one. */
	if (reap) {
		if (bf & IFBF_FLUSHSYNC)
			bridge_rtreap(sc);
		else
			bridge_rtreap_async(sc);
	}
}
2956 
2957 /*
2958  * bridge_rtable_init:
2959  *
2960  *	Initialize the route table for this bridge.
2961  */
static void
bridge_rtable_init(struct bridge_softc *sc)
{
	int cpu;

	/*
	 * Initialize per-cpu hash tables
	 */
	sc->sc_rthashs = kmalloc(sizeof(*sc->sc_rthashs) * ncpus,
				 M_DEVBUF, M_WAITOK);
	for (cpu = 0; cpu < ncpus; ++cpu) {
		int i;

		sc->sc_rthashs[cpu] =
		kmalloc(sizeof(struct bridge_rtnode_head) * BRIDGE_RTHASH_SIZE,
			M_DEVBUF, M_WAITOK);

		for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
			LIST_INIT(&sc->sc_rthashs[cpu][i]);
	}
	/* Random per-bridge hash seed; mixed in by bridge_rthash(). */
	sc->sc_rthash_key = karc4random();

	/*
	 * Initialize per-cpu lists
	 */
	sc->sc_rtlists = kmalloc(sizeof(struct bridge_rtnode_head) * ncpus,
				 M_DEVBUF, M_WAITOK);
	for (cpu = 0; cpu < ncpus; ++cpu)
		LIST_INIT(&sc->sc_rtlists[cpu]);
}
2992 
2993 /*
2994  * bridge_rtable_fini:
2995  *
2996  *	Deconstruct the route table for this bridge.
2997  */
2998 static void
2999 bridge_rtable_fini(struct bridge_softc *sc)
3000 {
3001 	int cpu;
3002 
3003 	/*
3004 	 * Free per-cpu hash tables
3005 	 */
3006 	for (cpu = 0; cpu < ncpus; ++cpu)
3007 		kfree(sc->sc_rthashs[cpu], M_DEVBUF);
3008 	kfree(sc->sc_rthashs, M_DEVBUF);
3009 
3010 	/*
3011 	 * Free per-cpu lists
3012 	 */
3013 	kfree(sc->sc_rtlists, M_DEVBUF);
3014 }
3015 
3016 /*
3017  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3018  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3019  */
/*
 * NOTE: each macro argument is evaluated many times, so the arguments
 * must be plain uint32_t lvalues without side effects.
 */
#define	mix(a, b, c)							\
do {									\
	a -= b; a -= c; a ^= (c >> 13);					\
	b -= c; b -= a; b ^= (a << 8);					\
	c -= a; c -= b; c ^= (b >> 13);					\
	a -= b; a -= c; a ^= (c >> 12);					\
	b -= c; b -= a; b ^= (a << 16);					\
	c -= a; c -= b; c ^= (b >> 5);					\
	a -= b; a -= c; a ^= (c >> 3);					\
	b -= c; b -= a; b ^= (a << 10);					\
	c -= a; c -= b; c ^= (b >> 15);					\
} while (/*CONSTCOND*/0)
3032 
static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	/* Golden-ratio constants; c is seeded with the per-bridge key. */
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	/* Fold all six bytes of the Ethernet address into a and b. */
	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return (c & BRIDGE_RTHASH_MASK);
}
3049 
3050 #undef mix
3051 
3052 static int
3053 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3054 {
3055 	int i, d;
3056 
3057 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3058 		d = ((int)a[i]) - ((int)b[i]);
3059 	}
3060 
3061 	return (d);
3062 }
3063 
3064 /*
3065  * bridge_rtnode_lookup:
3066  *
3067  *	Look up a bridge route node for the specified destination.
3068  */
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
{
	struct bridge_rtnode *brt;
	uint32_t hash;
	int dir;

	hash = bridge_rthash(sc, addr);
	LIST_FOREACH(brt, &sc->sc_rthashs[mycpuid][hash], brt_hash) {
		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
		if (dir == 0)
			return (brt);
		/*
		 * bridge_rtnode_insert() keeps each chain sorted in
		 * descending address order, so once the sought address
		 * is greater than the current node's it cannot appear
		 * later in the chain.
		 */
		if (dir > 0)
			return (NULL);
	}

	return (NULL);
}
3087 
3088 /*
3089  * bridge_rtnode_insert:
3090  *
3091  *	Insert the specified bridge node into the route table.
3092  *	Caller has to make sure that rtnode does not exist.
3093  */
static void
bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	hash = bridge_rthash(sc, brt->brt_addr);

	lbrt = LIST_FIRST(&sc->sc_rthashs[mycpuid][hash]);
	if (lbrt == NULL) {
		/* Empty chain: the new node becomes the head. */
		LIST_INSERT_HEAD(&sc->sc_rthashs[mycpuid][hash], brt, brt_hash);
		goto out;
	}

	/*
	 * Walk the chain and insert so that descending address order
	 * is preserved; bridge_rtnode_lookup() relies on this order
	 * for its early termination.
	 */
	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		KASSERT(dir != 0, ("rtnode already exist\n"));

		if (dir > 0) {
			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

	panic("no suitable position found for rtnode\n");
out:
	LIST_INSERT_HEAD(&sc->sc_rtlists[mycpuid], brt, brt_list);
	if (mycpuid == 0) {
		/*
		 * Update the brtcnt.
		 * We only need to do it once and we do it on CPU0.
		 */
		sc->sc_brtcnt++;
	}
}
3135 
3136 /*
3137  * bridge_rtnode_destroy:
3138  *
3139  *	Destroy a bridge rtnode.
3140  */
static void
bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	/* Unlink from both the hash chain and the per-cpu list. */
	LIST_REMOVE(brt, brt_hash);
	LIST_REMOVE(brt, brt_list);

	if (mycpuid + 1 == ncpus) {
		/* Free rtinfo associated with rtnode on the last cpu */
		kfree(brt->brt_info, M_DEVBUF);
	}
	kfree(brt, M_DEVBUF);

	if (mycpuid == 0) {
		/* Update brtcnt only on CPU0 */
		sc->sc_brtcnt--;
	}
}
3158 
3159 static __inline int
3160 bridge_post_pfil(struct mbuf *m)
3161 {
3162 	if (m->m_pkthdr.fw_flags & IPFORWARD_MBUF_TAGGED)
3163 		return EOPNOTSUPP;
3164 
3165 	/* Not yet */
3166 	if (m->m_pkthdr.fw_flags & DUMMYNET_MBUF_TAGGED)
3167 		return EOPNOTSUPP;
3168 
3169 	return 0;
3170 }
3171 
3172 /*
3173  * Send bridge packets through pfil if they are one of the types pfil can deal
3174  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3175  * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
3176  * that interface.
3177  */
3178 static int
3179 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3180 {
3181 	int snap, error, i, hlen;
3182 	struct ether_header *eh1, eh2;
3183 	struct ip *ip;
3184 	struct llc llc1;
3185 	u_int16_t ether_type;
3186 
3187 	snap = 0;
3188 	error = -1;	/* Default error if not error == 0 */
3189 
3190 	if (pfil_bridge == 0 && pfil_member == 0)
3191 		return (0); /* filtering is disabled */
3192 
3193 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3194 	if ((*mp)->m_len < i) {
3195 		*mp = m_pullup(*mp, i);
3196 		if (*mp == NULL) {
3197 			kprintf("%s: m_pullup failed\n", __func__);
3198 			return (-1);
3199 		}
3200 	}
3201 
3202 	eh1 = mtod(*mp, struct ether_header *);
3203 	ether_type = ntohs(eh1->ether_type);
3204 
3205 	/*
3206 	 * Check for SNAP/LLC.
3207 	 */
3208 	if (ether_type < ETHERMTU) {
3209 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3210 
3211 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3212 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3213 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3214 		    llc2->llc_control == LLC_UI) {
3215 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3216 			snap = 1;
3217 		}
3218 	}
3219 
3220 	/*
3221 	 * If we're trying to filter bridge traffic, don't look at anything
3222 	 * other than IP and ARP traffic.  If the filter doesn't understand
3223 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3224 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3225 	 * but of course we don't have an AppleTalk filter to begin with.
3226 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3227 	 * ARP traffic.)
3228 	 */
3229 	switch (ether_type) {
3230 	case ETHERTYPE_ARP:
3231 	case ETHERTYPE_REVARP:
3232 		return (0); /* Automatically pass */
3233 
3234 	case ETHERTYPE_IP:
3235 #ifdef INET6
3236 	case ETHERTYPE_IPV6:
3237 #endif /* INET6 */
3238 		break;
3239 
3240 	default:
3241 		/*
3242 		 * Check to see if the user wants to pass non-ip
3243 		 * packets, these will not be checked by pfil(9)
3244 		 * and passed unconditionally so the default is to drop.
3245 		 */
3246 		if (pfil_onlyip)
3247 			goto bad;
3248 	}
3249 
3250 	/* Strip off the Ethernet header and keep a copy. */
3251 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3252 	m_adj(*mp, ETHER_HDR_LEN);
3253 
3254 	/* Strip off snap header, if present */
3255 	if (snap) {
3256 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3257 		m_adj(*mp, sizeof(struct llc));
3258 	}
3259 
3260 	/*
3261 	 * Check the IP header for alignment and errors
3262 	 */
3263 	if (dir == PFIL_IN) {
3264 		switch (ether_type) {
3265 		case ETHERTYPE_IP:
3266 			error = bridge_ip_checkbasic(mp);
3267 			break;
3268 #ifdef INET6
3269 		case ETHERTYPE_IPV6:
3270 			error = bridge_ip6_checkbasic(mp);
3271 			break;
3272 #endif /* INET6 */
3273 		default:
3274 			error = 0;
3275 		}
3276 		if (error)
3277 			goto bad;
3278 	}
3279 
3280 	error = 0;
3281 
3282 	/*
3283 	 * Run the packet through pfil
3284 	 */
3285 	switch (ether_type) {
3286 	case ETHERTYPE_IP:
3287 		/*
3288 		 * before calling the firewall, swap fields the same as
3289 		 * IP does. here we assume the header is contiguous
3290 		 */
3291 		ip = mtod(*mp, struct ip *);
3292 
3293 		ip->ip_len = ntohs(ip->ip_len);
3294 		ip->ip_off = ntohs(ip->ip_off);
3295 
3296 		/*
3297 		 * Run pfil on the member interface and the bridge, both can
3298 		 * be skipped by clearing pfil_member or pfil_bridge.
3299 		 *
3300 		 * Keep the order:
3301 		 *   in_if -> bridge_if -> out_if
3302 		 */
3303 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) {
3304 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3305 			if (*mp == NULL || error != 0) /* filter may consume */
3306 				break;
3307 			error = bridge_post_pfil(*mp);
3308 			if (error)
3309 				break;
3310 		}
3311 
3312 		if (pfil_member && ifp != NULL) {
3313 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir);
3314 			if (*mp == NULL || error != 0) /* filter may consume */
3315 				break;
3316 			error = bridge_post_pfil(*mp);
3317 			if (error)
3318 				break;
3319 		}
3320 
3321 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL) {
3322 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir);
3323 			if (*mp == NULL || error != 0) /* filter may consume */
3324 				break;
3325 			error = bridge_post_pfil(*mp);
3326 			if (error)
3327 				break;
3328 		}
3329 
3330 		/* check if we need to fragment the packet */
3331 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3332 			i = (*mp)->m_pkthdr.len;
3333 			if (i > ifp->if_mtu) {
3334 				error = bridge_fragment(ifp, *mp, &eh2, snap,
3335 					    &llc1);
3336 				return (error);
3337 			}
3338 		}
3339 
3340 		/* Recalculate the ip checksum and restore byte ordering */
3341 		ip = mtod(*mp, struct ip *);
3342 		hlen = ip->ip_hl << 2;
3343 		if (hlen < sizeof(struct ip))
3344 			goto bad;
3345 		if (hlen > (*mp)->m_len) {
3346 			if ((*mp = m_pullup(*mp, hlen)) == 0)
3347 				goto bad;
3348 			ip = mtod(*mp, struct ip *);
3349 			if (ip == NULL)
3350 				goto bad;
3351 		}
3352 		ip->ip_len = htons(ip->ip_len);
3353 		ip->ip_off = htons(ip->ip_off);
3354 		ip->ip_sum = 0;
3355 		if (hlen == sizeof(struct ip))
3356 			ip->ip_sum = in_cksum_hdr(ip);
3357 		else
3358 			ip->ip_sum = in_cksum(*mp, hlen);
3359 
3360 		break;
3361 #ifdef INET6
3362 	case ETHERTYPE_IPV6:
3363 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3364 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3365 					dir);
3366 
3367 		if (*mp == NULL || error != 0) /* filter may consume */
3368 			break;
3369 
3370 		if (pfil_member && ifp != NULL)
3371 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3372 					dir);
3373 
3374 		if (*mp == NULL || error != 0) /* filter may consume */
3375 			break;
3376 
3377 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3378 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3379 					dir);
3380 		break;
3381 #endif
3382 	default:
3383 		error = 0;
3384 		break;
3385 	}
3386 
3387 	if (*mp == NULL)
3388 		return (error);
3389 	if (error != 0)
3390 		goto bad;
3391 
3392 	error = -1;
3393 
3394 	/*
3395 	 * Finally, put everything back the way it was and return
3396 	 */
3397 	if (snap) {
3398 		M_PREPEND(*mp, sizeof(struct llc), MB_DONTWAIT);
3399 		if (*mp == NULL)
3400 			return (error);
3401 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3402 	}
3403 
3404 	M_PREPEND(*mp, ETHER_HDR_LEN, MB_DONTWAIT);
3405 	if (*mp == NULL)
3406 		return (error);
3407 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3408 
3409 	return (0);
3410 
3411 bad:
3412 	m_freem(*mp);
3413 	*mp = NULL;
3414 	return (error);
3415 }
3416 
3417 /*
3418  * Perform basic checks on header size since
3419  * pfil assumes ip_input has already processed
3420  * it for it.  Cut-and-pasted from ip_input.c.
3421  * Given how simple the IPv6 version is,
3422  * does the IPv4 version really need to be
3423  * this complicated?
3424  *
3425  * XXX Should we update ipstat here, or not?
3426  * XXX Right now we update ipstat but not
3427  * XXX csum_counter.
3428  */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	/* No packet at all: nothing to validate. */
	if (*mp == NULL)
		return (-1);
	/*
	 * Disabled alignment fixup: would copy a misaligned header into
	 * a fresh mbuf with room for link headers.  Currently compiled out.
	 */
#if notyet
	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
			(max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			ipstat.ips_toosmall++;
			goto bad;
		}
	} else
#endif
#ifndef __predict_false
#define __predict_false(x) x
#endif
	/* Make sure the base IP header is contiguous in the first mbuf. */
	 if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			ipstat.ips_toosmall++;
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) goto bad;

	/* Version check. */
	if (ip->ip_v != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}
	/* Header length: ip_hl counts 32-bit words. */
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) { /* minimum header length */
		ipstat.ips_badhlen++;
		goto bad;
	}
	/* Pull the full header (including options) into the first mbuf. */
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == 0) {
			ipstat.ips_badhlen++;
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) goto bad;
	}

	/*
	 * Validate the header checksum, trusting the hardware when it
	 * has already checked it (CSUM_IP_CHECKED/CSUM_IP_VALID).
	 */
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		ipstat.ips_badsum++;
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		ipstat.ips_badlen++;
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is as at least much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat.ips_tooshort++;
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	/*
	 * Hand the (possibly replaced, possibly NULL after a failed
	 * pullup) mbuf back; the caller is responsible for freeing it.
	 */
	*mp = m;
	return (-1);
}
3522 
3523 #ifdef INET6
3524 /*
3525  * Same as above, but for IPv6.
3526  * Cut-and-pasted from ip6_input.c.
3527  * XXX Should we update ip6stat, or not?
3528  */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
#if notyet
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
			    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else
#endif
	/* Pull the fixed IPv6 header into the first mbuf. */
	if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);

	/* Version check: upper nibble of ip6_vfc must be 6. */
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		ip6stat.ip6s_badvers++;
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	/* Return the (possibly replaced) mbuf; caller frees on error. */
	*mp = m;
	return (-1);
}
3578 #endif /* INET6 */
3579 
3580 /*
3581  * bridge_fragment:
3582  *
3583  *	Return a fragmented mbuf chain.
3584  */
3585 static int
3586 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3587     int snap, struct llc *llc)
3588 {
3589 	struct mbuf *m0;
3590 	struct ip *ip;
3591 	int error = -1;
3592 
3593 	if (m->m_len < sizeof(struct ip) &&
3594 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3595 		goto out;
3596 	ip = mtod(m, struct ip *);
3597 
3598 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3599 		    CSUM_DELAY_IP);
3600 	if (error)
3601 		goto out;
3602 
3603 	/* walk the chain and re-add the Ethernet header */
3604 	for (m0 = m; m0; m0 = m0->m_nextpkt) {
3605 		if (error == 0) {
3606 			if (snap) {
3607 				M_PREPEND(m0, sizeof(struct llc), MB_DONTWAIT);
3608 				if (m0 == NULL) {
3609 					error = ENOBUFS;
3610 					continue;
3611 				}
3612 				bcopy(llc, mtod(m0, caddr_t),
3613 				    sizeof(struct llc));
3614 			}
3615 			M_PREPEND(m0, ETHER_HDR_LEN, MB_DONTWAIT);
3616 			if (m0 == NULL) {
3617 				error = ENOBUFS;
3618 				continue;
3619 			}
3620 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3621 		} else
3622 			m_freem(m);
3623 	}
3624 
3625 	if (error == 0)
3626 		ipstat.ips_fragmented++;
3627 
3628 	return (error);
3629 
3630 out:
3631 	if (m != NULL)
3632 		m_freem(m);
3633 	return (error);
3634 }
3635 
3636 static void
3637 bridge_enqueue_handler(struct netmsg *nmsg)
3638 {
3639 	struct netmsg_packet *nmp;
3640 	struct ifnet *dst_ifp;
3641 	struct mbuf *m;
3642 
3643 	nmp = (struct netmsg_packet *)nmsg;
3644 	m = nmp->nm_packet;
3645 	dst_ifp = nmp->nm_netmsg.nm_lmsg.u.ms_resultp;
3646 
3647 	bridge_handoff(dst_ifp, m);
3648 }
3649 
3650 static void
3651 bridge_handoff(struct ifnet *dst_ifp, struct mbuf *m)
3652 {
3653 	struct mbuf *m0;
3654 
3655 	/* We may be sending a fragment so traverse the mbuf */
3656 	for (; m; m = m0) {
3657 		struct altq_pktattr pktattr;
3658 
3659 		m0 = m->m_nextpkt;
3660 		m->m_nextpkt = NULL;
3661 
3662 		if (ifq_is_enabled(&dst_ifp->if_snd))
3663 			altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
3664 
3665 		ifq_dispatch(dst_ifp, m, &pktattr);
3666 	}
3667 }
3668 
3669 static void
3670 bridge_control_dispatch(struct netmsg *nmsg)
3671 {
3672 	struct netmsg_brctl *bc_msg = (struct netmsg_brctl *)nmsg;
3673 	struct ifnet *bifp = bc_msg->bc_sc->sc_ifp;
3674 	int error;
3675 
3676 	lwkt_serialize_enter(bifp->if_serializer);
3677 	error = bc_msg->bc_func(bc_msg->bc_sc, bc_msg->bc_arg);
3678 	lwkt_serialize_exit(bifp->if_serializer);
3679 
3680 	lwkt_replymsg(&nmsg->nm_lmsg, error);
3681 }
3682 
/*
 * Run a bridge configuration function via the bridge config port.
 * Called with the bridge's serializer held; returns the function's
 * error code.  NOTE(review): 'cmd' is currently unused here --
 * presumably kept for interface symmetry with the ioctl path.
 */
static int
bridge_control(struct bridge_softc *sc, u_long cmd,
	       bridge_ctl_t bc_func, void *bc_arg)
{
	struct ifnet *bifp = sc->sc_ifp;
	struct netmsg_brctl bc_msg;
	struct netmsg *nmsg;
	int error;

	ASSERT_SERIALIZED(bifp->if_serializer);

	bzero(&bc_msg, sizeof(bc_msg));
	nmsg = &bc_msg.bc_nmsg;

	netmsg_init(nmsg, &curthread->td_msgport, 0, bridge_control_dispatch);
	bc_msg.bc_func = bc_func;
	bc_msg.bc_sc = sc;
	bc_msg.bc_arg = bc_arg;

	/*
	 * Release the serializer while the synchronous message is
	 * processed: bridge_control_dispatch() re-enters this same
	 * serializer, so holding it across lwkt_domsg() would deadlock.
	 * Re-acquire before returning to preserve the caller's locking
	 * state.
	 */
	lwkt_serialize_exit(bifp->if_serializer);
	error = lwkt_domsg(BRIDGE_CFGPORT, &nmsg->nm_lmsg, 0);
	lwkt_serialize_enter(bifp->if_serializer);
	return error;
}
3707 
3708 static void
3709 bridge_add_bif_handler(struct netmsg *nmsg)
3710 {
3711 	struct netmsg_braddbif *amsg = (struct netmsg_braddbif *)nmsg;
3712 	struct bridge_softc *sc;
3713 	struct bridge_iflist *bif;
3714 
3715 	sc = amsg->br_softc;
3716 
3717 	bif = kmalloc(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO);
3718 	bif->bif_ifp = amsg->br_bif_ifp;
3719 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
3720 	bif->bif_onlist = 1;
3721 	bif->bif_info = amsg->br_bif_info;
3722 
3723 	LIST_INSERT_HEAD(&sc->sc_iflists[mycpuid], bif, bif_next);
3724 
3725 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3726 }
3727 
3728 static void
3729 bridge_add_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3730 	       struct ifnet *ifp)
3731 {
3732 	struct netmsg_braddbif amsg;
3733 
3734 	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3735 
3736 	netmsg_init(&amsg.br_nmsg, &curthread->td_msgport, 0,
3737 		    bridge_add_bif_handler);
3738 	amsg.br_softc = sc;
3739 	amsg.br_bif_info = bif_info;
3740 	amsg.br_bif_ifp = ifp;
3741 
3742 	ifnet_domsg(&amsg.br_nmsg.nm_lmsg, 0);
3743 }
3744 
3745 static void
3746 bridge_del_bif_handler(struct netmsg *nmsg)
3747 {
3748 	struct netmsg_brdelbif *dmsg = (struct netmsg_brdelbif *)nmsg;
3749 	struct bridge_softc *sc;
3750 	struct bridge_iflist *bif;
3751 
3752 	sc = dmsg->br_softc;
3753 
3754 	/*
3755 	 * Locate the bif associated with the br_bif_info
3756 	 * on the current CPU
3757 	 */
3758 	bif = bridge_lookup_member_ifinfo(sc, dmsg->br_bif_info);
3759 	KKASSERT(bif != NULL && bif->bif_onlist);
3760 
3761 	/* Remove the bif from the current CPU's iflist */
3762 	bif->bif_onlist = 0;
3763 	LIST_REMOVE(bif, bif_next);
3764 
3765 	/* Save the removed bif for later freeing */
3766 	LIST_INSERT_HEAD(dmsg->br_bif_list, bif, bif_next);
3767 
3768 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3769 }
3770 
3771 static void
3772 bridge_del_bif(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3773 	       struct bridge_iflist_head *saved_bifs)
3774 {
3775 	struct netmsg_brdelbif dmsg;
3776 
3777 	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3778 
3779 	netmsg_init(&dmsg.br_nmsg, &curthread->td_msgport, 0,
3780 		    bridge_del_bif_handler);
3781 	dmsg.br_softc = sc;
3782 	dmsg.br_bif_info = bif_info;
3783 	dmsg.br_bif_list = saved_bifs;
3784 
3785 	ifnet_domsg(&dmsg.br_nmsg.nm_lmsg, 0);
3786 }
3787 
3788 static void
3789 bridge_set_bifflags_handler(struct netmsg *nmsg)
3790 {
3791 	struct netmsg_brsflags *smsg = (struct netmsg_brsflags *)nmsg;
3792 	struct bridge_softc *sc;
3793 	struct bridge_iflist *bif;
3794 
3795 	sc = smsg->br_softc;
3796 
3797 	/*
3798 	 * Locate the bif associated with the br_bif_info
3799 	 * on the current CPU
3800 	 */
3801 	bif = bridge_lookup_member_ifinfo(sc, smsg->br_bif_info);
3802 	KKASSERT(bif != NULL && bif->bif_onlist);
3803 
3804 	bif->bif_flags = smsg->br_bif_flags;
3805 
3806 	ifnet_forwardmsg(&nmsg->nm_lmsg, mycpuid + 1);
3807 }
3808 
3809 static void
3810 bridge_set_bifflags(struct bridge_softc *sc, struct bridge_ifinfo *bif_info,
3811 		    uint32_t bif_flags)
3812 {
3813 	struct netmsg_brsflags smsg;
3814 
3815 	ASSERT_NOT_SERIALIZED(sc->sc_ifp->if_serializer);
3816 
3817 	netmsg_init(&smsg.br_nmsg, &curthread->td_msgport, 0,
3818 		    bridge_set_bifflags_handler);
3819 	smsg.br_softc = sc;
3820 	smsg.br_bif_info = bif_info;
3821 	smsg.br_bif_flags = bif_flags;
3822 
3823 	ifnet_domsg(&smsg.br_nmsg.nm_lmsg, 0);
3824 }
3825