xref: /netbsd/sys/net/if_bridge.c (revision 1149f061)
1 /*	$NetBSD: if_bridge.c,v 1.189 2022/07/29 07:58:18 skrll Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogenous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.189 2022/07/29 07:58:18 skrll Exp $");
84 
85 #ifdef _KERNEL_OPT
86 #include "opt_inet.h"
87 #include "opt_net_mpsafe.h"
88 #endif /* _KERNEL_OPT */
89 
90 #include <sys/param.h>
91 #include <sys/kernel.h>
92 #include <sys/mbuf.h>
93 #include <sys/queue.h>
94 #include <sys/socket.h>
95 #include <sys/socketvar.h> /* for softnet_lock */
96 #include <sys/sockio.h>
97 #include <sys/systm.h>
98 #include <sys/proc.h>
99 #include <sys/pool.h>
100 #include <sys/kauth.h>
101 #include <sys/cpu.h>
102 #include <sys/cprng.h>
103 #include <sys/mutex.h>
104 #include <sys/kmem.h>
105 
106 #include <net/bpf.h>
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_llc.h>
111 
112 #include <net/if_ether.h>
113 #include <net/if_bridgevar.h>
114 #include <net/ether_sw_offload.h>
115 
116 /* Used for bridge_ip[6]_checkbasic */
117 #include <netinet/in.h>
118 #include <netinet/in_systm.h>
119 #include <netinet/ip.h>
120 #include <netinet/ip_var.h>
121 #include <netinet/ip_private.h>		/* XXX */
122 #include <netinet/ip6.h>
123 #include <netinet6/in6_var.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/ip6_private.h>	/* XXX */
126 
127 /*
128  * Size of the route hash table.  Must be a power of two.
129  */
130 #ifndef BRIDGE_RTHASH_SIZE
131 #define	BRIDGE_RTHASH_SIZE		1024
132 #endif
133 
134 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
135 
136 #include "carp.h"
137 #if NCARP > 0
138 #include <netinet/in.h>
139 #include <netinet/in_var.h>
140 #include <netinet/ip_carp.h>
141 #endif
142 
143 #include "ioconf.h"
144 
145 __CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf));
146 __CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len));
147 __CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf));
148 
149 /*
150  * Maximum number of addresses to cache.
151  */
152 #ifndef BRIDGE_RTABLE_MAX
153 #define	BRIDGE_RTABLE_MAX		100
154 #endif
155 
156 /*
157  * Spanning tree defaults.
158  */
159 #define	BSTP_DEFAULT_MAX_AGE		(20 * 256)
160 #define	BSTP_DEFAULT_HELLO_TIME		(2 * 256)
161 #define	BSTP_DEFAULT_FORWARD_DELAY	(15 * 256)
162 #define	BSTP_DEFAULT_HOLD_TIME		(1 * 256)
163 #define	BSTP_DEFAULT_BRIDGE_PRIORITY	0x8000
164 #define	BSTP_DEFAULT_PORT_PRIORITY	0x80
165 #define	BSTP_DEFAULT_PATH_COST		55
166 
167 /*
168  * Timeout (in seconds) for entries learned dynamically.
169  */
170 #ifndef BRIDGE_RTABLE_TIMEOUT
171 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
172 #endif
173 
174 /*
175  * Number of seconds between walks of the route list.
176  */
177 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
178 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
179 #endif
180 
181 #define BRIDGE_RT_LOCK(_sc)	mutex_enter((_sc)->sc_rtlist_lock)
182 #define BRIDGE_RT_UNLOCK(_sc)	mutex_exit((_sc)->sc_rtlist_lock)
183 #define BRIDGE_RT_LOCKED(_sc)	mutex_owned((_sc)->sc_rtlist_lock)
184 
185 #define BRIDGE_RT_PSZ_PERFORM(_sc) \
186 				pserialize_perform((_sc)->sc_rtlist_psz)
187 
188 #define BRIDGE_RT_RENTER(__s)	do { __s = pserialize_read_enter(); } while (0)
189 #define BRIDGE_RT_REXIT(__s)	do { pserialize_read_exit(__s); } while (0)
190 
191 #define BRIDGE_RTLIST_READER_FOREACH(_brt, _sc)			\
192 	PSLIST_READER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
193 	    struct bridge_rtnode, brt_list)
194 #define BRIDGE_RTLIST_WRITER_FOREACH(_brt, _sc)			\
195 	PSLIST_WRITER_FOREACH((_brt), &((_sc)->sc_rtlist),		\
196 	    struct bridge_rtnode, brt_list)
197 #define BRIDGE_RTLIST_WRITER_INSERT_HEAD(_sc, _brt)			\
198 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rtlist, brt, brt_list)
199 #define BRIDGE_RTLIST_WRITER_REMOVE(_brt)				\
200 	PSLIST_WRITER_REMOVE((_brt), brt_list)
201 
202 #define BRIDGE_RTHASH_READER_FOREACH(_brt, _sc, _hash)			\
203 	PSLIST_READER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
204 	    struct bridge_rtnode, brt_hash)
205 #define BRIDGE_RTHASH_WRITER_FOREACH(_brt, _sc, _hash)			\
206 	PSLIST_WRITER_FOREACH((_brt), &(_sc)->sc_rthash[(_hash)],	\
207 	    struct bridge_rtnode, brt_hash)
208 #define BRIDGE_RTHASH_WRITER_INSERT_HEAD(_sc, _hash, _brt)		\
209 	PSLIST_WRITER_INSERT_HEAD(&(_sc)->sc_rthash[(_hash)], brt, brt_hash)
210 #define BRIDGE_RTHASH_WRITER_INSERT_AFTER(_brt, _new)			\
211 	PSLIST_WRITER_INSERT_AFTER((_brt), (_new), brt_hash)
212 #define BRIDGE_RTHASH_WRITER_REMOVE(_brt)				\
213 	PSLIST_WRITER_REMOVE((_brt), brt_hash)
214 
215 #ifdef NET_MPSAFE
216 #define DECLARE_LOCK_VARIABLE
217 #define ACQUIRE_GLOBAL_LOCKS()	do { } while (0)
218 #define RELEASE_GLOBAL_LOCKS()	do { } while (0)
219 #else
220 #define DECLARE_LOCK_VARIABLE	int __s
221 #define ACQUIRE_GLOBAL_LOCKS()	do {					\
222 					KERNEL_LOCK(1, NULL);		\
223 					mutex_enter(softnet_lock);	\
224 					__s = splsoftnet();		\
225 				} while (0)
226 #define RELEASE_GLOBAL_LOCKS()	do {					\
227 					splx(__s);			\
228 					mutex_exit(softnet_lock);	\
229 					KERNEL_UNLOCK_ONE(NULL);	\
230 				} while (0)
231 #endif
232 
233 struct psref_class *bridge_psref_class __read_mostly;
234 
235 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
236 
237 static struct pool bridge_rtnode_pool;
238 
239 static int	bridge_clone_create(struct if_clone *, int);
240 static int	bridge_clone_destroy(struct ifnet *);
241 
242 static int	bridge_ioctl(struct ifnet *, u_long, void *);
243 static int	bridge_init(struct ifnet *);
244 static void	bridge_stop(struct ifnet *, int);
245 static void	bridge_start(struct ifnet *);
246 static void	bridge_ifdetach(void *);
247 
248 static void	bridge_input(struct ifnet *, struct mbuf *);
249 static void	bridge_forward(struct bridge_softc *, struct mbuf *);
250 
251 static void	bridge_timer(void *);
252 
253 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
254 				 struct mbuf *);
255 
256 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
257 				struct ifnet *, int, uint8_t);
258 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
259 static void	bridge_rttrim(struct bridge_softc *);
260 static void	bridge_rtage(struct bridge_softc *);
261 static void	bridge_rtage_work(struct work *, void *);
262 static void	bridge_rtflush(struct bridge_softc *, int);
263 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
264 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp);
265 
266 static void	bridge_rtable_init(struct bridge_softc *);
267 static void	bridge_rtable_fini(struct bridge_softc *);
268 
269 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
270 						  const uint8_t *);
271 static int	bridge_rtnode_insert(struct bridge_softc *,
272 				     struct bridge_rtnode *);
273 static void	bridge_rtnode_remove(struct bridge_softc *,
274 				     struct bridge_rtnode *);
275 static void	bridge_rtnode_destroy(struct bridge_rtnode *);
276 
277 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
278 						  const char *name,
279 						  struct psref *);
280 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
281 						     struct ifnet *ifp,
282 						     struct psref *);
283 static void	bridge_release_member(struct bridge_softc *, struct bridge_iflist *,
284                                       struct psref *);
285 static void	bridge_delete_member(struct bridge_softc *,
286 				     struct bridge_iflist *);
287 static void	bridge_acquire_member(struct bridge_softc *sc,
288                                       struct bridge_iflist *,
289                                       struct psref *);
290 
291 static int	bridge_ioctl_add(struct bridge_softc *, void *);
292 static int	bridge_ioctl_del(struct bridge_softc *, void *);
293 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
294 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
295 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
296 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
297 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
298 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
299 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
300 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
301 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
302 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
303 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
304 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
305 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
306 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
307 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
308 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
309 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
310 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
311 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
312 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
313 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
314 static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
315 static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
316 static int	bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
317 static int	bridge_ip_checkbasic(struct mbuf **mp);
318 # ifdef INET6
319 static int	bridge_ip6_checkbasic(struct mbuf **mp);
320 # endif /* INET6 */
321 
322 struct bridge_control {
323 	int	(*bc_func)(struct bridge_softc *, void *);
324 	int	bc_argsize;
325 	int	bc_flags;
326 };
327 
328 #define	BC_F_COPYIN		0x01	/* copy arguments in */
329 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
330 #define	BC_F_SUSER		0x04	/* do super-user check */
331 #define BC_F_XLATEIN		0x08	/* xlate arguments in */
332 #define BC_F_XLATEOUT		0x10	/* xlate arguments out */
333 
334 static const struct bridge_control bridge_control_table[] = {
335 [BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
336 [BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
337 
338 [BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
339 [BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
340 
341 [BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
342 [BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},
343 
344 [OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
345 [OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},
346 
347 [BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
348 
349 [BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
350 [BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},
351 
352 [BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},
353 
354 [BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
355 
356 [BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
357 [BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
358 
359 [BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
360 [BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
361 
362 [BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
363 [BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
364 
365 [BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
366 [BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
367 
368 [BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
369 
370 [BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
371 
372 [BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
373 [BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
374 
375 [BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
376 [BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
377 };
378 
379 static const int bridge_control_table_size = __arraycount(bridge_control_table);
380 
381 static struct if_clone bridge_cloner =
382     IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
383 
384 /*
385  * bridgeattach:
386  *
387  *	Pseudo-device attach routine.
388  */
389 void
bridgeattach(int n)390 bridgeattach(int n)
391 {
392 
393 	pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
394 	    0, 0, 0, "brtpl", NULL, IPL_NET);
395 
396 	bridge_psref_class = psref_class_create("bridge", IPL_SOFTNET);
397 
398 	if_clone_attach(&bridge_cloner);
399 }
400 
401 /*
402  * bridge_clone_create:
403  *
404  *	Create a new bridge instance.
405  */
406 static int
bridge_clone_create(struct if_clone * ifc,int unit)407 bridge_clone_create(struct if_clone *ifc, int unit)
408 {
409 	struct bridge_softc *sc;
410 	struct ifnet *ifp;
411 	int error;
412 
413 	sc = kmem_zalloc(sizeof(*sc),  KM_SLEEP);
414 	ifp = &sc->sc_if;
415 
416 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
417 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
418 	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
419 	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
420 	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
421 	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
422 	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
423 	sc->sc_filter_flags = 0;
424 
425 	/* Initialize our routing table. */
426 	bridge_rtable_init(sc);
427 
428 	error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
429 	    bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
430 	if (error)
431 		panic("%s: workqueue_create %d\n", __func__, error);
432 
433 	callout_init(&sc->sc_brcallout, CALLOUT_MPSAFE);
434 	callout_init(&sc->sc_bstpcallout, CALLOUT_MPSAFE);
435 
436 	mutex_init(&sc->sc_iflist_psref.bip_lock, MUTEX_DEFAULT, IPL_NONE);
437 	PSLIST_INIT(&sc->sc_iflist_psref.bip_iflist);
438 	sc->sc_iflist_psref.bip_psz = pserialize_create();
439 
440 	if_initname(ifp, ifc->ifc_name, unit);
441 	ifp->if_softc = sc;
442 #ifdef NET_MPSAFE
443 	ifp->if_extflags = IFEF_MPSAFE;
444 #endif
445 	ifp->if_mtu = ETHERMTU;
446 	ifp->if_ioctl = bridge_ioctl;
447 	ifp->if_output = bridge_output;
448 	ifp->if_start = bridge_start;
449 	ifp->if_stop = bridge_stop;
450 	ifp->if_init = bridge_init;
451 	ifp->if_type = IFT_BRIDGE;
452 	ifp->if_addrlen = 0;
453 	ifp->if_dlt = DLT_EN10MB;
454 	ifp->if_hdrlen = ETHER_HDR_LEN;
455 	if_initialize(ifp);
456 
457 	/*
458 	 * Set the link state to down.
459 	 * When interfaces are added the link state will reflect
460 	 * the best link state of the combined interfaces.
461 	 */
462 	ifp->if_link_state = LINK_STATE_DOWN;
463 
464 	if_alloc_sadl(ifp);
465 	if_register(ifp);
466 
467 	return 0;
468 }
469 
470 /*
471  * bridge_clone_destroy:
472  *
473  *	Destroy a bridge instance.
474  */
475 static int
bridge_clone_destroy(struct ifnet * ifp)476 bridge_clone_destroy(struct ifnet *ifp)
477 {
478 	struct bridge_softc *sc = ifp->if_softc;
479 	struct bridge_iflist *bif;
480 
481 	if ((ifp->if_flags & IFF_RUNNING) != 0)
482 		bridge_stop(ifp, 1);
483 
484 	BRIDGE_LOCK(sc);
485 	for (;;) {
486 		bif = PSLIST_WRITER_FIRST(&sc->sc_iflist_psref.bip_iflist, struct bridge_iflist,
487 		    bif_next);
488 		if (bif == NULL)
489 			break;
490 		bridge_delete_member(sc, bif);
491 	}
492 	PSLIST_DESTROY(&sc->sc_iflist_psref.bip_iflist);
493 	BRIDGE_UNLOCK(sc);
494 
495 	if_detach(ifp);
496 
497 	/* Tear down the routing table. */
498 	bridge_rtable_fini(sc);
499 
500 	pserialize_destroy(sc->sc_iflist_psref.bip_psz);
501 	mutex_destroy(&sc->sc_iflist_psref.bip_lock);
502 	callout_destroy(&sc->sc_brcallout);
503 	callout_destroy(&sc->sc_bstpcallout);
504 	workqueue_destroy(sc->sc_rtage_wq);
505 	kmem_free(sc, sizeof(*sc));
506 
507 	return 0;
508 }
509 
510 /*
511  * bridge_ioctl:
512  *
513  *	Handle a control request from the operator.
514  */
515 static int
bridge_ioctl(struct ifnet * ifp,u_long cmd,void * data)516 bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
517 {
518 	struct bridge_softc *sc = ifp->if_softc;
519 	struct lwp *l = curlwp;	/* XXX */
520 	union {
521 		struct ifbreq ifbreq;
522 		struct ifbifconf ifbifconf;
523 		struct ifbareq ifbareq;
524 		struct ifbaconf ifbaconf;
525 		struct ifbrparam ifbrparam;
526 	} args;
527 	struct ifdrv *ifd = (struct ifdrv *) data;
528 	const struct bridge_control *bc = NULL; /* XXXGCC */
529 	int error = 0;
530 
531 	/* Authorize command before calling splsoftnet(). */
532 	switch (cmd) {
533 	case SIOCGDRVSPEC:
534 	case SIOCSDRVSPEC:
535 		if (ifd->ifd_cmd >= bridge_control_table_size
536 		    || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
537 			error = EINVAL;
538 			return error;
539 		}
540 
541 		/* We only care about BC_F_SUSER at this point. */
542 		if ((bc->bc_flags & BC_F_SUSER) == 0)
543 			break;
544 
545 		error = kauth_authorize_network(l->l_cred,
546 		    KAUTH_NETWORK_INTERFACE_BRIDGE,
547 		    cmd == SIOCGDRVSPEC ?
548 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
549 		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
550 		     ifd, NULL, NULL);
551 		if (error)
552 			return error;
553 
554 		break;
555 	}
556 
557 	const int s = splsoftnet();
558 
559 	switch (cmd) {
560 	case SIOCGDRVSPEC:
561 	case SIOCSDRVSPEC:
562 		KASSERT(bc != NULL);
563 		if (cmd == SIOCGDRVSPEC &&
564 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
565 			error = EINVAL;
566 			break;
567 		}
568 		else if (cmd == SIOCSDRVSPEC &&
569 		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
570 			error = EINVAL;
571 			break;
572 		}
573 
574 		/* BC_F_SUSER is checked above, before splsoftnet(). */
575 
576 		if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
577 		    && (ifd->ifd_len != bc->bc_argsize
578 			|| ifd->ifd_len > sizeof(args))) {
579 			error = EINVAL;
580 			break;
581 		}
582 
583 		memset(&args, 0, sizeof(args));
584 		if (bc->bc_flags & BC_F_COPYIN) {
585 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
586 			if (error)
587 				break;
588 		} else if (bc->bc_flags & BC_F_XLATEIN) {
589 			args.ifbifconf.ifbic_len = ifd->ifd_len;
590 			args.ifbifconf.ifbic_buf = ifd->ifd_data;
591 		}
592 
593 		error = (*bc->bc_func)(sc, &args);
594 		if (error)
595 			break;
596 
597 		if (bc->bc_flags & BC_F_COPYOUT) {
598 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
599 		} else if (bc->bc_flags & BC_F_XLATEOUT) {
600 			ifd->ifd_len = args.ifbifconf.ifbic_len;
601 			ifd->ifd_data = args.ifbifconf.ifbic_buf;
602 		}
603 		break;
604 
605 	case SIOCSIFFLAGS:
606 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
607 			break;
608 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
609 		case IFF_RUNNING:
610 			/*
611 			 * If interface is marked down and it is running,
612 			 * then stop and disable it.
613 			 */
614 			if_stop(ifp, 1);
615 			break;
616 		case IFF_UP:
617 			/*
618 			 * If interface is marked up and it is stopped, then
619 			 * start it.
620 			 */
621 			error = if_init(ifp);
622 			break;
623 		default:
624 			break;
625 		}
626 		break;
627 
628 	case SIOCSIFMTU:
629 		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
630 			error = 0;
631 		break;
632 
633         case SIOCGIFCAP:
634 	    {
635 		struct ifcapreq *ifcr = (struct ifcapreq *)data;
636                 ifcr->ifcr_capabilities = sc->sc_capenable;
637                 ifcr->ifcr_capenable = sc->sc_capenable;
638 		break;
639 	    }
640 
641 	default:
642 		error = ifioctl_common(ifp, cmd, data);
643 		break;
644 	}
645 
646 	splx(s);
647 
648 	return error;
649 }
650 
651 /*
652  * bridge_lookup_member:
653  *
654  *	Lookup a bridge member interface.
655  */
656 static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc * sc,const char * name,struct psref * psref)657 bridge_lookup_member(struct bridge_softc *sc, const char *name, struct psref *psref)
658 {
659 	struct bridge_iflist *bif;
660 	struct ifnet *ifp;
661 	int s;
662 
663 	BRIDGE_PSZ_RENTER(s);
664 
665 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
666 		ifp = bif->bif_ifp;
667 		if (strcmp(ifp->if_xname, name) == 0)
668 			break;
669 	}
670 	if (bif != NULL)
671 		bridge_acquire_member(sc, bif, psref);
672 
673 	BRIDGE_PSZ_REXIT(s);
674 
675 	return bif;
676 }
677 
678 /*
679  * bridge_lookup_member_if:
680  *
681  *	Lookup a bridge member interface by ifnet*.
682  */
683 static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc * sc,struct ifnet * member_ifp,struct psref * psref)684 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp,
685     struct psref *psref)
686 {
687 	struct bridge_iflist *bif;
688 	int s;
689 
690 	BRIDGE_PSZ_RENTER(s);
691 
692 	bif = member_ifp->if_bridgeif;
693 	if (bif != NULL) {
694 		psref_acquire(psref, &bif->bif_psref,
695 		    bridge_psref_class);
696 	}
697 
698 	BRIDGE_PSZ_REXIT(s);
699 
700 	return bif;
701 }
702 
703 static void
bridge_acquire_member(struct bridge_softc * sc,struct bridge_iflist * bif,struct psref * psref)704 bridge_acquire_member(struct bridge_softc *sc, struct bridge_iflist *bif,
705     struct psref *psref)
706 {
707 
708 	psref_acquire(psref, &bif->bif_psref, bridge_psref_class);
709 }
710 
711 /*
712  * bridge_release_member:
713  *
714  *	Release the specified member interface.
715  */
716 static void
bridge_release_member(struct bridge_softc * sc,struct bridge_iflist * bif,struct psref * psref)717 bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif,
718     struct psref *psref)
719 {
720 
721 	psref_release(psref, &bif->bif_psref, bridge_psref_class);
722 }
723 
724 /*
725  * bridge_delete_member:
726  *
727  *	Delete the specified member interface.
728  */
729 static void
bridge_delete_member(struct bridge_softc * sc,struct bridge_iflist * bif)730 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
731 {
732 	struct ifnet *ifs = bif->bif_ifp;
733 
734 	KASSERT(BRIDGE_LOCKED(sc));
735 
736 	ifs->_if_input = ether_input;
737 	ifs->if_bridge = NULL;
738 	ifs->if_bridgeif = NULL;
739 
740 	PSLIST_WRITER_REMOVE(bif, bif_next);
741 	BRIDGE_PSZ_PERFORM(sc);
742 
743 	if_linkstate_change_disestablish(ifs,
744 	    bif->bif_linkstate_hook, BRIDGE_LOCK_OBJ(sc));
745 	ether_ifdetachhook_disestablish(ifs,
746 	    bif->bif_ifdetach_hook, BRIDGE_LOCK_OBJ(sc));
747 
748 	BRIDGE_UNLOCK(sc);
749 
750 	switch (ifs->if_type) {
751 	case IFT_ETHER:
752 	case IFT_L2TP:
753 		/*
754 		 * Take the interface out of promiscuous mode.
755 		 * Don't call it with holding a spin lock.
756 		 */
757 		(void) ifpromisc(ifs, 0);
758 		IFNET_LOCK(ifs);
759 		(void) ether_disable_vlan_mtu(ifs);
760 		IFNET_UNLOCK(ifs);
761 		break;
762 	default:
763 #ifdef DIAGNOSTIC
764 		panic("%s: impossible", __func__);
765 #endif
766 		break;
767 	}
768 
769 	psref_target_destroy(&bif->bif_psref, bridge_psref_class);
770 
771 	PSLIST_ENTRY_DESTROY(bif, bif_next);
772 	kmem_free(bif, sizeof(*bif));
773 
774 	BRIDGE_LOCK(sc);
775 }
776 
777 /*
778  * bridge_calc_csum_flags:
779  *
780  *	Calculate logical and b/w csum flags each member interface supports.
781  */
782 void
bridge_calc_csum_flags(struct bridge_softc * sc)783 bridge_calc_csum_flags(struct bridge_softc *sc)
784 {
785 	struct bridge_iflist *bif;
786 	struct ifnet *ifs = NULL;
787 	int flags = ~0;
788 	int capenable = ~0;
789 
790 	BRIDGE_LOCK(sc);
791 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
792 		ifs = bif->bif_ifp;
793 		flags &= ifs->if_csum_flags_tx;
794 		capenable &= ifs->if_capenable;
795 	}
796 	sc->sc_csum_flags_tx = flags;
797 	sc->sc_capenable = (ifs != NULL) ? capenable : 0;
798 	BRIDGE_UNLOCK(sc);
799 }
800 
801 /*
802  * bridge_calc_link_state:
803  *
804  *	Calculate the link state based on each member interface.
805  */
806 static void
bridge_calc_link_state(void * xsc)807 bridge_calc_link_state(void *xsc)
808 {
809 	struct bridge_softc *sc = xsc;
810 	struct bridge_iflist *bif;
811 	struct ifnet *ifs;
812 	int link_state = LINK_STATE_DOWN;
813 
814 	BRIDGE_LOCK(sc);
815 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
816 		ifs = bif->bif_ifp;
817 		if (ifs->if_link_state == LINK_STATE_UP) {
818 			link_state = LINK_STATE_UP;
819 			break;
820 		}
821 		if (ifs->if_link_state == LINK_STATE_UNKNOWN)
822 			link_state = LINK_STATE_UNKNOWN;
823 	}
824 	if_link_state_change(&sc->sc_if, link_state);
825 	BRIDGE_UNLOCK(sc);
826 }
827 
828 static int
bridge_ioctl_add(struct bridge_softc * sc,void * arg)829 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
830 {
831 	struct ifbreq *req = arg;
832 	struct bridge_iflist *bif = NULL;
833 	struct ifnet *ifs;
834 	int error = 0;
835 	struct psref psref;
836 
837 	ifs = if_get(req->ifbr_ifsname, &psref);
838 	if (ifs == NULL)
839 		return ENOENT;
840 
841 	if (ifs->if_bridge == sc) {
842 		error = EEXIST;
843 		goto out;
844 	}
845 
846 	if (ifs->if_bridge != NULL) {
847 		error = EBUSY;
848 		goto out;
849 	}
850 
851 	if (ifs->_if_input != ether_input) {
852 		error = EINVAL;
853 		goto out;
854 	}
855 
856 	/* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
857 	if ((ifs->if_flags & IFF_SIMPLEX) == 0) {
858 		error = EINVAL;
859 		goto out;
860 	}
861 
862 	bif = kmem_alloc(sizeof(*bif), KM_SLEEP);
863 
864 	switch (ifs->if_type) {
865 	case IFT_ETHER:
866 		if (sc->sc_if.if_mtu != ifs->if_mtu) {
867 			/* Change MTU of added interface to bridge MTU */
868 			struct ifreq ifr;
869 			memset(&ifr, 0, sizeof(ifr));
870 			ifr.ifr_mtu = sc->sc_if.if_mtu;
871 			IFNET_LOCK(ifs);
872 			error = if_ioctl(ifs, SIOCSIFMTU, &ifr);
873 			IFNET_UNLOCK(ifs);
874 			if (error != 0)
875 				goto out;
876 		}
877 		/* FALLTHROUGH */
878 	case IFT_L2TP:
879 		IFNET_LOCK(ifs);
880 		error = ether_enable_vlan_mtu(ifs);
881 		IFNET_UNLOCK(ifs);
882 		if (error > 0)
883 			goto out;
884 		/*
885 		 * Place the interface into promiscuous mode.
886 		 */
887 		error = ifpromisc(ifs, 1);
888 		if (error)
889 			goto out;
890 		break;
891 	default:
892 		error = EINVAL;
893 		goto out;
894 	}
895 
896 	bif->bif_ifp = ifs;
897 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
898 	bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
899 	bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
900 	bif->bif_linkstate_hook = if_linkstate_change_establish(ifs,
901 	    bridge_calc_link_state, sc);
902 	PSLIST_ENTRY_INIT(bif, bif_next);
903 	psref_target_init(&bif->bif_psref, bridge_psref_class);
904 
905 	BRIDGE_LOCK(sc);
906 
907 	ifs->if_bridge = sc;
908 	ifs->if_bridgeif = bif;
909 	PSLIST_WRITER_INSERT_HEAD(&sc->sc_iflist_psref.bip_iflist, bif, bif_next);
910 	ifs->_if_input = bridge_input;
911 
912 	BRIDGE_UNLOCK(sc);
913 
914 	bif->bif_ifdetach_hook = ether_ifdetachhook_establish(ifs,
915 	    bridge_ifdetach, (void *)ifs);
916 
917 	bridge_calc_csum_flags(sc);
918 	bridge_calc_link_state(sc);
919 
920 	if (sc->sc_if.if_flags & IFF_RUNNING)
921 		bstp_initialization(sc);
922 	else
923 		bstp_stop(sc);
924 
925 out:
926 	if_put(ifs, &psref);
927 	if (error) {
928 		if (bif != NULL)
929 			kmem_free(bif, sizeof(*bif));
930 	}
931 	return error;
932 }
933 
934 static int
bridge_ioctl_del(struct bridge_softc * sc,void * arg)935 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
936 {
937 	struct ifbreq *req = arg;
938 	const char *name = req->ifbr_ifsname;
939 	struct bridge_iflist *bif;
940 	struct ifnet *ifs;
941 
942 	BRIDGE_LOCK(sc);
943 
944 	/*
945 	 * Don't use bridge_lookup_member. We want to get a member
946 	 * with bif_refs == 0.
947 	 */
948 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
949 		ifs = bif->bif_ifp;
950 		if (strcmp(ifs->if_xname, name) == 0)
951 			break;
952 	}
953 
954 	if (bif == NULL) {
955 		BRIDGE_UNLOCK(sc);
956 		return ENOENT;
957 	}
958 
959 	bridge_delete_member(sc, bif);
960 
961 	BRIDGE_UNLOCK(sc);
962 
963 	bridge_rtdelete(sc, ifs);
964 	bridge_calc_csum_flags(sc);
965 	bridge_calc_link_state(sc);
966 
967 	if (sc->sc_if.if_flags & IFF_RUNNING)
968 		bstp_initialization(sc);
969 
970 	return 0;
971 }
972 
973 static int
bridge_ioctl_gifflags(struct bridge_softc * sc,void * arg)974 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
975 {
976 	struct ifbreq *req = arg;
977 	struct bridge_iflist *bif;
978 	struct psref psref;
979 
980 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
981 	if (bif == NULL)
982 		return ENOENT;
983 
984 	req->ifbr_ifsflags = bif->bif_flags;
985 	req->ifbr_state = bif->bif_state;
986 	req->ifbr_priority = bif->bif_priority;
987 	req->ifbr_path_cost = bif->bif_path_cost;
988 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
989 
990 	bridge_release_member(sc, bif, &psref);
991 
992 	return 0;
993 }
994 
995 static int
bridge_ioctl_sifflags(struct bridge_softc * sc,void * arg)996 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
997 {
998 	struct ifbreq *req = arg;
999 	struct bridge_iflist *bif;
1000 	struct psref psref;
1001 
1002 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1003 	if (bif == NULL)
1004 		return ENOENT;
1005 
1006 	if (req->ifbr_ifsflags & IFBIF_STP) {
1007 		switch (bif->bif_ifp->if_type) {
1008 		case IFT_ETHER:
1009 		case IFT_L2TP:
1010 			/* These can do spanning tree. */
1011 			break;
1012 
1013 		default:
1014 			/* Nothing else can. */
1015 			bridge_release_member(sc, bif, &psref);
1016 			return EINVAL;
1017 		}
1018 	}
1019 
1020 	bif->bif_flags = req->ifbr_ifsflags;
1021 
1022 	bridge_release_member(sc, bif, &psref);
1023 
1024 	if (sc->sc_if.if_flags & IFF_RUNNING)
1025 		bstp_initialization(sc);
1026 
1027 	return 0;
1028 }
1029 
1030 static int
bridge_ioctl_scache(struct bridge_softc * sc,void * arg)1031 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1032 {
1033 	struct ifbrparam *param = arg;
1034 
1035 	sc->sc_brtmax = param->ifbrp_csize;
1036 	bridge_rttrim(sc);
1037 
1038 	return 0;
1039 }
1040 
1041 static int
bridge_ioctl_gcache(struct bridge_softc * sc,void * arg)1042 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1043 {
1044 	struct ifbrparam *param = arg;
1045 
1046 	param->ifbrp_csize = sc->sc_brtmax;
1047 
1048 	return 0;
1049 }
1050 
1051 static int
bridge_ioctl_gifs(struct bridge_softc * sc,void * arg)1052 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1053 {
1054 	struct ifbifconf *bifc = arg;
1055 	struct bridge_iflist *bif;
1056 	struct ifbreq *breqs;
1057 	int i, count, error = 0;
1058 
1059 retry:
1060 	BRIDGE_LOCK(sc);
1061 	count = 0;
1062 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1063 		count++;
1064 	BRIDGE_UNLOCK(sc);
1065 
1066 	if (count == 0) {
1067 		bifc->ifbic_len = 0;
1068 		return 0;
1069 	}
1070 
1071 	if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
1072 		/* Tell that a larger buffer is needed */
1073 		bifc->ifbic_len = sizeof(*breqs) * count;
1074 		return 0;
1075 	}
1076 
1077 	breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);
1078 
1079 	BRIDGE_LOCK(sc);
1080 
1081 	i = 0;
1082 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc)
1083 		i++;
1084 	if (i > count) {
1085 		/*
1086 		 * The number of members has been increased.
1087 		 * We need more memory!
1088 		 */
1089 		BRIDGE_UNLOCK(sc);
1090 		kmem_free(breqs, sizeof(*breqs) * count);
1091 		goto retry;
1092 	}
1093 
1094 	i = 0;
1095 	BRIDGE_IFLIST_WRITER_FOREACH(bif, sc) {
1096 		struct ifbreq *breq = &breqs[i++];
1097 		memset(breq, 0, sizeof(*breq));
1098 
1099 		strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname,
1100 		    sizeof(breq->ifbr_ifsname));
1101 		breq->ifbr_ifsflags = bif->bif_flags;
1102 		breq->ifbr_state = bif->bif_state;
1103 		breq->ifbr_priority = bif->bif_priority;
1104 		breq->ifbr_path_cost = bif->bif_path_cost;
1105 		breq->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1106 	}
1107 
1108 	/* Don't call copyout with holding the mutex */
1109 	BRIDGE_UNLOCK(sc);
1110 
1111 	for (i = 0; i < count; i++) {
1112 		error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs));
1113 		if (error)
1114 			break;
1115 	}
1116 	bifc->ifbic_len = sizeof(*breqs) * i;
1117 
1118 	kmem_free(breqs, sizeof(*breqs) * count);
1119 
1120 	return error;
1121 }
1122 
1123 static int
bridge_ioctl_rts(struct bridge_softc * sc,void * arg)1124 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1125 {
1126 	struct ifbaconf *bac = arg;
1127 	struct bridge_rtnode *brt;
1128 	struct ifbareq bareq;
1129 	int count = 0, error = 0, len;
1130 
1131 	if (bac->ifbac_len == 0)
1132 		return 0;
1133 
1134 	BRIDGE_RT_LOCK(sc);
1135 
1136 	/* The passed buffer is not enough, tell a required size. */
1137 	if (bac->ifbac_len < (sizeof(bareq) * sc->sc_brtcnt)) {
1138 		count = sc->sc_brtcnt;
1139 		goto out;
1140 	}
1141 
1142 	len = bac->ifbac_len;
1143 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
1144 		if (len < sizeof(bareq))
1145 			goto out;
1146 		memset(&bareq, 0, sizeof(bareq));
1147 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1148 		    sizeof(bareq.ifba_ifsname));
1149 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1150 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
1151 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1152 		} else
1153 			bareq.ifba_expire = 0;
1154 		bareq.ifba_flags = brt->brt_flags;
1155 
1156 		error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1157 		if (error)
1158 			goto out;
1159 		count++;
1160 		len -= sizeof(bareq);
1161 	}
1162 out:
1163 	BRIDGE_RT_UNLOCK(sc);
1164 
1165 	bac->ifbac_len = sizeof(bareq) * count;
1166 	return error;
1167 }
1168 
1169 static int
bridge_ioctl_saddr(struct bridge_softc * sc,void * arg)1170 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1171 {
1172 	struct ifbareq *req = arg;
1173 	struct bridge_iflist *bif;
1174 	int error;
1175 	struct psref psref;
1176 
1177 	bif = bridge_lookup_member(sc, req->ifba_ifsname, &psref);
1178 	if (bif == NULL)
1179 		return ENOENT;
1180 
1181 	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1182 	    req->ifba_flags);
1183 
1184 	bridge_release_member(sc, bif, &psref);
1185 
1186 	return error;
1187 }
1188 
1189 static int
bridge_ioctl_sto(struct bridge_softc * sc,void * arg)1190 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1191 {
1192 	struct ifbrparam *param = arg;
1193 
1194 	sc->sc_brttimeout = param->ifbrp_ctime;
1195 
1196 	return 0;
1197 }
1198 
1199 static int
bridge_ioctl_gto(struct bridge_softc * sc,void * arg)1200 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1201 {
1202 	struct ifbrparam *param = arg;
1203 
1204 	param->ifbrp_ctime = sc->sc_brttimeout;
1205 
1206 	return 0;
1207 }
1208 
1209 static int
bridge_ioctl_daddr(struct bridge_softc * sc,void * arg)1210 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1211 {
1212 	struct ifbareq *req = arg;
1213 
1214 	return (bridge_rtdaddr(sc, req->ifba_dst));
1215 }
1216 
1217 static int
bridge_ioctl_flush(struct bridge_softc * sc,void * arg)1218 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1219 {
1220 	struct ifbreq *req = arg;
1221 
1222 	bridge_rtflush(sc, req->ifbr_ifsflags);
1223 
1224 	return 0;
1225 }
1226 
1227 static int
bridge_ioctl_gpri(struct bridge_softc * sc,void * arg)1228 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1229 {
1230 	struct ifbrparam *param = arg;
1231 
1232 	param->ifbrp_prio = sc->sc_bridge_priority;
1233 
1234 	return 0;
1235 }
1236 
1237 static int
bridge_ioctl_spri(struct bridge_softc * sc,void * arg)1238 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1239 {
1240 	struct ifbrparam *param = arg;
1241 
1242 	sc->sc_bridge_priority = param->ifbrp_prio;
1243 
1244 	if (sc->sc_if.if_flags & IFF_RUNNING)
1245 		bstp_initialization(sc);
1246 
1247 	return 0;
1248 }
1249 
1250 static int
bridge_ioctl_ght(struct bridge_softc * sc,void * arg)1251 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1252 {
1253 	struct ifbrparam *param = arg;
1254 
1255 	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;
1256 
1257 	return 0;
1258 }
1259 
1260 static int
bridge_ioctl_sht(struct bridge_softc * sc,void * arg)1261 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1262 {
1263 	struct ifbrparam *param = arg;
1264 
1265 	if (param->ifbrp_hellotime == 0)
1266 		return EINVAL;
1267 	sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8;
1268 
1269 	if (sc->sc_if.if_flags & IFF_RUNNING)
1270 		bstp_initialization(sc);
1271 
1272 	return 0;
1273 }
1274 
1275 static int
bridge_ioctl_gfd(struct bridge_softc * sc,void * arg)1276 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1277 {
1278 	struct ifbrparam *param = arg;
1279 
1280 	param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8;
1281 
1282 	return 0;
1283 }
1284 
1285 static int
bridge_ioctl_sfd(struct bridge_softc * sc,void * arg)1286 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1287 {
1288 	struct ifbrparam *param = arg;
1289 
1290 	if (param->ifbrp_fwddelay == 0)
1291 		return EINVAL;
1292 	sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1293 
1294 	if (sc->sc_if.if_flags & IFF_RUNNING)
1295 		bstp_initialization(sc);
1296 
1297 	return 0;
1298 }
1299 
1300 static int
bridge_ioctl_gma(struct bridge_softc * sc,void * arg)1301 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1302 {
1303 	struct ifbrparam *param = arg;
1304 
1305 	param->ifbrp_maxage = sc->sc_bridge_max_age >> 8;
1306 
1307 	return 0;
1308 }
1309 
1310 static int
bridge_ioctl_sma(struct bridge_softc * sc,void * arg)1311 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1312 {
1313 	struct ifbrparam *param = arg;
1314 
1315 	if (param->ifbrp_maxage == 0)
1316 		return EINVAL;
1317 	sc->sc_bridge_max_age = param->ifbrp_maxage << 8;
1318 
1319 	if (sc->sc_if.if_flags & IFF_RUNNING)
1320 		bstp_initialization(sc);
1321 
1322 	return 0;
1323 }
1324 
1325 static int
bridge_ioctl_sifprio(struct bridge_softc * sc,void * arg)1326 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1327 {
1328 	struct ifbreq *req = arg;
1329 	struct bridge_iflist *bif;
1330 	struct psref psref;
1331 
1332 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1333 	if (bif == NULL)
1334 		return ENOENT;
1335 
1336 	bif->bif_priority = req->ifbr_priority;
1337 
1338 	if (sc->sc_if.if_flags & IFF_RUNNING)
1339 		bstp_initialization(sc);
1340 
1341 	bridge_release_member(sc, bif, &psref);
1342 
1343 	return 0;
1344 }
1345 
1346 static int
bridge_ioctl_gfilt(struct bridge_softc * sc,void * arg)1347 bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
1348 {
1349 	struct ifbrparam *param = arg;
1350 
1351 	param->ifbrp_filter = sc->sc_filter_flags;
1352 
1353 	return 0;
1354 }
1355 
1356 static int
bridge_ioctl_sfilt(struct bridge_softc * sc,void * arg)1357 bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
1358 {
1359 	struct ifbrparam *param = arg;
1360 	uint32_t nflags, oflags;
1361 
1362 	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
1363 		return EINVAL;
1364 
1365 	nflags = param->ifbrp_filter;
1366 	oflags = sc->sc_filter_flags;
1367 
1368 	if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
1369 		pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1370 			sc->sc_if.if_pfil);
1371 	}
1372 	if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
1373 		pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
1374 			sc->sc_if.if_pfil);
1375 	}
1376 
1377 	sc->sc_filter_flags = nflags;
1378 
1379 	return 0;
1380 }
1381 
1382 static int
bridge_ioctl_sifcost(struct bridge_softc * sc,void * arg)1383 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1384 {
1385 	struct ifbreq *req = arg;
1386 	struct bridge_iflist *bif;
1387 	struct psref psref;
1388 
1389 	bif = bridge_lookup_member(sc, req->ifbr_ifsname, &psref);
1390 	if (bif == NULL)
1391 		return ENOENT;
1392 
1393 	bif->bif_path_cost = req->ifbr_path_cost;
1394 
1395 	if (sc->sc_if.if_flags & IFF_RUNNING)
1396 		bstp_initialization(sc);
1397 
1398 	bridge_release_member(sc, bif, &psref);
1399 
1400 	return 0;
1401 }
1402 
1403 /*
1404  * bridge_ifdetach:
1405  *
1406  *	Detach an interface from a bridge.  Called when a member
1407  *	interface is detaching.
1408  */
1409 static void
bridge_ifdetach(void * xifs)1410 bridge_ifdetach(void *xifs)
1411 {
1412 	struct ifnet *ifs;
1413 	struct bridge_softc *sc;
1414 	struct ifbreq breq;
1415 
1416 	ifs = (struct ifnet *)xifs;
1417 	sc = ifs->if_bridge;
1418 
1419 	/* ioctl_lock should prevent this from happening */
1420 	KASSERT(sc != NULL);
1421 
1422 	memset(&breq, 0, sizeof(breq));
1423 	strlcpy(breq.ifbr_ifsname, ifs->if_xname, sizeof(breq.ifbr_ifsname));
1424 
1425 	(void) bridge_ioctl_del(sc, &breq);
1426 }
1427 
1428 /*
1429  * bridge_init:
1430  *
1431  *	Initialize a bridge interface.
1432  */
1433 static int
bridge_init(struct ifnet * ifp)1434 bridge_init(struct ifnet *ifp)
1435 {
1436 	struct bridge_softc *sc = ifp->if_softc;
1437 
1438 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1439 
1440 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1441 	    bridge_timer, sc);
1442 	bstp_initialization(sc);
1443 
1444 	ifp->if_flags |= IFF_RUNNING;
1445 	return 0;
1446 }
1447 
1448 /*
1449  * bridge_stop:
1450  *
1451  *	Stop the bridge interface.
1452  */
1453 static void
bridge_stop(struct ifnet * ifp,int disable)1454 bridge_stop(struct ifnet *ifp, int disable)
1455 {
1456 	struct bridge_softc *sc = ifp->if_softc;
1457 
1458 	KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
1459 	ifp->if_flags &= ~IFF_RUNNING;
1460 
1461 	callout_halt(&sc->sc_brcallout, NULL);
1462 	workqueue_wait(sc->sc_rtage_wq, &sc->sc_rtage_wk);
1463 	bstp_stop(sc);
1464 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1465 }
1466 
1467 /*
1468  * bridge_enqueue:
1469  *
1470  *	Enqueue a packet on a bridge member interface.
1471  */
1472 void
bridge_enqueue(struct bridge_softc * sc,struct ifnet * dst_ifp,struct mbuf * m,int runfilt)1473 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
1474     int runfilt)
1475 {
1476 	int len, error;
1477 	short mflags;
1478 
1479 	if (runfilt) {
1480 		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
1481 		    dst_ifp, PFIL_OUT) != 0) {
1482 			if (m != NULL)
1483 				m_freem(m);
1484 			return;
1485 		}
1486 		if (m == NULL)
1487 			return;
1488 	}
1489 
1490 #ifdef ALTQ
1491 	KERNEL_LOCK(1, NULL);
1492 	/*
1493 	 * If ALTQ is enabled on the member interface, do
1494 	 * classification; the queueing discipline might
1495 	 * not require classification, but might require
1496 	 * the address family/header pointer in the pktattr.
1497 	 */
1498 	if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
1499 		/* XXX IFT_ETHER */
1500 		altq_etherclassify(&dst_ifp->if_snd, m);
1501 	}
1502 	KERNEL_UNLOCK_ONE(NULL);
1503 #endif /* ALTQ */
1504 
1505 	if (vlan_has_tag(m) &&
1506 	    !vlan_is_hwtag_enabled(dst_ifp)) {
1507 		(void)ether_inject_vlantag(&m, ETHERTYPE_VLAN,
1508 		    vlan_get_tag(m));
1509 		if (m == NULL) {
1510 			if_statinc(&sc->sc_if, if_oerrors);
1511 			return;
1512 		}
1513 	}
1514 
1515 	len = m->m_pkthdr.len;
1516 	mflags = m->m_flags;
1517 
1518 	error = if_transmit_lock(dst_ifp, m);
1519 	if (error) {
1520 		/* mbuf is already freed */
1521 		if_statinc(&sc->sc_if, if_oerrors);
1522 		return;
1523 	}
1524 
1525 	net_stat_ref_t nsr = IF_STAT_GETREF(&sc->sc_if);
1526 	if_statinc_ref(nsr, if_opackets);
1527 	if_statadd_ref(nsr, if_obytes, len);
1528 	if (mflags & M_MCAST)
1529 		if_statinc_ref(nsr, if_omcasts);
1530 	IF_STAT_PUTREF(&sc->sc_if);
1531 }
1532 
1533 /*
1534  * bridge_output:
1535  *
1536  *	Send output from a bridge member interface.  This
1537  *	performs the bridging function for locally originated
1538  *	packets.
1539  *
1540  *	The mbuf has the Ethernet header already attached.  We must
1541  *	enqueue or free the mbuf before returning.
1542  */
1543 int
bridge_output(struct ifnet * ifp,struct mbuf * m,const struct sockaddr * sa,const struct rtentry * rt)1544 bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
1545     const struct rtentry *rt)
1546 {
1547 	struct ether_header *eh;
1548 	struct ifnet *dst_if;
1549 	struct bridge_softc *sc;
1550 	struct mbuf *n;
1551 	int s;
1552 
1553 	/*
1554 	 * bridge_output() is called from ether_output(), furthermore
1555 	 * ifp argument doesn't point to bridge(4). So, don't assert
1556 	 * IFEF_MPSAFE here.
1557 	 */
1558 
1559 	KASSERT(m->m_len >= ETHER_HDR_LEN);
1560 
1561 	eh = mtod(m, struct ether_header *);
1562 	sc = ifp->if_bridge;
1563 
1564 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
1565 		if (memcmp(etherbroadcastaddr,
1566 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
1567 			m->m_flags |= M_BCAST;
1568 		else
1569 			m->m_flags |= M_MCAST;
1570 	}
1571 
1572 	/*
1573 	 * If bridge is down, but the original output interface is up,
1574 	 * go ahead and send out that interface.  Otherwise, the packet
1575 	 * is dropped below.
1576 	 */
1577 	if (__predict_false(sc == NULL) ||
1578 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1579 		dst_if = ifp;
1580 		goto unicast_asis;
1581 	}
1582 
1583 	/*
1584 	 * If the packet is a multicast, or we don't know a better way to
1585 	 * get there, send to all interfaces.
1586 	 */
1587 	if ((m->m_flags & (M_MCAST | M_BCAST)) != 0)
1588 		dst_if = NULL;
1589 	else
1590 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1591 
1592 	/*
1593 	 * In general, we need to handle TX offload in software before
1594 	 * enqueueing a packet. However, we can send it as is in the
1595 	 * cases of unicast via (1) the source interface, or (2) an
1596 	 * interface which supports the specified offload options.
1597 	 * For multicast or broadcast, send it as is only if (3) all
1598 	 * the member interfaces support the specified options.
1599 	 */
1600 
1601 	/*
1602 	 * Unicast via the source interface.
1603 	 */
1604 	if (dst_if == ifp)
1605 		goto unicast_asis;
1606 
1607 	/*
1608 	 * Unicast via other interface.
1609 	 */
1610 	if (dst_if != NULL) {
1611 		KASSERT(m->m_flags & M_PKTHDR);
1612 		if (TX_OFFLOAD_SUPPORTED(dst_if->if_csum_flags_tx,
1613 		    m->m_pkthdr.csum_flags)) {
1614 			/*
1615 			 * Unicast via an interface which supports the
1616 			 * specified offload options.
1617 			 */
1618 			goto unicast_asis;
1619 		}
1620 
1621 		/*
1622 		 * Handle TX offload in software. For TSO, a packet is
1623 		 * split into multiple chunks. Thus, the return value of
1624 		 * ether_sw_offload_tx() is mbuf queue consists of them.
1625 		 */
1626 		m = ether_sw_offload_tx(ifp, m);
1627 		if (m == NULL)
1628 			return 0;
1629 
1630 		do {
1631 			n = m->m_nextpkt;
1632 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1633 				m_freem(m);
1634 			else
1635 				bridge_enqueue(sc, dst_if, m, 0);
1636 			m = n;
1637 		} while (m != NULL);
1638 
1639 		return 0;
1640 	}
1641 
1642 	/*
1643 	 * Multicast or broadcast.
1644 	 */
1645 	if (TX_OFFLOAD_SUPPORTED(sc->sc_csum_flags_tx,
1646 	    m->m_pkthdr.csum_flags)) {
1647 		/*
1648 		 * Specified TX offload options are supported by all
1649 		 * the member interfaces of this bridge.
1650 		 */
1651 		m->m_nextpkt = NULL;	/* XXX */
1652 	} else {
1653 		/*
1654 		 * Otherwise, handle TX offload in software.
1655 		 */
1656 		m = ether_sw_offload_tx(ifp, m);
1657 		if (m == NULL)
1658 			return 0;
1659 	}
1660 
1661 	do {
1662 		/* XXX Should call bridge_broadcast, but there are locking
1663 		 * issues which need resolving first. */
1664 		struct bridge_iflist *bif;
1665 		struct mbuf *mc;
1666 		bool used = false;
1667 
1668 		n = m->m_nextpkt;
1669 
1670 		BRIDGE_PSZ_RENTER(s);
1671 		BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
1672 			struct psref psref;
1673 
1674 			bridge_acquire_member(sc, bif, &psref);
1675 			BRIDGE_PSZ_REXIT(s);
1676 
1677 			dst_if = bif->bif_ifp;
1678 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
1679 				goto next;
1680 
1681 			/*
1682 			 * If this is not the original output interface,
1683 			 * and the interface is participating in spanning
1684 			 * tree, make sure the port is in a state that
1685 			 * allows forwarding.
1686 			 */
1687 			if (dst_if != ifp &&
1688 			    (bif->bif_flags & IFBIF_STP) != 0) {
1689 				switch (bif->bif_state) {
1690 				case BSTP_IFSTATE_BLOCKING:
1691 				case BSTP_IFSTATE_LISTENING:
1692 				case BSTP_IFSTATE_DISABLED:
1693 					goto next;
1694 				}
1695 			}
1696 
1697 			if (PSLIST_READER_NEXT(bif, struct bridge_iflist,
1698 			    bif_next) == NULL &&
1699 			    ((m->m_flags & (M_MCAST | M_BCAST)) == 0 ||
1700 			    dst_if == ifp))
1701 			{
1702 				used = true;
1703 				mc = m;
1704 			} else {
1705 				mc = m_copypacket(m, M_DONTWAIT);
1706 				if (mc == NULL) {
1707 					if_statinc(&sc->sc_if, if_oerrors);
1708 					goto next;
1709 				}
1710 			}
1711 
1712 			bridge_enqueue(sc, dst_if, mc, 0);
1713 
1714 			if ((m->m_flags & (M_MCAST | M_BCAST)) != 0 &&
1715 			    dst_if != ifp)
1716 			{
1717 				if (PSLIST_READER_NEXT(bif,
1718 				    struct bridge_iflist, bif_next) == NULL)
1719 				{
1720 					used = true;
1721 					mc = m;
1722 				} else {
1723 					mc = m_copypacket(m, M_DONTWAIT);
1724 					if (mc == NULL) {
1725 						if_statinc(&sc->sc_if,
1726 						    if_oerrors);
1727 						goto next;
1728 					}
1729 				}
1730 
1731 				m_set_rcvif(mc, dst_if);
1732 				mc->m_flags &= ~M_PROMISC;
1733 
1734 				const int _s = splsoftnet();
1735 				KERNEL_LOCK_UNLESS_IFP_MPSAFE(dst_if);
1736 				ether_input(dst_if, mc);
1737 				KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(dst_if);
1738 				splx(_s);
1739 			}
1740 
1741 next:
1742 			BRIDGE_PSZ_RENTER(s);
1743 			bridge_release_member(sc, bif, &psref);
1744 
1745 			/* Guarantee we don't re-enter the loop as we already
1746 			 * decided we're at the end. */
1747 			if (used)
1748 				break;
1749 		}
1750 		BRIDGE_PSZ_REXIT(s);
1751 
1752 		if (!used)
1753 			m_freem(m);
1754 
1755 		m = n;
1756 	} while (m != NULL);
1757 	return 0;
1758 
1759 unicast_asis:
1760 	/*
1761 	 * XXX Spanning tree consideration here?
1762 	 */
1763 	if ((dst_if->if_flags & IFF_RUNNING) == 0)
1764 		m_freem(m);
1765 	else
1766 		bridge_enqueue(sc, dst_if, m, 0);
1767 	return 0;
1768 }
1769 
1770 /*
1771  * bridge_start:
1772  *
1773  *	Start output on a bridge.
1774  *
1775  *	NOTE: This routine should never be called in this implementation.
1776  */
1777 static void
bridge_start(struct ifnet * ifp)1778 bridge_start(struct ifnet *ifp)
1779 {
1780 
1781 	printf("%s: bridge_start() called\n", ifp->if_xname);
1782 }
1783 
1784 /*
1785  * bridge_forward:
1786  *
1787  *	The forwarding function of the bridge.
1788  */
1789 static void
bridge_forward(struct bridge_softc * sc,struct mbuf * m)1790 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1791 {
1792 	struct bridge_iflist *bif;
1793 	struct ifnet *src_if, *dst_if;
1794 	struct ether_header *eh;
1795 	struct psref psref;
1796 	struct psref psref_src;
1797 	DECLARE_LOCK_VARIABLE;
1798 
1799 	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
1800 		return;
1801 
1802 	src_if = m_get_rcvif_psref(m, &psref_src);
1803 	if (src_if == NULL) {
1804 		/* Interface is being destroyed? */
1805 		m_freem(m);
1806 		goto out;
1807 	}
1808 
1809 	if_statadd2(&sc->sc_if, if_ipackets, 1, if_ibytes, m->m_pkthdr.len);
1810 
1811 	/*
1812 	 * Look up the bridge_iflist.
1813 	 */
1814 	bif = bridge_lookup_member_if(sc, src_if, &psref);
1815 	if (bif == NULL) {
1816 		/* Interface is not a bridge member (anymore?) */
1817 		m_freem(m);
1818 		goto out;
1819 	}
1820 
1821 	if (bif->bif_flags & IFBIF_STP) {
1822 		switch (bif->bif_state) {
1823 		case BSTP_IFSTATE_BLOCKING:
1824 		case BSTP_IFSTATE_LISTENING:
1825 		case BSTP_IFSTATE_DISABLED:
1826 			m_freem(m);
1827 			bridge_release_member(sc, bif, &psref);
1828 			goto out;
1829 		}
1830 	}
1831 
1832 	eh = mtod(m, struct ether_header *);
1833 
1834 	/*
1835 	 * If the interface is learning, and the source
1836 	 * address is valid and not multicast, record
1837 	 * the address.
1838 	 */
1839 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1840 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1841 	    (eh->ether_shost[0] == 0 &&
1842 	     eh->ether_shost[1] == 0 &&
1843 	     eh->ether_shost[2] == 0 &&
1844 	     eh->ether_shost[3] == 0 &&
1845 	     eh->ether_shost[4] == 0 &&
1846 	     eh->ether_shost[5] == 0) == 0) {
1847 		(void) bridge_rtupdate(sc, eh->ether_shost,
1848 		    src_if, 0, IFBAF_DYNAMIC);
1849 	}
1850 
1851 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
1852 	    bif->bif_state == BSTP_IFSTATE_LEARNING) {
1853 		m_freem(m);
1854 		bridge_release_member(sc, bif, &psref);
1855 		goto out;
1856 	}
1857 
1858 	bridge_release_member(sc, bif, &psref);
1859 
1860 	/*
1861 	 * At this point, the port either doesn't participate
1862 	 * in spanning tree or it is in the forwarding state.
1863 	 */
1864 
1865 	/*
1866 	 * If the packet is unicast, destined for someone on
1867 	 * "this" side of the bridge, drop it.
1868 	 */
1869 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1870 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1871 		if (src_if == dst_if) {
1872 			m_freem(m);
1873 			goto out;
1874 		}
1875 	} else {
1876 		/* ...forward it to all interfaces. */
1877 		if_statinc(&sc->sc_if, if_imcasts);
1878 		dst_if = NULL;
1879 	}
1880 
1881 	if (pfil_run_hooks(sc->sc_if.if_pfil, &m, src_if, PFIL_IN) != 0) {
1882 		if (m != NULL)
1883 			m_freem(m);
1884 		goto out;
1885 	}
1886 	if (m == NULL)
1887 		goto out;
1888 
1889 	if (dst_if == NULL) {
1890 		bridge_broadcast(sc, src_if, m);
1891 		goto out;
1892 	}
1893 
1894 	m_put_rcvif_psref(src_if, &psref_src);
1895 	src_if = NULL;
1896 
1897 	/*
1898 	 * At this point, we're dealing with a unicast frame
1899 	 * going to a different interface.
1900 	 */
1901 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
1902 		m_freem(m);
1903 		goto out;
1904 	}
1905 
1906 	bif = bridge_lookup_member_if(sc, dst_if, &psref);
1907 	if (bif == NULL) {
1908 		/* Not a member of the bridge (anymore?) */
1909 		m_freem(m);
1910 		goto out;
1911 	}
1912 
1913 	if (bif->bif_flags & IFBIF_STP) {
1914 		switch (bif->bif_state) {
1915 		case BSTP_IFSTATE_DISABLED:
1916 		case BSTP_IFSTATE_BLOCKING:
1917 			m_freem(m);
1918 			bridge_release_member(sc, bif, &psref);
1919 			goto out;
1920 		}
1921 	}
1922 
1923 	bridge_release_member(sc, bif, &psref);
1924 
1925 	/*
1926 	 * Before enqueueing this packet to the destination interface,
1927 	 * clear any in-bound checksum flags to prevent them from being
1928 	 * misused as out-bound flags.
1929 	 */
1930 	m->m_pkthdr.csum_flags = 0;
1931 
1932 	ACQUIRE_GLOBAL_LOCKS();
1933 	bridge_enqueue(sc, dst_if, m, 1);
1934 	RELEASE_GLOBAL_LOCKS();
1935 out:
1936 	if (src_if != NULL)
1937 		m_put_rcvif_psref(src_if, &psref_src);
1938 	return;
1939 }
1940 
1941 static bool
bstp_state_before_learning(struct bridge_iflist * bif)1942 bstp_state_before_learning(struct bridge_iflist *bif)
1943 {
1944 	if (bif->bif_flags & IFBIF_STP) {
1945 		switch (bif->bif_state) {
1946 		case BSTP_IFSTATE_BLOCKING:
1947 		case BSTP_IFSTATE_LISTENING:
1948 		case BSTP_IFSTATE_DISABLED:
1949 			return true;
1950 		}
1951 	}
1952 	return false;
1953 }
1954 
1955 static bool
bridge_ourether(struct bridge_iflist * bif,struct ether_header * eh,int src)1956 bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
1957 {
1958 	uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;
1959 
1960 	if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
1961 #if NCARP > 0
1962 	    || (bif->bif_ifp->if_carp &&
1963 	        carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
1964 #endif /* NCARP > 0 */
1965 	    )
1966 		return true;
1967 
1968 	return false;
1969 }
1970 
1971 /*
1972  * bridge_input:
1973  *
1974  *	Receive input from a member interface.  Queue the packet for
1975  *	bridging if it is not for us.
1976  */
1977 static void
bridge_input(struct ifnet * ifp,struct mbuf * m)1978 bridge_input(struct ifnet *ifp, struct mbuf *m)
1979 {
1980 	struct bridge_softc *sc = ifp->if_bridge;
1981 	struct bridge_iflist *bif;
1982 	struct ether_header *eh;
1983 	struct psref psref;
1984 	int bound;
1985 	DECLARE_LOCK_VARIABLE;
1986 
1987 	KASSERT(!cpu_intr_p());
1988 
1989 	if (__predict_false(sc == NULL) ||
1990 	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
1991 		ACQUIRE_GLOBAL_LOCKS();
1992 		ether_input(ifp, m);
1993 		RELEASE_GLOBAL_LOCKS();
1994 		return;
1995 	}
1996 
1997 	bound = curlwp_bind();
1998 	bif = bridge_lookup_member_if(sc, ifp, &psref);
1999 	if (bif == NULL) {
2000 		curlwp_bindx(bound);
2001 		ACQUIRE_GLOBAL_LOCKS();
2002 		ether_input(ifp, m);
2003 		RELEASE_GLOBAL_LOCKS();
2004 		return;
2005 	}
2006 
2007 	eh = mtod(m, struct ether_header *);
2008 
2009 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
2010 		if (memcmp(etherbroadcastaddr,
2011 		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
2012 			m->m_flags |= M_BCAST;
2013 		else
2014 			m->m_flags |= M_MCAST;
2015 	}
2016 
2017 	/*
2018 	 * A 'fast' path for packets addressed to interfaces that are
2019 	 * part of this bridge.
2020 	 */
2021 	if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
2022 	    !bstp_state_before_learning(bif)) {
2023 		struct bridge_iflist *_bif;
2024 		struct ifnet *_ifp = NULL;
2025 		int s;
2026 		struct psref _psref;
2027 
2028 		BRIDGE_PSZ_RENTER(s);
2029 		BRIDGE_IFLIST_READER_FOREACH(_bif, sc) {
2030 			/* It is destined for us. */
2031 			if (bridge_ourether(_bif, eh, 0)) {
2032 				bridge_acquire_member(sc, _bif, &_psref);
2033 				BRIDGE_PSZ_REXIT(s);
2034 				if (_bif->bif_flags & IFBIF_LEARNING)
2035 					(void) bridge_rtupdate(sc,
2036 					    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2037 				m_set_rcvif(m, _bif->bif_ifp);
2038 				_ifp = _bif->bif_ifp;
2039 				bridge_release_member(sc, _bif, &_psref);
2040 				goto out;
2041 			}
2042 
2043 			/* We just received a packet that we sent out. */
2044 			if (bridge_ourether(_bif, eh, 1))
2045 				break;
2046 		}
2047 		BRIDGE_PSZ_REXIT(s);
2048 out:
2049 
2050 		if (_bif != NULL) {
2051 			bridge_release_member(sc, bif, &psref);
2052 			curlwp_bindx(bound);
2053 			if (_ifp != NULL) {
2054 				m->m_flags &= ~M_PROMISC;
2055 				ACQUIRE_GLOBAL_LOCKS();
2056 				ether_input(_ifp, m);
2057 				RELEASE_GLOBAL_LOCKS();
2058 			} else
2059 				m_freem(m);
2060 			return;
2061 		}
2062 	}
2063 
2064 	/* Tap off 802.1D packets; they do not get forwarded. */
2065 	if (bif->bif_flags & IFBIF_STP &&
2066 	    memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
2067 		bstp_input(sc, bif, m);
2068 		bridge_release_member(sc, bif, &psref);
2069 		curlwp_bindx(bound);
2070 		return;
2071 	}
2072 
2073 	/*
2074 	 * A normal switch would discard the packet here, but that's not what
2075 	 * we've done historically. This also prevents some obnoxious behaviour.
2076 	 */
2077 	if (bstp_state_before_learning(bif)) {
2078 		bridge_release_member(sc, bif, &psref);
2079 		curlwp_bindx(bound);
2080 		ACQUIRE_GLOBAL_LOCKS();
2081 		ether_input(ifp, m);
2082 		RELEASE_GLOBAL_LOCKS();
2083 		return;
2084 	}
2085 
2086 	bridge_release_member(sc, bif, &psref);
2087 
2088 	bridge_forward(sc, m);
2089 
2090 	curlwp_bindx(bound);
2091 }
2092 
2093 /*
2094  * bridge_broadcast:
2095  *
2096  *	Send a frame to all interfaces that are members of
2097  *	the bridge, except for the one on which the packet
2098  *	arrived.
2099  */
2100 static void
bridge_broadcast(struct bridge_softc * sc,struct ifnet * src_if,struct mbuf * m)2101 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2102     struct mbuf *m)
2103 {
2104 	struct bridge_iflist *bif;
2105 	struct mbuf *mc;
2106 	struct ifnet *dst_if;
2107 	bool bmcast;
2108 	int s;
2109 	DECLARE_LOCK_VARIABLE;
2110 
2111 	bmcast = m->m_flags & (M_BCAST|M_MCAST);
2112 
2113 	BRIDGE_PSZ_RENTER(s);
2114 	BRIDGE_IFLIST_READER_FOREACH(bif, sc) {
2115 		struct psref psref;
2116 
2117 		bridge_acquire_member(sc, bif, &psref);
2118 		BRIDGE_PSZ_REXIT(s);
2119 
2120 		dst_if = bif->bif_ifp;
2121 
2122 		if (bif->bif_flags & IFBIF_STP) {
2123 			switch (bif->bif_state) {
2124 			case BSTP_IFSTATE_BLOCKING:
2125 			case BSTP_IFSTATE_DISABLED:
2126 				goto next;
2127 			}
2128 		}
2129 
2130 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
2131 			goto next;
2132 
2133 		if ((dst_if->if_flags & IFF_RUNNING) == 0)
2134 			goto next;
2135 
2136 		if (dst_if != src_if) {
2137 			mc = m_copypacket(m, M_DONTWAIT);
2138 			if (mc == NULL) {
2139 				if_statinc(&sc->sc_if, if_oerrors);
2140 				goto next;
2141 			}
2142 			/*
2143 			 * Before enqueueing this packet to the destination
2144 			 * interface, clear any in-bound checksum flags to
2145 			 * prevent them from being misused as out-bound flags.
2146 			 */
2147 			mc->m_pkthdr.csum_flags = 0;
2148 
2149 			ACQUIRE_GLOBAL_LOCKS();
2150 			bridge_enqueue(sc, dst_if, mc, 1);
2151 			RELEASE_GLOBAL_LOCKS();
2152 		}
2153 
2154 		if (bmcast) {
2155 			mc = m_copypacket(m, M_DONTWAIT);
2156 			if (mc == NULL) {
2157 				if_statinc(&sc->sc_if, if_oerrors);
2158 				goto next;
2159 			}
2160 			/*
2161 			 * Before enqueueing this packet to the destination
2162 			 * interface, clear any in-bound checksum flags to
2163 			 * prevent them from being misused as out-bound flags.
2164 			 */
2165 			mc->m_pkthdr.csum_flags = 0;
2166 
2167 			m_set_rcvif(mc, dst_if);
2168 			mc->m_flags &= ~M_PROMISC;
2169 
2170 			ACQUIRE_GLOBAL_LOCKS();
2171 			ether_input(dst_if, mc);
2172 			RELEASE_GLOBAL_LOCKS();
2173 		}
2174 next:
2175 		BRIDGE_PSZ_RENTER(s);
2176 		bridge_release_member(sc, bif, &psref);
2177 	}
2178 	BRIDGE_PSZ_REXIT(s);
2179 
2180 	m_freem(m);
2181 }
2182 
2183 static int
bridge_rtalloc(struct bridge_softc * sc,const uint8_t * dst,struct bridge_rtnode ** brtp)2184 bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
2185     struct bridge_rtnode **brtp)
2186 {
2187 	struct bridge_rtnode *brt;
2188 	int error;
2189 
2190 	if (sc->sc_brtcnt >= sc->sc_brtmax)
2191 		return ENOSPC;
2192 
2193 	/*
2194 	 * Allocate a new bridge forwarding node, and
2195 	 * initialize the expiration time and Ethernet
2196 	 * address.
2197 	 */
2198 	brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
2199 	if (brt == NULL)
2200 		return ENOMEM;
2201 
2202 	memset(brt, 0, sizeof(*brt));
2203 	brt->brt_expire = time_uptime + sc->sc_brttimeout;
2204 	brt->brt_flags = IFBAF_DYNAMIC;
2205 	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2206 	PSLIST_ENTRY_INIT(brt, brt_list);
2207 	PSLIST_ENTRY_INIT(brt, brt_hash);
2208 
2209 	BRIDGE_RT_LOCK(sc);
2210 	error = bridge_rtnode_insert(sc, brt);
2211 	BRIDGE_RT_UNLOCK(sc);
2212 
2213 	if (error != 0) {
2214 		pool_put(&bridge_rtnode_pool, brt);
2215 		return error;
2216 	}
2217 
2218 	*brtp = brt;
2219 	return 0;
2220 }
2221 
2222 /*
2223  * bridge_rtupdate:
2224  *
2225  *	Add a bridge routing entry.
2226  */
2227 static int
bridge_rtupdate(struct bridge_softc * sc,const uint8_t * dst,struct ifnet * dst_if,int setflags,uint8_t flags)2228 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2229     struct ifnet *dst_if, int setflags, uint8_t flags)
2230 {
2231 	struct bridge_rtnode *brt;
2232 	int s;
2233 
2234 again:
2235 	/*
2236 	 * A route for this destination might already exist.  If so,
2237 	 * update it, otherwise create a new one.
2238 	 */
2239 	BRIDGE_RT_RENTER(s);
2240 	brt = bridge_rtnode_lookup(sc, dst);
2241 
2242 	if (brt != NULL) {
2243 		brt->brt_ifp = dst_if;
2244 		if (setflags) {
2245 			brt->brt_flags = flags;
2246 			if (flags & IFBAF_STATIC)
2247 				brt->brt_expire = 0;
2248 			else
2249 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2250 		} else {
2251 			if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2252 				brt->brt_expire = time_uptime + sc->sc_brttimeout;
2253 		}
2254 	}
2255 	BRIDGE_RT_REXIT(s);
2256 
2257 	if (brt == NULL) {
2258 		int r;
2259 
2260 		r = bridge_rtalloc(sc, dst, &brt);
2261 		if (r != 0)
2262 			return r;
2263 		goto again;
2264 	}
2265 
2266 	return 0;
2267 }
2268 
2269 /*
2270  * bridge_rtlookup:
2271  *
2272  *	Lookup the destination interface for an address.
2273  */
2274 static struct ifnet *
bridge_rtlookup(struct bridge_softc * sc,const uint8_t * addr)2275 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2276 {
2277 	struct bridge_rtnode *brt;
2278 	struct ifnet *ifs = NULL;
2279 	int s;
2280 
2281 	BRIDGE_RT_RENTER(s);
2282 	brt = bridge_rtnode_lookup(sc, addr);
2283 	if (brt != NULL)
2284 		ifs = brt->brt_ifp;
2285 	BRIDGE_RT_REXIT(s);
2286 
2287 	return ifs;
2288 }
2289 
2290 typedef bool (*bridge_iterate_cb_t)
2291     (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);
2292 
2293 /*
2294  * bridge_rtlist_iterate_remove:
2295  *
2296  *	It iterates on sc->sc_rtlist and removes rtnodes of it which func
2297  *	callback judges to remove. Removals of rtnodes are done in a manner
2298  *	of pserialize. To this end, all kmem_* operations are placed out of
2299  *	mutexes.
2300  */
2301 static void
bridge_rtlist_iterate_remove(struct bridge_softc * sc,bridge_iterate_cb_t func,void * arg)2302 bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
2303 {
2304 	struct bridge_rtnode *brt;
2305 	struct bridge_rtnode **brt_list;
2306 	int i, count;
2307 
2308 retry:
2309 	count = sc->sc_brtcnt;
2310 	if (count == 0)
2311 		return;
2312 	brt_list = kmem_alloc(sizeof(*brt_list) * count, KM_SLEEP);
2313 
2314 	BRIDGE_RT_LOCK(sc);
2315 	if (__predict_false(sc->sc_brtcnt > count)) {
2316 		/* The rtnodes increased, we need more memory */
2317 		BRIDGE_RT_UNLOCK(sc);
2318 		kmem_free(brt_list, sizeof(*brt_list) * count);
2319 		goto retry;
2320 	}
2321 
2322 	i = 0;
2323 	/*
2324 	 * We don't need to use a _SAFE variant here because we know
2325 	 * that a removed item keeps its next pointer as-is thanks to
2326 	 * pslist(9) and isn't freed in the loop.
2327 	 */
2328 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2329 		bool need_break = false;
2330 		if (func(sc, brt, &need_break, arg)) {
2331 			bridge_rtnode_remove(sc, brt);
2332 			brt_list[i++] = brt;
2333 		}
2334 		if (need_break)
2335 			break;
2336 	}
2337 
2338 	if (i > 0)
2339 		BRIDGE_RT_PSZ_PERFORM(sc);
2340 	BRIDGE_RT_UNLOCK(sc);
2341 
2342 	while (--i >= 0)
2343 		bridge_rtnode_destroy(brt_list[i]);
2344 
2345 	kmem_free(brt_list, sizeof(*brt_list) * count);
2346 }
2347 
2348 static bool
bridge_rttrim0_cb(struct bridge_softc * sc,struct bridge_rtnode * brt,bool * need_break,void * arg)2349 bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2350     bool *need_break, void *arg)
2351 {
2352 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2353 		/* Take into account of the subsequent removal */
2354 		if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
2355 			*need_break = true;
2356 		return true;
2357 	} else
2358 		return false;
2359 }
2360 
2361 static void
bridge_rttrim0(struct bridge_softc * sc)2362 bridge_rttrim0(struct bridge_softc *sc)
2363 {
2364 	bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
2365 }
2366 
2367 /*
2368  * bridge_rttrim:
2369  *
2370  *	Trim the routine table so that we have a number
2371  *	of routing entries less than or equal to the
2372  *	maximum number.
2373  */
2374 static void
bridge_rttrim(struct bridge_softc * sc)2375 bridge_rttrim(struct bridge_softc *sc)
2376 {
2377 
2378 	/* Make sure we actually need to do this. */
2379 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2380 		return;
2381 
2382 	/* Force an aging cycle; this might trim enough addresses. */
2383 	bridge_rtage(sc);
2384 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2385 		return;
2386 
2387 	bridge_rttrim0(sc);
2388 
2389 	return;
2390 }
2391 
2392 /*
2393  * bridge_timer:
2394  *
2395  *	Aging timer for the bridge.
2396  */
2397 static void
bridge_timer(void * arg)2398 bridge_timer(void *arg)
2399 {
2400 	struct bridge_softc *sc = arg;
2401 
2402 	workqueue_enqueue(sc->sc_rtage_wq, &sc->sc_rtage_wk, NULL);
2403 }
2404 
2405 static void
bridge_rtage_work(struct work * wk,void * arg)2406 bridge_rtage_work(struct work *wk, void *arg)
2407 {
2408 	struct bridge_softc *sc = arg;
2409 
2410 	KASSERT(wk == &sc->sc_rtage_wk);
2411 
2412 	bridge_rtage(sc);
2413 
2414 	if (sc->sc_if.if_flags & IFF_RUNNING)
2415 		callout_reset(&sc->sc_brcallout,
2416 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2417 }
2418 
2419 static bool
bridge_rtage_cb(struct bridge_softc * sc,struct bridge_rtnode * brt,bool * need_break,void * arg)2420 bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2421     bool *need_break, void *arg)
2422 {
2423 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2424 	    time_uptime >= brt->brt_expire)
2425 		return true;
2426 	else
2427 		return false;
2428 }
2429 
2430 /*
2431  * bridge_rtage:
2432  *
2433  *	Perform an aging cycle.
2434  */
2435 static void
bridge_rtage(struct bridge_softc * sc)2436 bridge_rtage(struct bridge_softc *sc)
2437 {
2438 	bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
2439 }
2440 
2441 
2442 static bool
bridge_rtflush_cb(struct bridge_softc * sc,struct bridge_rtnode * brt,bool * need_break,void * arg)2443 bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
2444     bool *need_break, void *arg)
2445 {
2446 	int full = *(int*)arg;
2447 
2448 	if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2449 		return true;
2450 	else
2451 		return false;
2452 }
2453 
2454 /*
2455  * bridge_rtflush:
2456  *
2457  *	Remove all dynamic addresses from the bridge.
2458  */
2459 static void
bridge_rtflush(struct bridge_softc * sc,int full)2460 bridge_rtflush(struct bridge_softc *sc, int full)
2461 {
2462 	bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full);
2463 }
2464 
2465 /*
2466  * bridge_rtdaddr:
2467  *
2468  *	Remove an address from the table.
2469  */
2470 static int
bridge_rtdaddr(struct bridge_softc * sc,const uint8_t * addr)2471 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2472 {
2473 	struct bridge_rtnode *brt;
2474 
2475 	BRIDGE_RT_LOCK(sc);
2476 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
2477 		BRIDGE_RT_UNLOCK(sc);
2478 		return ENOENT;
2479 	}
2480 	bridge_rtnode_remove(sc, brt);
2481 	BRIDGE_RT_PSZ_PERFORM(sc);
2482 	BRIDGE_RT_UNLOCK(sc);
2483 
2484 	bridge_rtnode_destroy(brt);
2485 
2486 	return 0;
2487 }
2488 
2489 /*
2490  * bridge_rtdelete:
2491  *
2492  *	Delete routes to a speicifc member interface.
2493  */
2494 static void
bridge_rtdelete(struct bridge_softc * sc,struct ifnet * ifp)2495 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
2496 {
2497 	struct bridge_rtnode *brt;
2498 
2499 	/* XXX pserialize_perform for each entry is slow */
2500 again:
2501 	BRIDGE_RT_LOCK(sc);
2502 	BRIDGE_RTLIST_WRITER_FOREACH(brt, sc) {
2503 		if (brt->brt_ifp == ifp)
2504 			break;
2505 	}
2506 	if (brt == NULL) {
2507 		BRIDGE_RT_UNLOCK(sc);
2508 		return;
2509 	}
2510 	bridge_rtnode_remove(sc, brt);
2511 	BRIDGE_RT_PSZ_PERFORM(sc);
2512 	BRIDGE_RT_UNLOCK(sc);
2513 
2514 	bridge_rtnode_destroy(brt);
2515 
2516 	goto again;
2517 }
2518 
2519 /*
2520  * bridge_rtable_init:
2521  *
2522  *	Initialize the route table for this bridge.
2523  */
2524 static void
bridge_rtable_init(struct bridge_softc * sc)2525 bridge_rtable_init(struct bridge_softc *sc)
2526 {
2527 	int i;
2528 
2529 	sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2530 	    KM_SLEEP);
2531 
2532 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2533 		PSLIST_INIT(&sc->sc_rthash[i]);
2534 
2535 	sc->sc_rthash_key = cprng_fast32();
2536 
2537 	PSLIST_INIT(&sc->sc_rtlist);
2538 
2539 	sc->sc_rtlist_psz = pserialize_create();
2540 	sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
2541 }
2542 
2543 /*
2544  * bridge_rtable_fini:
2545  *
2546  *	Deconstruct the route table for this bridge.
2547  */
2548 static void
bridge_rtable_fini(struct bridge_softc * sc)2549 bridge_rtable_fini(struct bridge_softc *sc)
2550 {
2551 
2552 	kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
2553 	mutex_obj_free(sc->sc_rtlist_lock);
2554 	pserialize_destroy(sc->sc_rtlist_psz);
2555 }
2556 
2557 /*
2558  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2559  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2560  */
2561 #define	mix(a, b, c)							\
2562 do {									\
2563 	a -= b; a -= c; a ^= (c >> 13);					\
2564 	b -= c; b -= a; b ^= (a << 8);					\
2565 	c -= a; c -= b; c ^= (b >> 13);					\
2566 	a -= b; a -= c; a ^= (c >> 12);					\
2567 	b -= c; b -= a; b ^= (a << 16);					\
2568 	c -= a; c -= b; c ^= (b >> 5);					\
2569 	a -= b; a -= c; a ^= (c >> 3);					\
2570 	b -= c; b -= a; b ^= (a << 10);					\
2571 	c -= a; c -= b; c ^= (b >> 15);					\
2572 } while (/*CONSTCOND*/0)
2573 
2574 static inline uint32_t
bridge_rthash(struct bridge_softc * sc,const uint8_t * addr)2575 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2576 {
2577 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2578 
2579 	b += addr[5] << 8;
2580 	b += addr[4];
2581 	a += (uint32_t)addr[3] << 24;
2582 	a += addr[2] << 16;
2583 	a += addr[1] << 8;
2584 	a += addr[0];
2585 
2586 	mix(a, b, c);
2587 
2588 	return (c & BRIDGE_RTHASH_MASK);
2589 }
2590 
2591 #undef mix
2592 
2593 /*
2594  * bridge_rtnode_lookup:
2595  *
2596  *	Look up a bridge route node for the specified destination.
2597  */
2598 static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc * sc,const uint8_t * addr)2599 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2600 {
2601 	struct bridge_rtnode *brt;
2602 	uint32_t hash;
2603 	int dir;
2604 
2605 	hash = bridge_rthash(sc, addr);
2606 	BRIDGE_RTHASH_READER_FOREACH(brt, sc, hash) {
2607 		dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
2608 		if (dir == 0)
2609 			return brt;
2610 		if (dir > 0)
2611 			return NULL;
2612 	}
2613 
2614 	return NULL;
2615 }
2616 
2617 /*
2618  * bridge_rtnode_insert:
2619  *
2620  *	Insert the specified bridge node into the route table.  We
2621  *	assume the entry is not already in the table.
2622  */
2623 static int
bridge_rtnode_insert(struct bridge_softc * sc,struct bridge_rtnode * brt)2624 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2625 {
2626 	struct bridge_rtnode *lbrt, *prev = NULL;
2627 	uint32_t hash;
2628 
2629 	KASSERT(BRIDGE_RT_LOCKED(sc));
2630 
2631 	hash = bridge_rthash(sc, brt->brt_addr);
2632 	BRIDGE_RTHASH_WRITER_FOREACH(lbrt, sc, hash) {
2633 		int dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN);
2634 		if (dir == 0)
2635 			return EEXIST;
2636 		if (dir > 0)
2637 			break;
2638 		prev = lbrt;
2639 	}
2640 	if (prev == NULL)
2641 		BRIDGE_RTHASH_WRITER_INSERT_HEAD(sc, hash, brt);
2642 	else
2643 		BRIDGE_RTHASH_WRITER_INSERT_AFTER(prev, brt);
2644 
2645 	BRIDGE_RTLIST_WRITER_INSERT_HEAD(sc, brt);
2646 	sc->sc_brtcnt++;
2647 
2648 	return 0;
2649 }
2650 
2651 /*
2652  * bridge_rtnode_remove:
2653  *
2654  *	Remove a bridge rtnode from the rthash and the rtlist of a bridge.
2655  */
2656 static void
bridge_rtnode_remove(struct bridge_softc * sc,struct bridge_rtnode * brt)2657 bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt)
2658 {
2659 
2660 	KASSERT(BRIDGE_RT_LOCKED(sc));
2661 
2662 	BRIDGE_RTHASH_WRITER_REMOVE(brt);
2663 	BRIDGE_RTLIST_WRITER_REMOVE(brt);
2664 	sc->sc_brtcnt--;
2665 }
2666 
2667 /*
2668  * bridge_rtnode_destroy:
2669  *
2670  *	Destroy a bridge rtnode.
2671  */
2672 static void
bridge_rtnode_destroy(struct bridge_rtnode * brt)2673 bridge_rtnode_destroy(struct bridge_rtnode *brt)
2674 {
2675 
2676 	PSLIST_ENTRY_DESTROY(brt, brt_list);
2677 	PSLIST_ENTRY_DESTROY(brt, brt_hash);
2678 	pool_put(&bridge_rtnode_pool, brt);
2679 }
2680 
2681 extern pfil_head_t *inet_pfil_hook;                 /* XXX */
2682 extern pfil_head_t *inet6_pfil_hook;                /* XXX */
2683 
2684 /*
2685  * Send bridge packets through IPF if they are one of the types IPF can deal
2686  * with, or if they are ARP or REVARP.  (IPF will pass ARP and REVARP without
2687  * question.)
2688  */
2689 static int
bridge_ipf(void * arg,struct mbuf ** mp,struct ifnet * ifp,int dir)2690 bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
2691 {
2692 	int snap, error;
2693 	struct ether_header *eh1, eh2;
2694 	struct llc llc1;
2695 	uint16_t ether_type;
2696 
2697 	snap = 0;
2698 	error = -1;	/* Default error if not error == 0 */
2699 	eh1 = mtod(*mp, struct ether_header *);
2700 	ether_type = ntohs(eh1->ether_type);
2701 
2702 	/*
2703 	 * Check for SNAP/LLC.
2704 	 */
2705 	if (ether_type < ETHERMTU) {
2706 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2707 
2708 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2709 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2710 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2711 		    llc2->llc_control == LLC_UI) {
2712 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2713 			snap = 1;
2714 		}
2715 	}
2716 
2717 	/* drop VLAN traffic untagged by hardware offloading */
2718 	if (vlan_has_tag(*mp))
2719 		goto bad;
2720 
2721 	/*
2722 	 * If we're trying to filter bridge traffic, don't look at anything
2723 	 * other than IP and ARP traffic.  If the filter doesn't understand
2724 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2725 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2726 	 * but of course we don't have an AppleTalk filter to begin with.
2727 	 * (Note that since IPF doesn't understand ARP it will pass *ALL*
2728 	 * ARP traffic.)
2729 	 */
2730 	switch (ether_type) {
2731 		case ETHERTYPE_ARP:
2732 		case ETHERTYPE_REVARP:
2733 			return 0; /* Automatically pass */
2734 		case ETHERTYPE_IP:
2735 # ifdef INET6
2736 		case ETHERTYPE_IPV6:
2737 # endif /* INET6 */
2738 			break;
2739 		default:
2740 			goto bad;
2741 	}
2742 
2743 	/* Strip off the Ethernet header and keep a copy. */
2744 	m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
2745 	m_adj(*mp, ETHER_HDR_LEN);
2746 
2747 	/* Strip off snap header, if present */
2748 	if (snap) {
2749 		m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
2750 		m_adj(*mp, sizeof(struct llc));
2751 	}
2752 
2753 	/*
2754 	 * Check basic packet sanity and run IPF through pfil.
2755 	 */
2756 	KASSERT(!cpu_intr_p());
2757 	switch (ether_type)
2758 	{
2759 	case ETHERTYPE_IP :
2760 		error = bridge_ip_checkbasic(mp);
2761 		if (error == 0)
2762 			error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
2763 		break;
2764 # ifdef INET6
2765 	case ETHERTYPE_IPV6 :
2766 		error = bridge_ip6_checkbasic(mp);
2767 		if (error == 0)
2768 			error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
2769 		break;
2770 # endif
2771 	default :
2772 		error = 0;
2773 		break;
2774 	}
2775 
2776 	if (*mp == NULL)
2777 		return error;
2778 	if (error != 0)
2779 		goto bad;
2780 
2781 	error = -1;
2782 
2783 	/*
2784 	 * Finally, put everything back the way it was and return
2785 	 */
2786 	if (snap) {
2787 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2788 		if (*mp == NULL)
2789 			return error;
2790 		bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
2791 	}
2792 
2793 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2794 	if (*mp == NULL)
2795 		return error;
2796 	bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);
2797 
2798 	return 0;
2799 
2800     bad:
2801 	m_freem(*mp);
2802 	*mp = NULL;
2803 	return error;
2804 }
2805 
2806 /*
2807  * Perform basic checks on header size since
2808  * IPF assumes ip_input has already processed
2809  * it for it.  Cut-and-pasted from ip_input.c.
2810  * Given how simple the IPv6 version is,
2811  * does the IPv4 version really need to be
2812  * this complicated?
2813  *
2814  * XXX Should we update ipstat here, or not?
2815  * XXX Right now we update ipstat but not
2816  * XXX csum_counter.
2817  */
2818 static int
bridge_ip_checkbasic(struct mbuf ** mp)2819 bridge_ip_checkbasic(struct mbuf **mp)
2820 {
2821 	struct mbuf *m = *mp;
2822 	struct ip *ip;
2823 	int len, hlen;
2824 
2825 	if (*mp == NULL)
2826 		return -1;
2827 
2828 	if (M_GET_ALIGNED_HDR(&m, struct ip, true) != 0) {
2829 		/* XXXJRT new stat, please */
2830 		ip_statinc(IP_STAT_TOOSMALL);
2831 		goto bad;
2832 	}
2833 	ip = mtod(m, struct ip *);
2834 	if (ip == NULL) goto bad;
2835 
2836 	if (ip->ip_v != IPVERSION) {
2837 		ip_statinc(IP_STAT_BADVERS);
2838 		goto bad;
2839 	}
2840 	hlen = ip->ip_hl << 2;
2841 	if (hlen < sizeof(struct ip)) { /* minimum header length */
2842 		ip_statinc(IP_STAT_BADHLEN);
2843 		goto bad;
2844 	}
2845 	if (hlen > m->m_len) {
2846 		if ((m = m_pullup(m, hlen)) == 0) {
2847 			ip_statinc(IP_STAT_BADHLEN);
2848 			goto bad;
2849 		}
2850 		ip = mtod(m, struct ip *);
2851 		if (ip == NULL) goto bad;
2852 	}
2853 
2854 	switch (m->m_pkthdr.csum_flags &
2855 	        ((m_get_rcvif_NOMPSAFE(m)->if_csum_flags_rx & M_CSUM_IPv4) |
2856 	         M_CSUM_IPv4_BAD)) {
2857 	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
2858 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
2859 		goto bad;
2860 
2861 	case M_CSUM_IPv4:
2862 		/* Checksum was okay. */
2863 		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
2864 		break;
2865 
2866 	default:
2867 		/* Must compute it ourselves. */
2868 		/* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
2869 		if (in_cksum(m, hlen) != 0)
2870 			goto bad;
2871 		break;
2872 	}
2873 
2874 	/* Retrieve the packet length. */
2875 	len = ntohs(ip->ip_len);
2876 
2877 	/*
2878 	 * Check for additional length bogosity
2879 	 */
2880 	if (len < hlen) {
2881 		ip_statinc(IP_STAT_BADLEN);
2882 		goto bad;
2883 	}
2884 
2885 	/*
2886 	 * Check that the amount of data in the buffers
2887 	 * is as at least much as the IP header would have us expect.
2888 	 * Drop packet if shorter than we expect.
2889 	 */
2890 	if (m->m_pkthdr.len < len) {
2891 		ip_statinc(IP_STAT_TOOSHORT);
2892 		goto bad;
2893 	}
2894 
2895 	/* Checks out, proceed */
2896 	*mp = m;
2897 	return 0;
2898 
2899     bad:
2900 	*mp = m;
2901 	return -1;
2902 }
2903 
2904 # ifdef INET6
2905 /*
2906  * Same as above, but for IPv6.
2907  * Cut-and-pasted from ip6_input.c.
2908  * XXX Should we update ip6stat, or not?
2909  */
2910 static int
bridge_ip6_checkbasic(struct mbuf ** mp)2911 bridge_ip6_checkbasic(struct mbuf **mp)
2912 {
2913 	struct mbuf *m = *mp;
2914 	struct ip6_hdr *ip6;
2915 
2916 	/*
2917 	 * If the IPv6 header is not aligned, slurp it up into a new
2918 	 * mbuf with space for link headers, in the event we forward
2919 	 * it.  Otherwise, if it is aligned, make sure the entire base
2920 	 * IPv6 header is in the first mbuf of the chain.
2921 	 */
2922 	if (M_GET_ALIGNED_HDR(&m, struct ip6_hdr, true) != 0) {
2923 		struct ifnet *inifp = m_get_rcvif_NOMPSAFE(m);
2924 		/* XXXJRT new stat, please */
2925 		ip6_statinc(IP6_STAT_TOOSMALL);
2926 		in6_ifstat_inc(inifp, ifs6_in_hdrerr);
2927 		goto bad;
2928 	}
2929 
2930 	ip6 = mtod(m, struct ip6_hdr *);
2931 
2932 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
2933 		ip6_statinc(IP6_STAT_BADVERS);
2934 		in6_ifstat_inc(m_get_rcvif_NOMPSAFE(m), ifs6_in_hdrerr);
2935 		goto bad;
2936 	}
2937 
2938 	/* Checks out, proceed */
2939 	*mp = m;
2940 	return 0;
2941 
2942     bad:
2943 	*mp = m;
2944 	return -1;
2945 }
2946 # endif /* INET6 */
2947