/*	$OpenBSD: bridgectl.c,v 1.25 2021/02/25 02:48:21 dlg Exp $	*/

/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include "pf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/timeout.h>
#include <sys/kernel.h>

#include <crypto/siphash.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_bridge.h>

int	bridge_rtfind(struct bridge_softc *, struct ifbaconf *);
int	bridge_rtdaddr(struct bridge_softc *, struct ether_addr *);
u_int32_t	bridge_hash(struct bridge_softc *, struct ether_addr *);

int	bridge_brlconf(struct bridge_iflist *, struct ifbrlconf *);
int	bridge_addrule(struct bridge_iflist *, struct ifbrlreq *, int out);

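/*
 * Handle the bridge(4) ioctls that manage the learning cache and the
 * Ethernet filter rules.
 */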
int
bridgectl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
	struct ifbreq *req = (struct ifbreq *)data;
	struct ifbrlreq *brlreq = (struct ifbrlreq *)data;
	struct ifbrlconf *bc = (struct ifbrlconf *)data;
	struct ifbareq *bareq = (struct ifbareq *)data;
	struct ifbrparam *bparam = (struct ifbrparam *)data;
	struct bridge_iflist *bif;
	struct ifnet *ifs;
	int error = 0;

	switch (cmd) {
	case SIOCBRDGRTS:
		error = bridge_rtfind(sc, (struct ifbaconf *)data);
		break;
	case SIOCBRDGFLUSH:
		bridge_rtflush(sc, req->ifbr_ifsflags);
		break;
	case SIOCBRDGSADDR:
		ifs = if_unit(bareq->ifba_ifsname);
		if (ifs == NULL) {		/* no such interface */
			error = ENOENT;
			break;
		}
		if (ifs->if_bridgeidx != ifp->if_index) {
			if_put(ifs);
			error = ESRCH;
			break;
		}

		if (bridge_rtupdate(sc, &bareq->ifba_dst, ifs, 1,
		    bareq->ifba_flags, NULL))
			error = ENOMEM;
		if_put(ifs);
		break;
	case SIOCBRDGDADDR:
		error = bridge_rtdaddr(sc, &bareq->ifba_dst);
		break;
	case SIOCBRDGGCACHE:
		bparam->ifbrp_csize = sc->sc_brtmax;
		break;
	case SIOCBRDGSCACHE:
		mtx_enter(&sc->sc_mtx);
		sc->sc_brtmax = bparam->ifbrp_csize;
		mtx_leave(&sc->sc_mtx);
		break;
	case SIOCBRDGSTO:
		if (bparam->ifbrp_ctime < 0 ||
		    bparam->ifbrp_ctime > INT_MAX / hz) {
			error = EINVAL;
			break;
		}
		sc->sc_brttimeout = bparam->ifbrp_ctime;
		if (bparam->ifbrp_ctime != 0)
			timeout_add_sec(&sc->sc_brtimeout, sc->sc_brttimeout);
		else
			timeout_del(&sc->sc_brtimeout);
		break;
	case SIOCBRDGGTO:
		bparam->ifbrp_ctime = sc->sc_brttimeout;
		break;
	case SIOCBRDGARL:
		if ((brlreq->ifbr_action != BRL_ACTION_BLOCK &&
		    brlreq->ifbr_action != BRL_ACTION_PASS) ||
		    (brlreq->ifbr_flags & (BRL_FLAG_IN|BRL_FLAG_OUT)) == 0) {
			error = EINVAL;
			break;
		}
		error = bridge_findbif(sc, brlreq->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		if (brlreq->ifbr_flags & BRL_FLAG_IN) {
			error = bridge_addrule(bif, brlreq, 0);
			if (error)
				break;
		}
		if (brlreq->ifbr_flags & BRL_FLAG_OUT) {
			error = bridge_addrule(bif, brlreq, 1);
			if (error)
				break;
		}
		break;
	case SIOCBRDGFRL:
		error = bridge_findbif(sc, brlreq->ifbr_ifsname, &bif);
		if (error != 0)
			break;
		bridge_flushrule(bif);
		break;
	case SIOCBRDGGRL:
		error = bridge_findbif(sc, bc->ifbrl_ifsname, &bif);
		if (error != 0)
			break;
		error = bridge_brlconf(bif, bc);
		break;
	default:
		break;
	}

	return (error);
}

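/*
 * Insert or refresh an entry in the learning cache.  Each hash bucket
 * is kept sorted by address; returns non-zero if the entry could not
 * be added (cache full or allocation failure).
 */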
int
bridge_rtupdate(struct bridge_softc *sc, struct ether_addr *ea,
    struct ifnet *ifp, int setflags, u_int8_t flags, struct mbuf *m)
{
	struct bridge_rtnode *p, *q;
	struct bridge_tunneltag *brtag = NULL;
	u_int32_t h;
	int dir, error = 0;

	if (m != NULL) {
		/* Check if the mbuf was tagged with a tunnel endpoint addr */
		brtag = bridge_tunnel(m);
	}

	h = bridge_hash(sc, ea);
	mtx_enter(&sc->sc_mtx);
	p = LIST_FIRST(&sc->sc_rts[h]);
	if (p == NULL) {
		if (sc->sc_brtcnt >= sc->sc_brtmax)
			goto done;
		p = malloc(sizeof(*p), M_DEVBUF, M_NOWAIT);
		if (p == NULL)
			goto done;

		bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
		p->brt_ifidx = ifp->if_index;
		p->brt_age = 1;
		bridge_copytag(brtag, &p->brt_tunnel);

		if (setflags)
			p->brt_flags = flags;
		else
			p->brt_flags = IFBAF_DYNAMIC;

		LIST_INSERT_HEAD(&sc->sc_rts[h], p, brt_next);
		sc->sc_brtcnt++;
		goto want;
	}

	do {
		q = p;
		p = LIST_NEXT(p, brt_next);

		dir = memcmp(ea, &q->brt_addr, sizeof(q->brt_addr));
		if (dir == 0) {
			if (setflags) {
				q->brt_ifidx = ifp->if_index;
				q->brt_flags = flags;
			} else if (!(q->brt_flags & IFBAF_STATIC))
				q->brt_ifidx = ifp->if_index;

			if (q->brt_ifidx == ifp->if_index)
				q->brt_age = 1;
			bridge_copytag(brtag, &q->brt_tunnel);
			goto want;
		}

		if (dir > 0) {
			if (sc->sc_brtcnt >= sc->sc_brtmax)
				goto done;
			p = malloc(sizeof(*p), M_DEVBUF, M_NOWAIT);
			if (p == NULL)
				goto done;

			bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
			p->brt_ifidx = ifp->if_index;
			p->brt_age = 1;
			bridge_copytag(brtag, &p->brt_tunnel);

			if (setflags)
				p->brt_flags = flags;
			else
				p->brt_flags = IFBAF_DYNAMIC;

			LIST_INSERT_BEFORE(q, p, brt_next);
			sc->sc_brtcnt++;
			goto want;
		}

		if (p == NULL) {
			if (sc->sc_brtcnt >= sc->sc_brtmax)
				goto done;
			p = malloc(sizeof(*p), M_DEVBUF, M_NOWAIT);
			if (p == NULL)
				goto done;

			bcopy(ea, &p->brt_addr, sizeof(p->brt_addr));
			p->brt_ifidx = ifp->if_index;
			p->brt_age = 1;
			bridge_copytag(brtag, &p->brt_tunnel);

			if (setflags)
				p->brt_flags = flags;
			else
				p->brt_flags = IFBAF_DYNAMIC;
			LIST_INSERT_AFTER(q, p, brt_next);
			sc->sc_brtcnt++;
			goto want;
		}
	} while (p != NULL);

done:
	error = 1;
want:
	mtx_leave(&sc->sc_mtx);
	return (error);
}

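/*
 * Look up the interface index that a destination address was learned
 * on; also copy any tunnel endpoint tag onto the mbuf.  Returns 0 if
 * the address is not in the cache.
 */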
unsigned int
bridge_rtlookup(struct ifnet *brifp, struct ether_addr *ea, struct mbuf *m)
{
	struct bridge_softc *sc = brifp->if_softc;
	struct bridge_rtnode *p = NULL;
	unsigned int ifidx = 0;
	u_int32_t h;
	int dir;

	h = bridge_hash(sc, ea);
	mtx_enter(&sc->sc_mtx);
	LIST_FOREACH(p, &sc->sc_rts[h], brt_next) {
		dir = memcmp(ea, &p->brt_addr, sizeof(p->brt_addr));
		if (dir == 0)
			break;
		if (dir > 0) {
			p = NULL;
			break;
		}
	}
	if (p != NULL) {
		ifidx = p->brt_ifidx;

		if (p->brt_family != AF_UNSPEC && m != NULL) {
			struct bridge_tunneltag *brtag;

			brtag = bridge_tunneltag(m);
			if (brtag != NULL)
				bridge_copytag(&p->brt_tunnel, brtag);
		}
	}
	mtx_leave(&sc->sc_mtx);

	return (ifidx);
}

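/*
 * Hash an Ethernet address into an index for sc_rts[], using the
 * per-bridge SipHash key.
 */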
u_int32_t
bridge_hash(struct bridge_softc *sc, struct ether_addr *addr)
{
	return SipHash24((SIPHASH_KEY *)sc->sc_hashkey, addr, ETHER_ADDR_LEN) &
	    BRIDGE_RTABLE_MASK;
}

/*
 * Perform an aging cycle
 */
void
bridge_rtage(void *vsc)
{
	struct bridge_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_if;
	struct bridge_rtnode *n, *p;
	int i;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	mtx_enter(&sc->sc_mtx);
	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != NULL) {
			if ((n->brt_flags & IFBAF_TYPEMASK) == IFBAF_STATIC) {
				n->brt_age = !n->brt_age;
				if (n->brt_age)
					n->brt_age = 0;
				n = LIST_NEXT(n, brt_next);
			} else if (n->brt_age) {
				n->brt_age = 0;
				n = LIST_NEXT(n, brt_next);
			} else {
				p = LIST_NEXT(n, brt_next);
				LIST_REMOVE(n, brt_next);
				sc->sc_brtcnt--;
				free(n, M_DEVBUF, sizeof *n);
				n = p;
			}
		}
	}
	mtx_leave(&sc->sc_mtx);

	if (sc->sc_brttimeout != 0)
		timeout_add_sec(&sc->sc_brtimeout, sc->sc_brttimeout);
}

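/*
 * Age the dynamic cache entries learned on a single member interface:
 * an age of zero flushes them, otherwise their expiry is capped to
 * 'age' seconds from now.
 */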
void
bridge_rtagenode(struct ifnet *ifp, int age)
{
	struct bridge_softc *sc;
	struct bridge_rtnode *n;
	struct ifnet *bifp;
	int i;

	bifp = if_get(ifp->if_bridgeidx);
	if (bifp == NULL)
		return;
	sc = bifp->if_softc;

	/*
	 * If the age is zero then flush, otherwise set all the expiry times to
	 * age for the interface
	 */
	if (age == 0)
		bridge_rtdelete(sc, ifp, 1);
	else {
		mtx_enter(&sc->sc_mtx);
		for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
			LIST_FOREACH(n, &sc->sc_rts[i], brt_next) {
				/* Cap the expiry time to 'age' */
				if (n->brt_ifidx == ifp->if_index &&
				    n->brt_age > getuptime() + age &&
				    (n->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
					n->brt_age = getuptime() + age;
			}
		}
		mtx_leave(&sc->sc_mtx);
	}

	if_put(bifp);
}

/*
 * Remove all dynamic addresses from the cache
 */
void
bridge_rtflush(struct bridge_softc *sc, int full)
{
	int i;
	struct bridge_rtnode *p, *n;

	mtx_enter(&sc->sc_mtx);
	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != NULL) {
			if (full ||
			    (n->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
				p = LIST_NEXT(n, brt_next);
				LIST_REMOVE(n, brt_next);
				sc->sc_brtcnt--;
				free(n, M_DEVBUF, sizeof *n);
				n = p;
			} else
				n = LIST_NEXT(n, brt_next);
		}
	}
	mtx_leave(&sc->sc_mtx);
}

/*
 * Remove an address from the cache
 */
int
bridge_rtdaddr(struct bridge_softc *sc, struct ether_addr *ea)
{
	int h;
	struct bridge_rtnode *p;

	h = bridge_hash(sc, ea);
	mtx_enter(&sc->sc_mtx);
	LIST_FOREACH(p, &sc->sc_rts[h], brt_next) {
		if (memcmp(ea, &p->brt_addr, sizeof(p->brt_addr)) == 0) {
			LIST_REMOVE(p, brt_next);
			sc->sc_brtcnt--;
			mtx_leave(&sc->sc_mtx);
			free(p, M_DEVBUF, sizeof *p);
			return (0);
		}
	}
	mtx_leave(&sc->sc_mtx);

	return (ENOENT);
}

/*
 * Delete routes to a specific interface member.
 */
void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int dynonly)
{
	int i;
	struct bridge_rtnode *n, *p;

	/*
	 * Loop through all of the hash buckets and traverse each
	 * chain looking for routes to this interface.
	 */
	mtx_enter(&sc->sc_mtx);
	for (i = 0; i < BRIDGE_RTABLE_SIZE; i++) {
		n = LIST_FIRST(&sc->sc_rts[i]);
		while (n != NULL) {
			if (n->brt_ifidx != ifp->if_index) {
				/* Not ours */
				n = LIST_NEXT(n, brt_next);
				continue;
			}
			if (dynonly &&
			    (n->brt_flags & IFBAF_TYPEMASK) != IFBAF_DYNAMIC) {
				/* only deleting dynamics */
				n = LIST_NEXT(n, brt_next);
				continue;
			}
			p = LIST_NEXT(n, brt_next);
			LIST_REMOVE(n, brt_next);
			sc->sc_brtcnt--;
			free(n, M_DEVBUF, sizeof *n);
			n = p;
		}
	}
	mtx_leave(&sc->sc_mtx);
}

/*
 * Gather all of the routes for this interface.
 */
int
bridge_rtfind(struct bridge_softc *sc, struct ifbaconf *baconf)
{
	struct ifbareq *bareq, *bareqs = NULL;
	struct bridge_rtnode *n;
	u_int32_t i = 0, total = 0;
	int k, error = 0;

	mtx_enter(&sc->sc_mtx);
	for (k = 0; k < BRIDGE_RTABLE_SIZE; k++) {
		LIST_FOREACH(n, &sc->sc_rts[k], brt_next)
			total++;
	}
	mtx_leave(&sc->sc_mtx);

	if (baconf->ifbac_len == 0) {
		i = total;
		goto done;
	}

	total = MIN(total, baconf->ifbac_len / sizeof(*bareqs));
	bareqs = mallocarray(total, sizeof(*bareqs), M_TEMP, M_NOWAIT|M_ZERO);
	if (bareqs == NULL)
		goto done;

	mtx_enter(&sc->sc_mtx);
	for (k = 0; k < BRIDGE_RTABLE_SIZE; k++) {
		LIST_FOREACH(n, &sc->sc_rts[k], brt_next) {
			struct ifnet *ifp;

			if (i >= total) {
				mtx_leave(&sc->sc_mtx);
				goto done;
			}
			bareq = &bareqs[i];

			ifp = if_get(n->brt_ifidx);
			if (ifp == NULL)
				continue;
			bcopy(ifp->if_xname, bareq->ifba_ifsname,
			    sizeof(bareq->ifba_ifsname));
			if_put(ifp);

			bcopy(sc->sc_if.if_xname, bareq->ifba_name,
			    sizeof(bareq->ifba_name));
			bcopy(&n->brt_addr, &bareq->ifba_dst,
			    sizeof(bareq->ifba_dst));
			bridge_copyaddr(&n->brt_tunnel.brtag_peer.sa,
			    sstosa(&bareq->ifba_dstsa));
			bareq->ifba_age = n->brt_age;
			bareq->ifba_flags = n->brt_flags;
			i++;
		}
	}
	mtx_leave(&sc->sc_mtx);

	error = copyout(bareqs, baconf->ifbac_req, i * sizeof(*bareqs));
done:
	free(bareqs, M_TEMP, total * sizeof(*bareqs));
	baconf->ifbac_len = i * sizeof(*bareqs);
	return (error);
}

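/*
 * Refresh the cache entry for 'ea': the old entry is dropped and,
 * unless 'delete' is set, the address is re-learned on 'ifp'.  Only
 * learning ports are touched, and ports in the STP discarding state
 * are left alone.
 */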
void
bridge_update(struct ifnet *ifp, struct ether_addr *ea, int delete)
{
	struct bridge_softc *sc;
	struct bridge_iflist *bif;
	u_int8_t *addr;

	addr = (u_int8_t *)ea;

	bif = bridge_getbif(ifp);
	if (bif == NULL)
		return;
	sc = bif->bridge_sc;
	if (sc == NULL)
		return;

	/*
	 * Update the bridge interface if it is in
	 * the learning state.
	 */
	if ((bif->bif_flags & IFBIF_LEARNING) &&
	    (ETHER_IS_MULTICAST(addr) == 0) &&
	    !(addr[0] == 0 && addr[1] == 0 && addr[2] == 0 &&
	    addr[3] == 0 && addr[4] == 0 && addr[5] == 0)) {
		/* Care must be taken with spanning tree */
		if ((bif->bif_flags & IFBIF_STP) &&
		    (bif->bif_state == BSTP_IFSTATE_DISCARDING))
			return;

		/* Delete the address from the bridge */
		bridge_rtdaddr(sc, ea);

		if (!delete) {
			/* Update the bridge table */
			bridge_rtupdate(sc, ea, ifp, 0, IFBAF_DYNAMIC, NULL);
		}
	}
}

/*
 * bridge filter/matching rules
 */
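
/*
 * Copy a member's input and output rule lists out to userland; with
 * ifbrl_len == 0 only the space required is reported.
 */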
int
bridge_brlconf(struct bridge_iflist *bif, struct ifbrlconf *bc)
{
	struct bridge_softc *sc = bif->bridge_sc;
	struct brl_node *n;
	struct ifbrlreq *req, *reqs = NULL;
	int error = 0;
	u_int32_t i = 0, total = 0;

	SIMPLEQ_FOREACH(n, &bif->bif_brlin, brl_next) {
		total++;
	}
	SIMPLEQ_FOREACH(n, &bif->bif_brlout, brl_next) {
		total++;
	}

	if (bc->ifbrl_len == 0) {
		i = total;
		goto done;
	}

	reqs = mallocarray(total, sizeof(*reqs), M_TEMP, M_NOWAIT|M_ZERO);
	if (reqs == NULL)
		goto done;

	SIMPLEQ_FOREACH(n, &bif->bif_brlin, brl_next) {
		if (bc->ifbrl_len < (i + 1) * sizeof(*reqs))
			goto done;
		req = &reqs[i];
		strlcpy(req->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(req->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		req->ifbr_action = n->brl_action;
		req->ifbr_flags = n->brl_flags;
		req->ifbr_src = n->brl_src;
		req->ifbr_dst = n->brl_dst;
		req->ifbr_arpf = n->brl_arpf;
#if NPF > 0
		req->ifbr_tagname[0] = '\0';
		if (n->brl_tag)
			pf_tag2tagname(n->brl_tag, req->ifbr_tagname);
#endif
		i++;
	}

	SIMPLEQ_FOREACH(n, &bif->bif_brlout, brl_next) {
		if (bc->ifbrl_len < (i + 1) * sizeof(*reqs))
			goto done;
		req = &reqs[i];
		strlcpy(req->ifbr_name, sc->sc_if.if_xname, IFNAMSIZ);
		strlcpy(req->ifbr_ifsname, bif->ifp->if_xname, IFNAMSIZ);
		req->ifbr_action = n->brl_action;
		req->ifbr_flags = n->brl_flags;
		req->ifbr_src = n->brl_src;
		req->ifbr_dst = n->brl_dst;
		req->ifbr_arpf = n->brl_arpf;
#if NPF > 0
		req->ifbr_tagname[0] = '\0';
		if (n->brl_tag)
			pf_tag2tagname(n->brl_tag, req->ifbr_tagname);
#endif
		i++;
	}

	error = copyout(reqs, bc->ifbrl_buf, i * sizeof(*reqs));
done:
	free(reqs, M_TEMP, total * sizeof(*reqs));
	bc->ifbrl_len = i * sizeof(*reqs);
	return (error);
}

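/*
 * Match a frame against a rule's optional ARP/RARP criteria.  Returns
 * 1 if the rule still applies, 0 if the frame does not match.
 */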
u_int8_t
bridge_arpfilter(struct brl_node *n, struct ether_header *eh, struct mbuf *m)
{
	struct ether_arp ea;

	if (!(n->brl_arpf.brla_flags & (BRLA_ARP|BRLA_RARP)))
		return (1);

	if (ntohs(eh->ether_type) != ETHERTYPE_ARP)
		return (0);
	if (m->m_pkthdr.len < ETHER_HDR_LEN + sizeof(ea))
		return (0);	/* log error? */
	m_copydata(m, ETHER_HDR_LEN, sizeof(ea), &ea);

	if (ntohs(ea.arp_hrd) != ARPHRD_ETHER ||
	    ntohs(ea.arp_pro) != ETHERTYPE_IP ||
	    ea.arp_hln != ETHER_ADDR_LEN ||
	    ea.arp_pln != sizeof(struct in_addr))
		return (0);
	if ((n->brl_arpf.brla_flags & BRLA_ARP) &&
	    ntohs(ea.arp_op) != ARPOP_REQUEST &&
	    ntohs(ea.arp_op) != ARPOP_REPLY)
		return (0);
	if ((n->brl_arpf.brla_flags & BRLA_RARP) &&
	    ntohs(ea.arp_op) != ARPOP_REVREQUEST &&
	    ntohs(ea.arp_op) != ARPOP_REVREPLY)
		return (0);
	if (n->brl_arpf.brla_op && ntohs(ea.arp_op) != n->brl_arpf.brla_op)
		return (0);
	if (n->brl_arpf.brla_flags & BRLA_SHA &&
	    memcmp(ea.arp_sha, &n->brl_arpf.brla_sha, ETHER_ADDR_LEN))
		return (0);
	if (n->brl_arpf.brla_flags & BRLA_THA &&
	    memcmp(ea.arp_tha, &n->brl_arpf.brla_tha, ETHER_ADDR_LEN))
		return (0);
	if (n->brl_arpf.brla_flags & BRLA_SPA &&
	    memcmp(ea.arp_spa, &n->brl_arpf.brla_spa, sizeof(struct in_addr)))
		return (0);
	if (n->brl_arpf.brla_flags & BRLA_TPA &&
	    memcmp(ea.arp_tpa, &n->brl_arpf.brla_tpa, sizeof(struct in_addr)))
		return (0);

	return (1);
}

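/*
 * Run a frame through a rule list and return the action of the first
 * matching rule, or BRL_ACTION_PASS if no rule matches.
 */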
u_int8_t
bridge_filterrule(struct brl_head *h, struct ether_header *eh, struct mbuf *m)
{
	struct brl_node *n;
	u_int8_t action, flags;

	if (SIMPLEQ_EMPTY(h))
		return (BRL_ACTION_PASS);

	KERNEL_LOCK();
	SIMPLEQ_FOREACH(n, h, brl_next) {
		if (!bridge_arpfilter(n, eh, m))
			continue;
		flags = n->brl_flags & (BRL_FLAG_SRCVALID|BRL_FLAG_DSTVALID);
		if (flags == 0)
			goto return_action;
		if (flags == (BRL_FLAG_SRCVALID|BRL_FLAG_DSTVALID)) {
			if (memcmp(eh->ether_shost, &n->brl_src,
			    ETHER_ADDR_LEN))
				continue;
			if (memcmp(eh->ether_dhost, &n->brl_dst,
			    ETHER_ADDR_LEN))
				continue;
			goto return_action;
		}
		if (flags == BRL_FLAG_SRCVALID) {
			if (memcmp(eh->ether_shost, &n->brl_src,
			    ETHER_ADDR_LEN))
				continue;
			goto return_action;
		}
		if (flags == BRL_FLAG_DSTVALID) {
			if (memcmp(eh->ether_dhost, &n->brl_dst,
			    ETHER_ADDR_LEN))
				continue;
			goto return_action;
		}
	}
	KERNEL_UNLOCK();
	return (BRL_ACTION_PASS);

return_action:
#if NPF > 0
	pf_tag_packet(m, n->brl_tag, -1);
#endif
	action = n->brl_action;
	KERNEL_UNLOCK();
	return (action);
}

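/*
 * Append a rule from an ifbrlreq to the member's input or output
 * rule list.
 */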
int
bridge_addrule(struct bridge_iflist *bif, struct ifbrlreq *req, int out)
{
	struct brl_node *n;

	n = malloc(sizeof(*n), M_DEVBUF, M_NOWAIT);
	if (n == NULL)
		return (ENOMEM);
	bcopy(&req->ifbr_src, &n->brl_src, sizeof(struct ether_addr));
	bcopy(&req->ifbr_dst, &n->brl_dst, sizeof(struct ether_addr));
	n->brl_action = req->ifbr_action;
	n->brl_flags = req->ifbr_flags;
	n->brl_arpf = req->ifbr_arpf;
#if NPF > 0
	if (req->ifbr_tagname[0])
		n->brl_tag = pf_tagname2tag(req->ifbr_tagname, 1);
	else
		n->brl_tag = 0;
#endif

	KERNEL_ASSERT_LOCKED();

	if (out) {
		n->brl_flags &= ~BRL_FLAG_IN;
		n->brl_flags |= BRL_FLAG_OUT;
		SIMPLEQ_INSERT_TAIL(&bif->bif_brlout, n, brl_next);
	} else {
		n->brl_flags &= ~BRL_FLAG_OUT;
		n->brl_flags |= BRL_FLAG_IN;
		SIMPLEQ_INSERT_TAIL(&bif->bif_brlin, n, brl_next);
	}
	return (0);
}

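/*
 * Free all filter rules attached to a member interface.
 */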
void
bridge_flushrule(struct bridge_iflist *bif)
{
	struct brl_node *p;

	KERNEL_ASSERT_LOCKED();

	while (!SIMPLEQ_EMPTY(&bif->bif_brlin)) {
		p = SIMPLEQ_FIRST(&bif->bif_brlin);
		SIMPLEQ_REMOVE_HEAD(&bif->bif_brlin, brl_next);
#if NPF > 0
		pf_tag_unref(p->brl_tag);
#endif
		free(p, M_DEVBUF, sizeof *p);
	}
	while (!SIMPLEQ_EMPTY(&bif->bif_brlout)) {
		p = SIMPLEQ_FIRST(&bif->bif_brlout);
		SIMPLEQ_REMOVE_HEAD(&bif->bif_brlout, brl_next);
#if NPF > 0
		pf_tag_unref(p->brl_tag);
#endif
		free(p, M_DEVBUF, sizeof *p);
	}
}

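/*
 * Return the tunnel endpoint tag attached to an mbuf, or NULL if the
 * packet carries none.
 */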
struct bridge_tunneltag *
bridge_tunnel(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_TUNNEL, NULL)) == NULL)
		return (NULL);

	return ((struct bridge_tunneltag *)(mtag + 1));
}

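/*
 * Like bridge_tunnel(), but attach a zeroed tag if the mbuf does not
 * carry one yet.
 */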
struct bridge_tunneltag *
bridge_tunneltag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_TUNNEL, NULL)) == NULL) {
		mtag = m_tag_get(PACKET_TAG_TUNNEL,
		    sizeof(struct bridge_tunneltag), M_NOWAIT);
		if (mtag == NULL)
			return (NULL);
		bzero(mtag + 1, sizeof(struct bridge_tunneltag));
		m_tag_prepend(m, mtag);
	}

	return ((struct bridge_tunneltag *)(mtag + 1));
}

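/*
 * Strip the tunnel endpoint tag from an mbuf, if present.
 */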
void
bridge_tunneluntag(struct mbuf *m)
{
	struct m_tag *mtag;
	if ((mtag = m_tag_find(m, PACKET_TAG_TUNNEL, NULL)) != NULL)
		m_tag_delete(m, mtag);
}

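/*
 * Copy a sockaddr, treating a NULL or AF_UNSPEC source as "no address".
 */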
void
bridge_copyaddr(struct sockaddr *src, struct sockaddr *dst)
{
	if (src != NULL && src->sa_family != AF_UNSPEC)
		memcpy(dst, src, src->sa_len);
	else {
		dst->sa_family = AF_UNSPEC;
		dst->sa_len = 0;
	}
}

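/*
 * Copy a tunnel endpoint tag; a NULL source clears the destination.
 */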
void
bridge_copytag(struct bridge_tunneltag *src, struct bridge_tunneltag *dst)
{
	if (src == NULL) {
		memset(dst, 0, sizeof(*dst));
	} else {
		bridge_copyaddr(&src->brtag_peer.sa, &dst->brtag_peer.sa);
		bridge_copyaddr(&src->brtag_local.sa, &dst->brtag_local.sa);
		dst->brtag_id = src->brtag_id;
	}
}