/*	$OpenBSD: rde_filter.c,v 1.136 2023/05/09 13:11:19 claudio Exp $ */

/*
 * Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2016 Job Snijders <job@instituut.net>
 * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org>
 * Copyright (c) 2018 Sebastian Benoit <benno@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "bgpd.h"
#include "rde.h"
#include "log.h"

int	filterset_equal(struct filter_set_head *, struct filter_set_head *);

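/*
 * Apply the actions of filter set sh to the route described by state.
 * peer is the neighbor the filter is evaluated for, from is the neighbor
 * the route was learned from (used by prepend-peer and as-override).
 */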
void
rde_apply_set(struct filter_set_head *sh, struct rde_peer *peer,
    struct rde_peer *from, struct filterstate *state, uint8_t aid)
{
	struct filter_set	*set;
	u_char			*np;
	uint32_t		 prep_as;
	uint16_t		 nl;
	uint8_t			 prepend;

	TAILQ_FOREACH(set, sh, entry) {
		switch (set->type) {
		case ACTION_SET_LOCALPREF:
			state->aspath.lpref = set->action.metric;
			break;
		case ACTION_SET_RELATIVE_LOCALPREF:
			if (set->action.relative > 0) {
				if (state->aspath.lpref >
				    UINT_MAX - set->action.relative)
					state->aspath.lpref = UINT_MAX;
				else
					state->aspath.lpref +=
					    set->action.relative;
			} else {
				if (state->aspath.lpref <
				    0U - set->action.relative)
					state->aspath.lpref = 0;
				else
					state->aspath.lpref +=
					    set->action.relative;
			}
			break;
		case ACTION_SET_MED:
			state->aspath.flags |= F_ATTR_MED | F_ATTR_MED_ANNOUNCE;
			state->aspath.med = set->action.metric;
			break;
		case ACTION_SET_RELATIVE_MED:
			state->aspath.flags |= F_ATTR_MED | F_ATTR_MED_ANNOUNCE;
			if (set->action.relative > 0) {
				if (state->aspath.med >
				    UINT_MAX - set->action.relative)
					state->aspath.med = UINT_MAX;
				else
					state->aspath.med +=
					    set->action.relative;
			} else {
				if (state->aspath.med <
				    0U - set->action.relative)
					state->aspath.med = 0;
				else
					state->aspath.med +=
					    set->action.relative;
			}
			break;
		case ACTION_SET_WEIGHT:
			state->aspath.weight = set->action.metric;
			break;
		case ACTION_SET_RELATIVE_WEIGHT:
			if (set->action.relative > 0) {
				if (state->aspath.weight >
				    UINT_MAX - set->action.relative)
					state->aspath.weight = UINT_MAX;
				else
					state->aspath.weight +=
					    set->action.relative;
			} else {
				if (state->aspath.weight <
				    0U - set->action.relative)
					state->aspath.weight = 0;
				else
					state->aspath.weight +=
					    set->action.relative;
			}
			break;
		case ACTION_SET_PREPEND_SELF:
			prep_as = peer->conf.local_as;
			prepend = set->action.prepend;
			np = aspath_prepend(state->aspath.aspath, prep_as,
			    prepend, &nl);
			aspath_put(state->aspath.aspath);
			state->aspath.aspath = aspath_get(np, nl);
			free(np);
			break;
		case ACTION_SET_PREPEND_PEER:
			if (from == NULL)
				break;
			prep_as = from->conf.remote_as;
			prepend = set->action.prepend;
			np = aspath_prepend(state->aspath.aspath, prep_as,
			    prepend, &nl);
			aspath_put(state->aspath.aspath);
			state->aspath.aspath = aspath_get(np, nl);
			free(np);
			break;
		case ACTION_SET_AS_OVERRIDE:
			if (from == NULL)
				break;
			np = aspath_override(state->aspath.aspath,
			    from->conf.remote_as, from->conf.local_as, &nl);
			aspath_put(state->aspath.aspath);
			state->aspath.aspath = aspath_get(np, nl);
			free(np);
			break;
		case ACTION_SET_NEXTHOP:
			fatalx("unexpected filter action in RDE");
		case ACTION_SET_NEXTHOP_REF:
		case ACTION_SET_NEXTHOP_REJECT:
		case ACTION_SET_NEXTHOP_BLACKHOLE:
		case ACTION_SET_NEXTHOP_NOMODIFY:
		case ACTION_SET_NEXTHOP_SELF:
			nexthop_modify(set->action.nh_ref, set->type, aid,
			    &state->nexthop, &state->nhflags);
			break;
		case ACTION_SET_COMMUNITY:
			community_set(&state->communities,
			    &set->action.community, peer);
			break;
		case ACTION_DEL_COMMUNITY:
			community_delete(&state->communities,
			    &set->action.community, peer);
			break;
		case ACTION_PFTABLE:
			/* convert pftable name to an id */
			set->action.id = pftable_name2id(set->action.pftable);
			set->type = ACTION_PFTABLE_ID;
			/* FALLTHROUGH */
		case ACTION_PFTABLE_ID:
			pftable_unref(state->aspath.pftableid);
			state->aspath.pftableid = pftable_ref(set->action.id);
			break;
		case ACTION_RTLABEL:
			/* convert the route label to an id for faster access */
			set->action.id = rtlabel_name2id(set->action.rtlabel);
			set->type = ACTION_RTLABEL_ID;
			/* FALLTHROUGH */
		case ACTION_RTLABEL_ID:
			rtlabel_unref(state->aspath.rtlabelid);
			state->aspath.rtlabelid = rtlabel_ref(set->action.id);
			break;
		case ACTION_SET_ORIGIN:
			state->aspath.origin = set->action.origin;
			break;
		}
	}
}

/* return 1 when prefix matches filter_prefix, 0 if not */
static int
rde_prefix_match(struct filter_prefix *fp, struct bgpd_addr *prefix,
    uint8_t plen)
{
	if (fp->addr.aid != prefix->aid)
		/* don't use IPv4 rules for IPv6 and vice versa */
		return (0);

	if (prefix_compare(prefix, &fp->addr, fp->len))
		return (0);

	/* test the prefixlen constraints as well */
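	/*
	 * Illustrative examples (assumed bgpd.conf syntax): a rule like
	 * "prefix 192.0.2.0/24 prefixlen 25 - 28" would be encoded as
	 * OP_RANGE with len_min 25 and len_max 28, while a plain
	 * "prefix 192.0.2.0/24" uses OP_NONE and requires an exact match.
	 */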
	switch (fp->op) {
	case OP_NONE:	/* perfect match */
		return (plen == fp->len);
	case OP_EQ:
		return (plen == fp->len_min);
	case OP_NE:
		return (plen != fp->len_min);
	case OP_RANGE:
		return ((plen >= fp->len_min) &&
		    (plen <= fp->len_max));
	case OP_XRANGE:
		return ((plen < fp->len_min) ||
		    (plen > fp->len_max));
	default:
		log_warnx("%s: unsupported prefix operation", __func__);
		return (0);
	}
}

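/*
 * Check whether filter rule f matches the update for prefix/plen learned
 * from "from" and evaluated for "peer".  Returns 1 on match, 0 otherwise.
 */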
static int
rde_filter_match(struct filter_rule *f, struct rde_peer *peer,
    struct rde_peer *from, struct filterstate *state,
    struct bgpd_addr *prefix, uint8_t plen)
{
	struct rde_aspath	*asp = &state->aspath;
	int			 i;

	if (f->peer.ebgp && !peer->conf.ebgp)
		return (0);
	if (f->peer.ibgp && peer->conf.ebgp)
		return (0);

	if (f->match.ovs.is_set) {
		if ((state->vstate & ROA_MASK) != f->match.ovs.validity)
			return (0);
	}

	if (f->match.avs.is_set) {
		if (((state->vstate >> 4) & ASPA_MASK) != f->match.avs.validity)
			return (0);
	}

	if (asp != NULL && f->match.as.type != AS_UNDEF) {
		if (aspath_match(asp->aspath, &f->match.as,
		    peer->conf.remote_as) == 0)
			return (0);
	}

	if (asp != NULL && f->match.aslen.type != ASLEN_NONE)
		if (aspath_lenmatch(asp->aspath, f->match.aslen.type,
		    f->match.aslen.aslen) == 0)
			return (0);

	for (i = 0; i < MAX_COMM_MATCH; i++) {
		if (f->match.community[i].flags == 0)
			break;
		if (community_match(&state->communities,
		    &f->match.community[i], peer) == 0)
			return (0);
	}

	if (f->match.maxcomm != 0) {
		if (f->match.maxcomm >
		    community_count(&state->communities, COMMUNITY_TYPE_BASIC))
			return (0);
	}
	if (f->match.maxextcomm != 0) {
		if (f->match.maxextcomm >
		    community_count(&state->communities, COMMUNITY_TYPE_EXT))
			return (0);
	}
	if (f->match.maxlargecomm != 0) {
		if (f->match.maxlargecomm >
		    community_count(&state->communities, COMMUNITY_TYPE_LARGE))
			return (0);
	}

	if (f->match.nexthop.flags != 0) {
		struct bgpd_addr *nexthop, *cmpaddr;
		if (state->nexthop == NULL)
			/* no nexthop, skip */
			return (0);
		nexthop = &state->nexthop->exit_nexthop;
		if (f->match.nexthop.flags == FILTER_NEXTHOP_ADDR)
			cmpaddr = &f->match.nexthop.addr;
		else
			cmpaddr = &from->remote_addr;
		if (cmpaddr->aid != nexthop->aid)
			/* don't use IPv4 rules for IPv6 and vice versa */
			return (0);

		switch (cmpaddr->aid) {
		case AID_INET:
			if (cmpaddr->v4.s_addr != nexthop->v4.s_addr)
				return (0);
			break;
		case AID_INET6:
			if (memcmp(&cmpaddr->v6, &nexthop->v6,
			    sizeof(struct in6_addr)))
				return (0);
			break;
		default:
			fatalx("King Bula lost in address space");
		}
	}

	/* origin-set lookups match only on ROA_VALID */
	if (asp != NULL && f->match.originset.ps != NULL) {
		if (trie_roa_check(&f->match.originset.ps->th, prefix, plen,
		    aspath_origin(asp->aspath)) != ROA_VALID)
			return (0);
	}

	/*
	 * prefixset and prefix filter rules are mutually exclusive
	 */
	if (f->match.prefixset.flags != 0) {
		if (f->match.prefixset.ps == NULL ||
		    !trie_match(&f->match.prefixset.ps->th, prefix, plen,
		    (f->match.prefixset.flags & PREFIXSET_FLAG_LONGER)))
			return (0);
	} else if (f->match.prefix.addr.aid != 0)
		return (rde_prefix_match(&f->match.prefix, prefix, plen));

	/* matched all given criteria or this is an any-match rule */
	return (1);
}

/* return true when the rule f can never match for this peer */
int
rde_filter_skip_rule(struct rde_peer *peer, struct filter_rule *f)
{
	/* if any of the two is unset then rule can't be skipped */
	if (peer == NULL || f == NULL)
		return (0);

	if (f->peer.groupid != 0 &&
	    f->peer.groupid != peer->conf.groupid)
		return (1);

	if (f->peer.peerid != 0 &&
	    f->peer.peerid != peer->conf.id)
		return (1);

	if (f->peer.remote_as != 0 &&
	    f->peer.remote_as != peer->conf.remote_as)
		return (1);

	if (f->peer.ebgp != 0 &&
	    f->peer.ebgp != peer->conf.ebgp)
		return (1);

	if (f->peer.ibgp != 0 &&
	    f->peer.ibgp != !peer->conf.ebgp)
		return (1);

	return (0);
}

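/*
 * Compare two filter rule chains.  Returns 1 only if all rules are equal
 * and none of the referenced prefix-, origin- or as-sets has changed.
 */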
int
rde_filter_equal(struct filter_head *a, struct filter_head *b)
{
	struct filter_rule	*fa, *fb;
	struct rde_prefixset	*psa, *psb, *osa, *osb;
	struct as_set		*asa, *asb;
	int			 r;

	fa = a ? TAILQ_FIRST(a) : NULL;
	fb = b ? TAILQ_FIRST(b) : NULL;

	while (fa != NULL || fb != NULL) {
		/* compare the two rules */
		if ((fa == NULL && fb != NULL) || (fa != NULL && fb == NULL))
			/* new rule added or removed */
			return (0);

		if (fa->action != fb->action || fa->quick != fb->quick)
			return (0);
		if (memcmp(&fa->peer, &fb->peer, sizeof(fa->peer)))
			return (0);

		/* compare filter_rule.match without the prefixset pointer */
		psa = fa->match.prefixset.ps;
		psb = fb->match.prefixset.ps;
		osa = fa->match.originset.ps;
		osb = fb->match.originset.ps;
		asa = fa->match.as.aset;
		asb = fb->match.as.aset;
		fa->match.prefixset.ps = fb->match.prefixset.ps = NULL;
		fa->match.originset.ps = fb->match.originset.ps = NULL;
		fa->match.as.aset = fb->match.as.aset = NULL;
		r = memcmp(&fa->match, &fb->match, sizeof(fa->match));
		/* fixup the struct again */
		fa->match.prefixset.ps = psa;
		fb->match.prefixset.ps = psb;
		fa->match.originset.ps = osa;
		fb->match.originset.ps = osb;
		fa->match.as.aset = asa;
		fb->match.as.aset = asb;
		if (r != 0)
			return (0);
		if (fa->match.prefixset.ps != NULL &&
		    fa->match.prefixset.ps->dirty) {
			log_debug("%s: prefixset %s has changed",
			    __func__, fa->match.prefixset.name);
			return (0);
		}
		if (fa->match.originset.ps != NULL &&
		    fa->match.originset.ps->dirty) {
			log_debug("%s: originset %s has changed",
			    __func__, fa->match.originset.name);
			return (0);
		}
		if ((fa->match.as.flags & AS_FLAG_AS_SET) &&
		    fa->match.as.aset->dirty) {
			log_debug("%s: as-set %s has changed",
			    __func__, fa->match.as.name);
			return (0);
		}

		if (!filterset_equal(&fa->set, &fb->set))
			return (0);

		fa = TAILQ_NEXT(fa, entry);
		fb = TAILQ_NEXT(fb, entry);
	}
	return (1);
}

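/* Initialize state to an empty filterstate. */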
void
rde_filterstate_init(struct filterstate *state)
{
	memset(state, 0, sizeof(*state));
	path_prep(&state->aspath);
}

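/* Fill state with copies of, or references to, the supplied attributes. */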
static void
rde_filterstate_set(struct filterstate *state, struct rde_aspath *asp,
    struct rde_community *communities, struct nexthop *nh, uint8_t nhflags,
    uint8_t vstate)
{
	rde_filterstate_init(state);

	if (asp)
		path_copy(&state->aspath, asp);
	if (communities)
		communities_copy(&state->communities, communities);
	state->nexthop = nexthop_ref(nh);
	state->nhflags = nhflags;
	state->vstate = vstate;
}

/*
 * Build a filterstate based on the prefix p.
 */
void
rde_filterstate_prep(struct filterstate *state, struct prefix *p)
{
	rde_filterstate_set(state, prefix_aspath(p), prefix_communities(p),
	    prefix_nexthop(p), prefix_nhflags(p), p->validation_state);
}

/*
 * Copy a filterstate to a new filterstate.
 */
void
rde_filterstate_copy(struct filterstate *state, struct filterstate *src)
{
	rde_filterstate_set(state, &src->aspath, &src->communities,
	    src->nexthop, src->nhflags, src->vstate);
}

/*
 * Set the vstate based on the aspa_state and the supplied roa vstate.
 * This function must be called after rde_filterstate_init().
 * rde_filterstate_prep() and rde_filterstate_copy() set the right vstate.
 */
void
rde_filterstate_set_vstate(struct filterstate *state, uint8_t roa_vstate,
    uint8_t aspa_state)
{
	state->vstate = aspa_state << 4;
	state->vstate |= roa_vstate & ROA_MASK;
}

void
rde_filterstate_clean(struct filterstate *state)
{
	path_clean(&state->aspath);
	communities_clean(&state->communities);
	nexthop_unref(state->nexthop);
	state->nexthop = NULL;
}

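/* Free a complete list of filter rules including their filter sets. */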
void
filterlist_free(struct filter_head *fh)
{
	struct filter_rule	*r;

	if (fh == NULL)
		return;

	while ((r = TAILQ_FIRST(fh)) != NULL) {
		TAILQ_REMOVE(fh, r, entry);
		filterset_free(&r->set);
		free(r);
	}
	free(fh);
}

/* free a filterset and take care of possible name2id references */
void
filterset_free(struct filter_set_head *sh)
{
	struct filter_set	*s;

	if (sh == NULL)
		return;

	while ((s = TAILQ_FIRST(sh)) != NULL) {
		TAILQ_REMOVE(sh, s, entry);
		if (s->type == ACTION_RTLABEL_ID)
			rtlabel_unref(s->action.id);
		else if (s->type == ACTION_PFTABLE_ID)
			pftable_unref(s->action.id);
		else if (s->type == ACTION_SET_NEXTHOP_REF)
			nexthop_unref(s->action.nh_ref);
		free(s);
	}
}

/*
 * This function is a bit more complicated than a memcmp() because some
 * types need to be considered equal, e.g. ACTION_SET_MED and
 * ACTION_SET_RELATIVE_MED.  ACTION_SET_COMMUNITY and ACTION_SET_NEXTHOP
 * also need special care.  It only checks the types, not the values, so
 * it does not do a full comparison.
 */
int
filterset_cmp(struct filter_set *a, struct filter_set *b)
{
	if (strcmp(filterset_name(a->type), filterset_name(b->type)))
		return (a->type - b->type);

	if (a->type == ACTION_SET_COMMUNITY ||
	    a->type == ACTION_DEL_COMMUNITY) {	/* a->type == b->type */
		return (memcmp(&a->action.community, &b->action.community,
		    sizeof(a->action.community)));
	}

	if (a->type == ACTION_SET_NEXTHOP && b->type == ACTION_SET_NEXTHOP) {
		/*
		 * This is the only interesting case, all others are considered
		 * equal. It does not make sense to e.g. set a nexthop and
		 * reject it at the same time. Allow one IPv4 and one IPv6
		 * per filter set or only one of the other nexthop modifiers.
		 */
		return (a->action.nexthop.aid - b->action.nexthop.aid);
	}

	/* equal */
	return (0);
}

/*
 * move filterset from source to dest. dest will be initialized first.
 * After the move source is an empty list.
 */
void
filterset_move(struct filter_set_head *source, struct filter_set_head *dest)
{
	TAILQ_INIT(dest);
	if (source == NULL)
		return;
	TAILQ_CONCAT(dest, source, entry);
}

/*
 * copy filterset from source to dest. dest will be initialized first.
 */
void
filterset_copy(struct filter_set_head *source, struct filter_set_head *dest)
{
	struct filter_set	*s, *t;

	TAILQ_INIT(dest);
	if (source == NULL)
		return;

	TAILQ_FOREACH(s, source, entry) {
		if ((t = malloc(sizeof(struct filter_set))) == NULL)
			fatal(NULL);
		memcpy(t, s, sizeof(struct filter_set));
		if (t->type == ACTION_RTLABEL_ID)
			rtlabel_ref(t->action.id);
		else if (t->type == ACTION_PFTABLE_ID)
			pftable_ref(t->action.id);
		else if (t->type == ACTION_SET_NEXTHOP_REF)
			nexthop_ref(t->action.nh_ref);
		TAILQ_INSERT_TAIL(dest, t, entry);
	}
}

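/*
 * Compare two filter set lists element by element.  Returns 1 if both
 * lists describe the same sequence of actions, 0 otherwise.
 */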
int
filterset_equal(struct filter_set_head *ah, struct filter_set_head *bh)
{
	struct filter_set	*a, *b;
	const char		*as, *bs;

	for (a = TAILQ_FIRST(ah), b = TAILQ_FIRST(bh);
	    a != NULL && b != NULL;
	    a = TAILQ_NEXT(a, entry), b = TAILQ_NEXT(b, entry)) {
		switch (a->type) {
		case ACTION_SET_PREPEND_SELF:
		case ACTION_SET_PREPEND_PEER:
			if (a->type == b->type &&
			    a->action.prepend == b->action.prepend)
				continue;
			break;
		case ACTION_SET_AS_OVERRIDE:
			if (a->type == b->type)
				continue;
			break;
		case ACTION_SET_LOCALPREF:
		case ACTION_SET_MED:
		case ACTION_SET_WEIGHT:
			if (a->type == b->type &&
			    a->action.metric == b->action.metric)
				continue;
			break;
		case ACTION_SET_RELATIVE_LOCALPREF:
		case ACTION_SET_RELATIVE_MED:
		case ACTION_SET_RELATIVE_WEIGHT:
			if (a->type == b->type &&
			    a->action.relative == b->action.relative)
				continue;
			break;
		case ACTION_SET_NEXTHOP:
			if (a->type == b->type &&
			    memcmp(&a->action.nexthop, &b->action.nexthop,
			    sizeof(a->action.nexthop)) == 0)
				continue;
			break;
		case ACTION_SET_NEXTHOP_REF:
			if (a->type == b->type &&
			    a->action.nh_ref == b->action.nh_ref)
				continue;
			break;
		case ACTION_SET_NEXTHOP_BLACKHOLE:
		case ACTION_SET_NEXTHOP_REJECT:
		case ACTION_SET_NEXTHOP_NOMODIFY:
		case ACTION_SET_NEXTHOP_SELF:
			if (a->type == b->type)
				continue;
			break;
		case ACTION_DEL_COMMUNITY:
		case ACTION_SET_COMMUNITY:
			if (a->type == b->type &&
			    memcmp(&a->action.community, &b->action.community,
			    sizeof(a->action.community)) == 0)
				continue;
			break;
		case ACTION_PFTABLE:
		case ACTION_PFTABLE_ID:
			if (b->type == ACTION_PFTABLE)
				bs = b->action.pftable;
			else if (b->type == ACTION_PFTABLE_ID)
				bs = pftable_id2name(b->action.id);
			else
				break;

			if (a->type == ACTION_PFTABLE)
				as = a->action.pftable;
			else
				as = pftable_id2name(a->action.id);

			if (strcmp(as, bs) == 0)
				continue;
			break;
		case ACTION_RTLABEL:
		case ACTION_RTLABEL_ID:
			if (b->type == ACTION_RTLABEL)
				bs = b->action.rtlabel;
			else if (b->type == ACTION_RTLABEL_ID)
				bs = rtlabel_id2name(b->action.id);
			else
				break;

			if (a->type == ACTION_RTLABEL)
				as = a->action.rtlabel;
			else
				as = rtlabel_id2name(a->action.id);

			if (strcmp(as, bs) == 0)
				continue;
			break;
		case ACTION_SET_ORIGIN:
			if (a->type == b->type &&
			    a->action.origin == b->action.origin)
				continue;
			break;
		}
		/* compare failed */
		return (0);
	}
	if (a != NULL || b != NULL)
		return (0);
	return (1);
}

const char *
filterset_name(enum action_types type)
{
	switch (type) {
	case ACTION_SET_LOCALPREF:
	case ACTION_SET_RELATIVE_LOCALPREF:
		return ("localpref");
	case ACTION_SET_MED:
	case ACTION_SET_RELATIVE_MED:
		return ("metric");
	case ACTION_SET_WEIGHT:
	case ACTION_SET_RELATIVE_WEIGHT:
		return ("weight");
	case ACTION_SET_PREPEND_SELF:
		return ("prepend-self");
	case ACTION_SET_PREPEND_PEER:
		return ("prepend-peer");
	case ACTION_SET_AS_OVERRIDE:
		return ("as-override");
	case ACTION_SET_NEXTHOP:
	case ACTION_SET_NEXTHOP_REF:
	case ACTION_SET_NEXTHOP_REJECT:
	case ACTION_SET_NEXTHOP_BLACKHOLE:
	case ACTION_SET_NEXTHOP_NOMODIFY:
	case ACTION_SET_NEXTHOP_SELF:
		return ("nexthop");
	case ACTION_SET_COMMUNITY:
		return ("community");
	case ACTION_DEL_COMMUNITY:
		return ("community delete");
	case ACTION_PFTABLE:
	case ACTION_PFTABLE_ID:
		return ("pftable");
	case ACTION_RTLABEL:
	case ACTION_RTLABEL_ID:
		return ("rtlabel");
	case ACTION_SET_ORIGIN:
		return ("origin");
	}

	fatalx("filterset_name: got lost");
}

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

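/*
 * Skip step calculation, in the style of pf(4): for every rule, skip[i]
 * points at the next rule where attribute i (peerid, groupid, remote AS)
 * changes, so rde_filter() can jump over whole runs of rules that cannot
 * match the current peer.
 */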
#define RDE_FILTER_SET_SKIP_STEPS(i)				\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i] = cur;			\
			head[i] = TAILQ_NEXT(head[i], entry);	\
		}						\
	} while (0)

void
rde_filter_calc_skip_steps(struct filter_head *rules)
{
	struct filter_rule	*cur, *prev, *head[RDE_FILTER_SKIP_COUNT];
	int			 i;

	if (rules == NULL)
		return;

	cur = TAILQ_FIRST(rules);

	prev = cur;
	for (i = 0; i < RDE_FILTER_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->peer.peerid != prev->peer.peerid)
			RDE_FILTER_SET_SKIP_STEPS(RDE_FILTER_SKIP_PEERID);
		if (cur->peer.groupid != prev->peer.groupid)
			RDE_FILTER_SET_SKIP_STEPS(RDE_FILTER_SKIP_GROUPID);
		if (cur->peer.remote_as != prev->peer.remote_as)
			RDE_FILTER_SET_SKIP_STEPS(RDE_FILTER_SKIP_REMOTE_AS);
		prev = cur;
		cur = TAILQ_NEXT(cur, entry);
	}
	for (i = 0; i < RDE_FILTER_SKIP_COUNT; ++i)
		RDE_FILTER_SET_SKIP_STEPS(i);
}

#define RDE_FILTER_TEST_ATTRIB(t, a)	\
	do {				\
		if (t) {		\
			f = a;		\
			goto nextrule;	\
		}			\
	} while (0)

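/*
 * Run the update for prefix/plen through the filter rules and apply the
 * sets of all matching rules to state.  Returns the resulting action,
 * ACTION_DENY by default.
 */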
enum filter_actions
rde_filter(struct filter_head *rules, struct rde_peer *peer,
    struct rde_peer *from, struct bgpd_addr *prefix, uint8_t plen,
    struct filterstate *state)
{
	struct filter_rule	*f;
	enum filter_actions	 action = ACTION_DENY; /* default deny */

	if (state->aspath.flags & F_ATTR_PARSE_ERR)
		/*
		 * don't try to filter bad updates, just deny them
		 * so they act as implicit withdraws
		 */
		return (ACTION_DENY);

	if (rules == NULL)
		return (action);

	if (prefix->aid == AID_FLOWSPECv4 || prefix->aid == AID_FLOWSPECv6)
		return (ACTION_ALLOW);

	f = TAILQ_FIRST(rules);
	while (f != NULL) {
		RDE_FILTER_TEST_ATTRIB(
		    (f->peer.peerid &&
		    f->peer.peerid != peer->conf.id),
		    f->skip[RDE_FILTER_SKIP_PEERID]);
		RDE_FILTER_TEST_ATTRIB(
		    (f->peer.groupid &&
		    f->peer.groupid != peer->conf.groupid),
		    f->skip[RDE_FILTER_SKIP_GROUPID]);
		RDE_FILTER_TEST_ATTRIB(
		    (f->peer.remote_as &&
		    f->peer.remote_as != peer->conf.remote_as),
		    f->skip[RDE_FILTER_SKIP_REMOTE_AS]);

		if (rde_filter_match(f, peer, from, state, prefix, plen)) {
			rde_apply_set(&f->set, peer, from, state, prefix->aid);
			if (f->action != ACTION_NONE)
				action = f->action;
			if (f->quick)
				return (action);
		}
		f = TAILQ_NEXT(f, entry);
 nextrule:	;
	}
	return (action);
}
