/*	$OpenBSD: rde_attr.c,v 1.135 2024/09/10 09:38:45 claudio Exp $ */

/*
 * Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2016 Job Snijders <job@instituut.net>
 * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/queue.h>

#include <endian.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "bgpd.h"
#include "rde.h"
#include "log.h"

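/*
 * Append a path attribute to buf: one byte of flags, one byte of type,
 * then the length and the raw data.  The length is encoded in a single
 * byte unless it exceeds 255, in which case ATTR_EXTLEN is set and a
 * two byte length is used.
 */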
int
attr_writebuf(struct ibuf *buf, uint8_t flags, uint8_t type, void *data,
    uint16_t data_len)
{
	u_char	hdr[4];

	flags &= ~ATTR_DEFMASK;
	if (data_len > 255) {
		flags |= ATTR_EXTLEN;
		hdr[2] = (data_len >> 8) & 0xff;
		hdr[3] = data_len & 0xff;
	} else {
		hdr[2] = data_len & 0xff;
	}

	hdr[0] = flags;
	hdr[1] = type;

	if (ibuf_add(buf, hdr, flags & ATTR_EXTLEN ? 4 : 3) == -1)
		return (-1);
	if (data != NULL && ibuf_add(buf, data, data_len) == -1)
		return (-1);
	return (0);
}

/* optional attribute specific functions */
struct attr	*attr_alloc(uint8_t, uint8_t, void *, uint16_t);
struct attr	*attr_lookup(uint8_t, uint8_t, void *, uint16_t);
void		 attr_put(struct attr *);

static inline int	 attr_diff(struct attr *, struct attr *);

RB_HEAD(attr_tree, attr)	attrtable = RB_INITIALIZER(&attr);
RB_GENERATE_STATIC(attr_tree, attr, entry, attr_diff);

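/*
 * Called at shutdown; the attribute cache is expected to be empty by
 * now, anything left over points at a reference counting leak.
 */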
void
attr_shutdown(void)
{
	if (!RB_EMPTY(&attrtable))
		log_warnx("%s: free non-free attr table", __func__);
}

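/*
 * Add an optional attribute to asp's "others" array, which is kept
 * sorted by attribute type.  Attributes are shared via the global
 * attrtable and reference counted.  Returns -1 if an attribute of the
 * same type is already present.
 */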
int
attr_optadd(struct rde_aspath *asp, uint8_t flags, uint8_t type,
    void *data, uint16_t len)
{
	uint8_t		 l;
	struct attr	*a, *t;
	void		*p;

	/* known optional attributes were validated previously */
	if ((a = attr_lookup(flags, type, data, len)) == NULL)
		a = attr_alloc(flags, type, data, len);

	/* attribute allowed only once */
	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL)
			break;
		if (type == asp->others[l]->type) {
			if (a->refcnt == 0)
				attr_put(a);
			return (-1);
		}
	}

	/* add attribute to the table but first bump refcnt */
	a->refcnt++;
	rdemem.attr_refs++;

	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL) {
			asp->others[l] = a;
			return (0);
		}
		/* list is sorted */
		if (a->type < asp->others[l]->type) {
			t = asp->others[l];
			asp->others[l] = a;
			a = t;
		}
	}

	/* no empty slot found, need to realloc */
	if (asp->others_len == UCHAR_MAX)
		fatalx("attr_optadd: others_len overflow");

	asp->others_len++;
	if ((p = reallocarray(asp->others,
	    asp->others_len, sizeof(struct attr *))) == NULL)
		fatal("%s", __func__);
	asp->others = p;

	/* l stores the size of others before resize */
	asp->others[l] = a;
	return (0);
}

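/* Return the optional attribute of the given type or NULL if not set. */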
struct attr *
attr_optget(const struct rde_aspath *asp, uint8_t type)
{
	uint8_t	 l;

	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL)
			break;
		if (type == asp->others[l]->type)
			return (asp->others[l]);
		if (type < asp->others[l]->type)
			break;
	}
	return (NULL);
}

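/*
 * Copy the optional attributes from s to t, taking a new reference on
 * every shared attr.  Attributes previously held by t are released first.
 */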
void
attr_copy(struct rde_aspath *t, const struct rde_aspath *s)
{
	uint8_t	 l;

	if (t->others != NULL)
		attr_freeall(t);

	t->others_len = s->others_len;
	if (t->others_len == 0) {
		t->others = NULL;
		return;
	}

	if ((t->others = calloc(s->others_len, sizeof(struct attr *))) == 0)
		fatal("%s", __func__);

	for (l = 0; l < t->others_len; l++) {
		if (s->others[l] == NULL)
			break;
		s->others[l]->refcnt++;
		rdemem.attr_refs++;
		t->others[l] = s->others[l];
	}
}

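/*
 * Ordering function for the attr RB tree: compare flags, type, length
 * and finally the attribute data.
 */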
static inline int
attr_diff(struct attr *oa, struct attr *ob)
{
	int	r;

	if (ob == NULL)
		return (1);
	if (oa == NULL)
		return (-1);
	if (oa->flags > ob->flags)
		return (1);
	if (oa->flags < ob->flags)
		return (-1);
	if (oa->type > ob->type)
		return (1);
	if (oa->type < ob->type)
		return (-1);
	if (oa->len > ob->len)
		return (1);
	if (oa->len < ob->len)
		return (-1);
	r = memcmp(oa->data, ob->data, oa->len);
	if (r > 0)
		return (1);
	if (r < 0)
		return (-1);
	return (0);
}

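/*
 * Compare the optional attribute lists of two aspaths, returning -1, 0
 * or 1 like a regular comparison function.
 */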
int
attr_compare(struct rde_aspath *a, struct rde_aspath *b)
{
	uint8_t	 l, min;

	min = a->others_len < b->others_len ? a->others_len : b->others_len;
	for (l = 0; l < min; l++)
		if (a->others[l] != b->others[l])
			return (attr_diff(a->others[l], b->others[l]));

	if (a->others_len < b->others_len) {
		for (; l < b->others_len; l++)
			if (b->others[l] != NULL)
				return (-1);
	} else if (a->others_len > b->others_len) {
		for (; l < a->others_len; l++)
			if (a->others[l] != NULL)
				return (1);
	}

	return (0);
}

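/* Remove a single attribute from asp and drop the reference held on it. */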
void
attr_free(struct rde_aspath *asp, struct attr *attr)
{
	uint8_t	 l;

	for (l = 0; l < asp->others_len; l++)
		if (asp->others[l] == attr) {
			attr_put(asp->others[l]);
			for (++l; l < asp->others_len; l++)
				asp->others[l - 1] = asp->others[l];
			asp->others[asp->others_len - 1] = NULL;
			return;
		}

	/* no realloc() because the slot may be reused soon */
}

void
attr_freeall(struct rde_aspath *asp)
{
	uint8_t	 l;

	for (l = 0; l < asp->others_len; l++)
		attr_put(asp->others[l]);

	free(asp->others);
	asp->others = NULL;
	asp->others_len = 0;
}

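/* Allocate a new attr, copy the data and insert it into the global cache. */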
struct attr *
attr_alloc(uint8_t flags, uint8_t type, void *data, uint16_t len)
{
	struct attr	*a;

	a = calloc(1, sizeof(struct attr));
	if (a == NULL)
		fatal("%s", __func__);
	rdemem.attr_cnt++;

	flags &= ~ATTR_DEFMASK;	/* normalize mask */
	a->flags = flags;
	a->type = type;
	a->len = len;
	if (len != 0) {
		if ((a->data = malloc(len)) == NULL)
			fatal("%s", __func__);

		rdemem.attr_dcnt++;
		rdemem.attr_data += len;
		memcpy(a->data, data, len);
	} else
		a->data = NULL;

	if (RB_INSERT(attr_tree, &attrtable, a) != NULL)
		fatalx("corrupted attr tree");

	return (a);
}

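/* Look up an attr with matching flags, type and data in the global cache. */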
struct attr *
attr_lookup(uint8_t flags, uint8_t type, void *data, uint16_t len)
{
	struct attr	needle;

	flags &= ~ATTR_DEFMASK;	/* normalize mask */

	needle.flags = flags;
	needle.type = type;
	needle.len = len;
	needle.data = data;
	return RB_FIND(attr_tree, &attrtable, &needle);
}

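/* Release one reference; unlink and free the attr when the last one is gone. */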
void
attr_put(struct attr *a)
{
	if (a == NULL)
		return;

	rdemem.attr_refs--;
	if (--a->refcnt > 0)
		/* somebody still holds a reference */
		return;

	/* unlink */
	RB_REMOVE(attr_tree, &attrtable, a);

	if (a->len != 0)
		rdemem.attr_dcnt--;
	rdemem.attr_data -= a->len;
	rdemem.attr_cnt--;
	free(a->data);
	free(a);
}

/* aspath specific functions */

static uint16_t	aspath_count(const void *, uint16_t);
static uint32_t	aspath_extract_origin(const void *, uint16_t);
static uint16_t	aspath_countlength(struct aspath *, uint16_t, int);
static void	 aspath_countcopy(struct aspath *, uint16_t, uint8_t *,
		    uint16_t, int);

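/* Compare two aspaths, first by length and then by content. */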
int
aspath_compare(struct aspath *a1, struct aspath *a2)
{
	int	r;

	if (a1->len > a2->len)
		return (1);
	if (a1->len < a2->len)
		return (-1);
	r = memcmp(a1->data, a2->data, a1->len);
	if (r > 0)
		return (1);
	if (r < 0)
		return (-1);
	return (0);
}

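/*
 * Build a new aspath from the raw wire encoding; the hop count and the
 * source AS are extracted once and cached in the aspath header.
 */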
struct aspath *
aspath_get(void *data, uint16_t len)
{
	struct aspath	*aspath;

	aspath = malloc(ASPATH_HEADER_SIZE + len);
	if (aspath == NULL)
		fatal("%s", __func__);

	rdemem.aspath_cnt++;
	rdemem.aspath_size += ASPATH_HEADER_SIZE + len;

	aspath->len = len;
	aspath->ascnt = aspath_count(data, len);
	aspath->source_as = aspath_extract_origin(data, len);
	if (len != 0)
		memcpy(aspath->data, data, len);

	return (aspath);
}

struct aspath *
aspath_copy(struct aspath *a)
{
	struct aspath	*aspath;

	aspath = malloc(ASPATH_HEADER_SIZE + a->len);
	if (aspath == NULL)
		fatal("%s", __func__);

	rdemem.aspath_cnt++;
	rdemem.aspath_size += ASPATH_HEADER_SIZE + a->len;

	memcpy(aspath, a, ASPATH_HEADER_SIZE + a->len);

	return (aspath);
}

void
aspath_put(struct aspath *aspath)
{
	if (aspath == NULL)
		return;

	rdemem.aspath_cnt--;
	rdemem.aspath_size -= ASPATH_HEADER_SIZE + aspath->len;
	free(aspath);
}

/*
 * convert a 4 byte aspath to a 2 byte one.
 */
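/*
 * AS numbers that do not fit into 16 bits are replaced with AS_TRANS
 * and *flagnew is set so the caller knows the path was modified.
 */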
u_char *
aspath_deflate(u_char *data, uint16_t *len, int *flagnew)
{
	uint8_t		*seg, *nseg, *ndata = NULL;
	uint32_t	 as;
	int		 i;
	uint16_t	 seg_size, olen, nlen;
	uint8_t		 seg_len;

	/* first calculate the length of the aspath */
	nlen = 0;
	seg = data;
	olen = *len;
	for (; olen > 0; olen -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;
		nlen += 2 + sizeof(uint16_t) * seg_len;

		if (seg_size > olen)
			fatalx("%s: would overflow", __func__);
	}

	if (nlen == 0)
		goto done;

	if ((ndata = malloc(nlen)) == NULL)
		fatal("%s", __func__);

	/* then copy the aspath */
	seg = data;
	olen = *len;
	for (nseg = ndata; seg < data + olen; seg += seg_size) {
		*nseg++ = seg[0];
		*nseg++ = seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as > USHRT_MAX) {
				as = AS_TRANS;
				*flagnew = 1;
			}
			*nseg++ = (as >> 8) & 0xff;
			*nseg++ = as & 0xff;
		}
	}

done:
	*len = nlen;
	return (ndata);
}

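/*
 * Merge an AS4_PATH attribute into the aspath of a: the leading ASs only
 * present in the current ASPATH are kept and the AS4_PATH data is
 * appended behind them.  The AS4_PATH attribute itself is always removed
 * from a.
 */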
void
aspath_merge(struct rde_aspath *a, struct attr *attr)
{
	uint8_t		*np;
	uint16_t	 ascnt, diff, nlen, difflen;
	int		 hroom = 0;

	ascnt = aspath_count(attr->data, attr->len);
	if (ascnt > a->aspath->ascnt) {
		/* ASPATH is shorter than AS4_PATH, no way to merge */
		attr_free(a, attr);
		return;
	}

	diff = a->aspath->ascnt - ascnt;
	if (diff && attr->len > 2 && attr->data[0] == AS_SEQUENCE)
		hroom = attr->data[1];
	difflen = aspath_countlength(a->aspath, diff, hroom);
	nlen = attr->len + difflen;

	if ((np = malloc(nlen)) == NULL)
		fatal("%s", __func__);

	/* copy head from old aspath */
	aspath_countcopy(a->aspath, diff, np, difflen, hroom);

	/* copy tail from new aspath */
	if (hroom > 0)
		memcpy(np + nlen - attr->len + 2, attr->data + 2,
		    attr->len - 2);
	else
		memcpy(np + nlen - attr->len, attr->data, attr->len);

	aspath_put(a->aspath);
	a->aspath = aspath_get(np, nlen);
	free(np);
	attr_free(a, attr);
}

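/* Return the AS of the sending neighbor, i.e. the leftmost AS of the path. */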
uint32_t
aspath_neighbor(struct aspath *aspath)
{
	/*
	 * Empty aspath is OK -- internal AS route.
	 * Additionally the RFC specifies that if the path starts with an
	 * AS_SET the neighbor AS is also the local AS.
	 */
	if (aspath->len == 0 ||
	    aspath->data[0] != AS_SEQUENCE)
		return (rde_local_as());
	return (aspath_extract(aspath->data, 0));
}

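/* Count the ASs in a raw aspath; an AS_SET counts as a single hop. */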
static uint16_t
aspath_count(const void *data, uint16_t len)
{
	const uint8_t	*seg;
	uint16_t	 cnt, seg_size;
	uint8_t		 seg_type, seg_len;

	cnt = 0;
	seg = data;
	for (; len > 0; len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt += 1;
		else
			cnt += seg_len;

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (cnt);
}

/*
 * The origin AS number is derived from a Route as follows:
 * o  the rightmost AS in the final segment of the AS_PATH attribute
 *    in the Route if that segment is of type AS_SEQUENCE, or
 * o  the BGP speaker's own AS number if that segment is of type
 *    AS_CONFED_SEQUENCE or AS_CONFED_SET or if the AS_PATH is empty,
 * o  the distinguished value "NONE" if the final segment of the
 *    AS_PATH attribute is of any other type.
 */
static uint32_t
aspath_extract_origin(const void *data, uint16_t len)
{
	const uint8_t	*seg;
	uint32_t	 as = AS_NONE;
	uint16_t	 seg_size;
	uint8_t		 seg_len;

	/* AS_PATH is empty */
	if (len == 0)
		return (rde_local_as());

	seg = data;
	for (; len > 0; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		if (len == seg_size && seg[0] == AS_SEQUENCE) {
			as = aspath_extract(seg, seg_len - 1);
		}
		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (as);
}

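/*
 * Return the number of bytes used by the first cnt ASs of aspath.
 * If headcnt is non-zero the last copied segment can be merged with the
 * leading AS_SEQUENCE of the new path, saving one segment header.
 */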
static uint16_t
aspath_countlength(struct aspath *aspath, uint16_t cnt, int headcnt)
{
	const uint8_t	*seg;
	uint16_t	 seg_size, len, clen;
	uint8_t		 seg_type = 0, seg_len = 0;

	seg = aspath->data;
	clen = 0;
	for (len = aspath->len; len > 0 && cnt > 0;
	    len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt -= 1;
		else if (seg_len > cnt) {
			seg_len = cnt;
			clen += 2 + sizeof(uint32_t) * cnt;
			break;
		} else
			cnt -= seg_len;

		clen += seg_size;

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	if (headcnt > 0 && seg_type == AS_SEQUENCE && headcnt + seg_len < 256)
		/* no need for additional header from the new aspath. */
		clen -= 2;

	return (clen);
}

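/* Copy the first cnt ASs of aspath into buf; helper for aspath_merge(). */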
static void
aspath_countcopy(struct aspath *aspath, uint16_t cnt, uint8_t *buf,
    uint16_t size, int headcnt)
{
	const uint8_t	*seg;
	uint16_t	 seg_size, len;
	uint8_t		 seg_type, seg_len;

	if (headcnt > 0)
		/*
		 * additional room because we steal the segment header
		 * from the other aspath
		 */
		size += 2;
	seg = aspath->data;
	for (len = aspath->len; len > 0 && cnt > 0;
	    len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt -= 1;
		else if (seg_len > cnt) {
			seg_len = cnt + headcnt;
			seg_size = 2 + sizeof(uint32_t) * cnt;
			cnt = 0;
		} else {
			cnt -= seg_len;
			if (cnt == 0)
				seg_len += headcnt;
		}

		memcpy(buf, seg, seg_size);
		buf[0] = seg_type;
		buf[1] = seg_len;
		buf += seg_size;
		if (size < seg_size)
			fatalx("%s: would overflow", __func__);
		size -= seg_size;
	}
}

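/* Return 1 if myAS does not appear anywhere in the aspath, 0 otherwise. */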
int
aspath_loopfree(struct aspath *aspath, uint32_t myAS)
{
	uint8_t		*seg;
	uint16_t	 len, seg_size;
	uint8_t		 i, seg_len;

	seg = aspath->data;
	for (len = aspath->len; len > 0; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			if (myAS == aspath_extract(seg, i))
				return (0);
		}

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (1);
}

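/*
 * Match a single AS number against a filter_as rule: handles as-set
 * lookups, neighbor-as substitution and the =, !=, range and inverted
 * range operators.
 */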
static int
as_compare(struct filter_as *f, uint32_t as, uint32_t neighas)
{
	uint32_t	 match;

	if (f->flags & AS_FLAG_AS_SET_NAME)	/* should not happen */
		return (0);
	if (f->flags & AS_FLAG_AS_SET)
		return (as_set_match(f->aset, as));

	if (f->flags & AS_FLAG_NEIGHBORAS)
		match = neighas;
	else
		match = f->as_min;

	switch (f->op) {
	case OP_NONE:
	case OP_EQ:
		if (as == match)
			return (1);
		break;
	case OP_NE:
		if (as != match)
			return (1);
		break;
	case OP_RANGE:
		if (as >= f->as_min && as <= f->as_max)
			return (1);
		break;
	case OP_XRANGE:
		if (as < f->as_min || as > f->as_max)
			return (1);
		break;
	}
	return (0);
}

/* we need to be able to search more than one as */
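/*
 * AS_EMPTY, AS_PEER and AS_SOURCE only inspect one specific position of
 * the path while AS_TRANSIT and AS_ALL have to walk all segments.
 */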
int
aspath_match(struct aspath *aspath, struct filter_as *f, uint32_t neighas)
{
	const uint8_t	*seg;
	int		 final;
	uint16_t	 len, seg_size;
	uint8_t		 i, seg_len;
	uint32_t	 as = AS_NONE;

	if (f->type == AS_EMPTY) {
		if (aspath_length(aspath) == 0)
			return (1);
		else
			return (0);
	}

	/* just check the leftmost AS */
	if (f->type == AS_PEER) {
		as = aspath_neighbor(aspath);
		if (as_compare(f, as, neighas))
			return (1);
		else
			return (0);
	}

	seg = aspath->data;
	len = aspath->len;
	for (; len >= 6; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		final = (len == seg_size);

		if (f->type == AS_SOURCE) {
			/*
			 * Just extract the rightmost AS
			 * but if that segment is an AS_SET then the rightmost
			 * AS of a previous AS_SEQUENCE segment should be used.
			 * Because of that just look at AS_SEQUENCE segments.
			 */
			if (seg[0] == AS_SEQUENCE)
				as = aspath_extract(seg, seg_len - 1);
			/* not yet in the final segment */
			if (!final)
				continue;
			if (as_compare(f, as, neighas))
				return (1);
			else
				return (0);
		}
		/* AS_TRANSIT or AS_ALL */
		for (i = 0; i < seg_len; i++) {
			/*
			 * the source (rightmost) AS is excluded from
			 * AS_TRANSIT matches.
			 */
			if (final && i == seg_len - 1 && f->type == AS_TRANSIT)
				return (0);
			as = aspath_extract(seg, i);
			if (as_compare(f, as, neighas))
				return (1);
		}

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (0);
}

/*
 * Returns a new prepended aspath. Old needs to be freed by caller.
 */
u_char *
aspath_prepend(struct aspath *asp, uint32_t as, int quantum, uint16_t *len)
{
	u_char		*p;
	int		 l, overflow = 0, shift = 0, size, wpos = 0;
	uint8_t		 type;

	/* lunatic prepends are blocked in the parser and limited */

	/* first calculate new size */
	if (asp->len > 0) {
		if (asp->len < 2)
			fatalx("aspath_prepend: bad aspath length");
		type = asp->data[0];
		size = asp->data[1];
	} else {
		/* empty as path */
		type = AS_SET;
		size = 0;
	}

	if (quantum > 255)
		fatalx("aspath_prepend: preposterous prepend");
	if (quantum == 0) {
		/* no change needed but return a copy */
		if (asp->len == 0) {
			*len = 0;
			return (NULL);
		}
		p = malloc(asp->len);
		if (p == NULL)
			fatal("%s", __func__);
		memcpy(p, asp->data, asp->len);
		*len = asp->len;
		return (p);
	} else if (type == AS_SET || size + quantum > 255) {
		/* need to attach a new AS_SEQUENCE */
		l = 2 + quantum * sizeof(uint32_t) + asp->len;
		if (type == AS_SET)
			overflow = quantum;
		else
			overflow = size + quantum - 255;
	} else
		l = quantum * sizeof(uint32_t) + asp->len;

	quantum -= overflow;

	p = malloc(l);
	if (p == NULL)
		fatal("%s", __func__);

	/* first prepends */
	as = htonl(as);
	if (overflow > 0) {
		p[wpos++] = AS_SEQUENCE;
		p[wpos++] = overflow;

		for (; overflow > 0; overflow--) {
			memcpy(p + wpos, &as, sizeof(uint32_t));
			wpos += sizeof(uint32_t);
		}
	}
	if (quantum > 0) {
		shift = 2;
		p[wpos++] = AS_SEQUENCE;
		p[wpos++] = quantum + size;

		for (; quantum > 0; quantum--) {
			memcpy(p + wpos, &as, sizeof(uint32_t));
			wpos += sizeof(uint32_t);
		}
	}
	if (asp->len > shift)
		memcpy(p + wpos, asp->data + shift, asp->len - shift);

	*len = l;
	return (p);
}

/*
 * Returns a new aspath where neighbor_as is replaced by local_as.
 */
u_char *
aspath_override(struct aspath *asp, uint32_t neighbor_as, uint32_t local_as,
    uint16_t *len)
{
	u_char		*p, *seg, *nseg;
	uint32_t	 as;
	uint16_t	 l, seg_size;
	uint8_t		 i, seg_len, seg_type;

	if (asp->len == 0) {
		*len = 0;
		return (NULL);
	}

	p = malloc(asp->len);
	if (p == NULL)
		fatal("%s", __func__);

	seg = asp->data;
	nseg = p;
	for (l = asp->len; l > 0; l -= seg_size, seg += seg_size) {
		*nseg++ = seg_type = seg[0];
		*nseg++ = seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as == neighbor_as)
				as = local_as;
			as = htonl(as);
			memcpy(nseg, &as, sizeof(as));
			nseg += sizeof(as);
		}

		if (seg_size > l)
			fatalx("%s: would overflow", __func__);
	}

	*len = asp->len;
	return (p);
}

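/*
 * Match the aslen filter: ASLEN_MAX compares aslen against the total hop
 * count, ASLEN_SEQ triggers once the same AS occurs more than aslen times
 * in a row (i.e. prepends).
 */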
int
aspath_lenmatch(struct aspath *a, enum aslen_spec type, u_int aslen)
{
	uint8_t		*seg;
	uint32_t	 as, lastas = 0;
	u_int		 count = 0;
	uint16_t	 len, seg_size;
	uint8_t		 i, seg_len, seg_type;

	if (type == ASLEN_MAX) {
		if (aslen < aspath_count(a->data, a->len))
			return (1);
		else
			return (0);
	}

	/* type == ASLEN_SEQ */
	seg = a->data;
	for (len = a->len; len > 0; len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(uint32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as == lastas) {
				if (aslen < ++count)
					return (1);
			} else if (seg_type == AS_SET) {
				/* AS path 3 { 4 3 7 } 3 will have count = 3 */
				continue;
			} else
				count = 1;
			lastas = as;
		}

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (0);
}