/*	$OpenBSD: rde_attr.c,v 1.124 2021/01/16 13:14:54 claudio Exp $ */

/*
 * Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2016 Job Snijders <job@instituut.net>
 * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/queue.h>

#include <endian.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <siphash.h>

#include "bgpd.h"
#include "rde.h"
#include "log.h"

int
attr_write(void *p, u_int16_t p_len, u_int8_t flags, u_int8_t type,
    void *data, u_int16_t data_len)
{
	u_char		*b = p;
	u_int16_t	 tmp, tot_len = 2; /* attribute header (without len) */

	flags &= ~ATTR_DEFMASK;
	if (data_len > 255) {
		tot_len += 2 + data_len;
		flags |= ATTR_EXTLEN;
	} else {
		tot_len += 1 + data_len;
	}

	if (tot_len > p_len)
		return (-1);

	*b++ = flags;
	*b++ = type;
	if (data_len > 255) {
		tmp = htons(data_len);
		memcpy(b, &tmp, sizeof(tmp));
		b += 2;
	} else
		*b++ = (u_char)data_len;

	if (data == NULL)
		return (tot_len - data_len);

	if (data_len != 0)
		memcpy(b, data, data_len);

	return (tot_len);
}
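
/*
 * Illustrative sketch (not part of the original source): attr_write()
 * emits the standard RFC 4271 path attribute header in front of the
 * payload.  For a hypothetical 300 byte payload the layout would be:
 *
 *	buf[0] = flags | ATTR_EXTLEN;	extended length flag forced on
 *	buf[1] = type;
 *	buf[2] = 0x01; buf[3] = 0x2c;	length 300 in network byte order
 *	buf[4...]			payload follows
 *
 * and the return value would be 4 + 300.  With a payload of 255 bytes
 * or less only a single length octet is written and the header is
 * 3 bytes long.
 */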

int
attr_writebuf(struct ibuf *buf, u_int8_t flags, u_int8_t type, void *data,
    u_int16_t data_len)
{
	u_char	hdr[4];

	flags &= ~ATTR_DEFMASK;
	if (data_len > 255) {
		flags |= ATTR_EXTLEN;
		hdr[2] = (data_len >> 8) & 0xff;
		hdr[3] = data_len & 0xff;
	} else {
		hdr[2] = data_len & 0xff;
	}

	hdr[0] = flags;
	hdr[1] = type;

	if (ibuf_add(buf, hdr, flags & ATTR_EXTLEN ? 4 : 3) == -1)
		return (-1);
	if (data && ibuf_add(buf, data, data_len) == -1)
		return (-1);
	return (0);
}

/* optional attribute specific functions */
int		 attr_diff(struct attr *, struct attr *);
struct attr	*attr_alloc(u_int8_t, u_int8_t, const void *, u_int16_t);
struct attr	*attr_lookup(u_int8_t, u_int8_t, const void *, u_int16_t);
void		 attr_put(struct attr *);

struct attr_table {
	struct attr_list	*hashtbl;
	u_int64_t		 hashmask;
} attrtable;

SIPHASH_KEY attrtablekey;

#define ATTR_HASH(x)				\
	&attrtable.hashtbl[(x) & attrtable.hashmask]

void
attr_init(u_int32_t hashsize)
{
	u_int32_t	hs, i;

	arc4random_buf(&attrtablekey, sizeof(attrtablekey));
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	attrtable.hashtbl = calloc(hs, sizeof(struct attr_list));
	if (attrtable.hashtbl == NULL)
		fatal("attr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&attrtable.hashtbl[i]);

	attrtable.hashmask = hs - 1;
}
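
/*
 * Sketch of the sizing logic above (added for illustration): the
 * requested hashsize is rounded up to the next power of two so a
 * bucket can be picked with a mask instead of a modulo, e.g.
 * attr_init(1000) allocates 1024 buckets and sets hashmask to 0x3ff,
 * and ATTR_HASH(h) then indexes bucket (h & 0x3ff).
 */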

void
attr_shutdown(void)
{
	u_int64_t	i;

	for (i = 0; i <= attrtable.hashmask; i++)
		if (!LIST_EMPTY(&attrtable.hashtbl[i]))
			log_warnx("%s: free non-free table", __func__);

	free(attrtable.hashtbl);
}

void
attr_hash_stats(struct rde_hashstats *hs)
{
	struct attr		*a;
	u_int64_t		i;
	int64_t			n;

	memset(hs, 0, sizeof(*hs));
	strlcpy(hs->name, "attr hash", sizeof(hs->name));
	hs->min = LLONG_MAX;
	hs->num = attrtable.hashmask + 1;

	for (i = 0; i <= attrtable.hashmask; i++) {
		n = 0;
		LIST_FOREACH(a, &attrtable.hashtbl[i], entry)
			n++;
		if (n < hs->min)
			hs->min = n;
		if (n > hs->max)
			hs->max = n;
		hs->sum += n;
		hs->sumq += n * n;
	}
}

int
attr_optadd(struct rde_aspath *asp, u_int8_t flags, u_int8_t type,
    void *data, u_int16_t len)
{
	u_int8_t	 l;
	struct attr	*a, *t;
	void		*p;

	/* known optional attributes were validated previously */
	if ((a = attr_lookup(flags, type, data, len)) == NULL)
		a = attr_alloc(flags, type, data, len);

	/* attribute allowed only once */
	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL)
			break;
		if (type == asp->others[l]->type) {
			if (a->refcnt == 0)
				attr_put(a);
			return (-1);
		}
	}

	/* add attribute to the table but first bump refcnt */
	a->refcnt++;
	rdemem.attr_refs++;

	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL) {
			asp->others[l] = a;
			return (0);
		}
		/* list is sorted */
		if (a->type < asp->others[l]->type) {
			t = asp->others[l];
			asp->others[l] = a;
			a = t;
		}
	}

	/* no empty slot found, need to realloc */
	if (asp->others_len == UCHAR_MAX)
		fatalx("attr_optadd: others_len overflow");

	asp->others_len++;
	if ((p = reallocarray(asp->others,
	    asp->others_len, sizeof(struct attr *))) == NULL)
		fatal("attr_optadd");
	asp->others = p;

	/* l stores the size of others before resize */
	asp->others[l] = a;
	return (0);
}
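
/*
 * Usage sketch (illustration only, values are made up): during UPDATE
 * parsing an unknown transitive attribute can be attached to an
 * rde_aspath like this; a second attribute with the same type code is
 * rejected with -1 and the asp->others array stays sorted by type.
 *
 *	if (attr_optadd(asp, ATTR_OPTIONAL|ATTR_TRANSITIVE, 240,
 *	    buf, buflen) == -1)
 *		... duplicate attribute, treat as error ...
 */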

struct attr *
attr_optget(const struct rde_aspath *asp, u_int8_t type)
{
	u_int8_t	 l;

	for (l = 0; l < asp->others_len; l++) {
		if (asp->others[l] == NULL)
			break;
		if (type == asp->others[l]->type)
			return (asp->others[l]);
		if (type < asp->others[l]->type)
			break;
	}
	return (NULL);
}

void
attr_copy(struct rde_aspath *t, const struct rde_aspath *s)
{
	u_int8_t	l;

	if (t->others != NULL)
		attr_freeall(t);

	t->others_len = s->others_len;
	if (t->others_len == 0) {
		t->others = NULL;
		return;
	}

	if ((t->others = calloc(s->others_len, sizeof(struct attr *))) == 0)
		fatal("attr_copy");

	for (l = 0; l < t->others_len; l++) {
		if (s->others[l] == NULL)
			break;
		s->others[l]->refcnt++;
		rdemem.attr_refs++;
		t->others[l] = s->others[l];
	}
}

int
attr_diff(struct attr *oa, struct attr *ob)
{
	int	r;

	if (ob == NULL)
		return (1);
	if (oa == NULL)
		return (-1);
	if (oa->flags > ob->flags)
		return (1);
	if (oa->flags < ob->flags)
		return (-1);
	if (oa->type > ob->type)
		return (1);
	if (oa->type < ob->type)
		return (-1);
	if (oa->len > ob->len)
		return (1);
	if (oa->len < ob->len)
		return (-1);
	r = memcmp(oa->data, ob->data, oa->len);
	if (r > 0)
		return (1);
	if (r < 0)
		return (-1);

	fatalx("attr_diff: equal attributes encountered");
}

int
attr_compare(struct rde_aspath *a, struct rde_aspath *b)
{
	u_int8_t	l, min;

	min = a->others_len < b->others_len ? a->others_len : b->others_len;
	for (l = 0; l < min; l++)
		if (a->others[l] != b->others[l])
			return (attr_diff(a->others[l], b->others[l]));

	if (a->others_len < b->others_len) {
		for (; l < b->others_len; l++)
			if (b->others[l] != NULL)
				return (-1);
	} else if (a->others_len > b->others_len) {
		for (; l < a->others_len; l++)
			if (a->others[l] != NULL)
				return (1);
	}

	return (0);
}

u_int64_t
attr_hash(struct rde_aspath *a)
{
	u_int64_t	hash = 0;
	u_int8_t	l;

	for (l = 0; l < a->others_len; l++)
		if (a->others[l] != NULL)
			hash ^= a->others[l]->hash;
	return (hash);
}

void
attr_free(struct rde_aspath *asp, struct attr *attr)
{
	u_int8_t	l;

	for (l = 0; l < asp->others_len; l++)
		if (asp->others[l] == attr) {
			attr_put(asp->others[l]);
			for (++l; l < asp->others_len; l++)
				asp->others[l - 1] = asp->others[l];
			asp->others[asp->others_len - 1] = NULL;
			return;
		}

	/* no realloc() because the slot may be reused soon */
}

void
attr_freeall(struct rde_aspath *asp)
{
	u_int8_t	l;

	for (l = 0; l < asp->others_len; l++)
		attr_put(asp->others[l]);

	free(asp->others);
	asp->others = NULL;
	asp->others_len = 0;
}

struct attr *
attr_alloc(u_int8_t flags, u_int8_t type, const void *data, u_int16_t len)
{
	struct attr	*a;
	SIPHASH_CTX	ctx;

	a = calloc(1, sizeof(struct attr));
	if (a == NULL)
		fatal("attr_optadd");
	rdemem.attr_cnt++;

	flags &= ~ATTR_DEFMASK;	/* normalize mask */
	a->flags = flags;
	a->type = type;
	a->len = len;
	if (len != 0) {
		if ((a->data = malloc(len)) == NULL)
			fatal("attr_optadd");

		rdemem.attr_dcnt++;
		rdemem.attr_data += len;
		memcpy(a->data, data, len);
	} else
		a->data = NULL;

	SipHash24_Init(&ctx, &attrtablekey);
	SipHash24_Update(&ctx, &flags, sizeof(flags));
	SipHash24_Update(&ctx, &type, sizeof(type));
	SipHash24_Update(&ctx, &len, sizeof(len));
	SipHash24_Update(&ctx, a->data, a->len);
	a->hash = SipHash24_End(&ctx);
	LIST_INSERT_HEAD(ATTR_HASH(a->hash), a, entry);

	return (a);
}

struct attr *
attr_lookup(u_int8_t flags, u_int8_t type, const void *data, u_int16_t len)
{
	struct attr_list	*head;
	struct attr		*a;
	u_int64_t		 hash;
	SIPHASH_CTX		ctx;

	flags &= ~ATTR_DEFMASK;	/* normalize mask */

	SipHash24_Init(&ctx, &attrtablekey);
	SipHash24_Update(&ctx, &flags, sizeof(flags));
	SipHash24_Update(&ctx, &type, sizeof(type));
	SipHash24_Update(&ctx, &len, sizeof(len));
	SipHash24_Update(&ctx, data, len);
	hash = SipHash24_End(&ctx);
	head = ATTR_HASH(hash);

	LIST_FOREACH(a, head, entry) {
		if (hash == a->hash && type == a->type &&
		    flags == a->flags && len == a->len &&
		    memcmp(data, a->data, len) == 0)
			return (a);
	}
	return (NULL);
}

void
attr_put(struct attr *a)
{
	if (a == NULL)
		return;

	rdemem.attr_refs--;
	if (--a->refcnt > 0)
		/* somebody still holds a reference */
		return;

	/* unlink */
	LIST_REMOVE(a, entry);

	if (a->len != 0)
		rdemem.attr_dcnt--;
	rdemem.attr_data -= a->len;
	rdemem.attr_cnt--;
	free(a->data);
	free(a);
}

/* aspath specific functions */

static u_int16_t aspath_count(const void *, u_int16_t);
static u_int32_t aspath_extract_origin(const void *, u_int16_t);
static u_int16_t aspath_countlength(struct aspath *, u_int16_t, int);
static void	 aspath_countcopy(struct aspath *, u_int16_t, u_int8_t *,
		     u_int16_t, int);
struct aspath	*aspath_lookup(const void *, u_int16_t);

struct aspath_table {
	struct aspath_list	*hashtbl;
	u_int32_t		 hashmask;
} astable;

SIPHASH_KEY astablekey;

#define ASPATH_HASH(x)				\
	&astable.hashtbl[(x) & astable.hashmask]

void
aspath_init(u_int32_t hashsize)
{
	u_int32_t	hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	astable.hashtbl = calloc(hs, sizeof(struct aspath_list));
	if (astable.hashtbl == NULL)
		fatal("aspath_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&astable.hashtbl[i]);

	astable.hashmask = hs - 1;
	arc4random_buf(&astablekey, sizeof(astablekey));
}

void
aspath_shutdown(void)
{
	u_int32_t	i;

	for (i = 0; i <= astable.hashmask; i++)
		if (!LIST_EMPTY(&astable.hashtbl[i]))
			log_warnx("aspath_shutdown: free non-free table");

	free(astable.hashtbl);
}

void
aspath_hash_stats(struct rde_hashstats *hs)
{
	struct aspath		*a;
	u_int32_t		i;
	int64_t			n;

	memset(hs, 0, sizeof(*hs));
	strlcpy(hs->name, "aspath hash", sizeof(hs->name));
	hs->min = LLONG_MAX;
	hs->num = astable.hashmask + 1;

	for (i = 0; i <= astable.hashmask; i++) {
		n = 0;
		LIST_FOREACH(a, &astable.hashtbl[i], entry)
			n++;
		if (n < hs->min)
			hs->min = n;
		if (n > hs->max)
			hs->max = n;
		hs->sum += n;
		hs->sumq += n * n;
	}
}

struct aspath *
aspath_get(void *data, u_int16_t len)
{
	struct aspath_list	*head;
	struct aspath		*aspath;

	/* The aspath must already have been checked for correctness. */
	aspath = aspath_lookup(data, len);
	if (aspath == NULL) {
		aspath = malloc(ASPATH_HEADER_SIZE + len);
		if (aspath == NULL)
			fatal("aspath_get");

		rdemem.aspath_cnt++;
		rdemem.aspath_size += ASPATH_HEADER_SIZE + len;

		aspath->refcnt = 0;
		aspath->len = len;
		aspath->ascnt = aspath_count(data, len);
		aspath->source_as = aspath_extract_origin(data, len);
		memcpy(aspath->data, data, len);

		/* link */
		head = ASPATH_HASH(SipHash24(&astablekey, aspath->data,
		    aspath->len));
		LIST_INSERT_HEAD(head, aspath, entry);
	}
	aspath->refcnt++;
	rdemem.aspath_refs++;

	return (aspath);
}
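
/*
 * Usage sketch (illustration only): aspath_get() interns the wire-format
 * AS_PATH so identical paths share one refcounted struct aspath, and
 * every successful aspath_get() must eventually be balanced by an
 * aspath_put().
 *
 *	struct aspath *ap;
 *
 *	ap = aspath_get(wirebuf, wirelen);	new or shared entry
 *	... use ap ...
 *	aspath_put(ap);				freed once refcnt drops to 0
 */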

void
aspath_put(struct aspath *aspath)
{
	if (aspath == NULL)
		return;

	rdemem.aspath_refs--;
	if (--aspath->refcnt > 0) {
		/* somebody still holds a reference */
		return;
	}

	/* unlink */
	LIST_REMOVE(aspath, entry);

	rdemem.aspath_cnt--;
	rdemem.aspath_size -= ASPATH_HEADER_SIZE + aspath->len;
	free(aspath);
}

/*
 * convert a 4 byte aspath to a 2 byte one.
 * data is freed by aspath_deflate
 */
u_char *
aspath_deflate(u_char *data, u_int16_t *len, int *flagnew)
{
	u_int8_t	*seg, *nseg, *ndata;
	u_int32_t	 as;
	int		 i;
	u_int16_t	 seg_size, olen, nlen;
	u_int8_t	 seg_len;

	/* first calculate the length of the aspath */
	nlen = 0;
	seg = data;
	olen = *len;
	for (; olen > 0; olen -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;
		nlen += 2 + sizeof(u_int16_t) * seg_len;

		if (seg_size > olen)
			fatalx("%s: would overflow", __func__);
	}

	if ((ndata = malloc(nlen)) == NULL)
		fatal("aspath_deflate");

	/* then copy the aspath */
	seg = data;
	olen = *len;
	for (nseg = ndata; seg < data + olen; seg += seg_size) {
		*nseg++ = seg[0];
		*nseg++ = seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as > USHRT_MAX) {
				as = AS_TRANS;
				*flagnew = 1;
			}
			*nseg++ = (as >> 8) & 0xff;
			*nseg++ = as & 0xff;
		}
	}

	free(data);
	*len = nlen;
	return (ndata);
}
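
/*
 * Worked example (added for illustration): deflating the 4-byte path
 * 65001 196618 65003 for an old speaker that only understands 2-byte
 * AS numbers keeps 65001 and 65003 as-is, but 196618 does not fit in
 * 16 bits and is replaced by AS_TRANS (23456), with *flagnew set so
 * the caller knows the original 4-byte path still needs to be conveyed
 * via an AS4_PATH attribute.
 */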

void
aspath_merge(struct rde_aspath *a, struct attr *attr)
{
	u_int8_t	*np;
	u_int16_t	 ascnt, diff, nlen, difflen;
	int		 hroom = 0;

	ascnt = aspath_count(attr->data, attr->len);
	if (ascnt > a->aspath->ascnt) {
		/* ASPATH is shorter than AS4_PATH, no way to merge */
		attr_free(a, attr);
		return;
	}

	diff = a->aspath->ascnt - ascnt;
	if (diff && attr->len > 2 && attr->data[0] == AS_SEQUENCE)
		hroom = attr->data[1];
	difflen = aspath_countlength(a->aspath, diff, hroom);
	nlen = attr->len + difflen;

	if ((np = malloc(nlen)) == NULL)
		fatal("aspath_merge");

	/* copy head from old aspath */
	aspath_countcopy(a->aspath, diff, np, difflen, hroom);

	/* copy tail from new aspath */
	if (hroom > 0)
		memcpy(np + nlen - attr->len + 2, attr->data + 2,
		    attr->len - 2);
	else
		memcpy(np + nlen - attr->len, attr->data, attr->len);

	aspath_put(a->aspath);
	a->aspath = aspath_get(np, nlen);
	free(np);
	attr_free(a, attr);
}

u_char *
aspath_dump(struct aspath *aspath)
{
	return (aspath->data);
}

u_int16_t
aspath_length(struct aspath *aspath)
{
	return (aspath->len);
}

u_int32_t
aspath_neighbor(struct aspath *aspath)
{
	/*
	 * Empty aspath is OK -- internal AS route.
	 * Additionally the RFC specifies that if the path starts with an
	 * AS_SET the neighbor AS is also the local AS.
	 */
	if (aspath->len == 0 ||
	    aspath->data[0] != AS_SEQUENCE)
		return (rde_local_as());
	return (aspath_extract(aspath->data, 0));
}

u_int32_t
aspath_origin(struct aspath *aspath)
{
	return aspath->source_as;
}

static u_int16_t
aspath_count(const void *data, u_int16_t len)
{
	const u_int8_t	*seg;
	u_int16_t	 cnt, seg_size;
	u_int8_t	 seg_type, seg_len;

	cnt = 0;
	seg = data;
	for (; len > 0; len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt += 1;
		else
			cnt += seg_len;

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (cnt);
}

/*
 * The origin AS number is derived from a Route as follows:
 * o  the rightmost AS in the final segment of the AS_PATH attribute
 *    in the Route if that segment is of type AS_SEQUENCE, or
 * o  the BGP speaker's own AS number if that segment is of type
 *    AS_CONFED_SEQUENCE or AS_CONFED_SET or if the AS_PATH is empty,
 * o  the distinguished value "NONE" if the final segment of the
 *    AS_PATH attribute is of any other type.
 */
static u_int32_t
aspath_extract_origin(const void *data, u_int16_t len)
{
	const u_int8_t	*seg;
	u_int32_t	 as = AS_NONE;
	u_int16_t	 seg_size;
	u_int8_t	 seg_len;

	/* AS_PATH is empty */
	if (len == 0)
		return (rde_local_as());

	seg = data;
	for (; len > 0; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		if (len == seg_size && seg[0] == AS_SEQUENCE) {
			as = aspath_extract(seg, seg_len - 1);
		}
		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (as);
}
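
/*
 * Worked example (added for illustration): for the path
 * 65001 65002 { 65010 65011 } the final segment is an AS_SET, so the
 * origin stays AS_NONE; for 65001 65002 65003 the final segment is an
 * AS_SEQUENCE and the origin is its rightmost AS, 65003.  The result
 * is cached in aspath->source_as and returned by aspath_origin().
 */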

static u_int16_t
aspath_countlength(struct aspath *aspath, u_int16_t cnt, int headcnt)
{
	const u_int8_t	*seg;
	u_int16_t	 seg_size, len, clen;
	u_int8_t	 seg_type = 0, seg_len = 0;

	seg = aspath->data;
	clen = 0;
	for (len = aspath->len; len > 0 && cnt > 0;
	    len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt -= 1;
		else if (seg_len > cnt) {
			seg_len = cnt;
			clen += 2 + sizeof(u_int32_t) * cnt;
			break;
		} else
			cnt -= seg_len;

		clen += seg_size;

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	if (headcnt > 0 && seg_type == AS_SEQUENCE && headcnt + seg_len < 256)
		/* no need for additional header from the new aspath. */
		clen -= 2;

	return (clen);
}

static void
aspath_countcopy(struct aspath *aspath, u_int16_t cnt, u_int8_t *buf,
    u_int16_t size, int headcnt)
{
	const u_int8_t	*seg;
	u_int16_t	 seg_size, len;
	u_int8_t	 seg_type, seg_len;

	if (headcnt > 0)
		/*
		 * additional room because we steal the segment header
		 * from the other aspath
		 */
		size += 2;
	seg = aspath->data;
	for (len = aspath->len; len > 0 && cnt > 0;
	    len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		if (seg_type == AS_SET)
			cnt -= 1;
		else if (seg_len > cnt) {
			seg_len = cnt + headcnt;
			seg_size = 2 + sizeof(u_int32_t) * cnt;
			cnt = 0;
		} else {
			cnt -= seg_len;
			if (cnt == 0)
				seg_len += headcnt;
		}

		memcpy(buf, seg, seg_size);
		buf[0] = seg_type;
		buf[1] = seg_len;
		buf += seg_size;
		if (size < seg_size)
			fatalx("%s: would overflow", __func__);
		size -= seg_size;
	}
}

int
aspath_loopfree(struct aspath *aspath, u_int32_t myAS)
{
	u_int8_t	*seg;
	u_int16_t	 len, seg_size;
	u_int8_t	 i, seg_len;

	seg = aspath->data;
	for (len = aspath->len; len > 0; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			if (myAS == aspath_extract(seg, i))
				return (0);
		}

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (1);
}

int
aspath_compare(struct aspath *a1, struct aspath *a2)
{
	int r;

	if (a1->len > a2->len)
		return (1);
	if (a1->len < a2->len)
		return (-1);
	r = memcmp(a1->data, a2->data, a1->len);
	if (r > 0)
		return (1);
	if (r < 0)
		return (-1);
	return (0);
}

struct aspath *
aspath_lookup(const void *data, u_int16_t len)
{
	struct aspath_list	*head;
	struct aspath		*aspath;
	u_int32_t		 hash;

	hash = SipHash24(&astablekey, data, len);
	head = ASPATH_HASH(hash);

	LIST_FOREACH(aspath, head, entry) {
		if (len == aspath->len && memcmp(data, aspath->data, len) == 0)
			return (aspath);
	}
	return (NULL);
}


static int
as_compare(struct filter_as *f, u_int32_t as, u_int32_t neighas)
{
	u_int32_t match;

	if (f->flags & AS_FLAG_AS_SET_NAME)	/* should not happen */
		return (0);
	if (f->flags & AS_FLAG_AS_SET)
		return (as_set_match(f->aset, as));

	if (f->flags & AS_FLAG_NEIGHBORAS)
		match = neighas;
	else
		match = f->as_min;

	switch (f->op) {
	case OP_NONE:
	case OP_EQ:
		if (as == match)
			return (1);
		break;
	case OP_NE:
		if (as != match)
			return (1);
		break;
	case OP_RANGE:
		if (as >= f->as_min && as <= f->as_max)
			return (1);
		break;
	case OP_XRANGE:
		if (as < f->as_min || as > f->as_max)
			return (1);
		break;
	}
	return (0);
}

/* we need to be able to search more than one as */
int
aspath_match(struct aspath *aspath, struct filter_as *f, u_int32_t neighas)
{
	const u_int8_t	*seg;
	int		 final;
	u_int16_t	 len, seg_size;
	u_int8_t	 i, seg_len;
	u_int32_t	 as = AS_NONE;

	if (f->type == AS_EMPTY) {
		if (aspath_length(aspath) == 0)
			return (1);
		else
			return (0);
	}

	/* just check the leftmost AS */
	if (f->type == AS_PEER) {
		as = aspath_neighbor(aspath);
		if (as_compare(f, as, neighas))
			return (1);
		else
			return (0);
	}

	seg = aspath->data;
	len = aspath->len;
	for (; len >= 6; len -= seg_size, seg += seg_size) {
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		final = (len == seg_size);

		if (f->type == AS_SOURCE) {
			/*
			 * Just extract the rightmost AS
			 * but if that segment is an AS_SET then the rightmost
			 * AS of a previous AS_SEQUENCE segment should be used.
			 * Because of that just look at AS_SEQUENCE segments.
			 */
			if (seg[0] == AS_SEQUENCE)
				as = aspath_extract(seg, seg_len - 1);
			/* not yet in the final segment */
			if (!final)
				continue;
			if (as_compare(f, as, neighas))
				return (1);
			else
				return (0);
		}
		/* AS_TRANSIT or AS_ALL */
		for (i = 0; i < seg_len; i++) {
			/*
			 * the source (rightmost) AS is excluded from
			 * AS_TRANSIT matches.
			 */
			if (final && i == seg_len - 1 && f->type == AS_TRANSIT)
				return (0);
			as = aspath_extract(seg, i);
			if (as_compare(f, as, neighas))
				return (1);
		}

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (0);
}
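
/*
 * Illustrative examples (not part of the original source), for the
 * path 65001 65002 65003 where 65003 is the origin:
 *
 *	AS_PEER 65001		matches, leftmost (neighbor) AS only
 *	AS_SOURCE 65003		matches, rightmost AS of the last AS_SEQUENCE
 *	AS_TRANSIT 65003	no match, the origin AS is excluded
 *	AS_ALL 65002		matches, every AS in the path is checked
 */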

/*
 * Returns a new prepended aspath. Old needs to be freed by caller.
 */
u_char *
aspath_prepend(struct aspath *asp, u_int32_t as, int quantum, u_int16_t *len)
{
	u_char		*p;
	int		 l, overflow = 0, shift = 0, size, wpos = 0;
	u_int8_t	 type;

	/* lunatic prepends are blocked in the parser and limited */

	/* first calculate new size */
	if (asp->len > 0) {
		if (asp->len < 2)
			fatalx("aspath_prepend: bad aspath length");
		type = asp->data[0];
		size = asp->data[1];
	} else {
		/* empty as path */
		type = AS_SET;
		size = 0;
	}

	if (quantum > 255)
		fatalx("aspath_prepend: preposterous prepend");
	if (quantum == 0) {
		/* no change needed but return a copy */
		p = malloc(asp->len);
		if (p == NULL)
			fatal("aspath_prepend");
		memcpy(p, asp->data, asp->len);
		*len = asp->len;
		return (p);
	} else if (type == AS_SET || size + quantum > 255) {
		/* need to attach a new AS_SEQUENCE */
		l = 2 + quantum * sizeof(u_int32_t) + asp->len;
		if (type == AS_SET)
			overflow = quantum;
		else
			overflow = size + quantum - 255;
	} else
		l = quantum * sizeof(u_int32_t) + asp->len;

	quantum -= overflow;

	p = malloc(l);
	if (p == NULL)
		fatal("aspath_prepend");

	/* first prepends */
	as = htonl(as);
	if (overflow > 0) {
		p[wpos++] = AS_SEQUENCE;
		p[wpos++] = overflow;

		for (; overflow > 0; overflow--) {
			memcpy(p + wpos, &as, sizeof(u_int32_t));
			wpos += sizeof(u_int32_t);
		}
	}
	if (quantum > 0) {
		shift = 2;
		p[wpos++] = AS_SEQUENCE;
		p[wpos++] = quantum + size;

		for (; quantum > 0; quantum--) {
			memcpy(p + wpos, &as, sizeof(u_int32_t));
			wpos += sizeof(u_int32_t);
		}
	}
	memcpy(p + wpos, asp->data + shift, asp->len - shift);

	*len = l;
	return (p);
}
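
/*
 * Worked example (added for illustration): prepending AS 64512 twice to
 * the path 65001 65002 grows the existing leading AS_SEQUENCE in place,
 * giving 64512 64512 65001 65002 in 2 + 4 * 4 bytes.  Only when the
 * leading segment is an AS_SET, or would exceed 255 members, is a new
 * AS_SEQUENCE segment attached in front.
 */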

/*
 * Returns a new aspath where neighbor_as is replaced by local_as.
 */
u_char *
aspath_override(struct aspath *asp, u_int32_t neighbor_as, u_int32_t local_as,
     u_int16_t *len)
{
	u_char		*p, *seg, *nseg;
	u_int32_t	 as;
	u_int16_t	 l, seg_size;
	u_int8_t	 i, seg_len, seg_type;

	p = malloc(asp->len);
	if (p == NULL)
		fatal("aspath_override");

	seg = asp->data;
	nseg = p;
	for (l = asp->len; l > 0; l -= seg_size, seg += seg_size) {
		*nseg++ = seg_type = seg[0];
		*nseg++ = seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as == neighbor_as)
				as = local_as;
			as = htonl(as);
			memcpy(nseg, &as, sizeof(as));
			nseg += sizeof(as);
		}

		if (seg_size > l)
			fatalx("%s: would overflow", __func__);
	}

	*len = asp->len;
	return (p);
}
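
/*
 * Usage sketch (illustration only): with the as-override feature the
 * RDE rewrites every occurrence of the neighbor's AS before
 * re-announcing, e.g. for neighbor AS 65010 and local AS 65000 the
 * path 65010 65020 65010 becomes 65000 65020 65000; the caller owns
 * the returned buffer and frees it after interning the new path.
 */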

int
aspath_lenmatch(struct aspath *a, enum aslen_spec type, u_int aslen)
{
	u_int8_t	*seg;
	u_int32_t	 as, lastas = 0;
	u_int		 count = 0;
	u_int16_t	 len, seg_size;
	u_int8_t	 i, seg_len, seg_type;

	if (type == ASLEN_MAX) {
		if (aslen < aspath_count(a->data, a->len))
			return (1);
		else
			return (0);
	}

	/* type == ASLEN_SEQ */
	seg = a->data;
	for (len = a->len; len > 0; len -= seg_size, seg += seg_size) {
		seg_type = seg[0];
		seg_len = seg[1];
		seg_size = 2 + sizeof(u_int32_t) * seg_len;

		for (i = 0; i < seg_len; i++) {
			as = aspath_extract(seg, i);
			if (as == lastas) {
				if (aslen < ++count)
					return (1);
			} else if (seg_type == AS_SET) {
				/* AS path 3 { 4 3 7 } 3 will have count = 3 */
				continue;
			} else
				count = 1;
			lastas = as;
		}

		if (seg_size > len)
			fatalx("%s: would overflow", __func__);
	}
	return (0);
}
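
/*
 * Worked example (added for illustration): with type ASLEN_SEQ the
 * function looks for a run of identical neighboring ASes longer than
 * aslen, i.e. an excessive prepend.  For the path 65001 65002 65002
 * 65002 the longest run is 3, so aspath_lenmatch(a, ASLEN_SEQ, 2)
 * returns 1 while aspath_lenmatch(a, ASLEN_SEQ, 3) returns 0.
 */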