1 /*	$OpenBSD: rde_attr.c,v 1.123 2019/06/24 06:39:49 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2004 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2016 Job Snijders <job@instituut.net>
6  * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/queue.h>
22 
23 #include <endian.h>
24 #include <limits.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <siphash.h>
28 
29 #include "bgpd.h"
30 #include "rde.h"
31 #include "log.h"
32 
33 int
34 attr_write(void *p, u_int16_t p_len, u_int8_t flags, u_int8_t type,
35     void *data, u_int16_t data_len)
36 {
37 	u_char		*b = p;
38 	u_int16_t	 tmp, tot_len = 2; /* attribute header (without len) */
39 
40 	flags &= ~ATTR_DEFMASK;
41 	if (data_len > 255) {
42 		tot_len += 2 + data_len;
43 		flags |= ATTR_EXTLEN;
44 	} else {
45 		tot_len += 1 + data_len;
46 	}
47 
48 	if (tot_len > p_len)
49 		return (-1);
50 
51 	*b++ = flags;
52 	*b++ = type;
53 	if (data_len > 255) {
54 		tmp = htons(data_len);
55 		memcpy(b, &tmp, sizeof(tmp));
56 		b += 2;
57 	} else
58 		*b++ = (u_char)data_len;
59 
60 	if (data == NULL)
61 		return (tot_len - data_len);
62 
63 	if (data_len != 0)
64 		memcpy(b, data, data_len);
65 
66 	return (tot_len);
67 }
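
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): the wire layout attr_write() produces.  The flag bits follow
 * RFC 4271 (Optional 0x80, Extended Length 0x10) and MULTI_EXIT_DISC is
 * type code 4; ATTR_OPTIONAL and ATTR_MED are assumed to be the matching
 * definitions from bgpd.h.  With a 4-byte value the result is 7 bytes:
 *
 *	0x80 0x04 0x04 <v0> <v1> <v2> <v3>
 *	flags type len  value
 *
 * Once data_len exceeds 255, ATTR_EXTLEN is or'ed into the flags and the
 * length field becomes two big-endian bytes, giving a 4-byte header.
 */
#if 0
/* Hypothetical helper, never compiled: encode a MED of 100 into buf. */
static int
example_write_med(u_char *buf, u_int16_t len)
{
	u_int32_t metric = htonl(100);

	/* produces 0x80 0x04 0x04 followed by the four metric bytes */
	return (attr_write(buf, len, ATTR_OPTIONAL, ATTR_MED,
	    &metric, sizeof(metric)));
}
#endif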
68 
69 int
70 attr_writebuf(struct ibuf *buf, u_int8_t flags, u_int8_t type, void *data,
71     u_int16_t data_len)
72 {
73 	u_char	hdr[4];
74 
75 	flags &= ~ATTR_DEFMASK;
76 	if (data_len > 255) {
77 		flags |= ATTR_EXTLEN;
78 		hdr[2] = (data_len >> 8) & 0xff;
79 		hdr[3] = data_len & 0xff;
80 	} else {
81 		hdr[2] = data_len & 0xff;
82 	}
83 
84 	hdr[0] = flags;
85 	hdr[1] = type;
86 
87 	if (ibuf_add(buf, hdr, flags & ATTR_EXTLEN ? 4 : 3) == -1)
88 		return (-1);
89 	if (data && ibuf_add(buf, data, data_len) == -1)
90 		return (-1);
91 	return (0);
92 }
93 
94 /* optional attribute specific functions */
95 int		 attr_diff(struct attr *, struct attr *);
96 struct attr	*attr_alloc(u_int8_t, u_int8_t, const void *, u_int16_t);
97 struct attr	*attr_lookup(u_int8_t, u_int8_t, const void *, u_int16_t);
98 void		 attr_put(struct attr *);
99 
100 struct attr_table {
101 	struct attr_list	*hashtbl;
102 	u_int64_t		 hashmask;
103 } attrtable;
104 
105 SIPHASH_KEY attrtablekey;
106 
107 #define ATTR_HASH(x)				\
108 	&attrtable.hashtbl[(x) & attrtable.hashmask]
109 
110 void
111 attr_init(u_int32_t hashsize)
112 {
113 	u_int32_t	hs, i;
114 
115 	arc4random_buf(&attrtablekey, sizeof(attrtablekey));
116 	for (hs = 1; hs < hashsize; hs <<= 1)
117 		;
118 	attrtable.hashtbl = calloc(hs, sizeof(struct attr_list));
119 	if (attrtable.hashtbl == NULL)
120 		fatal("attr_init");
121 
122 	for (i = 0; i < hs; i++)
123 		LIST_INIT(&attrtable.hashtbl[i]);
124 
125 	attrtable.hashmask = hs - 1;
126 }
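
/*
 * Worked example (added for exposition): attr_init(1000) rounds the
 * requested size up to the next power of two, so hs becomes 1024 and
 * attrtable.hashmask 1023 (0x3ff).  ATTR_HASH() can then reduce a 64-bit
 * SipHash value to a bucket index with a single "& hashmask", and the
 * per-boot random attrtablekey keeps bucket placement unpredictable.
 */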
127 
128 void
129 attr_shutdown(void)
130 {
131 	u_int64_t	i;
132 
133 	for (i = 0; i <= attrtable.hashmask; i++)
134 		if (!LIST_EMPTY(&attrtable.hashtbl[i]))
135 			log_warnx("%s: free non-free table", __func__);
136 
137 	free(attrtable.hashtbl);
138 }
139 
140 void
141 attr_hash_stats(struct rde_hashstats *hs)
142 {
143 	struct attr		*a;
144 	u_int64_t		i;
145 	int64_t			n;
146 
147 	memset(hs, 0, sizeof(*hs));
148 	strlcpy(hs->name, "attr hash", sizeof(hs->name));
149 	hs->min = LLONG_MAX;
150 	hs->num = attrtable.hashmask + 1;
151 
152 	for (i = 0; i <= attrtable.hashmask; i++) {
153 		n = 0;
154 		LIST_FOREACH(a, &attrtable.hashtbl[i], entry)
155 			n++;
156 		if (n < hs->min)
157 			hs->min = n;
158 		if (n > hs->max)
159 			hs->max = n;
160 		hs->sum += n;
161 		hs->sumq += n * n;
162 	}
163 }
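
/*
 * Note (added for exposition): besides min and max, the sum and the sum
 * of squares (sumq) of the chain lengths are collected so that a consumer
 * can derive the mean as sum / num and the variance as
 * sumq / num - (sum / num)^2 without walking the table again.
 */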
164 
165 int
166 attr_optadd(struct rde_aspath *asp, u_int8_t flags, u_int8_t type,
167     void *data, u_int16_t len)
168 {
169 	u_int8_t	 l;
170 	struct attr	*a, *t;
171 	void		*p;
172 
173 	/* known optional attributes were validated previously */
174 	if ((a = attr_lookup(flags, type, data, len)) == NULL)
175 		a = attr_alloc(flags, type, data, len);
176 
177 	/* attribute allowed only once */
178 	for (l = 0; l < asp->others_len; l++) {
179 		if (asp->others[l] == NULL)
180 			break;
181 		if (type == asp->others[l]->type) {
182 			if (a->refcnt == 0)
183 				attr_put(a);
184 			return (-1);
185 		}
186 	}
187 
188 	/* add attribute to the table but first bump refcnt */
189 	a->refcnt++;
190 	rdemem.attr_refs++;
191 
192 	for (l = 0; l < asp->others_len; l++) {
193 		if (asp->others[l] == NULL) {
194 			asp->others[l] = a;
195 			return (0);
196 		}
197 		/* list is sorted */
198 		if (a->type < asp->others[l]->type) {
199 			t = asp->others[l];
200 			asp->others[l] = a;
201 			a = t;
202 		}
203 	}
204 
205 	/* no empty slot found, need to realloc */
206 	if (asp->others_len == UCHAR_MAX)
207 		fatalx("attr_optadd: others_len overflow");
208 
209 	asp->others_len++;
210 	if ((p = reallocarray(asp->others,
211 	    asp->others_len, sizeof(struct attr *))) == NULL)
212 		fatal("attr_optadd");
213 	asp->others = p;
214 
215 	/* l stores the size of others before resize */
216 	asp->others[l] = a;
217 	return (0);
218 }
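
/*
 * Worked example (added for exposition): the second loop above is an
 * insertion sort keyed on the attribute type code.  With others holding
 * { COMMUNITIES(8), CLUSTER_LIST(10), NULL } and a new ORIGINATOR_ID(9),
 * the 9 is swapped into the second slot and the displaced 10 is carried
 * forward into the free third slot, leaving { 8, 9, 10 }.  Type codes
 * are the RFC 1997/4456 values; duplicates were already rejected by the
 * first loop.
 */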
219 
220 struct attr *
221 attr_optget(const struct rde_aspath *asp, u_int8_t type)
222 {
223 	u_int8_t	 l;
224 
225 	for (l = 0; l < asp->others_len; l++) {
226 		if (asp->others[l] == NULL)
227 			break;
228 		if (type == asp->others[l]->type)
229 			return (asp->others[l]);
230 		if (type < asp->others[l]->type)
231 			break;
232 	}
233 	return (NULL);
234 }
235 
236 void
237 attr_copy(struct rde_aspath *t, const struct rde_aspath *s)
238 {
239 	u_int8_t	l;
240 
241 	if (t->others != NULL)
242 		attr_freeall(t);
243 
244 	t->others_len = s->others_len;
245 	if (t->others_len == 0) {
246 		t->others = NULL;
247 		return;
248 	}
249 
250 	if ((t->others = calloc(s->others_len, sizeof(struct attr *))) == 0)
251 		fatal("attr_copy");
252 
253 	for (l = 0; l < t->others_len; l++) {
254 		if (s->others[l] == NULL)
255 			break;
256 		s->others[l]->refcnt++;
257 		rdemem.attr_refs++;
258 		t->others[l] = s->others[l];
259 	}
260 }
261 
262 int
263 attr_diff(struct attr *oa, struct attr *ob)
264 {
265 	int	r;
266 
267 	if (ob == NULL)
268 		return (1);
269 	if (oa == NULL)
270 		return (-1);
271 	if (oa->flags > ob->flags)
272 		return (1);
273 	if (oa->flags < ob->flags)
274 		return (-1);
275 	if (oa->type > ob->type)
276 		return (1);
277 	if (oa->type < ob->type)
278 		return (-1);
279 	if (oa->len > ob->len)
280 		return (1);
281 	if (oa->len < ob->len)
282 		return (-1);
283 	r = memcmp(oa->data, ob->data, oa->len);
284 	if (r > 0)
285 		return (1);
286 	if (r < 0)
287 		return (-1);
288 
289 	fatalx("attr_diff: equal attributes encountered");
290 }
291 
292 int
293 attr_compare(struct rde_aspath *a, struct rde_aspath *b)
294 {
295 	u_int8_t	l, min;
296 
297 	min = a->others_len < b->others_len ? a->others_len : b->others_len;
298 	for (l = 0; l < min; l++)
299 		if (a->others[l] != b->others[l])
300 			return (attr_diff(a->others[l], b->others[l]));
301 
302 	if (a->others_len < b->others_len) {
303 		for (; l < b->others_len; l++)
304 			if (b->others[l] != NULL)
305 				return (-1);
306 	} else if (a->others_len > b->others_len) {
307 		for (; l < a->others_len; l++)
308 			if (a->others[l] != NULL)
309 				return (1);
310 	}
311 
312 	return (0);
313 }
314 
315 u_int64_t
316 attr_hash(struct rde_aspath *a)
317 {
318 	u_int64_t	hash = 0;
319 	u_int8_t	l;
320 
321 	for (l = 0; l < a->others_len; l++)
322 		if (a->others[l] != NULL)
323 			hash ^= a->others[l]->hash;
324 	return (hash);
325 }
326 
327 void
328 attr_free(struct rde_aspath *asp, struct attr *attr)
329 {
330 	u_int8_t	l;
331 
332 	for (l = 0; l < asp->others_len; l++)
333 		if (asp->others[l] == attr) {
334 			attr_put(asp->others[l]);
335 			for (++l; l < asp->others_len; l++)
336 				asp->others[l - 1] = asp->others[l];
337 			asp->others[asp->others_len - 1] = NULL;
338 			return;
339 		}
340 
341 	/* no realloc() because the slot may be reused soon */
342 }
343 
344 void
345 attr_freeall(struct rde_aspath *asp)
346 {
347 	u_int8_t	l;
348 
349 	for (l = 0; l < asp->others_len; l++)
350 		attr_put(asp->others[l]);
351 
352 	free(asp->others);
353 	asp->others = NULL;
354 	asp->others_len = 0;
355 }
356 
357 struct attr *
358 attr_alloc(u_int8_t flags, u_int8_t type, const void *data, u_int16_t len)
359 {
360 	struct attr	*a;
361 	SIPHASH_CTX	ctx;
362 
363 	a = calloc(1, sizeof(struct attr));
364 	if (a == NULL)
365 		fatal("attr_optadd");
366 	rdemem.attr_cnt++;
367 
368 	flags &= ~ATTR_DEFMASK;	/* normalize mask */
369 	a->flags = flags;
370 	a->type = type;
371 	a->len = len;
372 	if (len != 0) {
373 		if ((a->data = malloc(len)) == NULL)
374 			fatal("attr_optadd");
375 
376 		rdemem.attr_dcnt++;
377 		rdemem.attr_data += len;
378 		memcpy(a->data, data, len);
379 	} else
380 		a->data = NULL;
381 
382 	SipHash24_Init(&ctx, &attrtablekey);
383 	SipHash24_Update(&ctx, &flags, sizeof(flags));
384 	SipHash24_Update(&ctx, &type, sizeof(type));
385 	SipHash24_Update(&ctx, &len, sizeof(len));
386 	SipHash24_Update(&ctx, a->data, a->len);
387 	a->hash = SipHash24_End(&ctx);
388 	LIST_INSERT_HEAD(ATTR_HASH(a->hash), a, entry);
389 
390 	return (a);
391 }
392 
393 struct attr *
394 attr_lookup(u_int8_t flags, u_int8_t type, const void *data, u_int16_t len)
395 {
396 	struct attr_list	*head;
397 	struct attr		*a;
398 	u_int64_t		 hash;
399 	SIPHASH_CTX		ctx;
400 
401 	flags &= ~ATTR_DEFMASK;	/* normalize mask */
402 
403 	SipHash24_Init(&ctx, &attrtablekey);
404 	SipHash24_Update(&ctx, &flags, sizeof(flags));
405 	SipHash24_Update(&ctx, &type, sizeof(type));
406 	SipHash24_Update(&ctx, &len, sizeof(len));
407 	SipHash24_Update(&ctx, data, len);
408 	hash = SipHash24_End(&ctx);
409 	head = ATTR_HASH(hash);
410 
411 	LIST_FOREACH(a, head, entry) {
412 		if (hash == a->hash && type == a->type &&
413 		    flags == a->flags && len == a->len &&
414 		    memcmp(data, a->data, len) == 0)
415 			return (a);
416 	}
417 	return (NULL);
418 }
419 
420 void
421 attr_put(struct attr *a)
422 {
423 	if (a == NULL)
424 		return;
425 
426 	rdemem.attr_refs--;
427 	if (--a->refcnt > 0)
428 		/* somebody still holds a reference */
429 		return;
430 
431 	/* unlink */
432 	LIST_REMOVE(a, entry);
433 
434 	if (a->len != 0)
435 		rdemem.attr_dcnt--;
436 	rdemem.attr_data -= a->len;
437 	rdemem.attr_cnt--;
438 	free(a->data);
439 	free(a);
440 }
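
/*
 * Usage note (added for exposition): attributes are interned and
 * reference counted.  attr_optadd() either finds an existing struct attr
 * via attr_lookup() or creates one with attr_alloc(), then takes a
 * reference; attr_free()/attr_freeall() drop it again through attr_put().
 * Only when the last reference is gone is the attr unlinked from the
 * hash table and freed, so many rde_aspath objects can share one copy of
 * identical attribute data.
 */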
441 
442 /* aspath specific functions */
443 
444 static u_int16_t aspath_count(const void *, u_int16_t);
445 static u_int32_t aspath_extract_origin(const void *, u_int16_t);
446 static u_int16_t aspath_countlength(struct aspath *, u_int16_t, int);
447 static void	 aspath_countcopy(struct aspath *, u_int16_t, u_int8_t *,
448 		     u_int16_t, int);
449 struct aspath	*aspath_lookup(const void *, u_int16_t);
450 
451 struct aspath_table {
452 	struct aspath_list	*hashtbl;
453 	u_int32_t		 hashmask;
454 } astable;
455 
456 SIPHASH_KEY astablekey;
457 
458 #define ASPATH_HASH(x)				\
459 	&astable.hashtbl[(x) & astable.hashmask]
460 
461 void
462 aspath_init(u_int32_t hashsize)
463 {
464 	u_int32_t	hs, i;
465 
466 	for (hs = 1; hs < hashsize; hs <<= 1)
467 		;
468 	astable.hashtbl = calloc(hs, sizeof(struct aspath_list));
469 	if (astable.hashtbl == NULL)
470 		fatal("aspath_init");
471 
472 	for (i = 0; i < hs; i++)
473 		LIST_INIT(&astable.hashtbl[i]);
474 
475 	astable.hashmask = hs - 1;
476 	arc4random_buf(&astablekey, sizeof(astablekey));
477 }
478 
479 void
480 aspath_shutdown(void)
481 {
482 	u_int32_t	i;
483 
484 	for (i = 0; i <= astable.hashmask; i++)
485 		if (!LIST_EMPTY(&astable.hashtbl[i]))
486 			log_warnx("aspath_shutdown: free non-free table");
487 
488 	free(astable.hashtbl);
489 }
490 
491 void
492 aspath_hash_stats(struct rde_hashstats *hs)
493 {
494 	struct aspath		*a;
495 	u_int32_t		i;
496 	int64_t			n;
497 
498 	memset(hs, 0, sizeof(*hs));
499 	strlcpy(hs->name, "aspath hash", sizeof(hs->name));
500 	hs->min = LLONG_MAX;
501 	hs->num = astable.hashmask + 1;
502 
503 	for (i = 0; i <= astable.hashmask; i++) {
504 		n = 0;
505 		LIST_FOREACH(a, &astable.hashtbl[i], entry)
506 			n++;
507 		if (n < hs->min)
508 			hs->min = n;
509 		if (n > hs->max)
510 			hs->max = n;
511 		hs->sum += n;
512 		hs->sumq += n * n;
513 	}
514 }
515 
516 struct aspath *
517 aspath_get(void *data, u_int16_t len)
518 {
519 	struct aspath_list	*head;
520 	struct aspath		*aspath;
521 
522 	/* The aspath must already have been checked for correctness. */
523 	aspath = aspath_lookup(data, len);
524 	if (aspath == NULL) {
525 		aspath = malloc(ASPATH_HEADER_SIZE + len);
526 		if (aspath == NULL)
527 			fatal("aspath_get");
528 
529 		rdemem.aspath_cnt++;
530 		rdemem.aspath_size += ASPATH_HEADER_SIZE + len;
531 
532 		aspath->refcnt = 0;
533 		aspath->len = len;
534 		aspath->ascnt = aspath_count(data, len);
535 		aspath->source_as = aspath_extract_origin(data, len);
536 		memcpy(aspath->data, data, len);
537 
538 		/* link */
539 		head = ASPATH_HASH(SipHash24(&astablekey, aspath->data,
540 		    aspath->len));
541 		LIST_INSERT_HEAD(head, aspath, entry);
542 	}
543 	aspath->refcnt++;
544 	rdemem.aspath_refs++;
545 
546 	return (aspath);
547 }
548 
549 void
550 aspath_put(struct aspath *aspath)
551 {
552 	if (aspath == NULL)
553 		return;
554 
555 	rdemem.aspath_refs--;
556 	if (--aspath->refcnt > 0) {
557 		/* somebody still holds a reference */
558 		return;
559 	}
560 
561 	/* unlink */
562 	LIST_REMOVE(aspath, entry);
563 
564 	rdemem.aspath_cnt--;
565 	rdemem.aspath_size -= ASPATH_HEADER_SIZE + aspath->len;
566 	free(aspath);
567 }
568 
569 /*
570  * Convert a 4-byte aspath to a 2-byte one.
571  * data is freed by aspath_deflate().
572  */
573 u_char *
574 aspath_deflate(u_char *data, u_int16_t *len, int *flagnew)
575 {
576 	u_int8_t	*seg, *nseg, *ndata;
577 	u_int32_t	 as;
578 	int		 i;
579 	u_int16_t	 seg_size, olen, nlen;
580 	u_int8_t	 seg_len;
581 
582 	/* first calculate the length of the aspath */
583 	nlen = 0;
584 	seg = data;
585 	olen = *len;
586 	for (; olen > 0; olen -= seg_size, seg += seg_size) {
587 		seg_len = seg[1];
588 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
589 		nlen += 2 + sizeof(u_int16_t) * seg_len;
590 
591 		if (seg_size > olen)
592 			fatalx("%s: would overflow", __func__);
593 	}
594 
595 	if ((ndata = malloc(nlen)) == NULL)
596 		fatal("aspath_deflate");
597 
598 	/* then copy the aspath */
599 	seg = data;
600 	olen = *len;
601 	for (nseg = ndata; seg < data + olen; seg += seg_size) {
602 		*nseg++ = seg[0];
603 		*nseg++ = seg_len = seg[1];
604 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
605 
606 		for (i = 0; i < seg_len; i++) {
607 			as = aspath_extract(seg, i);
608 			if (as > USHRT_MAX) {
609 				as = AS_TRANS;
610 				*flagnew = 1;
611 			}
612 			*nseg++ = (as >> 8) & 0xff;
613 			*nseg++ = as & 0xff;
614 		}
615 	}
616 
617 	free(data);
618 	*len = nlen;
619 	return (ndata);
620 }
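
/*
 * Worked example (added for exposition): an AS_SEQUENCE of three 4-byte
 * AS numbers occupies 2 + 3 * 4 = 14 bytes and deflates to 2 + 3 * 2 = 8
 * bytes.  An AS number above USHRT_MAX, e.g. 196608, cannot be encoded
 * in two bytes and is replaced by AS_TRANS (23456, RFC 6793), with
 * *flagnew set so the caller knows the 2-byte path is only approximate.
 */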
621 
622 void
623 aspath_merge(struct rde_aspath *a, struct attr *attr)
624 {
625 	u_int8_t	*np;
626 	u_int16_t	 ascnt, diff, nlen, difflen;
627 	int		 hroom = 0;
628 
629 	ascnt = aspath_count(attr->data, attr->len);
630 	if (ascnt > a->aspath->ascnt) {
631 		/* ASPATH is shorter than AS4_PATH, no way to merge */
632 		attr_free(a, attr);
633 		return;
634 	}
635 
636 	diff = a->aspath->ascnt - ascnt;
637 	if (diff && attr->len > 2 && attr->data[0] == AS_SEQUENCE)
638 		hroom = attr->data[1];
639 	difflen = aspath_countlength(a->aspath, diff, hroom);
640 	nlen = attr->len + difflen;
641 
642 	if ((np = malloc(nlen)) == NULL)
643 		fatal("aspath_merge");
644 
645 	/* copy head from old aspath */
646 	aspath_countcopy(a->aspath, diff, np, difflen, hroom);
647 
648 	/* copy tail from new aspath */
649 	if (hroom > 0)
650 		memcpy(np + nlen - attr->len + 2, attr->data + 2,
651 		    attr->len - 2);
652 	else
653 		memcpy(np + nlen - attr->len, attr->data, attr->len);
654 
655 	aspath_put(a->aspath);
656 	a->aspath = aspath_get(np, nlen);
657 	free(np);
658 	attr_free(a, attr);
659 }
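
/*
 * Worked example (added for exposition, AS numbers invented): a 2-byte
 * speaker announced ASPATH "64496 23456 23456" (ascnt 3) together with
 * AS4_PATH "196608 196609" (ascnt 2).  diff is 1, so the leading 64496
 * is kept from the ASPATH and the AS4_PATH supplies the remainder,
 * giving the merged path "64496 196608 196609" as described in RFC 6793.
 */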
660 
661 u_char *
662 aspath_dump(struct aspath *aspath)
663 {
664 	return (aspath->data);
665 }
666 
667 u_int16_t
668 aspath_length(struct aspath *aspath)
669 {
670 	return (aspath->len);
671 }
672 
673 u_int32_t
674 aspath_neighbor(struct aspath *aspath)
675 {
676 	/* Empty aspath is OK -- internal AS route. */
677 	if (aspath->len == 0)
678 		return (rde_local_as());
679 	return (aspath_extract(aspath->data, 0));
680 }
681 
682 u_int32_t
683 aspath_origin(struct aspath *aspath)
684 {
685 	return aspath->source_as;
686 }
687 
688 static u_int16_t
689 aspath_count(const void *data, u_int16_t len)
690 {
691 	const u_int8_t	*seg;
692 	u_int16_t	 cnt, seg_size;
693 	u_int8_t	 seg_type, seg_len;
694 
695 	cnt = 0;
696 	seg = data;
697 	for (; len > 0; len -= seg_size, seg += seg_size) {
698 		seg_type = seg[0];
699 		seg_len = seg[1];
700 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
701 
702 		if (seg_type == AS_SET)
703 			cnt += 1;
704 		else
705 			cnt += seg_len;
706 
707 		if (seg_size > len)
708 			fatalx("%s: would overflow", __func__);
709 	}
710 	return (cnt);
711 }
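
/*
 * Worked example (added for exposition): the path "64496 64497
 * {64499 64500 64501}" is an AS_SEQUENCE of two ASes followed by an
 * AS_SET; the sequence contributes 2 and the whole set contributes 1,
 * so aspath_count() returns 3.  This matches the RFC 4271 rule that an
 * AS_SET counts as a single hop in path length comparisons.
 */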
712 
713 /*
714  * The origin AS number is derived from a Route as follows:
715  * o  the rightmost AS in the final segment of the AS_PATH attribute
716  *    in the Route if that segment is of type AS_SEQUENCE, or
717  * o  the BGP speaker's own AS number if that segment is of type
718  *    AS_CONFED_SEQUENCE or AS_CONFED_SET or if the AS_PATH is empty,
719  * o  the distinguished value "NONE" if the final segment of the
720  *    AS_PATH attribute is of any other type.
721  */
722 static u_int32_t
723 aspath_extract_origin(const void *data, u_int16_t len)
724 {
725 	const u_int8_t	*seg;
726 	u_int32_t	 as = AS_NONE;
727 	u_int16_t	 seg_size;
728 	u_int8_t	 seg_len;
729 
730 	/* AS_PATH is empty */
731 	if (len == 0)
732 		return (rde_local_as());
733 
734 	seg = data;
735 	for (; len > 0; len -= seg_size, seg += seg_size) {
736 		seg_len = seg[1];
737 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
738 
739 		if (len == seg_size && seg[0] == AS_SEQUENCE) {
740 			as = aspath_extract(seg, seg_len - 1);
741 		}
742 		if (seg_size > len)
743 			fatalx("%s: would overflow", __func__);
744 	}
745 	return (as);
746 }
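
/*
 * Worked example (added for exposition): for "64496 64497 64510" the
 * final segment is an AS_SEQUENCE, so the origin is the rightmost AS,
 * 64510.  For "64496 {64510 64511}" the final segment is an AS_SET and
 * AS_NONE is returned, while an empty AS_PATH yields the speaker's own
 * AS via rde_local_as().
 */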
747 
748 static u_int16_t
749 aspath_countlength(struct aspath *aspath, u_int16_t cnt, int headcnt)
750 {
751 	const u_int8_t	*seg;
752 	u_int16_t	 seg_size, len, clen;
753 	u_int8_t	 seg_type = 0, seg_len = 0;
754 
755 	seg = aspath->data;
756 	clen = 0;
757 	for (len = aspath->len; len > 0 && cnt > 0;
758 	    len -= seg_size, seg += seg_size) {
759 		seg_type = seg[0];
760 		seg_len = seg[1];
761 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
762 
763 		if (seg_type == AS_SET)
764 			cnt -= 1;
765 		else if (seg_len > cnt) {
766 			seg_len = cnt;
767 			clen += 2 + sizeof(u_int32_t) * cnt;
768 			break;
769 		} else
770 			cnt -= seg_len;
771 
772 		clen += seg_size;
773 
774 		if (seg_size > len)
775 			fatalx("%s: would overflow", __func__);
776 	}
777 	if (headcnt > 0 && seg_type == AS_SEQUENCE && headcnt + seg_len < 256)
778 		/* no need for additional header from the new aspath. */
779 		clen -= 2;
780 
781 	return (clen);
782 }
783 
784 static void
785 aspath_countcopy(struct aspath *aspath, u_int16_t cnt, u_int8_t *buf,
786     u_int16_t size, int headcnt)
787 {
788 	const u_int8_t	*seg;
789 	u_int16_t	 seg_size, len;
790 	u_int8_t	 seg_type, seg_len;
791 
792 	if (headcnt > 0)
793 		/*
794 		 * additional room because we steal the segment header
795 		 * from the other aspath
796 		 */
797 		size += 2;
798 	seg = aspath->data;
799 	for (len = aspath->len; len > 0 && cnt > 0;
800 	    len -= seg_size, seg += seg_size) {
801 		seg_type = seg[0];
802 		seg_len = seg[1];
803 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
804 
805 		if (seg_type == AS_SET)
806 			cnt -= 1;
807 		else if (seg_len > cnt) {
808 			seg_len = cnt + headcnt;
809 			seg_size = 2 + sizeof(u_int32_t) * cnt;
810 			cnt = 0;
811 		} else {
812 			cnt -= seg_len;
813 			if (cnt == 0)
814 				seg_len += headcnt;
815 		}
816 
817 		memcpy(buf, seg, seg_size);
818 		buf[0] = seg_type;
819 		buf[1] = seg_len;
820 		buf += seg_size;
821 		if (size < seg_size)
822 			fatalx("%s: would overflow", __func__);
823 		size -= seg_size;
824 	}
825 }
826 
827 int
828 aspath_loopfree(struct aspath *aspath, u_int32_t myAS)
829 {
830 	u_int8_t	*seg;
831 	u_int16_t	 len, seg_size;
832 	u_int8_t	 i, seg_len;
833 
834 	seg = aspath->data;
835 	for (len = aspath->len; len > 0; len -= seg_size, seg += seg_size) {
836 		seg_len = seg[1];
837 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
838 
839 		for (i = 0; i < seg_len; i++) {
840 			if (myAS == aspath_extract(seg, i))
841 				return (0);
842 		}
843 
844 		if (seg_size > len)
845 			fatalx("%s: would overflow", __func__);
846 	}
847 	return (1);
848 }
849 
850 int
851 aspath_compare(struct aspath *a1, struct aspath *a2)
852 {
853 	int r;
854 
855 	if (a1->len > a2->len)
856 		return (1);
857 	if (a1->len < a2->len)
858 		return (-1);
859 	r = memcmp(a1->data, a2->data, a1->len);
860 	if (r > 0)
861 		return (1);
862 	if (r < 0)
863 		return (-1);
864 	return (0);
865 }
866 
867 struct aspath *
868 aspath_lookup(const void *data, u_int16_t len)
869 {
870 	struct aspath_list	*head;
871 	struct aspath		*aspath;
872 	u_int32_t		 hash;
873 
874 	hash = SipHash24(&astablekey, data, len);
875 	head = ASPATH_HASH(hash);
876 
877 	LIST_FOREACH(aspath, head, entry) {
878 		if (len == aspath->len && memcmp(data, aspath->data, len) == 0)
879 			return (aspath);
880 	}
881 	return (NULL);
882 }
883 
884 
885 static int
886 as_compare(struct filter_as *f, u_int32_t as, u_int32_t neighas)
887 {
888 	u_int32_t match;
889 
890 	if (f->flags & AS_FLAG_AS_SET_NAME)	/* should not happen */
891 		return (0);
892 	if (f->flags & AS_FLAG_AS_SET)
893 		return (as_set_match(f->aset, as));
894 
895 	if (f->flags & AS_FLAG_NEIGHBORAS)
896 		match = neighas;
897 	else
898 		match = f->as_min;
899 
900 	switch (f->op) {
901 	case OP_NONE:
902 	case OP_EQ:
903 		if (as == match)
904 			return (1);
905 		break;
906 	case OP_NE:
907 		if (as != match)
908 			return (1);
909 		break;
910 	case OP_RANGE:
911 		if (as >= f->as_min && as <= f->as_max)
912 			return (1);
913 		break;
914 	case OP_XRANGE:
915 		if (as < f->as_min || as > f->as_max)
916 			return (1);
917 		break;
918 	}
919 	return (0);
920 }
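
/*
 * Worked example (added for exposition): with f->as_min = 64500 and
 * f->as_max = 64510, OP_RANGE matches any AS from 64500 through 64510
 * inclusive and OP_XRANGE matches everything outside that range.  With
 * AS_FLAG_NEIGHBORAS set, OP_EQ/OP_NE compare against the neighbor's AS
 * (neighas) instead of a number taken from the filter rule.
 */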
921 
922 /* we need to be able to search for more than one AS */
923 int
924 aspath_match(struct aspath *aspath, struct filter_as *f, u_int32_t neighas)
925 {
926 	const u_int8_t	*seg;
927 	int		 final;
928 	u_int16_t	 len, seg_size;
929 	u_int8_t	 i, seg_len;
930 	u_int32_t	 as = AS_NONE;
931 
932 	if (f->type == AS_EMPTY) {
933 		if (aspath_length(aspath) == 0)
934 			return (1);
935 		else
936 			return (0);
937 	}
938 
939 	/* just check the leftmost AS */
940 	if (f->type == AS_PEER) {
941 		as = aspath_neighbor(aspath);
942 		if (as_compare(f, as, neighas))
943 			return (1);
944 		else
945 			return (0);
946 	}
947 
948 	seg = aspath->data;
949 	len = aspath->len;
950 	for (; len >= 6; len -= seg_size, seg += seg_size) {
951 		seg_len = seg[1];
952 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
953 
954 		final = (len == seg_size);
955 
956 		if (f->type == AS_SOURCE) {
957 			/*
958 			 * Just extract the rightmost AS
959 			 * but if that segment is an AS_SET then the rightmost
960 			 * AS of a previous AS_SEQUENCE segment should be used.
961 			 * Because of that just look at AS_SEQUENCE segments.
962 			 */
963 			if (seg[0] == AS_SEQUENCE)
964 				as = aspath_extract(seg, seg_len - 1);
965 			/* not yet in the final segment */
966 			if (!final)
967 				continue;
968 			if (as_compare(f, as, neighas))
969 				return (1);
970 			else
971 				return (0);
972 		}
973 		/* AS_TRANSIT or AS_ALL */
974 		for (i = 0; i < seg_len; i++) {
975 			/*
976 			 * the source (rightmost) AS is excluded from
977 			 * AS_TRANSIT matches.
978 			 */
979 			if (final && i == seg_len - 1 && f->type == AS_TRANSIT)
980 				return (0);
981 			as = aspath_extract(seg, i);
982 			if (as_compare(f, as, neighas))
983 				return (1);
984 		}
985 
986 		if (seg_size > len)
987 			fatalx("%s: would overflow", __func__);
988 	}
989 	return (0);
990 }
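
/*
 * Worked example (added for exposition): for the path "64496 64497 64510",
 * AS_PEER only tests the leftmost AS (64496), AS_SOURCE only the rightmost
 * AS of the final AS_SEQUENCE (64510), AS_TRANSIT tests 64496 and 64497
 * but deliberately skips the source 64510, and AS_ALL tests all three.
 */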
991 
992 /*
993  * Returns a new prepended aspath. Old needs to be freed by caller.
994  */
995 u_char *
996 aspath_prepend(struct aspath *asp, u_int32_t as, int quantum, u_int16_t *len)
997 {
998 	u_char		*p;
999 	int		 l, overflow = 0, shift = 0, size, wpos = 0;
1000 	u_int8_t	 type;
1001 
1002 	/* lunatic prepends are blocked in the parser and limited */
1003 
1004 	/* first calculate new size */
1005 	if (asp->len > 0) {
1006 		if (asp->len < 2)
1007 			fatalx("aspath_prepend: bad aspath length");
1008 		type = asp->data[0];
1009 		size = asp->data[1];
1010 	} else {
1011 		/* empty as path */
1012 		type = AS_SET;
1013 		size = 0;
1014 	}
1015 
1016 	if (quantum > 255)
1017 		fatalx("aspath_prepend: preposterous prepend");
1018 	if (quantum == 0) {
1019 		/* no change needed but return a copy */
1020 		p = malloc(asp->len);
1021 		if (p == NULL)
1022 			fatal("aspath_prepend");
1023 		memcpy(p, asp->data, asp->len);
1024 		*len = asp->len;
1025 		return (p);
1026 	} else if (type == AS_SET || size + quantum > 255) {
1027 		/* need to attach a new AS_SEQUENCE */
1028 		l = 2 + quantum * sizeof(u_int32_t) + asp->len;
1029 		if (type == AS_SET)
1030 			overflow = quantum;
1031 		else
1032 			overflow = size + quantum - 255;
1033 	} else
1034 		l = quantum * sizeof(u_int32_t) + asp->len;
1035 
1036 	quantum -= overflow;
1037 
1038 	p = malloc(l);
1039 	if (p == NULL)
1040 		fatal("aspath_prepend");
1041 
1042 	/* first prepends */
1043 	as = htonl(as);
1044 	if (overflow > 0) {
1045 		p[wpos++] = AS_SEQUENCE;
1046 		p[wpos++] = overflow;
1047 
1048 		for (; overflow > 0; overflow--) {
1049 			memcpy(p + wpos, &as, sizeof(u_int32_t));
1050 			wpos += sizeof(u_int32_t);
1051 		}
1052 	}
1053 	if (quantum > 0) {
1054 		shift = 2;
1055 		p[wpos++] = AS_SEQUENCE;
1056 		p[wpos++] = quantum + size;
1057 
1058 		for (; quantum > 0; quantum--) {
1059 			memcpy(p + wpos, &as, sizeof(u_int32_t));
1060 			wpos += sizeof(u_int32_t);
1061 		}
1062 	}
1063 	memcpy(p + wpos, asp->data + shift, asp->len - shift);
1064 
1065 	*len = l;
1066 	return (p);
1067 }
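
/*
 * Worked example (added for exposition): prepending AS 64496 three times
 * (quantum = 3) to a path whose first segment is an AS_SEQUENCE of 254
 * ASes overflows the 255-entry segment limit by 2.  The result starts
 * with a fresh AS_SEQUENCE carrying the 2 overflowing prepends, followed
 * by the old first segment grown to 255 entries (1 prepend plus the
 * original 254), with the rest of the path copied unchanged.
 */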
1068 
1069 /*
1070  * Returns a new aspath where neighbor_as is replaced by local_as.
1071  */
1072 u_char *
1073 aspath_override(struct aspath *asp, u_int32_t neighbor_as, u_int32_t local_as,
1074      u_int16_t *len)
1075 {
1076 	u_char		*p, *seg, *nseg;
1077 	u_int32_t	 as;
1078 	u_int16_t	 l, seg_size;
1079 	u_int8_t	 i, seg_len, seg_type;
1080 
1081 	p = malloc(asp->len);
1082 	if (p == NULL)
1083 		fatal("aspath_override");
1084 
1085 	seg = asp->data;
1086 	nseg = p;
1087 	for (l = asp->len; l > 0; l -= seg_size, seg += seg_size) {
1088 		*nseg++ = seg_type = seg[0];
1089 		*nseg++ = seg_len = seg[1];
1090 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
1091 
1092 		for (i = 0; i < seg_len; i++) {
1093 			as = aspath_extract(seg, i);
1094 			if (as == neighbor_as)
1095 				as = local_as;
1096 			as = htonl(as);
1097 			memcpy(nseg, &as, sizeof(as));
1098 			nseg += sizeof(as);
1099 		}
1100 
1101 		if (seg_size > l)
1102 			fatalx("%s: would overflow", __func__);
1103 	}
1104 
1105 	*len = asp->len;
1106 	return (p);
1107 }
1108 
1109 int
1110 aspath_lenmatch(struct aspath *a, enum aslen_spec type, u_int aslen)
1111 {
1112 	u_int8_t	*seg;
1113 	u_int32_t	 as, lastas = 0;
1114 	u_int		 count = 0;
1115 	u_int16_t	 len, seg_size;
1116 	u_int8_t	 i, seg_len, seg_type;
1117 
1118 	if (type == ASLEN_MAX) {
1119 		if (aslen < aspath_count(a->data, a->len))
1120 			return (1);
1121 		else
1122 			return (0);
1123 	}
1124 
1125 	/* type == ASLEN_SEQ */
1126 	seg = a->data;
1127 	for (len = a->len; len > 0; len -= seg_size, seg += seg_size) {
1128 		seg_type = seg[0];
1129 		seg_len = seg[1];
1130 		seg_size = 2 + sizeof(u_int32_t) * seg_len;
1131 
1132 		for (i = 0; i < seg_len; i++) {
1133 			as = aspath_extract(seg, i);
1134 			if (as == lastas) {
1135 				if (aslen < ++count)
1136 					return (1);
1137 			} else if (seg_type == AS_SET) {
1138 				/* AS path 3 { 4 3 7 } 3 will have count = 3 */
1139 				continue;
1140 			} else
1141 				count = 1;
1142 			lastas = as;
1143 		}
1144 
1145 		if (seg_size > len)
1146 			fatalx("%s: would overflow", __func__);
1147 	}
1148 	return (0);
1149 }
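
/*
 * Worked example (added for exposition): with type ASLEN_SEQ and
 * aslen = 2, the path "64496 64496 64496 64497" matches because the run
 * of three identical neighbouring 64496s exceeds 2.  AS_SET members that
 * differ from the current AS are skipped rather than resetting the run,
 * which is how "3 { 4 3 7 } 3" reaches a count of 3 as the in-line
 * comment above notes.  With ASLEN_MAX the total aspath_count() of the
 * path is compared against aslen instead.
 */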
1150