xref: /openbsd/usr.sbin/bgpd/rde.c (revision 666d181c)
1 /*	$OpenBSD: rde.c,v 1.647 2025/01/04 16:58:46 denis Exp $ */
2 
3 /*
4  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
5  * Copyright (c) 2016 Job Snijders <job@instituut.net>
6  * Copyright (c) 2016 Peter Hessler <phessler@openbsd.org>
7  * Copyright (c) 2018 Sebastian Benoit <benno@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/types.h>
23 #include <sys/time.h>
24 #include <sys/resource.h>
25 
26 #include <errno.h>
27 #include <pwd.h>
28 #include <poll.h>
29 #include <signal.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <syslog.h>
34 #include <unistd.h>
35 
36 #include "bgpd.h"
37 #include "session.h"
38 #include "rde.h"
39 #include "log.h"
40 
41 #define PFD_PIPE_MAIN		0
42 #define PFD_PIPE_SESSION	1
43 #define PFD_PIPE_SESSION_CTL	2
44 #define PFD_PIPE_ROA		3
45 #define PFD_PIPE_COUNT		4
46 
47 void		 rde_sighdlr(int);
48 void		 rde_dispatch_imsg_session(struct imsgbuf *);
49 void		 rde_dispatch_imsg_parent(struct imsgbuf *);
50 void		 rde_dispatch_imsg_rtr(struct imsgbuf *);
51 void		 rde_dispatch_imsg_peer(struct rde_peer *, void *);
52 void		 rde_update_dispatch(struct rde_peer *, struct ibuf *);
53 int		 rde_update_update(struct rde_peer *, uint32_t,
54 		    struct filterstate *, struct bgpd_addr *, uint8_t);
55 void		 rde_update_withdraw(struct rde_peer *, uint32_t,
56 		    struct bgpd_addr *, uint8_t);
57 int		 rde_attr_parse(struct ibuf *, struct rde_peer *,
58 		    struct filterstate *, struct ibuf *, struct ibuf *);
59 int		 rde_attr_add(struct filterstate *, struct ibuf *);
60 uint8_t		 rde_attr_missing(struct rde_aspath *, int, uint16_t);
61 int		 rde_get_mp_nexthop(struct ibuf *, uint8_t,
62 		    struct rde_peer *, struct filterstate *);
63 void		 rde_as4byte_fixup(struct rde_peer *, struct rde_aspath *);
64 uint8_t		 rde_aspa_validity(struct rde_peer *, struct rde_aspath *,
65 		    uint8_t);
66 void		 rde_reflector(struct rde_peer *, struct rde_aspath *);
67 
68 void		 rde_dump_ctx_new(struct ctl_show_rib_request *, pid_t,
69 		    enum imsg_type);
70 void		 rde_dump_ctx_throttle(pid_t, int);
71 void		 rde_dump_ctx_terminate(pid_t);
72 void		 rde_dump_mrt_new(struct mrt *, pid_t, int);
73 
74 int		 rde_l3vpn_import(struct rde_community *, struct l3vpn *);
75 static void	 rde_commit_pftable(void);
76 void		 rde_reload_done(void);
77 static void	 rde_softreconfig_in_done(void *, uint8_t);
78 static void	 rde_softreconfig_out_done(void *, uint8_t);
79 static void	 rde_softreconfig_done(void);
80 static void	 rde_softreconfig_out(struct rib_entry *, void *);
81 static void	 rde_softreconfig_in(struct rib_entry *, void *);
82 static void	 rde_softreconfig_sync_reeval(struct rib_entry *, void *);
83 static void	 rde_softreconfig_sync_fib(struct rib_entry *, void *);
84 static void	 rde_softreconfig_sync_done(void *, uint8_t);
85 static void	 rde_rpki_reload(void);
86 static int	 rde_roa_reload(void);
87 static int	 rde_aspa_reload(void);
88 int		 rde_update_queue_pending(void);
89 void		 rde_update_queue_runner(uint8_t);
90 struct rde_prefixset *rde_find_prefixset(char *, struct rde_prefixset_head *);
91 void		 rde_mark_prefixsets_dirty(struct rde_prefixset_head *,
92 		    struct rde_prefixset_head *);
93 uint8_t		 rde_roa_validity(struct rde_prefixset *,
94 		    struct bgpd_addr *, uint8_t, uint32_t);
95 
96 static void	 rde_peer_recv_eor(struct rde_peer *, uint8_t);
97 static void	 rde_peer_send_eor(struct rde_peer *, uint8_t);
98 
99 void		 network_add(struct network_config *, struct filterstate *);
100 void		 network_delete(struct network_config *);
101 static void	 network_dump_upcall(struct rib_entry *, void *);
102 static void	 network_flush_upcall(struct rib_entry *, void *);
103 
104 void		 flowspec_add(struct flowspec *, struct filterstate *,
105 		    struct filter_set_head *);
106 void		 flowspec_delete(struct flowspec *);
107 static void	 flowspec_flush_upcall(struct rib_entry *, void *);
108 static void	 flowspec_dump_upcall(struct rib_entry *, void *);
109 static void	 flowspec_dump_done(void *, uint8_t);
110 
111 void		 rde_shutdown(void);
112 static int	 ovs_match(struct prefix *, uint32_t);
113 static int	 avs_match(struct prefix *, uint32_t);
114 
115 static struct imsgbuf		*ibuf_se;
116 static struct imsgbuf		*ibuf_se_ctl;
117 static struct imsgbuf		*ibuf_rtr;
118 static struct imsgbuf		*ibuf_main;
119 static struct bgpd_config	*conf, *nconf;
120 static struct rde_prefixset	 rde_roa, roa_new;
121 static struct rde_aspa		*rde_aspa, *aspa_new;
122 static uint8_t			 rde_aspa_generation;
123 
124 volatile sig_atomic_t	 rde_quit = 0;
125 struct filter_head	*out_rules, *out_rules_tmp;
126 struct rde_memstats	 rdemem;
127 int			 softreconfig;
128 static int		 rde_eval_all;
129 
130 extern struct peer_tree	 peertable;
131 extern struct rde_peer	*peerself;
132 
133 struct rde_dump_ctx {
134 	LIST_ENTRY(rde_dump_ctx)	entry;
135 	struct ctl_show_rib_request	req;
136 	uint32_t			peerid;
137 	uint8_t				throttled;
138 };
139 
140 LIST_HEAD(, rde_dump_ctx) rde_dump_h = LIST_HEAD_INITIALIZER(rde_dump_h);
141 
142 struct rde_mrt_ctx {
143 	LIST_ENTRY(rde_mrt_ctx)	entry;
144 	struct mrt		mrt;
145 };
146 
147 LIST_HEAD(, rde_mrt_ctx) rde_mrts = LIST_HEAD_INITIALIZER(rde_mrts);
148 u_int rde_mrt_cnt;
149 
150 void
151 rde_sighdlr(int sig)
152 {
153 	switch (sig) {
154 	case SIGINT:
155 	case SIGTERM:
156 		rde_quit = 1;
157 		break;
158 	}
159 }
160 
161 void
162 rde_main(int debug, int verbose)
163 {
164 	struct passwd		*pw;
165 	struct pollfd		*pfd = NULL;
166 	struct rde_mrt_ctx	*mctx, *xmctx;
167 	void			*newp;
168 	u_int			 pfd_elms = 0, i, j;
169 	int			 timeout;
170 	uint8_t			 aid;
171 
172 	log_init(debug, LOG_DAEMON);
173 	log_setverbose(verbose);
174 
175 	log_procinit(log_procnames[PROC_RDE]);
176 
177 	if ((pw = getpwnam(BGPD_USER)) == NULL)
178 		fatal("getpwnam");
179 
180 	if (chroot(pw->pw_dir) == -1)
181 		fatal("chroot");
182 	if (chdir("/") == -1)
183 		fatal("chdir(\"/\")");
184 
185 	setproctitle("route decision engine");
186 
187 	if (setgroups(1, &pw->pw_gid) ||
188 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
189 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
190 		fatal("can't drop privileges");
191 
192 	if (pledge("stdio recvfd", NULL) == -1)
193 		fatal("pledge");
194 
195 	signal(SIGTERM, rde_sighdlr);
196 	signal(SIGINT, rde_sighdlr);
197 	signal(SIGPIPE, SIG_IGN);
198 	signal(SIGHUP, SIG_IGN);
199 	signal(SIGALRM, SIG_IGN);
200 	signal(SIGUSR1, SIG_IGN);
201 
202 	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
203 		fatal(NULL);
204 	if (imsgbuf_init(ibuf_main, 3) == -1 ||
205 	    imsgbuf_set_maxsize(ibuf_main, MAX_BGPD_IMSGSIZE) == -1)
206 		fatal(NULL);
207 	imsgbuf_allow_fdpass(ibuf_main);
208 
209 	/* initialize the RIB structures */
210 	if ((out_rules = calloc(1, sizeof(struct filter_head))) == NULL)
211 		fatal(NULL);
212 	TAILQ_INIT(out_rules);
213 
214 	pt_init();
215 	peer_init(out_rules);
216 
217 	/* make sure the default RIBs are setup */
218 	rib_new("Adj-RIB-In", 0, F_RIB_NOFIB | F_RIB_NOEVALUATE);
219 
220 	conf = new_config();
221 	log_info("route decision engine ready");
222 
223 	while (rde_quit == 0) {
224 		if (pfd_elms < PFD_PIPE_COUNT + rde_mrt_cnt) {
225 			if ((newp = reallocarray(pfd,
226 			    PFD_PIPE_COUNT + rde_mrt_cnt,
227 			    sizeof(struct pollfd))) == NULL) {
228 				/* panic for now  */
229 				log_warn("could not resize pfd from %u -> %u"
230 				    " entries", pfd_elms, PFD_PIPE_COUNT +
231 				    rde_mrt_cnt);
232 				fatalx("exiting");
233 			}
234 			pfd = newp;
235 			pfd_elms = PFD_PIPE_COUNT + rde_mrt_cnt;
236 		}
237 		timeout = -1;
238 		memset(pfd, 0, sizeof(struct pollfd) * pfd_elms);
239 
240 		set_pollfd(&pfd[PFD_PIPE_MAIN], ibuf_main);
241 		set_pollfd(&pfd[PFD_PIPE_SESSION], ibuf_se);
242 		set_pollfd(&pfd[PFD_PIPE_SESSION_CTL], ibuf_se_ctl);
243 		set_pollfd(&pfd[PFD_PIPE_ROA], ibuf_rtr);
244 
245 		i = PFD_PIPE_COUNT;
246 		for (mctx = LIST_FIRST(&rde_mrts); mctx != 0; mctx = xmctx) {
247 			xmctx = LIST_NEXT(mctx, entry);
248 
249 			if (i >= pfd_elms)
250 				fatalx("poll pfd too small");
251 			if (msgbuf_queuelen(mctx->mrt.wbuf) > 0) {
252 				pfd[i].fd = mctx->mrt.fd;
253 				pfd[i].events = POLLOUT;
254 				i++;
255 			} else if (mctx->mrt.state == MRT_STATE_REMOVE) {
256 				mrt_clean(&mctx->mrt);
257 				LIST_REMOVE(mctx, entry);
258 				free(mctx);
259 				rde_mrt_cnt--;
260 			}
261 		}
262 
263 		if (peer_work_pending() || rde_update_queue_pending() ||
264 		    nexthop_pending() || rib_dump_pending())
265 			timeout = 0;
266 
267 		if (poll(pfd, i, timeout) == -1) {
268 			if (errno == EINTR)
269 				continue;
270 			fatal("poll error");
271 		}
272 
273 		if (handle_pollfd(&pfd[PFD_PIPE_MAIN], ibuf_main) == -1)
274 			fatalx("Lost connection to parent");
275 		else
276 			rde_dispatch_imsg_parent(ibuf_main);
277 
278 		if (handle_pollfd(&pfd[PFD_PIPE_SESSION], ibuf_se) == -1) {
279 			log_warnx("RDE: Lost connection to SE");
280 			imsgbuf_clear(ibuf_se);
281 			free(ibuf_se);
282 			ibuf_se = NULL;
283 		} else
284 			rde_dispatch_imsg_session(ibuf_se);
285 
286 		if (handle_pollfd(&pfd[PFD_PIPE_SESSION_CTL], ibuf_se_ctl) ==
287 		    -1) {
288 			log_warnx("RDE: Lost connection to SE control");
289 			imsgbuf_clear(ibuf_se_ctl);
290 			free(ibuf_se_ctl);
291 			ibuf_se_ctl = NULL;
292 		} else
293 			rde_dispatch_imsg_session(ibuf_se_ctl);
294 
295 		if (handle_pollfd(&pfd[PFD_PIPE_ROA], ibuf_rtr) == -1) {
296 			log_warnx("RDE: Lost connection to ROA");
297 			imsgbuf_clear(ibuf_rtr);
298 			free(ibuf_rtr);
299 			ibuf_rtr = NULL;
300 		} else
301 			rde_dispatch_imsg_rtr(ibuf_rtr);
302 
303 		for (j = PFD_PIPE_COUNT, mctx = LIST_FIRST(&rde_mrts);
304 		    j < i && mctx != 0; j++) {
305 			if (pfd[j].fd == mctx->mrt.fd &&
306 			    pfd[j].revents & POLLOUT)
307 				mrt_write(&mctx->mrt);
308 			mctx = LIST_NEXT(mctx, entry);
309 		}
310 
311 		peer_foreach(rde_dispatch_imsg_peer, NULL);
312 		peer_reaper(NULL);
313 		rib_dump_runner();
314 		nexthop_runner();
315 		if (ibuf_se && imsgbuf_queuelen(ibuf_se) < SESS_MSG_HIGH_MARK) {
316 			for (aid = AID_MIN; aid < AID_MAX; aid++)
317 				rde_update_queue_runner(aid);
318 		}
319 		/* commit pftable once per poll loop */
320 		rde_commit_pftable();
321 	}
322 
323 	/* do not clean up on shutdown on production, it takes ages. */
324 	if (debug)
325 		rde_shutdown();
326 
327 	free_config(conf);
328 	free(pfd);
329 
330 	/* close pipes */
331 	if (ibuf_se) {
332 		imsgbuf_clear(ibuf_se);
333 		close(ibuf_se->fd);
334 		free(ibuf_se);
335 	}
336 	if (ibuf_se_ctl) {
337 		imsgbuf_clear(ibuf_se_ctl);
338 		close(ibuf_se_ctl->fd);
339 		free(ibuf_se_ctl);
340 	}
341 	if (ibuf_rtr) {
342 		imsgbuf_clear(ibuf_rtr);
343 		close(ibuf_rtr->fd);
344 		free(ibuf_rtr);
345 	}
346 	imsgbuf_clear(ibuf_main);
347 	close(ibuf_main->fd);
348 	free(ibuf_main);
349 
350 	while ((mctx = LIST_FIRST(&rde_mrts)) != NULL) {
351 		mrt_clean(&mctx->mrt);
352 		LIST_REMOVE(mctx, entry);
353 		free(mctx);
354 	}
355 
356 	log_info("route decision engine exiting");
357 	exit(0);
358 }
359 
360 struct network_config	netconf_s, netconf_p;
361 struct filterstate	netconf_state;
362 struct filter_set_head	session_set = TAILQ_HEAD_INITIALIZER(session_set);
363 struct filter_set_head	parent_set = TAILQ_HEAD_INITIALIZER(parent_set);
364 
365 void
366 rde_dispatch_imsg_session(struct imsgbuf *imsgbuf)
367 {
368 	static struct flowspec	*curflow;
369 	struct imsg		 imsg;
370 	struct ibuf		 ibuf;
371 	struct rde_peer_stats	 stats;
372 	struct ctl_show_set	 cset;
373 	struct ctl_show_rib	 csr;
374 	struct ctl_show_rib_request	req;
375 	struct session_up	 sup;
376 	struct peer_config	 pconf;
377 	struct rde_peer		*peer;
378 	struct rde_aspath	*asp;
379 	struct filter_set	*s;
380 	struct as_set		*aset;
381 	struct rde_prefixset	*pset;
382 	ssize_t			 n;
383 	uint32_t		 peerid;
384 	pid_t			 pid;
385 	int			 verbose;
386 	uint8_t			 aid;
387 
388 	while (imsgbuf) {
389 		if ((n = imsg_get(imsgbuf, &imsg)) == -1)
390 			fatal("rde_dispatch_imsg_session: imsg_get error");
391 		if (n == 0)
392 			break;
393 
394 		peerid = imsg_get_id(&imsg);
395 		pid = imsg_get_pid(&imsg);
396 		switch (imsg_get_type(&imsg)) {
397 		case IMSG_UPDATE:
398 		case IMSG_REFRESH:
399 			if ((peer = peer_get(peerid)) == NULL) {
400 				log_warnx("rde_dispatch: unknown peer id %d",
401 				    peerid);
402 				break;
403 			}
404 			if (peer_is_up(peer))
405 				peer_imsg_push(peer, &imsg);
406 			break;
407 		case IMSG_SESSION_ADD:
408 			if (imsg_get_data(&imsg, &pconf, sizeof(pconf)) == -1)
409 				fatalx("incorrect size of session request");
410 			peer = peer_add(peerid, &pconf, out_rules);
411 			/* make sure rde_eval_all is on if needed. */
412 			if (peer->conf.flags & PEERFLAG_EVALUATE_ALL)
413 				rde_eval_all = 1;
414 			break;
415 		case IMSG_SESSION_UP:
416 			if ((peer = peer_get(peerid)) == NULL) {
417 				log_warnx("%s: unknown peer id %d",
418 				    "IMSG_SESSION_UP", peerid);
419 				break;
420 			}
421 			if (imsg_get_data(&imsg, &sup, sizeof(sup)) == -1)
422 				fatalx("incorrect size of session request");
423 			peer_up(peer, &sup);
424 			/* make sure rde_eval_all is on if needed. */
425 			if (peer_has_add_path(peer, AID_UNSPEC, CAPA_AP_SEND))
426 				rde_eval_all = 1;
427 			break;
428 		case IMSG_SESSION_DOWN:
429 			if ((peer = peer_get(peerid)) == NULL) {
430 				log_warnx("%s: unknown peer id %d",
431 				    "IMSG_SESSION_DOWN", peerid);
432 				break;
433 			}
434 			peer_down(peer);
435 			break;
436 		case IMSG_SESSION_DELETE:
437 			/* silently ignore deletes for unknown peers */
438 			if ((peer = peer_get(peerid)) == NULL)
439 				break;
440 			peer_delete(peer);
441 			break;
442 		case IMSG_SESSION_STALE:
443 		case IMSG_SESSION_NOGRACE:
444 		case IMSG_SESSION_FLUSH:
445 		case IMSG_SESSION_RESTARTED:
446 			if ((peer = peer_get(peerid)) == NULL) {
447 				log_warnx("%s: unknown peer id %d",
448 				    "graceful restart", peerid);
449 				break;
450 			}
451 			if (imsg_get_data(&imsg, &aid, sizeof(aid)) == -1) {
452 				log_warnx("%s: wrong imsg len", __func__);
453 				break;
454 			}
455 			if (aid < AID_MIN || aid >= AID_MAX) {
456 				log_warnx("%s: bad AID", __func__);
457 				break;
458 			}
459 
460 			switch (imsg_get_type(&imsg)) {
461 			case IMSG_SESSION_STALE:
462 				peer_stale(peer, aid, 0);
463 				break;
464 			case IMSG_SESSION_NOGRACE:
465 				peer_stale(peer, aid, 1);
466 				break;
467 			case IMSG_SESSION_FLUSH:
468 				peer_flush(peer, aid, peer->staletime[aid]);
469 				break;
470 			case IMSG_SESSION_RESTARTED:
471 				if (peer->staletime[aid])
472 					peer_flush(peer, aid,
473 					    peer->staletime[aid]);
474 				break;
475 			}
476 			break;
477 		case IMSG_NETWORK_ADD:
478 			if (imsg_get_data(&imsg, &netconf_s,
479 			    sizeof(netconf_s)) == -1) {
480 				log_warnx("rde_dispatch: wrong imsg len");
481 				break;
482 			}
483 			TAILQ_INIT(&netconf_s.attrset);
484 			rde_filterstate_init(&netconf_state);
485 			asp = &netconf_state.aspath;
486 			asp->aspath = aspath_get(NULL, 0);
487 			asp->origin = ORIGIN_IGP;
488 			asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH |
489 			    F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED |
490 			    F_ANN_DYNAMIC;
491 			break;
492 		case IMSG_NETWORK_ASPATH:
493 			if (imsg_get_ibuf(&imsg, &ibuf) == -1) {
494 				log_warnx("rde_dispatch: bad imsg");
495 				memset(&netconf_s, 0, sizeof(netconf_s));
496 				break;
497 			}
498 			if (ibuf_get(&ibuf, &csr, sizeof(csr)) == -1) {
499 				log_warnx("rde_dispatch: wrong imsg len");
500 				memset(&netconf_s, 0, sizeof(netconf_s));
501 				break;
502 			}
503 			asp = &netconf_state.aspath;
504 			asp->lpref = csr.local_pref;
505 			asp->med = csr.med;
506 			asp->weight = csr.weight;
507 			asp->flags = csr.flags;
508 			asp->origin = csr.origin;
509 			asp->flags |= F_PREFIX_ANNOUNCED | F_ANN_DYNAMIC;
510 			aspath_put(asp->aspath);
511 			asp->aspath = aspath_get(ibuf_data(&ibuf),
512 			    ibuf_size(&ibuf));
513 			break;
514 		case IMSG_NETWORK_ATTR:
515 			/* parse optional path attributes */
516 			if (imsg_get_ibuf(&imsg, &ibuf) == -1 ||
517 			    rde_attr_add(&netconf_state, &ibuf) == -1) {
518 				log_warnx("rde_dispatch: bad network "
519 				    "attribute");
520 				rde_filterstate_clean(&netconf_state);
521 				memset(&netconf_s, 0, sizeof(netconf_s));
522 				break;
523 			}
524 			break;
525 		case IMSG_NETWORK_DONE:
526 			TAILQ_CONCAT(&netconf_s.attrset, &session_set, entry);
527 			switch (netconf_s.prefix.aid) {
528 			case AID_INET:
529 				if (netconf_s.prefixlen > 32)
530 					goto badnet;
531 				network_add(&netconf_s, &netconf_state);
532 				break;
533 			case AID_INET6:
534 				if (netconf_s.prefixlen > 128)
535 					goto badnet;
536 				network_add(&netconf_s, &netconf_state);
537 				break;
538 			case 0:
539 				/* something failed beforehand */
540 				break;
541 			default:
542 badnet:
543 				log_warnx("request to insert invalid network");
544 				break;
545 			}
546 			rde_filterstate_clean(&netconf_state);
547 			break;
548 		case IMSG_NETWORK_REMOVE:
549 			if (imsg_get_data(&imsg, &netconf_s,
550 			    sizeof(netconf_s)) == -1) {
551 				log_warnx("rde_dispatch: wrong imsg len");
552 				break;
553 			}
554 			TAILQ_INIT(&netconf_s.attrset);
555 
556 			switch (netconf_s.prefix.aid) {
557 			case AID_INET:
558 				if (netconf_s.prefixlen > 32)
559 					goto badnetdel;
560 				network_delete(&netconf_s);
561 				break;
562 			case AID_INET6:
563 				if (netconf_s.prefixlen > 128)
564 					goto badnetdel;
565 				network_delete(&netconf_s);
566 				break;
567 			default:
568 badnetdel:
569 				log_warnx("request to remove invalid network");
570 				break;
571 			}
572 			break;
573 		case IMSG_NETWORK_FLUSH:
574 			if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC,
575 			    RDE_RUNNER_ROUNDS, NULL, network_flush_upcall,
576 			    NULL, NULL) == -1)
577 				log_warn("rde_dispatch: IMSG_NETWORK_FLUSH");
578 			break;
579 		case IMSG_FLOWSPEC_ADD:
580 			if (curflow != NULL) {
581 				log_warnx("rde_dispatch: "
582 				    "unexpected flowspec add");
583 				break;
584 			}
585 			if (imsg_get_ibuf(&imsg, &ibuf) == -1 ||
586 			    ibuf_size(&ibuf) <= FLOWSPEC_SIZE) {
587 				log_warnx("rde_dispatch: wrong imsg len");
588 				break;
589 			}
590 			curflow = malloc(ibuf_size(&ibuf));
591 			if (curflow == NULL)
592 				fatal(NULL);
593 			memcpy(curflow, ibuf_data(&ibuf), ibuf_size(&ibuf));
594 			if (curflow->len + FLOWSPEC_SIZE != ibuf_size(&ibuf)) {
595 				free(curflow);
596 				curflow = NULL;
597 				log_warnx("rde_dispatch: wrong flowspec len");
598 				break;
599 			}
600 			rde_filterstate_init(&netconf_state);
601 			asp = &netconf_state.aspath;
602 			asp->aspath = aspath_get(NULL, 0);
603 			asp->origin = ORIGIN_IGP;
604 			asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH |
605 			    F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED |
606 			    F_ANN_DYNAMIC;
607 			break;
608 		case IMSG_FLOWSPEC_DONE:
609 			if (curflow == NULL) {
610 				log_warnx("rde_dispatch: "
611 				    "unexpected flowspec done");
612 				break;
613 			}
614 
615 			if (flowspec_valid(curflow->data, curflow->len,
616 			    curflow->aid == AID_FLOWSPECv6) == -1)
617 				log_warnx("invalid flowspec update received "
618 				    "from bgpctl");
619 			else
620 				flowspec_add(curflow, &netconf_state,
621 				    &session_set);
622 
623 			rde_filterstate_clean(&netconf_state);
624 			filterset_free(&session_set);
625 			free(curflow);
626 			curflow = NULL;
627 			break;
628 		case IMSG_FLOWSPEC_REMOVE:
629 			if (curflow != NULL) {
630 				log_warnx("rde_dispatch: "
631 				    "unexpected flowspec remove");
632 				break;
633 			}
634 			if (imsg_get_ibuf(&imsg, &ibuf) == -1 ||
635 			    ibuf_size(&ibuf) <= FLOWSPEC_SIZE) {
636 				log_warnx("rde_dispatch: wrong imsg len");
637 				break;
638 			}
639 			curflow = malloc(ibuf_size(&ibuf));
640 			if (curflow == NULL)
641 				fatal(NULL);
642 			memcpy(curflow, ibuf_data(&ibuf), ibuf_size(&ibuf));
643 			if (curflow->len + FLOWSPEC_SIZE != ibuf_size(&ibuf)) {
644 				free(curflow);
645 				curflow = NULL;
646 				log_warnx("rde_dispatch: wrong flowspec len");
647 				break;
648 			}
649 
650 			if (flowspec_valid(curflow->data, curflow->len,
651 			    curflow->aid == AID_FLOWSPECv6) == -1)
652 				log_warnx("invalid flowspec withdraw received "
653 				    "from bgpctl");
654 			else
655 				flowspec_delete(curflow);
656 
657 			free(curflow);
658 			curflow = NULL;
659 			break;
660 		case IMSG_FLOWSPEC_FLUSH:
661 			prefix_flowspec_dump(AID_UNSPEC, NULL,
662 			    flowspec_flush_upcall, NULL);
663 			break;
664 		case IMSG_FILTER_SET:
665 			if ((s = malloc(sizeof(struct filter_set))) == NULL)
666 				fatal(NULL);
667 			if (imsg_get_data(&imsg, s, sizeof(struct filter_set))
668 			    == -1) {
669 				log_warnx("rde_dispatch: wrong imsg len");
670 				free(s);
671 				break;
672 			}
673 			if (s->type == ACTION_SET_NEXTHOP) {
674 				s->action.nh_ref =
675 				    nexthop_get(&s->action.nexthop);
676 				s->type = ACTION_SET_NEXTHOP_REF;
677 			}
678 			TAILQ_INSERT_TAIL(&session_set, s, entry);
679 			break;
680 		case IMSG_CTL_SHOW_NETWORK:
681 		case IMSG_CTL_SHOW_RIB:
682 		case IMSG_CTL_SHOW_RIB_PREFIX:
683 			if (imsg_get_data(&imsg, &req, sizeof(req)) == -1) {
684 				log_warnx("rde_dispatch: wrong imsg len");
685 				break;
686 			}
687 			rde_dump_ctx_new(&req, pid, imsg_get_type(&imsg));
688 			break;
689 		case IMSG_CTL_SHOW_FLOWSPEC:
690 			if (imsg_get_data(&imsg, &req, sizeof(req)) == -1) {
691 				log_warnx("rde_dispatch: wrong imsg len");
692 				break;
693 			}
694 			prefix_flowspec_dump(req.aid, &pid,
695 			    flowspec_dump_upcall, flowspec_dump_done);
696 			break;
697 		case IMSG_CTL_SHOW_NEIGHBOR:
698 			peer = peer_get(peerid);
699 			if (peer != NULL)
700 				memcpy(&stats, &peer->stats, sizeof(stats));
701 			else
702 				memset(&stats, 0, sizeof(stats));
703 			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NEIGHBOR,
704 			    peerid, pid, -1, &stats, sizeof(stats));
705 			break;
706 		case IMSG_CTL_SHOW_RIB_MEM:
707 			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_RIB_MEM, 0,
708 			    pid, -1, &rdemem, sizeof(rdemem));
709 			break;
710 		case IMSG_CTL_SHOW_SET:
711 			/* first roa set */
712 			pset = &rde_roa;
713 			memset(&cset, 0, sizeof(cset));
714 			cset.type = ROA_SET;
715 			strlcpy(cset.name, "RPKI ROA", sizeof(cset.name));
716 			cset.lastchange = pset->lastchange;
717 			cset.v4_cnt = pset->th.v4_cnt;
718 			cset.v6_cnt = pset->th.v6_cnt;
719 			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
720 			    pid, -1, &cset, sizeof(cset));
721 
722 			/* then aspa set */
723 			memset(&cset, 0, sizeof(cset));
724 			cset.type = ASPA_SET;
725 			strlcpy(cset.name, "RPKI ASPA", sizeof(cset.name));
726 			aspa_table_stats(rde_aspa, &cset);
727 			imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
728 			    pid, -1, &cset, sizeof(cset));
729 
730 			SIMPLEQ_FOREACH(aset, &conf->as_sets, entry) {
731 				memset(&cset, 0, sizeof(cset));
732 				cset.type = ASNUM_SET;
733 				strlcpy(cset.name, aset->name,
734 				    sizeof(cset.name));
735 				cset.lastchange = aset->lastchange;
736 				cset.as_cnt = set_nmemb(aset->set);
737 				imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
738 				    pid, -1, &cset, sizeof(cset));
739 			}
740 			SIMPLEQ_FOREACH(pset, &conf->rde_prefixsets, entry) {
741 				memset(&cset, 0, sizeof(cset));
742 				cset.type = PREFIX_SET;
743 				strlcpy(cset.name, pset->name,
744 				    sizeof(cset.name));
745 				cset.lastchange = pset->lastchange;
746 				cset.v4_cnt = pset->th.v4_cnt;
747 				cset.v6_cnt = pset->th.v6_cnt;
748 				imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
749 				    pid, -1, &cset, sizeof(cset));
750 			}
751 			SIMPLEQ_FOREACH(pset, &conf->rde_originsets, entry) {
752 				memset(&cset, 0, sizeof(cset));
753 				cset.type = ORIGIN_SET;
754 				strlcpy(cset.name, pset->name,
755 				    sizeof(cset.name));
756 				cset.lastchange = pset->lastchange;
757 				cset.v4_cnt = pset->th.v4_cnt;
758 				cset.v6_cnt = pset->th.v6_cnt;
759 				imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_SET, 0,
760 				    pid, -1, &cset, sizeof(cset));
761 			}
762 			imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, pid,
763 			    -1, NULL, 0);
764 			break;
765 		case IMSG_CTL_LOG_VERBOSE:
766 			/* already checked by SE */
767 			if (imsg_get_data(&imsg, &verbose, sizeof(verbose)) ==
768 			    -1) {
769 				log_warnx("rde_dispatch: wrong imsg len");
770 				break;
771 			}
772 			log_setverbose(verbose);
773 			break;
774 		case IMSG_CTL_END:
775 			imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, pid,
776 			    -1, NULL, 0);
777 			break;
778 		case IMSG_CTL_TERMINATE:
779 			rde_dump_ctx_terminate(pid);
780 			break;
781 		case IMSG_XON:
782 			if (peerid) {
783 				peer = peer_get(peerid);
784 				if (peer)
785 					peer->throttled = 0;
786 			} else {
787 				rde_dump_ctx_throttle(pid, 0);
788 			}
789 			break;
790 		case IMSG_XOFF:
791 			if (peerid) {
792 				peer = peer_get(peerid);
793 				if (peer)
794 					peer->throttled = 1;
795 			} else {
796 				rde_dump_ctx_throttle(pid, 1);
797 			}
798 			break;
799 		case IMSG_RECONF_DRAIN:
800 			imsg_compose(ibuf_se, IMSG_RECONF_DRAIN, 0, 0,
801 			    -1, NULL, 0);
802 			break;
803 		default:
804 			break;
805 		}
806 		imsg_free(&imsg);
807 	}
808 }
809 
810 void
811 rde_dispatch_imsg_parent(struct imsgbuf *imsgbuf)
812 {
813 	static struct rde_prefixset	*last_prefixset;
814 	static struct as_set	*last_as_set;
815 	static struct l3vpn	*vpn;
816 	static struct flowspec	*curflow;
817 	struct imsg		 imsg;
818 	struct ibuf		 ibuf;
819 	struct bgpd_config	 tconf;
820 	struct filterstate	 state;
821 	struct kroute_nexthop	 knext;
822 	struct mrt		 xmrt;
823 	struct prefixset_item	 psi;
824 	struct rde_rib		 rr;
825 	struct roa		 roa;
826 	char			 name[SET_NAME_LEN];
827 	struct imsgbuf		*i;
828 	struct filter_head	*nr;
829 	struct filter_rule	*r;
830 	struct filter_set	*s;
831 	struct rib		*rib;
832 	struct rde_prefixset	*ps;
833 	struct rde_aspath	*asp;
834 	size_t			 nmemb;
835 	int			 n, fd, rv;
836 	uint16_t		 rid;
837 
838 	while (imsgbuf) {
839 		if ((n = imsg_get(imsgbuf, &imsg)) == -1)
840 			fatal("rde_dispatch_imsg_parent: imsg_get error");
841 		if (n == 0)
842 			break;
843 
844 		switch (imsg_get_type(&imsg)) {
845 		case IMSG_SOCKET_CONN:
846 		case IMSG_SOCKET_CONN_CTL:
847 		case IMSG_SOCKET_CONN_RTR:
848 			if ((fd = imsg_get_fd(&imsg)) == -1) {
849 				log_warnx("expected to receive imsg fd "
850 				    "but didn't receive any");
851 				break;
852 			}
853 			if ((i = malloc(sizeof(struct imsgbuf))) == NULL)
854 				fatal(NULL);
855 			if (imsgbuf_init(i, fd) == -1 ||
856 			    imsgbuf_set_maxsize(i, MAX_BGPD_IMSGSIZE) == -1)
857 				fatal(NULL);
858 			switch (imsg_get_type(&imsg)) {
859 			case IMSG_SOCKET_CONN:
860 				if (ibuf_se) {
861 					log_warnx("Unexpected imsg connection "
862 					    "to SE received");
863 					imsgbuf_clear(ibuf_se);
864 					free(ibuf_se);
865 				}
866 				ibuf_se = i;
867 				break;
868 			case IMSG_SOCKET_CONN_CTL:
869 				if (ibuf_se_ctl) {
870 					log_warnx("Unexpected imsg ctl "
871 					    "connection to SE received");
872 					imsgbuf_clear(ibuf_se_ctl);
873 					free(ibuf_se_ctl);
874 				}
875 				ibuf_se_ctl = i;
876 				break;
877 			case IMSG_SOCKET_CONN_RTR:
878 				if (ibuf_rtr) {
879 					log_warnx("Unexpected imsg ctl "
880 					    "connection to ROA received");
881 					imsgbuf_clear(ibuf_rtr);
882 					free(ibuf_rtr);
883 				}
884 				ibuf_rtr = i;
885 				break;
886 			}
887 			break;
888 		case IMSG_NETWORK_ADD:
889 			if (imsg_get_data(&imsg, &netconf_p,
890 			    sizeof(netconf_p)) == -1) {
891 				log_warnx("rde_dispatch: wrong imsg len");
892 				break;
893 			}
894 			TAILQ_INIT(&netconf_p.attrset);
895 			break;
896 		case IMSG_NETWORK_DONE:
897 			TAILQ_CONCAT(&netconf_p.attrset, &parent_set, entry);
898 
899 			rde_filterstate_init(&state);
900 			asp = &state.aspath;
901 			asp->aspath = aspath_get(NULL, 0);
902 			asp->origin = ORIGIN_IGP;
903 			asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH |
904 			    F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED;
905 
906 			network_add(&netconf_p, &state);
907 			rde_filterstate_clean(&state);
908 			break;
909 		case IMSG_NETWORK_REMOVE:
910 			if (imsg_get_data(&imsg, &netconf_p,
911 			    sizeof(netconf_p)) == -1) {
912 				log_warnx("rde_dispatch: wrong imsg len");
913 				break;
914 			}
915 			TAILQ_INIT(&netconf_p.attrset);
916 			network_delete(&netconf_p);
917 			break;
918 		case IMSG_FLOWSPEC_ADD:
919 			if (curflow != NULL) {
920 				log_warnx("rde_dispatch: "
921 				    "unexpected flowspec add");
922 				break;
923 			}
924 			if (imsg_get_ibuf(&imsg, &ibuf) == -1 ||
925 			    ibuf_size(&ibuf) <= FLOWSPEC_SIZE) {
926 				log_warnx("rde_dispatch: wrong imsg len");
927 				break;
928 			}
929 			curflow = malloc(ibuf_size(&ibuf));
930 			if (curflow == NULL)
931 				fatal(NULL);
932 			memcpy(curflow, ibuf_data(&ibuf), ibuf_size(&ibuf));
933 			if (curflow->len + FLOWSPEC_SIZE != ibuf_size(&ibuf)) {
934 				free(curflow);
935 				curflow = NULL;
936 				log_warnx("rde_dispatch: wrong flowspec len");
937 				break;
938 			}
939 			break;
940 		case IMSG_FLOWSPEC_DONE:
941 			if (curflow == NULL) {
942 				log_warnx("rde_dispatch: "
943 				    "unexpected flowspec done");
944 				break;
945 			}
946 
947 			rde_filterstate_init(&state);
948 			asp = &state.aspath;
949 			asp->aspath = aspath_get(NULL, 0);
950 			asp->origin = ORIGIN_IGP;
951 			asp->flags = F_ATTR_ORIGIN | F_ATTR_ASPATH |
952 			    F_ATTR_LOCALPREF | F_PREFIX_ANNOUNCED;
953 
954 			if (flowspec_valid(curflow->data, curflow->len,
955 			    curflow->aid == AID_FLOWSPECv6) == -1)
956 				log_warnx("invalid flowspec update received "
957 				    "from parent");
958 			else
959 				flowspec_add(curflow, &state, &parent_set);
960 
961 			rde_filterstate_clean(&state);
962 			filterset_free(&parent_set);
963 			free(curflow);
964 			curflow = NULL;
965 			break;
966 		case IMSG_FLOWSPEC_REMOVE:
967 			if (curflow != NULL) {
968 				log_warnx("rde_dispatch: "
969 				    "unexpected flowspec remove");
970 				break;
971 			}
972 			if (imsg_get_ibuf(&imsg, &ibuf) == -1 ||
973 			    ibuf_size(&ibuf) <= FLOWSPEC_SIZE) {
974 				log_warnx("rde_dispatch: wrong imsg len");
975 				break;
976 			}
977 			curflow = malloc(ibuf_size(&ibuf));
978 			if (curflow == NULL)
979 				fatal(NULL);
980 			memcpy(curflow, ibuf_data(&ibuf), ibuf_size(&ibuf));
981 			if (curflow->len + FLOWSPEC_SIZE != ibuf_size(&ibuf)) {
982 				free(curflow);
983 				curflow = NULL;
984 				log_warnx("rde_dispatch: wrong flowspec len");
985 				break;
986 			}
987 
988 			if (flowspec_valid(curflow->data, curflow->len,
989 			    curflow->aid == AID_FLOWSPECv6) == -1)
990 				log_warnx("invalid flowspec withdraw received "
991 				    "from parent");
992 			else
993 				flowspec_delete(curflow);
994 
995 			free(curflow);
996 			curflow = NULL;
997 			break;
998 		case IMSG_RECONF_CONF:
999 			if (imsg_get_data(&imsg, &tconf, sizeof(tconf)) == -1)
1000 				fatalx("IMSG_RECONF_CONF bad len");
1001 			out_rules_tmp = calloc(1, sizeof(struct filter_head));
1002 			if (out_rules_tmp == NULL)
1003 				fatal(NULL);
1004 			TAILQ_INIT(out_rules_tmp);
1005 			nconf = new_config();
1006 			copy_config(nconf, &tconf);
1007 
1008 			for (rid = 0; rid < rib_size; rid++) {
1009 				if ((rib = rib_byid(rid)) == NULL)
1010 					continue;
1011 				rib->state = RECONF_DELETE;
1012 				rib->fibstate = RECONF_NONE;
1013 			}
1014 			break;
1015 		case IMSG_RECONF_RIB:
1016 			if (imsg_get_data(&imsg, &rr, sizeof(rr)) == -1)
1017 				fatalx("IMSG_RECONF_RIB bad len");
1018 			rib = rib_byid(rib_find(rr.name));
1019 			if (rib == NULL) {
1020 				rib = rib_new(rr.name, rr.rtableid, rr.flags);
1021 			} else if (rib->flags == rr.flags &&
1022 			    rib->rtableid == rr.rtableid) {
1023 				/* no change to rib apart from filters */
1024 				rib->state = RECONF_KEEP;
1025 			} else {
1026 				/* reload rib because something changed */
1027 				rib->flags_tmp = rr.flags;
1028 				rib->rtableid_tmp = rr.rtableid;
1029 				rib->state = RECONF_RELOAD;
1030 			}
1031 			break;
1032 		case IMSG_RECONF_FILTER:
1033 			if ((r = malloc(sizeof(struct filter_rule))) == NULL)
1034 				fatal(NULL);
1035 			if (imsg_get_data(&imsg, r, sizeof(*r)) == -1)
1036 				fatalx("IMSG_RECONF_FILTER bad len");
1037 			if (r->match.prefixset.name[0] != '\0') {
1038 				r->match.prefixset.ps =
1039 				    rde_find_prefixset(r->match.prefixset.name,
1040 					&nconf->rde_prefixsets);
1041 				if (r->match.prefixset.ps == NULL)
1042 					log_warnx("%s: no prefixset for %s",
1043 					    __func__, r->match.prefixset.name);
1044 			}
1045 			if (r->match.originset.name[0] != '\0') {
1046 				r->match.originset.ps =
1047 				    rde_find_prefixset(r->match.originset.name,
1048 					&nconf->rde_originsets);
1049 				if (r->match.originset.ps == NULL)
1050 					log_warnx("%s: no origin-set for %s",
1051 					    __func__, r->match.originset.name);
1052 			}
1053 			if (r->match.as.flags & AS_FLAG_AS_SET_NAME) {
1054 				struct as_set * aset;
1055 
1056 				aset = as_sets_lookup(&nconf->as_sets,
1057 				    r->match.as.name);
1058 				if (aset == NULL) {
1059 					log_warnx("%s: no as-set for %s",
1060 					    __func__, r->match.as.name);
1061 				} else {
1062 					r->match.as.flags = AS_FLAG_AS_SET;
1063 					r->match.as.aset = aset;
1064 				}
1065 			}
1066 			TAILQ_INIT(&r->set);
1067 			TAILQ_CONCAT(&r->set, &parent_set, entry);
1068 			if ((rib = rib_byid(rib_find(r->rib))) == NULL) {
1069 				log_warnx("IMSG_RECONF_FILTER: filter rule "
1070 				    "for nonexistent rib %s", r->rib);
1071 				filterset_free(&r->set);
1072 				free(r);
1073 				break;
1074 			}
1075 			r->peer.ribid = rib->id;
1076 			if (r->dir == DIR_IN) {
1077 				nr = rib->in_rules_tmp;
1078 				if (nr == NULL) {
1079 					nr = calloc(1,
1080 					    sizeof(struct filter_head));
1081 					if (nr == NULL)
1082 						fatal(NULL);
1083 					TAILQ_INIT(nr);
1084 					rib->in_rules_tmp = nr;
1085 				}
1086 				TAILQ_INSERT_TAIL(nr, r, entry);
1087 			} else {
1088 				TAILQ_INSERT_TAIL(out_rules_tmp, r, entry);
1089 			}
1090 			break;
1091 		case IMSG_RECONF_PREFIX_SET:
1092 		case IMSG_RECONF_ORIGIN_SET:
1093 			ps = calloc(1, sizeof(struct rde_prefixset));
1094 			if (ps == NULL)
1095 				fatal(NULL);
1096 			if (imsg_get_data(&imsg, ps->name, sizeof(ps->name)) ==
1097 			    -1)
1098 				fatalx("IMSG_RECONF_PREFIX_SET bad len");
1099 			if (imsg_get_type(&imsg) == IMSG_RECONF_ORIGIN_SET) {
1100 				SIMPLEQ_INSERT_TAIL(&nconf->rde_originsets, ps,
1101 				    entry);
1102 			} else {
1103 				SIMPLEQ_INSERT_TAIL(&nconf->rde_prefixsets, ps,
1104 				    entry);
1105 			}
1106 			last_prefixset = ps;
1107 			break;
1108 		case IMSG_RECONF_ROA_ITEM:
1109 			if (imsg_get_data(&imsg, &roa, sizeof(roa)) == -1)
1110 				fatalx("IMSG_RECONF_ROA_ITEM bad len");
1111 			rv = trie_roa_add(&last_prefixset->th, &roa);
1112 			break;
1113 		case IMSG_RECONF_PREFIX_SET_ITEM:
1114 			if (imsg_get_data(&imsg, &psi, sizeof(psi)) == -1)
1115 				fatalx("IMSG_RECONF_PREFIX_SET_ITEM bad len");
1116 			if (last_prefixset == NULL)
1117 				fatalx("King Bula has no prefixset");
1118 			rv = trie_add(&last_prefixset->th,
1119 			    &psi.p.addr, psi.p.len,
1120 			    psi.p.len_min, psi.p.len_max);
1121 			if (rv == -1)
1122 				log_warnx("trie_add(%s) %s/%u failed",
1123 				    last_prefixset->name, log_addr(&psi.p.addr),
1124 				    psi.p.len);
1125 			break;
1126 		case IMSG_RECONF_AS_SET:
1127 			if (imsg_get_ibuf(&imsg, &ibuf) == -1 ||
1128 			    ibuf_get(&ibuf, &nmemb, sizeof(nmemb)) == -1 ||
1129 			    ibuf_get(&ibuf, name, sizeof(name)) == -1)
1130 				fatalx("IMSG_RECONF_AS_SET bad len");
1131 			if (as_sets_lookup(&nconf->as_sets, name) != NULL)
1132 				fatalx("duplicate as-set %s", name);
1133 			last_as_set = as_sets_new(&nconf->as_sets, name, nmemb,
1134 			    sizeof(uint32_t));
1135 			break;
1136 		case IMSG_RECONF_AS_SET_ITEMS:
1137 			if (imsg_get_ibuf(&imsg, &ibuf) == -1 ||
1138 			    ibuf_size(&ibuf) == 0 ||
1139 			    ibuf_size(&ibuf) % sizeof(uint32_t) != 0)
1140 				fatalx("IMSG_RECONF_AS_SET_ITEMS bad len");
1141 			nmemb = ibuf_size(&ibuf) / sizeof(uint32_t);
1142 			if (set_add(last_as_set->set, ibuf_data(&ibuf),
1143 			    nmemb) != 0)
1144 				fatal(NULL);
1145 			break;
1146 		case IMSG_RECONF_AS_SET_DONE:
1147 			set_prep(last_as_set->set);
1148 			last_as_set = NULL;
1149 			break;
1150 		case IMSG_RECONF_VPN:
1151 			if ((vpn = malloc(sizeof(*vpn))) == NULL)
1152 				fatal(NULL);
1153 			if (imsg_get_data(&imsg, vpn, sizeof(*vpn)) == -1)
1154 				fatalx("IMSG_RECONF_VPN bad len");
1155 			TAILQ_INIT(&vpn->import);
1156 			TAILQ_INIT(&vpn->export);
1157 			TAILQ_INIT(&vpn->net_l);
1158 			SIMPLEQ_INSERT_TAIL(&nconf->l3vpns, vpn, entry);
1159 			break;
1160 		case IMSG_RECONF_VPN_EXPORT:
1161 			if (vpn == NULL) {
1162 				log_warnx("rde_dispatch_imsg_parent: "
1163 				    "IMSG_RECONF_VPN_EXPORT unexpected");
1164 				break;
1165 			}
1166 			TAILQ_CONCAT(&vpn->export, &parent_set, entry);
1167 			break;
1168 		case IMSG_RECONF_VPN_IMPORT:
1169 			if (vpn == NULL) {
1170 				log_warnx("rde_dispatch_imsg_parent: "
1171 				    "IMSG_RECONF_VPN_IMPORT unexpected");
1172 				break;
1173 			}
1174 			TAILQ_CONCAT(&vpn->import, &parent_set, entry);
1175 			break;
1176 		case IMSG_RECONF_VPN_DONE:
1177 			break;
1178 		case IMSG_RECONF_DRAIN:
1179 			imsg_compose(ibuf_main, IMSG_RECONF_DRAIN, 0, 0,
1180 			    -1, NULL, 0);
1181 			break;
1182 		case IMSG_RECONF_DONE:
1183 			if (nconf == NULL)
1184 				fatalx("got IMSG_RECONF_DONE but no config");
1185 			last_prefixset = NULL;
1186 
1187 			rde_reload_done();
1188 			break;
1189 		case IMSG_NEXTHOP_UPDATE:
1190 			if (imsg_get_data(&imsg, &knext, sizeof(knext)) == -1)
1191 				fatalx("IMSG_NEXTHOP_UPDATE bad len");
1192 			nexthop_update(&knext);
1193 			break;
1194 		case IMSG_FILTER_SET:
1195 			if ((s = malloc(sizeof(*s))) == NULL)
1196 				fatal(NULL);
1197 			if (imsg_get_data(&imsg, s, sizeof(*s)) == -1)
1198 				fatalx("IMSG_FILTER_SET bad len");
1199 			if (s->type == ACTION_SET_NEXTHOP) {
1200 				s->action.nh_ref =
1201 				    nexthop_get(&s->action.nexthop);
1202 				s->type = ACTION_SET_NEXTHOP_REF;
1203 			}
1204 			TAILQ_INSERT_TAIL(&parent_set, s, entry);
1205 			break;
1206 		case IMSG_MRT_OPEN:
1207 		case IMSG_MRT_REOPEN:
1208 			if (imsg_get_data(&imsg, &xmrt, sizeof(xmrt)) == -1) {
1209 				log_warnx("wrong imsg len");
1210 				break;
1211 			}
1212 			if ((fd = imsg_get_fd(&imsg)) == -1)
1213 				log_warnx("expected to receive fd for mrt dump "
1214 				    "but didn't receive any");
1215 			else if (xmrt.type == MRT_TABLE_DUMP ||
1216 			    xmrt.type == MRT_TABLE_DUMP_MP ||
1217 			    xmrt.type == MRT_TABLE_DUMP_V2) {
1218 				rde_dump_mrt_new(&xmrt, imsg_get_pid(&imsg),
1219 				    fd);
1220 			} else
1221 				close(fd);
1222 			break;
1223 		case IMSG_MRT_CLOSE:
1224 			/* ignore end message because a dump is atomic */
1225 			break;
1226 		default:
1227 			fatalx("unhandled IMSG %u", imsg_get_type(&imsg));
1228 		}
1229 		imsg_free(&imsg);
1230 	}
1231 }
1232 
1233 void
1234 rde_dispatch_imsg_rtr(struct imsgbuf *imsgbuf)
1235 {
1236 	static struct aspa_set	*aspa;
1237 	struct imsg		 imsg;
1238 	struct roa		 roa;
1239 	struct aspa_prep	 ap;
1240 	int			 n;
1241 
1242 	while (imsgbuf) {
1243 		if ((n = imsg_get(imsgbuf, &imsg)) == -1)
1244 			fatal("rde_dispatch_imsg_parent: imsg_get error");
1245 		if (n == 0)
1246 			break;
1247 
1248 		switch (imsg_get_type(&imsg)) {
1249 		case IMSG_RECONF_ROA_SET:
1250 			/* start of update */
1251 			trie_free(&roa_new.th);	/* clear new roa */
1252 			break;
1253 		case IMSG_RECONF_ROA_ITEM:
1254 			if (imsg_get_data(&imsg, &roa, sizeof(roa)) == -1)
1255 				fatalx("IMSG_RECONF_ROA_ITEM bad len");
1256 			if (trie_roa_add(&roa_new.th, &roa) != 0) {
1257 #if defined(__GNUC__) && __GNUC__ < 4
1258 				struct bgpd_addr p = {
1259 					.aid = roa.aid
1260 				};
1261 				p.v6 = roa.prefix.inet6;
1262 #else
1263 				struct bgpd_addr p = {
1264 					.aid = roa.aid,
1265 					.v6 = roa.prefix.inet6
1266 				};
1267 #endif
1268 				log_warnx("trie_roa_add %s/%u failed",
1269 				    log_addr(&p), roa.prefixlen);
1270 			}
1271 			break;
1272 		case IMSG_RECONF_ASPA_PREP:
1273 			if (imsg_get_data(&imsg, &ap, sizeof(ap)) == -1)
1274 				fatalx("IMSG_RECONF_ASPA_PREP bad len");
1275 			if (aspa_new)
1276 				fatalx("unexpected IMSG_RECONF_ASPA_PREP");
1277 			aspa_new = aspa_table_prep(ap.entries, ap.datasize);
1278 			break;
1279 		case IMSG_RECONF_ASPA:
1280 			if (aspa_new == NULL)
1281 				fatalx("unexpected IMSG_RECONF_ASPA");
1282 			if (aspa != NULL)
1283 				fatalx("IMSG_RECONF_ASPA already sent");
1284 			if ((aspa = calloc(1, sizeof(*aspa))) == NULL)
1285 				fatal("IMSG_RECONF_ASPA");
1286 			if (imsg_get_data(&imsg, aspa,
1287 			    offsetof(struct aspa_set, tas)) == -1)
1288 				fatal("IMSG_RECONF_ASPA bad len");
1289 			break;
1290 		case IMSG_RECONF_ASPA_TAS:
1291 			if (aspa == NULL)
1292 				fatalx("unexpected IMSG_RECONF_ASPA_TAS");
1293 			aspa->tas = reallocarray(NULL, aspa->num,
1294 			    sizeof(uint32_t));
1295 			if (aspa->tas == NULL)
1296 				fatal("IMSG_RECONF_ASPA_TAS");
1297 			if (imsg_get_data(&imsg, aspa->tas,
1298 			    aspa->num * sizeof(uint32_t)) == -1)
1299 				fatal("IMSG_RECONF_ASPA_TAS bad len");
1300 			break;
1301 		case IMSG_RECONF_ASPA_DONE:
1302 			if (aspa_new == NULL)
1303 				fatalx("unexpected IMSG_RECONF_ASPA");
1304 			aspa_add_set(aspa_new, aspa->as, aspa->tas,
1305 			    aspa->num);
1306 			free_aspa(aspa);
1307 			aspa = NULL;
1308 			break;
1309 		case IMSG_RECONF_DONE:
1310 			/* end of update */
1311 			if (rde_roa_reload() + rde_aspa_reload() != 0)
1312 				rde_rpki_reload();
1313 			break;
1314 		}
1315 		imsg_free(&imsg);
1316 	}
1317 }
1318 
1319 void
1320 rde_dispatch_imsg_peer(struct rde_peer *peer, void *bula)
1321 {
1322 	struct route_refresh rr;
1323 	struct imsg imsg;
1324 	struct ibuf ibuf;
1325 
1326 	if (!peer_is_up(peer)) {
1327 		peer_imsg_flush(peer);
1328 		return;
1329 	}
1330 
1331 	if (!peer_imsg_pop(peer, &imsg))
1332 		return;
1333 
1334 	switch (imsg_get_type(&imsg)) {
1335 	case IMSG_UPDATE:
1336 		if (imsg_get_ibuf(&imsg, &ibuf) == -1)
1337 			log_warn("update: bad imsg");
1338 		else
1339 			rde_update_dispatch(peer, &ibuf);
1340 		break;
1341 	case IMSG_REFRESH:
1342 		if (imsg_get_data(&imsg, &rr, sizeof(rr)) == -1) {
1343 			log_warnx("route refresh: wrong imsg len");
1344 			break;
1345 		}
1346 		if (rr.aid < AID_MIN || rr.aid >= AID_MAX) {
1347 			log_peer_warnx(&peer->conf,
1348 			    "route refresh: bad AID %d", rr.aid);
1349 			break;
1350 		}
1351 		if (peer->capa.mp[rr.aid] == 0) {
1352 			log_peer_warnx(&peer->conf,
1353 			    "route refresh: AID %s not negotiated",
1354 			    aid2str(rr.aid));
1355 			break;
1356 		}
1357 		switch (rr.subtype) {
1358 		case ROUTE_REFRESH_REQUEST:
1359 			peer_blast(peer, rr.aid);
1360 			break;
1361 		case ROUTE_REFRESH_BEGIN_RR:
1362 			/* check if graceful restart EOR was received */
1363 			if ((peer->recv_eor & (1 << rr.aid)) == 0) {
1364 				log_peer_warnx(&peer->conf,
1365 				    "received %s BoRR before EoR",
1366 				    aid2str(rr.aid));
1367 				break;
1368 			}
1369 			peer_begin_rrefresh(peer, rr.aid);
1370 			break;
1371 		case ROUTE_REFRESH_END_RR:
1372 			if ((peer->recv_eor & (1 << rr.aid)) != 0 &&
1373 			    peer->staletime[rr.aid])
1374 				peer_flush(peer, rr.aid,
1375 				    peer->staletime[rr.aid]);
1376 			else
1377 				log_peer_warnx(&peer->conf,
1378 				    "received unexpected %s EoRR",
1379 				    aid2str(rr.aid));
1380 			break;
1381 		default:
1382 			log_peer_warnx(&peer->conf,
1383 			    "route refresh: bad subtype %d", rr.subtype);
1384 			break;
1385 		}
1386 		break;
1387 	default:
1388 		log_warnx("%s: unhandled imsg type %d", __func__,
1389 		    imsg_get_type(&imsg));
1390 		break;
1391 	}
1392 
1393 	imsg_free(&imsg);
1394 }
1395 
1396 /* handle routing updates from the session engine. */
1397 void
1398 rde_update_dispatch(struct rde_peer *peer, struct ibuf *buf)
1399 {
1400 	struct filterstate	 state;
1401 	struct bgpd_addr	 prefix;
1402 	struct ibuf		 wdbuf, attrbuf, nlribuf, reachbuf, unreachbuf;
1403 	uint16_t		 afi, len;
1404 	uint8_t			 aid, prefixlen, safi, subtype;
1405 	uint32_t		 fas, pathid;
1406 
1407 	if (ibuf_get_n16(buf, &len) == -1 ||
1408 	    ibuf_get_ibuf(buf, len, &wdbuf) == -1 ||
1409 	    ibuf_get_n16(buf, &len) == -1 ||
1410 	    ibuf_get_ibuf(buf, len, &attrbuf) == -1 ||
1411 	    ibuf_get_ibuf(buf, ibuf_size(buf), &nlribuf) == -1) {
1412 		rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL);
1413 		return;
1414 	}
1415 
1416 	if (ibuf_size(&attrbuf) == 0) {
1417 		/* 0 = no NLRI information in this message */
1418 		if (ibuf_size(&nlribuf) != 0) {
1419 			/* crap at end of update which should not be there */
1420 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST,
1421 			    NULL);
1422 			return;
1423 		}
1424 		if (ibuf_size(&wdbuf) == 0) {
1425 			/* EoR marker */
1426 			rde_peer_recv_eor(peer, AID_INET);
1427 			return;
1428 		}
1429 	}
1430 
1431 	ibuf_from_buffer(&reachbuf, NULL, 0);
1432 	ibuf_from_buffer(&unreachbuf, NULL, 0);
1433 	rde_filterstate_init(&state);
1434 	if (ibuf_size(&attrbuf) != 0) {
1435 		/* parse path attributes */
1436 		while (ibuf_size(&attrbuf) > 0) {
1437 			if (rde_attr_parse(&attrbuf, peer, &state, &reachbuf,
1438 			    &unreachbuf) == -1)
1439 				goto done;
1440 		}
1441 
1442 		/* check for missing but necessary attributes */
1443 		if ((subtype = rde_attr_missing(&state.aspath, peer->conf.ebgp,
1444 		    ibuf_size(&nlribuf)))) {
1445 			struct ibuf sbuf;
1446 			ibuf_from_buffer(&sbuf, &subtype, sizeof(subtype));
1447 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_MISSNG_WK_ATTR,
1448 			    &sbuf);
1449 			goto done;
1450 		}
1451 
1452 		rde_as4byte_fixup(peer, &state.aspath);
1453 
1454 		/* enforce remote AS if requested */
1455 		if (state.aspath.flags & F_ATTR_ASPATH &&
1456 		    peer->conf.enforce_as == ENFORCE_AS_ON) {
1457 			fas = aspath_neighbor(state.aspath.aspath);
1458 			if (peer->conf.remote_as != fas) {
1459 				log_peer_warnx(&peer->conf, "bad path, "
1460 				    "starting with %s expected %u, "
1461 				    "enforce neighbor-as enabled",
1462 				    log_as(fas), peer->conf.remote_as);
1463 				rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
1464 				    NULL);
1465 				goto done;
1466 			}
1467 		}
1468 
1469 		/* aspath needs to be loop free. This is not a hard error. */
1470 		if (state.aspath.flags & F_ATTR_ASPATH &&
1471 		    peer->conf.ebgp &&
1472 		    peer->conf.enforce_local_as == ENFORCE_AS_ON &&
1473 		    !aspath_loopfree(state.aspath.aspath, peer->conf.local_as))
1474 			state.aspath.flags |= F_ATTR_LOOP;
1475 
1476 		rde_reflector(peer, &state.aspath);
1477 
1478 		/* Cache aspa lookup for all updates from ebgp sessions. */
1479 		if (state.aspath.flags & F_ATTR_ASPATH && peer->conf.ebgp) {
1480 			aspa_validation(rde_aspa, state.aspath.aspath,
1481 			    &state.aspath.aspa_state);
1482 			state.aspath.aspa_generation = rde_aspa_generation;
1483 		}
1484 	}
1485 
1486 	/* withdraw prefix */
1487 	if (ibuf_size(&wdbuf) > 0) {
1488 		if (peer->capa.mp[AID_INET] == 0) {
1489 			log_peer_warnx(&peer->conf,
1490 			    "bad withdraw, %s disabled", aid2str(AID_INET));
1491 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
1492 			    NULL);
1493 			goto done;
1494 		}
1495 	}
1496 	while (ibuf_size(&wdbuf) > 0) {
1497 		if (peer_has_add_path(peer, AID_INET, CAPA_AP_RECV)) {
1498 			if (ibuf_get_n32(&wdbuf, &pathid) == -1) {
1499 				log_peer_warnx(&peer->conf,
1500 				    "bad withdraw prefix");
1501 				rde_update_err(peer, ERR_UPDATE,
1502 				    ERR_UPD_NETWORK, NULL);
1503 				goto done;
1504 			}
1505 		} else
1506 			pathid = 0;
1507 
1508 		if (nlri_get_prefix(&wdbuf, &prefix, &prefixlen) == -1) {
1509 			/*
1510 			 * the RFC does not mention what we should do in
1511 			 * this case. Let's do the same as in the NLRI case.
1512 			 */
1513 			log_peer_warnx(&peer->conf, "bad withdraw prefix");
1514 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
1515 			    NULL);
1516 			goto done;
1517 		}
1518 
1519 		rde_update_withdraw(peer, pathid, &prefix, prefixlen);
1520 	}
1521 
1522 	/* withdraw MP_UNREACH_NLRI if available */
1523 	if (ibuf_size(&unreachbuf) != 0) {
1524 		if (ibuf_get_n16(&unreachbuf, &afi) == -1 ||
1525 		    ibuf_get_n8(&unreachbuf, &safi) == -1 ||
1526 		    afi2aid(afi, safi, &aid) == -1) {
1527 			log_peer_warnx(&peer->conf,
1528 			    "bad AFI/SAFI pair in withdraw");
1529 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1530 			    &unreachbuf);
1531 			goto done;
1532 		}
1533 
1534 		if (peer->capa.mp[aid] == 0) {
1535 			log_peer_warnx(&peer->conf,
1536 			    "bad withdraw, %s disabled", aid2str(aid));
1537 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1538 			    &unreachbuf);
1539 			goto done;
1540 		}
1541 
1542 		if ((state.aspath.flags & ~F_ATTR_MP_UNREACH) == 0 &&
1543 		    ibuf_size(&unreachbuf) == 0) {
1544 			/* EoR marker */
1545 			rde_peer_recv_eor(peer, aid);
1546 		}
1547 
1548 		while (ibuf_size(&unreachbuf) > 0) {
1549 			if (peer_has_add_path(peer, aid, CAPA_AP_RECV)) {
1550 				if (ibuf_get_n32(&unreachbuf,
1551 				    &pathid) == -1) {
1552 					log_peer_warnx(&peer->conf,
1553 					    "bad %s withdraw prefix",
1554 					    aid2str(aid));
1555 					rde_update_err(peer, ERR_UPDATE,
1556 					    ERR_UPD_OPTATTR, &unreachbuf);
1557 					goto done;
1558 				}
1559 			} else
1560 				pathid = 0;
1561 
1562 			switch (aid) {
1563 			case AID_INET6:
1564 				if (nlri_get_prefix6(&unreachbuf,
1565 				    &prefix, &prefixlen) == -1) {
1566 					log_peer_warnx(&peer->conf,
1567 					    "bad IPv6 withdraw prefix");
1568 					rde_update_err(peer, ERR_UPDATE,
1569 					    ERR_UPD_OPTATTR, &unreachbuf);
1570 					goto done;
1571 				}
1572 				break;
1573 			case AID_VPN_IPv4:
1574 				if (nlri_get_vpn4(&unreachbuf,
1575 				    &prefix, &prefixlen, 1) == -1) {
1576 					log_peer_warnx(&peer->conf,
1577 					    "bad VPNv4 withdraw prefix");
1578 					rde_update_err(peer, ERR_UPDATE,
1579 					    ERR_UPD_OPTATTR, &unreachbuf);
1580 					goto done;
1581 				}
1582 				break;
1583 			case AID_VPN_IPv6:
1584 				if (nlri_get_vpn6(&unreachbuf,
1585 				    &prefix, &prefixlen, 1) == -1) {
1586 					log_peer_warnx(&peer->conf,
1587 					    "bad VPNv6 withdraw prefix");
1588 					rde_update_err(peer, ERR_UPDATE,
1589 					    ERR_UPD_OPTATTR, &unreachbuf);
1590 					goto done;
1591 				}
1592 				break;
1593 			case AID_FLOWSPECv4:
1594 			case AID_FLOWSPECv6:
1595 				/* ignore flowspec for now */
1596 			default:
1597 				/* ignore unsupported multiprotocol AF */
1598 				if (ibuf_skip(&unreachbuf,
1599 				    ibuf_size(&unreachbuf)) == -1) {
1600 					log_peer_warnx(&peer->conf,
1601 					    "bad withdraw prefix");
1602 					rde_update_err(peer, ERR_UPDATE,
1603 					    ERR_UPD_OPTATTR, &unreachbuf);
1604 					goto done;
1605 				}
1606 				continue;
1607 			}
1608 
1609 			rde_update_withdraw(peer, pathid, &prefix, prefixlen);
1610 		}
1611 
1612 		if ((state.aspath.flags & ~F_ATTR_MP_UNREACH) == 0)
1613 			goto done;
1614 	}
1615 
1616 	/* parse nlri prefix */
1617 	if (ibuf_size(&nlribuf) > 0) {
1618 		if (peer->capa.mp[AID_INET] == 0) {
1619 			log_peer_warnx(&peer->conf,
1620 			    "bad update, %s disabled", aid2str(AID_INET));
1621 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
1622 			    NULL);
1623 			goto done;
1624 		}
1625 
1626 		/* inject open policy OTC attribute if needed */
1627 		if ((state.aspath.flags & F_ATTR_OTC) == 0) {
1628 			uint32_t tmp;
1629 			switch (peer->role) {
1630 			case ROLE_CUSTOMER:
1631 			case ROLE_RS_CLIENT:
1632 			case ROLE_PEER:
1633 				tmp = htonl(peer->conf.remote_as);
1634 				if (attr_optadd(&state.aspath,
1635 				    ATTR_OPTIONAL|ATTR_TRANSITIVE, ATTR_OTC,
1636 				    &tmp, sizeof(tmp)) == -1) {
1637 					rde_update_err(peer, ERR_UPDATE,
1638 					    ERR_UPD_ATTRLIST, NULL);
1639 					goto done;
1640 				}
1641 				state.aspath.flags |= F_ATTR_OTC;
1642 				break;
1643 			default:
1644 				break;
1645 			}
1646 		}
1647 	}
1648 	while (ibuf_size(&nlribuf) > 0) {
1649 		if (peer_has_add_path(peer, AID_INET, CAPA_AP_RECV)) {
1650 			if (ibuf_get_n32(&nlribuf, &pathid) == -1) {
1651 				log_peer_warnx(&peer->conf,
1652 				    "bad nlri prefix");
1653 				rde_update_err(peer, ERR_UPDATE,
1654 				    ERR_UPD_NETWORK, NULL);
1655 				goto done;
1656 			}
1657 		} else
1658 			pathid = 0;
1659 
1660 		if (nlri_get_prefix(&nlribuf, &prefix, &prefixlen) == -1) {
1661 			log_peer_warnx(&peer->conf, "bad nlri prefix");
1662 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NETWORK,
1663 			    NULL);
1664 			goto done;
1665 		}
1666 
1667 		if (rde_update_update(peer, pathid, &state,
1668 		    &prefix, prefixlen) == -1)
1669 			goto done;
1670 	}
1671 
1672 	/* add MP_REACH_NLRI if available */
1673 	if (ibuf_size(&reachbuf) != 0) {
1674 		if (ibuf_get_n16(&reachbuf, &afi) == -1 ||
1675 		    ibuf_get_n8(&reachbuf, &safi) == -1 ||
1676 		    afi2aid(afi, safi, &aid) == -1) {
1677 			log_peer_warnx(&peer->conf,
1678 			    "bad AFI/SAFI pair in update");
1679 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1680 			    &reachbuf);
1681 			goto done;
1682 		}
1683 
1684 		if (peer->capa.mp[aid] == 0) {
1685 			log_peer_warnx(&peer->conf,
1686 			    "bad update, %s disabled", aid2str(aid));
1687 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1688 			    &reachbuf);
1689 			goto done;
1690 		}
1691 
1692 		if (aid == AID_INET6) {
1693 			/* inject open policy OTC attribute if needed */
1694 			if ((state.aspath.flags & F_ATTR_OTC) == 0) {
1695 				uint32_t tmp;
1696 				switch (peer->role) {
1697 				case ROLE_CUSTOMER:
1698 				case ROLE_RS_CLIENT:
1699 				case ROLE_PEER:
1700 					tmp = htonl(peer->conf.remote_as);
1701 					if (attr_optadd(&state.aspath,
1702 					    ATTR_OPTIONAL|ATTR_TRANSITIVE,
1703 					    ATTR_OTC, &tmp,
1704 					    sizeof(tmp)) == -1) {
1705 						rde_update_err(peer, ERR_UPDATE,
1706 						    ERR_UPD_ATTRLIST, NULL);
1707 						goto done;
1708 					}
1709 					state.aspath.flags |= F_ATTR_OTC;
1710 					break;
1711 				default:
1712 					break;
1713 				}
1714 			}
1715 		} else {
1716 			/* Only IPv4 and IPv6 unicast do OTC handling */
1717 			state.aspath.flags &= ~F_ATTR_OTC_LEAK;
1718 		}
1719 
1720 		/* unlock the previously locked nexthop, it is no longer used */
1721 		nexthop_unref(state.nexthop);
1722 		state.nexthop = NULL;
1723 		if (rde_get_mp_nexthop(&reachbuf, aid, peer, &state) == -1) {
1724 			log_peer_warnx(&peer->conf, "bad nlri nexthop");
1725 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_OPTATTR,
1726 			    &reachbuf);
1727 			goto done;
1728 		}
1729 
1730 		while (ibuf_size(&reachbuf) > 0) {
1731 			if (peer_has_add_path(peer, aid, CAPA_AP_RECV)) {
1732 				if (ibuf_get_n32(&reachbuf, &pathid) == -1) {
1733 					log_peer_warnx(&peer->conf,
1734 					    "bad %s nlri prefix", aid2str(aid));
1735 					rde_update_err(peer, ERR_UPDATE,
1736 					    ERR_UPD_OPTATTR, &reachbuf);
1737 					goto done;
1738 				}
1739 			} else
1740 				pathid = 0;
1741 
1742 			switch (aid) {
1743 			case AID_INET6:
1744 				if (nlri_get_prefix6(&reachbuf,
1745 				    &prefix, &prefixlen) == -1) {
1746 					log_peer_warnx(&peer->conf,
1747 					    "bad IPv6 nlri prefix");
1748 					rde_update_err(peer, ERR_UPDATE,
1749 					    ERR_UPD_OPTATTR, &reachbuf);
1750 					goto done;
1751 				}
1752 				break;
1753 			case AID_VPN_IPv4:
1754 				if (nlri_get_vpn4(&reachbuf,
1755 				    &prefix, &prefixlen, 0) == -1) {
1756 					log_peer_warnx(&peer->conf,
1757 					    "bad VPNv4 nlri prefix");
1758 					rde_update_err(peer, ERR_UPDATE,
1759 					    ERR_UPD_OPTATTR, &reachbuf);
1760 					goto done;
1761 				}
1762 				break;
1763 			case AID_VPN_IPv6:
1764 				if (nlri_get_vpn6(&reachbuf,
1765 				    &prefix, &prefixlen, 0) == -1) {
1766 					log_peer_warnx(&peer->conf,
1767 					    "bad VPNv6 nlri prefix");
1768 					rde_update_err(peer, ERR_UPDATE,
1769 					    ERR_UPD_OPTATTR, &reachbuf);
1770 					goto done;
1771 				}
1772 				break;
1773 			case AID_FLOWSPECv4:
1774 			case AID_FLOWSPECv6:
1775 				/* ignore flowspec for now */
1776 			default:
1777 				/* ignore unsupported multiprotocol AF */
1778 				if (ibuf_skip(&reachbuf,
1779 				    ibuf_size(&reachbuf)) == -1) {
1780 					log_peer_warnx(&peer->conf,
1781 					    "bad nlri prefix");
1782 					rde_update_err(peer, ERR_UPDATE,
1783 					    ERR_UPD_OPTATTR, &reachbuf);
1784 					goto done;
1785 				}
1786 				continue;
1787 			}
1788 
1789 			if (rde_update_update(peer, pathid, &state,
1790 			    &prefix, prefixlen) == -1)
1791 				goto done;
1792 		}
1793 	}
1794 
1795 done:
1796 	rde_filterstate_clean(&state);
1797 }
1798 
1799 /*
1800  * Check if path_id is already in use.
1801  */
1802 static int
1803 pathid_conflict(struct rib_entry *re, uint32_t pathid)
1804 {
1805 	struct prefix *p;
1806 
1807 	if (re == NULL)
1808 		return 0;
1809 
1810 	TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
1811 		if (p->path_id_tx == pathid)
1812 			return 1;
1813 	return 0;
1814 }
1815 
1816 /*
1817  * Assign a send side path_id to all paths.
1818  */
1819 static uint32_t
1820 pathid_assign(struct rde_peer *peer, uint32_t path_id,
1821     struct bgpd_addr *prefix, uint8_t prefixlen)
1822 {
1823 	struct rib_entry *re;
1824 	uint32_t path_id_tx;
1825 
1826 	/* If peer has no add-path use the per peer path_id */
1827 	if (!peer_has_add_path(peer, prefix->aid, CAPA_AP_RECV))
1828 		return peer->path_id_tx;
1829 
1830 	/* peer uses add-path, therefore new path_ids need to be assigned */
1831 	re = rib_get_addr(rib_byid(RIB_ADJ_IN), prefix, prefixlen);
1832 	if (re != NULL) {
1833 		struct prefix *p;
1834 
1835 		p = prefix_bypeer(re, peer, path_id);
1836 		if (p != NULL)
1837 			return p->path_id_tx;
1838 	}
1839 
1840 	/*
1841 	 * Assign new local path_id, must be an odd number.
1842 	 * Even numbers are used by the per peer path_id_tx.
1843 	 */
1844 	do {
1845 		path_id_tx = arc4random() | 1;
1846 	} while (pathid_conflict(re, path_id_tx));
1847 
1848 	return path_id_tx;
1849 }
1850 
1851 int
1852 rde_update_update(struct rde_peer *peer, uint32_t path_id,
1853     struct filterstate *in, struct bgpd_addr *prefix, uint8_t prefixlen)
1854 {
1855 	struct filterstate	 state;
1856 	enum filter_actions	 action;
1857 	uint32_t		 path_id_tx;
1858 	uint16_t		 i;
1859 	uint8_t			 roa_state, aspa_state;
1860 	const char		*wmsg = "filtered, withdraw";
1861 
1862 	peer->stats.prefix_rcvd_update++;
1863 
1864 	roa_state = rde_roa_validity(&rde_roa, prefix, prefixlen,
1865 	    aspath_origin(in->aspath.aspath));
1866 	aspa_state = rde_aspa_validity(peer, &in->aspath, prefix->aid);
1867 	rde_filterstate_set_vstate(in, roa_state, aspa_state);
1868 
1869 	path_id_tx = pathid_assign(peer, path_id, prefix, prefixlen);
1870 	/* add original path to the Adj-RIB-In */
1871 	if (prefix_update(rib_byid(RIB_ADJ_IN), peer, path_id, path_id_tx,
1872 	    in, 0, prefix, prefixlen) == 1)
1873 		peer->stats.prefix_cnt++;
1874 
1875 	/* max prefix checker */
1876 	if (peer->conf.max_prefix &&
1877 	    peer->stats.prefix_cnt > peer->conf.max_prefix) {
1878 		log_peer_warnx(&peer->conf, "prefix limit reached (>%u/%u)",
1879 		    peer->stats.prefix_cnt, peer->conf.max_prefix);
1880 		rde_update_err(peer, ERR_CEASE, ERR_CEASE_MAX_PREFIX, NULL);
1881 		return (-1);
1882 	}
1883 
1884 	if (in->aspath.flags & F_ATTR_PARSE_ERR)
1885 		wmsg = "path invalid, withdraw";
1886 
1887 	for (i = RIB_LOC_START; i < rib_size; i++) {
1888 		struct rib *rib = rib_byid(i);
1889 		if (rib == NULL)
1890 			continue;
1891 		rde_filterstate_copy(&state, in);
1892 		/* input filter */
1893 		action = rde_filter(rib->in_rules, peer, peer, prefix,
1894 		    prefixlen, &state);
1895 
1896 		if (action == ACTION_ALLOW) {
1897 			rde_update_log("update", i, peer,
1898 			    &state.nexthop->exit_nexthop, prefix,
1899 			    prefixlen);
1900 			prefix_update(rib, peer, path_id, path_id_tx, &state,
1901 			    0, prefix, prefixlen);
1902 		} else if (conf->filtered_in_locrib && i == RIB_LOC_START) {
1903 			rde_update_log(wmsg, i, peer, NULL, prefix, prefixlen);
1904 			prefix_update(rib, peer, path_id, path_id_tx, &state,
1905 			    1, prefix, prefixlen);
1906 		} else {
1907 			if (prefix_withdraw(rib, peer, path_id, prefix,
1908 			    prefixlen))
1909 				rde_update_log(wmsg, i, peer,
1910 				    NULL, prefix, prefixlen);
1911 		}
1912 
1913 		rde_filterstate_clean(&state);
1914 	}
1915 	return (0);
1916 }
1917 
1918 void
1919 rde_update_withdraw(struct rde_peer *peer, uint32_t path_id,
1920     struct bgpd_addr *prefix, uint8_t prefixlen)
1921 {
1922 	uint16_t i;
1923 
1924 	for (i = RIB_LOC_START; i < rib_size; i++) {
1925 		struct rib *rib = rib_byid(i);
1926 		if (rib == NULL)
1927 			continue;
1928 		if (prefix_withdraw(rib, peer, path_id, prefix, prefixlen))
1929 			rde_update_log("withdraw", i, peer, NULL, prefix,
1930 			    prefixlen);
1931 	}
1932 
1933 	/* remove original path form the Adj-RIB-In */
1934 	if (prefix_withdraw(rib_byid(RIB_ADJ_IN), peer, path_id,
1935 	    prefix, prefixlen))
1936 		peer->stats.prefix_cnt--;
1937 
1938 	peer->stats.prefix_rcvd_withdraw++;
1939 }
1940 
1941 /*
1942  * BGP UPDATE parser functions
1943  */
1944 
1945 /* attribute parser specific macros */
1946 #define CHECK_FLAGS(s, t, m)	\
1947 	(((s) & ~(ATTR_DEFMASK | (m))) == (t))
1948 
1949 int
1950 rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
1951     struct filterstate *state, struct ibuf *reach, struct ibuf *unreach)
1952 {
1953 	struct bgpd_addr nexthop;
1954 	struct rde_aspath *a = &state->aspath;
1955 	struct ibuf	 attrbuf, tmpbuf, *npath = NULL;
1956 	size_t		 alen, hlen;
1957 	uint32_t	 tmp32, zero = 0;
1958 	int		 error;
1959 	uint8_t		 flags, type;
1960 
1961 	ibuf_from_ibuf(&attrbuf, buf);
1962 	if (ibuf_get_n8(&attrbuf, &flags) == -1 ||
1963 	    ibuf_get_n8(&attrbuf, &type) == -1)
1964 		goto bad_list;
1965 
1966 	if (flags & ATTR_EXTLEN) {
1967 		uint16_t attr_len;
1968 		if (ibuf_get_n16(&attrbuf, &attr_len) == -1)
1969 			goto bad_list;
1970 		alen = attr_len;
1971 		hlen = 4;
1972 	} else {
1973 		uint8_t attr_len;
1974 		if (ibuf_get_n8(&attrbuf, &attr_len) == -1)
1975 			goto bad_list;
1976 		alen = attr_len;
1977 		hlen = 3;
1978 	}
1979 
1980 	if (ibuf_truncate(&attrbuf, alen) == -1)
1981 		goto bad_list;
1982 	/* consume the attribute in buf before moving forward */
1983 	if (ibuf_skip(buf, hlen + alen) == -1)
1984 		goto bad_list;
1985 
1986 	switch (type) {
1987 	case ATTR_UNDEF:
1988 		/* ignore and drop path attributes with a type code of 0 */
1989 		break;
1990 	case ATTR_ORIGIN:
1991 		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
1992 			goto bad_flags;
1993 		if (ibuf_size(&attrbuf) != 1)
1994 			goto bad_len;
1995 		if (a->flags & F_ATTR_ORIGIN)
1996 			goto bad_list;
1997 		if (ibuf_get_n8(&attrbuf, &a->origin) == -1)
1998 			goto bad_len;
1999 		if (a->origin > ORIGIN_INCOMPLETE) {
2000 			/*
2001 			 * mark update as bad and withdraw all routes as per
2002 			 * RFC 7606
2003 			 */
2004 			a->flags |= F_ATTR_PARSE_ERR;
2005 			log_peer_warnx(&peer->conf, "bad ORIGIN %u, "
2006 			    "path invalidated and prefix withdrawn",
2007 			    a->origin);
2008 			return (-1);
2009 		}
2010 		a->flags |= F_ATTR_ORIGIN;
2011 		break;
2012 	case ATTR_ASPATH:
2013 		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
2014 			goto bad_flags;
2015 		if (a->flags & F_ATTR_ASPATH)
2016 			goto bad_list;
2017 		error = aspath_verify(&attrbuf, peer_has_as4byte(peer),
2018 		    peer_accept_no_as_set(peer));
2019 		if (error != 0 && error != AS_ERR_SOFT) {
2020 			log_peer_warnx(&peer->conf, "bad ASPATH, %s",
2021 			    log_aspath_error(error));
2022 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
2023 			    NULL);
2024 			return (-1);
2025 		}
2026 		if (peer_has_as4byte(peer)) {
2027 			ibuf_from_ibuf(&tmpbuf, &attrbuf);
2028 		} else {
2029 			if ((npath = aspath_inflate(&attrbuf)) == NULL)
2030 				fatal("aspath_inflate");
2031 			ibuf_from_ibuf(&tmpbuf, npath);
2032 		}
2033 		if (error == AS_ERR_SOFT) {
2034 			char *str;
2035 
2036 			/*
2037 			 * soft errors like unexpected segment types are
2038 			 * not considered fatal and the path is just
2039 			 * marked invalid.
2040 			 */
2041 			a->flags |= F_ATTR_PARSE_ERR;
2042 
2043 			aspath_asprint(&str, &tmpbuf);
2044 			log_peer_warnx(&peer->conf, "bad ASPATH %s, "
2045 			    "path invalidated and prefix withdrawn",
2046 			    str ? str : "(bad aspath)");
2047 			free(str);
2048 		}
2049 		a->flags |= F_ATTR_ASPATH;
2050 		a->aspath = aspath_get(ibuf_data(&tmpbuf), ibuf_size(&tmpbuf));
2051 		ibuf_free(npath);
2052 		break;
2053 	case ATTR_NEXTHOP:
2054 		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
2055 			goto bad_flags;
2056 		if (ibuf_size(&attrbuf) != 4)
2057 			goto bad_len;
2058 		if (a->flags & F_ATTR_NEXTHOP)
2059 			goto bad_list;
2060 		a->flags |= F_ATTR_NEXTHOP;
2061 
2062 		memset(&nexthop, 0, sizeof(nexthop));
2063 		nexthop.aid = AID_INET;
2064 		if (ibuf_get_h32(&attrbuf, &nexthop.v4.s_addr) == -1)
2065 			goto bad_len;
2066 		/*
2067 		 * Check if the nexthop is a valid IP address. We consider
2068 		 * multicast addresses as invalid.
2069 		 */
2070 		tmp32 = ntohl(nexthop.v4.s_addr);
2071 		if (IN_MULTICAST(tmp32)) {
2072 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_NEXTHOP,
2073 			    &attrbuf);
2074 			return (-1);
2075 		}
2076 		nexthop_unref(state->nexthop);	/* just to be sure */
2077 		state->nexthop = nexthop_get(&nexthop);
2078 		break;
2079 	case ATTR_MED:
2080 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
2081 			goto bad_flags;
2082 		if (ibuf_size(&attrbuf) != 4)
2083 			goto bad_len;
2084 		if (a->flags & F_ATTR_MED)
2085 			goto bad_list;
2086 		if (ibuf_get_n32(&attrbuf, &a->med) == -1)
2087 			goto bad_len;
2088 		a->flags |= F_ATTR_MED;
2089 		break;
2090 	case ATTR_LOCALPREF:
2091 		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
2092 			goto bad_flags;
2093 		if (ibuf_size(&attrbuf) != 4)
2094 			goto bad_len;
2095 		if (peer->conf.ebgp) {
2096 			/* ignore local-pref attr on non ibgp peers */
2097 			break;
2098 		}
2099 		if (a->flags & F_ATTR_LOCALPREF)
2100 			goto bad_list;
2101 		if (ibuf_get_n32(&attrbuf, &a->lpref) == -1)
2102 			goto bad_len;
2103 		a->flags |= F_ATTR_LOCALPREF;
2104 		break;
2105 	case ATTR_ATOMIC_AGGREGATE:
2106 		if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
2107 			goto bad_flags;
2108 		if (ibuf_size(&attrbuf) != 0)
2109 			goto bad_len;
2110 		goto optattr;
2111 	case ATTR_AGGREGATOR:
2112 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
2113 		    ATTR_PARTIAL))
2114 			goto bad_flags;
2115 		if ((!peer_has_as4byte(peer) && ibuf_size(&attrbuf) != 6) ||
2116 		    (peer_has_as4byte(peer) && ibuf_size(&attrbuf) != 8)) {
2117 			/*
2118 			 * ignore attribute in case of error as per
2119 			 * RFC 7606
2120 			 */
2121 			log_peer_warnx(&peer->conf, "bad AGGREGATOR, "
2122 			    "attribute discarded");
2123 			break;
2124 		}
2125 		if (!peer_has_as4byte(peer)) {
2126 			/* need to inflate aggregator AS to 4-byte */
2127 			u_char	t[8];
2128 			t[0] = t[1] = 0;
2129 			if (ibuf_get(&attrbuf, &t[2], 6) == -1)
2130 				goto bad_list;
2131 			if (memcmp(t, &zero, sizeof(uint32_t)) == 0) {
2132 				/* As per RFC7606 use "attribute discard". */
2133 				log_peer_warnx(&peer->conf, "bad AGGREGATOR, "
2134 				    "AS 0 not allowed, attribute discarded");
2135 				break;
2136 			}
2137 			if (attr_optadd(a, flags, type, t, sizeof(t)) == -1)
2138 				goto bad_list;
2139 			break;
2140 		}
2141 		/* 4-byte ready server take the default route */
2142 		ibuf_from_ibuf(&tmpbuf, &attrbuf);
2143 		if (ibuf_get_n32(&tmpbuf, &tmp32) == -1)
2144 			goto bad_len;
2145 		if (tmp32 == 0) {
2146 			/* As per RFC7606 use "attribute discard" here. */
2147 			char *pfmt = log_fmt_peer(&peer->conf);
2148 			log_debug("%s: bad AGGREGATOR, "
2149 			    "AS 0 not allowed, attribute discarded", pfmt);
2150 			free(pfmt);
2151 			break;
2152 		}
2153 		goto optattr;
2154 	case ATTR_COMMUNITIES:
2155 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
2156 		    ATTR_PARTIAL))
2157 			goto bad_flags;
2158 		if (community_add(&state->communities, flags,
2159 		    &attrbuf) == -1) {
2160 			/*
2161 			 * mark update as bad and withdraw all routes as per
2162 			 * RFC 7606
2163 			 */
2164 			a->flags |= F_ATTR_PARSE_ERR;
2165 			log_peer_warnx(&peer->conf, "bad COMMUNITIES, "
2166 			    "path invalidated and prefix withdrawn");
2167 		}
2168 		break;
2169 	case ATTR_LARGE_COMMUNITIES:
2170 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
2171 		    ATTR_PARTIAL))
2172 			goto bad_flags;
2173 		if (community_large_add(&state->communities, flags,
2174 		    &attrbuf) == -1) {
2175 			/*
2176 			 * mark update as bad and withdraw all routes as per
2177 			 * RFC 7606
2178 			 */
2179 			a->flags |= F_ATTR_PARSE_ERR;
2180 			log_peer_warnx(&peer->conf, "bad LARGE COMMUNITIES, "
2181 			    "path invalidated and prefix withdrawn");
2182 		}
2183 		break;
2184 	case ATTR_EXT_COMMUNITIES:
2185 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
2186 		    ATTR_PARTIAL))
2187 			goto bad_flags;
2188 		if (community_ext_add(&state->communities, flags,
2189 		    peer->conf.ebgp, &attrbuf) == -1) {
2190 			/*
2191 			 * mark update as bad and withdraw all routes as per
2192 			 * RFC 7606
2193 			 */
2194 			a->flags |= F_ATTR_PARSE_ERR;
2195 			log_peer_warnx(&peer->conf, "bad EXT_COMMUNITIES, "
2196 			    "path invalidated and prefix withdrawn");
2197 		}
2198 		break;
2199 	case ATTR_ORIGINATOR_ID:
2200 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
2201 			goto bad_flags;
2202 		if (ibuf_size(&attrbuf) != 4)
2203 			goto bad_len;
2204 		goto optattr;
2205 	case ATTR_CLUSTER_LIST:
2206 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
2207 			goto bad_flags;
2208 		if (peer->conf.ebgp) {
2209 			/* As per RFC7606 use "attribute discard" here. */
2210 			log_peer_warnx(&peer->conf, "bad CLUSTER_LIST, "
2211 			    "received from external peer, attribute discarded");
2212 			break;
2213 		}
2214 		if (ibuf_size(&attrbuf) % 4 != 0 || ibuf_size(&attrbuf) == 0) {
2215 			/*
2216 			 * mark update as bad and withdraw all routes as per
2217 			 * RFC 7606
2218 			 */
2219 			a->flags |= F_ATTR_PARSE_ERR;
2220 			log_peer_warnx(&peer->conf, "bad CLUSTER_LIST, "
2221 			    "path invalidated and prefix withdrawn");
2222 			break;
2223 		}
2224 		goto optattr;
2225 	case ATTR_MP_REACH_NLRI:
2226 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
2227 			goto bad_flags;
2228 		if (ibuf_size(&attrbuf) < 5)
2229 			goto bad_len;
2230 		/* the validity is checked in rde_update_dispatch() */
2231 		if (a->flags & F_ATTR_MP_REACH)
2232 			goto bad_list;
2233 		a->flags |= F_ATTR_MP_REACH;
2234 
2235 		*reach = attrbuf;
2236 		break;
2237 	case ATTR_MP_UNREACH_NLRI:
2238 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0))
2239 			goto bad_flags;
2240 		if (ibuf_size(&attrbuf) < 3)
2241 			goto bad_len;
2242 		/* the validity is checked in rde_update_dispatch() */
2243 		if (a->flags & F_ATTR_MP_UNREACH)
2244 			goto bad_list;
2245 		a->flags |= F_ATTR_MP_UNREACH;
2246 
2247 		*unreach = attrbuf;
2248 		break;
2249 	case ATTR_AS4_AGGREGATOR:
2250 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
2251 		    ATTR_PARTIAL))
2252 			goto bad_flags;
2253 		if (ibuf_size(&attrbuf) != 8) {
2254 			/* see ATTR_AGGREGATOR ... */
2255 			log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, "
2256 			    "attribute discarded");
2257 			break;
2258 		}
2259 		ibuf_from_ibuf(&tmpbuf, &attrbuf);
2260 		if (ibuf_get_n32(&tmpbuf, &tmp32) == -1)
2261 			goto bad_len;
2262 		if (tmp32 == 0) {
2263 			/* As per RFC6793 use "attribute discard" here. */
2264 			log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, "
2265 			    "AS 0 not allowed, attribute discarded");
2266 			break;
2267 		}
2268 		a->flags |= F_ATTR_AS4BYTE_NEW;
2269 		goto optattr;
2270 	case ATTR_AS4_PATH:
2271 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
2272 		    ATTR_PARTIAL))
2273 			goto bad_flags;
2274 		if ((error = aspath_verify(&attrbuf, 1,
2275 		    peer_accept_no_as_set(peer))) != 0) {
2276 			/* As per RFC6793 use "attribute discard" here. */
2277 			log_peer_warnx(&peer->conf, "bad AS4_PATH, "
2278 			    "attribute discarded");
2279 			break;
2280 		}
2281 		a->flags |= F_ATTR_AS4BYTE_NEW;
2282 		goto optattr;
2283 	case ATTR_OTC:
2284 		if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
2285 		    ATTR_PARTIAL))
2286 			goto bad_flags;
2287 		if (ibuf_size(&attrbuf) != 4) {
2288 			/* treat-as-withdraw */
2289 			a->flags |= F_ATTR_PARSE_ERR;
2290 			log_peer_warnx(&peer->conf, "bad OTC, "
2291 			    "path invalidated and prefix withdrawn");
2292 			break;
2293 		}
2294 		switch (peer->role) {
2295 		case ROLE_PROVIDER:
2296 		case ROLE_RS:
2297 			a->flags |= F_ATTR_OTC_LEAK;
2298 			break;
2299 		case ROLE_PEER:
2300 			if (ibuf_get_n32(&attrbuf, &tmp32) == -1)
2301 				goto bad_len;
2302 			if (tmp32 != peer->conf.remote_as)
2303 				a->flags |= F_ATTR_OTC_LEAK;
2304 			break;
2305 		default:
2306 			break;
2307 		}
2308 		a->flags |= F_ATTR_OTC;
2309 		goto optattr;
2310 	default:
2311 		if ((flags & ATTR_OPTIONAL) == 0) {
2312 			rde_update_err(peer, ERR_UPDATE, ERR_UPD_UNKNWN_WK_ATTR,
2313 			    &attrbuf);
2314 			return (-1);
2315 		}
2316  optattr:
2317 		if (attr_optadd(a, flags, type, ibuf_data(&attrbuf),
2318 		    ibuf_size(&attrbuf)) == -1)
2319 			goto bad_list;
2320 		break;
2321 	}
2322 
2323 	return (0);
2324 
2325  bad_len:
2326 	rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLEN, &attrbuf);
2327 	return (-1);
2328  bad_flags:
2329 	rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRFLAGS, &attrbuf);
2330 	return (-1);
2331  bad_list:
2332 	rde_update_err(peer, ERR_UPDATE, ERR_UPD_ATTRLIST, NULL);
2333 	return (-1);
2334 }
2335 
2336 #undef CHECK_FLAGS
2337 
2338 int
2339 rde_attr_add(struct filterstate *state, struct ibuf *buf)
2340 {
2341 	uint16_t	 attr_len;
2342 	uint8_t		 flags;
2343 	uint8_t		 type;
2344 	uint8_t		 tmp8;
2345 
2346 	if (ibuf_get_n8(buf, &flags) == -1 ||
2347 	    ibuf_get_n8(buf, &type) == -1)
2348 		return (-1);
2349 
2350 	if (flags & ATTR_EXTLEN) {
2351 		if (ibuf_get_n16(buf, &attr_len) == -1)
2352 			return (-1);
2353 	} else {
2354 		if (ibuf_get_n8(buf, &tmp8) == -1)
2355 			return (-1);
2356 		attr_len = tmp8;
2357 	}
2358 
2359 	if (ibuf_size(buf) != attr_len)
2360 		return (-1);
2361 
2362 	switch (type) {
2363 	case ATTR_COMMUNITIES:
2364 		return community_add(&state->communities, flags, buf);
2365 	case ATTR_LARGE_COMMUNITIES:
2366 		return community_large_add(&state->communities, flags, buf);
2367 	case ATTR_EXT_COMMUNITIES:
2368 		return community_ext_add(&state->communities, flags, 0, buf);
2369 	}
2370 
2371 	if (attr_optadd(&state->aspath, flags, type, ibuf_data(buf),
2372 	    attr_len) == -1)
2373 		return (-1);
2374 	return (0);
2375 }
2376 
2377 uint8_t
2378 rde_attr_missing(struct rde_aspath *a, int ebgp, uint16_t nlrilen)
2379 {
2380 	/* ATTR_MP_UNREACH_NLRI may be sent alone */
2381 	if (nlrilen == 0 && a->flags & F_ATTR_MP_UNREACH &&
2382 	    (a->flags & F_ATTR_MP_REACH) == 0)
2383 		return (0);
2384 
2385 	if ((a->flags & F_ATTR_ORIGIN) == 0)
2386 		return (ATTR_ORIGIN);
2387 	if ((a->flags & F_ATTR_ASPATH) == 0)
2388 		return (ATTR_ASPATH);
2389 	if ((a->flags & F_ATTR_MP_REACH) == 0 &&
2390 	    (a->flags & F_ATTR_NEXTHOP) == 0)
2391 		return (ATTR_NEXTHOP);
2392 	if (!ebgp)
2393 		if ((a->flags & F_ATTR_LOCALPREF) == 0)
2394 			return (ATTR_LOCALPREF);
2395 	return (0);
2396 }
2397 
2398 int
2399 rde_get_mp_nexthop(struct ibuf *buf, uint8_t aid,
2400     struct rde_peer *peer, struct filterstate *state)
2401 {
2402 	struct bgpd_addr	nexthop;
2403 	struct ibuf		nhbuf;
2404 	uint8_t			nhlen;
2405 
2406 	if (ibuf_get_n8(buf, &nhlen) == -1)
2407 		return (-1);
2408 	if (ibuf_get_ibuf(buf, nhlen, &nhbuf) == -1)
2409 		return (-1);
2410 	/* ignore reserved (old SNPA) field as per RFC4760 */
2411 	if (ibuf_skip(buf, 1) == -1)
2412 		return (-1);
2413 
2414 	memset(&nexthop, 0, sizeof(nexthop));
2415 	switch (aid) {
2416 	case AID_INET6:
2417 		/*
2418 		 * RFC2545 describes that there may be a link-local
2419 		 * address carried in nexthop. Yikes!
2420 		 * This is not only silly, it is wrong and we just ignore
2421 		 * this link-local nexthop. The bgpd session doesn't run
2422 		 * over the link-local address so why should all other
2423 		 * traffic.
2424 		 */
2425 		if (nhlen != 16 && nhlen != 32) {
2426 			log_peer_warnx(&peer->conf, "bad %s nexthop, "
2427 			    "bad size %d", aid2str(aid), nhlen);
2428 			return (-1);
2429 		}
2430 		if (ibuf_get(&nhbuf, &nexthop.v6, sizeof(nexthop.v6)) == -1)
2431 			return (-1);
2432 		nexthop.aid = AID_INET6;
2433 		if (IN6_IS_ADDR_LINKLOCAL(&nexthop.v6)) {
2434 			if (peer->local_if_scope != 0) {
2435 				nexthop.scope_id = peer->local_if_scope;
2436 			} else {
2437 				log_peer_warnx(&peer->conf,
2438 				    "unexpected link-local nexthop: %s",
2439 				    log_addr(&nexthop));
2440 				return (-1);
2441 			}
2442 		}
2443 		break;
2444 	case AID_VPN_IPv4:
2445 		/*
2446 		 * Neither RFC4364 nor RFC3107 specify the format of the
2447 		 * nexthop in an explicit way. The quality of RFC went down
2448 		 * the toilet the larger the number got.
2449 		 * RFC4364 is very confusing about VPN-IPv4 address and the
2450 		 * VPN-IPv4 prefix that carries also a MPLS label.
2451 		 * So the nexthop is a 12-byte address with a 64bit RD and
2452 		 * an IPv4 address following. In the nexthop case the RD can
2453 		 * be ignored.
2454 		 * Since the nexthop has to be in the main IPv4 table just
2455 		 * create an AID_INET nexthop. So we don't need to handle
2456 		 * AID_VPN_IPv4 in nexthop and kroute.
2457 		 */
2458 		if (nhlen != 12) {
2459 			log_peer_warnx(&peer->conf, "bad %s nexthop, "
2460 			    "bad size %d", aid2str(aid), nhlen);
2461 			return (-1);
2462 		}
2463 		if (ibuf_skip(&nhbuf, sizeof(uint64_t)) == -1 ||
2464 		    ibuf_get(&nhbuf, &nexthop.v4, sizeof(nexthop.v4)) == -1)
2465 			return (-1);
2466 		nexthop.aid = AID_INET;
2467 		break;
2468 	case AID_VPN_IPv6:
2469 		if (nhlen != 24) {
2470 			log_peer_warnx(&peer->conf, "bad %s nexthop, "
2471 			    "bad size %d", aid2str(aid), nhlen);
2472 			return (-1);
2473 		}
2474 		if (ibuf_skip(&nhbuf, sizeof(uint64_t)) == -1 ||
2475 		    ibuf_get(&nhbuf, &nexthop.v6, sizeof(nexthop.v6)) == -1)
2476 			return (-1);
2477 		nexthop.aid = AID_INET6;
2478 		if (IN6_IS_ADDR_LINKLOCAL(&nexthop.v6)) {
2479 			if (peer->local_if_scope != 0) {
2480 				nexthop.scope_id = peer->local_if_scope;
2481 			} else {
2482 				log_peer_warnx(&peer->conf,
2483 				    "unexpected link-local nexthop: %s",
2484 				    log_addr(&nexthop));
2485 				return (-1);
2486 			}
2487 		}
2488 		break;
2489 	case AID_FLOWSPECv4:
2490 	case AID_FLOWSPECv6:
2491 		/* nexthop must be 0 and ignored for flowspec */
2492 		if (nhlen != 0) {
2493 			log_peer_warnx(&peer->conf, "bad %s nexthop, "
2494 			    "bad size %d", aid2str(aid), nhlen);
2495 			return (-1);
2496 		}
2497 		return (0);
2498 	default:
2499 		log_peer_warnx(&peer->conf, "bad multiprotocol nexthop, "
2500 		    "bad AID");
2501 		return (-1);
2502 	}
2503 
2504 	state->nexthop = nexthop_get(&nexthop);
2505 
2506 	return (0);
2507 }
2508 
2509 void
2510 rde_update_err(struct rde_peer *peer, uint8_t error, uint8_t suberr,
2511     struct ibuf *opt)
2512 {
2513 	struct ibuf *wbuf;
2514 	size_t size = 0;
2515 
2516 	if (opt != NULL) {
2517 		ibuf_rewind(opt);
2518 		size = ibuf_size(opt);
2519 	}
2520 	if ((wbuf = imsg_create(ibuf_se, IMSG_UPDATE_ERR, peer->conf.id, 0,
2521 	    size + sizeof(error) + sizeof(suberr))) == NULL)
2522 		fatal("%s %d imsg_create error", __func__, __LINE__);
2523 	if (imsg_add(wbuf, &error, sizeof(error)) == -1 ||
2524 	    imsg_add(wbuf, &suberr, sizeof(suberr)) == -1)
2525 		fatal("%s %d imsg_add error", __func__, __LINE__);
2526 	if (opt != NULL)
2527 		if (ibuf_add_ibuf(wbuf, opt) == -1)
2528 			fatal("%s %d ibuf_add_ibuf error", __func__, __LINE__);
2529 	imsg_close(ibuf_se, wbuf);
2530 	peer->state = PEER_ERR;
2531 }
2532 
2533 void
2534 rde_update_log(const char *message, uint16_t rid,
2535     const struct rde_peer *peer, const struct bgpd_addr *next,
2536     const struct bgpd_addr *prefix, uint8_t prefixlen)
2537 {
2538 	char		*l = NULL;
2539 	char		*n = NULL;
2540 	char		*p = NULL;
2541 
2542 	if (!((conf->log & BGPD_LOG_UPDATES) ||
2543 	    (peer->flags & PEERFLAG_LOG_UPDATES)))
2544 		return;
2545 
2546 	if (next != NULL)
2547 		if (asprintf(&n, " via %s", log_addr(next)) == -1)
2548 			n = NULL;
2549 	if (asprintf(&p, "%s/%u", log_addr(prefix), prefixlen) == -1)
2550 		p = NULL;
2551 	l = log_fmt_peer(&peer->conf);
2552 	log_info("Rib %s: %s AS%s: %s %s%s", rib_byid(rid)->name,
2553 	    l, log_as(peer->conf.remote_as), message,
2554 	    p ? p : "out of memory", n ? n : "");
2555 
2556 	free(l);
2557 	free(n);
2558 	free(p);
2559 }
2560 
2561 /*
2562  * 4-Byte ASN helper function.
2563  * Two scenarios need to be considered:
2564  * - NEW session with NEW attributes present -> just remove the attributes
2565  * - OLD session with NEW attributes present -> try to merge them
2566  */
2567 void
2568 rde_as4byte_fixup(struct rde_peer *peer, struct rde_aspath *a)
2569 {
2570 	struct attr	*nasp, *naggr, *oaggr;
2571 	uint32_t	 as;
2572 
2573 	/*
2574 	 * if either ATTR_AS4_AGGREGATOR or ATTR_AS4_PATH is present
2575 	 * try to fixup the attributes.
2576 	 * Do not fixup if F_ATTR_PARSE_ERR is set.
2577 	 */
2578 	if (!(a->flags & F_ATTR_AS4BYTE_NEW) || a->flags & F_ATTR_PARSE_ERR)
2579 		return;
2580 
2581 	/* first get the attributes */
2582 	nasp = attr_optget(a, ATTR_AS4_PATH);
2583 	naggr = attr_optget(a, ATTR_AS4_AGGREGATOR);
2584 
2585 	if (peer_has_as4byte(peer)) {
2586 		/* NEW session using 4-byte ASNs */
2587 		if (nasp) {
2588 			log_peer_warnx(&peer->conf, "uses 4-byte ASN "
2589 			    "but sent AS4_PATH attribute.");
2590 			attr_free(a, nasp);
2591 		}
2592 		if (naggr) {
2593 			log_peer_warnx(&peer->conf, "uses 4-byte ASN "
2594 			    "but sent AS4_AGGREGATOR attribute.");
2595 			attr_free(a, naggr);
2596 		}
2597 		return;
2598 	}
2599 	/* OLD session using 2-byte ASNs */
2600 	/* try to merge the new attributes into the old ones */
2601 	if ((oaggr = attr_optget(a, ATTR_AGGREGATOR))) {
2602 		memcpy(&as, oaggr->data, sizeof(as));
2603 		if (ntohl(as) != AS_TRANS) {
2604 			/* per RFC ignore AS4_PATH and AS4_AGGREGATOR */
2605 			if (nasp)
2606 				attr_free(a, nasp);
2607 			if (naggr)
2608 				attr_free(a, naggr);
2609 			return;
2610 		}
2611 		if (naggr) {
2612 			/* switch over to new AGGREGATOR */
2613 			attr_free(a, oaggr);
2614 			if (attr_optadd(a, ATTR_OPTIONAL | ATTR_TRANSITIVE,
2615 			    ATTR_AGGREGATOR, naggr->data, naggr->len))
2616 				fatalx("attr_optadd failed but impossible");
2617 		}
2618 	}
2619 	/* there is no need for AS4_AGGREGATOR any more */
2620 	if (naggr)
2621 		attr_free(a, naggr);
2622 
2623 	/* merge AS4_PATH with ASPATH */
2624 	if (nasp)
2625 		aspath_merge(a, nasp);
2626 }
2627 
2628 
2629 uint8_t
2630 rde_aspa_validity(struct rde_peer *peer, struct rde_aspath *asp, uint8_t aid)
2631 {
2632 	if (!peer->conf.ebgp)	/* ASPA is only performed on ebgp sessions */
2633 		return ASPA_NEVER_KNOWN;
2634 	if (aid != AID_INET && aid != AID_INET6) /* skip uncovered aids */
2635 		return ASPA_NEVER_KNOWN;
2636 
2637 #ifdef MAYBE
2638 	/*
2639 	 * By default enforce neighbor-as is set for all ebgp sessions.
2640 	 * So if a admin disables this check should we really "reenable"
2641 	 * it here in such a dubious way?
2642 	 * This just fails the ASPA validation for these paths so maybe
2643 	 * this can be helpful. But it is not transparent to the admin.
2644 	 */
2645 
2646 	/* skip neighbor-as check for transparent RS sessions */
2647 	if (peer->role != ROLE_RS_CLIENT &&
2648 	    peer->conf.enforce_as != ENFORCE_AS_ON) {
2649 		uint32_t fas;
2650 
2651 		fas = aspath_neighbor(asp->aspath);
2652 		if (peer->conf.remote_as != fas)
2653 			return ASPA_INVALID;
2654 	}
2655 #endif
2656 
2657 	/* if no role is set, the outcome is unknown */
2658 	if (peer->role == ROLE_NONE)
2659 		return ASPA_UNKNOWN;
2660 
2661 	if (peer->role == ROLE_CUSTOMER)
2662 		return asp->aspa_state.downup;
2663 	else
2664 		return asp->aspa_state.onlyup;
2665 }
2666 
2667 /*
2668  * route reflector helper function
2669  */
2670 void
2671 rde_reflector(struct rde_peer *peer, struct rde_aspath *asp)
2672 {
2673 	struct attr	*a;
2674 	uint8_t		*p;
2675 	uint16_t	 len;
2676 	uint32_t	 id;
2677 
2678 	/* do not consider updates with parse errors */
2679 	if (asp->flags & F_ATTR_PARSE_ERR)
2680 		return;
2681 
2682 	/* check for originator id if eq router_id drop */
2683 	if ((a = attr_optget(asp, ATTR_ORIGINATOR_ID)) != NULL) {
2684 		id = htonl(conf->bgpid);
2685 		if (memcmp(&id, a->data, sizeof(id)) == 0) {
2686 			/* this is coming from myself */
2687 			asp->flags |= F_ATTR_LOOP;
2688 			return;
2689 		}
2690 	} else if (conf->flags & BGPD_FLAG_REFLECTOR) {
2691 		if (peer->conf.ebgp)
2692 			id = htonl(conf->bgpid);
2693 		else
2694 			id = htonl(peer->remote_bgpid);
2695 		if (attr_optadd(asp, ATTR_OPTIONAL, ATTR_ORIGINATOR_ID,
2696 		    &id, sizeof(id)) == -1)
2697 			fatalx("attr_optadd failed but impossible");
2698 	}
2699 
2700 	/* check for own id in the cluster list */
2701 	if (conf->flags & BGPD_FLAG_REFLECTOR) {
2702 		id = htonl(conf->clusterid);
2703 		if ((a = attr_optget(asp, ATTR_CLUSTER_LIST)) != NULL) {
2704 			for (len = 0; len < a->len; len += sizeof(id))
2705 				/* check if coming from my cluster */
2706 				if (memcmp(&id, a->data + len,
2707 				    sizeof(id)) == 0) {
2708 					asp->flags |= F_ATTR_LOOP;
2709 					return;
2710 				}
2711 
2712 			/* prepend own clusterid by replacing attribute */
2713 			len = a->len + sizeof(id);
2714 			if (len < a->len)
2715 				fatalx("rde_reflector: cluster-list overflow");
2716 			if ((p = malloc(len)) == NULL)
2717 				fatal("rde_reflector");
2718 			memcpy(p, &id, sizeof(id));
2719 			memcpy(p + sizeof(id), a->data, a->len);
2720 			attr_free(asp, a);
2721 			if (attr_optadd(asp, ATTR_OPTIONAL, ATTR_CLUSTER_LIST,
2722 			    p, len) == -1)
2723 				fatalx("attr_optadd failed but impossible");
2724 			free(p);
2725 		} else if (attr_optadd(asp, ATTR_OPTIONAL, ATTR_CLUSTER_LIST,
2726 		    &id, sizeof(id)) == -1)
2727 			fatalx("attr_optadd failed but impossible");
2728 	}
2729 }
2730 
2731 /*
2732  * control specific functions
2733  */
2734 static void
2735 rde_dump_rib_as(struct prefix *p, struct rde_aspath *asp, pid_t pid, int flags,
2736     int adjout)
2737 {
2738 	struct ctl_show_rib	 rib;
2739 	struct ibuf		*wbuf;
2740 	struct attr		*a;
2741 	struct nexthop		*nexthop;
2742 	struct rib_entry	*re;
2743 	struct prefix		*xp;
2744 	struct rde_peer		*peer;
2745 	time_t			 staletime;
2746 	size_t			 aslen;
2747 	uint8_t			 l;
2748 
2749 	nexthop = prefix_nexthop(p);
2750 	peer = prefix_peer(p);
2751 	memset(&rib, 0, sizeof(rib));
2752 	rib.age = getmonotime() - p->lastchange;
2753 	rib.local_pref = asp->lpref;
2754 	rib.med = asp->med;
2755 	rib.weight = asp->weight;
2756 	strlcpy(rib.descr, peer->conf.descr, sizeof(rib.descr));
2757 	memcpy(&rib.remote_addr, &peer->remote_addr,
2758 	    sizeof(rib.remote_addr));
2759 	rib.remote_id = peer->remote_bgpid;
2760 	if (nexthop != NULL) {
2761 		rib.exit_nexthop = nexthop->exit_nexthop;
2762 		rib.true_nexthop = nexthop->true_nexthop;
2763 	} else {
2764 		/* announced network can have a NULL nexthop */
2765 		rib.exit_nexthop.aid = p->pt->aid;
2766 		rib.true_nexthop.aid = p->pt->aid;
2767 	}
2768 	pt_getaddr(p->pt, &rib.prefix);
2769 	rib.prefixlen = p->pt->prefixlen;
2770 	rib.origin = asp->origin;
2771 	rib.roa_validation_state = prefix_roa_vstate(p);
2772 	rib.aspa_validation_state = prefix_aspa_vstate(p);
2773 	rib.dmetric = p->dmetric;
2774 	rib.flags = 0;
2775 	if (!adjout && prefix_eligible(p)) {
2776 		re = prefix_re(p);
2777 		TAILQ_FOREACH(xp, &re->prefix_h, entry.list.rib) {
2778 			switch (xp->dmetric) {
2779 			case PREFIX_DMETRIC_BEST:
2780 				if (xp == p)
2781 					rib.flags |= F_PREF_BEST;
2782 				break;
2783 			case PREFIX_DMETRIC_ECMP:
2784 				if (xp == p)
2785 					rib.flags |= F_PREF_ECMP;
2786 				break;
2787 			case PREFIX_DMETRIC_AS_WIDE:
2788 				if (xp == p)
2789 					rib.flags |= F_PREF_AS_WIDE;
2790 				break;
2791 			default:
2792 				xp = NULL;	/* stop loop */
2793 				break;
2794 			}
2795 			if (xp == NULL || xp == p)
2796 				break;
2797 		}
2798 	}
2799 	if (!peer->conf.ebgp)
2800 		rib.flags |= F_PREF_INTERNAL;
2801 	if (asp->flags & F_PREFIX_ANNOUNCED)
2802 		rib.flags |= F_PREF_ANNOUNCE;
2803 	if (prefix_eligible(p))
2804 		rib.flags |= F_PREF_ELIGIBLE;
2805 	if (prefix_filtered(p))
2806 		rib.flags |= F_PREF_FILTERED;
2807 	/* otc loop includes parse err so skip the latter if the first is set */
2808 	if (asp->flags & F_ATTR_OTC_LEAK)
2809 		rib.flags |= F_PREF_OTC_LEAK;
2810 	else if (asp->flags & F_ATTR_PARSE_ERR)
2811 		rib.flags |= F_PREF_INVALID;
2812 	staletime = peer->staletime[p->pt->aid];
2813 	if (staletime && p->lastchange <= staletime)
2814 		rib.flags |= F_PREF_STALE;
2815 	if (!adjout) {
2816 		if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_RECV)) {
2817 			rib.path_id = p->path_id;
2818 			rib.flags |= F_PREF_PATH_ID;
2819 		}
2820 	} else {
2821 		if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_SEND)) {
2822 			rib.path_id = p->path_id_tx;
2823 			rib.flags |= F_PREF_PATH_ID;
2824 		}
2825 	}
2826 	aslen = aspath_length(asp->aspath);
2827 
2828 	if ((wbuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_RIB, 0, pid,
2829 	    sizeof(rib) + aslen)) == NULL)
2830 		return;
2831 	if (imsg_add(wbuf, &rib, sizeof(rib)) == -1 ||
2832 	    imsg_add(wbuf, aspath_dump(asp->aspath), aslen) == -1)
2833 		return;
2834 	imsg_close(ibuf_se_ctl, wbuf);
2835 
2836 	if (flags & F_CTL_DETAIL) {
2837 		struct rde_community *comm = prefix_communities(p);
2838 		size_t len = comm->nentries * sizeof(struct community);
2839 		if (comm->nentries > 0) {
2840 			if (imsg_compose(ibuf_se_ctl,
2841 			    IMSG_CTL_SHOW_RIB_COMMUNITIES, 0, pid, -1,
2842 			    comm->communities, len) == -1)
2843 				return;
2844 		}
2845 		for (l = 0; l < asp->others_len; l++) {
2846 			if ((a = asp->others[l]) == NULL)
2847 				break;
2848 			if ((wbuf = imsg_create(ibuf_se_ctl,
2849 			    IMSG_CTL_SHOW_RIB_ATTR, 0, pid, 0)) == NULL)
2850 				return;
2851 			if (attr_writebuf(wbuf, a->flags, a->type, a->data,
2852 			    a->len) == -1) {
2853 				ibuf_free(wbuf);
2854 				return;
2855 			}
2856 			imsg_close(ibuf_se_ctl, wbuf);
2857 		}
2858 	}
2859 }
2860 
2861 int
2862 rde_match_peer(struct rde_peer *p, struct ctl_neighbor *n)
2863 {
2864 	char *s;
2865 
2866 	if (n && n->addr.aid) {
2867 		if (memcmp(&p->conf.remote_addr, &n->addr,
2868 		    sizeof(p->conf.remote_addr)))
2869 			return 0;
2870 	} else if (n && n->descr[0]) {
2871 		s = n->is_group ? p->conf.group : p->conf.descr;
2872 		/* cannot trust n->descr to be properly terminated */
2873 		if (strncmp(s, n->descr, sizeof(n->descr)))
2874 			return 0;
2875 	}
2876 	return 1;
2877 }
2878 
2879 static void
2880 rde_dump_filter(struct prefix *p, struct ctl_show_rib_request *req, int adjout)
2881 {
2882 	struct rde_aspath	*asp;
2883 
2884 	if (!rde_match_peer(prefix_peer(p), &req->neighbor))
2885 		return;
2886 
2887 	asp = prefix_aspath(p);
2888 	if ((req->flags & F_CTL_BEST) && p->dmetric != PREFIX_DMETRIC_BEST)
2889 		return;
2890 	if ((req->flags & F_CTL_INVALID) &&
2891 	    (asp->flags & F_ATTR_PARSE_ERR) == 0)
2892 		return;
2893 	if ((req->flags & F_CTL_FILTERED) && !prefix_filtered(p))
2894 		return;
2895 	if ((req->flags & F_CTL_INELIGIBLE) && prefix_eligible(p))
2896 		return;
2897 	if ((req->flags & F_CTL_LEAKED) &&
2898 	    (asp->flags & F_ATTR_OTC_LEAK) == 0)
2899 		return;
2900 	if ((req->flags & F_CTL_HAS_PATHID)) {
2901 		/* Match against the transmit path id if adjout is used.  */
2902 		if (adjout) {
2903 			if (req->path_id != p->path_id_tx)
2904 				return;
2905 		} else {
2906 			if (req->path_id != p->path_id)
2907 				return;
2908 		}
2909 	}
2910 	if (req->as.type != AS_UNDEF &&
2911 	    !aspath_match(asp->aspath, &req->as, 0))
2912 		return;
2913 	if (req->community.flags != 0) {
2914 		if (!community_match(prefix_communities(p), &req->community,
2915 		    NULL))
2916 			return;
2917 	}
2918 	if (!ovs_match(p, req->flags))
2919 		return;
2920 	if (!avs_match(p, req->flags))
2921 		return;
2922 	rde_dump_rib_as(p, asp, req->pid, req->flags, adjout);
2923 }
2924 
2925 static void
2926 rde_dump_upcall(struct rib_entry *re, void *ptr)
2927 {
2928 	struct rde_dump_ctx	*ctx = ptr;
2929 	struct prefix		*p;
2930 
2931 	if (re == NULL)
2932 		return;
2933 	TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
2934 		rde_dump_filter(p, &ctx->req, 0);
2935 }
2936 
2937 static void
2938 rde_dump_adjout_upcall(struct prefix *p, void *ptr)
2939 {
2940 	struct rde_dump_ctx	*ctx = ptr;
2941 
2942 	if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
2943 		fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
2944 	if (p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD))
2945 		return;
2946 	rde_dump_filter(p, &ctx->req, 1);
2947 }
2948 
2949 static int
2950 rde_dump_throttled(void *arg)
2951 {
2952 	struct rde_dump_ctx	*ctx = arg;
2953 
2954 	return (ctx->throttled != 0);
2955 }
2956 
2957 static void
2958 rde_dump_done(void *arg, uint8_t aid)
2959 {
2960 	struct rde_dump_ctx	*ctx = arg;
2961 	struct rde_peer		*peer;
2962 	u_int			 error;
2963 
2964 	if (ctx->req.flags & F_CTL_ADJ_OUT) {
2965 		peer = peer_match(&ctx->req.neighbor, ctx->peerid);
2966 		if (peer == NULL)
2967 			goto done;
2968 		ctx->peerid = peer->conf.id;
2969 		switch (ctx->req.type) {
2970 		case IMSG_CTL_SHOW_RIB:
2971 			if (prefix_dump_new(peer, ctx->req.aid,
2972 			    CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
2973 			    rde_dump_done, rde_dump_throttled) == -1)
2974 				goto nomem;
2975 			break;
2976 		case IMSG_CTL_SHOW_RIB_PREFIX:
2977 			if (prefix_dump_subtree(peer, &ctx->req.prefix,
2978 			    ctx->req.prefixlen, CTL_MSG_HIGH_MARK, ctx,
2979 			    rde_dump_adjout_upcall, rde_dump_done,
2980 			    rde_dump_throttled) == -1)
2981 				goto nomem;
2982 			break;
2983 		default:
2984 			fatalx("%s: unsupported imsg type", __func__);
2985 		}
2986 		return;
2987 	}
2988 done:
2989 	imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid, -1, NULL, 0);
2990 	LIST_REMOVE(ctx, entry);
2991 	free(ctx);
2992 	return;
2993 
2994 nomem:
2995 	log_warn(__func__);
2996 	error = CTL_RES_NOMEM;
2997 	imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, ctx->req.pid, -1, &error,
2998 	    sizeof(error));
2999 	return;
3000 }
3001 
3002 void
3003 rde_dump_ctx_new(struct ctl_show_rib_request *req, pid_t pid,
3004     enum imsg_type type)
3005 {
3006 	struct rde_dump_ctx	*ctx;
3007 	struct rib_entry	*re;
3008 	struct prefix		*p;
3009 	u_int			 error;
3010 	uint8_t			 hostplen, plen;
3011 	uint16_t		 rid;
3012 
3013 	if ((ctx = calloc(1, sizeof(*ctx))) == NULL) {
3014  nomem:
3015 		log_warn(__func__);
3016 		error = CTL_RES_NOMEM;
3017 		imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error,
3018 		    sizeof(error));
3019 		free(ctx);
3020 		return;
3021 	}
3022 
3023 	if (strcmp(req->rib, "Adj-RIB-Out") == 0)
3024 		req->flags |= F_CTL_ADJ_OUT;
3025 
3026 	memcpy(&ctx->req, req, sizeof(struct ctl_show_rib_request));
3027 	ctx->req.pid = pid;
3028 	ctx->req.type = type;
3029 
3030 	if (req->flags & (F_CTL_ADJ_IN | F_CTL_INVALID)) {
3031 		rid = RIB_ADJ_IN;
3032 	} else if (req->flags & F_CTL_ADJ_OUT) {
3033 		struct rde_peer *peer;
3034 
3035 		peer = peer_match(&req->neighbor, 0);
3036 		if (peer == NULL) {
3037 			error = CTL_RES_NOSUCHPEER;
3038 			imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1,
3039 			    &error, sizeof(error));
3040 			free(ctx);
3041 			return;
3042 		}
3043 		ctx->peerid = peer->conf.id;
3044 		switch (ctx->req.type) {
3045 		case IMSG_CTL_SHOW_RIB:
3046 			if (prefix_dump_new(peer, ctx->req.aid,
3047 			    CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
3048 			    rde_dump_done, rde_dump_throttled) == -1)
3049 				goto nomem;
3050 			break;
3051 		case IMSG_CTL_SHOW_RIB_PREFIX:
3052 			if (req->flags & F_LONGER) {
3053 				if (prefix_dump_subtree(peer, &req->prefix,
3054 				    req->prefixlen, CTL_MSG_HIGH_MARK, ctx,
3055 				    rde_dump_adjout_upcall,
3056 				    rde_dump_done, rde_dump_throttled) == -1)
3057 					goto nomem;
3058 				break;
3059 			}
3060 			switch (req->prefix.aid) {
3061 			case AID_INET:
3062 			case AID_VPN_IPv4:
3063 				hostplen = 32;
3064 				break;
3065 			case AID_INET6:
3066 			case AID_VPN_IPv6:
3067 				hostplen = 128;
3068 				break;
3069 			default:
3070 				fatalx("%s: unknown af", __func__);
3071 			}
3072 
3073 			do {
3074 				if (req->flags & F_SHORTER) {
3075 					for (plen = 0; plen <= req->prefixlen;
3076 					    plen++) {
3077 						p = prefix_adjout_lookup(peer,
3078 						    &req->prefix, plen);
3079 						/* dump all matching paths */
3080 						while (p != NULL) {
3081 							rde_dump_adjout_upcall(
3082 							    p, ctx);
3083 							p = prefix_adjout_next(
3084 							    peer, p);
3085 						}
3086 					}
3087 					p = NULL;
3088 				} else if (req->prefixlen == hostplen) {
3089 					p = prefix_adjout_match(peer,
3090 					    &req->prefix);
3091 				} else {
3092 					p = prefix_adjout_lookup(peer,
3093 					    &req->prefix, req->prefixlen);
3094 				}
3095 				/* dump all matching paths */
3096 				while (p != NULL) {
3097 					rde_dump_adjout_upcall(p, ctx);
3098 					p = prefix_adjout_next(peer, p);
3099 				}
3100 			} while ((peer = peer_match(&req->neighbor,
3101 			    peer->conf.id)));
3102 
3103 			imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid,
3104 			    -1, NULL, 0);
3105 			free(ctx);
3106 			return;
3107 		default:
3108 			fatalx("%s: unsupported imsg type", __func__);
3109 		}
3110 
3111 		LIST_INSERT_HEAD(&rde_dump_h, ctx, entry);
3112 		return;
3113 	} else if ((rid = rib_find(req->rib)) == RIB_NOTFOUND) {
3114 		log_warnx("%s: no such rib %s", __func__, req->rib);
3115 		error = CTL_RES_NOSUCHRIB;
3116 		imsg_compose(ibuf_se_ctl, IMSG_CTL_RESULT, 0, pid, -1, &error,
3117 		    sizeof(error));
3118 		free(ctx);
3119 		return;
3120 	}
3121 
3122 	switch (ctx->req.type) {
3123 	case IMSG_CTL_SHOW_NETWORK:
3124 		if (rib_dump_new(rid, ctx->req.aid, CTL_MSG_HIGH_MARK, ctx,
3125 		    network_dump_upcall, rde_dump_done,
3126 		    rde_dump_throttled) == -1)
3127 			goto nomem;
3128 		break;
3129 	case IMSG_CTL_SHOW_RIB:
3130 		if (rib_dump_new(rid, ctx->req.aid, CTL_MSG_HIGH_MARK, ctx,
3131 		    rde_dump_upcall, rde_dump_done, rde_dump_throttled) == -1)
3132 			goto nomem;
3133 		break;
3134 	case IMSG_CTL_SHOW_RIB_PREFIX:
3135 		if (req->flags & F_LONGER) {
3136 			if (rib_dump_subtree(rid, &req->prefix, req->prefixlen,
3137 			    CTL_MSG_HIGH_MARK, ctx, rde_dump_upcall,
3138 			    rde_dump_done, rde_dump_throttled) == -1)
3139 				goto nomem;
3140 			break;
3141 		}
3142 		switch (req->prefix.aid) {
3143 		case AID_INET:
3144 		case AID_VPN_IPv4:
3145 			hostplen = 32;
3146 			break;
3147 		case AID_INET6:
3148 		case AID_VPN_IPv6:
3149 			hostplen = 128;
3150 			break;
3151 		default:
3152 			fatalx("%s: unknown af", __func__);
3153 		}
3154 
3155 		if (req->flags & F_SHORTER) {
3156 			for (plen = 0; plen <= req->prefixlen; plen++) {
3157 				re = rib_get_addr(rib_byid(rid), &req->prefix,
3158 				    plen);
3159 				rde_dump_upcall(re, ctx);
3160 			}
3161 		} else if (req->prefixlen == hostplen) {
3162 			re = rib_match(rib_byid(rid), &req->prefix);
3163 			rde_dump_upcall(re, ctx);
3164 		} else {
3165 			re = rib_get_addr(rib_byid(rid), &req->prefix,
3166 			    req->prefixlen);
3167 			rde_dump_upcall(re, ctx);
3168 		}
3169 		imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, ctx->req.pid,
3170 		    -1, NULL, 0);
3171 		free(ctx);
3172 		return;
3173 	default:
3174 		fatalx("%s: unsupported imsg type", __func__);
3175 	}
3176 	LIST_INSERT_HEAD(&rde_dump_h, ctx, entry);
3177 }
3178 
3179 void
3180 rde_dump_ctx_throttle(pid_t pid, int throttle)
3181 {
3182 	struct rde_dump_ctx	*ctx;
3183 
3184 	LIST_FOREACH(ctx, &rde_dump_h, entry) {
3185 		if (ctx->req.pid == pid) {
3186 			ctx->throttled = throttle;
3187 			return;
3188 		}
3189 	}
3190 }
3191 
3192 void
3193 rde_dump_ctx_terminate(pid_t pid)
3194 {
3195 	struct rde_dump_ctx	*ctx;
3196 
3197 	LIST_FOREACH(ctx, &rde_dump_h, entry) {
3198 		if (ctx->req.pid == pid) {
3199 			rib_dump_terminate(ctx);
3200 			return;
3201 		}
3202 	}
3203 }
3204 
3205 static int
3206 rde_mrt_throttled(void *arg)
3207 {
3208 	struct mrt	*mrt = arg;
3209 
3210 	return (msgbuf_queuelen(mrt->wbuf) > SESS_MSG_LOW_MARK);
3211 }
3212 
3213 static void
3214 rde_mrt_done(void *ptr, uint8_t aid)
3215 {
3216 	mrt_done(ptr);
3217 }
3218 
3219 void
3220 rde_dump_mrt_new(struct mrt *mrt, pid_t pid, int fd)
3221 {
3222 	struct rde_mrt_ctx *ctx;
3223 	uint16_t rid;
3224 
3225 	if ((ctx = calloc(1, sizeof(*ctx))) == NULL) {
3226 		log_warn("rde_dump_mrt_new");
3227 		return;
3228 	}
3229 	memcpy(&ctx->mrt, mrt, sizeof(struct mrt));
3230 	if ((ctx->mrt.wbuf = msgbuf_new()) == NULL) {
3231 		log_warn("rde_dump_mrt_new");
3232 		free(ctx);
3233 		return;
3234 	}
3235 	ctx->mrt.fd = fd;
3236 	ctx->mrt.state = MRT_STATE_RUNNING;
3237 	rid = rib_find(ctx->mrt.rib);
3238 	if (rid == RIB_NOTFOUND) {
3239 		log_warnx("non existing RIB %s for mrt dump", ctx->mrt.rib);
3240 		free(ctx);
3241 		return;
3242 	}
3243 
3244 	if (ctx->mrt.type == MRT_TABLE_DUMP_V2)
3245 		mrt_dump_v2_hdr(&ctx->mrt, conf);
3246 
3247 	if (rib_dump_new(rid, AID_UNSPEC, CTL_MSG_HIGH_MARK, &ctx->mrt,
3248 	    mrt_dump_upcall, rde_mrt_done, rde_mrt_throttled) == -1)
3249 		fatal("%s: rib_dump_new", __func__);
3250 
3251 	LIST_INSERT_HEAD(&rde_mrts, ctx, entry);
3252 	rde_mrt_cnt++;
3253 }
3254 
3255 /*
3256  * kroute specific functions
3257  */
3258 int
3259 rde_l3vpn_import(struct rde_community *comm, struct l3vpn *rd)
3260 {
3261 	struct filter_set	*s;
3262 
3263 	TAILQ_FOREACH(s, &rd->import, entry) {
3264 		if (community_match(comm, &s->action.community, 0))
3265 			return (1);
3266 	}
3267 	return (0);
3268 }
3269 
3270 void
3271 rde_send_kroute_flush(struct rib *rib)
3272 {
3273 	if (imsg_compose(ibuf_main, IMSG_KROUTE_FLUSH, rib->rtableid, 0, -1,
3274 	    NULL, 0) == -1)
3275 		fatal("%s %d imsg_compose error", __func__, __LINE__);
3276 }
3277 
3278 void
3279 rde_send_kroute(struct rib *rib, struct prefix *new, struct prefix *old)
3280 {
3281 	struct kroute_full	 kf;
3282 	struct prefix		*p;
3283 	struct l3vpn		*vpn;
3284 	enum imsg_type		 type;
3285 
3286 	/*
3287 	 * Make sure that self announce prefixes are not committed to the
3288 	 * FIB. If both prefixes are unreachable no update is needed.
3289 	 */
3290 	if ((old == NULL || prefix_aspath(old)->flags & F_PREFIX_ANNOUNCED) &&
3291 	    (new == NULL || prefix_aspath(new)->flags & F_PREFIX_ANNOUNCED))
3292 		return;
3293 
3294 	if (new == NULL || prefix_aspath(new)->flags & F_PREFIX_ANNOUNCED) {
3295 		type = IMSG_KROUTE_DELETE;
3296 		p = old;
3297 	} else {
3298 		type = IMSG_KROUTE_CHANGE;
3299 		p = new;
3300 	}
3301 
3302 	memset(&kf, 0, sizeof(kf));
3303 	pt_getaddr(p->pt, &kf.prefix);
3304 	kf.prefixlen = p->pt->prefixlen;
3305 	if (type == IMSG_KROUTE_CHANGE) {
3306 		if (prefix_nhflags(p) == NEXTHOP_REJECT)
3307 			kf.flags |= F_REJECT;
3308 		if (prefix_nhflags(p) == NEXTHOP_BLACKHOLE)
3309 			kf.flags |= F_BLACKHOLE;
3310 		kf.nexthop = prefix_nexthop(p)->exit_nexthop;
3311 		strlcpy(kf.label, rtlabel_id2name(prefix_aspath(p)->rtlabelid),
3312 		    sizeof(kf.label));
3313 	}
3314 
3315 	switch (kf.prefix.aid) {
3316 	case AID_VPN_IPv4:
3317 	case AID_VPN_IPv6:
3318 		if (!(rib->flags & F_RIB_LOCAL))
3319 			/* not Loc-RIB, no update for VPNs */
3320 			break;
3321 
3322 		SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
3323 			if (!rde_l3vpn_import(prefix_communities(p), vpn))
3324 				continue;
3325 			/* XXX not ideal but this will change */
3326 			kf.ifindex = if_nametoindex(vpn->ifmpe);
3327 			if (imsg_compose(ibuf_main, type, vpn->rtableid, 0, -1,
3328 			    &kf, sizeof(kf)) == -1)
3329 				fatal("%s %d imsg_compose error", __func__,
3330 				    __LINE__);
3331 		}
3332 		break;
3333 	default:
3334 		if (imsg_compose(ibuf_main, type, rib->rtableid, 0, -1,
3335 		    &kf, sizeof(kf)) == -1)
3336 			fatal("%s %d imsg_compose error", __func__, __LINE__);
3337 		break;
3338 	}
3339 }
3340 
3341 /*
3342  * update specific functions
3343  */
3344 int
3345 rde_evaluate_all(void)
3346 {
3347 	return rde_eval_all;
3348 }
3349 
3350 /* flush Adj-RIB-Out by withdrawing all prefixes */
3351 static void
3352 rde_up_flush_upcall(struct prefix *p, void *ptr)
3353 {
3354 	prefix_adjout_withdraw(p);
3355 }
3356 
3357 int
3358 rde_update_queue_pending(void)
3359 {
3360 	struct rde_peer *peer;
3361 	uint8_t aid;
3362 
3363 	if (ibuf_se && imsgbuf_queuelen(ibuf_se) >= SESS_MSG_HIGH_MARK)
3364 		return 0;
3365 
3366 	RB_FOREACH(peer, peer_tree, &peertable) {
3367 		if (peer->conf.id == 0)
3368 			continue;
3369 		if (!peer_is_up(peer))
3370 			continue;
3371 		if (peer->throttled)
3372 			continue;
3373 		for (aid = AID_MIN; aid < AID_MAX; aid++) {
3374 			if (!RB_EMPTY(&peer->updates[aid]) ||
3375 			    !RB_EMPTY(&peer->withdraws[aid]))
3376 				return 1;
3377 		}
3378 	}
3379 	return 0;
3380 }
3381 
3382 void
3383 rde_update_queue_runner(uint8_t aid)
3384 {
3385 	struct rde_peer		*peer;
3386 	struct ibuf		*buf;
3387 	int			 sent, max = RDE_RUNNER_ROUNDS;
3388 
3389 	/* first withdraws ... */
3390 	do {
3391 		sent = 0;
3392 		RB_FOREACH(peer, peer_tree, &peertable) {
3393 			if (peer->conf.id == 0)
3394 				continue;
3395 			if (!peer_is_up(peer))
3396 				continue;
3397 			if (peer->throttled)
3398 				continue;
3399 			if (RB_EMPTY(&peer->withdraws[aid]))
3400 				continue;
3401 
3402 			if ((buf = up_dump_withdraws(peer, aid)) == NULL) {
3403 				continue;
3404 			}
3405 			if (imsg_compose_ibuf(ibuf_se, IMSG_UPDATE,
3406 			    peer->conf.id, 0, buf) == -1)
3407 				fatal("%s: imsg_create error", __func__);
3408 			sent++;
3409 		}
3410 		max -= sent;
3411 	} while (sent != 0 && max > 0);
3412 
3413 	/* ... then updates */
3414 	max = RDE_RUNNER_ROUNDS;
3415 	do {
3416 		sent = 0;
3417 		RB_FOREACH(peer, peer_tree, &peertable) {
3418 			if (peer->conf.id == 0)
3419 				continue;
3420 			if (!peer_is_up(peer))
3421 				continue;
3422 			if (peer->throttled)
3423 				continue;
3424 			if (RB_EMPTY(&peer->updates[aid]))
3425 				continue;
3426 
3427 			if (up_is_eor(peer, aid)) {
3428 				int sent_eor = peer->sent_eor & (1 << aid);
3429 				if (peer->capa.grestart.restart && !sent_eor)
3430 					rde_peer_send_eor(peer, aid);
3431 				if (peer->capa.enhanced_rr && sent_eor)
3432 					rde_peer_send_rrefresh(peer, aid,
3433 					    ROUTE_REFRESH_END_RR);
3434 				continue;
3435 			}
3436 
3437 			if ((buf = up_dump_update(peer, aid)) == NULL) {
3438 				continue;
3439 			}
3440 			if (imsg_compose_ibuf(ibuf_se, IMSG_UPDATE,
3441 			    peer->conf.id, 0, buf) == -1)
3442 				fatal("%s: imsg_compose_ibuf error", __func__);
3443 			sent++;
3444 		}
3445 		max -= sent;
3446 	} while (sent != 0 && max > 0);
3447 }
3448 
3449 /*
3450  * pf table specific functions
3451  */
3452 struct rde_pftable_node {
3453 	RB_ENTRY(rde_pftable_node)	 entry;
3454 	struct pt_entry			*prefix;
3455 	int				 refcnt;
3456 	uint16_t			 id;
3457 };
3458 RB_HEAD(rde_pftable_tree, rde_pftable_node);
3459 
3460 static inline int
3461 rde_pftable_cmp(struct rde_pftable_node *a, struct rde_pftable_node *b)
3462 {
3463 	if (a->prefix > b->prefix)
3464 		return 1;
3465 	if (a->prefix < b->prefix)
3466 		return -1;
3467 	return (a->id - b->id);
3468 }
3469 
3470 RB_GENERATE_STATIC(rde_pftable_tree, rde_pftable_node, entry, rde_pftable_cmp);
3471 
3472 struct rde_pftable_tree pftable_tree = RB_INITIALIZER(&pftable_tree);
3473 int need_commit;
3474 
3475 static void
3476 rde_pftable_send(uint16_t id, struct pt_entry *pt, int del)
3477 {
3478 	struct pftable_msg pfm;
3479 
3480 	if (id == 0)
3481 		return;
3482 
3483 	/* do not run while cleaning up */
3484 	if (rde_quit)
3485 		return;
3486 
3487 	memset(&pfm, 0, sizeof(pfm));
3488 	strlcpy(pfm.pftable, pftable_id2name(id), sizeof(pfm.pftable));
3489 	pt_getaddr(pt, &pfm.addr);
3490 	pfm.len = pt->prefixlen;
3491 
3492 	if (imsg_compose(ibuf_main,
3493 	    del ? IMSG_PFTABLE_REMOVE : IMSG_PFTABLE_ADD,
3494 	    0, 0, -1, &pfm, sizeof(pfm)) == -1)
3495 		fatal("%s %d imsg_compose error", __func__, __LINE__);
3496 
3497 	need_commit = 1;
3498 }
3499 
3500 void
3501 rde_pftable_add(uint16_t id, struct prefix *p)
3502 {
3503 	struct rde_pftable_node *pfn, node;
3504 
3505 	memset(&node, 0, sizeof(node));
3506 	node.prefix = p->pt;
3507 	node.id = id;
3508 
3509 	pfn = RB_FIND(rde_pftable_tree, &pftable_tree, &node);
3510 	if (pfn == NULL) {
3511 		if ((pfn = calloc(1, sizeof(*pfn))) == NULL)
3512 			fatal("%s", __func__);
3513 		pfn->prefix = pt_ref(p->pt);
3514 		pfn->id = id;
3515 
3516 		if (RB_INSERT(rde_pftable_tree, &pftable_tree, pfn) != NULL)
3517 			fatalx("%s: tree corrupt", __func__);
3518 
3519 		rde_pftable_send(id, p->pt, 0);
3520 	}
3521 	pfn->refcnt++;
3522 }
3523 
3524 void
3525 rde_pftable_del(uint16_t id, struct prefix *p)
3526 {
3527 	struct rde_pftable_node *pfn, node;
3528 
3529 	memset(&node, 0, sizeof(node));
3530 	node.prefix = p->pt;
3531 	node.id = id;
3532 
3533 	pfn = RB_FIND(rde_pftable_tree, &pftable_tree, &node);
3534 	if (pfn == NULL)
3535 		return;
3536 
3537 	if (--pfn->refcnt <= 0) {
3538 		rde_pftable_send(id, p->pt, 1);
3539 
3540 		if (RB_REMOVE(rde_pftable_tree, &pftable_tree, pfn) == NULL)
3541 			fatalx("%s: tree corrupt", __func__);
3542 
3543 		pt_unref(pfn->prefix);
3544 		free(pfn);
3545 	}
3546 }
3547 
3548 void
3549 rde_commit_pftable(void)
3550 {
3551 	/* do not run while cleaning up */
3552 	if (rde_quit)
3553 		return;
3554 
3555 	if (!need_commit)
3556 		return;
3557 
3558 	if (imsg_compose(ibuf_main, IMSG_PFTABLE_COMMIT, 0, 0, -1, NULL, 0) ==
3559 	    -1)
3560 		fatal("%s %d imsg_compose error", __func__, __LINE__);
3561 
3562 	need_commit = 0;
3563 }
3564 
3565 /*
3566  * nexthop specific functions
3567  */
3568 void
3569 rde_send_nexthop(struct bgpd_addr *next, int insert)
3570 {
3571 	int			 type;
3572 
3573 	if (insert)
3574 		type = IMSG_NEXTHOP_ADD;
3575 	else
3576 		type = IMSG_NEXTHOP_REMOVE;
3577 
3578 	if (imsg_compose(ibuf_main, type, 0, 0, -1, next,
3579 	    sizeof(struct bgpd_addr)) == -1)
3580 		fatal("%s %d imsg_compose error", __func__, __LINE__);
3581 }
3582 
3583 /*
3584  * soft reconfig specific functions
3585  */
3586 void
3587 rde_reload_done(void)
3588 {
3589 	struct rde_peer		*peer;
3590 	struct filter_head	*fh;
3591 	struct rde_prefixset_head prefixsets_old;
3592 	struct rde_prefixset_head originsets_old;
3593 	struct as_set_head	 as_sets_old;
3594 	uint16_t		 rid;
3595 	int			 reload = 0, force_locrib = 0;
3596 
3597 	softreconfig = 0;
3598 
3599 	SIMPLEQ_INIT(&prefixsets_old);
3600 	SIMPLEQ_INIT(&originsets_old);
3601 	SIMPLEQ_INIT(&as_sets_old);
3602 	SIMPLEQ_CONCAT(&prefixsets_old, &conf->rde_prefixsets);
3603 	SIMPLEQ_CONCAT(&originsets_old, &conf->rde_originsets);
3604 	SIMPLEQ_CONCAT(&as_sets_old, &conf->as_sets);
3605 
3606 	/* run softreconfig in if filter mode changed */
3607 	if (conf->filtered_in_locrib != nconf->filtered_in_locrib) {
3608 		log_debug("filter mode changed, reloading Loc-Rib");
3609 		force_locrib = 1;
3610 	}
3611 
3612 	/* merge the main config */
3613 	copy_config(conf, nconf);
3614 
3615 	/* need to copy the sets and roa table and clear them in nconf */
3616 	SIMPLEQ_CONCAT(&conf->rde_prefixsets, &nconf->rde_prefixsets);
3617 	SIMPLEQ_CONCAT(&conf->rde_originsets, &nconf->rde_originsets);
3618 	SIMPLEQ_CONCAT(&conf->as_sets, &nconf->as_sets);
3619 
3620 	/* apply new set of l3vpn, sync will be done later */
3621 	free_l3vpns(&conf->l3vpns);
3622 	SIMPLEQ_CONCAT(&conf->l3vpns, &nconf->l3vpns);
3623 	/* XXX WHERE IS THE SYNC ??? */
3624 
3625 	free_config(nconf);
3626 	nconf = NULL;
3627 
3628 	/* sync peerself with conf */
3629 	peerself->remote_bgpid = conf->bgpid;
3630 	peerself->conf.local_as = conf->as;
3631 	peerself->conf.remote_as = conf->as;
3632 	peerself->conf.remote_addr.aid = AID_INET;
3633 	peerself->conf.remote_addr.v4.s_addr = htonl(conf->bgpid);
3634 	peerself->conf.remote_masklen = 32;
3635 	peerself->short_as = conf->short_as;
3636 
3637 	rde_mark_prefixsets_dirty(&prefixsets_old, &conf->rde_prefixsets);
3638 	rde_mark_prefixsets_dirty(&originsets_old, &conf->rde_originsets);
3639 	as_sets_mark_dirty(&as_sets_old, &conf->as_sets);
3640 
3641 
3642 	/* make sure that rde_eval_all is correctly set after a config change */
3643 	rde_eval_all = 0;
3644 
3645 	/* Make the new outbound filter rules the active one. */
3646 	filterlist_free(out_rules);
3647 	out_rules = out_rules_tmp;
3648 	out_rules_tmp = NULL;
3649 
3650 	/* check if filter changed */
3651 	RB_FOREACH(peer, peer_tree, &peertable) {
3652 		if (peer->conf.id == 0)	/* ignore peerself */
3653 			continue;
3654 		peer->reconf_out = 0;
3655 		peer->reconf_rib = 0;
3656 
3657 		/* max prefix checker */
3658 		if (peer->conf.max_prefix &&
3659 		    peer->stats.prefix_cnt > peer->conf.max_prefix) {
3660 			log_peer_warnx(&peer->conf,
3661 			    "prefix limit reached (>%u/%u)",
3662 			    peer->stats.prefix_cnt, peer->conf.max_prefix);
3663 			rde_update_err(peer, ERR_CEASE, ERR_CEASE_MAX_PREFIX,
3664 			    NULL);
3665 		}
3666 		/* max prefix checker outbound */
3667 		if (peer->conf.max_out_prefix &&
3668 		    peer->stats.prefix_out_cnt > peer->conf.max_out_prefix) {
3669 			log_peer_warnx(&peer->conf,
3670 			    "outbound prefix limit reached (>%u/%u)",
3671 			    peer->stats.prefix_out_cnt,
3672 			    peer->conf.max_out_prefix);
3673 			rde_update_err(peer, ERR_CEASE,
3674 			    ERR_CEASE_MAX_SENT_PREFIX, NULL);
3675 		}
3676 
3677 		if (peer->export_type != peer->conf.export_type) {
3678 			log_peer_info(&peer->conf, "export type change, "
3679 			    "reloading");
3680 			peer->reconf_rib = 1;
3681 		}
3682 		if ((peer->flags & PEERFLAG_EVALUATE_ALL) !=
3683 		    (peer->conf.flags & PEERFLAG_EVALUATE_ALL)) {
3684 			log_peer_info(&peer->conf, "rde evaluate change, "
3685 			    "reloading");
3686 			peer->reconf_rib = 1;
3687 		}
3688 		if ((peer->flags & PEERFLAG_TRANS_AS) !=
3689 		    (peer->conf.flags & PEERFLAG_TRANS_AS)) {
3690 			log_peer_info(&peer->conf, "transparent-as change, "
3691 			    "reloading");
3692 			peer->reconf_rib = 1;
3693 		}
3694 		if (peer->loc_rib_id != rib_find(peer->conf.rib)) {
3695 			log_peer_info(&peer->conf, "rib change, reloading");
3696 			peer->loc_rib_id = rib_find(peer->conf.rib);
3697 			if (peer->loc_rib_id == RIB_NOTFOUND)
3698 				fatalx("King Bula's peer met an unknown RIB");
3699 			peer->reconf_rib = 1;
3700 		}
3701 		/*
3702 		 * Update add-path settings but only if the session is
3703 		 * running with add-path and the config uses add-path
3704 		 * as well.
3705 		 */
3706 		if (peer_has_add_path(peer, AID_UNSPEC, CAPA_AP_SEND)) {
3707 			if (peer->conf.eval.mode != ADDPATH_EVAL_NONE &&
3708 			    memcmp(&peer->eval, &peer->conf.eval,
3709 			    sizeof(peer->eval)) != 0) {
3710 				log_peer_info(&peer->conf,
3711 				    "addpath eval change, reloading");
3712 				peer->reconf_out = 1;
3713 				peer->eval = peer->conf.eval;
3714 			}
3715 			/* add-path send needs rde_eval_all */
3716 			rde_eval_all = 1;
3717 		}
3718 		if (peer->role != peer->conf.role) {
3719 			if (reload == 0)
3720 				log_debug("peer role change: "
3721 				    "reloading Adj-RIB-In");
3722 			peer->role = peer->conf.role;
3723 			reload++;
3724 		}
3725 		peer->export_type = peer->conf.export_type;
3726 		peer->flags = peer->conf.flags;
3727 		if (peer->flags & PEERFLAG_EVALUATE_ALL)
3728 			rde_eval_all = 1;
3729 
3730 		if (peer->reconf_rib) {
3731 			if (prefix_dump_new(peer, AID_UNSPEC,
3732 			    RDE_RUNNER_ROUNDS, NULL, rde_up_flush_upcall,
3733 			    rde_softreconfig_in_done, NULL) == -1)
3734 				fatal("%s: prefix_dump_new", __func__);
3735 			log_peer_info(&peer->conf, "flushing Adj-RIB-Out");
3736 			softreconfig++;	/* account for the running flush */
3737 			continue;
3738 		}
3739 
3740 		/* reapply outbound filters for this peer */
3741 		fh = peer_apply_out_filter(peer, out_rules);
3742 
3743 		if (!rde_filter_equal(peer->out_rules, fh)) {
3744 			char *p = log_fmt_peer(&peer->conf);
3745 			log_debug("out filter change: reloading peer %s", p);
3746 			free(p);
3747 			peer->reconf_out = 1;
3748 		}
3749 		filterlist_free(fh);
3750 	}
3751 
3752 	/* bring ribs in sync */
3753 	for (rid = RIB_LOC_START; rid < rib_size; rid++) {
3754 		struct rib *rib = rib_byid(rid);
3755 		if (rib == NULL)
3756 			continue;
3757 		rde_filter_calc_skip_steps(rib->in_rules_tmp);
3758 
3759 		/* flip rules, make new active */
3760 		fh = rib->in_rules;
3761 		rib->in_rules = rib->in_rules_tmp;
3762 		rib->in_rules_tmp = fh;
3763 
3764 		switch (rib->state) {
3765 		case RECONF_DELETE:
3766 			rib_free(rib);
3767 			break;
3768 		case RECONF_RELOAD:
3769 			if (rib_update(rib)) {
3770 				RB_FOREACH(peer, peer_tree, &peertable) {
3771 					/* ignore peerself */
3772 					if (peer->conf.id == 0)
3773 						continue;
3774 					/* skip peers using a different rib */
3775 					if (peer->loc_rib_id != rib->id)
3776 						continue;
3777 					/* peer rib is already being flushed */
3778 					if (peer->reconf_rib)
3779 						continue;
3780 
3781 					if (prefix_dump_new(peer, AID_UNSPEC,
3782 					    RDE_RUNNER_ROUNDS, NULL,
3783 					    rde_up_flush_upcall,
3784 					    rde_softreconfig_in_done,
3785 					    NULL) == -1)
3786 						fatal("%s: prefix_dump_new",
3787 						    __func__);
3788 
3789 					log_peer_info(&peer->conf,
3790 					    "flushing Adj-RIB-Out");
3791 					/* account for the running flush */
3792 					softreconfig++;
3793 				}
3794 			}
3795 
3796 			rib->state = RECONF_KEEP;
3797 			/* FALLTHROUGH */
3798 		case RECONF_KEEP:
3799 			if (!(force_locrib && rid == RIB_LOC_START) &&
3800 			    rde_filter_equal(rib->in_rules, rib->in_rules_tmp))
3801 				/* rib is in sync */
3802 				break;
3803 			log_debug("filter change: reloading RIB %s",
3804 			    rib->name);
3805 			rib->state = RECONF_RELOAD;
3806 			reload++;
3807 			break;
3808 		case RECONF_REINIT:
3809 			/* new rib */
3810 			rib->state = RECONF_RELOAD;
3811 			reload++;
3812 			break;
3813 		case RECONF_NONE:
3814 			break;
3815 		}
3816 		filterlist_free(rib->in_rules_tmp);
3817 		rib->in_rules_tmp = NULL;
3818 	}
3819 
3820 	/* old filters removed, free all sets */
3821 	free_rde_prefixsets(&prefixsets_old);
3822 	free_rde_prefixsets(&originsets_old);
3823 	as_sets_free(&as_sets_old);
3824 
3825 	log_info("RDE reconfigured");
3826 
3827 	softreconfig++;
3828 	if (reload > 0) {
3829 		if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3830 		    NULL, rde_softreconfig_in, rde_softreconfig_in_done,
3831 		    NULL) == -1)
3832 			fatal("%s: rib_dump_new", __func__);
3833 		log_info("running softreconfig in");
3834 	} else {
3835 		rde_softreconfig_in_done((void *)1, AID_UNSPEC);
3836 	}
3837 }
3838 
3839 static void
3840 rde_softreconfig_in_done(void *arg, uint8_t dummy)
3841 {
3842 	struct rde_peer	*peer;
3843 	uint16_t	 i;
3844 
3845 	softreconfig--;
3846 	/* one guy done but other dumps are still running */
3847 	if (softreconfig > 0)
3848 		return;
3849 
3850 	if (arg == NULL)
3851 		log_info("softreconfig in done");
3852 
3853 	/* now do the Adj-RIB-Out sync and a possible FIB sync */
3854 	softreconfig = 0;
3855 	for (i = 0; i < rib_size; i++) {
3856 		struct rib *rib = rib_byid(i);
3857 		if (rib == NULL)
3858 			continue;
3859 		rib->state = RECONF_NONE;
3860 		if (rib->fibstate == RECONF_RELOAD) {
3861 			if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3862 			    rib, rde_softreconfig_sync_fib,
3863 			    rde_softreconfig_sync_done, NULL) == -1)
3864 				fatal("%s: rib_dump_new", __func__);
3865 			softreconfig++;
3866 			log_info("starting fib sync for rib %s",
3867 			    rib->name);
3868 		} else if (rib->fibstate == RECONF_REINIT) {
3869 			if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3870 			    rib, rde_softreconfig_sync_reeval,
3871 			    rde_softreconfig_sync_done, NULL) == -1)
3872 				fatal("%s: rib_dump_new", __func__);
3873 			softreconfig++;
3874 			log_info("starting re-evaluation of rib %s",
3875 			    rib->name);
3876 		}
3877 	}
3878 
3879 	RB_FOREACH(peer, peer_tree, &peertable) {
3880 		uint8_t aid;
3881 
3882 		if (peer->reconf_out) {
3883 			if (peer->export_type == EXPORT_NONE) {
3884 				/* nothing to do here */
3885 				peer->reconf_out = 0;
3886 			} else if (peer->export_type == EXPORT_DEFAULT_ROUTE) {
3887 				/* just resend the default route */
3888 				for (aid = AID_MIN; aid < AID_MAX; aid++) {
3889 					if (peer->capa.mp[aid])
3890 						up_generate_default(peer, aid);
3891 				}
3892 				peer->reconf_out = 0;
3893 			} else
3894 				rib_byid(peer->loc_rib_id)->state =
3895 				    RECONF_RELOAD;
3896 		} else if (peer->reconf_rib) {
3897 			/* dump the full table to neighbors that changed rib */
3898 			for (aid = AID_MIN; aid < AID_MAX; aid++) {
3899 				if (peer->capa.mp[aid])
3900 					peer_dump(peer, aid);
3901 			}
3902 		}
3903 	}
3904 
3905 	for (i = 0; i < rib_size; i++) {
3906 		struct rib *rib = rib_byid(i);
3907 		if (rib == NULL)
3908 			continue;
3909 		if (rib->state == RECONF_RELOAD) {
3910 			if (rib_dump_new(i, AID_UNSPEC, RDE_RUNNER_ROUNDS,
3911 			    rib, rde_softreconfig_out,
3912 			    rde_softreconfig_out_done, NULL) == -1)
3913 				fatal("%s: rib_dump_new", __func__);
3914 			softreconfig++;
3915 			log_info("starting softreconfig out for rib %s",
3916 			    rib->name);
3917 		}
3918 	}
3919 
3920 	/* if nothing to do move to last stage */
3921 	if (softreconfig == 0)
3922 		rde_softreconfig_done();
3923 }
3924 
3925 static void
3926 rde_softreconfig_out_done(void *arg, uint8_t aid)
3927 {
3928 	struct rib	*rib = arg;
3929 
3930 	/* this RIB dump is done */
3931 	log_info("softreconfig out done for %s", rib->name);
3932 
3933 	/* check if other dumps are still running */
3934 	if (--softreconfig == 0)
3935 		rde_softreconfig_done();
3936 }
3937 
3938 static void
3939 rde_softreconfig_done(void)
3940 {
3941 	uint16_t	i;
3942 
3943 	for (i = 0; i < rib_size; i++) {
3944 		struct rib *rib = rib_byid(i);
3945 		if (rib == NULL)
3946 			continue;
3947 		rib->state = RECONF_NONE;
3948 	}
3949 
3950 	log_info("RDE soft reconfiguration done");
3951 	imsg_compose(ibuf_main, IMSG_RECONF_DONE, 0, 0,
3952 	    -1, NULL, 0);
3953 }
3954 
3955 static void
3956 rde_softreconfig_in(struct rib_entry *re, void *bula)
3957 {
3958 	struct filterstate	 state;
3959 	struct rib		*rib;
3960 	struct prefix		*p;
3961 	struct pt_entry		*pt;
3962 	struct rde_peer		*peer;
3963 	struct rde_aspath	*asp;
3964 	enum filter_actions	 action;
3965 	struct bgpd_addr	 prefix;
3966 	uint16_t		 i;
3967 	uint8_t			 aspa_vstate;
3968 
3969 	pt = re->prefix;
3970 	pt_getaddr(pt, &prefix);
3971 	TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
3972 		asp = prefix_aspath(p);
3973 		peer = prefix_peer(p);
3974 
3975 		/* possible role change update ASPA validation state */
3976 		if (prefix_aspa_vstate(p) == ASPA_NEVER_KNOWN)
3977 			aspa_vstate = ASPA_NEVER_KNOWN;
3978 		else
3979 			aspa_vstate = rde_aspa_validity(peer, asp, pt->aid);
3980 		prefix_set_vstate(p, prefix_roa_vstate(p), aspa_vstate);
3981 
3982 		/* skip announced networks, they are never filtered */
3983 		if (asp->flags & F_PREFIX_ANNOUNCED)
3984 			continue;
3985 
3986 		for (i = RIB_LOC_START; i < rib_size; i++) {
3987 			rib = rib_byid(i);
3988 			if (rib == NULL)
3989 				continue;
3990 
3991 			if (rib->state != RECONF_RELOAD)
3992 				continue;
3993 
3994 			rde_filterstate_prep(&state, p);
3995 			action = rde_filter(rib->in_rules, peer, peer, &prefix,
3996 			    pt->prefixlen, &state);
3997 
3998 			if (action == ACTION_ALLOW) {
3999 				/* update Local-RIB */
4000 				prefix_update(rib, peer, p->path_id,
4001 				    p->path_id_tx, &state, 0,
4002 				    &prefix, pt->prefixlen);
4003 			} else if (conf->filtered_in_locrib &&
4004 			    i == RIB_LOC_START) {
4005 				prefix_update(rib, peer, p->path_id,
4006 				    p->path_id_tx, &state, 1,
4007 				    &prefix, pt->prefixlen);
4008 			} else {
4009 				/* remove from Local-RIB */
4010 				prefix_withdraw(rib, peer, p->path_id, &prefix,
4011 				    pt->prefixlen);
4012 			}
4013 
4014 			rde_filterstate_clean(&state);
4015 		}
4016 	}
4017 }
4018 
4019 static void
4020 rde_softreconfig_out(struct rib_entry *re, void *arg)
4021 {
4022 	if (prefix_best(re) == NULL)
4023 		/* no valid path for prefix */
4024 		return;
4025 
4026 	rde_generate_updates(re, NULL, NULL, EVAL_RECONF);
4027 }
4028 
4029 static void
4030 rde_softreconfig_sync_reeval(struct rib_entry *re, void *arg)
4031 {
4032 	struct prefix_queue	prefixes = TAILQ_HEAD_INITIALIZER(prefixes);
4033 	struct prefix		*p, *next;
4034 	struct rib		*rib = arg;
4035 
4036 	if (rib->flags & F_RIB_NOEVALUATE) {
4037 		/*
4038 		 * evaluation process is turned off
4039 		 * all dependent adj-rib-out were already flushed
4040 		 * unlink nexthop if it was linked
4041 		 */
4042 		TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
4043 			if (p->flags & PREFIX_NEXTHOP_LINKED)
4044 				nexthop_unlink(p);
4045 			p->dmetric = PREFIX_DMETRIC_INVALID;
4046 		}
4047 		return;
4048 	}
4049 
4050 	/* evaluation process is turned on, so evaluate all prefixes again */
4051 	TAILQ_CONCAT(&prefixes, &re->prefix_h, entry.list.rib);
4052 
4053 	/*
4054 	 * TODO: this code works but is not optimal. prefix_evaluate()
4055 	 * does a lot of extra work in the worst case. Would be better
4056 	 * to resort the list once and then call rde_generate_updates()
4057 	 * and rde_send_kroute() once.
4058 	 */
4059 	TAILQ_FOREACH_SAFE(p, &prefixes, entry.list.rib, next) {
4060 		/* need to re-link the nexthop if not already linked */
4061 		TAILQ_REMOVE(&prefixes, p, entry.list.rib);
4062 		if ((p->flags & PREFIX_NEXTHOP_LINKED) == 0)
4063 			nexthop_link(p);
4064 		prefix_evaluate(re, p, NULL);
4065 	}
4066 }
4067 
4068 static void
4069 rde_softreconfig_sync_fib(struct rib_entry *re, void *bula)
4070 {
4071 	struct prefix *p;
4072 
4073 	if ((p = prefix_best(re)) != NULL)
4074 		rde_send_kroute(re_rib(re), p, NULL);
4075 }
4076 
4077 static void
4078 rde_softreconfig_sync_done(void *arg, uint8_t aid)
4079 {
4080 	struct rib *rib = arg;
4081 
4082 	/* this RIB dump is done */
4083 	if (rib->fibstate == RECONF_RELOAD)
4084 		log_info("fib sync done for %s", rib->name);
4085 	else
4086 		log_info("re-evaluation done for %s", rib->name);
4087 	rib->fibstate = RECONF_NONE;
4088 
4089 	/* check if other dumps are still running */
4090 	if (--softreconfig == 0)
4091 		rde_softreconfig_done();
4092 }
4093 
4094 /*
4095  * ROA specific functions. The roa set is updated independent of the config
4096  * so this runs outside of the softreconfig handlers.
4097  */
4098 static void
4099 rde_rpki_softreload(struct rib_entry *re, void *bula)
4100 {
4101 	struct filterstate	 state;
4102 	struct rib		*rib;
4103 	struct prefix		*p;
4104 	struct pt_entry		*pt;
4105 	struct rde_peer		*peer;
4106 	struct rde_aspath	*asp;
4107 	enum filter_actions	 action;
4108 	struct bgpd_addr	 prefix;
4109 	uint8_t			 roa_vstate, aspa_vstate;
4110 	uint16_t		 i;
4111 
4112 	pt = re->prefix;
4113 	pt_getaddr(pt, &prefix);
4114 	TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
4115 		asp = prefix_aspath(p);
4116 		peer = prefix_peer(p);
4117 
4118 		/* ROA validation state update */
4119 		roa_vstate = rde_roa_validity(&rde_roa,
4120 		    &prefix, pt->prefixlen, aspath_origin(asp->aspath));
4121 
4122 		/* ASPA validation state update (if needed) */
4123 		if (prefix_aspa_vstate(p) == ASPA_NEVER_KNOWN) {
4124 			aspa_vstate = ASPA_NEVER_KNOWN;
4125 		} else {
4126 			if (asp->aspa_generation != rde_aspa_generation) {
4127 				asp->aspa_generation = rde_aspa_generation;
4128 				aspa_validation(rde_aspa, asp->aspath,
4129 				    &asp->aspa_state);
4130 			}
4131 			aspa_vstate = rde_aspa_validity(peer, asp, pt->aid);
4132 		}
4133 
4134 		if (roa_vstate == prefix_roa_vstate(p) &&
4135 		    aspa_vstate == prefix_aspa_vstate(p))
4136 			continue;
4137 
4138 		prefix_set_vstate(p, roa_vstate, aspa_vstate);
4139 		/* skip announced networks, they are never filtered */
4140 		if (asp->flags & F_PREFIX_ANNOUNCED)
4141 			continue;
4142 
4143 		for (i = RIB_LOC_START; i < rib_size; i++) {
4144 			rib = rib_byid(i);
4145 			if (rib == NULL)
4146 				continue;
4147 
4148 			rde_filterstate_prep(&state, p);
4149 			action = rde_filter(rib->in_rules, peer, peer, &prefix,
4150 			    pt->prefixlen, &state);
4151 
4152 			if (action == ACTION_ALLOW) {
4153 				/* update Local-RIB */
4154 				prefix_update(rib, peer, p->path_id,
4155 				    p->path_id_tx, &state, 0,
4156 				    &prefix, pt->prefixlen);
4157 			} else if (conf->filtered_in_locrib &&
4158 			    i == RIB_LOC_START) {
4159 				prefix_update(rib, peer, p->path_id,
4160 				    p->path_id_tx, &state, 1,
4161 				    &prefix, pt->prefixlen);
4162 			} else {
4163 				/* remove from Local-RIB */
4164 				prefix_withdraw(rib, peer, p->path_id, &prefix,
4165 				    pt->prefixlen);
4166 			}
4167 
4168 			rde_filterstate_clean(&state);
4169 		}
4170 	}
4171 }
4172 
4173 static int rpki_update_pending;
4174 
4175 static void
4176 rde_rpki_softreload_done(void *arg, uint8_t aid)
4177 {
4178 	/* the roa update is done */
4179 	log_info("RPKI softreload done");
4180 	rpki_update_pending = 0;
4181 }
4182 
4183 static void
4184 rde_rpki_reload(void)
4185 {
4186 	if (rpki_update_pending) {
4187 		log_info("RPKI softreload skipped, old still running");
4188 		return;
4189 	}
4190 
4191 	rpki_update_pending = 1;
4192 	if (rib_dump_new(RIB_ADJ_IN, AID_UNSPEC, RDE_RUNNER_ROUNDS,
4193 	    rib_byid(RIB_ADJ_IN), rde_rpki_softreload,
4194 	    rde_rpki_softreload_done, NULL) == -1)
4195 		fatal("%s: rib_dump_new", __func__);
4196 }
4197 
4198 static int
4199 rde_roa_reload(void)
4200 {
4201 	struct rde_prefixset roa_old;
4202 
4203 	if (rpki_update_pending) {
4204 		trie_free(&roa_new.th);	/* can't use new roa table */
4205 		return 1;		/* force call to rde_rpki_reload */
4206 	}
4207 
4208 	roa_old = rde_roa;
4209 	rde_roa = roa_new;
4210 	memset(&roa_new, 0, sizeof(roa_new));
4211 
4212 	/* check if roa changed */
4213 	if (trie_equal(&rde_roa.th, &roa_old.th)) {
4214 		rde_roa.lastchange = roa_old.lastchange;
4215 		trie_free(&roa_old.th);	/* old roa no longer needed */
4216 		return 0;
4217 	}
4218 
4219 	rde_roa.lastchange = getmonotime();
4220 	trie_free(&roa_old.th);		/* old roa no longer needed */
4221 
4222 	log_debug("ROA change: reloading Adj-RIB-In");
4223 	return 1;
4224 }
4225 
4226 static int
4227 rde_aspa_reload(void)
4228 {
4229 	struct rde_aspa *aspa_old;
4230 
4231 	if (rpki_update_pending) {
4232 		aspa_table_free(aspa_new);	/* can't use new aspa table */
4233 		aspa_new = NULL;
4234 		return 1;			/* rpki_client_relaod warns */
4235 	}
4236 
4237 	aspa_old = rde_aspa;
4238 	rde_aspa = aspa_new;
4239 	aspa_new = NULL;
4240 
4241 	/* check if aspa changed */
4242 	if (aspa_table_equal(rde_aspa, aspa_old)) {
4243 		aspa_table_unchanged(rde_aspa, aspa_old);
4244 		aspa_table_free(aspa_old);	/* old aspa no longer needed */
4245 		return 0;
4246 	}
4247 
4248 	aspa_table_free(aspa_old);		/* old aspa no longer needed */
4249 	log_debug("ASPA change: reloading Adj-RIB-In");
4250 	rde_aspa_generation++;
4251 	return 1;
4252 }
4253 
4254 /*
4255  * generic helper function
4256  */
4257 uint32_t
4258 rde_local_as(void)
4259 {
4260 	return (conf->as);
4261 }
4262 
4263 int
4264 rde_decisionflags(void)
4265 {
4266 	return (conf->flags & BGPD_FLAG_DECISION_MASK);
4267 }
4268 
4269 /* End-of-RIB marker, RFC 4724 */
4270 static void
4271 rde_peer_recv_eor(struct rde_peer *peer, uint8_t aid)
4272 {
4273 	peer->stats.prefix_rcvd_eor++;
4274 	peer->recv_eor |= 1 << aid;
4275 
4276 	/*
4277 	 * First notify SE to avert a possible race with the restart timeout.
4278 	 * If the timeout fires before this imsg is processed by the SE it will
4279 	 * result in the same operation since the timeout issues a FLUSH which
4280 	 * does the same as the RESTARTED action (flushing stale routes).
4281 	 * The logic in the SE is so that only one of FLUSH or RESTARTED will
4282 	 * be sent back to the RDE and so peer_flush is only called once.
4283 	 */
4284 	if (imsg_compose(ibuf_se, IMSG_SESSION_RESTARTED, peer->conf.id,
4285 	    0, -1, &aid, sizeof(aid)) == -1)
4286 		fatal("imsg_compose error while receiving EoR");
4287 
4288 	log_peer_info(&peer->conf, "received %s EOR marker",
4289 	    aid2str(aid));
4290 }
4291 
4292 static void
4293 rde_peer_send_eor(struct rde_peer *peer, uint8_t aid)
4294 {
4295 	uint16_t	afi;
4296 	uint8_t		safi;
4297 
4298 	peer->stats.prefix_sent_eor++;
4299 	peer->sent_eor |= 1 << aid;
4300 
4301 	if (aid == AID_INET) {
4302 		u_char null[4];
4303 
4304 		memset(&null, 0, 4);
4305 		if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
4306 		    0, -1, &null, 4) == -1)
4307 			fatal("imsg_compose error while sending EoR");
4308 	} else {
4309 		uint16_t	i;
4310 		u_char		buf[10];
4311 
4312 		if (aid2afi(aid, &afi, &safi) == -1)
4313 			fatalx("peer_send_eor: bad AID");
4314 
4315 		i = 0;	/* v4 withdrawn len */
4316 		memcpy(&buf[0], &i, sizeof(i));
4317 		i = htons(6);	/* path attr len */
4318 		memcpy(&buf[2], &i, sizeof(i));
4319 		buf[4] = ATTR_OPTIONAL;
4320 		buf[5] = ATTR_MP_UNREACH_NLRI;
4321 		buf[6] = 3;	/* withdrawn len */
4322 		i = htons(afi);
4323 		memcpy(&buf[7], &i, sizeof(i));
4324 		buf[9] = safi;
4325 
4326 		if (imsg_compose(ibuf_se, IMSG_UPDATE, peer->conf.id,
4327 		    0, -1, &buf, 10) == -1)
4328 			fatal("%s %d imsg_compose error", __func__, __LINE__);
4329 	}
4330 
4331 	log_peer_info(&peer->conf, "sending %s EOR marker",
4332 	    aid2str(aid));
4333 }
4334 
4335 void
4336 rde_peer_send_rrefresh(struct rde_peer *peer, uint8_t aid, uint8_t subtype)
4337 {
4338 	struct route_refresh rr;
4339 
4340 	/* not strickly needed, the SE checks as well */
4341 	if (peer->capa.enhanced_rr == 0)
4342 		return;
4343 
4344 	switch (subtype) {
4345 	case ROUTE_REFRESH_END_RR:
4346 	case ROUTE_REFRESH_BEGIN_RR:
4347 		break;
4348 	default:
4349 		fatalx("%s unexpected subtype %d", __func__, subtype);
4350 	}
4351 
4352 	rr.aid = aid;
4353 	rr.subtype = subtype;
4354 
4355 	if (imsg_compose(ibuf_se, IMSG_REFRESH, peer->conf.id, 0, -1,
4356 	    &rr, sizeof(rr)) == -1)
4357 		fatal("%s %d imsg_compose error", __func__, __LINE__);
4358 
4359 	log_peer_info(&peer->conf, "sending %s %s marker",
4360 	    aid2str(aid), subtype == ROUTE_REFRESH_END_RR ? "EoRR" : "BoRR");
4361 }
4362 
4363 /*
4364  * network announcement stuff
4365  */
4366 void
4367 network_add(struct network_config *nc, struct filterstate *state)
4368 {
4369 	struct l3vpn		*vpn;
4370 	struct filter_set_head	*vpnset = NULL;
4371 	struct in_addr		 prefix4;
4372 	struct in6_addr		 prefix6;
4373 	uint32_t		 path_id_tx;
4374 	uint16_t		 i;
4375 	uint8_t			 vstate;
4376 
4377 	if (nc->rd != 0) {
4378 		SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
4379 			if (vpn->rd != nc->rd)
4380 				continue;
4381 			switch (nc->prefix.aid) {
4382 			case AID_INET:
4383 				prefix4 = nc->prefix.v4;
4384 				memset(&nc->prefix, 0, sizeof(nc->prefix));
4385 				nc->prefix.aid = AID_VPN_IPv4;
4386 				nc->prefix.rd = vpn->rd;
4387 				nc->prefix.v4 = prefix4;
4388 				nc->prefix.labellen = 3;
4389 				nc->prefix.labelstack[0] =
4390 				    (vpn->label >> 12) & 0xff;
4391 				nc->prefix.labelstack[1] =
4392 				    (vpn->label >> 4) & 0xff;
4393 				nc->prefix.labelstack[2] =
4394 				    (vpn->label << 4) & 0xf0;
4395 				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
4396 				vpnset = &vpn->export;
4397 				break;
4398 			case AID_INET6:
4399 				prefix6 = nc->prefix.v6;
4400 				memset(&nc->prefix, 0, sizeof(nc->prefix));
4401 				nc->prefix.aid = AID_VPN_IPv6;
4402 				nc->prefix.rd = vpn->rd;
4403 				nc->prefix.v6 = prefix6;
4404 				nc->prefix.labellen = 3;
4405 				nc->prefix.labelstack[0] =
4406 				    (vpn->label >> 12) & 0xff;
4407 				nc->prefix.labelstack[1] =
4408 				    (vpn->label >> 4) & 0xff;
4409 				nc->prefix.labelstack[2] =
4410 				    (vpn->label << 4) & 0xf0;
4411 				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
4412 				vpnset = &vpn->export;
4413 				break;
4414 			default:
4415 				log_warnx("unable to VPNize prefix");
4416 				filterset_free(&nc->attrset);
4417 				return;
4418 			}
4419 			break;
4420 		}
4421 		if (vpn == NULL) {
4422 			log_warnx("network_add: "
4423 			    "prefix %s/%u in non-existing l3vpn %s",
4424 			    log_addr(&nc->prefix), nc->prefixlen,
4425 			    log_rd(nc->rd));
4426 			return;
4427 		}
4428 	}
4429 
4430 	rde_apply_set(&nc->attrset, peerself, peerself, state, nc->prefix.aid);
4431 	if (vpnset)
4432 		rde_apply_set(vpnset, peerself, peerself, state,
4433 		    nc->prefix.aid);
4434 
4435 	vstate = rde_roa_validity(&rde_roa, &nc->prefix, nc->prefixlen,
4436 	    aspath_origin(state->aspath.aspath));
4437 	rde_filterstate_set_vstate(state, vstate, ASPA_NEVER_KNOWN);
4438 
4439 	path_id_tx = pathid_assign(peerself, 0, &nc->prefix, nc->prefixlen);
4440 	if (prefix_update(rib_byid(RIB_ADJ_IN), peerself, 0, path_id_tx,
4441 	    state, 0, &nc->prefix, nc->prefixlen) == 1)
4442 		peerself->stats.prefix_cnt++;
4443 	for (i = RIB_LOC_START; i < rib_size; i++) {
4444 		struct rib *rib = rib_byid(i);
4445 		if (rib == NULL)
4446 			continue;
4447 		rde_update_log("announce", i, peerself,
4448 		    state->nexthop ? &state->nexthop->exit_nexthop : NULL,
4449 		    &nc->prefix, nc->prefixlen);
4450 		prefix_update(rib, peerself, 0, path_id_tx, state, 0,
4451 		    &nc->prefix, nc->prefixlen);
4452 	}
4453 	filterset_free(&nc->attrset);
4454 }
4455 
4456 void
4457 network_delete(struct network_config *nc)
4458 {
4459 	struct l3vpn	*vpn;
4460 	struct in_addr	 prefix4;
4461 	struct in6_addr	 prefix6;
4462 	uint32_t	 i;
4463 
4464 	if (nc->rd) {
4465 		SIMPLEQ_FOREACH(vpn, &conf->l3vpns, entry) {
4466 			if (vpn->rd != nc->rd)
4467 				continue;
4468 			switch (nc->prefix.aid) {
4469 			case AID_INET:
4470 				prefix4 = nc->prefix.v4;
4471 				memset(&nc->prefix, 0, sizeof(nc->prefix));
4472 				nc->prefix.aid = AID_VPN_IPv4;
4473 				nc->prefix.rd = vpn->rd;
4474 				nc->prefix.v4 = prefix4;
4475 				nc->prefix.labellen = 3;
4476 				nc->prefix.labelstack[0] =
4477 				    (vpn->label >> 12) & 0xff;
4478 				nc->prefix.labelstack[1] =
4479 				    (vpn->label >> 4) & 0xff;
4480 				nc->prefix.labelstack[2] =
4481 				    (vpn->label << 4) & 0xf0;
4482 				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
4483 				break;
4484 			case AID_INET6:
4485 				prefix6 = nc->prefix.v6;
4486 				memset(&nc->prefix, 0, sizeof(nc->prefix));
4487 				nc->prefix.aid = AID_VPN_IPv6;
4488 				nc->prefix.rd = vpn->rd;
4489 				nc->prefix.v6 = prefix6;
4490 				nc->prefix.labellen = 3;
4491 				nc->prefix.labelstack[0] =
4492 				    (vpn->label >> 12) & 0xff;
4493 				nc->prefix.labelstack[1] =
4494 				    (vpn->label >> 4) & 0xff;
4495 				nc->prefix.labelstack[2] =
4496 				    (vpn->label << 4) & 0xf0;
4497 				nc->prefix.labelstack[2] |= BGP_MPLS_BOS;
4498 				break;
4499 			default:
4500 				log_warnx("unable to VPNize prefix");
4501 				return;
4502 			}
4503 		}
4504 	}
4505 
4506 	for (i = RIB_LOC_START; i < rib_size; i++) {
4507 		struct rib *rib = rib_byid(i);
4508 		if (rib == NULL)
4509 			continue;
4510 		if (prefix_withdraw(rib, peerself, 0, &nc->prefix,
4511 		    nc->prefixlen))
4512 			rde_update_log("withdraw announce", i, peerself,
4513 			    NULL, &nc->prefix, nc->prefixlen);
4514 	}
4515 	if (prefix_withdraw(rib_byid(RIB_ADJ_IN), peerself, 0, &nc->prefix,
4516 	    nc->prefixlen))
4517 		peerself->stats.prefix_cnt--;
4518 }
4519 
4520 static void
4521 network_dump_upcall(struct rib_entry *re, void *ptr)
4522 {
4523 	struct prefix		*p;
4524 	struct rde_aspath	*asp;
4525 	struct kroute_full	 kf;
4526 	struct bgpd_addr	 addr;
4527 	struct rde_dump_ctx	*ctx = ptr;
4528 
4529 	TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
4530 		asp = prefix_aspath(p);
4531 		if (!(asp->flags & F_PREFIX_ANNOUNCED))
4532 			continue;
4533 		pt_getaddr(p->pt, &addr);
4534 
4535 		memset(&kf, 0, sizeof(kf));
4536 		kf.prefix = addr;
4537 		kf.prefixlen = p->pt->prefixlen;
4538 		if (prefix_nhvalid(p) && prefix_nexthop(p) != NULL)
4539 			kf.nexthop = prefix_nexthop(p)->true_nexthop;
4540 		else
4541 			kf.nexthop.aid = kf.prefix.aid;
4542 		if ((asp->flags & F_ANN_DYNAMIC) == 0)
4543 			kf.flags = F_STATIC;
4544 		if (imsg_compose(ibuf_se_ctl, IMSG_CTL_SHOW_NETWORK, 0,
4545 		    ctx->req.pid, -1, &kf, sizeof(kf)) == -1)
4546 			log_warnx("%s: imsg_compose error", __func__);
4547 	}
4548 }
4549 
4550 static void
4551 network_flush_upcall(struct rib_entry *re, void *ptr)
4552 {
4553 	struct bgpd_addr addr;
4554 	struct prefix *p;
4555 	uint32_t i;
4556 	uint8_t prefixlen;
4557 
4558 	p = prefix_bypeer(re, peerself, 0);
4559 	if (p == NULL)
4560 		return;
4561 	if ((prefix_aspath(p)->flags & F_ANN_DYNAMIC) != F_ANN_DYNAMIC)
4562 		return;
4563 
4564 	pt_getaddr(re->prefix, &addr);
4565 	prefixlen = re->prefix->prefixlen;
4566 
4567 	for (i = RIB_LOC_START; i < rib_size; i++) {
4568 		struct rib *rib = rib_byid(i);
4569 		if (rib == NULL)
4570 			continue;
4571 		if (prefix_withdraw(rib, peerself, 0, &addr, prefixlen) == 1)
4572 			rde_update_log("flush announce", i, peerself,
4573 			    NULL, &addr, prefixlen);
4574 	}
4575 
4576 	if (prefix_withdraw(rib_byid(RIB_ADJ_IN), peerself, 0, &addr,
4577 	    prefixlen) == 1)
4578 		peerself->stats.prefix_cnt--;
4579 }
4580 
4581 /*
4582  * flowspec announcement stuff
4583  */
4584 void
4585 flowspec_add(struct flowspec *f, struct filterstate *state,
4586     struct filter_set_head *attrset)
4587 {
4588 	struct pt_entry *pte;
4589 	uint32_t path_id_tx;
4590 
4591 	rde_apply_set(attrset, peerself, peerself, state, f->aid);
4592 	rde_filterstate_set_vstate(state, ROA_NOTFOUND, ASPA_NEVER_KNOWN);
4593 	path_id_tx = peerself->path_id_tx; /* XXX should use pathid_assign() */
4594 
4595 	pte = pt_get_flow(f);
4596 	if (pte == NULL)
4597 		pte = pt_add_flow(f);
4598 
4599 	if (prefix_flowspec_update(peerself, state, pte, path_id_tx) == 1)
4600 		peerself->stats.prefix_cnt++;
4601 }
4602 
4603 void
4604 flowspec_delete(struct flowspec *f)
4605 {
4606 	struct pt_entry *pte;
4607 
4608 	pte = pt_get_flow(f);
4609 	if (pte == NULL)
4610 		return;
4611 
4612 	if (prefix_flowspec_withdraw(peerself, pte) == 1)
4613 		peerself->stats.prefix_cnt--;
4614 }
4615 
4616 static void
4617 flowspec_flush_upcall(struct rib_entry *re, void *ptr)
4618 {
4619 	struct prefix *p;
4620 
4621 	p = prefix_bypeer(re, peerself, 0);
4622 	if (p == NULL)
4623 		return;
4624 	if ((prefix_aspath(p)->flags & F_ANN_DYNAMIC) != F_ANN_DYNAMIC)
4625 		return;
4626 	if (prefix_flowspec_withdraw(peerself, re->prefix) == 1)
4627 		peerself->stats.prefix_cnt--;
4628 }
4629 
4630 static void
4631 flowspec_dump_upcall(struct rib_entry *re, void *ptr)
4632 {
4633 	pid_t *pid = ptr;
4634 	struct prefix		*p;
4635 	struct rde_aspath	*asp;
4636 	struct rde_community	*comm;
4637 	struct flowspec		ff;
4638 	struct ibuf		*ibuf;
4639 	uint8_t			*flow;
4640 	int			len;
4641 
4642 	TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
4643 		asp = prefix_aspath(p);
4644 		if (!(asp->flags & F_PREFIX_ANNOUNCED))
4645 			continue;
4646 		comm = prefix_communities(p);
4647 
4648 		len = pt_getflowspec(p->pt, &flow);
4649 
4650 		memset(&ff, 0, sizeof(ff));
4651 		ff.aid = p->pt->aid;
4652 		ff.len = len;
4653 		if ((asp->flags & F_ANN_DYNAMIC) == 0)
4654 			ff.flags = F_STATIC;
4655 		if ((ibuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_FLOWSPEC, 0,
4656 		    *pid, FLOWSPEC_SIZE + len)) == NULL)
4657 				continue;
4658 		if (imsg_add(ibuf, &ff, FLOWSPEC_SIZE) == -1 ||
4659 		    imsg_add(ibuf, flow, len) == -1)
4660 			continue;
4661 		imsg_close(ibuf_se_ctl, ibuf);
4662 		if (comm->nentries > 0) {
4663 			if (imsg_compose(ibuf_se_ctl,
4664 			    IMSG_CTL_SHOW_RIB_COMMUNITIES, 0, *pid, -1,
4665 			    comm->communities,
4666 			    comm->nentries * sizeof(struct community)) == -1)
4667 				continue;
4668 		}
4669 	}
4670 }
4671 
4672 static void
4673 flowspec_dump_done(void *ptr, uint8_t aid)
4674 {
4675 	pid_t *pid = ptr;
4676 
4677 	imsg_compose(ibuf_se_ctl, IMSG_CTL_END, 0, *pid, -1, NULL, 0);
4678 }
4679 
4680 
4681 /* clean up */
4682 void
4683 rde_shutdown(void)
4684 {
4685 	/*
4686 	 * the decision process is turned off if rde_quit = 1 and
4687 	 * rde_shutdown depends on this.
4688 	 */
4689 
4690 	/* First all peers go down */
4691 	peer_shutdown();
4692 
4693 	/* free filters */
4694 	filterlist_free(out_rules);
4695 	filterlist_free(out_rules_tmp);
4696 
4697 	/* kill the VPN configs */
4698 	free_l3vpns(&conf->l3vpns);
4699 
4700 	/* now check everything */
4701 	rib_shutdown();
4702 	nexthop_shutdown();
4703 	path_shutdown();
4704 	attr_shutdown();
4705 	pt_shutdown();
4706 }
4707 
4708 struct rde_prefixset *
4709 rde_find_prefixset(char *name, struct rde_prefixset_head *p)
4710 {
4711 	struct rde_prefixset *ps;
4712 
4713 	SIMPLEQ_FOREACH(ps, p, entry) {
4714 		if (!strcmp(ps->name, name))
4715 			return (ps);
4716 	}
4717 	return (NULL);
4718 }
4719 
4720 void
4721 rde_mark_prefixsets_dirty(struct rde_prefixset_head *psold,
4722     struct rde_prefixset_head *psnew)
4723 {
4724 	struct rde_prefixset *new, *old;
4725 
4726 	SIMPLEQ_FOREACH(new, psnew, entry) {
4727 		if ((psold == NULL) ||
4728 		    (old = rde_find_prefixset(new->name, psold)) == NULL) {
4729 			new->dirty = 1;
4730 			new->lastchange = getmonotime();
4731 		} else {
4732 			if (trie_equal(&new->th, &old->th) == 0) {
4733 				new->dirty = 1;
4734 				new->lastchange = getmonotime();
4735 			} else
4736 				new->lastchange = old->lastchange;
4737 		}
4738 	}
4739 }
4740 
4741 uint8_t
4742 rde_roa_validity(struct rde_prefixset *ps, struct bgpd_addr *prefix,
4743     uint8_t plen, uint32_t as)
4744 {
4745 	int r;
4746 
4747 	r = trie_roa_check(&ps->th, prefix, plen, as);
4748 	return (r & ROA_MASK);
4749 }
4750 
4751 static int
4752 ovs_match(struct prefix *p, uint32_t flag)
4753 {
4754 	if (flag & (F_CTL_OVS_VALID|F_CTL_OVS_INVALID|F_CTL_OVS_NOTFOUND)) {
4755 		switch (prefix_roa_vstate(p)) {
4756 		case ROA_VALID:
4757 			if (!(flag & F_CTL_OVS_VALID))
4758 				return 0;
4759 			break;
4760 		case ROA_INVALID:
4761 			if (!(flag & F_CTL_OVS_INVALID))
4762 				return 0;
4763 			break;
4764 		case ROA_NOTFOUND:
4765 			if (!(flag & F_CTL_OVS_NOTFOUND))
4766 				return 0;
4767 			break;
4768 		default:
4769 			break;
4770 		}
4771 	}
4772 
4773 	return 1;
4774 }
4775 
4776 static int
4777 avs_match(struct prefix *p, uint32_t flag)
4778 {
4779 	if (flag & (F_CTL_AVS_VALID|F_CTL_AVS_INVALID|F_CTL_AVS_UNKNOWN)) {
4780 		switch (prefix_aspa_vstate(p) & ASPA_MASK) {
4781 		case ASPA_VALID:
4782 			if (!(flag & F_CTL_AVS_VALID))
4783 				return 0;
4784 			break;
4785 		case ASPA_INVALID:
4786 			if (!(flag & F_CTL_AVS_INVALID))
4787 				return 0;
4788 			break;
4789 		case ASPA_UNKNOWN:
4790 			if (!(flag & F_CTL_AVS_UNKNOWN))
4791 				return 0;
4792 			break;
4793 		default:
4794 			break;
4795 		}
4796 	}
4797 
4798 	return 1;
4799 }
4800