/*	$OpenBSD: rde.c,v 1.101 2016/06/06 15:56:22 benno Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>		/* for clock_gettime(2) */
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf.h"
#include "ospfd.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct iface	*rde_asext_lookup(u_int32_t, int);
void		 rde_asext_get(struct kroute *);
void		 rde_asext_put(struct kroute *);
void		 rde_asext_free(void);
struct lsa	*orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
struct imsgev		*iev_ospfe;
struct imsgev		*iev_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */
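	/*
	 * This handler was registered with signal_set()/signal_add() in
	 * rde(), so libevent delivers it from the event loop rather than
	 * in async-signal context; calling non-reentrant code such as
	 * rde_shutdown() here is therefore safe.
	 */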

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct area		*area;
	struct iface		*iface;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	/* cleanup a bit */
	kif_clear();

	rdeconf = xconf;

	if ((pw = getpwnam(OSPFD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

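	/*
	 * pledge("stdio") limits the process to basic I/O on descriptors
	 * that are already open; the RDE only needs the imsg pipes that
	 * are set up below, so nothing more is requested.
	 */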
	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
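	/*
	 * SIGPIPE is ignored so that a write to a dead pipe returns EPIPE
	 * instead of killing the process; SIGHUP is ignored because config
	 * reloads arrive as IMSG_RECONF_* messages from the parent, not as
	 * a signal.
	 */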
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			md_list_clr(&iface->auth_md_list);

	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_asext_free();
	rde_nbr_free();

	msgbuf_clear(&iev_ospfe->ibuf.w);
	free(iev_ospfe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}

/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, error, shut = 0, verbose;
	u_int16_t		 l;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

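	/*
	 * Take the monotonic clock for the MIN_LS_ARRIVAL comparisons
	 * below; unlike the wall clock it cannot jump and thus cannot
	 * fake LSA arrival intervals.
	 */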
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_NEIGHBOR_CAPA:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t))
				fatalx("invalid size of OE request");
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;
			nbr->capa_options = *(u_int8_t *)imsg.data;
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			error = 0;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
				    nbr->area->stub) {
					error = 1;
					break;
				}
				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0 && !error)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			if (!error)
				imsg_compose_event(iev_ospfe, IMSG_DD_END,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			else
				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					log_debug("rde_dispatch_imsg: "
					    "requested LSA not found");
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure" in
				 * RFC 2328.  We are violating the RFC here
				 * because it does not make sense to reset a
				 * session just because an equal LSA is
				 * already in the table.  Only if the LSA sent
				 * is older than the one in the table should
				 * we reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal, send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
		case IMSG_CTL_SHOW_DB_OPAQ:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct kroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_get(&rr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_put(&rr);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred = malloc(sizeof(struct redistribute))) == NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);
			RB_INIT(&niface->lsa_tree);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

struct area *
rde_backbone_area(void)
{
	struct in_addr	id;

	id.s_addr = INADDR_ANY;

	return (area_find(rdeconf, id));
}

void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

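	/*
	 * Pack all usable nexthops of this route into the single
	 * IMSG_KROUTE_CHANGE message created above; the parent process
	 * presumably installs them together as one multipath route.
	 */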
	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}
	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}

void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.prefixlen = r->prefixlen;

	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
	    &kr, sizeof(kr));
}

void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree) {
		sumctl.num_ext_lsa++;
		sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree) {
		sumareactl.num_lsa++;
		sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
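/*
 * The AND above yields a valid array index only because rde_nbr_init()
 * rounds the table size up to a power of two, making hashmask an
 * all-ones bit mask.
 */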

void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_iface_del(struct iface *iface)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr, *xnbr;
	u_int32_t		 i;

	for (i = 0; i <= rdenbrtable.hashmask; i++) {
		head = &rdenbrtable.hashtbl[i];
		LIST_FOREACH_SAFE(nbr, head, hash, xnbr) {
			if (nbr->iface == iface)
				rde_nbr_del(nbr);
		}
	}
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr		*nbr;
	int			 checkall = 0;

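	/* a NULL area means: check all areas for loading neighbors */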
	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr		*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this should never happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
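/*
 * Every redistributed kernel route is tracked in an RB tree of
 * asext_nodes, keyed by prefix and prefix length.  ls_id remembers
 * under which LSA ID the route is currently announced, since
 * overlapping prefixes cannot all use their network address as ls_id.
 */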
struct asext_node {
	RB_ENTRY(asext_node)    entry;
	struct kroute		r;
	u_int32_t		ls_id;
};

static __inline int	asext_compare(struct asext_node *, struct asext_node *);
struct asext_node	*asext_find(u_int32_t, u_int8_t);

RB_HEAD(asext_tree, asext_node)		ast;
RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare)
RB_GENERATE(asext_tree, asext_node, entry, asext_compare)

static __inline int
asext_compare(struct asext_node *a, struct asext_node *b)
{
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
		return (-1);
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
		return (1);
	if (a->r.prefixlen < b->r.prefixlen)
		return (-1);
	if (a->r.prefixlen > b->r.prefixlen)
		return (1);
	return (0);
}

struct asext_node *
asext_find(u_int32_t addr, u_int8_t prefixlen)
{
	struct asext_node	a;

	a.r.prefix.s_addr = addr;
	a.r.prefixlen = prefixlen;

	return (RB_FIND(asext_tree, &ast, &a));
}

struct iface *
rde_asext_lookup(u_int32_t prefix, int plen)
{
	struct area	*area;
	struct iface	*iface;

	LIST_FOREACH(area, &rdeconf->area_list, entry) {
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    (prefix & iface->mask.s_addr) && (plen == -1 ||
			    iface->mask.s_addr == prefixlen2mask(plen)))
				return (iface);
		}
	}
	return (NULL);
}

void
rde_asext_get(struct kroute *rr)
{
	struct asext_node	*an, *oan;
	struct vertex		*v;
	struct lsa		*lsa;
	u_int32_t		 mask;

	if (rde_asext_lookup(rr->prefix.s_addr, rr->prefixlen)) {
		/* already announced as (stub) net LSA */
		log_debug("rde_asext_get: %s/%d is net LSA",
		    inet_ntoa(rr->prefix), rr->prefixlen);
		return;
	}

	an = asext_find(rr->prefix.s_addr, rr->prefixlen);
	if (an == NULL) {
		if ((an = calloc(1, sizeof(*an))) == NULL)
			fatal("rde_asext_get");
		bcopy(rr, &an->r, sizeof(*rr));
		an->ls_id = rr->prefix.s_addr;
		RB_INSERT(asext_tree, &ast, an);
	} else {
		/* the bcopy does not change the lookup key so it is safe */
		bcopy(rr, &an->r, sizeof(*rr));
	}

	/*
	 * ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a unique ls_id needs to be found.
	 * The algorithm will change the ls_id of the less specific
	 * route. E.g. in the case of 10.0.0.0/16 and 10.0.0.0/24
	 * 10.0.0.0/24 will get the 10.0.0.0 ls_id and 10.0.0.0/16
	 * will change the ls_id to 10.0.255.255 and see if that is unique.
	 */
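	/*
	 * In other words: the more specific route keeps the plain network
	 * address as ls_id; the less specific one is first moved to its
	 * broadcast address (prefix | ~mask) and from there decremented
	 * until a free ls_id turns up, or until we wrap back onto the
	 * prefix itself and give up.
	 */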
	oan = an;
	mask = prefixlen2mask(oan->r.prefixlen);
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
	    rdeconf->rtr_id.s_addr);
	while (v && v->lsa->data.asext.mask != mask) {
		/* conflict needs to be resolved. change less specific lsa */
		if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) {
			/* lsa to insert is more specific, fix other lsa */
			mask = v->lsa->data.asext.mask;
			oan = asext_find(v->lsa->hdr.ls_id & mask,
			    mask2prefixlen(mask));
			if (oan == NULL)
				fatalx("as-ext LSA DB corrupted");
		}
		/* oan is less specific and needs new ls_id */
		if (oan->ls_id == oan->r.prefix.s_addr)
			oan->ls_id |= ~mask;
		else {
			u_int32_t	tmp = ntohl(oan->ls_id);
			oan->ls_id = htonl(tmp - 1);
			if (oan->ls_id == oan->r.prefix.s_addr) {
				log_warnx("prefix %s/%d can not be "
				    "redistributed, no unique ls_id found.",
				    inet_ntoa(rr->prefix), rr->prefixlen);
				RB_REMOVE(asext_tree, &ast, an);
				free(an);
				return;
			}
		}
		mask = prefixlen2mask(oan->r.prefixlen);
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
	}

	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	lsa = orig_asext_lsa(rr, an->ls_id, DEFAULT_AGE);
	lsa_merge(nbrself, lsa, v);

	if (oan != an) {
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
		lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE);
		lsa_merge(nbrself, lsa, v);
	}
}

void
rde_asext_put(struct kroute *rr)
{
	struct asext_node	*an;
	struct vertex		*v;
	struct lsa		*lsa;

	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA asext_find() will fail and nothing will happen.
	 */
	an = asext_find(rr->prefix.s_addr, rr->prefixlen);
	if (an == NULL) {
		log_debug("rde_asext_put: NO SUCH LSA %s/%d",
		    inet_ntoa(rr->prefix), rr->prefixlen);
		return;
	}

	/*
	 * inherit metric and ext_tag from the current LSA,
	 * some routers don't like to get withdraws that are
	 * different from what they have in their table.
	 */
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	if (v != NULL) {
		rr->metric = ntohl(v->lsa->data.asext.metric);
		rr->ext_tag = ntohl(v->lsa->data.asext.ext_tag);
	}

	/* remove by reflooding with MAX_AGE */
	lsa = orig_asext_lsa(rr, an->ls_id, MAX_AGE);
	lsa_merge(nbrself, lsa, v);

	RB_REMOVE(asext_tree, &ast, an);
	free(an);
}

void
rde_asext_free(void)
{
	struct asext_node	*an, *nan;

	for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) {
		nan = RB_NEXT(asext_tree, &ast, an);
		RB_REMOVE(asext_tree, &ast, an);
		free(an);
	}
}

struct lsa *
orig_asext_lsa(struct kroute *rr, u_int32_t ls_id, u_int16_t age)
{
	struct lsa	*lsa;
	struct iface	*iface;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(rr->prefix), rr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	/* update of seqnum is done by lsa_merge */
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	lsa->hdr.ls_id = ls_id;
	lsa->data.asext.mask = prefixlen2mask(rr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * in other cases we may announce the true nexthop if the
	 * nexthop is reachable via an OSPF enabled interface but only
	 * broadcast & NBMA interfaces are considered in that case.
	 * It does not make sense to announce the nexthop of a point-to-point
	 * link since the traffic has to go through this box anyway.
	 * Some implementations actually check that there are multiple
	 * neighbors on the particular segment, we skip that check.
	 */
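	/*
	 * In short: F_FORCED_NEXTHOP announces the configured nexthop,
	 * F_CONNECTED announces ourselves (fw_addr 0), a nexthop on a
	 * broadcast or NBMA OSPF interface is announced as is, and
	 * everything else again falls back to fw_addr 0.
	 */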
	iface = rde_asext_lookup(rr->nexthop.s_addr, -1);
	if (rr->flags & F_FORCED_NEXTHOP)
		lsa->data.asext.fw_addr = rr->nexthop.s_addr;
	else if (rr->flags & F_CONNECTED)
		lsa->data.asext.fw_addr = 0;
	else if (iface && (iface->type == IF_TYPE_BROADCAST ||
	    iface->type == IF_TYPE_NBMA))
		lsa->data.asext.fw_addr = rr->nexthop.s_addr;
	else
		lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(rr->metric);
	lsa->data.asext.ext_tag = htonl(rr->ext_tag);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}

/*
 * summary LSA stuff
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* route is invalid, lsa_remove_invalid_sums() will do the cleanup */
	if (rte->cost >= LS_INFINITY)
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		if (rn->invalid)
			continue;
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		break;
	}
	if (rn == NULL)
		/* all nexthops belong to this area or are invalid */
		return;

	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find_area(area, type, rte->prefix.s_addr,
		    rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}

struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1466