/*	$OpenBSD: rde.c,v 1.105 2016/09/28 14:39:52 krw Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf.h"
#include "ospfd.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

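/*
 * Big picture (a summary of what the code below sets up, not new
 * behavior): ospfd runs as three processes.  The privileged parent talks
 * to the kernel routing table, the OSPF engine (ospfe) speaks the
 * protocol on the wire, and this route decision engine (RDE) keeps the
 * LSA database, runs SPF and pushes the computed routes to the parent
 * over imsg pipes.
 */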
void		 rde_sig_handler(int, short, void *);
__dead void	 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct iface	*rde_asext_lookup(u_int32_t, int);
void		 rde_asext_get(struct kroute *);
void		 rde_asext_put(struct kroute *);
void		 rde_asext_free(void);
struct lsa	*orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
struct imsgev		*iev_ospfe;
struct imsgev		*iev_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct area		*area;
	struct iface		*iface;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	/* cleanup a bit */
	kif_clear();

	rdeconf = xconf;

	if ((pw = getpwnam(OSPFD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;
	log_procname = log_procnames[ospfd_process];

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			md_list_clr(&iface->auth_md_list);

	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

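/*
 * Drain and close both imsg pipes, free the LSA database, the neighbor
 * table and the config, then exit without returning to the event loop.
 */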
__dead void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	/* close pipes */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	close(iev_ospfe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_asext_free();
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}

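/*
 * Event handler for the pipe to the OSPF engine.  Processes neighbor
 * state changes, Database Description and Link State Request/Update/
 * MaxAge messages, and answers the ospfctl show requests relayed by
 * ospfe.
 */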
/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, error, shut = 0, verbose;
	u_int16_t		 l;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_NEIGHBOR_CAPA:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t))
				fatalx("invalid size of OE request");
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;
			nbr->capa_options = *(u_int8_t *)imsg.data;
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			error = 0;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
				    nbr->area->stub) {
					error = 1;
					break;
				}
				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0 && !error)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			if (!error)
				imsg_compose_event(iev_ospfe, IMSG_DD_END,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			else
				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					log_debug("rde_dispatch_imsg: "
					    "requested LSA not found");
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self-originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table should we reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA is equal, send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
		case IMSG_CTL_SHOW_DB_OPAQ:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

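/*
 * Event handler for the pipe to the parent process.  Handles kernel
 * route redistribution (IMSG_NETWORK_ADD/DEL) and the staged config
 * reload that is assembled piecewise and merged on IMSG_RECONF_END.
 */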
/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct kroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_get(&rr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_put(&rr);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred = malloc(sizeof(struct redistribute))) == NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);
			RB_INIT(&niface->lsa_tree);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

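/*
 * Dump one area for the database show requests: the area header first,
 * then every interface with its link-local LSAs, then the area-scoped
 * LSAs.
 */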
void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link-local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

struct area *
rde_backbone_area(void)
{
	struct in_addr	id;

	id.s_addr = INADDR_ANY;

	return (area_find(rdeconf, id));
}

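/*
 * Collect all valid, non-connected nexthops of a route into a single
 * IMSG_KROUTE_CHANGE message; if none remain, the route is withdrawn
 * via rde_send_delete_kroute() instead.
 */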
void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self-originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}
	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}

void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.prefixlen = r->prefixlen;

	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
	    &kr, sizeof(kr));
}

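/*
 * ospfctl "show summary" support: first the global counters, then one
 * IMSG_CTL_SHOW_SUM_AREA message per area with interface, adjacency and
 * LSA statistics.
 */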
void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree) {
		sumctl.num_ext_lsa++;
		sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree) {
		sumareactl.num_lsa++;
		sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

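/*
 * The neighbor table is a chained hash keyed by the peerid assigned by
 * ospfe.  rde_nbr_init() rounds the requested size up to the next power
 * of two so the bucket lookup reduces to a bitwise AND; e.g. a request
 * for 100 buckets allocates 128 and sets hashmask to 0x7f.
 */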
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_iface_del(struct iface *iface)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr, *xnbr;
	u_int32_t		 i;

	for (i = 0; i <= rdenbrtable.hashmask; i++) {
		head = &rdenbrtable.hashtbl[i];
		LIST_FOREACH_SAFE(nbr, head, hash, xnbr) {
			if (nbr->iface == iface)
				rde_nbr_del(nbr);
		}
	}
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr		*nbr;
	int			 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr		*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this should never happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
struct asext_node {
	RB_ENTRY(asext_node)    entry;
	struct kroute		r;
	u_int32_t		ls_id;
};

static __inline int	asext_compare(struct asext_node *, struct asext_node *);
struct asext_node	*asext_find(u_int32_t, u_int8_t);

RB_HEAD(asext_tree, asext_node)		ast;
RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare)
RB_GENERATE(asext_tree, asext_node, entry, asext_compare)

static __inline int
asext_compare(struct asext_node *a, struct asext_node *b)
{
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
		return (-1);
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
		return (1);
	if (a->r.prefixlen < b->r.prefixlen)
		return (-1);
	if (a->r.prefixlen > b->r.prefixlen)
		return (1);
	return (0);
}

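/*
 * Look up a redistributed route by prefix and prefix length.  Only the
 * fields examined by asext_compare() have to be set in the search key.
 */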
struct asext_node *
asext_find(u_int32_t addr, u_int8_t prefixlen)
{
	struct asext_node	a;

	a.r.prefix.s_addr = addr;
	a.r.prefixlen = prefixlen;

	return (RB_FIND(asext_tree, &ast, &a));
}

struct iface *
rde_asext_lookup(u_int32_t prefix, int plen)
{
	struct area	*area;
	struct iface	*iface;

	LIST_FOREACH(area, &rdeconf->area_list, entry) {
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    (prefix & iface->mask.s_addr) && (plen == -1 ||
			    iface->mask.s_addr == prefixlen2mask(plen)))
				return (iface);
		}
	}
	return (NULL);
}

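/*
 * Originate or refresh the AS-external (type 5) LSA for a kernel route
 * announced by the parent via IMSG_NETWORK_ADD.
 */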
void
rde_asext_get(struct kroute *rr)
{
	struct asext_node	*an, *oan;
	struct vertex		*v;
	struct lsa		*lsa;
	u_int32_t		 mask;

	if (rde_asext_lookup(rr->prefix.s_addr, rr->prefixlen)) {
		/* already announced as (stub) net LSA */
		log_debug("rde_asext_get: %s/%d is net LSA",
		    inet_ntoa(rr->prefix), rr->prefixlen);
		return;
	}

	an = asext_find(rr->prefix.s_addr, rr->prefixlen);
	if (an == NULL) {
		if ((an = calloc(1, sizeof(*an))) == NULL)
			fatal("rde_asext_get");
		bcopy(rr, &an->r, sizeof(*rr));
		an->ls_id = rr->prefix.s_addr;
		RB_INSERT(asext_tree, &ast, an);
	} else {
		/* the bcopy does not change the lookup key so it is safe */
		bcopy(rr, &an->r, sizeof(*rr));
	}

	/*
	 * ls_id must be unique; for overlapping routes this may
	 * not be true. In this case a unique ls_id needs to be found.
	 * The algorithm will change the ls_id of the less specific
	 * route. E.g. in the case of 10.0.0.0/16 and 10.0.0.0/24,
	 * 10.0.0.0/24 will get the 10.0.0.0 ls_id and 10.0.0.0/16
	 * will change the ls_id to 10.0.255.255 and see if that is unique.
	 */
	oan = an;
	mask = prefixlen2mask(oan->r.prefixlen);
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
	    rdeconf->rtr_id.s_addr);
	while (v && v->lsa->data.asext.mask != mask) {
		/* conflict needs to be resolved. change less specific lsa */
		if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) {
			/* lsa to insert is more specific, fix other lsa */
			mask = v->lsa->data.asext.mask;
			oan = asext_find(v->lsa->hdr.ls_id & mask,
			   mask2prefixlen(mask));
			if (oan == NULL)
				fatalx("as-ext LSA DB corrupted");
		}
		/* oan is less specific and needs new ls_id */
		if (oan->ls_id == oan->r.prefix.s_addr)
			oan->ls_id |= ~mask;
		else {
			u_int32_t	tmp = ntohl(oan->ls_id);
			oan->ls_id = htonl(tmp - 1);
			if (oan->ls_id == oan->r.prefix.s_addr) {
				log_warnx("prefix %s/%d cannot be "
				    "redistributed, no unique ls_id found.",
				    inet_ntoa(rr->prefix), rr->prefixlen);
				RB_REMOVE(asext_tree, &ast, an);
				free(an);
				return;
			}
		}
		mask = prefixlen2mask(oan->r.prefixlen);
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
	}

	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	lsa = orig_asext_lsa(rr, an->ls_id, DEFAULT_AGE);
	lsa_merge(nbrself, lsa, v);

	if (oan != an) {
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
		lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE);
		lsa_merge(nbrself, lsa, v);
	}
}

void
rde_asext_put(struct kroute *rr)
{
	struct asext_node	*an;
	struct vertex		*v;
	struct lsa		*lsa;

	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA, asext_find() will fail and nothing will happen.
	 */
	an = asext_find(rr->prefix.s_addr, rr->prefixlen);
	if (an == NULL) {
		log_debug("rde_asext_put: NO SUCH LSA %s/%d",
		    inet_ntoa(rr->prefix), rr->prefixlen);
		return;
	}

	/*
	 * Inherit metric and ext_tag from the current LSA; some routers
	 * don't like getting withdraws that differ from what they have
	 * in their table.
	 */
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	if (v != NULL) {
		rr->metric = ntohl(v->lsa->data.asext.metric);
		rr->ext_tag = ntohl(v->lsa->data.asext.ext_tag);
	}

	/* remove by reflooding with MAX_AGE */
	lsa = orig_asext_lsa(rr, an->ls_id, MAX_AGE);
	lsa_merge(nbrself, lsa, v);

	RB_REMOVE(asext_tree, &ast, an);
	free(an);
}

void
rde_asext_free(void)
{
	struct asext_node	*an, *nan;

	for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) {
		nan = RB_NEXT(asext_tree, &ast, an);
		RB_REMOVE(asext_tree, &ast, an);
		free(an);
	}
}

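/*
 * Build a type 5 (AS-external) LSA for the given kernel route.  With
 * age DEFAULT_AGE the route is announced; MAX_AGE flushes the LSA from
 * the routing domain (see rde_asext_put() above).
 */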
struct lsa *
orig_asext_lsa(struct kroute *rr, u_int32_t ls_id, u_int16_t age)
{
	struct lsa	*lsa;
	struct iface	*iface;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(rr->prefix), rr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	/* update of seqnum is done by lsa_merge */
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	lsa->hdr.ls_id = ls_id;
	lsa->data.asext.mask = prefixlen2mask(rr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop; otherwise
	 * we may announce the true nexthop if it is reachable via an
	 * OSPF-enabled interface, but only broadcast & NBMA interfaces
	 * are considered in that case.
	 * It does not make sense to announce the nexthop of a point-to-point
	 * link since the traffic has to go through this box anyway.
	 * Some implementations actually check that there are multiple
	 * neighbors on the particular segment; we skip that check.
	 */
	iface = rde_asext_lookup(rr->nexthop.s_addr, -1);
	if (rr->flags & F_FORCED_NEXTHOP)
		lsa->data.asext.fw_addr = rr->nexthop.s_addr;
	else if (rr->flags & F_CONNECTED)
		lsa->data.asext.fw_addr = 0;
	else if (iface && (iface->type == IF_TYPE_BROADCAST ||
	    iface->type == IF_TYPE_NBMA))
		lsa->data.asext.fw_addr = rr->nexthop.s_addr;
	else
		lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(rr->metric);
	lsa->data.asext.ext_tag = htonl(rr->ext_tag);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}

/*
 * summary LSA stuff
 */
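/*
 * Decide whether a routing table entry needs a summary LSA (type 3 or
 * type 4) in the given area and originate or refresh it accordingly;
 * this follows the origination rules of RFC 2328, section 12.4.3.
 */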
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* route is invalid, lsa_remove_invalid_sums() will do the cleanup */
	if (rte->cost >= LS_INFINITY)
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		if (rn->invalid)
			continue;
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		break;
	}
	if (rn == NULL)
		/* all nexthops belong to this area or are invalid */
		return;

	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find_area(area, type, rte->prefix.s_addr,
		    rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}

struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1470