xref: /openbsd/usr.sbin/ospf6d/rde.c (revision ccbb71f5)
1 /*	$OpenBSD: rde.c,v 1.89 2021/01/19 09:54:08 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <net/if_types.h>
25 #include <netinet/in.h>
26 #include <arpa/inet.h>
27 #include <err.h>
28 #include <errno.h>
29 #include <stdlib.h>
30 #include <signal.h>
31 #include <string.h>
32 #include <pwd.h>
33 #include <unistd.h>
34 #include <event.h>
35 
36 #include "ospf6.h"
37 #include "ospf6d.h"
38 #include "ospfe.h"
39 #include "log.h"
40 #include "rde.h"
41 
42 #define MINIMUM(a, b)	(((a) < (b)) ? (a) : (b))
43 
44 void		 rde_sig_handler(int sig, short, void *);
45 __dead void	 rde_shutdown(void);
46 void		 rde_dispatch_imsg(int, short, void *);
47 void		 rde_dispatch_parent(int, short, void *);
48 void		 rde_dump_area(struct area *, int, pid_t);
49 
50 void		 rde_send_summary(pid_t);
51 void		 rde_send_summary_area(struct area *, pid_t);
52 void		 rde_nbr_init(u_int32_t);
53 void		 rde_nbr_free(void);
54 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
55 void		 rde_nbr_del(struct rde_nbr *);
56 
57 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
58 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
59 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
60 void		 rde_req_list_free(struct rde_nbr *);
61 
62 struct iface	*rde_asext_lookup(struct in6_addr, int);
63 void		 rde_asext_get(struct kroute *);
64 void		 rde_asext_put(struct kroute *);
65 
66 int		 comp_asext(struct lsa *, struct lsa *);
67 struct lsa	*orig_asext_lsa(struct kroute *, u_int16_t);
68 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
69 struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
70 		 struct vertex *);
71 struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
72 void		 append_prefix_lsa(struct lsa **, u_int16_t *,
73 		    struct lsa_prefix *);
74 
75 /* A 32-bit value != any ifindex.
76  * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
77 #define	LS_ID_INTRA_RTR	0x01000000
78 
79 /* Tree of prefixes with global scope on given a link,
80  * see orig_intra_lsa_*() */
81 struct prefix_node {
82 	RB_ENTRY(prefix_node)	 entry;
83 	struct lsa_prefix	*prefix;
84 };
85 RB_HEAD(prefix_tree, prefix_node);
86 RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
87 int		 prefix_compare(struct prefix_node *, struct prefix_node *);
88 void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);
89 
90 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
91 static struct imsgev	*iev_ospfe;
92 static struct imsgev	*iev_main;
93 struct rde_nbr		*nbrself;
94 struct lsa_tree		 asext_tree;
95 
96 /* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * The usual async-signal-safety restrictions do not apply here:
	 * libevent catches the signal and calls us from its event loop.
	 */
	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}
	fatalx("unexpected signal");
}
113 
114 /* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	pid_t			 pid;

	/* fork off the RDE child; the parent just returns its pid */
	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	/* chroot into the unprivileged user's home before dropping root */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	/*
	 * XXX needed with fork+exec
	 * log_init(debug, LOG_DAEMON);
	 * log_setverbose(verbose);
	 */

	ospfd_process = PROC_RDE_ENGINE;
	log_procinit(log_procnames[ospfd_process]);

	/* drop privileges: groups first, then gid, then uid */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	/* the RDE only talks over already-open pipes */
	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends this process does not use */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	conf_clear_redist_list(&rdeconf->redist_list);

	/* record start time so rde_send_summary() can report uptime */
	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	/* event loop only returns on shutdown */
	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
216 
/* Tear down all RDE state and exit; never returns. */
__dead void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	/* close pipes */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	close(iev_ospfe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	/* free all areas and their contents */
	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	/* free the AS-external LSA database; grab the successor first */
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
250 
251 int
252 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
253     u_int16_t datalen)
254 {
255 	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
256 	    data, datalen));
257 }
258 
259 /* ARGSUSED */
/*
 * Dispatch imsgs arriving from the ospfe process: neighbor lifecycle
 * events, the database exchange protocol (DD, LS REQ, LS UPD, LS MAXAGE)
 * and the various "show" control requests.
 */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, shut = 0, verbose;
	u_int16_t		 l;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* monotonic timestamp used for the MIN_LS_ARRIVAL checks below */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			/* mirror a new ospfe neighbor in the RDE */
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/*
			 * re-originate intra-area-prefix LSAs when the
			 * neighbor enters or leaves FULL state
			 */
			if (state != nbr->state &&
			    (nbr->state & NBR_STA_FULL ||
			    state & NBR_STA_FULL)) {
				nbr->state = state;
				area_track(nbr->area);
				orig_intra_area_prefix_lsas(nbr->area);
			}

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			/* peerid carries the area ID for this message */
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			/* send the full database followed by an end marker */
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
			    0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* walk the LSA headers in the DD packet payload */
			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			imsg_compose_event(iev_ospfe, IMSG_DD_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* answer each request with the full LSA from the db */
			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    req_hdr.type, req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					/* request for an unknown LSA */
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* take a private copy of the received LSA */
			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					/* RFC flooding rate limit */
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				/* lsa_add() takes ownership of lsa */
				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table we should reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			/* no premature deletion while an exchange is running */
			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_LINK:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_INTRA:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
			/* optional payload: an area ID to restrict the dump */
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				/* no area given: dump everything */
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					/* stub areas carry no AS-externals */
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			/* external routes are dumped under area 0.0.0.0 */
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_IFINFO:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(int))
				fatalx("IFINFO imsg with wrong len");

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				fatalx("IFINFO imsg with bad peerid");
			memcpy(&nbr->iface->state, imsg.data, sizeof(int));

			/* Resend LSAs if interface state changes. */
			orig_intra_area_prefix_lsas(nbr->area);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
638 
639 /* ARGSUSED */
/*
 * Dispatch imsgs arriving from the parent (main) process: redistributed
 * networks, interface and address changes, and configuration reloads.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct area		*area;
	struct iface		*iface, *ifp, *i;
	struct ifaddrchange	*ifc;
	struct iface_addr	*ia, *nia;
	struct imsg		 imsg;
	struct kroute		 kr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	ssize_t			 n;
	int			 shut = 0, link_ok, prev_link_ok, orig_lsa;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			/* a kernel route became eligible for redistribution */
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));
			rde_asext_get(&kr);
			break;
		case IMSG_NETWORK_DEL:
			/* a previously redistributed kernel route went away */
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));
			rde_asext_put(&kr);
			break;
		case IMSG_IFINFO:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct iface))
				fatalx("IFINFO imsg with wrong len");

			ifp = imsg.data;

			/*
			 * update the depend_ok flag of every interface
			 * that "depends on" the reported interface
			 */
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				orig_lsa = 0;
				LIST_FOREACH(i, &area->iface_list, entry) {
					if (strcmp(i->dependon,
					    ifp->name) == 0) {
						i->depend_ok =
						    ifstate_is_up(ifp);
						if (ifstate_is_up(i))
							orig_lsa = 1;
					}
				}
				if (orig_lsa)
					orig_intra_area_prefix_lsas(area);
			}

			if (!(ifp->cflags & F_IFACE_CONFIGURED))
				break;
			iface = if_find(ifp->ifindex);
			if (iface == NULL)
				fatalx("interface lost in rde");

			/* remember link state before the update */
			prev_link_ok = (iface->flags & IFF_UP) &&
			    LINK_STATE_IS_UP(iface->linkstate);

			if_update(iface, ifp->mtu, ifp->flags, ifp->if_type,
			    ifp->linkstate, ifp->baudrate, ifp->rdomain);

			/* Resend LSAs if interface state changes. */
			link_ok = (iface->flags & IFF_UP) &&
			          LINK_STATE_IS_UP(iface->linkstate);
			if (prev_link_ok == link_ok)
				break;

			orig_intra_area_prefix_lsas(iface->area);

			break;
		case IMSG_IFADDRNEW:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ifaddrchange))
				fatalx("IFADDRNEW imsg with wrong len");
			ifc = imsg.data;

			iface = if_find(ifc->ifindex);
			if (iface == NULL)
				fatalx("IFADDRNEW interface lost in rde");

			/* record the new address on the interface */
			if ((ia = calloc(1, sizeof(struct iface_addr))) ==
			    NULL)
				fatal("rde_dispatch_parent IFADDRNEW");
			ia->addr = ifc->addr;
			ia->dstbrd = ifc->dstbrd;
			ia->prefixlen = ifc->prefixlen;

			TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
			if (iface->area)
				orig_intra_area_prefix_lsas(iface->area);
			break;
		case IMSG_IFADDRDEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ifaddrchange))
				fatalx("IFADDRDEL imsg with wrong len");
			ifc = imsg.data;

			iface = if_find(ifc->ifindex);
			if (iface == NULL)
				fatalx("IFADDRDEL interface lost in rde");

			/* find and remove the matching address entry */
			for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL;
			    ia = nia) {
				nia = TAILQ_NEXT(ia, entry);

				if (IN6_ARE_ADDR_EQUAL(&ia->addr,
				    &ifc->addr)) {
					TAILQ_REMOVE(&iface->ifa_list, ia,
					    entry);
					free(ia);
					break;
				}
			}
			if (iface->area)
				orig_intra_area_prefix_lsas(iface->area);
			break;
		case IMSG_RECONF_CONF:
			/* start of a config reload: stage a fresh config */
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			/* add one area to the staged config */
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_END:
			/* merge the staged config into the running one */
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
821 
822 void
823 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
824 {
825 	struct iface	*iface;
826 
827 	/* dump header */
828 	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
829 	    area, sizeof(*area));
830 
831 	/* dump link local lsa */
832 	LIST_FOREACH(iface, &area->iface_list, entry) {
833 		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
834 		    0, pid, -1, iface, sizeof(*iface));
835 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
836 	}
837 
838 	/* dump area lsa */
839 	lsa_dump(&area->lsa_tree, imsg_type, pid);
840 }
841 
/* Return the router ID from the running RDE configuration. */
u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}
847 
/*
 * Send the kernel route for rt_node r to the parent process as a single
 * IMSG_KROUTE_CHANGE message carrying one struct kroute per usable
 * nexthop.  If no usable nexthop remains the route is deleted instead.
 */
void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix = r->prefix;
		kr.nexthop = rn->nexthop;
		/* link-local nexthops are only meaningful with a scope */
		if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
		    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
			kr.scope = rn->ifindex;
		kr.ifindex = rn->ifindex;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}

	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}
890 
891 void
892 rde_send_delete_kroute(struct rt_node *r)
893 {
894 	struct kroute	 kr;
895 
896 	bzero(&kr, sizeof(kr));
897 	kr.prefix = r->prefix;
898 	kr.prefixlen = r->prefixlen;
899 
900 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
901 	    &kr, sizeof(kr));
902 }
903 
904 void
905 rde_send_summary(pid_t pid)
906 {
907 	static struct ctl_sum	 sumctl;
908 	struct timeval		 now;
909 	struct area		*area;
910 	struct vertex		*v;
911 
912 	bzero(&sumctl, sizeof(struct ctl_sum));
913 
914 	sumctl.rtr_id.s_addr = rde_router_id();
915 	sumctl.spf_delay = rdeconf->spf_delay;
916 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
917 
918 	LIST_FOREACH(area, &rdeconf->area_list, entry)
919 		sumctl.num_area++;
920 
921 	RB_FOREACH(v, lsa_tree, &asext_tree)
922 		sumctl.num_ext_lsa++;
923 
924 	gettimeofday(&now, NULL);
925 	if (rdeconf->uptime < now.tv_sec)
926 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
927 	else
928 		sumctl.uptime = 0;
929 
930 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
931 	    sizeof(sumctl));
932 }
933 
934 void
935 rde_send_summary_area(struct area *area, pid_t pid)
936 {
937 	static struct ctl_sum_area	 sumareactl;
938 	struct iface			*iface;
939 	struct rde_nbr			*nbr;
940 	struct lsa_tree			*tree = &area->lsa_tree;
941 	struct vertex			*v;
942 
943 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
944 
945 	sumareactl.area.s_addr = area->id.s_addr;
946 	sumareactl.num_spf_calc = area->num_spf_calc;
947 
948 	LIST_FOREACH(iface, &area->iface_list, entry)
949 		sumareactl.num_iface++;
950 
951 	LIST_FOREACH(nbr, &area->nbr_list, entry)
952 		if (nbr->state == NBR_STA_FULL && !nbr->self)
953 			sumareactl.num_adj_nbr++;
954 
955 	RB_FOREACH(v, lsa_tree, tree)
956 		sumareactl.num_lsa++;
957 
958 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
959 	    sizeof(sumareactl));
960 }
961 
/* Hash table mapping ospfe peer IDs to RDE neighbor structures. */
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;	/* array of hash buckets */
	u_int32_t		 hashmask;	/* size - 1; size is a power of two */
} rdenbrtable;

/* bucket for peerid x; relies on hashmask being a power of two minus one */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
971 
972 void
973 rde_nbr_init(u_int32_t hashsize)
974 {
975 	struct rde_nbr_head	*head;
976 	u_int32_t		 hs, i;
977 
978 	for (hs = 1; hs < hashsize; hs <<= 1)
979 		;
980 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
981 	if (rdenbrtable.hashtbl == NULL)
982 		fatal("rde_nbr_init");
983 
984 	for (i = 0; i < hs; i++)
985 		LIST_INIT(&rdenbrtable.hashtbl[i]);
986 
987 	rdenbrtable.hashmask = hs - 1;
988 
989 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
990 		fatal("rde_nbr_init");
991 
992 	nbrself->id.s_addr = rde_router_id();
993 	nbrself->peerid = NBR_IDSELF;
994 	nbrself->state = NBR_STA_DOWN;
995 	nbrself->self = 1;
996 	head = RDE_NBR_HASH(NBR_IDSELF);
997 	LIST_INSERT_HEAD(head, nbrself, hash);
998 }
999 
/* Release the self pseudo-neighbor and the neighbor hash table. */
void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}
1006 
1007 struct rde_nbr *
1008 rde_nbr_find(u_int32_t peerid)
1009 {
1010 	struct rde_nbr_head	*head;
1011 	struct rde_nbr		*nbr;
1012 
1013 	head = RDE_NBR_HASH(peerid);
1014 
1015 	LIST_FOREACH(nbr, head, hash) {
1016 		if (nbr->peerid == peerid)
1017 			return (nbr);
1018 	}
1019 
1020 	return (NULL);
1021 }
1022 
1023 struct rde_nbr *
1024 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
1025 {
1026 	struct rde_nbr_head	*head;
1027 	struct rde_nbr		*nbr;
1028 	struct area		*area;
1029 	struct iface		*iface;
1030 
1031 	if (rde_nbr_find(peerid))
1032 		return (NULL);
1033 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
1034 		fatalx("rde_nbr_new: unknown area");
1035 
1036 	if ((iface = if_find(new->ifindex)) == NULL)
1037 		fatalx("rde_nbr_new: unknown interface");
1038 
1039 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
1040 		fatal("rde_nbr_new");
1041 
1042 	memcpy(nbr, new, sizeof(*nbr));
1043 	nbr->peerid = peerid;
1044 	nbr->area = area;
1045 	nbr->iface = iface;
1046 
1047 	TAILQ_INIT(&nbr->req_list);
1048 
1049 	head = RDE_NBR_HASH(peerid);
1050 	LIST_INSERT_HEAD(head, nbr, hash);
1051 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
1052 
1053 	return (nbr);
1054 }
1055 
1056 void
1057 rde_nbr_del(struct rde_nbr *nbr)
1058 {
1059 	if (nbr == NULL)
1060 		return;
1061 
1062 	rde_req_list_free(nbr);
1063 
1064 	LIST_REMOVE(nbr, entry);
1065 	LIST_REMOVE(nbr, hash);
1066 
1067 	free(nbr);
1068 }
1069 
1070 int
1071 rde_nbr_loading(struct area *area)
1072 {
1073 	struct rde_nbr		*nbr;
1074 	int			 checkall = 0;
1075 
1076 	if (area == NULL) {
1077 		area = LIST_FIRST(&rdeconf->area_list);
1078 		checkall = 1;
1079 	}
1080 
1081 	while (area != NULL) {
1082 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1083 			if (nbr->self)
1084 				continue;
1085 			if (nbr->state & NBR_STA_XCHNG ||
1086 			    nbr->state & NBR_STA_LOAD)
1087 				return (1);
1088 		}
1089 		if (!checkall)
1090 			break;
1091 		area = LIST_NEXT(area, entry);
1092 	}
1093 
1094 	return (0);
1095 }
1096 
1097 struct rde_nbr *
1098 rde_nbr_self(struct area *area)
1099 {
1100 	struct rde_nbr		*nbr;
1101 
1102 	LIST_FOREACH(nbr, &area->nbr_list, entry)
1103 		if (nbr->self)
1104 			return (nbr);
1105 
1106 	/* this may not happen */
1107 	fatalx("rde_nbr_self: area without self");
1108 	return (NULL);
1109 }
1110 
1111 /*
1112  * LSA req list
1113  */
1114 void
1115 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1116 {
1117 	struct rde_req_entry	*le;
1118 
1119 	if ((le = calloc(1, sizeof(*le))) == NULL)
1120 		fatal("rde_req_list_add");
1121 
1122 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1123 	le->type = lsa->type;
1124 	le->ls_id = lsa->ls_id;
1125 	le->adv_rtr = lsa->adv_rtr;
1126 }
1127 
1128 int
1129 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1130 {
1131 	struct rde_req_entry	*le;
1132 
1133 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1134 		if ((lsa_hdr->type == le->type) &&
1135 		    (lsa_hdr->ls_id == le->ls_id) &&
1136 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1137 			return (1);
1138 	}
1139 	return (0);
1140 }
1141 
1142 void
1143 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1144 {
1145 	struct rde_req_entry	*le;
1146 
1147 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1148 		if ((lsa_hdr->type == le->type) &&
1149 		    (lsa_hdr->ls_id == le->ls_id) &&
1150 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1151 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1152 			free(le);
1153 			return;
1154 		}
1155 	}
1156 }
1157 
1158 void
1159 rde_req_list_free(struct rde_nbr *nbr)
1160 {
1161 	struct rde_req_entry	*le;
1162 
1163 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1164 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1165 		free(le);
1166 	}
1167 }
1168 
1169 /*
1170  * as-external LSA handling
1171  */
1172 struct iface *
1173 rde_asext_lookup(struct in6_addr prefix, int plen)
1174 {
1175 
1176 	struct area		*area;
1177 	struct iface		*iface;
1178 	struct iface_addr	*ia;
1179 	struct in6_addr		 ina, inb;
1180 
1181 	LIST_FOREACH(area, &rdeconf->area_list, entry) {
1182 		LIST_FOREACH(iface, &area->iface_list, entry) {
1183 			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1184 				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1185 					continue;
1186 
1187 				inet6applymask(&ina, &ia->addr, ia->prefixlen);
1188 				inet6applymask(&inb, &prefix, ia->prefixlen);
1189 				if (IN6_ARE_ADDR_EQUAL(&ina, &inb) &&
1190 				    (plen == -1 || plen == ia->prefixlen))
1191 					return (iface);
1192 			}
1193 		}
1194 	}
1195 	return (NULL);
1196 }
1197 
1198 void
1199 rde_asext_get(struct kroute *kr)
1200 {
1201 	struct vertex	*v;
1202 	struct lsa	*lsa;
1203 
1204 	if (rde_asext_lookup(kr->prefix, kr->prefixlen)) {
1205 		/* already announced as (stub) net LSA */
1206 		log_debug("rde_asext_get: %s/%d is net LSA",
1207 		    log_in6addr(&kr->prefix), kr->prefixlen);
1208 		return;
1209 	}
1210 
1211 	/* update of seqnum is done by lsa_merge */
1212 	if ((lsa = orig_asext_lsa(kr, DEFAULT_AGE))) {
1213 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1214 		    lsa->hdr.adv_rtr);
1215 		lsa_merge(nbrself, lsa, v);
1216 	}
1217 }
1218 
1219 void
1220 rde_asext_put(struct kroute *kr)
1221 {
1222 	struct vertex	*v;
1223 	struct lsa	*lsa;
1224 	/*
1225 	 * just try to remove the LSA. If the prefix is announced as
1226 	 * stub net LSA lsa_find() will fail later and nothing will happen.
1227 	 */
1228 
1229 	/* remove by reflooding with MAX_AGE */
1230 	if ((lsa = orig_asext_lsa(kr, MAX_AGE))) {
1231 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1232 		    lsa->hdr.adv_rtr);
1233 
1234 		/*
1235 		 * if v == NULL no LSA is in the table and
1236 		 * nothing has to be done.
1237 		 */
1238 		if (v)
1239 			lsa_merge(nbrself, lsa, v);
1240 		else
1241 			free(lsa);
1242 	}
1243 }
1244 
1245 /*
1246  * summary LSA stuff
1247  */
1248 void
1249 rde_summary_update(struct rt_node *rte, struct area *area)
1250 {
1251 	struct vertex		*v = NULL;
1252 //XXX	struct lsa		*lsa;
1253 	u_int16_t		 type = 0;
1254 
1255 	/* first check if we actually need to announce this route */
1256 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1257 		return;
1258 	/* never create summaries for as-ext LSA */
1259 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1260 		return;
1261 	/* no need for summary LSA in the originating area */
1262 	if (rte->area.s_addr == area->id.s_addr)
1263 		return;
1264 	/* no need to originate inter-area routes to the backbone */
1265 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1266 		return;
1267 	/* TODO nexthop check, nexthop part of area -> no summary */
1268 	if (rte->cost >= LS_INFINITY)
1269 		return;
1270 	/* TODO AS border router specific checks */
1271 	/* TODO inter-area network route stuff */
1272 	/* TODO intra-area stuff -- condense LSA ??? */
1273 
1274 	if (rte->d_type == DT_NET) {
1275 		type = LSA_TYPE_INTER_A_PREFIX;
1276 	} else if (rte->d_type == DT_RTR) {
1277 		type = LSA_TYPE_INTER_A_ROUTER;
1278 	} else
1279 
1280 #if 0 /* XXX a lot todo */
1281 	/* update lsa but only if it was changed */
1282 	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1283 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1284 	lsa_merge(rde_nbr_self(area), lsa, v);
1285 
1286 	if (v == NULL)
1287 		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1288 #endif
1289 
1290 	/* suppressed/deleted routes are not found in the second lsa_find */
1291 	if (v)
1292 		v->cost = rte->cost;
1293 }
1294 
1295 /*
1296  * Functions for self-originated LSAs
1297  */
1298 
1299 /* Prefix LSAs have variable size. We have to be careful to copy the right
1300  * amount of bytes, and to realloc() the right amount of memory. */
1301 void
1302 append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1303 {
1304 	struct lsa_prefix	*copy;
1305 	unsigned int		 lsa_prefix_len;
1306 	unsigned int		 new_len;
1307 	char			*new_lsa;
1308 
1309 	lsa_prefix_len = sizeof(struct lsa_prefix)
1310 	    + LSA_PREFIXSIZE(prefix->prefixlen);
1311 
1312 	new_len = *len + lsa_prefix_len;
1313 
1314 	/* Make sure we have enough space for this prefix. */
1315 	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1316 		fatalx("append_prefix_lsa");
1317 
1318 	/* Append prefix to LSA. */
1319 	copy = (struct lsa_prefix *)(new_lsa + *len);
1320 	memcpy(copy, prefix, lsa_prefix_len);
1321 
1322 	*lsa = (struct lsa *)new_lsa;
1323 	*len = new_len;
1324 }
1325 
1326 int
1327 prefix_compare(struct prefix_node *a, struct prefix_node *b)
1328 {
1329 	struct lsa_prefix	*p;
1330 	struct lsa_prefix	*q;
1331 	int			 i;
1332 	int			 len;
1333 
1334 	p = a->prefix;
1335 	q = b->prefix;
1336 
1337 	len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1338 
1339 	i = memcmp(p + 1, q + 1, len);
1340 	if (i)
1341 		return (i);
1342 	if (p->prefixlen < q->prefixlen)
1343 		return (-1);
1344 	if (p->prefixlen > q->prefixlen)
1345 		return (1);
1346 	return (0);
1347 }
1348 
1349 void
1350 prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1351 {
1352 	struct prefix_node	*old;
1353 	struct prefix_node	*new;
1354 	struct in6_addr		 addr;
1355 	unsigned int		 len;
1356 	unsigned int		 i;
1357 	char			*cur_prefix;
1358 
1359 	cur_prefix = (char *)(lsa + 1);
1360 
1361 	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1362 		if ((new = calloc(1, sizeof(*new))) == NULL)
1363 			fatal("prefix_tree_add");
1364 		new->prefix = (struct lsa_prefix *)cur_prefix;
1365 
1366 		len = sizeof(*new->prefix)
1367 		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1368 
1369 		bzero(&addr, sizeof(addr));
1370 		memcpy(&addr, new->prefix + 1,
1371 		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1372 
1373 		new->prefix->metric = 0;
1374 
1375 		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1376 		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1377 		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1378 			old = RB_INSERT(prefix_tree, tree, new);
1379 			if (old != NULL) {
1380 				old->prefix->options |= new->prefix->options;
1381 				free(new);
1382 			}
1383 		} else
1384 			free(new);
1385 
1386 		cur_prefix = cur_prefix + len;
1387 	}
1388 }
1389 
/* Generate the red-black tree functions for the prefix tree. */
RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1391 
/*
 * Originate the intra-area-prefix LSA referring to the network LSA of
 * the given broadcast/NBMA interface.  Only done while we are DR on
 * the link; the prefixes are collected from the link LSAs of all
 * fully adjacent neighbors plus our own.  Returns NULL when nothing
 * needs to be (re)originated; an LSA carrying no prefixes is aged to
 * MAX_AGE so a stale copy in the DB gets flushed.
 */
struct lsa *
orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
{
	struct lsa		*lsa;
	struct vertex		*v;
	struct rde_nbr		*nbr;
	struct prefix_node	*node;
	struct prefix_tree	 tree;
	int			 num_full_nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	log_debug("orig_intra_lsa_net: area %s, interface %s",
	    inet_ntoa(area->id), iface->name);

	RB_INIT(&tree);

	if (iface->state & IF_STA_DR) {
		num_full_nbr = 0;
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self ||
			    nbr->iface->ifindex != iface->ifindex ||
			    (nbr->state & NBR_STA_FULL) == 0)
				continue;
			num_full_nbr++;
			/* collect the prefixes of the neighbor's link LSA */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(nbr->iface_id), nbr->id.s_addr);
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
		if (num_full_nbr == 0) {
			/* There are no adjacent neighbors on link.
			 * If a copy of this LSA already exists in DB,
			 * it needs to be flushed. orig_intra_lsa_rtr()
			 * will take care of prefixes configured on
			 * this interface. */
			if (!old)
				return NULL;
		} else {
			/* Add our own prefixes configured for this link. */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(iface->ifindex), rde_router_id());
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
	/* Continue only if a copy of this LSA already exists in DB.
	 * It needs to be flushed. */
	} else if (!old)
		return NULL;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_net");

	/* reference the network LSA of this link */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	numprefix = 0;
	RB_FOREACH(node, prefix_tree, &tree) {
		append_prefix_lsa(&lsa, &len, node->prefix);
		numprefix++;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* the tree nodes only borrowed the prefixes from the link LSAs */
	while (!RB_EMPTY(&tree))
		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(iface->ifindex);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}
1473 
/*
 * Originate the intra-area-prefix LSA announcing the router's own
 * interface prefixes for the given area.  Prefixes of broadcast/NBMA
 * links with full adjacencies are left to orig_intra_lsa_net().
 * Returns NULL if there is nothing to announce and no stale copy in
 * the DB; an LSA carrying no prefixes is aged to MAX_AGE to flush
 * the old copy.
 */
struct lsa *
orig_intra_lsa_rtr(struct area *area, struct vertex *old)
{
	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
				    + sizeof(struct in6_addr)];
	struct lsa		*lsa;
	struct lsa_prefix	*lsa_prefix;
	struct in6_addr		*prefix;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct rde_nbr		*nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_rtr");

	/* reference our router LSA */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
	lsa->data.pref_intra.ref_ls_id = 0;
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	numprefix = 0;
	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (!((iface->flags & IFF_UP) &&
		    LINK_STATE_IS_UP(iface->linkstate)) &&
		    !(iface->if_type == IFT_CARP))
			/* interface or link state down
			 * and not a carp interface */
			continue;

		if (iface->if_type == IFT_CARP &&
		    (iface->linkstate == LINK_STATE_UNKNOWN ||
		    iface->linkstate == LINK_STATE_INVALID))
			/* carp interface in state invalid or unknown */
			continue;

		if ((iface->state & IF_STA_DOWN) &&
		    !(iface->cflags & F_IFACE_PASSIVE))
			/* passive interfaces stay in state DOWN */
			continue;

		/* Broadcast links with adjacencies are handled
		 * by orig_intra_lsa_net(), ignore. */
		if (iface->type == IF_TYPE_BROADCAST ||
		    iface->type == IF_TYPE_NBMA) {
			if (iface->state & IF_STA_WAITING)
				/* Skip, we're still waiting for
				 * adjacencies to form. */
				continue;

			LIST_FOREACH(nbr, &area->nbr_list, entry)
				if (!nbr->self &&
				    nbr->iface->ifindex == iface->ifindex &&
				    nbr->state & NBR_STA_FULL)
					break;
			/* nbr != NULL: at least one full adjacency, so
			 * orig_intra_lsa_net() covers this link */
			if (nbr)
				continue;
		}

		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;

		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
				continue;

			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));

			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
			    iface->state & IF_STA_LOOPBACK) {
				/* host routes are announced as /128 */
				lsa_prefix->prefixlen = 128;
				lsa_prefix->metric = 0;
			} else if ((iface->if_type == IFT_CARP &&
				   iface->linkstate == LINK_STATE_DOWN) ||
				   !(iface->depend_ok)) {
				/* carp interfaces in state backup are
				 * announced with high metric for faster
				 * failover. */
				lsa_prefix->prefixlen = ia->prefixlen;
				lsa_prefix->metric = MAX_METRIC;
			} else {
				lsa_prefix->prefixlen = ia->prefixlen;
				lsa_prefix->metric = htons(iface->metric);
			}

			if (lsa_prefix->prefixlen == 128)
				lsa_prefix->options |= OSPF_PREFIX_LA;

			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
			    "%s/%d, metric %d", inet_ntoa(area->id),
			    iface->name, log_in6addr(&ia->addr),
			    lsa_prefix->prefixlen, ntohs(lsa_prefix->metric));

			prefix = (struct in6_addr *)(lsa_prefix + 1);
			inet6applymask(prefix, &ia->addr,
			    lsa_prefix->prefixlen);
			append_prefix_lsa(&lsa, &len, lsa_prefix);
			numprefix++;
		}

		/* TODO: Add prefixes of directly attached hosts, too */
		/* TODO: Add prefixes for virtual links */
	}

	/* If no prefixes were included, continue only if a copy of this
	 * LSA already exists in DB. It needs to be flushed. */
	if (numprefix == 0 && !old) {
		free(lsa);
		return NULL;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}
1599 
1600 void
1601 orig_intra_area_prefix_lsas(struct area *area)
1602 {
1603 	struct lsa	*lsa;
1604 	struct vertex	*old;
1605 	struct iface	*iface;
1606 
1607 	LIST_FOREACH(iface, &area->iface_list, entry) {
1608 		if (iface->type == IF_TYPE_BROADCAST ||
1609 		    iface->type == IF_TYPE_NBMA) {
1610 			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1611 			    htonl(iface->ifindex), rde_router_id());
1612 			lsa = orig_intra_lsa_net(area, iface, old);
1613 			if (lsa)
1614 				lsa_merge(rde_nbr_self(area), lsa, old);
1615 		}
1616 	}
1617 
1618 	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1619 		htonl(LS_ID_INTRA_RTR), rde_router_id());
1620 	lsa = orig_intra_lsa_rtr(area, old);
1621 	if (lsa)
1622 		lsa_merge(rde_nbr_self(area), lsa, old);
1623 }
1624 
1625 int
1626 comp_asext(struct lsa *a, struct lsa *b)
1627 {
1628 	/* compare prefixes, if they are equal or not */
1629 	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1630 		return (-1);
1631 	return (memcmp(
1632 	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1633 	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1634 	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
1635 }
1636 
/*
 * Build an as-external LSA for the given kernel route.  With age ==
 * DEFAULT_AGE the route is announced, with age == MAX_AGE it is
 * withdrawn.  An optional 32-bit external route tag is appended after
 * the prefix and signalled via LSA_ASEXT_T_FLAG in the metric field.
 * Returns the freshly allocated LSA (caller takes ownership).
 */
struct lsa *
orig_asext_lsa(struct kroute *kr, u_int16_t age)
{
	struct lsa	*lsa;
	u_int32_t	 ext_tag;
	u_int16_t	 len, ext_off;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
	    LSA_PREFIXSIZE(kr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we should announce the true nexthop
	 * unless that nexthop is outside of the ospf cloud.
	 * XXX for now we don't do this.
	 */

	/* the external route tag, if any, goes right after the prefix */
	ext_off = len;
	if (kr->ext_tag) {
		len += sizeof(ext_tag);
	}
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    log_in6addr(&kr->prefix), kr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	lsa->data.asext.prefix.prefixlen = kr->prefixlen;
	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
	    &kr->prefix, LSA_PREFIXSIZE(kr->prefixlen));

	/* the ls_id must uniquely identify the prefix among our ext LSAs */
	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, comp_asext, lsa);

	if (age == MAX_AGE) {
		/* inherit metric and ext_tag from the current LSA,
		 * some routers don't like to get withdraws that are
		 * different from what they have in their table.
		 */
		struct vertex *v;
		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
		    lsa->hdr.adv_rtr);
		if (v != NULL) {
			kr->metric = ntohl(v->lsa->data.asext.metric);
			if (kr->metric & LSA_ASEXT_T_FLAG) {
				memcpy(&ext_tag, (char *)v->lsa + ext_off,
				    sizeof(ext_tag));
				kr->ext_tag = ntohl(ext_tag);
			}
			kr->metric &= LSA_METRIC_MASK;
		}
	}

	/* metric: T-flag marks the presence of the appended route tag */
	if (kr->ext_tag) {
		lsa->data.asext.metric = htonl(kr->metric | LSA_ASEXT_T_FLAG);
		ext_tag = htonl(kr->ext_tag);
		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
	} else {
		lsa->data.asext.metric = htonl(kr->metric);
	}

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1709 
/*
 * Originate a summary LSA for the given route.  Summary LSA support
 * is not implemented for OSPFv3 yet: the disabled block below is a
 * leftover from ospfd (OSPFv2) and this function currently always
 * returns NULL.
 */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	return NULL;
}
1750