1 /*	$OpenBSD: rde.c,v 1.61 2013/11/13 20:49:49 benno Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/param.h>			/* for MIN() */
22 #include <sys/types.h>
23 #include <sys/socket.h>
24 #include <sys/queue.h>
25 #include <net/if_types.h>
26 #include <netinet/in.h>
27 #include <arpa/inet.h>
28 #include <err.h>
29 #include <errno.h>
30 #include <stdlib.h>
31 #include <signal.h>
32 #include <string.h>
33 #include <pwd.h>
34 #include <unistd.h>
35 #include <event.h>
36 
37 #include "ospf6.h"
38 #include "ospf6d.h"
39 #include "ospfe.h"
40 #include "log.h"
41 #include "rde.h"
42 
43 void		 rde_sig_handler(int sig, short, void *);
44 void		 rde_shutdown(void);
45 void		 rde_dispatch_imsg(int, short, void *);
46 void		 rde_dispatch_parent(int, short, void *);
47 void		 rde_dump_area(struct area *, int, pid_t);
48 
49 void		 rde_send_summary(pid_t);
50 void		 rde_send_summary_area(struct area *, pid_t);
51 void		 rde_nbr_init(u_int32_t);
52 void		 rde_nbr_free(void);
53 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
54 void		 rde_nbr_del(struct rde_nbr *);
55 
56 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
57 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
58 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
59 void		 rde_req_list_free(struct rde_nbr *);
60 
61 struct lsa	*rde_asext_get(struct rroute *);
62 struct lsa	*rde_asext_put(struct rroute *);
63 
64 int		 comp_asext(struct lsa *, struct lsa *);
65 struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
66 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
67 struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
68 		 struct vertex *);
69 struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
70 void		 append_prefix_lsa(struct lsa **, u_int16_t *,
71 		    struct lsa_prefix *);
72 
73 /* A 32-bit value != any ifindex.
74  * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
75 #define	LS_ID_INTRA_RTR	0x01000000
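/* e.g. 0x01000000 = 16777216 > USHRT_MAX (65535), so this ls_id can never
 * collide with an ifindex used as the ls_id of a per-link LSA below. */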
76 
77 /* Tree of prefixes with global scope on a given link,
78  * see orig_intra_lsa_*() */
79 struct prefix_node {
80 	RB_ENTRY(prefix_node)	 entry;
81 	struct lsa_prefix	*prefix;
82 };
83 RB_HEAD(prefix_tree, prefix_node);
84 RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
85 int		 prefix_compare(struct prefix_node *, struct prefix_node *);
86 void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);
87 
88 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
89 struct imsgev		*iev_ospfe;
90 struct imsgev		*iev_main;
91 struct rde_nbr		*nbrself;
92 struct lsa_tree		 asext_tree;
93 
94 /* ARGSUSED */
95 void
96 rde_sig_handler(int sig, short event, void *arg)
97 {
98 	/*
99 	 * signal handler rules don't apply, libevent decouples for us
100 	 */
101 
102 	switch (sig) {
103 	case SIGINT:
104 	case SIGTERM:
105 		rde_shutdown();
106 		/* NOTREACHED */
107 	default:
108 		fatalx("unexpected signal");
109 	}
110 }
111 
112 /* route decision engine */
113 pid_t
114 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
115     int pipe_parent2ospfe[2])
116 {
117 	struct event		 ev_sigint, ev_sigterm;
118 	struct timeval		 now;
119 	struct passwd		*pw;
120 	struct redistribute	*r;
121 	pid_t			 pid;
122 
123 	switch (pid = fork()) {
124 	case -1:
125 		fatal("cannot fork");
126 		/* NOTREACHED */
127 	case 0:
128 		break;
129 	default:
130 		return (pid);
131 	}
132 
133 	rdeconf = xconf;
134 
135 	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
136 		fatal("getpwnam");
137 
138 	if (chroot(pw->pw_dir) == -1)
139 		fatal("chroot");
140 	if (chdir("/") == -1)
141 		fatal("chdir(\"/\")");
142 
143 	setproctitle("route decision engine");
144 	ospfd_process = PROC_RDE_ENGINE;
145 
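	/* Drop privileges: shrink the supplementary group list to the target
	 * group, then set the real, effective and saved group and user IDs
	 * to those of OSPF6D_USER looked up above. */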
146 	if (setgroups(1, &pw->pw_gid) ||
147 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
148 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
149 		fatal("can't drop privileges");
150 
151 	event_init();
152 	rde_nbr_init(NBR_HASHSIZE);
153 	lsa_init(&asext_tree);
154 
155 	/* setup signal handler */
156 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
157 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
158 	signal_add(&ev_sigint, NULL);
159 	signal_add(&ev_sigterm, NULL);
160 	signal(SIGPIPE, SIG_IGN);
161 	signal(SIGHUP, SIG_IGN);
162 
163 	/* setup pipes */
164 	close(pipe_ospfe2rde[0]);
165 	close(pipe_parent2rde[0]);
166 	close(pipe_parent2ospfe[0]);
167 	close(pipe_parent2ospfe[1]);
168 
169 	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
170 	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
171 		fatal(NULL);
172 	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
173 	iev_ospfe->handler = rde_dispatch_imsg;
174 	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
175 	iev_main->handler = rde_dispatch_parent;
176 
177 	/* setup event handler */
178 	iev_ospfe->events = EV_READ;
179 	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
180 	    iev_ospfe->handler, iev_ospfe);
181 	event_add(&iev_ospfe->ev, NULL);
182 
183 	iev_main->events = EV_READ;
184 	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
185 	    iev_main->handler, iev_main);
186 	event_add(&iev_main->ev, NULL);
187 
188 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
189 	cand_list_init();
190 	rt_init();
191 
192 	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
193 		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
194 		free(r);
195 	}
196 
197 	gettimeofday(&now, NULL);
198 	rdeconf->uptime = now.tv_sec;
199 
200 	event_dispatch();
201 
202 	rde_shutdown();
203 	/* NOTREACHED */
204 
205 	return (0);
206 }
207 
208 void
209 rde_shutdown(void)
210 {
211 	struct area	*a;
212 
213 	stop_spf_timer(rdeconf);
214 	cand_list_clr();
215 	rt_clear();
216 
217 	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
218 		LIST_REMOVE(a, entry);
219 		area_del(a);
220 	}
221 	rde_nbr_free();
222 
223 	msgbuf_clear(&iev_ospfe->ibuf.w);
224 	free(iev_ospfe);
225 	msgbuf_clear(&iev_main->ibuf.w);
226 	free(iev_main);
227 	free(rdeconf);
228 
229 	log_info("route decision engine exiting");
230 	_exit(0);
231 }
232 
233 int
234 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
235     u_int16_t datalen)
236 {
237 	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
238 	    data, datalen));
239 }
240 
241 /* ARGSUSED */
242 void
243 rde_dispatch_imsg(int fd, short event, void *bula)
244 {
245 	struct imsgev		*iev = bula;
246 	struct imsgbuf		*ibuf = &iev->ibuf;
247 	struct imsg		 imsg;
248 	struct in_addr		 aid;
249 	struct ls_req_hdr	 req_hdr;
250 	struct lsa_hdr		 lsa_hdr, *db_hdr;
251 	struct rde_nbr		 rn, *nbr;
252 	struct timespec		 tp;
253 	struct lsa		*lsa;
254 	struct area		*area;
255 	struct vertex		*v;
256 	char			*buf;
257 	ssize_t			 n;
258 	time_t			 now;
259 	int			 r, state, self, shut = 0, verbose;
260 	u_int16_t		 l;
261 
262 	if (event & EV_READ) {
263 		if ((n = imsg_read(ibuf)) == -1)
264 			fatal("imsg_read error");
265 		if (n == 0)	/* connection closed */
266 			shut = 1;
267 	}
268 	if (event & EV_WRITE) {
269 		if (msgbuf_write(&ibuf->w) == -1 && errno != EAGAIN)
270 			fatal("msgbuf_write");
271 	}
272 
273 	clock_gettime(CLOCK_MONOTONIC, &tp);
274 	now = tp.tv_sec;
275 
276 	for (;;) {
277 		if ((n = imsg_get(ibuf, &imsg)) == -1)
278 			fatal("rde_dispatch_imsg: imsg_read error");
279 		if (n == 0)
280 			break;
281 
282 		switch (imsg.hdr.type) {
283 		case IMSG_NEIGHBOR_UP:
284 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
285 				fatalx("invalid size of OE request");
286 			memcpy(&rn, imsg.data, sizeof(rn));
287 
288 			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
289 				fatalx("rde_dispatch_imsg: "
290 				    "neighbor already exists");
291 			break;
292 		case IMSG_NEIGHBOR_DOWN:
293 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
294 			break;
295 		case IMSG_NEIGHBOR_CHANGE:
296 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
297 				fatalx("invalid size of OE request");
298 			memcpy(&state, imsg.data, sizeof(state));
299 
300 			nbr = rde_nbr_find(imsg.hdr.peerid);
301 			if (nbr == NULL)
302 				break;
303 
304 			if (state != nbr->state &&
305 			    (nbr->state & NBR_STA_FULL ||
306 			    state & NBR_STA_FULL)) {
307 				nbr->state = state;
308 				area_track(nbr->area, state);
309 				orig_intra_area_prefix_lsas(nbr->area);
310 			}
311 
312 			nbr->state = state;
313 			if (nbr->state & NBR_STA_FULL)
314 				rde_req_list_free(nbr);
315 			break;
316 		case IMSG_DB_SNAPSHOT:
317 			nbr = rde_nbr_find(imsg.hdr.peerid);
318 			if (nbr == NULL)
319 				break;
320 
321 			lsa_snap(nbr, imsg.hdr.peerid);
322 
323 			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
324 			    0, -1, NULL, 0);
325 			break;
326 		case IMSG_DD:
327 			nbr = rde_nbr_find(imsg.hdr.peerid);
328 			if (nbr == NULL)
329 				break;
330 
331 			buf = imsg.data;
332 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
333 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
334 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
335 				buf += sizeof(lsa_hdr);
336 
337 				v = lsa_find(nbr->iface, lsa_hdr.type,
338 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
339 				if (v == NULL)
340 					db_hdr = NULL;
341 				else
342 					db_hdr = &v->lsa->hdr;
343 
344 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
345 					/*
346 					 * only request LSAs that are
347 					 * newer or missing
348 					 */
349 					rde_req_list_add(nbr, &lsa_hdr);
350 					imsg_compose_event(iev_ospfe, IMSG_DD,
351 					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
352 					    sizeof(lsa_hdr));
353 				}
354 			}
355 			if (l != 0)
356 				log_warnx("rde_dispatch_imsg: peerid %u, "
357 				    "trailing garbage in Database Description "
358 				    "packet", imsg.hdr.peerid);
359 
360 			imsg_compose_event(iev_ospfe, IMSG_DD_END,
361 			    imsg.hdr.peerid, 0, -1, NULL, 0);
362 			break;
363 		case IMSG_LS_REQ:
364 			nbr = rde_nbr_find(imsg.hdr.peerid);
365 			if (nbr == NULL)
366 				break;
367 
368 			buf = imsg.data;
369 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
370 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
371 				memcpy(&req_hdr, buf, sizeof(req_hdr));
372 				buf += sizeof(req_hdr);
373 
374 				if ((v = lsa_find(nbr->iface,
375 				    req_hdr.type, req_hdr.ls_id,
376 				    req_hdr.adv_rtr)) == NULL) {
377 					imsg_compose_event(iev_ospfe,
378 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
379 					    0, -1, NULL, 0);
380 					continue;
381 				}
382 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
383 				    imsg.hdr.peerid, 0, -1, v->lsa,
384 				    ntohs(v->lsa->hdr.len));
385 			}
386 			if (l != 0)
387 				log_warnx("rde_dispatch_imsg: peerid %u, "
388 				    "trailing garbage in LS Request "
389 				    "packet", imsg.hdr.peerid);
390 			break;
391 		case IMSG_LS_UPD:
392 			nbr = rde_nbr_find(imsg.hdr.peerid);
393 			if (nbr == NULL)
394 				break;
395 
396 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
397 			if (lsa == NULL)
398 				fatal(NULL);
399 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
400 
401 			if (!lsa_check(nbr, lsa,
402 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
403 				free(lsa);
404 				break;
405 			}
406 
407 			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
408 			    lsa->hdr.adv_rtr);
409 			if (v == NULL)
410 				db_hdr = NULL;
411 			else
412 				db_hdr = &v->lsa->hdr;
413 
414 			if (nbr->self) {
415 				lsa_merge(nbr, lsa, v);
416 				/* lsa_merge frees the right lsa */
417 				break;
418 			}
419 
420 			r = lsa_newer(&lsa->hdr, db_hdr);
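			/* r > 0: received LSA is newer than our copy (or we
			 * have no copy at all), r < 0: our copy is newer,
			 * r == 0: both are the same instance. */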
421 			if (r > 0) {
422 				/* new LSA newer than DB */
423 				if (v && v->flooded &&
424 				    v->changed + MIN_LS_ARRIVAL >= now) {
425 					free(lsa);
426 					break;
427 				}
428 
429 				rde_req_list_del(nbr, &lsa->hdr);
430 
431 				self = lsa_self(lsa);
432 				if (self) {
433 					if (v == NULL)
434 						/* LSA is no longer announced,
435 						 * remove by premature aging. */
436 						lsa_flush(nbr, lsa);
437 					else
438 						lsa_reflood(v, lsa);
439 				} else if (lsa_add(nbr, lsa))
440 					/* delayed lsa, don't flood yet */
441 					break;
442 
443 				/* flood and perhaps ack LSA */
444 				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
445 				    imsg.hdr.peerid, 0, -1, lsa,
446 				    ntohs(lsa->hdr.len));
447 
448 				/* reflood self originated LSA */
449 				if (self && v)
450 					imsg_compose_event(iev_ospfe,
451 					    IMSG_LS_FLOOD, v->peerid, 0, -1,
452 					    v->lsa, ntohs(v->lsa->hdr.len));
453 				/* new LSA was not added so free it */
454 				if (self)
455 					free(lsa);
456 			} else if (r < 0) {
457 				/*
458 				 * point 6 of "The Flooding Procedure"
459 				 * We are violating the RFC here because it
460 				 * does not make sense to reset a session just
461 				 * because an equal LSA is already in the table.
462 				 * Only if the LSA sent is older than the one
463 				 * in the table should the session be reset.
464 				 */
465 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
466 					imsg_compose_event(iev_ospfe,
467 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
468 					    0, -1, NULL, 0);
469 					free(lsa);
470 					break;
471 				}
472 
473 				/* lsa no longer needed */
474 				free(lsa);
475 
476 				/* new LSA older than DB */
477 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
478 				    ntohs(db_hdr->age) == MAX_AGE)
479 					/* seq-num wrap */
480 					break;
481 
482 				if (v->changed + MIN_LS_ARRIVAL >= now)
483 					break;
484 
485 				/* directly send current LSA, no ack */
486 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
487 				    imsg.hdr.peerid, 0, -1, v->lsa,
488 				    ntohs(v->lsa->hdr.len));
489 			} else {
490 				/* LSA is equal, send a direct ack */
491 				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
492 				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
493 				    sizeof(lsa->hdr));
494 				free(lsa);
495 			}
496 			break;
497 		case IMSG_LS_MAXAGE:
498 			nbr = rde_nbr_find(imsg.hdr.peerid);
499 			if (nbr == NULL)
500 				break;
501 
502 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
503 			    sizeof(struct lsa_hdr))
504 				fatalx("invalid size of OE request");
505 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
506 
507 			if (rde_nbr_loading(nbr->area))
508 				break;
509 
510 			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
511 			    lsa_hdr.adv_rtr);
512 			if (v == NULL)
513 				db_hdr = NULL;
514 			else
515 				db_hdr = &v->lsa->hdr;
516 
517 			/*
518 			 * only delete LSA if the one in the db is not newer
519 			 */
520 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
521 				lsa_del(nbr, &lsa_hdr);
522 			break;
523 		case IMSG_CTL_SHOW_DATABASE:
524 		case IMSG_CTL_SHOW_DB_EXT:
525 		case IMSG_CTL_SHOW_DB_LINK:
526 		case IMSG_CTL_SHOW_DB_NET:
527 		case IMSG_CTL_SHOW_DB_RTR:
528 		case IMSG_CTL_SHOW_DB_INTRA:
529 		case IMSG_CTL_SHOW_DB_SELF:
530 		case IMSG_CTL_SHOW_DB_SUM:
531 		case IMSG_CTL_SHOW_DB_ASBR:
532 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
533 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
534 				log_warnx("rde_dispatch_imsg: wrong imsg len");
535 				break;
536 			}
537 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
538 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
539 					rde_dump_area(area, imsg.hdr.type,
540 					    imsg.hdr.pid);
541 				}
542 				lsa_dump(&asext_tree, imsg.hdr.type,
543 				    imsg.hdr.pid);
544 			} else {
545 				memcpy(&aid, imsg.data, sizeof(aid));
546 				if ((area = area_find(rdeconf, aid)) != NULL) {
547 					rde_dump_area(area, imsg.hdr.type,
548 					    imsg.hdr.pid);
549 					if (!area->stub)
550 						lsa_dump(&asext_tree,
551 						    imsg.hdr.type,
552 						    imsg.hdr.pid);
553 				}
554 			}
555 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
556 			    imsg.hdr.pid, -1, NULL, 0);
557 			break;
558 		case IMSG_CTL_SHOW_RIB:
559 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
560 				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
561 				    0, imsg.hdr.pid, -1, area, sizeof(*area));
562 
563 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
564 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
565 			}
566 			aid.s_addr = 0;
567 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
568 
569 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
570 			    imsg.hdr.pid, -1, NULL, 0);
571 			break;
572 		case IMSG_CTL_SHOW_SUM:
573 			rde_send_summary(imsg.hdr.pid);
574 			LIST_FOREACH(area, &rdeconf->area_list, entry)
575 				rde_send_summary_area(area, imsg.hdr.pid);
576 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
577 			    imsg.hdr.pid, -1, NULL, 0);
578 			break;
579 		case IMSG_IFINFO:
580 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
581 			    sizeof(int))
582 				fatalx("IFINFO imsg with wrong len");
583 
584 			nbr = rde_nbr_find(imsg.hdr.peerid);
585 			if (nbr == NULL)
586 				fatalx("IFINFO imsg with bad peerid");
587 			memcpy(&nbr->iface->state, imsg.data, sizeof(int));
588 
589 			/* Resend LSAs if interface state changes. */
590 			orig_intra_area_prefix_lsas(nbr->area);
591 			break;
592 		case IMSG_CTL_LOG_VERBOSE:
593 			/* already checked by ospfe */
594 			memcpy(&verbose, imsg.data, sizeof(verbose));
595 			log_verbose(verbose);
596 			break;
597 		default:
598 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
599 			    imsg.hdr.type);
600 			break;
601 		}
602 		imsg_free(&imsg);
603 	}
604 	if (!shut)
605 		imsg_event_add(iev);
606 	else {
607 		/* this pipe is dead, so remove the event handler */
608 		event_del(&iev->ev);
609 		event_loopexit(NULL);
610 	}
611 }
612 
613 /* ARGSUSED */
614 void
615 rde_dispatch_parent(int fd, short event, void *bula)
616 {
617 	static struct area	*narea;
618 	struct area		*area;
619 	struct iface		*iface, *ifp;
620 	struct ifaddrchange	*ifc;
621 	struct iface_addr	*ia, *nia;
622 	struct imsg		 imsg;
623 	struct kroute		 kr;
624 	struct rroute		 rr;
625 	struct imsgev		*iev = bula;
626 	struct imsgbuf		*ibuf = &iev->ibuf;
627 	struct lsa		*lsa;
628 	struct vertex		*v;
629 	struct rt_node		*rn;
630 	ssize_t			 n;
631 	int			 shut = 0, wasvalid;
632 	unsigned int		 ifindex;
633 
634 	if (event & EV_READ) {
635 		if ((n = imsg_read(ibuf)) == -1)
636 			fatal("imsg_read error");
637 		if (n == 0)	/* connection closed */
638 			shut = 1;
639 	}
640 	if (event & EV_WRITE) {
641 		if (msgbuf_write(&ibuf->w) == -1 && errno != EAGAIN)
642 			fatal("msgbuf_write");
643 	}
644 
645 	for (;;) {
646 		if ((n = imsg_get(ibuf, &imsg)) == -1)
647 			fatal("rde_dispatch_parent: imsg_read error");
648 		if (n == 0)
649 			break;
650 
651 		switch (imsg.hdr.type) {
652 		case IMSG_NETWORK_ADD:
653 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
654 				log_warnx("rde_dispatch_parent: "
655 				    "wrong imsg len");
656 				break;
657 			}
658 			memcpy(&rr, imsg.data, sizeof(rr));
659 
660 			if ((lsa = rde_asext_get(&rr)) != NULL) {
661 				v = lsa_find(NULL, lsa->hdr.type,
662 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
663 
664 				lsa_merge(nbrself, lsa, v);
665 			}
666 			break;
667 		case IMSG_NETWORK_DEL:
668 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
669 				log_warnx("rde_dispatch_parent: "
670 				    "wrong imsg len");
671 				break;
672 			}
673 			memcpy(&rr, imsg.data, sizeof(rr));
674 
675 			if ((lsa = rde_asext_put(&rr)) != NULL) {
676 				v = lsa_find(NULL, lsa->hdr.type,
677 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
678 
679 				/*
680 				 * if v == NULL no LSA is in the table and
681 				 * nothing has to be done.
682 				 */
683 				if (v)
684 					lsa_merge(nbrself, lsa, v);
685 				else
686 					free(lsa);
687 			}
688 			break;
689 		case IMSG_KROUTE_GET:
690 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
691 				log_warnx("rde_dispatch_parent: "
692 				    "wrong imsg len");
693 				break;
694 			}
695 			memcpy(&kr, imsg.data, sizeof(kr));
696 
697 			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
698 			    DT_NET)) != NULL)
699 				rde_send_change_kroute(rn);
700 			else
701 				/* should not happen */
702 				imsg_compose_event(iev_main, IMSG_KROUTE_DELETE,
703 				    0, 0, -1, &kr, sizeof(kr));
704 			break;
705 		case IMSG_IFINFO:
706 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
707 			    sizeof(struct iface))
708 				fatalx("IFINFO imsg with wrong len");
709 
710 			ifp = imsg.data;
711 			iface = if_find(ifp->ifindex);
712 			if (iface == NULL)
713 				fatalx("interface lost in rde");
714 
715 			wasvalid = (iface->flags & IFF_UP) &&
716 			    LINK_STATE_IS_UP(iface->linkstate);
717 
718 			if_update(iface, ifp->mtu, ifp->flags, ifp->media_type,
719 			    ifp->linkstate, ifp->baudrate);
720 
721 			/* Resend LSAs if interface state changes. */
722 			if (wasvalid != (iface->flags & IFF_UP) &&
723 			    LINK_STATE_IS_UP(iface->linkstate)) {
724 				area = area_find(rdeconf, iface->area_id);
725 				if (!area)
726 					fatalx("interface lost area");
727 				orig_intra_area_prefix_lsas(area);
728 			}
729 			break;
730 		case IMSG_IFADD:
731 			if ((iface = malloc(sizeof(struct iface))) == NULL)
732 				fatal(NULL);
733 			memcpy(iface, imsg.data, sizeof(struct iface));
734 
735 			LIST_INIT(&iface->nbr_list);
736 			TAILQ_INIT(&iface->ls_ack_list);
737 			RB_INIT(&iface->lsa_tree);
738 
739 			area = area_find(rdeconf, iface->area_id);
740 			LIST_INSERT_HEAD(&area->iface_list, iface, entry);
741 			break;
742 		case IMSG_IFDELETE:
743 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
744 			    sizeof(ifindex))
745 				fatalx("IFDELETE imsg with wrong len");
746 
747 			memcpy(&ifindex, imsg.data, sizeof(ifindex));
748 			iface = if_find(ifindex);
749 			if (iface == NULL)
750 				fatalx("interface lost in rde");
751 
752 			LIST_REMOVE(iface, entry);
753 			if_del(iface);
754 			break;
755 		case IMSG_IFADDRNEW:
756 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
757 			    sizeof(struct ifaddrchange))
758 				fatalx("IFADDRNEW imsg with wrong len");
759 			ifc = imsg.data;
760 
761 			iface = if_find(ifc->ifindex);
762 			if (iface == NULL)
763 				fatalx("IFADDRNEW interface lost in rde");
764 
765 			if ((ia = calloc(1, sizeof(struct iface_addr))) ==
766 			    NULL)
767 				fatal("rde_dispatch_parent IFADDRNEW");
768 			ia->addr = ifc->addr;
769 			ia->dstbrd = ifc->dstbrd;
770 			ia->prefixlen = ifc->prefixlen;
771 
772 			TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
773 			area = area_find(rdeconf, iface->area_id);
774 			if (area)
775 				orig_intra_area_prefix_lsas(area);
776 			break;
777 		case IMSG_IFADDRDEL:
778 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
779 			    sizeof(struct ifaddrchange))
780 				fatalx("IFADDRDEL imsg with wrong len");
781 			ifc = imsg.data;
782 
783 			iface = if_find(ifc->ifindex);
784 			if (iface == NULL)
785 				fatalx("IFADDRDEL interface lost in rde");
786 
787 			for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL;
788 			    ia = nia) {
789 				nia = TAILQ_NEXT(ia, entry);
790 
791 				if (IN6_ARE_ADDR_EQUAL(&ia->addr,
792 				    &ifc->addr)) {
793 					TAILQ_REMOVE(&iface->ifa_list, ia,
794 					    entry);
795 					free(ia);
796 					break;
797 				}
798 			}
799 			area = area_find(rdeconf, iface->area_id);
800 			if (area)
801 				orig_intra_area_prefix_lsas(area);
802 			break;
803 		case IMSG_RECONF_CONF:
804 			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
805 			    NULL)
806 				fatal(NULL);
807 			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
808 
809 			LIST_INIT(&nconf->area_list);
810 			LIST_INIT(&nconf->cand_list);
811 			break;
812 		case IMSG_RECONF_AREA:
813 			if ((narea = area_new()) == NULL)
814 				fatal(NULL);
815 			memcpy(narea, imsg.data, sizeof(struct area));
816 
817 			LIST_INIT(&narea->iface_list);
818 			LIST_INIT(&narea->nbr_list);
819 			RB_INIT(&narea->lsa_tree);
820 
821 			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
822 			break;
823 		case IMSG_RECONF_END:
824 			merge_config(rdeconf, nconf);
825 			nconf = NULL;
826 			break;
827 		default:
828 			log_debug("rde_dispatch_parent: unexpected imsg %d",
829 			    imsg.hdr.type);
830 			break;
831 		}
832 		imsg_free(&imsg);
833 	}
834 	if (!shut)
835 		imsg_event_add(iev);
836 	else {
837 		/* this pipe is dead, so remove the event handler */
838 		event_del(&iev->ev);
839 		event_loopexit(NULL);
840 	}
841 }
842 
843 void
844 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
845 {
846 	struct iface	*iface;
847 
848 	/* dump header */
849 	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
850 	    area, sizeof(*area));
851 
852 	/* dump link local lsa */
853 	LIST_FOREACH(iface, &area->iface_list, entry) {
854 		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
855 		    0, pid, -1, iface, sizeof(*iface));
856 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
857 	}
858 
859 	/* dump area lsa */
860 	lsa_dump(&area->lsa_tree, imsg_type, pid);
861 }
862 
863 u_int32_t
864 rde_router_id(void)
865 {
866 	return (rdeconf->rtr_id.s_addr);
867 }
868 
869 void
870 rde_send_change_kroute(struct rt_node *r)
871 {
872 	struct kroute		 kr;
873 	struct rt_nexthop	*rn;
874 
875 	TAILQ_FOREACH(rn, &r->nexthop, entry) {
876 		if (!rn->invalid)
877 			break;
878 	}
879 	if (!rn)
880 		fatalx("rde_send_change_kroute: no valid nexthop found");
881 
882 	bzero(&kr, sizeof(kr));
883 	kr.prefix = r->prefix;
884 	kr.nexthop = rn->nexthop;
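	/* A link-local (or multicast link-local) nexthop is only meaningful
	 * together with an interface, so pass the ifindex along as scope. */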
885 	if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
886 	    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
887 		kr.scope = rn->ifindex;
888 	kr.ifindex = rn->ifindex;
889 	kr.prefixlen = r->prefixlen;
890 	kr.ext_tag = r->ext_tag;
891 
892 	imsg_compose_event(iev_main, IMSG_KROUTE_CHANGE, 0, 0, -1,
893 	    &kr, sizeof(kr));
894 }
895 
896 void
897 rde_send_delete_kroute(struct rt_node *r)
898 {
899 	struct kroute	 kr;
900 
901 	bzero(&kr, sizeof(kr));
902 	kr.prefix = r->prefix;
903 	kr.prefixlen = r->prefixlen;
904 
905 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
906 	    &kr, sizeof(kr));
907 }
908 
909 void
910 rde_send_summary(pid_t pid)
911 {
912 	static struct ctl_sum	 sumctl;
913 	struct timeval		 now;
914 	struct area		*area;
915 	struct vertex		*v;
916 
917 	bzero(&sumctl, sizeof(struct ctl_sum));
918 
919 	sumctl.rtr_id.s_addr = rde_router_id();
920 	sumctl.spf_delay = rdeconf->spf_delay;
921 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
922 
923 	LIST_FOREACH(area, &rdeconf->area_list, entry)
924 		sumctl.num_area++;
925 
926 	RB_FOREACH(v, lsa_tree, &asext_tree)
927 		sumctl.num_ext_lsa++;
928 
929 	gettimeofday(&now, NULL);
930 	if (rdeconf->uptime < now.tv_sec)
931 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
932 	else
933 		sumctl.uptime = 0;
934 
935 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
936 	    sizeof(sumctl));
937 }
938 
939 void
940 rde_send_summary_area(struct area *area, pid_t pid)
941 {
942 	static struct ctl_sum_area	 sumareactl;
943 	struct iface			*iface;
944 	struct rde_nbr			*nbr;
945 	struct lsa_tree			*tree = &area->lsa_tree;
946 	struct vertex			*v;
947 
948 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
949 
950 	sumareactl.area.s_addr = area->id.s_addr;
951 	sumareactl.num_spf_calc = area->num_spf_calc;
952 
953 	LIST_FOREACH(iface, &area->iface_list, entry)
954 		sumareactl.num_iface++;
955 
956 	LIST_FOREACH(nbr, &area->nbr_list, entry)
957 		if (nbr->state == NBR_STA_FULL && !nbr->self)
958 			sumareactl.num_adj_nbr++;
959 
960 	RB_FOREACH(v, lsa_tree, tree)
961 		sumareactl.num_lsa++;
962 
963 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
964 	    sizeof(sumareactl));
965 }
966 
967 LIST_HEAD(rde_nbr_head, rde_nbr);
968 
969 struct nbr_table {
970 	struct rde_nbr_head	*hashtbl;
971 	u_int32_t		 hashmask;
972 } rdenbrtable;
973 
974 #define RDE_NBR_HASH(x)		\
975 	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
976 
977 void
978 rde_nbr_init(u_int32_t hashsize)
979 {
980 	struct rde_nbr_head	*head;
981 	u_int32_t		 hs, i;
982 
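	/* Round the requested size up to a power of two so RDE_NBR_HASH()
	 * can mask with hashmask instead of using a modulo; e.g. a request
	 * for 100 buckets yields 128 buckets and a hashmask of 127. */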
983 	for (hs = 1; hs < hashsize; hs <<= 1)
984 		;
985 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
986 	if (rdenbrtable.hashtbl == NULL)
987 		fatal("rde_nbr_init");
988 
989 	for (i = 0; i < hs; i++)
990 		LIST_INIT(&rdenbrtable.hashtbl[i]);
991 
992 	rdenbrtable.hashmask = hs - 1;
993 
994 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
995 		fatal("rde_nbr_init");
996 
997 	nbrself->id.s_addr = rde_router_id();
998 	nbrself->peerid = NBR_IDSELF;
999 	nbrself->state = NBR_STA_DOWN;
1000 	nbrself->self = 1;
1001 	head = RDE_NBR_HASH(NBR_IDSELF);
1002 	LIST_INSERT_HEAD(head, nbrself, hash);
1003 }
1004 
1005 void
1006 rde_nbr_free(void)
1007 {
1008 	free(nbrself);
1009 	free(rdenbrtable.hashtbl);
1010 }
1011 
1012 struct rde_nbr *
1013 rde_nbr_find(u_int32_t peerid)
1014 {
1015 	struct rde_nbr_head	*head;
1016 	struct rde_nbr		*nbr;
1017 
1018 	head = RDE_NBR_HASH(peerid);
1019 
1020 	LIST_FOREACH(nbr, head, hash) {
1021 		if (nbr->peerid == peerid)
1022 			return (nbr);
1023 	}
1024 
1025 	return (NULL);
1026 }
1027 
1028 struct rde_nbr *
1029 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
1030 {
1031 	struct rde_nbr_head	*head;
1032 	struct rde_nbr		*nbr;
1033 	struct area		*area;
1034 	struct iface		*iface;
1035 
1036 	if (rde_nbr_find(peerid))
1037 		return (NULL);
1038 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
1039 		fatalx("rde_nbr_new: unknown area");
1040 
1041 	if ((iface = if_find(new->ifindex)) == NULL)
1042 		fatalx("rde_nbr_new: unknown interface");
1043 
1044 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
1045 		fatal("rde_nbr_new");
1046 
1047 	memcpy(nbr, new, sizeof(*nbr));
1048 	nbr->peerid = peerid;
1049 	nbr->area = area;
1050 	nbr->iface = iface;
1051 
1052 	TAILQ_INIT(&nbr->req_list);
1053 
1054 	head = RDE_NBR_HASH(peerid);
1055 	LIST_INSERT_HEAD(head, nbr, hash);
1056 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
1057 
1058 	return (nbr);
1059 }
1060 
1061 void
1062 rde_nbr_del(struct rde_nbr *nbr)
1063 {
1064 	if (nbr == NULL)
1065 		return;
1066 
1067 	rde_req_list_free(nbr);
1068 
1069 	LIST_REMOVE(nbr, entry);
1070 	LIST_REMOVE(nbr, hash);
1071 
1072 	free(nbr);
1073 }
1074 
1075 int
1076 rde_nbr_loading(struct area *area)
1077 {
1078 	struct rde_nbr		*nbr;
1079 	int			 checkall = 0;
1080 
1081 	if (area == NULL) {
1082 		area = LIST_FIRST(&rdeconf->area_list);
1083 		checkall = 1;
1084 	}
1085 
1086 	while (area != NULL) {
1087 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1088 			if (nbr->self)
1089 				continue;
1090 			if (nbr->state & NBR_STA_XCHNG ||
1091 			    nbr->state & NBR_STA_LOAD)
1092 				return (1);
1093 		}
1094 		if (!checkall)
1095 			break;
1096 		area = LIST_NEXT(area, entry);
1097 	}
1098 
1099 	return (0);
1100 }
1101 
1102 struct rde_nbr *
1103 rde_nbr_self(struct area *area)
1104 {
1105 	struct rde_nbr		*nbr;
1106 
1107 	LIST_FOREACH(nbr, &area->nbr_list, entry)
1108 		if (nbr->self)
1109 			return (nbr);
1110 
1111 	/* this should never happen */
1112 	fatalx("rde_nbr_self: area without self");
1113 	return (NULL);
1114 }
1115 
1116 /*
1117  * LSA req list
1118  */
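/* The request list tracks the LSAs requested from a neighbor during database
 * exchange; receiving an older instance of a requested LSA triggers an
 * IMSG_LS_BADREQ above, and the list is freed once the neighbor goes FULL. */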
1119 void
1120 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1121 {
1122 	struct rde_req_entry	*le;
1123 
1124 	if ((le = calloc(1, sizeof(*le))) == NULL)
1125 		fatal("rde_req_list_add");
1126 
1127 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1128 	le->type = lsa->type;
1129 	le->ls_id = lsa->ls_id;
1130 	le->adv_rtr = lsa->adv_rtr;
1131 }
1132 
1133 int
1134 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1135 {
1136 	struct rde_req_entry	*le;
1137 
1138 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1139 		if ((lsa_hdr->type == le->type) &&
1140 		    (lsa_hdr->ls_id == le->ls_id) &&
1141 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1142 			return (1);
1143 	}
1144 	return (0);
1145 }
1146 
1147 void
1148 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1149 {
1150 	struct rde_req_entry	*le;
1151 
1152 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1153 		if ((lsa_hdr->type == le->type) &&
1154 		    (lsa_hdr->ls_id == le->ls_id) &&
1155 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1156 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1157 			free(le);
1158 			return;
1159 		}
1160 	}
1161 }
1162 
1163 void
1164 rde_req_list_free(struct rde_nbr *nbr)
1165 {
1166 	struct rde_req_entry	*le;
1167 
1168 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1169 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1170 		free(le);
1171 	}
1172 }
1173 
1174 /*
1175  * as-external LSA handling
1176  */
1177 struct lsa *
1178 rde_asext_get(struct rroute *rr)
1179 {
1180 	struct area		*area;
1181 	struct iface		*iface;
1182 	struct iface_addr	*ia;
1183 	struct in6_addr		 addr;
1184 
1185 	LIST_FOREACH(area, &rdeconf->area_list, entry)
1186 		LIST_FOREACH(iface, &area->iface_list, entry)
1187 			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1188 				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1189 					continue;
1190 
1191 				inet6applymask(&addr, &ia->addr,
1192 				    rr->kr.prefixlen);
1193 				if (!memcmp(&addr, &rr->kr.prefix,
1194 				    sizeof(addr)) && rr->kr.prefixlen ==
1195 				    ia->prefixlen) {
1196 					/* already announced as Prefix LSA */
1197 					log_debug("rde_asext_get: %s/%d is "
1198 					    "part of prefix LSA",
1199 					    log_in6addr(&rr->kr.prefix),
1200 					    rr->kr.prefixlen);
1201 					return (NULL);
1202 				}
1203 			}
1204 
1205 	/* update of seqnum is done by lsa_merge */
1206 	return (orig_asext_lsa(rr, DEFAULT_AGE));
1207 }
1208 
1209 struct lsa *
1210 rde_asext_put(struct rroute *rr)
1211 {
1212 	/*
1213 	 * Just try to remove the LSA. If the prefix is announced as a
1214 	 * stub net LSA, lsa_find() will fail later and nothing will happen.
1215 	 */
1216 
1217 	/* remove by reflooding with MAX_AGE */
1218 	return (orig_asext_lsa(rr, MAX_AGE));
1219 }
1220 
1221 /*
1222  * summary LSA stuff
1223  */
1224 void
1225 rde_summary_update(struct rt_node *rte, struct area *area)
1226 {
1227 	struct vertex		*v = NULL;
1228 //XXX	struct lsa		*lsa;
1229 	u_int16_t		 type = 0;
1230 
1231 	/* first check if we actually need to announce this route */
1232 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1233 		return;
1234 	/* never create summaries for as-ext LSA */
1235 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1236 		return;
1237 	/* no need for summary LSA in the originating area */
1238 	if (rte->area.s_addr == area->id.s_addr)
1239 		return;
1240 	/* no need to originate inter-area routes to the backbone */
1241 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1242 		return;
1243 	/* TODO nexthop check, nexthop part of area -> no summary */
1244 	if (rte->cost >= LS_INFINITY)
1245 		return;
1246 	/* TODO AS border router specific checks */
1247 	/* TODO inter-area network route stuff */
1248 	/* TODO intra-area stuff -- condense LSA ??? */
1249 
1250 	if (rte->d_type == DT_NET) {
1251 		type = LSA_TYPE_INTER_A_PREFIX;
1252 	} else if (rte->d_type == DT_RTR) {
1253 		type = LSA_TYPE_INTER_A_ROUTER;
1254 	} else
1255 
1256 #if 0 /* XXX a lot todo */
1257 	/* update lsa but only if it was changed */
1258 	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1259 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1260 	lsa_merge(rde_nbr_self(area), lsa, v);
1261 
1262 	if (v == NULL)
1263 		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1264 #endif
1265 
1266 	/* suppressed/deleted routes are not found in the second lsa_find */
1267 	if (v)
1268 		v->cost = rte->cost;
1269 }
1270 
1271 /*
1272  * Functions for self-originated LSAs
1273  */
1274 
1275 /* Prefix LSAs have variable size. We have to be careful to copy the right
1276  * amount of bytes, and to realloc() the right amount of memory. */
1277 void
1278 append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1279 {
1280 	struct lsa_prefix	*copy;
1281 	unsigned int		 lsa_prefix_len;
1282 	unsigned int		 new_len;
1283 	char			*new_lsa;
1284 
1285 	lsa_prefix_len = sizeof(struct lsa_prefix)
1286 	    + LSA_PREFIXSIZE(prefix->prefixlen);
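	/* Per RFC 5340 the prefix body is padded to a whole number of 32-bit
	 * words, so e.g. a /64 prefix occupies 8 bytes and a /65../96 prefix
	 * 12 bytes; memcpy() below copies exactly this much and realloc()
	 * grows the LSA by the same amount. */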
1287 
1288 	new_len = *len + lsa_prefix_len;
1289 
1290 	/* Make sure we have enough space for this prefix. */
1291 	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1292 		fatalx("append_prefix_lsa");
1293 
1294 	/* Append prefix to LSA. */
1295 	copy = (struct lsa_prefix *)(new_lsa + *len);
1296 	memcpy(copy, prefix, lsa_prefix_len);
1297 	copy->metric = 0;
1298 
1299 	*lsa = (struct lsa *)new_lsa;
1300 	*len = new_len;
1301 }
1302 
1303 int
1304 prefix_compare(struct prefix_node *a, struct prefix_node *b)
1305 {
1306 	struct lsa_prefix	*p;
1307 	struct lsa_prefix	*q;
1308 	int			 i;
1309 	int			 len;
1310 
1311 	p = a->prefix;
1312 	q = b->prefix;
1313 
1314 	len = MIN(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
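	/* Compare only the bytes both prefixes actually encode; on a tie the
	 * shorter prefix sorts first, e.g. 2001:db8::/32 before 2001:db8::/48. */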
1315 
1316 	i = memcmp(p + 1, q + 1, len);
1317 	if (i)
1318 		return (i);
1319 	if (p->prefixlen < q->prefixlen)
1320 		return (-1);
1321 	if (p->prefixlen > q->prefixlen)
1322 		return (1);
1323 	return (0);
1324 }
1325 
1326 void
1327 prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1328 {
1329 	struct prefix_node	*old;
1330 	struct prefix_node	*new;
1331 	struct in6_addr		 addr;
1332 	unsigned int		 len;
1333 	unsigned int		 i;
1334 	char			*cur_prefix;
1335 
1336 	cur_prefix = (char *)(lsa + 1);
1337 
1338 	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1339 		if ((new = calloc(1, sizeof(*new))) == NULL)
1340 			fatal("prefix_tree_add");
1341 		new->prefix = (struct lsa_prefix *)cur_prefix;
1342 
1343 		len = sizeof(*new->prefix)
1344 		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1345 
1346 		bzero(&addr, sizeof(addr));
1347 		memcpy(&addr, new->prefix + 1,
1348 		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1349 
1350 		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1351 		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1352 		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
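			/* RB_INSERT() returns the node already in the tree
			 * when the prefix is a duplicate; merge the option
			 * bits into it and drop the duplicate. */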
1353 			old = RB_INSERT(prefix_tree, tree, new);
1354 			if (old != NULL) {
1355 				old->prefix->options |= new->prefix->options;
1356 				free(new);
1357 			}
1358 		}
1359 
1360 		cur_prefix = cur_prefix + len;
1361 	}
1362 }
1363 
1364 RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1365 
1366 struct lsa *
1367 orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
1368 {
1369 	struct lsa		*lsa;
1370 	struct vertex		*v;
1371 	struct rde_nbr		*nbr;
1372 	struct prefix_node	*node;
1373 	struct prefix_tree	 tree;
1374 	int			 num_full_nbr;
1375 	u_int16_t		 len;
1376 	u_int16_t		 numprefix;
1377 
1378 	log_debug("orig_intra_lsa_net: area %s, interface %s",
1379 	    inet_ntoa(area->id), iface->name);
1380 
1381 	RB_INIT(&tree);
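	/* Only while this router is DR on the interface is the LSA actually
	 * built; its prefixes come from the link LSAs of all fully adjacent
	 * neighbors plus our own link LSA. Otherwise an existing copy in the
	 * database is merely flushed (originated below with MAX_AGE). */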
1382 
1383 	if (iface->state & IF_STA_DR) {
1384 		num_full_nbr = 0;
1385 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1386 			if (nbr->self ||
1387 			    nbr->iface->ifindex != iface->ifindex ||
1388 			    (nbr->state & NBR_STA_FULL) == 0)
1389 				continue;
1390 			num_full_nbr++;
1391 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1392 			    htonl(nbr->iface_id), nbr->id.s_addr);
1393 			if (v)
1394 				prefix_tree_add(&tree, &v->lsa->data.link);
1395 		}
1396 		if (num_full_nbr == 0) {
1397 			/* There are no adjacent neighbors on link.
1398 			 * If a copy of this LSA already exists in DB,
1399 			 * it needs to be flushed. orig_intra_lsa_rtr()
1400 			 * will take care of prefixes configured on
1401 			 * this interface. */
1402 			if (!old)
1403 				return NULL;
1404 		} else {
1405 			/* Add our own prefixes configured for this link. */
1406 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1407 			    htonl(iface->ifindex), rde_router_id());
1408 			if (v)
1409 				prefix_tree_add(&tree, &v->lsa->data.link);
1410 		}
1411 	/* Continue only if a copy of this LSA already exists in DB.
1412 	 * It needs to be flushed. */
1413 	} else if (!old)
1414 		return NULL;
1415 
1416 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1417 	if ((lsa = calloc(1, len)) == NULL)
1418 		fatal("orig_intra_lsa_net");
1419 
1420 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
1421 	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
1422 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1423 
1424 	numprefix = 0;
1425 	RB_FOREACH(node, prefix_tree, &tree) {
1426 		append_prefix_lsa(&lsa, &len, node->prefix);
1427 		numprefix++;
1428 	}
1429 
1430 	lsa->data.pref_intra.numprefix = htons(numprefix);
1431 
1432 	while (!RB_EMPTY(&tree))
1433 		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));
1434 
1435 	/* LSA header */
1436 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1437 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1438 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1439 	lsa->hdr.ls_id = htonl(iface->ifindex);
1440 	lsa->hdr.adv_rtr = rde_router_id();
1441 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1442 	lsa->hdr.len = htons(len);
1443 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1444 
1445 	return lsa;
1446 }
1447 
1448 struct lsa *
1449 orig_intra_lsa_rtr(struct area *area, struct vertex *old)
1450 {
1451 	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
1452 				    + sizeof(struct in6_addr)];
1453 	struct lsa		*lsa;
1454 	struct lsa_prefix	*lsa_prefix;
1455 	struct in6_addr		*prefix;
1456 	struct iface		*iface;
1457 	struct iface_addr	*ia;
1458 	struct rde_nbr		*nbr;
1459 	u_int16_t		 len;
1460 	u_int16_t		 numprefix;
1461 
1462 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1463 	if ((lsa = calloc(1, len)) == NULL)
1464 		fatal("orig_intra_lsa_rtr");
1465 
1466 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
1467 	lsa->data.pref_intra.ref_ls_id = 0;
1468 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1469 
1470 	numprefix = 0;
1471 	LIST_FOREACH(iface, &area->iface_list, entry) {
1472 		if (!((iface->flags & IFF_UP) &&
1473 		    LINK_STATE_IS_UP(iface->linkstate)))
1474 			/* interface or link state down */
1475 			continue;
1476 		if ((iface->state & IF_STA_DOWN) &&
1477 		    !(iface->cflags & F_IFACE_PASSIVE))
1478 			/* passive interfaces stay in state DOWN */
1479 			continue;
1480 
1481 		/* Broadcast links with adjacencies are handled
1482 		 * by orig_intra_lsa_net(), ignore. */
1483 		if (iface->type == IF_TYPE_BROADCAST ||
1484 		    iface->type == IF_TYPE_NBMA) {
1485 			if (iface->state & IF_STA_WAITING)
1486 				/* Skip, we're still waiting for
1487 				 * adjacencies to form. */
1488 				continue;
1489 
1490 			LIST_FOREACH(nbr, &area->nbr_list, entry)
1491 				if (!nbr->self &&
1492 				    nbr->iface->ifindex == iface->ifindex &&
1493 				    nbr->state & NBR_STA_FULL)
1494 					break;
1495 			if (nbr)
1496 				continue;
1497 		}
1498 
1499 		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;
1500 
1501 		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1502 			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1503 				continue;
1504 
1505 			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));
1506 
1507 			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
1508 			    iface->state & IF_STA_LOOPBACK) {
1509 				lsa_prefix->prefixlen = 128;
1510 			} else {
1511 				lsa_prefix->prefixlen = ia->prefixlen;
1512 				lsa_prefix->metric = htons(iface->metric);
1513 			}
1514 
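			/* The LA ("local address") bit marks a /128 prefix as
			 * an address of this router (RFC 5340). */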
1515 			if (lsa_prefix->prefixlen == 128)
1516 				lsa_prefix->options |= OSPF_PREFIX_LA;
1517 
1518 			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
1519 			    "%s/%d", inet_ntoa(area->id),
1520 			    iface->name, log_in6addr(&ia->addr),
1521 			    lsa_prefix->prefixlen);
1522 
1523 			prefix = (struct in6_addr *)(lsa_prefix + 1);
1524 			inet6applymask(prefix, &ia->addr,
1525 			    lsa_prefix->prefixlen);
1526 			append_prefix_lsa(&lsa, &len, lsa_prefix);
1527 			numprefix++;
1528 		}
1529 
1530 		/* TODO: Add prefixes of directly attached hosts, too */
1531 		/* TODO: Add prefixes for virtual links */
1532 	}
1533 
1534 	/* If no prefixes were included, continue only if a copy of this
1535 	 * LSA already exists in DB. It needs to be flushed. */
1536 	if (numprefix == 0 && !old) {
1537 		free(lsa);
1538 		return NULL;
1539 	}
1540 
1541 	lsa->data.pref_intra.numprefix = htons(numprefix);
1542 
1543 	/* LSA header */
1544 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1545 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1546 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1547 	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
1548 	lsa->hdr.adv_rtr = rde_router_id();
1549 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1550 	lsa->hdr.len = htons(len);
1551 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1552 
1553 	return lsa;
1554 }
1555 
1556 void
1557 orig_intra_area_prefix_lsas(struct area *area)
1558 {
1559 	struct lsa	*lsa;
1560 	struct vertex	*old;
1561 	struct iface	*iface;
1562 
1563 	LIST_FOREACH(iface, &area->iface_list, entry) {
1564 		if (iface->type == IF_TYPE_BROADCAST ||
1565 		    iface->type == IF_TYPE_NBMA) {
1566 			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1567 			    htonl(iface->ifindex), rde_router_id());
1568 			lsa = orig_intra_lsa_net(area, iface, old);
1569 			if (lsa)
1570 				lsa_merge(rde_nbr_self(area), lsa, old);
1571 		}
1572 	}
1573 
1574 	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1575 		htonl(LS_ID_INTRA_RTR), rde_router_id());
1576 	lsa = orig_intra_lsa_rtr(area, old);
1577 	if (lsa)
1578 		lsa_merge(rde_nbr_self(area), lsa, old);
1579 }
1580 
1581 int
1582 comp_asext(struct lsa *a, struct lsa *b)
1583 {
1584 	/* compare the announced prefixes; returns 0 when they are identical */
1585 	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1586 		return (-1);
1587 	return (memcmp(
1588 	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1589 	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1590 	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
1591 }
1592 
1593 struct lsa *
1594 orig_asext_lsa(struct rroute *rr, u_int16_t age)
1595 {
1596 	struct lsa	*lsa;
1597 	u_int32_t	 ext_tag;
1598 	u_int16_t	 len, ext_off;
1599 
1600 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
1601 	    LSA_PREFIXSIZE(rr->kr.prefixlen);
1602 
1603 	/*
1604 	 * nexthop -- on connected routes we are the nexthop,
1605 	 * in all other cases we should announce the true nexthop
1606 	 * unless that nexthop is outside of the ospf cloud.
1607 	 * XXX for now we don't do this.
1608 	 */
1609 
1610 	ext_off = len;
1611 	if (rr->kr.ext_tag) {
1612 		len += sizeof(ext_tag);
1613 	}
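	/* ext_off marks where the optional 32-bit external route tag starts,
	 * right after the prefix body; it is used below both to read the tag
	 * back from an existing LSA and to write the new one. */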
1614 	if ((lsa = calloc(1, len)) == NULL)
1615 		fatal("orig_asext_lsa");
1616 
1617 	log_debug("orig_asext_lsa: %s/%d age %d",
1618 	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);
1619 
1620 	/* LSA header */
1621 	lsa->hdr.age = htons(age);
1622 	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
1623 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1624 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1625 	lsa->hdr.len = htons(len);
1626 
1627 	lsa->data.asext.prefix.prefixlen = rr->kr.prefixlen;
1628 	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1629 	    &rr->kr.prefix, LSA_PREFIXSIZE(rr->kr.prefixlen));
1630 
1631 	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, lsa->hdr.type,
1632 	    lsa->hdr.adv_rtr, comp_asext, lsa);
1633 
1634 	if (age == MAX_AGE) {
1635 		/* inherit metric and ext_tag from the current LSA;
1636 		 * some routers don't like to get withdraws that are
1637 		 * different from what they have in their table.
1638 		 */
1639 		struct vertex *v;
1640 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1641 		    lsa->hdr.adv_rtr);
1642 		if (v != NULL) {
1643 			rr->metric = ntohl(v->lsa->data.asext.metric);
1644 			if (rr->metric & LSA_ASEXT_T_FLAG) {
1645 				memcpy(&ext_tag, (char *)v->lsa + ext_off,
1646 				    sizeof(ext_tag));
1647 				rr->kr.ext_tag = ntohl(ext_tag);
1648 			}
1649 			rr->metric &= LSA_METRIC_MASK;
1650 		}
1651 	}
1652 
1653 	if (rr->kr.ext_tag) {
1654 		lsa->data.asext.metric = htonl(rr->metric | LSA_ASEXT_T_FLAG);
1655 		ext_tag = htonl(rr->kr.ext_tag);
1656 		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
1657 	} else {
1658 		lsa->data.asext.metric = htonl(rr->metric);
1659 	}
1660 
1661 	lsa->hdr.ls_chksum = 0;
1662 	lsa->hdr.ls_chksum =
1663 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1664 
1665 	return (lsa);
1666 }
1667 
1668 struct lsa *
1669 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1670 {
1671 #if 0 /* XXX a lot todo */
1672 	struct lsa	*lsa;
1673 	u_int16_t	 len;
1674 
1675 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1676 	if ((lsa = calloc(1, len)) == NULL)
1677 		fatal("orig_sum_lsa");
1678 
1679 	/* LSA header */
1680 	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1681 	lsa->hdr.type = type;
1682 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1683 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1684 	lsa->hdr.len = htons(len);
1685 
1686 	/* prefix and mask */
1687 	/*
1688 	 * TODO ls_id must be unique, for overlapping routes this may
1689 	 * not be true. In this case a hack needs to be done to
1690 	 * make the ls_id unique.
1691 	 */
1692 	lsa->hdr.ls_id = rte->prefix.s_addr;
1693 	if (type == LSA_TYPE_SUM_NETWORK)
1694 		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1695 	else
1696 		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1697 
1698 	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1699 
1700 	lsa->hdr.ls_chksum = 0;
1701 	lsa->hdr.ls_chksum =
1702 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1703 
1704 	return (lsa);
1705 #endif
1706 	return NULL;
1707 }
1708