xref: /openbsd/usr.sbin/ospf6d/rde.c (revision f1b790a5)
1 /*	$OpenBSD: rde.c,v 1.97 2024/11/21 13:38:14 claudio Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <net/if_types.h>
25 #include <netinet/in.h>
26 #include <arpa/inet.h>
27 #include <err.h>
28 #include <errno.h>
29 #include <stdlib.h>
30 #include <signal.h>
31 #include <string.h>
32 #include <pwd.h>
33 #include <unistd.h>
34 #include <event.h>
35 
36 #include "ospf6.h"
37 #include "ospf6d.h"
38 #include "ospfe.h"
39 #include "log.h"
40 #include "rde.h"
41 
42 #define MINIMUM(a, b)	(((a) < (b)) ? (a) : (b))
43 
44 void		 rde_sig_handler(int sig, short, void *);
45 __dead void	 rde_shutdown(void);
46 void		 rde_dispatch_imsg(int, short, void *);
47 void		 rde_dispatch_parent(int, short, void *);
48 void		 rde_dump_area(struct area *, int, pid_t);
49 
50 void		 rde_send_summary(pid_t);
51 void		 rde_send_summary_area(struct area *, pid_t);
52 void		 rde_nbr_init(u_int32_t);
53 void		 rde_nbr_free(void);
54 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
55 void		 rde_nbr_del(struct rde_nbr *);
56 
57 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
58 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
59 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
60 void		 rde_req_list_free(struct rde_nbr *);
61 
62 struct iface	*rde_asext_lookup(struct in6_addr, int);
63 void		 rde_asext_get(struct kroute *);
64 void		 rde_asext_put(struct kroute *);
65 
66 int		 comp_asext(struct lsa *, struct lsa *);
67 struct lsa	*orig_asext_lsa(struct kroute *, u_int16_t);
68 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
69 struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
70 		 struct vertex *);
71 struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
72 void		 append_prefix_lsa(struct lsa **, u_int16_t *,
73 		    struct lsa_prefix *);
74 
75 /* A 32-bit value != any ifindex.
76  * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
77 #define	LS_ID_INTRA_RTR	0x01000000
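/*
 * 0x01000000 (16777216) is well above USHRT_MAX (65535), so this LS-ID
 * cannot collide with the interface indexes that orig_intra_lsa_net()
 * uses as LS-IDs for the network-referencing intra-area-prefix LSAs.
 */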
78 
79 /* Tree of prefixes with global scope on a given link,
80  * see orig_intra_lsa_*() */
81 struct prefix_node {
82 	RB_ENTRY(prefix_node)	 entry;
83 	struct lsa_prefix	*prefix;
84 };
85 RB_HEAD(prefix_tree, prefix_node);
86 RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
87 int		 prefix_compare(struct prefix_node *, struct prefix_node *);
88 void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);
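/*
 * The tree is filled by prefix_tree_add() from the link LSAs of a
 * broadcast/NBMA interface and consumed by orig_intra_lsa_net() when
 * the intra-area-prefix LSA for that link is rebuilt.
 */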
89 
90 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
91 static struct imsgev	*iev_ospfe;
92 static struct imsgev	*iev_main;
93 struct rde_nbr		*nbrself;
94 struct lsa_tree		 asext_tree;
95 
96 void
97 rde_sig_handler(int sig, short event, void *arg)
98 {
99 	/*
100 	 * signal handler rules don't apply, libevent decouples for us
101 	 */
102 
103 	switch (sig) {
104 	case SIGINT:
105 	case SIGTERM:
106 		rde_shutdown();
107 		/* NOTREACHED */
108 	default:
109 		fatalx("unexpected signal");
110 	}
111 }
112 
113 /* route decision engine */
114 pid_t
115 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
116     int pipe_parent2ospfe[2])
117 {
118 	struct event		 ev_sigint, ev_sigterm;
119 	struct timeval		 now;
120 	struct passwd		*pw;
121 	pid_t			 pid;
122 
123 	switch (pid = fork()) {
124 	case -1:
125 		fatal("cannot fork");
126 		/* NOTREACHED */
127 	case 0:
128 		break;
129 	default:
130 		return (pid);
131 	}
132 
133 	rdeconf = xconf;
134 
135 	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
136 		fatal("getpwnam");
137 
138 	if (chroot(pw->pw_dir) == -1)
139 		fatal("chroot");
140 	if (chdir("/") == -1)
141 		fatal("chdir(\"/\")");
142 
143 	setproctitle("route decision engine");
144 	/*
145 	 * XXX needed with fork+exec
146 	 * log_init(debug, LOG_DAEMON);
147 	 * log_setverbose(verbose);
148 	 */
149 
150 	ospfd_process = PROC_RDE_ENGINE;
151 	log_procinit(log_procnames[ospfd_process]);
152 
153 	if (setgroups(1, &pw->pw_gid) ||
154 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
155 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
156 		fatal("can't drop privileges");
157 
158 	if (pledge("stdio", NULL) == -1)
159 		fatal("pledge");
160 
161 	event_init();
162 	rde_nbr_init(NBR_HASHSIZE);
163 	lsa_init(&asext_tree);
164 
165 	/* setup signal handler */
166 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
167 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
168 	signal_add(&ev_sigint, NULL);
169 	signal_add(&ev_sigterm, NULL);
170 	signal(SIGPIPE, SIG_IGN);
171 	signal(SIGHUP, SIG_IGN);
172 
173 	/* setup pipes */
174 	close(pipe_ospfe2rde[0]);
175 	close(pipe_parent2rde[0]);
176 	close(pipe_parent2ospfe[0]);
177 	close(pipe_parent2ospfe[1]);
178 
179 	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
180 	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
181 		fatal(NULL);
182 	if (imsgbuf_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]) == -1)
183 		fatal(NULL);
184 	iev_ospfe->handler = rde_dispatch_imsg;
185 	if (imsgbuf_init(&iev_main->ibuf, pipe_parent2rde[1]) == -1)
186 		fatal(NULL);
187 	iev_main->handler = rde_dispatch_parent;
188 
189 	/* setup event handler */
190 	iev_ospfe->events = EV_READ;
191 	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
192 	    iev_ospfe->handler, iev_ospfe);
193 	event_add(&iev_ospfe->ev, NULL);
194 
195 	iev_main->events = EV_READ;
196 	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
197 	    iev_main->handler, iev_main);
198 	event_add(&iev_main->ev, NULL);
199 
200 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
201 	cand_list_init();
202 	rt_init();
203 
204 	/* remove unneeded stuff from config */
205 	conf_clear_redist_list(&rdeconf->redist_list);
206 
207 	gettimeofday(&now, NULL);
208 	rdeconf->uptime = now.tv_sec;
209 
210 	event_dispatch();
211 
212 	rde_shutdown();
213 	/* NOTREACHED */
214 
215 	return (0);
216 }
217 
218 __dead void
219 rde_shutdown(void)
220 {
221 	struct area	*a;
222 	struct vertex	*v, *nv;
223 
224 	/* close pipes */
225 	imsgbuf_clear(&iev_ospfe->ibuf);
226 	close(iev_ospfe->ibuf.fd);
227 	imsgbuf_clear(&iev_main->ibuf);
228 	close(iev_main->ibuf.fd);
229 
230 	stop_spf_timer(rdeconf);
231 	cand_list_clr();
232 	rt_clear();
233 
234 	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
235 		LIST_REMOVE(a, entry);
236 		area_del(a);
237 	}
238 	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
239 		nv = RB_NEXT(lsa_tree, &asext_tree, v);
240 		vertex_free(v);
241 	}
242 	rde_nbr_free();
243 
244 	free(iev_ospfe);
245 	free(iev_main);
246 	free(rdeconf);
247 
248 	log_info("route decision engine exiting");
249 	_exit(0);
250 }
251 
252 int
253 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
254     u_int16_t datalen)
255 {
256 	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
257 	    data, datalen));
258 }
259 
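/*
 * Handle imsgs from the ospfe process: neighbor state changes, database
 * exchange (IMSG_DB_SNAPSHOT, IMSG_DD, IMSG_LS_REQ), LSA updates and
 * flushes, and the various ospf6ctl show requests.
 */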
260 void
261 rde_dispatch_imsg(int fd, short event, void *bula)
262 {
263 	struct imsgev		*iev = bula;
264 	struct imsgbuf		*ibuf = &iev->ibuf;
265 	struct imsg		 imsg;
266 	struct in_addr		 aid;
267 	struct ls_req_hdr	 req_hdr;
268 	struct lsa_hdr		 lsa_hdr, *db_hdr;
269 	struct rde_nbr		 rn, *nbr;
270 	struct timespec		 tp;
271 	struct lsa		*lsa;
272 	struct area		*area;
273 	struct vertex		*v;
274 	char			*buf;
275 	ssize_t			 n;
276 	time_t			 now;
277 	int			 r, state, self, shut = 0, verbose;
278 	u_int16_t		 l;
279 
280 	if (event & EV_READ) {
281 		if ((n = imsgbuf_read(ibuf)) == -1)
282 			fatal("imsgbuf_read error");
283 		if (n == 0)	/* connection closed */
284 			shut = 1;
285 	}
286 	if (event & EV_WRITE) {
287 		if (imsgbuf_write(ibuf) == -1) {
288 			if (errno == EPIPE)	/* connection closed */
289 				shut = 1;
290 			else
291 				fatal("imsgbuf_write");
292 		}
293 	}
294 
295 	clock_gettime(CLOCK_MONOTONIC, &tp);
296 	now = tp.tv_sec;
297 
298 	for (;;) {
299 		if ((n = imsg_get(ibuf, &imsg)) == -1)
300 			fatal("rde_dispatch_imsg: imsg_get error");
301 		if (n == 0)
302 			break;
303 
304 		switch (imsg.hdr.type) {
305 		case IMSG_NEIGHBOR_UP:
306 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
307 				fatalx("invalid size of OE request");
308 			memcpy(&rn, imsg.data, sizeof(rn));
309 
310 			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
311 				fatalx("rde_dispatch_imsg: "
312 				    "neighbor already exists");
313 			break;
314 		case IMSG_NEIGHBOR_DOWN:
315 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
316 			break;
317 		case IMSG_NEIGHBOR_CHANGE:
318 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
319 				fatalx("invalid size of OE request");
320 			memcpy(&state, imsg.data, sizeof(state));
321 
322 			nbr = rde_nbr_find(imsg.hdr.peerid);
323 			if (nbr == NULL)
324 				break;
325 
326 			if (state != nbr->state &&
327 			    (nbr->state & NBR_STA_FULL ||
328 			    state & NBR_STA_FULL)) {
329 				nbr->state = state;
330 				area_track(nbr->area);
331 				orig_intra_area_prefix_lsas(nbr->area);
332 			}
333 
334 			nbr->state = state;
335 			if (nbr->state & NBR_STA_FULL)
336 				rde_req_list_free(nbr);
337 			break;
338 		case IMSG_AREA_CHANGE:
339 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
340 				fatalx("invalid size of OE request");
341 
342 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
343 				if (area->id.s_addr == imsg.hdr.peerid)
344 					break;
345 			}
346 			if (area == NULL)
347 				break;
348 			memcpy(&state, imsg.data, sizeof(state));
349 			area->active = state;
350 			break;
351 		case IMSG_DB_SNAPSHOT:
352 			nbr = rde_nbr_find(imsg.hdr.peerid);
353 			if (nbr == NULL)
354 				break;
355 
356 			lsa_snap(nbr);
357 
358 			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
359 			    0, -1, NULL, 0);
360 			break;
361 		case IMSG_DD:
362 			nbr = rde_nbr_find(imsg.hdr.peerid);
363 			if (nbr == NULL)
364 				break;
365 
366 			buf = imsg.data;
367 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
368 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
369 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
370 				buf += sizeof(lsa_hdr);
371 
372 				v = lsa_find(nbr->iface, lsa_hdr.type,
373 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
374 				if (v == NULL)
375 					db_hdr = NULL;
376 				else
377 					db_hdr = &v->lsa->hdr;
378 
379 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
380 					/*
381 					 * only request LSAs that are
382 					 * newer or missing
383 					 */
384 					rde_req_list_add(nbr, &lsa_hdr);
385 					imsg_compose_event(iev_ospfe, IMSG_DD,
386 					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
387 					    sizeof(lsa_hdr));
388 				}
389 			}
390 			if (l != 0)
391 				log_warnx("rde_dispatch_imsg: peerid %u, "
392 				    "trailing garbage in Database Description "
393 				    "packet", imsg.hdr.peerid);
394 
395 			imsg_compose_event(iev_ospfe, IMSG_DD_END,
396 			    imsg.hdr.peerid, 0, -1, NULL, 0);
397 			break;
398 		case IMSG_LS_REQ:
399 			nbr = rde_nbr_find(imsg.hdr.peerid);
400 			if (nbr == NULL)
401 				break;
402 
403 			buf = imsg.data;
404 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
405 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
406 				memcpy(&req_hdr, buf, sizeof(req_hdr));
407 				buf += sizeof(req_hdr);
408 
409 				if ((v = lsa_find(nbr->iface,
410 				    req_hdr.type, req_hdr.ls_id,
411 				    req_hdr.adv_rtr)) == NULL) {
412 					imsg_compose_event(iev_ospfe,
413 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
414 					    0, -1, NULL, 0);
415 					continue;
416 				}
417 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
418 				    imsg.hdr.peerid, 0, -1, v->lsa,
419 				    ntohs(v->lsa->hdr.len));
420 			}
421 			if (l != 0)
422 				log_warnx("rde_dispatch_imsg: peerid %u, "
423 				    "trailing garbage in LS Request "
424 				    "packet", imsg.hdr.peerid);
425 			break;
426 		case IMSG_LS_UPD:
427 			nbr = rde_nbr_find(imsg.hdr.peerid);
428 			if (nbr == NULL)
429 				break;
430 
431 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
432 			if (lsa == NULL)
433 				fatal(NULL);
434 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
435 
436 			if (!lsa_check(nbr, lsa,
437 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
438 				free(lsa);
439 				break;
440 			}
441 
442 			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
443 			    lsa->hdr.adv_rtr);
444 			if (v == NULL)
445 				db_hdr = NULL;
446 			else
447 				db_hdr = &v->lsa->hdr;
448 
449 			if (nbr->self) {
450 				lsa_merge(nbr, lsa, v);
451 				/* lsa_merge frees the right lsa */
452 				break;
453 			}
454 
455 			r = lsa_newer(&lsa->hdr, db_hdr);
456 			if (r > 0) {
457 				/* new LSA newer than DB */
458 				if (v && v->flooded &&
459 				    v->changed + MIN_LS_ARRIVAL >= now) {
460 					free(lsa);
461 					break;
462 				}
463 
464 				rde_req_list_del(nbr, &lsa->hdr);
465 
466 				if (!(self = lsa_self(nbr, lsa, v)))
467 					if (lsa_add(nbr, lsa))
468 						/* delayed lsa */
469 						break;
470 
471 				/* flood and perhaps ack LSA */
472 				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
473 				    imsg.hdr.peerid, 0, -1, lsa,
474 				    ntohs(lsa->hdr.len));
475 
476 				/* reflood self originated LSA */
477 				if (self && v)
478 					imsg_compose_event(iev_ospfe,
479 					    IMSG_LS_FLOOD, v->peerid, 0, -1,
480 					    v->lsa, ntohs(v->lsa->hdr.len));
481 				/* new LSA was not added so free it */
482 				if (self)
483 					free(lsa);
484 			} else if (r < 0) {
485 				/*
486 				 * point 6 of "The Flooding Procedure"
487 				 * We are violating the RFC here because
488 				 * it does not make sense to reset a session
489 				 * because an equal LSA is already in the table.
490 				 * Only if the LSA sent is older than the one
491 				 * in the table should we reset the session.
492 				 */
493 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
494 					imsg_compose_event(iev_ospfe,
495 					    IMSG_LS_BADREQ, imsg.hdr.peerid,
496 					    0, -1, NULL, 0);
497 					free(lsa);
498 					break;
499 				}
500 
501 				/* lsa no longer needed */
502 				free(lsa);
503 
504 				/* new LSA older than DB */
505 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
506 				    ntohs(db_hdr->age) == MAX_AGE)
507 					/* seq-num wrap */
508 					break;
509 
510 				if (v->changed + MIN_LS_ARRIVAL >= now)
511 					break;
512 
513 				/* directly send current LSA, no ack */
514 				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
515 				    imsg.hdr.peerid, 0, -1, v->lsa,
516 				    ntohs(v->lsa->hdr.len));
517 			} else {
518 				/* LSA equal send direct ack */
519 				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
520 				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
521 				    sizeof(lsa->hdr));
522 				free(lsa);
523 			}
524 			break;
525 		case IMSG_LS_MAXAGE:
526 			nbr = rde_nbr_find(imsg.hdr.peerid);
527 			if (nbr == NULL)
528 				break;
529 
530 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
531 			    sizeof(struct lsa_hdr))
532 				fatalx("invalid size of OE request");
533 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
534 
535 			if (rde_nbr_loading(nbr->area))
536 				break;
537 
538 			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
539 			    lsa_hdr.adv_rtr);
540 			if (v == NULL)
541 				db_hdr = NULL;
542 			else
543 				db_hdr = &v->lsa->hdr;
544 
545 			/*
546 			 * only delete LSA if the one in the db is not newer
547 			 */
548 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
549 				lsa_del(nbr, &lsa_hdr);
550 			break;
551 		case IMSG_CTL_SHOW_DATABASE:
552 		case IMSG_CTL_SHOW_DB_EXT:
553 		case IMSG_CTL_SHOW_DB_LINK:
554 		case IMSG_CTL_SHOW_DB_NET:
555 		case IMSG_CTL_SHOW_DB_RTR:
556 		case IMSG_CTL_SHOW_DB_INTRA:
557 		case IMSG_CTL_SHOW_DB_SELF:
558 		case IMSG_CTL_SHOW_DB_SUM:
559 		case IMSG_CTL_SHOW_DB_ASBR:
560 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
561 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
562 				log_warnx("rde_dispatch_imsg: wrong imsg len");
563 				break;
564 			}
565 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
566 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
567 					rde_dump_area(area, imsg.hdr.type,
568 					    imsg.hdr.pid);
569 				}
570 				lsa_dump(&asext_tree, imsg.hdr.type,
571 				    imsg.hdr.pid);
572 			} else {
573 				memcpy(&aid, imsg.data, sizeof(aid));
574 				if ((area = area_find(rdeconf, aid)) != NULL) {
575 					rde_dump_area(area, imsg.hdr.type,
576 					    imsg.hdr.pid);
577 					if (!area->stub)
578 						lsa_dump(&asext_tree,
579 						    imsg.hdr.type,
580 						    imsg.hdr.pid);
581 				}
582 			}
583 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
584 			    imsg.hdr.pid, -1, NULL, 0);
585 			break;
586 		case IMSG_CTL_SHOW_RIB:
587 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
588 				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
589 				    0, imsg.hdr.pid, -1, area, sizeof(*area));
590 
591 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
592 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
593 			}
594 			aid.s_addr = 0;
595 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
596 
597 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
598 			    imsg.hdr.pid, -1, NULL, 0);
599 			break;
600 		case IMSG_CTL_SHOW_SUM:
601 			rde_send_summary(imsg.hdr.pid);
602 			LIST_FOREACH(area, &rdeconf->area_list, entry)
603 				rde_send_summary_area(area, imsg.hdr.pid);
604 			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
605 			    imsg.hdr.pid, -1, NULL, 0);
606 			break;
607 		case IMSG_IFINFO:
608 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
609 			    sizeof(int))
610 				fatalx("IFINFO imsg with wrong len");
611 
612 			nbr = rde_nbr_find(imsg.hdr.peerid);
613 			if (nbr == NULL)
614 				fatalx("IFINFO imsg with bad peerid");
615 			memcpy(&nbr->iface->state, imsg.data, sizeof(int));
616 
617 			/* Resend LSAs if interface state changes. */
618 			orig_intra_area_prefix_lsas(nbr->area);
619 			break;
620 		case IMSG_CTL_LOG_VERBOSE:
621 			/* already checked by ospfe */
622 			memcpy(&verbose, imsg.data, sizeof(verbose));
623 			log_setverbose(verbose);
624 			break;
625 		default:
626 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
627 			    imsg.hdr.type);
628 			break;
629 		}
630 		imsg_free(&imsg);
631 	}
632 	if (!shut)
633 		imsg_event_add(iev);
634 	else {
635 		/* this pipe is dead, so remove the event handler */
636 		event_del(&iev->ev);
637 		event_loopexit(NULL);
638 	}
639 }
640 
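/*
 * Handle imsgs from the parent process: redistributed kernel routes
 * (IMSG_NETWORK_ADD/DEL), interface and address changes, and config
 * reloads (IMSG_RECONF_*).
 */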
641 void
642 rde_dispatch_parent(int fd, short event, void *bula)
643 {
644 	static struct area	*narea;
645 	struct area		*area;
646 	struct iface		*iface, *ifp, *i;
647 	struct ifaddrchange	*ifc;
648 	struct iface_addr	*ia, *nia;
649 	struct imsg		 imsg;
650 	struct kroute		 kr;
651 	struct imsgev		*iev = bula;
652 	struct imsgbuf		*ibuf = &iev->ibuf;
653 	ssize_t			 n;
654 	int			 shut = 0, link_ok, prev_link_ok, orig_lsa;
655 
656 	if (event & EV_READ) {
657 		if ((n = imsgbuf_read(ibuf)) == -1)
658 			fatal("imsgbuf_read error");
659 		if (n == 0)	/* connection closed */
660 			shut = 1;
661 	}
662 	if (event & EV_WRITE) {
663 		if (imsgbuf_write(ibuf) == -1) {
664 			if (errno == EPIPE)	/* connection closed */
665 				shut = 1;
666 			else
667 				fatal("imsgbuf_write");
668 		}
669 	}
670 
671 	for (;;) {
672 		if ((n = imsg_get(ibuf, &imsg)) == -1)
673 			fatal("rde_dispatch_parent: imsg_get error");
674 		if (n == 0)
675 			break;
676 
677 		switch (imsg.hdr.type) {
678 		case IMSG_NETWORK_ADD:
679 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
680 				log_warnx("rde_dispatch_parent: "
681 				    "wrong imsg len");
682 				break;
683 			}
684 			memcpy(&kr, imsg.data, sizeof(kr));
685 			rde_asext_get(&kr);
686 			break;
687 		case IMSG_NETWORK_DEL:
688 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
689 				log_warnx("rde_dispatch_parent: "
690 				    "wrong imsg len");
691 				break;
692 			}
693 			memcpy(&kr, imsg.data, sizeof(kr));
694 			rde_asext_put(&kr);
695 			break;
696 		case IMSG_IFINFO:
697 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
698 			    sizeof(struct iface))
699 				fatalx("IFINFO imsg with wrong len");
700 
701 			ifp = imsg.data;
702 
703 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
704 				orig_lsa = 0;
705 				LIST_FOREACH(i, &area->iface_list, entry) {
706 					if (strcmp(i->dependon,
707 					    ifp->name) == 0) {
708 						i->depend_ok =
709 						    ifstate_is_up(ifp);
710 						if (ifstate_is_up(i))
711 							orig_lsa = 1;
712 					}
713 				}
714 				if (orig_lsa)
715 					orig_intra_area_prefix_lsas(area);
716 			}
717 
718 			if (!(ifp->cflags & F_IFACE_CONFIGURED))
719 				break;
720 			iface = if_find(ifp->ifindex);
721 			if (iface == NULL)
722 				fatalx("interface lost in rde");
723 
724 			prev_link_ok = (iface->flags & IFF_UP) &&
725 			    LINK_STATE_IS_UP(iface->linkstate);
726 
727 			if_update(iface, ifp->mtu, ifp->flags, ifp->if_type,
728 			    ifp->linkstate, ifp->baudrate, ifp->rdomain);
729 
730 			/* Resend LSAs if interface state changes. */
731 			link_ok = (iface->flags & IFF_UP) &&
732 			          LINK_STATE_IS_UP(iface->linkstate);
733 			if (prev_link_ok == link_ok)
734 				break;
735 
736 			orig_intra_area_prefix_lsas(iface->area);
737 
738 			break;
739 		case IMSG_IFADDRNEW:
740 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
741 			    sizeof(struct ifaddrchange))
742 				fatalx("IFADDRNEW imsg with wrong len");
743 			ifc = imsg.data;
744 
745 			iface = if_find(ifc->ifindex);
746 			if (iface == NULL)
747 				fatalx("IFADDRNEW interface lost in rde");
748 
749 			if ((ia = calloc(1, sizeof(struct iface_addr))) ==
750 			    NULL)
751 				fatal("rde_dispatch_parent IFADDRNEW");
752 			ia->addr = ifc->addr;
753 			ia->dstbrd = ifc->dstbrd;
754 			ia->prefixlen = ifc->prefixlen;
755 
756 			TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
757 			if (iface->area)
758 				orig_intra_area_prefix_lsas(iface->area);
759 			break;
760 		case IMSG_IFADDRDEL:
761 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
762 			    sizeof(struct ifaddrchange))
763 				fatalx("IFADDRDEL imsg with wrong len");
764 			ifc = imsg.data;
765 
766 			iface = if_find(ifc->ifindex);
767 			if (iface == NULL)
768 				fatalx("IFADDRDEL interface lost in rde");
769 
770 			for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL;
771 			    ia = nia) {
772 				nia = TAILQ_NEXT(ia, entry);
773 
774 				if (IN6_ARE_ADDR_EQUAL(&ia->addr,
775 				    &ifc->addr)) {
776 					TAILQ_REMOVE(&iface->ifa_list, ia,
777 					    entry);
778 					free(ia);
779 					break;
780 				}
781 			}
782 			if (iface->area)
783 				orig_intra_area_prefix_lsas(iface->area);
784 			break;
785 		case IMSG_RECONF_CONF:
786 			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
787 			    NULL)
788 				fatal(NULL);
789 			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
790 
791 			LIST_INIT(&nconf->area_list);
792 			LIST_INIT(&nconf->cand_list);
793 			break;
794 		case IMSG_RECONF_AREA:
795 			if ((narea = area_new()) == NULL)
796 				fatal(NULL);
797 			memcpy(narea, imsg.data, sizeof(struct area));
798 
799 			LIST_INIT(&narea->iface_list);
800 			LIST_INIT(&narea->nbr_list);
801 			RB_INIT(&narea->lsa_tree);
802 
803 			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
804 			break;
805 		case IMSG_RECONF_END:
806 			merge_config(rdeconf, nconf);
807 			nconf = NULL;
808 			break;
809 		default:
810 			log_debug("rde_dispatch_parent: unexpected imsg %d",
811 			    imsg.hdr.type);
812 			break;
813 		}
814 		imsg_free(&imsg);
815 	}
816 	if (!shut)
817 		imsg_event_add(iev);
818 	else {
819 		/* this pipe is dead, so remove the event handler */
820 		event_del(&iev->ev);
821 		event_loopexit(NULL);
822 	}
823 }
824 
825 void
826 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
827 {
828 	struct iface	*iface;
829 
830 	/* dump header */
831 	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
832 	    area, sizeof(*area));
833 
834 	/* dump link local lsa */
835 	LIST_FOREACH(iface, &area->iface_list, entry) {
836 		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
837 		    0, pid, -1, iface, sizeof(*iface));
838 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
839 	}
840 
841 	/* dump area lsa */
842 	lsa_dump(&area->lsa_tree, imsg_type, pid);
843 }
844 
845 u_int32_t
846 rde_router_id(void)
847 {
848 	return (rdeconf->rtr_id.s_addr);
849 }
850 
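/*
 * Push the usable nexthops of a route to the parent as a single
 * IMSG_KROUTE_CHANGE message; invalid and connected nexthops are
 * skipped, and if none remain the route is withdrawn via
 * rde_send_delete_kroute() instead.
 */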
851 void
852 rde_send_change_kroute(struct rt_node *r)
853 {
854 	int			 krcount = 0;
855 	struct kroute		 kr;
856 	struct rt_nexthop	*rn;
857 	struct ibuf		*wbuf;
858 
859 	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
860 	    sizeof(kr))) == NULL) {
861 		return;
862 	}
863 
864 	TAILQ_FOREACH(rn, &r->nexthop, entry) {
865 		if (rn->invalid)
866 			continue;
867 		if (rn->connected)
868 			/* skip self-originated routes */
869 			continue;
870 		krcount++;
871 
872 		bzero(&kr, sizeof(kr));
873 		kr.prefix = r->prefix;
874 		kr.nexthop = rn->nexthop;
875 		if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
876 		    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
877 			kr.scope = rn->ifindex;
878 		kr.ifindex = rn->ifindex;
879 		kr.prefixlen = r->prefixlen;
880 		kr.ext_tag = r->ext_tag;
881 		imsg_add(wbuf, &kr, sizeof(kr));
882 	}
883 	if (krcount == 0) {
884 		/* no valid nexthop or self originated, so remove */
885 		ibuf_free(wbuf);
886 		rde_send_delete_kroute(r);
887 		return;
888 	}
889 
890 	imsg_close(&iev_main->ibuf, wbuf);
891 	imsg_event_add(iev_main);
892 }
893 
894 void
895 rde_send_delete_kroute(struct rt_node *r)
896 {
897 	struct kroute	 kr;
898 
899 	bzero(&kr, sizeof(kr));
900 	kr.prefix = r->prefix;
901 	kr.prefixlen = r->prefixlen;
902 
903 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
904 	    &kr, sizeof(kr));
905 }
906 
907 void
908 rde_send_summary(pid_t pid)
909 {
910 	static struct ctl_sum	 sumctl;
911 	struct timeval		 now;
912 	struct area		*area;
913 	struct vertex		*v;
914 
915 	bzero(&sumctl, sizeof(struct ctl_sum));
916 
917 	sumctl.rtr_id.s_addr = rde_router_id();
918 	sumctl.spf_delay = rdeconf->spf_delay;
919 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
920 
921 	LIST_FOREACH(area, &rdeconf->area_list, entry)
922 		sumctl.num_area++;
923 
924 	RB_FOREACH(v, lsa_tree, &asext_tree)
925 		sumctl.num_ext_lsa++;
926 
927 	gettimeofday(&now, NULL);
928 	if (rdeconf->uptime < now.tv_sec)
929 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
930 	else
931 		sumctl.uptime = 0;
932 
933 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
934 	    sizeof(sumctl));
935 }
936 
937 void
938 rde_send_summary_area(struct area *area, pid_t pid)
939 {
940 	static struct ctl_sum_area	 sumareactl;
941 	struct iface			*iface;
942 	struct rde_nbr			*nbr;
943 	struct lsa_tree			*tree = &area->lsa_tree;
944 	struct vertex			*v;
945 
946 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
947 
948 	sumareactl.area.s_addr = area->id.s_addr;
949 	sumareactl.num_spf_calc = area->num_spf_calc;
950 
951 	LIST_FOREACH(iface, &area->iface_list, entry)
952 		sumareactl.num_iface++;
953 
954 	LIST_FOREACH(nbr, &area->nbr_list, entry)
955 		if (nbr->state == NBR_STA_FULL && !nbr->self)
956 			sumareactl.num_adj_nbr++;
957 
958 	RB_FOREACH(v, lsa_tree, tree)
959 		sumareactl.num_lsa++;
960 
961 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
962 	    sizeof(sumareactl));
963 }
964 
965 LIST_HEAD(rde_nbr_head, rde_nbr);
966 
967 struct nbr_table {
968 	struct rde_nbr_head	*hashtbl;
969 	u_int32_t		 hashmask;
970 } rdenbrtable;
971 
972 #define RDE_NBR_HASH(x)		\
973 	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
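/*
 * The neighbor table is a hash keyed by the 32-bit peerid.
 * rde_nbr_init() rounds the requested size up to a power of two so the
 * bitwise AND above selects a bucket, e.g. with 128 buckets the mask is
 * 0x7f and peerid 0x1234 ends up in bucket 0x34.
 */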
974 
975 void
976 rde_nbr_init(u_int32_t hashsize)
977 {
978 	struct rde_nbr_head	*head;
979 	u_int32_t		 hs, i;
980 
981 	for (hs = 1; hs < hashsize; hs <<= 1)
982 		;
983 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
984 	if (rdenbrtable.hashtbl == NULL)
985 		fatal("rde_nbr_init");
986 
987 	for (i = 0; i < hs; i++)
988 		LIST_INIT(&rdenbrtable.hashtbl[i]);
989 
990 	rdenbrtable.hashmask = hs - 1;
991 
992 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
993 		fatal("rde_nbr_init");
994 
995 	nbrself->id.s_addr = rde_router_id();
996 	nbrself->peerid = NBR_IDSELF;
997 	nbrself->state = NBR_STA_DOWN;
998 	nbrself->self = 1;
999 	head = RDE_NBR_HASH(NBR_IDSELF);
1000 	LIST_INSERT_HEAD(head, nbrself, hash);
1001 }
1002 
1003 void
1004 rde_nbr_free(void)
1005 {
1006 	free(nbrself);
1007 	free(rdenbrtable.hashtbl);
1008 }
1009 
1010 struct rde_nbr *
1011 rde_nbr_find(u_int32_t peerid)
1012 {
1013 	struct rde_nbr_head	*head;
1014 	struct rde_nbr		*nbr;
1015 
1016 	head = RDE_NBR_HASH(peerid);
1017 
1018 	LIST_FOREACH(nbr, head, hash) {
1019 		if (nbr->peerid == peerid)
1020 			return (nbr);
1021 	}
1022 
1023 	return (NULL);
1024 }
1025 
1026 struct rde_nbr *
1027 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
1028 {
1029 	struct rde_nbr_head	*head;
1030 	struct rde_nbr		*nbr;
1031 	struct area		*area;
1032 	struct iface		*iface;
1033 
1034 	if (rde_nbr_find(peerid))
1035 		return (NULL);
1036 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
1037 		fatalx("rde_nbr_new: unknown area");
1038 
1039 	if ((iface = if_find(new->ifindex)) == NULL)
1040 		fatalx("rde_nbr_new: unknown interface");
1041 
1042 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
1043 		fatal("rde_nbr_new");
1044 
1045 	memcpy(nbr, new, sizeof(*nbr));
1046 	nbr->peerid = peerid;
1047 	nbr->area = area;
1048 	nbr->iface = iface;
1049 
1050 	TAILQ_INIT(&nbr->req_list);
1051 
1052 	head = RDE_NBR_HASH(peerid);
1053 	LIST_INSERT_HEAD(head, nbr, hash);
1054 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
1055 
1056 	return (nbr);
1057 }
1058 
1059 void
1060 rde_nbr_del(struct rde_nbr *nbr)
1061 {
1062 	if (nbr == NULL)
1063 		return;
1064 
1065 	rde_req_list_free(nbr);
1066 
1067 	LIST_REMOVE(nbr, entry);
1068 	LIST_REMOVE(nbr, hash);
1069 
1070 	free(nbr);
1071 }
1072 
1073 int
1074 rde_nbr_loading(struct area *area)
1075 {
1076 	struct rde_nbr		*nbr;
1077 	int			 checkall = 0;
1078 
1079 	if (area == NULL) {
1080 		area = LIST_FIRST(&rdeconf->area_list);
1081 		checkall = 1;
1082 	}
1083 
1084 	while (area != NULL) {
1085 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1086 			if (nbr->self)
1087 				continue;
1088 			if (nbr->state & NBR_STA_XCHNG ||
1089 			    nbr->state & NBR_STA_LOAD)
1090 				return (1);
1091 		}
1092 		if (!checkall)
1093 			break;
1094 		area = LIST_NEXT(area, entry);
1095 	}
1096 
1097 	return (0);
1098 }
1099 
1100 struct rde_nbr *
1101 rde_nbr_self(struct area *area)
1102 {
1103 	struct rde_nbr		*nbr;
1104 
1105 	LIST_FOREACH(nbr, &area->nbr_list, entry)
1106 		if (nbr->self)
1107 			return (nbr);
1108 
1109 	/* this should never happen */
1110 	fatalx("rde_nbr_self: area without self");
1111 	return (NULL);
1112 }
1113 
1114 /*
1115  * LSA req list
1116  */
1117 void
1118 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1119 {
1120 	struct rde_req_entry	*le;
1121 
1122 	if ((le = calloc(1, sizeof(*le))) == NULL)
1123 		fatal("rde_req_list_add");
1124 
1125 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1126 	le->type = lsa->type;
1127 	le->ls_id = lsa->ls_id;
1128 	le->adv_rtr = lsa->adv_rtr;
1129 }
1130 
1131 int
1132 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1133 {
1134 	struct rde_req_entry	*le;
1135 
1136 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1137 		if ((lsa_hdr->type == le->type) &&
1138 		    (lsa_hdr->ls_id == le->ls_id) &&
1139 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1140 			return (1);
1141 	}
1142 	return (0);
1143 }
1144 
1145 void
1146 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1147 {
1148 	struct rde_req_entry	*le;
1149 
1150 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1151 		if ((lsa_hdr->type == le->type) &&
1152 		    (lsa_hdr->ls_id == le->ls_id) &&
1153 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1154 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1155 			free(le);
1156 			return;
1157 		}
1158 	}
1159 }
1160 
1161 void
1162 rde_req_list_free(struct rde_nbr *nbr)
1163 {
1164 	struct rde_req_entry	*le;
1165 
1166 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1167 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1168 		free(le);
1169 	}
1170 }
1171 
1172 /*
1173  * as-external LSA handling
1174  */
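/*
 * rde_asext_lookup() returns the interface that already covers the given
 * prefix with one of its configured addresses (plen -1 matches any
 * prefix length).  It is used to suppress AS-external LSAs for networks
 * that are announced as (stub) net LSAs anyway.
 */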
1175 struct iface *
1176 rde_asext_lookup(struct in6_addr prefix, int plen)
1177 {
1178 
1179 	struct area		*area;
1180 	struct iface		*iface;
1181 	struct iface_addr	*ia;
1182 	struct in6_addr		 ina, inb;
1183 
1184 	LIST_FOREACH(area, &rdeconf->area_list, entry) {
1185 		LIST_FOREACH(iface, &area->iface_list, entry) {
1186 			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1187 				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1188 					continue;
1189 
1190 				inet6applymask(&ina, &ia->addr, ia->prefixlen);
1191 				inet6applymask(&inb, &prefix, ia->prefixlen);
1192 				if (IN6_ARE_ADDR_EQUAL(&ina, &inb) &&
1193 				    (plen == -1 || plen == ia->prefixlen))
1194 					return (iface);
1195 			}
1196 		}
1197 	}
1198 	return (NULL);
1199 }
1200 
1201 void
1202 rde_asext_get(struct kroute *kr)
1203 {
1204 	struct vertex	*v;
1205 	struct lsa	*lsa;
1206 
1207 	if (rde_asext_lookup(kr->prefix, kr->prefixlen)) {
1208 		/* already announced as (stub) net LSA */
1209 		log_debug("rde_asext_get: %s/%d is net LSA",
1210 		    log_in6addr(&kr->prefix), kr->prefixlen);
1211 		return;
1212 	}
1213 
1214 	/* update of seqnum is done by lsa_merge */
1215 	if ((lsa = orig_asext_lsa(kr, DEFAULT_AGE))) {
1216 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1217 		    lsa->hdr.adv_rtr);
1218 		lsa_merge(nbrself, lsa, v);
1219 	}
1220 }
1221 
1222 void
1223 rde_asext_put(struct kroute *kr)
1224 {
1225 	struct vertex	*v;
1226 	struct lsa	*lsa;
1227 	/*
1228 	 * just try to remove the LSA. If the prefix is announced as a
1229 	 * stub net LSA, lsa_find() will fail later and nothing will happen.
1230 	 */
1231 
1232 	/* remove by reflooding with MAX_AGE */
1233 	if ((lsa = orig_asext_lsa(kr, MAX_AGE))) {
1234 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1235 		    lsa->hdr.adv_rtr);
1236 
1237 		/*
1238 		 * if v == NULL no LSA is in the table and
1239 		 * nothing has to be done.
1240 		 */
1241 		if (v)
1242 			lsa_merge(nbrself, lsa, v);
1243 		else
1244 			free(lsa);
1245 	}
1246 }
1247 
1248 /*
1249  * summary LSA stuff
1250  */
1251 void
1252 rde_summary_update(struct rt_node *rte, struct area *area)
1253 {
1254 	struct vertex		*v = NULL;
1255 #if 0 /* XXX */
1256 	struct lsa		*lsa;
1257 	u_int16_t		 type = 0;
1258 #endif
1259 
1260 	/* first check if we actually need to announce this route */
1261 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1262 		return;
1263 	/* never create summaries for as-ext LSA */
1264 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1265 		return;
1266 	/* no need for summary LSA in the originating area */
1267 	if (rte->area.s_addr == area->id.s_addr)
1268 		return;
1269 	/* no need to originate inter-area routes to the backbone */
1270 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1271 		return;
1272 	/* TODO nexthop check, nexthop part of area -> no summary */
1273 	if (rte->cost >= LS_INFINITY)
1274 		return;
1275 	/* TODO AS border router specific checks */
1276 	/* TODO inter-area network route stuff */
1277 	/* TODO intra-area stuff -- condense LSA ??? */
1278 
1279 #if 0 /* XXX a lot to do */
1280 	if (rte->d_type == DT_NET) {
1281 		type = LSA_TYPE_INTER_A_PREFIX;
1282 	} else if (rte->d_type == DT_RTR) {
1283 		type = LSA_TYPE_INTER_A_ROUTER;
1284 	} else
1285 
1286 	/* update lsa but only if it was changed */
1287 	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1288 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1289 	lsa_merge(rde_nbr_self(area), lsa, v);
1290 
1291 	if (v == NULL)
1292 		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1293 #endif
1294 
1295 	/* suppressed/deleted routes are not found in the second lsa_find */
1296 	if (v)
1297 		v->cost = rte->cost;
1298 }
1299 
1300 /*
1301  * Functions for self-originated LSAs
1302  */
1303 
1304 /* Prefix LSAs have variable size. We have to be careful to copy the right
1305  * number of bytes, and to realloc() the right amount of memory. */
1306 void
1307 append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1308 {
1309 	struct lsa_prefix	*copy;
1310 	unsigned int		 lsa_prefix_len;
1311 	unsigned int		 new_len;
1312 	char			*new_lsa;
1313 
1314 	lsa_prefix_len = sizeof(struct lsa_prefix)
1315 	    + LSA_PREFIXSIZE(prefix->prefixlen);
1316 
1317 	new_len = *len + lsa_prefix_len;
1318 
1319 	/* Make sure we have enough space for this prefix. */
1320 	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1321 		fatalx("append_prefix_lsa");
1322 
1323 	/* Append prefix to LSA. */
1324 	copy = (struct lsa_prefix *)(new_lsa + *len);
1325 	memcpy(copy, prefix, lsa_prefix_len);
1326 
1327 	*lsa = (struct lsa *)new_lsa;
1328 	*len = new_len;
1329 }
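/*
 * Per RFC 5340 the address bits of a prefix are padded to a 32-bit
 * boundary, so LSA_PREFIXSIZE() is e.g. 8 bytes for a 64-bit prefix and
 * 12 bytes for a 65-bit one; the fixed struct lsa_prefix header is
 * accounted for separately above.
 */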
1330 
1331 int
1332 prefix_compare(struct prefix_node *a, struct prefix_node *b)
1333 {
1334 	struct lsa_prefix	*p;
1335 	struct lsa_prefix	*q;
1336 	int			 i;
1337 	int			 len;
1338 
1339 	p = a->prefix;
1340 	q = b->prefix;
1341 
1342 	len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1343 
1344 	i = memcmp(p + 1, q + 1, len);
1345 	if (i)
1346 		return (i);
1347 	if (p->prefixlen < q->prefixlen)
1348 		return (-1);
1349 	if (p->prefixlen > q->prefixlen)
1350 		return (1);
1351 	return (0);
1352 }
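/*
 * RB tree ordering: prefixes sort by their (truncated) address bytes
 * first and by prefix length second, so identical prefixes coming from
 * different link LSAs collapse onto one node in prefix_tree_add().
 */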
1353 
1354 void
1355 prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1356 {
1357 	struct prefix_node	*old;
1358 	struct prefix_node	*new;
1359 	struct in6_addr		 addr;
1360 	unsigned int		 len;
1361 	unsigned int		 i;
1362 	char			*cur_prefix;
1363 
1364 	cur_prefix = (char *)(lsa + 1);
1365 
1366 	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1367 		if ((new = calloc(1, sizeof(*new))) == NULL)
1368 			fatal("prefix_tree_add");
1369 		new->prefix = (struct lsa_prefix *)cur_prefix;
1370 
1371 		len = sizeof(*new->prefix)
1372 		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1373 
1374 		bzero(&addr, sizeof(addr));
1375 		memcpy(&addr, new->prefix + 1,
1376 		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1377 
1378 		new->prefix->metric = 0;
1379 
1380 		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1381 		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1382 		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1383 			old = RB_INSERT(prefix_tree, tree, new);
1384 			if (old != NULL) {
1385 				old->prefix->options |= new->prefix->options;
1386 				free(new);
1387 			}
1388 		} else
1389 			free(new);
1390 
1391 		cur_prefix = cur_prefix + len;
1392 	}
1393 }
1394 
1395 RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1396 
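/*
 * Build the intra-area-prefix LSA that references the network LSA of a
 * broadcast/NBMA link.  Prefixes are only collected while this router is
 * DR and has fully adjacent neighbors on the link, deduplicated via the
 * prefix tree; otherwise a MAX_AGE copy is produced to flush an existing
 * LSA.
 */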
1397 struct lsa *
1398 orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
1399 {
1400 	struct lsa		*lsa;
1401 	struct vertex		*v;
1402 	struct rde_nbr		*nbr;
1403 	struct prefix_node	*node;
1404 	struct prefix_tree	 tree;
1405 	int			 num_full_nbr;
1406 	u_int16_t		 len;
1407 	u_int16_t		 numprefix;
1408 
1409 	log_debug("orig_intra_lsa_net: area %s, interface %s",
1410 	    inet_ntoa(area->id), iface->name);
1411 
1412 	RB_INIT(&tree);
1413 
1414 	if (iface->state & IF_STA_DR) {
1415 		num_full_nbr = 0;
1416 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1417 			if (nbr->self ||
1418 			    nbr->iface->ifindex != iface->ifindex ||
1419 			    (nbr->state & NBR_STA_FULL) == 0)
1420 				continue;
1421 			num_full_nbr++;
1422 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1423 			    htonl(nbr->iface_id), nbr->id.s_addr);
1424 			if (v)
1425 				prefix_tree_add(&tree, &v->lsa->data.link);
1426 		}
1427 		if (num_full_nbr == 0) {
1428 			/* There are no adjacent neighbors on link.
1429 			 * If a copy of this LSA already exists in DB,
1430 			 * it needs to be flushed. orig_intra_lsa_rtr()
1431 			 * will take care of prefixes configured on
1432 			 * this interface. */
1433 			if (!old)
1434 				return NULL;
1435 		} else {
1436 			/* Add our own prefixes configured for this link. */
1437 			v = lsa_find(iface, htons(LSA_TYPE_LINK),
1438 			    htonl(iface->ifindex), rde_router_id());
1439 			if (v)
1440 				prefix_tree_add(&tree, &v->lsa->data.link);
1441 		}
1442 	/* Continue only if a copy of this LSA already exists in DB.
1443 	 * It needs to be flushed. */
1444 	} else if (!old)
1445 		return NULL;
1446 
1447 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1448 	if ((lsa = calloc(1, len)) == NULL)
1449 		fatal("orig_intra_lsa_net");
1450 
1451 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
1452 	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
1453 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1454 
1455 	numprefix = 0;
1456 	RB_FOREACH(node, prefix_tree, &tree) {
1457 		append_prefix_lsa(&lsa, &len, node->prefix);
1458 		numprefix++;
1459 	}
1460 
1461 	lsa->data.pref_intra.numprefix = htons(numprefix);
1462 
1463 	while (!RB_EMPTY(&tree))
1464 		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));
1465 
1466 	/* LSA header */
1467 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1468 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1469 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1470 	lsa->hdr.ls_id = htonl(iface->ifindex);
1471 	lsa->hdr.adv_rtr = rde_router_id();
1472 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1473 	lsa->hdr.len = htons(len);
1474 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1475 
1476 	return lsa;
1477 }
1478 
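/*
 * Build the router-referencing intra-area-prefix LSA: one prefix per
 * usable, non-link-local address on every active interface of the area
 * that is not already covered by orig_intra_lsa_net().
 */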
1479 struct lsa *
1480 orig_intra_lsa_rtr(struct area *area, struct vertex *old)
1481 {
1482 	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
1483 				    + sizeof(struct in6_addr)];
1484 	struct lsa		*lsa;
1485 	struct lsa_prefix	*lsa_prefix;
1486 	struct in6_addr		*prefix;
1487 	struct iface		*iface;
1488 	struct iface_addr	*ia;
1489 	struct rde_nbr		*nbr;
1490 	u_int16_t		 len;
1491 	u_int16_t		 numprefix;
1492 
1493 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1494 	if ((lsa = calloc(1, len)) == NULL)
1495 		fatal("orig_intra_lsa_rtr");
1496 
1497 	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
1498 	lsa->data.pref_intra.ref_ls_id = 0;
1499 	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1500 
1501 	numprefix = 0;
1502 	LIST_FOREACH(iface, &area->iface_list, entry) {
1503 		if (!((iface->flags & IFF_UP) &&
1504 		    LINK_STATE_IS_UP(iface->linkstate)) &&
1505 		    !(iface->if_type == IFT_CARP))
1506 			/* interface or link state down
1507 			 * and not a carp interface */
1508 			continue;
1509 
1510 		if (iface->if_type == IFT_CARP &&
1511 		    (iface->linkstate == LINK_STATE_UNKNOWN ||
1512 		    iface->linkstate == LINK_STATE_INVALID))
1513 			/* carp interface in state invalid or unknown */
1514 			continue;
1515 
1516 		if ((iface->state & IF_STA_DOWN) &&
1517 		    !(iface->cflags & F_IFACE_PASSIVE))
1518 			/* passive interfaces stay in state DOWN */
1519 			continue;
1520 
1521 		/* Broadcast links with adjacencies are handled
1522 		 * by orig_intra_lsa_net(), ignore. */
1523 		if (iface->type == IF_TYPE_BROADCAST ||
1524 		    iface->type == IF_TYPE_NBMA) {
1525 			if (iface->state & IF_STA_WAITING)
1526 				/* Skip, we're still waiting for
1527 				 * adjacencies to form. */
1528 				continue;
1529 
1530 			LIST_FOREACH(nbr, &area->nbr_list, entry)
1531 				if (!nbr->self &&
1532 				    nbr->iface->ifindex == iface->ifindex &&
1533 				    nbr->state & NBR_STA_FULL)
1534 					break;
1535 			if (nbr)
1536 				continue;
1537 		}
1538 
1539 		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;
1540 
1541 		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1542 			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1543 				continue;
1544 
1545 			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));
1546 
1547 			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
1548 			    iface->state & IF_STA_LOOPBACK) {
1549 				lsa_prefix->prefixlen = 128;
1550 				lsa_prefix->metric = 0;
1551 			} else if ((iface->if_type == IFT_CARP &&
1552 				   iface->linkstate == LINK_STATE_DOWN) ||
1553 				   !(iface->depend_ok)) {
1554 				/* carp interfaces in state backup are
1555 				 * announced with high metric for faster
1556 				 * failover. */
1557 				lsa_prefix->prefixlen = ia->prefixlen;
1558 				lsa_prefix->metric = MAX_METRIC;
1559 			} else {
1560 				lsa_prefix->prefixlen = ia->prefixlen;
1561 				lsa_prefix->metric = htons(iface->metric);
1562 			}
1563 
1564 			if (lsa_prefix->prefixlen == 128)
1565 				lsa_prefix->options |= OSPF_PREFIX_LA;
1566 
1567 			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
1568 			    "%s/%d, metric %d", inet_ntoa(area->id),
1569 			    iface->name, log_in6addr(&ia->addr),
1570 			    lsa_prefix->prefixlen, ntohs(lsa_prefix->metric));
1571 
1572 			prefix = (struct in6_addr *)(lsa_prefix + 1);
1573 			inet6applymask(prefix, &ia->addr,
1574 			    lsa_prefix->prefixlen);
1575 			append_prefix_lsa(&lsa, &len, lsa_prefix);
1576 			numprefix++;
1577 		}
1578 
1579 		/* TODO: Add prefixes of directly attached hosts, too */
1580 		/* TODO: Add prefixes for virtual links */
1581 	}
1582 
1583 	/* If no prefixes were included, continue only if a copy of this
1584 	 * LSA already exists in DB. It needs to be flushed. */
1585 	if (numprefix == 0 && !old) {
1586 		free(lsa);
1587 		return NULL;
1588 	}
1589 
1590 	lsa->data.pref_intra.numprefix = htons(numprefix);
1591 
1592 	/* LSA header */
1593 	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1594 	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1595 	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1596 	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
1597 	lsa->hdr.adv_rtr = rde_router_id();
1598 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1599 	lsa->hdr.len = htons(len);
1600 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1601 
1602 	return lsa;
1603 }
1604 
1605 void
1606 orig_intra_area_prefix_lsas(struct area *area)
1607 {
1608 	struct lsa	*lsa;
1609 	struct vertex	*old;
1610 	struct iface	*iface;
1611 
1612 	LIST_FOREACH(iface, &area->iface_list, entry) {
1613 		if (iface->type == IF_TYPE_BROADCAST ||
1614 		    iface->type == IF_TYPE_NBMA) {
1615 			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1616 			    htonl(iface->ifindex), rde_router_id());
1617 			lsa = orig_intra_lsa_net(area, iface, old);
1618 			if (lsa)
1619 				lsa_merge(rde_nbr_self(area), lsa, old);
1620 		}
1621 	}
1622 
1623 	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1624 		htonl(LS_ID_INTRA_RTR), rde_router_id());
1625 	lsa = orig_intra_lsa_rtr(area, old);
1626 	if (lsa)
1627 		lsa_merge(rde_nbr_self(area), lsa, old);
1628 }
1629 
1630 int
1631 comp_asext(struct lsa *a, struct lsa *b)
1632 {
1633 	/* compare the prefixes; returns 0 if they are equal */
1634 	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1635 		return (-1);
1636 	return (memcmp(
1637 	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1638 	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1639 	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
1640 }
1641 
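/*
 * Build an AS-external LSA for a redistributed kernel route.  The caller
 * passes DEFAULT_AGE to announce and MAX_AGE to flush; on a flush the
 * metric and external tag are copied from the installed LSA so the
 * withdraw matches what the other routers have in their database.
 */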
1642 struct lsa *
1643 orig_asext_lsa(struct kroute *kr, u_int16_t age)
1644 {
1645 	struct lsa	*lsa;
1646 	u_int32_t	 ext_tag;
1647 	u_int16_t	 len, ext_off;
1648 
1649 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
1650 	    LSA_PREFIXSIZE(kr->prefixlen);
1651 
1652 	/*
1653 	 * nexthop -- on connected routes we are the nexthop,
1654 	 * on all other cases we should announce the true nexthop
1655 	 * unless that nexthop is outside of the ospf cloud.
1656 	 * XXX for now we don't do this.
1657 	 */
1658 
1659 	ext_off = len;
1660 	if (kr->ext_tag) {
1661 		len += sizeof(ext_tag);
1662 	}
1663 	if ((lsa = calloc(1, len)) == NULL)
1664 		fatal("orig_asext_lsa");
1665 
1666 	log_debug("orig_asext_lsa: %s/%d age %d",
1667 	    log_in6addr(&kr->prefix), kr->prefixlen, age);
1668 
1669 	/* LSA header */
1670 	lsa->hdr.age = htons(age);
1671 	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
1672 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1673 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1674 	lsa->hdr.len = htons(len);
1675 
1676 	lsa->data.asext.prefix.prefixlen = kr->prefixlen;
1677 	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1678 	    &kr->prefix, LSA_PREFIXSIZE(kr->prefixlen));
1679 
1680 	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, comp_asext, lsa);
1681 
1682 	if (age == MAX_AGE) {
1683 		/* inherit metric and ext_tag from the current LSA;
1684 		 * some routers don't like to get withdraws that
1685 		 * differ from what they have in their table.
1686 		 */
1687 		struct vertex *v;
1688 		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
1689 		    lsa->hdr.adv_rtr);
1690 		if (v != NULL) {
1691 			kr->metric = ntohl(v->lsa->data.asext.metric);
1692 			if (kr->metric & LSA_ASEXT_T_FLAG) {
1693 				memcpy(&ext_tag, (char *)v->lsa + ext_off,
1694 				    sizeof(ext_tag));
1695 				kr->ext_tag = ntohl(ext_tag);
1696 			}
1697 			kr->metric &= LSA_METRIC_MASK;
1698 		}
1699 	}
1700 
1701 	if (kr->ext_tag) {
1702 		lsa->data.asext.metric = htonl(kr->metric | LSA_ASEXT_T_FLAG);
1703 		ext_tag = htonl(kr->ext_tag);
1704 		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
1705 	} else {
1706 		lsa->data.asext.metric = htonl(kr->metric);
1707 	}
1708 
1709 	lsa->hdr.ls_chksum = 0;
1710 	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1711 
1712 	return (lsa);
1713 }
1714 
1715 struct lsa *
1716 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1717 {
1718 #if 0 /* XXX a lot to do */
1719 	struct lsa	*lsa;
1720 	u_int16_t	 len;
1721 
1722 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1723 	if ((lsa = calloc(1, len)) == NULL)
1724 		fatal("orig_sum_lsa");
1725 
1726 	/* LSA header */
1727 	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1728 	lsa->hdr.type = type;
1729 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1730 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1731 	lsa->hdr.len = htons(len);
1732 
1733 	/* prefix and mask */
1734 	/*
1735 	 * TODO ls_id must be unique, for overlapping routes this may
1736 	 * not be true. In this case a hack needs to be done to
1737 	 * make the ls_id unique.
1738 	 */
1739 	lsa->hdr.ls_id = rte->prefix.s_addr;
1740 	if (type == LSA_TYPE_SUM_NETWORK)
1741 		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1742 	else
1743 		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1744 
1745 	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1746 
1747 	lsa->hdr.ls_chksum = 0;
1748 	lsa->hdr.ls_chksum =
1749 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1750 
1751 	return (lsa);
1752 #endif
1753 	return NULL;
1754 }
1755