/*	$OpenBSD: rde_dual.c,v 1.31 2023/03/08 04:43:13 guenther Exp $ */

/*
 * Copyright (c) 2015 Renato Westphal <renato@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>

#include <stdlib.h>
#include <string.h>

#include "eigrpd.h"
#include "eigrpe.h"
#include "rde.h"
#include "log.h"

static int		 dual_fsm(struct rt_node *, enum dual_event);
static __inline int	 rt_compare(struct rt_node *, struct rt_node *);
static struct rt_node	*rt_find(struct eigrp *, struct rinfo *);
static struct rt_node	*rt_new(struct eigrp *, struct rinfo *);
static struct eigrp_route *route_find(struct rde_nbr *, struct rt_node *);
static struct eigrp_route *route_new(struct rt_node *, struct rde_nbr *,
			    struct rinfo *);
static void		 route_del(struct rt_node *, struct eigrp_route *);
static uint32_t		 safe_sum_uint32(uint32_t, uint32_t);
static uint32_t		 safe_mul_uint32(uint32_t, uint32_t);
static uint32_t		 route_composite_metric(uint8_t *, uint32_t, uint32_t,
			    uint8_t, uint8_t);
static void		 route_update_metrics(struct eigrp *,
			    struct eigrp_route *, struct rinfo *);
static void		 reply_outstanding_add(struct rt_node *,
			    struct rde_nbr *);
static struct reply_node *reply_outstanding_find(struct rt_node *,
			    struct rde_nbr *);
static void		 reply_outstanding_remove(struct reply_node *);
static void		 reply_active_timer(int, short, void *);
static void		 reply_active_start_timer(struct reply_node *);
static void		 reply_active_stop_timer(struct reply_node *);
static void		 reply_sia_timer(int, short, void *);
static void		 reply_sia_start_timer(struct reply_node *);
static void		 reply_sia_stop_timer(struct reply_node *);
static void		 rinfo_fill_infinite(struct rt_node *, enum route_type,
			    struct rinfo *);
static void		 rt_update_fib(struct rt_node *);
static void		 rt_set_successor(struct rt_node *,
			    struct eigrp_route *);
static struct eigrp_route *rt_get_successor_fc(struct rt_node *);
static void		 rde_send_update(struct eigrp_iface *, struct rinfo *);
static void		 rde_send_update_all(struct rt_node *, struct rinfo *);
static void		 rde_send_query(struct eigrp_iface *, struct rinfo *,
			    int);
static void		 rde_send_siaquery(struct rde_nbr *, struct rinfo *);
static void		 rde_send_query_all(struct eigrp *, struct rt_node *,
			    int);
static void		 rde_send_reply(struct rde_nbr *, struct rinfo *, int);
static void		 rde_last_reply(struct rt_node *);
static __inline int	 rde_nbr_compare(struct rde_nbr *, struct rde_nbr *);

RB_GENERATE(rt_tree, rt_node, entry, rt_compare)
RB_GENERATE(rde_nbr_head, rde_nbr, entry, rde_nbr_compare)

struct rde_nbr_head rde_nbrs = RB_INITIALIZER(&rde_nbrs);

/*
 * NOTE: events that don't cause a state transition aren't triggered to avoid
 * too much verbosity and are here mostly for illustration purposes.
 */
static struct {
	int		state;
	enum dual_event	event;
	int		new_state;
} dual_fsm_tbl[] = {
    /* current state		event		resulting state */
/* Passive */
    {DUAL_STA_PASSIVE,		DUAL_EVT_1,	0},
    {DUAL_STA_PASSIVE,		DUAL_EVT_2,	0},
    {DUAL_STA_PASSIVE,		DUAL_EVT_3,	DUAL_STA_ACTIVE3},
    {DUAL_STA_PASSIVE,		DUAL_EVT_4,	DUAL_STA_ACTIVE1},
/* Active Oij=0 */
    {DUAL_STA_ACTIVE0,		DUAL_EVT_5,	DUAL_STA_ACTIVE2},
    {DUAL_STA_ACTIVE0,		DUAL_EVT_11,	DUAL_STA_ACTIVE1},
    {DUAL_STA_ACTIVE0,		DUAL_EVT_14,	DUAL_STA_PASSIVE},
/* Active Oij=1 */
    {DUAL_STA_ACTIVE1,		DUAL_EVT_5,	DUAL_STA_ACTIVE2},
    {DUAL_STA_ACTIVE1,		DUAL_EVT_9,	DUAL_STA_ACTIVE0},
    {DUAL_STA_ACTIVE1,		DUAL_EVT_15,	DUAL_STA_PASSIVE},
/* Active Oij=2 */
    {DUAL_STA_ACTIVE2,		DUAL_EVT_12,	DUAL_STA_ACTIVE3},
    {DUAL_STA_ACTIVE2,		DUAL_EVT_16,	DUAL_STA_PASSIVE},
/* Active Oij=3 */
    {DUAL_STA_ACTIVE3,		DUAL_EVT_10,	DUAL_STA_ACTIVE2},
    {DUAL_STA_ACTIVE3,		DUAL_EVT_13,	DUAL_STA_PASSIVE},
/* Active (all) */
    {DUAL_STA_ACTIVE_ALL,	DUAL_EVT_6,	0},
    {DUAL_STA_ACTIVE_ALL,	DUAL_EVT_7,	0},
    {DUAL_STA_ACTIVE_ALL,	DUAL_EVT_8,	0},
/* sentinel */
    {-1,			0,		0},
};
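
/*
 * The state column above is matched as a bitmask (see the "& old_state"
 * test in dual_fsm() below), which is how the single DUAL_STA_ACTIVE_ALL
 * row can cover all four active states.  Illustrative example: a route
 * sitting in DUAL_STA_PASSIVE that loses its last feasible successor
 * (DUAL_EVT_4) matches the fourth row and moves to DUAL_STA_ACTIVE1.
 */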

static const char * const dual_event_names[] = {
	"DUAL_EVT_1",
	"DUAL_EVT_2",
	"DUAL_EVT_3",
	"DUAL_EVT_4",
	"DUAL_EVT_5",
	"DUAL_EVT_6",
	"DUAL_EVT_7",
	"DUAL_EVT_8",
	"DUAL_EVT_9",
	"DUAL_EVT_10",
	"DUAL_EVT_11",
	"DUAL_EVT_12",
	"DUAL_EVT_13",
	"DUAL_EVT_14",
	"DUAL_EVT_15",
	"DUAL_EVT_16"
};

static int
dual_fsm(struct rt_node *rn, enum dual_event event)
{
	int		old_state;
	int		new_state = 0;
	int		i;

	old_state = rn->state;
	for (i = 0; dual_fsm_tbl[i].state != -1; i++)
		if ((dual_fsm_tbl[i].state & old_state) &&
		    (dual_fsm_tbl[i].event == event)) {
			new_state = dual_fsm_tbl[i].new_state;
			break;
		}

	if (dual_fsm_tbl[i].state == -1) {
		/* event outside of the defined fsm, ignore it. */
		log_warnx("%s: route %s, event %s not expected in state %s",
		    __func__, log_prefix(rn), dual_event_names[event],
		    dual_state_name(old_state));
		return (0);
	}

	if (new_state != 0)
		rn->state = new_state;

	if (old_state != rn->state) {
		log_debug("%s: event %s changing state for prefix %s "
		    "from %s to %s", __func__, dual_event_names[event],
		    log_prefix(rn), dual_state_name(old_state),
		    dual_state_name(rn->state));

		if (old_state == DUAL_STA_PASSIVE ||
		    new_state == DUAL_STA_PASSIVE)
			rt_update_fib(rn);
	}

	return (0);
}

static __inline int
rt_compare(struct rt_node *a, struct rt_node *b)
{
	int		 addrcmp;

	addrcmp = eigrp_addrcmp(a->eigrp->af, &a->prefix, &b->prefix);
	if (addrcmp != 0)
		return (addrcmp);

	if (a->prefixlen < b->prefixlen)
		return (-1);
	if (a->prefixlen > b->prefixlen)
		return (1);

	return (0);
}

static struct rt_node *
rt_find(struct eigrp *eigrp, struct rinfo *ri)
{
	struct rt_node	 rn;

	rn.eigrp = eigrp;
	rn.prefix = ri->prefix;
	rn.prefixlen = ri->prefixlen;

	return (RB_FIND(rt_tree, &eigrp->topology, &rn));
}

static struct rt_node *
rt_new(struct eigrp *eigrp, struct rinfo *ri)
{
	struct rt_node	*rn;

	if ((rn = calloc(1, sizeof(*rn))) == NULL)
		fatal("rt_new");

	rn->eigrp = eigrp;
	rn->prefix = ri->prefix;
	rn->prefixlen = ri->prefixlen;
	rn->state = DUAL_STA_PASSIVE;
	TAILQ_INIT(&rn->routes);
	TAILQ_INIT(&rn->rijk);
	rt_set_successor(rn, NULL);

	if (RB_INSERT(rt_tree, &eigrp->topology, rn) != NULL) {
		log_warnx("%s failed for %s", __func__, log_prefix(rn));
		free(rn);
		return (NULL);
	}

	log_debug("%s: prefix %s", __func__, log_prefix(rn));

	return (rn);
}

void
rt_del(struct rt_node *rn)
{
	struct eigrp_route	*route;
	struct reply_node	*reply;

	log_debug("%s: prefix %s", __func__, log_prefix(rn));

	while ((reply = TAILQ_FIRST(&rn->rijk)) != NULL)
		reply_outstanding_remove(reply);
	while ((route = TAILQ_FIRST(&rn->routes)) != NULL)
		route_del(rn, route);
	RB_REMOVE(rt_tree, &rn->eigrp->topology, rn);
	free(rn);
}

static struct eigrp_route *
route_find(struct rde_nbr *nbr, struct rt_node *rn)
{
	struct eigrp_route	*route;

	TAILQ_FOREACH(route, &rn->routes, entry)
		if (route->nbr == nbr)
			return (route);

	return (NULL);
}

static struct eigrp_route *
route_new(struct rt_node *rn, struct rde_nbr *nbr, struct rinfo *ri)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_route	*route, *tmp;

	if ((route = calloc(1, sizeof(*route))) == NULL)
		fatal("route_new");

	route->nbr = nbr;
	route->type = ri->type;
	if (eigrp_addrisset(eigrp->af, &ri->nexthop))
		route->nexthop = ri->nexthop;
	else
		route->nexthop = nbr->addr;
	route_update_metrics(eigrp, route, ri);

	/* order by nexthop */
	TAILQ_FOREACH(tmp, &rn->routes, entry)
		if (eigrp_addrcmp(eigrp->af, &tmp->nexthop,
		    &route->nexthop) > 0)
			break;
	if (tmp)
		TAILQ_INSERT_BEFORE(tmp, route, entry);
	else
		TAILQ_INSERT_TAIL(&rn->routes, route, entry);

	log_debug("%s: prefix %s via %s distance (%u/%u)", __func__,
	    log_prefix(rn), log_route_origin(eigrp->af, route->nbr),
	    route->distance, route->rdistance);

	return (route);
}

static void
route_del(struct rt_node *rn, struct eigrp_route *route)
{
	struct eigrp		*eigrp = rn->eigrp;

	log_debug("%s: prefix %s via %s", __func__, log_prefix(rn),
	    log_route_origin(eigrp->af, route->nbr));

	if (route->flags & F_EIGRP_ROUTE_INSTALLED)
		rde_send_delete_kroute(rn, route);

	TAILQ_REMOVE(&rn->routes, route, entry);
	free(route);
}

static uint32_t
safe_sum_uint32(uint32_t a, uint32_t b)
{
	uint64_t	total;

	total = (uint64_t) a + (uint64_t) b;

	if (total >> 32)
		return ((uint32_t)(~0));

	return ((uint32_t) total);
}

static uint32_t
safe_mul_uint32(uint32_t a, uint32_t b)
{
	uint64_t	total;

	total = (uint64_t) a * (uint64_t) b;

	if (total >> 32)
		return ((uint32_t)(~0));

	return ((uint32_t) total);
}
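
/*
 * Illustrative example of the saturating helpers above: with
 * a = 0xfffffff0 and b = 0x20, the 64-bit intermediate is 0x100000010,
 * the high 32 bits are non-zero and the result saturates to UINT32_MAX
 * instead of wrapping around.  Since the all-ones value is also used as
 * the infinite (unreachable) metric, an overflowing metric effectively
 * stays "worse than everything" rather than becoming accidentally small.
 */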

uint32_t
eigrp_composite_delay(uint32_t delay)
{
	/* cheap overflow protection */
	delay = min(delay, (1 << 24) - 1);
	return (delay * EIGRP_SCALING_FACTOR);
}

uint32_t
eigrp_real_delay(uint32_t delay)
{
	return (delay / EIGRP_SCALING_FACTOR);
}

uint32_t
eigrp_composite_bandwidth(uint32_t bandwidth)
{
	/* truncate before applying the scaling factor */
	bandwidth = 10000000 / bandwidth;
	return (EIGRP_SCALING_FACTOR * bandwidth);
}

uint32_t
eigrp_real_bandwidth(uint32_t bandwidth)
{
	/*
	 * apply the scaling factor before the division and only then
	 * truncate.  this is to stay consistent with what cisco does.
	 */
	return ((EIGRP_SCALING_FACTOR * (uint32_t)10000000) / bandwidth);
}
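
/*
 * Worked example (assuming bandwidth is expressed in kbit/s): for a
 * 100 Mbit/s link, bandwidth = 100000, so eigrp_composite_bandwidth()
 * yields 256 * (10000000 / 100000) = 25600, and eigrp_real_bandwidth()
 * inverts it back to (256 * 10000000) / 25600 = 100000.  Because the
 * composite direction truncates before scaling while the real direction
 * scales before dividing, the pair follows cisco's rounding and is not
 * an exact inverse for every input.
 */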

static uint32_t
route_composite_metric(uint8_t *kvalues, uint32_t delay, uint32_t bandwidth,
    uint8_t load, uint8_t reliability)
{
	uint64_t	 distance;
	uint32_t	 operand1, operand2, operand3;
	double		 operand4;

	/*
	 * Need to apply the scaling factor before any division to avoid
	 * losing information from truncation.
	 */
	operand1 = safe_mul_uint32(kvalues[0] * EIGRP_SCALING_FACTOR,
	    10000000 / bandwidth);
	operand2 = safe_mul_uint32(kvalues[1] * EIGRP_SCALING_FACTOR,
	    10000000 / bandwidth) / (256 - load);
	operand3 = safe_mul_uint32(kvalues[2] * EIGRP_SCALING_FACTOR, delay);

	distance = (uint64_t) operand1 + (uint64_t) operand2 +
	    (uint64_t) operand3;

	/* if K5 is set to zero, the last term of the formula is not used */
	if (kvalues[4] != 0) {
		operand4 = (double) kvalues[4] / (reliability + kvalues[3]);
		/* no risk of overflow (64 bits), operand4 can be at most 255 */
		distance *= operand4;
	}

	/* overflow protection */
	if (distance >> 32)
		distance = ((uint32_t)(~0));

	return ((uint32_t) distance);
}
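
/*
 * For reference, the classic composite metric computed above is
 *
 *	metric = [K1*bw' + (K2*bw')/(256 - load) + K3*delay']
 *		 * [K5/(reliability + K4)]
 *
 * where bw' = EIGRP_SCALING_FACTOR * (10^7 / bandwidth), delay' is the
 * scaled delay and the last factor only applies when K5 != 0.  With the
 * default K-values (K1 = K3 = 1, K2 = K4 = K5 = 0) this reduces to
 * 256 * (10^7 / bandwidth) + 256 * delay.
 */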

static void
route_update_metrics(struct eigrp *eigrp, struct eigrp_route *route,
    struct rinfo *ri)
{
	struct eigrp_iface	*ei = route->nbr->ei;
	uint32_t		 delay, bandwidth;
	int			 mtu;

	route->metric = ri->metric;
	route->emetric = ri->emetric;
	route->flags |= F_EIGRP_ROUTE_M_CHANGED;

	delay = eigrp_real_delay(route->metric.delay);
	bandwidth = eigrp_real_bandwidth(route->metric.bandwidth);

	if (route->nbr->flags & F_RDE_NBR_SELF)
		route->rdistance = 0;
	else {
		route->rdistance = route_composite_metric(eigrp->kvalues,
		    delay, bandwidth, route->metric.load,
		    route->metric.reliability);

		/* update the delay */
		delay = safe_sum_uint32(delay, ei->delay);
		route->metric.delay = eigrp_composite_delay(delay);

		/* update the bandwidth */
		bandwidth = min(bandwidth, ei->bandwidth);
		route->metric.bandwidth = eigrp_composite_bandwidth(bandwidth);

		/* update the mtu */
		mtu = min(metric_decode_mtu(route->metric.mtu), ei->iface->mtu);
		metric_encode_mtu(route->metric.mtu, mtu);

		/* update the hop count */
		if (route->metric.hop_count < UINT8_MAX)
			route->metric.hop_count++;
	}

	route->distance = route_composite_metric(eigrp->kvalues, delay,
	    bandwidth, DEFAULT_LOAD, DEFAULT_RELIABILITY);
}
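
/*
 * Metric accumulation example (illustrative values): a neighbor reports
 * a route with real delay 100 over an interface whose ei->delay is 10.
 * The reported distance (rdistance) is computed from the advertised
 * metric as-is, then the vector metric is updated for re-advertisement:
 * delay becomes 110 (delays sum hop by hop), bandwidth and mtu become
 * the path minimums, and the hop count is incremented.  The local
 * distance is then recomputed from the updated delay and bandwidth with
 * default load and reliability.
 */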

static void
reply_outstanding_add(struct rt_node *rn, struct rde_nbr *nbr)
{
	struct reply_node	*reply;

	if ((reply = calloc(1, sizeof(*reply))) == NULL)
		fatal("reply_outstanding_add");

	evtimer_set(&reply->ev_active_timeout, reply_active_timer, reply);
	evtimer_set(&reply->ev_sia_timeout, reply_sia_timer, reply);
	reply->siaquery_sent = 0;
	reply->siareply_recv = 0;
	reply->rn = rn;
	reply->nbr = nbr;
	TAILQ_INSERT_TAIL(&rn->rijk, reply, rn_entry);
	TAILQ_INSERT_TAIL(&nbr->rijk, reply, nbr_entry);

	if (rn->eigrp->active_timeout > 0) {
		reply_active_start_timer(reply);
		reply_sia_start_timer(reply);
	}
}

static struct reply_node *
reply_outstanding_find(struct rt_node *rn, struct rde_nbr *nbr)
{
	struct reply_node	*reply;

	TAILQ_FOREACH(reply, &rn->rijk, rn_entry)
		if (reply->nbr == nbr)
			return (reply);

	return (NULL);
}

static void
reply_outstanding_remove(struct reply_node *reply)
{
	reply_active_stop_timer(reply);
	reply_sia_stop_timer(reply);
	TAILQ_REMOVE(&reply->rn->rijk, reply, rn_entry);
	TAILQ_REMOVE(&reply->nbr->rijk, reply, nbr_entry);
	free(reply);
}

static void
reply_active_timer(int fd, short event, void *arg)
{
	struct reply_node	*reply = arg;
	struct rde_nbr		*nbr = reply->nbr;

	log_debug("%s: neighbor %s is stuck in active", __func__,
	    log_addr(nbr->eigrp->af, &nbr->addr));

	rde_nbr_del(reply->nbr, 1);
}

static void
reply_active_start_timer(struct reply_node *reply)
{
	struct eigrp		*eigrp = reply->nbr->eigrp;
	struct timeval		 tv;

	timerclear(&tv);
	tv.tv_sec = eigrp->active_timeout * 60;
	if (evtimer_add(&reply->ev_active_timeout, &tv) == -1)
		fatal("reply_active_start_timer");
}

static void
reply_active_stop_timer(struct reply_node *reply)
{
	if (evtimer_pending(&reply->ev_active_timeout, NULL) &&
	    evtimer_del(&reply->ev_active_timeout) == -1)
		fatal("reply_active_stop_timer");
}

static void
reply_sia_timer(int fd, short event, void *arg)
{
	struct reply_node	*reply = arg;
	struct rde_nbr		*nbr = reply->nbr;
	struct rt_node		*rn = reply->rn;
	struct rinfo		 ri;

	log_debug("%s: nbr %s prefix %s", __func__, log_addr(nbr->eigrp->af,
	    &nbr->addr), log_prefix(rn));

	if (reply->siaquery_sent > 0 && reply->siareply_recv == 0) {
		log_debug("%s: neighbor %s is stuck in active", __func__,
		    log_addr(nbr->eigrp->af, &nbr->addr));
		rde_nbr_del(nbr, 1);
		return;
	}

	/*
	 * draft-savage-eigrp-04 - Section 4.4.1.1:
	 * "Up to three SIA-QUERY packets for a specific destination may
	 * be sent, each at a value of one-half the ACTIVE time, so long
	 * as each are successfully acknowledged and met with an SIA-REPLY".
	 */
	if (reply->siaquery_sent >= 3)
		return;

	reply->siaquery_sent++;
	reply->siareply_recv = 0;

	/* restart sia and active timeouts */
	reply_sia_start_timer(reply);
	reply_active_start_timer(reply);

	/* send an sia-query */
	rinfo_fill_successor(rn, &ri);
	ri.metric.flags |= F_METRIC_ACTIVE;
	rde_send_siaquery(nbr, &ri);
}

static void
reply_sia_start_timer(struct reply_node *reply)
{
	struct eigrp		*eigrp = reply->nbr->eigrp;
	struct timeval		 tv;

	/*
	 * draft-savage-eigrp-04 - Section 4.4.1.1:
	 * "The SIA-QUERY packet SHOULD be sent on a per-destination basis
	 * at one-half of the ACTIVE timeout period."
	 */
	timerclear(&tv);
	tv.tv_sec = (eigrp->active_timeout * 60) / 2;
	if (evtimer_add(&reply->ev_sia_timeout, &tv) == -1)
		fatal("reply_sia_start_timer");
}
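
/*
 * Timing example: with active_timeout set to 3 (minutes), the active
 * timer fires after 180 seconds and the SIA timer after 90.  Each
 * SIA-QUERY that is answered restarts both timers (see reply_sia_timer()
 * above), so a neighbor that keeps replying can extend the active phase
 * up to three times before the route is declared stuck-in-active.  An
 * active_timeout of 0 disables both timers (see reply_outstanding_add()).
 */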

static void
reply_sia_stop_timer(struct reply_node *reply)
{
	if (evtimer_pending(&reply->ev_sia_timeout, NULL) &&
	    evtimer_del(&reply->ev_sia_timeout) == -1)
		fatal("reply_sia_stop_timer");
}

void
rinfo_fill_successor(struct rt_node *rn, struct rinfo *ri)
{
	if (rn->successor.nbr == NULL) {
		rinfo_fill_infinite(rn, EIGRP_ROUTE_INTERNAL, ri);
		return;
	}

	memset(ri, 0, sizeof(*ri));
	ri->af = rn->eigrp->af;
	ri->type = rn->successor.type;
	ri->prefix = rn->prefix;
	ri->prefixlen = rn->prefixlen;
	ri->metric = rn->successor.metric;
	if (ri->type == EIGRP_ROUTE_EXTERNAL)
		ri->emetric = rn->successor.emetric;
}

static void
rinfo_fill_infinite(struct rt_node *rn, enum route_type type, struct rinfo *ri)
{
	memset(ri, 0, sizeof(*ri));
	ri->af = rn->eigrp->af;
	ri->type = type;
	ri->prefix = rn->prefix;
	ri->prefixlen = rn->prefixlen;
	ri->metric.delay = EIGRP_INFINITE_METRIC;
}

static void
rt_update_fib(struct rt_node *rn)
{
	struct eigrp		*eigrp = rn->eigrp;
	uint8_t			 maximum_paths = eigrp->maximum_paths;
	uint8_t			 variance = eigrp->variance;
	int			 installed = 0;
	struct eigrp_route	*route;

	if (rn->state == DUAL_STA_PASSIVE) {
		/* no multipath for attached networks. */
		if (rn->successor.nbr &&
		    (rn->successor.nbr->flags & F_RDE_NBR_LOCAL))
			return;

		TAILQ_FOREACH(route, &rn->routes, entry) {
			/* skip redistributed routes */
			if (route->nbr->flags & F_RDE_NBR_REDIST)
				continue;

			/*
			 * Only feasible successors and the successor itself
			 * are eligible to be installed.
			 */
			if (route->rdistance >= rn->successor.fdistance)
				goto uninstall;

			if (route->distance >
			    (rn->successor.fdistance * variance))
				goto uninstall;

			if (installed >= maximum_paths)
				goto uninstall;

			installed++;

			if ((route->flags & F_EIGRP_ROUTE_INSTALLED) &&
			    !(route->flags & F_EIGRP_ROUTE_M_CHANGED))
				continue;

			rde_send_change_kroute(rn, route);
			continue;

uninstall:
			if (route->flags & F_EIGRP_ROUTE_INSTALLED)
				rde_send_delete_kroute(rn, route);
		}
	} else {
		TAILQ_FOREACH(route, &rn->routes, entry)
			if (route->flags & F_EIGRP_ROUTE_INSTALLED)
				rde_send_delete_kroute(rn, route);
	}
}
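
/*
 * Installation example: suppose the successor's feasible distance is 100,
 * variance is 2 and maximum_paths is 4.  A route with rdistance 90 and
 * distance 150 passes both tests above (90 < 100 and 150 <= 200) and is
 * installed as an unequal-cost path; a route with rdistance 120 fails
 * the feasibility test and is uninstalled even if its total distance is
 * low.
 */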

static void
rt_set_successor(struct rt_node *rn, struct eigrp_route *successor)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_iface	*ei;
	struct summary_addr	*summary;

	if (successor == NULL) {
		rn->successor.nbr = NULL;
		rn->successor.type = 0;
		rn->successor.fdistance = EIGRP_INFINITE_METRIC;
		rn->successor.rdistance = EIGRP_INFINITE_METRIC;
		memset(&rn->successor.metric, 0,
		    sizeof(rn->successor.metric));
		rn->successor.metric.delay = EIGRP_INFINITE_METRIC;
		memset(&rn->successor.emetric, 0,
		    sizeof(rn->successor.emetric));
	} else {
		rn->successor.nbr = successor->nbr;
		rn->successor.type = successor->type;
		rn->successor.fdistance = successor->distance;
		rn->successor.rdistance = successor->rdistance;
		rn->successor.metric = successor->metric;
		rn->successor.emetric = successor->emetric;
	}

	TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry) {
		summary = rde_summary_check(ei, &rn->prefix, rn->prefixlen);
		if (summary)
			rt_summary_set(eigrp, summary, &rn->successor.metric);
	}
}

static struct eigrp_route *
rt_get_successor_fc(struct rt_node *rn)
{
	struct eigrp_route	*route, *successor = NULL;
	uint32_t		 distance = EIGRP_INFINITE_METRIC;
	int			 external_only = 1;

	TAILQ_FOREACH(route, &rn->routes, entry)
		if (route->type == EIGRP_ROUTE_INTERNAL) {
			/*
			 * connected routes should always be preferred over
			 * received routes independent of the metric.
			 */
			if (route->nbr->flags & F_RDE_NBR_LOCAL)
				return (route);

			external_only = 0;
		}

	TAILQ_FOREACH(route, &rn->routes, entry) {
		/*
		 * draft-savage-eigrp-04 - Section 5.4.7:
		 * "Internal routes MUST be preferred over external routes
		 * independent of the metric."
		 */
		if (route->type == EIGRP_ROUTE_EXTERNAL && !external_only)
			continue;

		/* pick the best route that meets the feasibility condition */
		if (route->rdistance < rn->successor.fdistance &&
		    route->distance < distance) {
			distance = route->distance;
			successor = route;
		}
	}

	return (successor);
}
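
/*
 * Feasibility condition example: with a current feasible distance of 100,
 * a path whose neighbor reports rdistance 90 is provably loop-free even
 * if its own total distance is 250, while a path with rdistance 100 is
 * never considered: a neighbor at least as far from the destination as
 * we are might be routing through us.
 */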

struct summary_addr *
rde_summary_check(struct eigrp_iface *ei, union eigrpd_addr *prefix,
    uint8_t prefixlen)
{
	struct summary_addr	*summary;

	TAILQ_FOREACH(summary, &ei->summary_list, entry) {
		/* do not filter the summary itself */
		if (summary->prefixlen == prefixlen &&
		    !eigrp_addrcmp(ei->eigrp->af, prefix, &summary->prefix))
			return (NULL);

		if (summary->prefixlen <= prefixlen &&
		    !eigrp_prefixcmp(ei->eigrp->af, prefix, &summary->prefix,
		    summary->prefixlen))
			return (summary);
	}

	return (NULL);
}
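
/*
 * Example: with a 10.0.0.0/8 summary configured on an interface, a more
 * specific prefix like 10.1.0.0/16 matches the summary and is suppressed
 * (the callers below advertise it with an infinite metric), while
 * 10.0.0.0/8 itself returns NULL so the summary route is still
 * advertised normally.
 */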

static void
rde_send_update(struct eigrp_iface *ei, struct rinfo *ri)
{
	if (ri->metric.hop_count >= ei->eigrp->maximum_hops ||
	    rde_summary_check(ei, &ri->prefix, ri->prefixlen))
		ri->metric.delay = EIGRP_INFINITE_METRIC;

	rde_imsg_compose_eigrpe(IMSG_SEND_MUPDATE, ei->ifaceid, 0,
	    ri, sizeof(*ri));
	rde_imsg_compose_eigrpe(IMSG_SEND_MUPDATE_END, ei->ifaceid, 0,
	    NULL, 0);
}

static void
rde_send_update_all(struct rt_node *rn, struct rinfo *ri)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_iface	*ei;

	TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry) {
		/* respect split-horizon configuration */
		if (rn->successor.nbr && rn->successor.nbr->ei == ei &&
		    ei->splithorizon)
			continue;
		rde_send_update(ei, ri);
	}
}

static void
rde_send_query(struct eigrp_iface *ei, struct rinfo *ri, int push)
{
	rde_imsg_compose_eigrpe(IMSG_SEND_MQUERY, ei->ifaceid, 0,
	    ri, sizeof(*ri));
	if (push)
		rde_imsg_compose_eigrpe(IMSG_SEND_MQUERY_END, ei->ifaceid,
		    0, NULL, 0);
}

static void
rde_send_siaquery(struct rde_nbr *nbr, struct rinfo *ri)
{
	rde_imsg_compose_eigrpe(IMSG_SEND_QUERY, nbr->peerid, 0,
	    ri, sizeof(*ri));
	rde_imsg_compose_eigrpe(IMSG_SEND_SIAQUERY_END, nbr->peerid, 0,
	    NULL, 0);
}

static void
rde_send_query_all(struct eigrp *eigrp, struct rt_node *rn, int push)
{
	struct eigrp_iface	*ei;
	struct rde_nbr		*nbr;
	struct rinfo		 ri;

	rinfo_fill_successor(rn, &ri);
	ri.metric.flags |= F_METRIC_ACTIVE;

	TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry) {
		/* respect split-horizon configuration */
		if (rn->successor.nbr && rn->successor.nbr->ei == ei &&
		    ei->splithorizon)
			continue;

		rde_send_query(ei, &ri, push);
	}

	RB_FOREACH(nbr, rde_nbr_head, &rde_nbrs)
		if (nbr->ei->eigrp == eigrp && !(nbr->flags & F_RDE_NBR_SELF)) {
			/* respect split-horizon configuration */
			if (rn->successor.nbr &&
			    rn->successor.nbr->ei == nbr->ei &&
			    nbr->ei->splithorizon)
				continue;

			reply_outstanding_add(rn, nbr);
		}
}

void
rde_flush_queries(void)
{
	struct eigrp		*eigrp;
	struct eigrp_iface	*ei;

	TAILQ_FOREACH(eigrp, &rdeconf->instances, entry)
		TAILQ_FOREACH(ei, &eigrp->ei_list, e_entry)
			rde_imsg_compose_eigrpe(IMSG_SEND_MQUERY_END,
			    ei->ifaceid, 0, NULL, 0);
}
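
/*
 * Note: rde_send_query() only appends the IMSG_SEND_MQUERY_END marker
 * when its "push" argument is set.  Callers that queue queries for many
 * routes in one sweep (e.g. rde_check_link_down()) pass push = 0 and
 * rely on rde_flush_queries() above to terminate the batch on every
 * interface, the idea being that eigrpe can then coalesce the queued
 * queries into fewer packets.
 */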

static void
rde_send_reply(struct rde_nbr *nbr, struct rinfo *ri, int siareply)
{
	int	 type;

	if (ri->metric.hop_count >= nbr->eigrp->maximum_hops ||
	    rde_summary_check(nbr->ei, &ri->prefix, ri->prefixlen))
		ri->metric.delay = EIGRP_INFINITE_METRIC;

	if (!siareply)
		type = IMSG_SEND_REPLY_END;
	else
		type = IMSG_SEND_SIAREPLY_END;

	rde_imsg_compose_eigrpe(IMSG_SEND_REPLY, nbr->peerid, 0,
	    ri, sizeof(*ri));
	rde_imsg_compose_eigrpe(type, nbr->peerid, 0, NULL, 0);
}

void
rde_check_update(struct rde_nbr *nbr, struct rinfo *ri)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route, *successor;
	uint32_t		 old_fdistance;
	struct rinfo		 sri;

	rn = rt_find(eigrp, ri);
	if (rn == NULL) {
		if (ri->metric.delay == EIGRP_INFINITE_METRIC)
			return;

		rn = rt_new(eigrp, ri);
		route = route_new(rn, nbr, ri);

		old_fdistance = EIGRP_INFINITE_METRIC;
	} else {
		old_fdistance = rn->successor.fdistance;

		if (ri->metric.delay == EIGRP_INFINITE_METRIC) {
			route = route_find(nbr, rn);
			if (route)
				route_del(rn, route);
		} else {
			route = route_find(nbr, rn);
			if (route == NULL)
				route = route_new(rn, nbr, ri);
			else
				route_update_metrics(eigrp, route, ri);
		}
	}

	switch (rn->state) {
	case DUAL_STA_PASSIVE:
		successor = rt_get_successor_fc(rn);

		/*
		 * go active if the successor was affected and no feasible
		 * successor exists.
		 */
		if (successor == NULL) {
			rde_send_query_all(eigrp, rn, 1);

			dual_fsm(rn, DUAL_EVT_4);
		} else {
			rt_set_successor(rn, successor);
			rt_update_fib(rn);

			/* send update with new metric if necessary */
			rinfo_fill_successor(rn, &sri);
			if (rn->successor.fdistance != old_fdistance)
				rde_send_update_all(rn, &sri);
		}
		break;
	case DUAL_STA_ACTIVE1:
		/* XXX event 9 if cost increase? */
		break;
	case DUAL_STA_ACTIVE3:
		/* XXX event 10 if cost increase? */
		break;
	}

	if ((rn->state & DUAL_STA_ACTIVE_ALL) && TAILQ_EMPTY(&rn->rijk))
		rde_last_reply(rn);
}

void
rde_check_query(struct rde_nbr *nbr, struct rinfo *ri, int siaquery)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct eigrp_route	*route, *successor;
	uint32_t		 old_fdistance;
	struct rinfo		 sri;
	int			 reply_sent = 0;

	/*
	 * draft-savage-eigrp-02 - Section 4.3:
	 * "When a query is received for a route that doesn't exist in our
	 * topology table, a reply with infinite metric is sent and an entry
	 * in the topology table is added with the metric in the QUERY if
	 * the metric is not an infinite value".
	 */
	rn = rt_find(eigrp, ri);
	if (rn == NULL) {
		sri = *ri;
		sri.metric.delay = EIGRP_INFINITE_METRIC;
		rde_send_reply(nbr, &sri, 0);

		if (ri->metric.delay == EIGRP_INFINITE_METRIC)
			return;

		rn = rt_new(eigrp, ri);
		route = route_new(rn, nbr, ri);
		rt_set_successor(rn, route);
		return;
	}

	old_fdistance = rn->successor.fdistance;

	if (ri->metric.delay == EIGRP_INFINITE_METRIC) {
		route = route_find(nbr, rn);
		if (route)
			route_del(rn, route);
	} else {
		route = route_find(nbr, rn);
		if (route == NULL)
			route = route_new(rn, nbr, ri);
		else
			route_update_metrics(eigrp, route, ri);
	}

	switch (rn->state) {
	case DUAL_STA_PASSIVE:
		successor = rt_get_successor_fc(rn);

		/*
		 * go active if the successor was affected and no feasible
		 * successor exists.
		 */
		if (successor == NULL) {
			rde_send_query_all(eigrp, rn, 1);
			dual_fsm(rn, DUAL_EVT_3);
		} else {
			rt_set_successor(rn, successor);
			rt_update_fib(rn);

			/* send reply */
			rinfo_fill_successor(rn, &sri);
			rde_send_reply(nbr, &sri, 0);
			reply_sent = 1;

			/* send update with new metric if necessary */
			if (rn->successor.fdistance != old_fdistance)
				rde_send_update_all(rn, &sri);
		}
		break;
	case DUAL_STA_ACTIVE0:
	case DUAL_STA_ACTIVE1:
		if (nbr == rn->successor.nbr)
			dual_fsm(rn, DUAL_EVT_5);
		else {
			dual_fsm(rn, DUAL_EVT_6);
			rinfo_fill_successor(rn, &sri);
			sri.metric.flags |= F_METRIC_ACTIVE;
			rde_send_reply(nbr, &sri, 0);
			reply_sent = 1;
		}
		break;
	case DUAL_STA_ACTIVE2:
	case DUAL_STA_ACTIVE3:
		if (nbr == rn->successor.nbr) {
			/* XXX not defined in the spec, do nothing? */
		} else {
			dual_fsm(rn, DUAL_EVT_6);
			rinfo_fill_successor(rn, &sri);
			sri.metric.flags |= F_METRIC_ACTIVE;
			rde_send_reply(nbr, &sri, 0);
			reply_sent = 1;
		}
		break;
	}

	if ((rn->state & DUAL_STA_ACTIVE_ALL) && TAILQ_EMPTY(&rn->rijk))
		rde_last_reply(rn);

	if (siaquery && !reply_sent) {
		rinfo_fill_successor(rn, &sri);
		sri.metric.flags |= F_METRIC_ACTIVE;
		rde_send_reply(nbr, &sri, 1);
	}
}

static void
rde_last_reply(struct rt_node *rn)
{
	struct eigrp		*eigrp = rn->eigrp;
	struct eigrp_route	*successor;
	struct rde_nbr		*old_successor;
	struct rinfo		 ri;

	old_successor = rn->successor.nbr;

	switch (rn->state) {
	case DUAL_STA_ACTIVE0:
		successor = rt_get_successor_fc(rn);
		if (successor == NULL) {
			/* feasibility condition is not met */
			rde_send_query_all(eigrp, rn, 1);
			dual_fsm(rn, DUAL_EVT_11);
			break;
		}

		/* update successor - feasibility condition is met */
		rt_set_successor(rn, successor);

		/* advertise new successor to neighbors */
		rinfo_fill_successor(rn, &ri);
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_14);
		break;
	case DUAL_STA_ACTIVE1:
		/* update successor */
		rn->successor.fdistance = EIGRP_INFINITE_METRIC;
		successor = rt_get_successor_fc(rn);
		rt_set_successor(rn, successor);

		/* advertise new successor to neighbors */
		rinfo_fill_successor(rn, &ri);
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_15);
		break;
	case DUAL_STA_ACTIVE2:
		successor = rt_get_successor_fc(rn);
		if (successor == NULL) {
			/* feasibility condition is not met */
			rde_send_query_all(eigrp, rn, 1);
			dual_fsm(rn, DUAL_EVT_12);
			break;
		}

		/* update successor - feasibility condition is met */
		rt_set_successor(rn, successor);

		/* send a reply to the old successor */
		rinfo_fill_successor(rn, &ri);
		ri.metric.flags |= F_METRIC_ACTIVE;
		if (old_successor)
			rde_send_reply(old_successor, &ri, 0);

		/* advertise new successor to neighbors */
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_16);
		break;
	case DUAL_STA_ACTIVE3:
		/* update successor */
		rn->successor.fdistance = EIGRP_INFINITE_METRIC;
		successor = rt_get_successor_fc(rn);
		rt_set_successor(rn, successor);

		/* send a reply to the old successor */
		rinfo_fill_successor(rn, &ri);
		ri.metric.flags |= F_METRIC_ACTIVE;
		if (old_successor)
			rde_send_reply(old_successor, &ri, 0);

		/* advertise new successor to neighbors */
		rde_send_update_all(rn, &ri);

		dual_fsm(rn, DUAL_EVT_13);
		break;
	}

	if (rn->state == DUAL_STA_PASSIVE && rn->successor.nbr == NULL)
		rt_del(rn);
}

void
rde_check_reply(struct rde_nbr *nbr, struct rinfo *ri, int siareply)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn;
	struct reply_node	*reply;
	struct eigrp_route	*route;

	rn = rt_find(eigrp, ri);
	if (rn == NULL)
		return;

	/* XXX ignore reply when the state is passive? */
	if (rn->state == DUAL_STA_PASSIVE)
		return;

	reply = reply_outstanding_find(rn, nbr);
	if (reply == NULL)
		return;

	if (siareply) {
		reply->siareply_recv = 1;
		reply_active_start_timer(reply);
		return;
	}

	if (ri->metric.delay == EIGRP_INFINITE_METRIC) {
		route = route_find(nbr, rn);
		if (route)
			route_del(rn, route);
	} else {
		route = route_find(nbr, rn);
		if (route == NULL)
			route = route_new(rn, nbr, ri);
		else
			route_update_metrics(eigrp, route, ri);
	}

	reply_outstanding_remove(reply);
	if (TAILQ_EMPTY(&rn->rijk))
		rde_last_reply(rn);
}

void
rde_check_link_down_rn(struct rde_nbr *nbr, struct rt_node *rn,
    struct eigrp_route *route)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct reply_node	*reply;
	struct eigrp_route	*successor;
	uint32_t		 old_fdistance;
	struct rinfo		 ri;
	enum route_type		 type;

	old_fdistance = rn->successor.fdistance;

	type = route->type;
	route_del(rn, route);

	switch (rn->state) {
	case DUAL_STA_PASSIVE:
		successor = rt_get_successor_fc(rn);

		/*
		 * go active if the successor was affected and no feasible
		 * successor exists.
		 */
		if (successor == NULL) {
			rde_send_query_all(eigrp, rn, 0);

			dual_fsm(rn, DUAL_EVT_4);
		} else {
			rt_set_successor(rn, successor);
			rt_update_fib(rn);

			/* send update with new metric if necessary */
			rinfo_fill_successor(rn, &ri);
			if (rn->successor.fdistance != old_fdistance)
				rde_send_update_all(rn, &ri);
		}
		break;
	case DUAL_STA_ACTIVE1:
		if (nbr == rn->successor.nbr)
			dual_fsm(rn, DUAL_EVT_9);
		break;
	case DUAL_STA_ACTIVE3:
		if (nbr == rn->successor.nbr)
			dual_fsm(rn, DUAL_EVT_10);
		break;
	}

	if (rn->state & DUAL_STA_ACTIVE_ALL) {
		reply = reply_outstanding_find(rn, nbr);
		if (reply) {
			rinfo_fill_infinite(rn, type, &ri);
			rde_check_reply(nbr, &ri, 0);
		}
	}
}

void
rde_check_link_down_nbr(struct rde_nbr *nbr)
{
	struct eigrp		*eigrp = nbr->eigrp;
	struct rt_node		*rn, *safe;
	struct eigrp_route	*route;

	RB_FOREACH_SAFE(rn, rt_tree, &eigrp->topology, safe) {
		route = route_find(nbr, rn);
		if (route) {
			rde_check_link_down_rn(nbr, rn, route);
			if (rn->successor.nbr == nbr)
				rn->successor.nbr = NULL;
		}
	}
}

void
rde_check_link_down(unsigned int ifindex)
{
	struct rde_nbr		*nbr;

	RB_FOREACH(nbr, rde_nbr_head, &rde_nbrs)
		if (nbr->ei->iface->ifindex == ifindex)
			rde_check_link_down_nbr(nbr);

	rde_flush_queries();
}

void
rde_check_link_cost_change(struct rde_nbr *nbr, struct eigrp_iface *ei)
{
}

static __inline int
rde_nbr_compare(struct rde_nbr *a, struct rde_nbr *b)
{
	/*
	 * peerid is unsigned; compare explicitly instead of subtracting,
	 * which could wrap and return the wrong sign.
	 */
	if (a->peerid < b->peerid)
		return (-1);
	if (a->peerid > b->peerid)
		return (1);
	return (0);
}

struct rde_nbr *
rde_nbr_find(uint32_t peerid)
{
	struct rde_nbr	n;

	n.peerid = peerid;

	return (RB_FIND(rde_nbr_head, &rde_nbrs, &n));
}

struct rde_nbr *
rde_nbr_new(uint32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr		*nbr;

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	nbr->peerid = peerid;
	nbr->ifaceid = new->ifaceid;
	nbr->addr = new->addr;
	nbr->ei = eigrp_if_lookup_id(nbr->ifaceid);
	if (nbr->ei)
		nbr->eigrp = nbr->ei->eigrp;
	TAILQ_INIT(&nbr->rijk);
	nbr->flags = new->flags;

	if (nbr->peerid != NBR_IDSELF &&
	    RB_INSERT(rde_nbr_head, &rde_nbrs, nbr) != NULL)
		fatalx("rde_nbr_new: RB_INSERT failed");

	return (nbr);
}

void
rde_nbr_del(struct rde_nbr *nbr, int peerterm)
{
	struct reply_node	*reply;

	if (peerterm)
		rde_imsg_compose_eigrpe(IMSG_NEIGHBOR_DOWN, nbr->peerid,
		    0, NULL, 0);

	while ((reply = TAILQ_FIRST(&nbr->rijk)) != NULL)
		reply_outstanding_remove(reply);

	if (nbr->peerid != NBR_IDSELF)
		RB_REMOVE(rde_nbr_head, &rde_nbrs, nbr);
	free(nbr);
}