xref: /dragonfly/sbin/routed/output.c (revision ffe53622)
1 /*
2  * Copyright (c) 1983, 1988, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sbin/routed/output.c,v 1.5.2.1 2000/08/14 17:00:03 sheldonh Exp $
30  */
31 
32 #include "defs.h"
33 
34 #if !defined(__NetBSD__)
35 static char sccsid[] __attribute__((unused)) = "@(#)output.c	8.1 (Berkeley) 6/5/93";
36 #elif defined(__NetBSD__)
37 __RCSID("$NetBSD$");
38 #endif
39 
40 
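/* Sequence number used to tell recently changed routes from unchanged
 * ones: rip_bcast() advances it once all routes have been advertised,
 * and supply_out() skips aggregated routes whose sequence numbers lag
 * behind it during flash updates.
 */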
41 u_int update_seqno;
42 
43 
44 /* State used while walking the tree of routes for output
45  */
46 struct {
47 	struct sockaddr_in to;
48 	naddr	to_mask;
49 	naddr	to_net;
50 	naddr	to_std_mask;
51 	naddr	to_std_net;
52 	struct interface *ifp;		/* usually output interface */
53 	struct auth *a;
54 	char	metric;			/* adjust metrics by interface */
55 	int	npackets;
56 	int	gen_limit;
57 	u_int	state;
58 #define	    WS_ST_FLASH	    0x001	/* send only changed routes */
59 #define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
60 #define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
61 #define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
62 #define	    WS_ST_QUERY	    0x010	/* responding to a query */
63 #define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
64 #define	    WS_ST_DEFAULT   0x040	/* faking a default */
65 } ws;
66 
67 /* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
68 struct ws_buf v12buf;
69 union pkt_buf ripv12_buf;
70 
71 /* Another for only RIPv2 listeners */
72 struct ws_buf v2buf;
73 union pkt_buf rip_v2_buf;
74 
75 
76 
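/* Point the two shared output buffers at their packets and mark both
 * as RIP responses; the RIPv2-only packet also gets its version set.
 */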
77 void
78 bufinit(void)
79 {
80 	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
81 	v12buf.buf = &ripv12_buf.rip;
82 	v12buf.base = &v12buf.buf->rip_nets[0];
83 
84 	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
85 	rip_v2_buf.rip.rip_vers = RIPv2;
86 	v2buf.buf = &rip_v2_buf.rip;
87 	v2buf.base = &v2buf.buf->rip_nets[0];
88 }
89 
90 
91 /* Send a RIP packet to the given destination via the appropriate socket
92  */
93 int					/* <0 on failure */
94 output(enum output_type type,
95        struct sockaddr_in *dst,		/* send to here */
96        struct interface *ifp,
97        struct rip *buf,
98        int size)			/* this many bytes */
99 {
100 	struct sockaddr_in in;
101 	int flags;
102 	const char *msg;
103 	int res;
104 	naddr tgt_mcast;
105 	int soc;
106 	int serrno;
107 
108 	in = *dst;
109 	if (in.sin_port == 0)
110 		in.sin_port = htons(RIP_PORT);
111 #ifdef _HAVE_SIN_LEN
112 	if (in.sin_len == 0)
113 		in.sin_len = sizeof(in);
114 #endif
115 
116 	soc = rip_sock;
117 	flags = 0;
118 
119 	switch (type) {
120 	case OUT_QUERY:
121 		msg = "Answer Query";
122 		if (soc < 0)
123 			soc = ifp->int_rip_sock;
124 		break;
125 	case OUT_UNICAST:
126 		msg = "Send";
127 		if (soc < 0)
128 			soc = ifp->int_rip_sock;
129 		flags = MSG_DONTROUTE;
130 		break;
131 	case OUT_BROADCAST:
132 		if (ifp->int_if_flags & IFF_POINTOPOINT) {
133 			msg = "Send";
134 		} else {
135 			msg = "Send bcast";
136 		}
137 		flags = MSG_DONTROUTE;
138 		break;
139 	case OUT_MULTICAST:
140 		if (ifp->int_if_flags & IFF_POINTOPOINT) {
141 			msg = "Send pt-to-pt";
142 		} else if (ifp->int_state & IS_DUP) {
143 			trace_act("abort multicast output via %s"
144 				  " with duplicate address",
145 				  ifp->int_name);
146 			return 0;
147 		} else {
148 			msg = "Send mcast";
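			/* Change the socket's outgoing multicast interface
			 * only when it differs from the one used last time;
			 * rip_sock_mcast remembers the current setting.
			 */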
149 			if (rip_sock_mcast != ifp) {
150 #ifdef MCAST_PPP_BUG
151 				/* Do not specify the primary interface
152 				 * explicitly if we have the multicast
153 				 * point-to-point kernel bug, since the
154 				 * kernel will do the wrong thing if the
155 				 * local address of a point-to-point link
156 				 * is the same as the address of an ordinary
157 				 * interface.
158 				 */
159 				if (ifp->int_addr == myaddr) {
160 					tgt_mcast = 0;
161 				} else
162 #endif
163 				tgt_mcast = ifp->int_addr;
164 				if (0 > setsockopt(rip_sock,
165 						   IPPROTO_IP, IP_MULTICAST_IF,
166 						   &tgt_mcast,
167 						   sizeof(tgt_mcast))) {
168 					serrno = errno;
169 					LOGERR("setsockopt(rip_sock,"
170 					       "IP_MULTICAST_IF)");
171 					errno = serrno;
172 					ifp = NULL;
173 					return -1;
174 				}
175 				rip_sock_mcast = ifp;
176 			}
177 			in.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
178 		}
179 		break;
180 
181 	case NO_OUT_MULTICAST:
182 	case NO_OUT_RIPV2:
183 	default:
184 #ifdef DEBUG
185 		abort();
186 #endif
187 		return -1;
188 	}
189 
190 	trace_rip(msg, "to", &in, ifp, buf, size);
191 
192 	res = sendto(soc, buf, size, flags,
193 		     (struct sockaddr *)&in, sizeof(in));
194 	if (res < 0
195 	    && (ifp == NULL || !(ifp->int_state & IS_BROKE))) {
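		/* Save errno so that msglog() cannot clobber the sendto()
		 * error seen by the caller.
		 */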
196 		serrno = errno;
197 		msglog("%s sendto(%s%s%s.%d): %s", msg,
198 		       ifp != NULL ? ifp->int_name : "",
199 		       ifp != NULL ? ", " : "",
200 		       inet_ntoa(in.sin_addr),
201 		       ntohs(in.sin_port),
202 		       strerror(errno));
203 		errno = serrno;
204 	}
205 
206 	return res;
207 }
208 
209 
210 /* Find the first key for a packet to send.
211  * Try for a key that is eligible and has not expired, but settle for
212  * the last key if they have all expired.
213  * If no key is ready yet, give up.
214  */
215 struct auth *
216 find_auth(struct interface *ifp)
217 {
218 	struct auth *ap, *res;
219 	int i;
220 
221 
222 	if (ifp == NULL)
223 		return 0;
224 
225 	res = NULL;
226 	ap = ifp->int_auth;
227 	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
228 		/* stop looking after the last key */
229 		if (ap->type == RIP_AUTH_NONE)
230 			break;
231 
232 		/* ignore keys that are not ready yet */
233 		if ((u_long)ap->start > (u_long)clk.tv_sec)
234 			continue;
235 
236 		if ((u_long)ap->end < (u_long)clk.tv_sec) {
237 			/* note best expired password as a fall-back */
238 			if (res == NULL || (u_long)ap->end > (u_long)res->end)
239 				res = ap;
240 			continue;
241 		}
242 
243 		/* note key with the best future */
244 		if (res == NULL || (u_long)res->end < (u_long)ap->end)
245 			res = ap;
246 	}
247 	return res;
248 }
249 
250 
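/* Reset an output buffer to empty and, when authentication is in use,
 * begin the leading RIP_AF_AUTH entry for it.
 */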
251 void
252 clr_ws_buf(struct ws_buf *wb,
253 	   struct auth *ap)
254 {
255 	struct netauth *na;
256 
257 	wb->lim = wb->base + NETS_LEN;
258 	wb->n = wb->base;
259 	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));
260 
261 	/* (start to) install authentication if appropriate
262 	 */
263 	if (ap == NULL)
264 		return;
265 
266 	na = (struct netauth*)wb->n;
267 	if (ap->type == RIP_AUTH_PW) {
268 		na->a_family = RIP_AF_AUTH;
269 		na->a_type = RIP_AUTH_PW;
270 		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
271 		wb->n++;
272 
273 	} else if (ap->type ==  RIP_AUTH_MD5) {
274 		na->a_family = RIP_AF_AUTH;
275 		na->a_type = RIP_AUTH_MD5;
276 		na->au.a_md5.md5_keyid = ap->keyid;
277 		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_LEN;
278 		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
279 		wb->n++;
280 		wb->lim--;		/* make room for trailer */
281 	}
282 }
283 
284 
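/* Finish keyed-MD5 authentication: append the trailer entry reserved by
 * clr_ws_buf(), record the packet length in the MD5 header, and store
 * the digest of the packet plus the secret key in the trailer.
 */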
285 void
286 end_md5_auth(struct ws_buf *wb,
287 	     struct auth *ap)
288 {
289 	struct netauth *na, *na2;
290 	MD5_CTX md5_ctx;
291 	int len;
292 
293 
294 	na = (struct netauth*)wb->base;
295 	na2 = (struct netauth*)wb->n;
296 	len = (char *)na2-(char *)wb->buf;
297 	na2->a_family = RIP_AF_AUTH;
298 	na2->a_type = htons(1);
299 	na->au.a_md5.md5_pkt_len = htons(len);
300 	MD5Init(&md5_ctx);
301 	MD5Update(&md5_ctx, (u_char *)wb->buf, len);
302 	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_LEN);
303 	MD5Final(na2->au.au_pw, &md5_ctx);
304 	wb->n++;
305 }
306 
307 
308 /* Send the buffer
309  */
310 static void
311 supply_write(struct ws_buf *wb)
312 {
313 	/* Output multicast only if legal.
314 	 * If we would multicast and it would be illegal, then discard the
315 	 * packet.
316 	 */
317 	switch (wb->type) {
318 	case NO_OUT_MULTICAST:
319 		trace_pkt("skip multicast to %s because impossible",
320 			  naddr_ntoa(ws.to.sin_addr.s_addr));
321 		break;
322 	case NO_OUT_RIPV2:
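		/* The RIPv2-only buffer is silently discarded when the
		 * destination is not being sent RIPv2 (see supply()).
		 */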
323 		break;
324 	default:
325 		if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
326 			end_md5_auth(wb,ws.a);
327 		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
328 			   ((char *)wb->n - (char*)wb->buf)) < 0
329 		    && ws.ifp != NULL)
330 			if_sick(ws.ifp);
331 		ws.npackets++;
332 		break;
333 	}
334 
335 	clr_ws_buf(wb,ws.a);
336 }
337 
338 
339 /* put an entry into the packet
340  */
341 static void
342 supply_out(struct ag_info *ag)
343 {
344 	int i;
345 	naddr mask, v1_mask, dst_h, ddst_h = 0;
346 	struct ws_buf *wb;
347 
348 
349 	/* Skip this route if doing a flash update and it and the routes
350 	 * it aggregates have not changed recently.
351 	 */
352 	if (ag->ag_seqno < update_seqno
353 	    && (ws.state & WS_ST_FLASH))
354 		return;
355 
356 	dst_h = ag->ag_dst_h;
357 	mask = ag->ag_mask;
358 	v1_mask = ripv1_mask_host(htonl(dst_h),
359 				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
360 	i = 0;
361 
362 	/* If we are sending RIPv2 packets that cannot (or must not) be
363 	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
364 	 * Subnets (from other networks) can only be sent via multicast.
365 	 * A pair of subnet routes might have been promoted so that they
366 	 * are legal to send by RIPv1.
367 	 * If RIPv1 is off, use the multicast buffer.
368 	 */
369 	if ((ws.state & WS_ST_RIP2_ALL)
370 	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
371 		/* use the RIPv2-only buffer */
372 		wb = &v2buf;
373 
374 	} else {
375 		/* use the RIPv1-or-RIPv2 buffer */
376 		wb = &v12buf;
377 
378 		/* Convert supernet route into corresponding set of network
379 		 * routes for RIPv1, but leave non-contiguous netmasks
380 		 * to ag_check().
381 		 */
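		/* For example, a /22 supernet sent where RIPv1 listeners
		 * expect /24 masks becomes four consecutive /24 routes.
		 */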
382 		if (v1_mask > mask
383 		    && mask + (mask & -mask) == 0) {
384 			ddst_h = v1_mask & -v1_mask;
385 			i = (v1_mask & ~mask)/ddst_h;
386 
387 			if (i > ws.gen_limit) {
388 				/* Punt if we would have to generate an
389 				 * unreasonable number of routes.
390 				 */
391 				if (TRACECONTENTS)
392 					trace_misc("sending %s-->%s as 1"
393 						   " instead of %d routes",
394 						   addrname(htonl(dst_h), mask,
395 							1),
396 						   naddr_ntoa(ws.to.sin_addr
397 							.s_addr),
398 						   i+1);
399 				i = 0;
400 
401 			} else {
402 				mask = v1_mask;
403 				ws.gen_limit -= i;
404 			}
405 		}
406 	}
407 
408 	do {
409 		wb->n->n_family = RIP_AF_INET;
410 		wb->n->n_dst = htonl(dst_h);
411 		/* If the route is from router-discovery or we are
412 		 * shutting down, admit only a bad metric.
413 		 */
414 		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
415 				   ? HOPCNT_INFINITY
416 				   : ag->ag_metric);
417 		wb->n->n_metric = htonl(wb->n->n_metric);
418 		/* Any non-zero bits in the supposedly unused RIPv1 fields
419 		 * cause the old `routed` to ignore the route.
420 		 * That means the mask and so forth cannot be sent
421 		 * in the hybrid RIPv1/RIPv2 mode.
422 		 */
423 		if (ws.state & WS_ST_RIP2_ALL) {
424 			if (ag->ag_nhop != 0
425 			    && ((ws.state & WS_ST_QUERY)
426 				|| (ag->ag_nhop != ws.ifp->int_addr
427 				    && on_net(ag->ag_nhop,
428 					      ws.ifp->int_net,
429 					      ws.ifp->int_mask))))
430 				wb->n->n_nhop = ag->ag_nhop;
431 			wb->n->n_mask = htonl(mask);
432 			wb->n->n_tag = ag->ag_tag;
433 		}
434 		dst_h += ddst_h;
435 
436 		if (++wb->n >= wb->lim)
437 			supply_write(wb);
438 	} while (i-- != 0);
439 }
440 
441 
442 /* supply one route from the table
443  */
444 /* ARGSUSED */
445 static int
446 walk_supply(struct radix_node *rn,
447 	    struct walkarg *argp UNUSED)
448 {
449 #define RT ((struct rt_entry *)rn)
450 	u_short ags;
451 	char metric, pref;
452 	naddr dst, nhop;
453 	struct rt_spare *rts;
454 	int i;
455 
456 
457 	/* Do not advertise external remote interfaces or passive interfaces.
458 	 */
459 	if ((RT->rt_state & RS_IF)
460 	    && RT->rt_ifp != 0
461 	    && (RT->rt_ifp->int_state & IS_PASSIVE)
462 	    && !(RT->rt_state & RS_MHOME))
463 		return 0;
464 
465 	/* If being quiet about our ability to forward, then
466 	 * do not say anything unless responding to a query,
467 	 * except about our main interface.
468 	 */
469 	if (!supplier && !(ws.state & WS_ST_QUERY)
470 	    && !(RT->rt_state & RS_MHOME))
471 		return 0;
472 
473 	dst = RT->rt_dst;
474 
475 	/* do not collide with the fake default route */
476 	if (dst == RIP_DEFAULT
477 	    && (ws.state & WS_ST_DEFAULT))
478 		return 0;
479 
480 	if (RT->rt_state & RS_NET_SYN) {
481 		if (RT->rt_state & RS_NET_INT) {
482 			/* Do not send manual synthetic network routes
483 			 * into the subnet.
484 			 */
485 			if (on_net(ws.to.sin_addr.s_addr,
486 				   ntohl(dst), RT->rt_mask))
487 				return 0;
488 
489 		} else {
490 			/* Do not send automatic synthetic network routes
491 			 * if they are not needed because no RIPv1 listeners
492 			 * can hear them.
493 			 */
494 			if (ws.state & WS_ST_RIP2_ALL)
495 				return 0;
496 
497 			/* Do not send automatic synthetic network routes to
498 			 * the real subnet.
499 			 */
500 			if (on_net(ws.to.sin_addr.s_addr,
501 				   ntohl(dst), RT->rt_mask))
502 				return 0;
503 		}
504 		nhop = 0;
505 
506 	} else {
507 		/* Advertise the next hop if this is not a route for one
508 		 * of our interfaces and the next hop is on the same
509 		 * network as the target.
510 		 * The final determination is made by supply_out().
511 		 */
512 		if (!(RT->rt_state & RS_IF)
513 		    && RT->rt_gate != myaddr
514 		    && RT->rt_gate != loopaddr)
515 			nhop = RT->rt_gate;
516 		else
517 			nhop = 0;
518 	}
519 
520 	metric = RT->rt_metric;
521 	ags = 0;
522 
523 	if (RT->rt_state & RS_MHOME) {
524 		/* retain host route of multi-homed servers */
525 		;
526 
527 	} else if (RT_ISHOST(RT)) {
528 		/* We should always suppress (into existing network routes)
529 		 * the host routes for the local end of our point-to-point
530 		 * links.
531 		 * If we are suppressing host routes in general, then do so.
532 		 * Avoid advertising host routes onto their own network,
533 		 * where they should be handled by proxy-ARP.
534 		 */
535 		if ((RT->rt_state & RS_LOCAL)
536 		    || ridhosts
537 		    || on_net(dst, ws.to_net, ws.to_mask))
538 			ags |= AGS_SUPPRESS;
539 
540 		/* Aggregate stray host routes into network routes if allowed.
541 		 * We cannot aggregate host routes into small network routes
542 		 * without confusing RIPv1 listeners into thinking the
543 		 * network routes are host routes.
544 		 */
545 		if ((ws.state & WS_ST_AG)
546 		    && !(ws.state & WS_ST_RIP2_ALL))
547 			ags |= AGS_AGGREGATE;
548 
549 	} else {
550 		/* Always suppress network routes into other, existing
551 		 * network routes
552 		 */
553 		ags |= AGS_SUPPRESS;
554 
555 		/* Generate supernets if allowed.
556 		 * If we can be heard by RIPv1 systems, we will
557 		 * later convert back to ordinary nets.
558 		 * This unifies dealing with received supernets.
559 		 */
560 		if ((ws.state & WS_ST_AG)
561 		    && ((RT->rt_state & RS_SUBNET)
562 			|| (ws.state & WS_ST_SUPER_AG)))
563 			ags |= AGS_AGGREGATE;
564 	}
565 
566 	/* Do not send RIPv1 advertisements of subnets to other
567 	 * networks. If possible, multicast them by RIPv2.
568 	 */
569 	if ((RT->rt_state & RS_SUBNET)
570 	    && !(ws.state & WS_ST_RIP2_ALL)
571 	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
572 		ags |= AGS_RIPV2 | AGS_AGGREGATE;
573 
574 
575 	/* Do not send a route back to where it came from, except in
576 	 * response to a query.  This is "split-horizon".  That means not
577 	 * advertising back to the same network and so via the same interface.
578 	 *
579 	 * We want to suppress routes that might have been fragmented
580 	 * from this route by a RIPv1 router and sent back to us, and so we
581 	 * cannot forget this route here.  Let the split-horizon route
582 	 * suppress the fragmented routes and then itself be forgotten.
583 	 *
584 	 * Include the routes for both ends of point-to-point interfaces
585 	 * among those suppressed by split-horizon, since the other side
586 	 * should know them as well as we do.
587 	 *
588 	 * Notice spare routes with the same metric that we are about to
589 	 * advertise, to split the horizon on redundant, inactive paths.
590 	 */
591 	if (ws.ifp != 0
592 	    && !(ws.state & WS_ST_QUERY)
593 	    && (ws.state & WS_ST_TO_ON_NET)
594 	    && (!(RT->rt_state & RS_IF)
595 		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
596 		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
597 			if (rts->rts_metric > metric
598 			    || rts->rts_ifp != ws.ifp)
599 				continue;
600 
601 			/* If we do not mark the route with AGS_SPLIT_HZ here,
602 			 * it will be poisoned-reverse, or advertised back
603 			 * toward its source with an infinite metric.
604 			 * If we have recently advertised the route with a
605 			 * better metric than we now have, then we should
606 			 * poison-reverse the route before suppressing it for
607 			 * split-horizon.
608 			 *
609 			 * In almost all cases, if there is no spare for the
610 			 * route then it is either old and dead or a brand
611 			 * new route. If it is brand new, there is no need
612 			 * for poison-reverse. If it is old and dead, it
613 			 * is already poisoned.
614 			 */
615 			if (RT->rt_poison_time < now_expire
616 			    || RT->rt_poison_metric >= metric
617 			    || RT->rt_spares[1].rts_gate == 0) {
618 				ags |= AGS_SPLIT_HZ;
619 				ags &= ~AGS_SUPPRESS;
620 			}
621 			metric = HOPCNT_INFINITY;
622 			break;
623 		}
624 	}
625 
626 	/* Keep track of the best metric with which the
627 	 * route has been advertised recently.
628 	 */
629 	if (RT->rt_poison_metric >= metric
630 	    || RT->rt_poison_time < now_expire) {
631 		RT->rt_poison_time = now.tv_sec;
632 		RT->rt_poison_metric = metric;
633 	}
634 
635 	/* Adjust the outgoing metric by the cost of the link.
636 	 * Avoid aggregation when a route is counting to infinity.
637 	 */
638 	pref = RT->rt_poison_metric + ws.metric;
639 	metric += ws.metric;
640 
641 	/* Do not advertise stable routes that will be ignored,
642 	 * unless we are answering a query.
643 	 * If the route recently was advertised with a metric that
644 	 * would have been less than infinity through this interface,
645 	 * we need to continue to advertise it in order to poison it.
646 	 */
647 	if (metric >= HOPCNT_INFINITY) {
648 		if (!(ws.state & WS_ST_QUERY)
649 		    && (pref >= HOPCNT_INFINITY
650 			|| RT->rt_poison_time < now_garbage))
651 			return 0;
652 
653 		metric = HOPCNT_INFINITY;
654 	}
655 
656 	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
657 		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
658 	return 0;
659 #undef RT
660 }
661 
662 
663 /* Supply dst with the contents of the routing tables.
664  * If this won't fit in one packet, chop it up into several.
665  */
666 void
667 supply(struct sockaddr_in *dst,
668        struct interface *ifp,		/* output interface */
669        enum output_type type,
670        int flash,			/* 1=flash update */
671        int vers,			/* RIP version */
672        int passwd_ok)			/* OK to include cleartext password */
673 {
674 	struct rt_entry *rt;
675 	int def_metric;
676 
677 
678 	ws.state = 0;
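	/* Budget for how many extra RIPv1 network routes supply_out()
	 * may generate from supernets during this update.
	 */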
679 	ws.gen_limit = 1024;
680 
681 	ws.to = *dst;
682 	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
683 	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;
684 
685 	if (ifp != NULL) {
686 		ws.to_mask = ifp->int_mask;
687 		ws.to_net = ifp->int_net;
688 		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
689 			ws.state |= WS_ST_TO_ON_NET;
690 
691 	} else {
692 		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
693 		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
694 		rt = rtfind(dst->sin_addr.s_addr);
695 		if (rt)
696 			ifp = rt->rt_ifp;
697 	}
698 
699 	ws.npackets = 0;
700 	if (flash)
701 		ws.state |= WS_ST_FLASH;
702 
703 	if ((ws.ifp = ifp) == NULL) {
704 		ws.metric = 1;
705 	} else {
706 		/* Adjust the advertised metric by the outgoing interface
707 		 * metric.
708 		 */
709 		ws.metric = ifp->int_metric+1;
710 	}
711 
712 	ripv12_buf.rip.rip_vers = vers;
713 
714 	switch (type) {
715 	case OUT_MULTICAST:
716 		if (ifp->int_if_flags & IFF_MULTICAST)
717 			v2buf.type = OUT_MULTICAST;
718 		else
719 			v2buf.type = NO_OUT_MULTICAST;
720 		v12buf.type = OUT_BROADCAST;
721 		break;
722 
723 	case OUT_QUERY:
724 		ws.state |= WS_ST_QUERY;
725 		/* fall through */
726 	case OUT_BROADCAST:
727 	case OUT_UNICAST:
728 		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
729 		v12buf.type = type;
730 		break;
731 
732 	case NO_OUT_MULTICAST:
733 	case NO_OUT_RIPV2:
734 		break;			/* no output */
735 	}
736 
737 	if (vers == RIPv2) {
738 		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
739 		if (type != OUT_BROADCAST)
740 			ws.state |= WS_ST_RIP2_ALL;
741 		if ((ws.state & WS_ST_QUERY)
742 		    || !(ws.state & WS_ST_TO_ON_NET)) {
743 			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
744 		} else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
745 			ws.state |= WS_ST_AG;
746 			if (type != OUT_BROADCAST
747 			    && (ifp == NULL
748 				|| !(ifp->int_state & IS_NO_SUPER_AG)))
749 				ws.state |= WS_ST_SUPER_AG;
750 		}
751 	}
752 
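	/* Choose the authentication for this update, dropping a cleartext
	 * password when it must not be included in this packet.
	 */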
753 	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
754 	if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
755 		ws.a = NULL;
756 	clr_ws_buf(&v12buf,ws.a);
757 	clr_ws_buf(&v2buf,ws.a);
758 
759 	/*  Fake a default route if asked and if there is not already
760 	 * a better, real default route.
761 	 */
762 	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
763 		if (NULL == (rt = rtget(RIP_DEFAULT, 0))
764 		    || rt->rt_metric+ws.metric >= def_metric) {
765 			ws.state |= WS_ST_DEFAULT;
766 			ag_check(0, 0, 0, 0, def_metric, def_metric,
767 				 0, 0, 0, supply_out);
768 		} else {
769 			def_metric = rt->rt_metric+ws.metric;
770 		}
771 
772 		/* If both RIPv2 and the poor-man's router discovery
773 		 * kludge are on, arrange to advertise an extra
774 		 * default route via RIPv1.
775 		 */
776 		if ((ws.state & WS_ST_RIP2_ALL)
777 		    && (ifp->int_state & IS_PM_RDISC)) {
778 			ripv12_buf.rip.rip_vers = RIPv1;
779 			v12buf.n->n_family = RIP_AF_INET;
780 			v12buf.n->n_dst = htonl(RIP_DEFAULT);
781 			v12buf.n->n_metric = htonl(def_metric);
782 			v12buf.n++;
783 		}
784 	}
785 
786 	rn_walktree(rhead, walk_supply, 0);
787 	ag_flush(0,0,supply_out);
788 
789 	/* Flush the packet buffers, provided they are not empty and
790 	 * do not contain only the password.
791 	 */
792 	if (v12buf.n != v12buf.base
793 	    && (v12buf.n > v12buf.base+1
794 		|| v12buf.base->n_family != RIP_AF_AUTH))
795 		supply_write(&v12buf);
796 	if (v2buf.n != v2buf.base
797 	    && (v2buf.n > v2buf.base+1
798 		|| v2buf.base->n_family != RIP_AF_AUTH))
799 		supply_write(&v2buf);
800 
801 	/* If we sent nothing and this is an answer to a query, send
802 	 * an empty buffer.
803 	 */
804 	if (ws.npackets == 0
805 	    && (ws.state & WS_ST_QUERY))
806 		supply_write(&v12buf);
807 }
808 
809 
810 /* send all of the routing table or just do a flash update
811  */
812 void
813 rip_bcast(int flash)
814 {
815 #ifdef _HAVE_SIN_LEN
816 	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
817 #else
818 	static struct sockaddr_in dst = {AF_INET};
819 #endif
820 	struct interface *ifp;
821 	enum output_type type;
822 	int vers;
823 	struct timeval rtime;
824 
825 
826 	need_flash = 0;
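	/* Pick a random delay and inhibit dynamic (flash) updates until
	 * it has passed.
	 */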
827 	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
828 	no_flash = rtime;
829 	timevaladd(&no_flash, &now);
830 
831 	if (rip_sock < 0)
832 		return;
833 
834 	trace_act("send %s and inhibit dynamic updates for %.3f sec",
835 		  flash ? "dynamic update" : "all routes",
836 		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);
837 
838 	for (ifp = ifnet; ifp != NULL; ifp = ifp->int_next) {
839 		/* Skip interfaces not doing RIP.
840 		 * Do try broken interfaces to see if they have healed.
841 		 */
842 		if (IS_RIP_OUT_OFF(ifp->int_state))
843 			continue;
844 
845 		/* skip turned off interfaces */
846 		if (!iff_up(ifp->int_if_flags))
847 			continue;
848 
849 		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;
850 
851 		if (ifp->int_if_flags & IFF_BROADCAST) {
852 			/* ordinary, hardware interface */
853 			dst.sin_addr.s_addr = ifp->int_brdaddr;
854 
855 			if (vers == RIPv2
856 			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
857 				type = OUT_MULTICAST;
858 			} else {
859 				type = OUT_BROADCAST;
860 			}
861 
862 		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
863 			/* point-to-point hardware interface */
864 			dst.sin_addr.s_addr = ifp->int_dstaddr;
865 			type = OUT_UNICAST;
866 
867 		} else if (ifp->int_state & IS_REMOTE) {
868 			/* remote interface */
869 			dst.sin_addr.s_addr = ifp->int_addr;
870 			type = OUT_UNICAST;
871 
872 		} else {
873 			/* ATM, HIPPI, etc. */
874 			continue;
875 		}
876 
877 		supply(&dst, ifp, type, flash, vers, 1);
878 	}
879 
880 	update_seqno++;			/* all routes are up to date */
881 }
882 
883 
884 /* Ask for routes.
885  * Do it only once per interface, and do not ask again even after the
886  * interface was broken and recovered.
887  */
888 void
889 rip_query(void)
890 {
891 #ifdef _HAVE_SIN_LEN
892 	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
893 #else
894 	static struct sockaddr_in dst = {AF_INET};
895 #endif
896 	struct interface *ifp;
897 	struct rip buf;
898 	enum output_type type;
899 
900 
901 	if (rip_sock < 0)
902 		return;
903 
904 	memset(&buf, 0, sizeof(buf));
905 
906 	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
907 		/* Skip interfaces that have already been queried.
908 		 * Do not ask via interfaces through which we don't
909 		 * accept input.  Do not ask via interfaces that cannot
910 		 * send RIP packets.
911 		 * Do try broken interfaces to see if they have healed.
912 		 */
913 		if (IS_RIP_IN_OFF(ifp->int_state)
914 		    || ifp->int_query_time != NEVER)
915 			continue;
916 
917 		/* skip turned off interfaces */
918 		if (!iff_up(ifp->int_if_flags))
919 			continue;
920 
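		/* Build the query: per the RIP protocol, a single entry with
		 * family RIP_AF_UNSPEC and an infinite metric asks for the
		 * receiver's entire routing table.
		 */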
921 		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
922 		buf.rip_cmd = RIPCMD_REQUEST;
923 		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
924 		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);
925 
926 		/* Send a RIPv1 query only if allowed and if we will
927 		 * listen to RIPv1 routers.
928 		 */
929 		if ((ifp->int_state & IS_NO_RIPV1_OUT)
930 		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
931 			buf.rip_vers = RIPv2;
932 		} else {
933 			buf.rip_vers = RIPv1;
934 		}
935 
936 		if (ifp->int_if_flags & IFF_BROADCAST) {
937 			/* ordinary, hardware interface */
938 			dst.sin_addr.s_addr = ifp->int_brdaddr;
939 
940 			/* Broadcast RIPv1 queries and RIPv2 queries
941 			 * when the hardware cannot multicast.
942 			 */
943 			if (buf.rip_vers == RIPv2
944 			    && (ifp->int_if_flags & IFF_MULTICAST)
945 			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
946 				type = OUT_MULTICAST;
947 			} else {
948 				type = OUT_BROADCAST;
949 			}
950 
951 		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
952 			/* point-to-point hardware interface */
953 			dst.sin_addr.s_addr = ifp->int_dstaddr;
954 			type = OUT_UNICAST;
955 
956 		} else if (ifp->int_state & IS_REMOTE) {
957 			/* remote interface */
958 			dst.sin_addr.s_addr = ifp->int_addr;
959 			type = OUT_UNICAST;
960 
961 		} else {
962 			/* ATM, HIPPI, etc. */
963 			continue;
964 		}
965 
966 		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
967 		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
968 			if_sick(ifp);
969 	}
970 }
971