xref: /dragonfly/sbin/routed/output.c (revision 73e0051e)
1 /*
2  * Copyright (c) 1983, 1988, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgment:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sbin/routed/output.c,v 1.5.2.1 2000/08/14 17:00:03 sheldonh Exp $
34  * $DragonFly: src/sbin/routed/output.c,v 1.5 2005/03/16 21:21:34 cpressey Exp $
35  */
36 
37 #include "defs.h"
38 
39 #if !defined(sgi) && !defined(__NetBSD__)
40 static char sccsid[] __attribute__((unused)) = "@(#)output.c	8.1 (Berkeley) 6/5/93";
41 #elif defined(__NetBSD__)
42 __RCSID("$NetBSD$");
43 #endif
44 
45 
46 u_int update_seqno;
47 
48 
49 /* walk the tree of routes with this for output
50  */
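/* The walk state below is filled in by supply() and consulted by
 * walk_supply(), supply_out() and supply_write() during a single
 * walk of the route tree.
 */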
51 struct {
52 	struct sockaddr_in to;
53 	naddr	to_mask;
54 	naddr	to_net;
55 	naddr	to_std_mask;
56 	naddr	to_std_net;
57 	struct interface *ifp;		/* usually output interface */
58 	struct auth *a;
59 	char	metric;			/* adjust metrics by interface */
60 	int	npackets;
61 	int	gen_limit;
62 	u_int	state;
63 #define	    WS_ST_FLASH	    0x001	/* send only changed routes */
64 #define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
65 #define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
66 #define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
67 #define	    WS_ST_QUERY	    0x010	/* responding to a query */
68 #define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
69 #define	    WS_ST_DEFAULT   0x040	/* faking a default */
70 } ws;
71 
72 /* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
73 struct ws_buf v12buf;
74 union pkt_buf ripv12_buf;
75 
76 /* Another for only RIPv2 listeners */
77 struct ws_buf v2buf;
78 union pkt_buf rip_v2_buf;
79 
80 
81 
82 void
83 bufinit(void)
84 {
85 	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
86 	v12buf.buf = &ripv12_buf.rip;
87 	v12buf.base = &v12buf.buf->rip_nets[0];
88 
89 	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
90 	rip_v2_buf.rip.rip_vers = RIPv2;
91 	v2buf.buf = &rip_v2_buf.rip;
92 	v2buf.base = &v2buf.buf->rip_nets[0];
93 }
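/* ripv12_buf carries whichever RIP version supply() picks for each
 * destination, while rip_v2_buf is always marked RIPv2.  Only the fixed
 * header fields are set here; clr_ws_buf() prepares the route entries.
 */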
94 
95 
96 /* Send the contents of the global buffer via the non-multicast socket
97  */
98 int					/* <0 on failure */
99 output(enum output_type type,
100        struct sockaddr_in *dst,		/* send to here */
101        struct interface *ifp,
102        struct rip *buf,
103        int size)			/* this many bytes */
104 {
105 	struct sockaddr_in in;
106 	int flags;
107 	const char *msg;
108 	int res;
109 	naddr tgt_mcast;
110 	int soc;
111 	int serrno;
112 
113 	in = *dst;
114 	if (in.sin_port == 0)
115 		in.sin_port = htons(RIP_PORT);
116 #ifdef _HAVE_SIN_LEN
117 	if (in.sin_len == 0)
118 		in.sin_len = sizeof(in);
119 #endif
120 
121 	soc = rip_sock;
122 	flags = 0;
123 
124 	switch (type) {
125 	case OUT_QUERY:
126 		msg = "Answer Query";
127 		if (soc < 0)
128 			soc = ifp->int_rip_sock;
129 		break;
130 	case OUT_UNICAST:
131 		msg = "Send";
132 		if (soc < 0)
133 			soc = ifp->int_rip_sock;
134 		flags = MSG_DONTROUTE;
135 		break;
136 	case OUT_BROADCAST:
137 		if (ifp->int_if_flags & IFF_POINTOPOINT) {
138 			msg = "Send";
139 		} else {
140 			msg = "Send bcast";
141 		}
142 		flags = MSG_DONTROUTE;
143 		break;
144 	case OUT_MULTICAST:
145 		if (ifp->int_if_flags & IFF_POINTOPOINT) {
146 			msg = "Send pt-to-pt";
147 		} else if (ifp->int_state & IS_DUP) {
148 			trace_act("abort multicast output via %s"
149 				  " with duplicate address",
150 				  ifp->int_name);
151 			return 0;
152 		} else {
153 			msg = "Send mcast";
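			/* The interface last used for multicast output is
			 * cached in rip_sock_mcast, so IP_MULTICAST_IF is
			 * reset only when the outgoing interface changes.
			 * The packet is addressed to the RIP-2 group
			 * (INADDR_RIP_GROUP, 224.0.0.9).
			 */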
154 			if (rip_sock_mcast != ifp) {
155 #ifdef MCAST_PPP_BUG
156 				/* Do not specify the primary interface
157 				 * explicitly if we have the multicast
158 				 * point-to-point kernel bug, since the
159 				 * kernel will do the wrong thing if the
160 				 * local address of a point-to-point link
161 				 * is the same as the address of an ordinary
162 				 * interface.
163 				 */
164 				if (ifp->int_addr == myaddr) {
165 					tgt_mcast = 0;
166 				} else
167 #endif
168 				tgt_mcast = ifp->int_addr;
169 				if (0 > setsockopt(rip_sock,
170 						   IPPROTO_IP, IP_MULTICAST_IF,
171 						   &tgt_mcast,
172 						   sizeof(tgt_mcast))) {
173 					serrno = errno;
174 					LOGERR("setsockopt(rip_sock,"
175 					       "IP_MULTICAST_IF)");
176 					errno = serrno;
177 					ifp = 0;
178 					return -1;
179 				}
180 				rip_sock_mcast = ifp;
181 			}
182 			in.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
183 		}
184 		break;
185 
186 	case NO_OUT_MULTICAST:
187 	case NO_OUT_RIPV2:
188 	default:
189 #ifdef DEBUG
190 		abort();
191 #endif
192 		return -1;
193 	}
194 
195 	trace_rip(msg, "to", &in, ifp, buf, size);
196 
197 	res = sendto(soc, buf, size, flags,
198 		     (struct sockaddr *)&in, sizeof(in));
199 	if (res < 0
200 	    && (ifp == 0 || !(ifp->int_state & IS_BROKE))) {
201 		serrno = errno;
202 		msglog("%s sendto(%s%s%s.%d): %s", msg,
203 		       ifp != 0 ? ifp->int_name : "",
204 		       ifp != 0 ? ", " : "",
205 		       inet_ntoa(in.sin_addr),
206 		       ntohs(in.sin_port),
207 		       strerror(errno));
208 		errno = serrno;
209 	}
210 
211 	return res;
212 }
213 
214 
215 /* Find the key to use for a packet we are about to send.
216  * Prefer a key that is eligible and has not expired, but settle for
217  * the expired key with the latest end time if they have all expired.
218  * If no key is ready yet, give up.
219  */
220 struct auth *
221 find_auth(struct interface *ifp)
222 {
223 	struct auth *ap, *res;
224 	int i;
225 
226 
227 	if (ifp == 0)
228 		return 0;
229 
230 	res = 0;
231 	ap = ifp->int_auth;
232 	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
233 		/* stop looking after the last key */
234 		if (ap->type == RIP_AUTH_NONE)
235 			break;
236 
237 		/* ignore keys that are not ready yet */
238 		if ((u_long)ap->start > (u_long)clk.tv_sec)
239 			continue;
240 
241 		if ((u_long)ap->end < (u_long)clk.tv_sec) {
242 			/* note best expired password as a fall-back */
243 			if (res == 0 || (u_long)ap->end > (u_long)res->end)
244 				res = ap;
245 			continue;
246 		}
247 
248 		/* note key with the best future */
249 		if (res == 0 || (u_long)res->end < (u_long)ap->end)
250 			res = ap;
251 	}
252 	return res;
253 }
254 
255 
256 void
257 clr_ws_buf(struct ws_buf *wb,
258 	   struct auth *ap)
259 {
260 	struct netauth *na;
261 
262 	wb->lim = wb->base + NETS_LEN;
263 	wb->n = wb->base;
264 	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));
265 
266 	/* (start to) install authentication if appropriate
267 	 */
268 	if (ap == 0)
269 		return;
270 
271 	na = (struct netauth*)wb->n;
272 	if (ap->type == RIP_AUTH_PW) {
273 		na->a_family = RIP_AF_AUTH;
274 		na->a_type = RIP_AUTH_PW;
275 		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
276 		wb->n++;
277 
278 	} else if (ap->type ==  RIP_AUTH_MD5) {
279 		na->a_family = RIP_AF_AUTH;
280 		na->a_type = RIP_AUTH_MD5;
281 		na->au.a_md5.md5_keyid = ap->keyid;
282 		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_LEN;
283 		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
284 		wb->n++;
285 		wb->lim--;		/* make room for trailer */
286 	}
287 }
288 
289 
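/* Finish MD5 authentication of an outgoing RIPv2 packet: record the
 * length of the packet up to the trailer in the leading authentication
 * entry, append the trailer entry (marked with an a_type of 1), and store
 * in it the MD5 digest of the packet contents followed by the secret key.
 */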
290 void
291 end_md5_auth(struct ws_buf *wb,
292 	     struct auth *ap)
293 {
294 	struct netauth *na, *na2;
295 	MD5_CTX md5_ctx;
296 	int len;
297 
298 
299 	na = (struct netauth*)wb->base;
300 	na2 = (struct netauth*)wb->n;
301 	len = (char *)na2-(char *)wb->buf;
302 	na2->a_family = RIP_AF_AUTH;
303 	na2->a_type = htons(1);
304 	na->au.a_md5.md5_pkt_len = htons(len);
305 	MD5Init(&md5_ctx);
306 	MD5Update(&md5_ctx, (u_char *)wb->buf, len);
307 	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_LEN);
308 	MD5Final(na2->au.au_pw, &md5_ctx);
309 	wb->n++;
310 }
311 
312 
313 /* Send the buffer
314  */
315 static void
316 supply_write(struct ws_buf *wb)
317 {
318 	/* Output multicast only if legal.
319 	 * If we would multicast and it would be illegal, then discard the
320 	 * packet.
321 	 */
322 	switch (wb->type) {
323 	case NO_OUT_MULTICAST:
324 		trace_pkt("skip multicast to %s because impossible",
325 			  naddr_ntoa(ws.to.sin_addr.s_addr));
326 		break;
327 	case NO_OUT_RIPV2:
328 		break;
329 	default:
330 		if (ws.a != 0 && ws.a->type == RIP_AUTH_MD5)
331 			end_md5_auth(wb,ws.a);
332 		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
333 			   ((char *)wb->n - (char*)wb->buf)) < 0
334 		    && ws.ifp != 0)
335 			if_sick(ws.ifp);
336 		ws.npackets++;
337 		break;
338 	}
339 
340 	clr_ws_buf(wb,ws.a);
341 }
342 
343 
344 /* put an entry into the packet
345  */
346 static void
347 supply_out(struct ag_info *ag)
348 {
349 	int i;
350 	naddr mask, v1_mask, dst_h, ddst_h = 0;
351 	struct ws_buf *wb;
352 
353 
354 	/* Skip this route if doing a flash update and it and the routes
355 	 * it aggregates have not changed recently.
356 	 */
357 	if (ag->ag_seqno < update_seqno
358 	    && (ws.state & WS_ST_FLASH))
359 		return;
360 
361 	dst_h = ag->ag_dst_h;
362 	mask = ag->ag_mask;
363 	v1_mask = ripv1_mask_host(htonl(dst_h),
364 				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
365 	i = 0;
366 
367 	/* If we are sending RIPv2 packets that cannot (or must not) be
368 	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
369 	 * Subnets (from other networks) can only be sent via multicast.
370 	 * A pair of subnet routes might have been promoted so that they
371 	 * are legal to send by RIPv1.
372 	 * If RIPv1 is off, use the multicast buffer.
373 	 */
374 	if ((ws.state & WS_ST_RIP2_ALL)
375 	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
376 		/* use the RIPv2-only buffer */
377 		wb = &v2buf;
378 
379 	} else {
380 		/* use the RIPv1-or-RIPv2 buffer */
381 		wb = &v12buf;
382 
383 		/* Convert supernet route into corresponding set of network
384 		 * routes for RIPv1, but leave non-contiguous netmasks
385 		 * to ag_check().
386 		 */
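		/* For example, a /22 supernet (mask 0xfffffc00) sent where
		 * the RIPv1 natural mask is /24 gives ddst_h = 0x100 and
		 * i = 3, so the loop below emits four /24 network routes.
		 */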
387 		if (v1_mask > mask
388 		    && mask + (mask & -mask) == 0) {
389 			ddst_h = v1_mask & -v1_mask;
390 			i = (v1_mask & ~mask)/ddst_h;
391 
392 			if (i > ws.gen_limit) {
393 				/* Punt if we would have to generate an
394 				 * unreasonable number of routes.
395 				 */
396 				if (TRACECONTENTS)
397 					trace_misc("sending %s-->%s as 1"
398 						   " instead of %d routes",
399 						   addrname(htonl(dst_h), mask,
400 							1),
401 						   naddr_ntoa(ws.to.sin_addr
402 							.s_addr),
403 						   i+1);
404 				i = 0;
405 
406 			} else {
407 				mask = v1_mask;
408 				ws.gen_limit -= i;
409 			}
410 		}
411 	}
412 
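	/* Emit the route, or each of the expanded RIPv1 network routes,
	 * flushing the buffer with supply_write() whenever it fills.
	 */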
413 	do {
414 		wb->n->n_family = RIP_AF_INET;
415 		wb->n->n_dst = htonl(dst_h);
416 		/* If the route is from router-discovery or we are
417 		 * shutting down, admit only a bad metric.
418 		 */
419 		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
420 				   ? HOPCNT_INFINITY
421 				   : ag->ag_metric);
422 		wb->n->n_metric = htonl(wb->n->n_metric);
423 		/* Any non-zero bits in the supposedly unused RIPv1 fields
424 		 * cause the old `routed` to ignore the route.
425 		 * That means the mask and so forth cannot be sent
426 		 * in the hybrid RIPv1/RIPv2 mode.
427 		 */
428 		if (ws.state & WS_ST_RIP2_ALL) {
429 			if (ag->ag_nhop != 0
430 			    && ((ws.state & WS_ST_QUERY)
431 				|| (ag->ag_nhop != ws.ifp->int_addr
432 				    && on_net(ag->ag_nhop,
433 					      ws.ifp->int_net,
434 					      ws.ifp->int_mask))))
435 				wb->n->n_nhop = ag->ag_nhop;
436 			wb->n->n_mask = htonl(mask);
437 			wb->n->n_tag = ag->ag_tag;
438 		}
439 		dst_h += ddst_h;
440 
441 		if (++wb->n >= wb->lim)
442 			supply_write(wb);
443 	} while (i-- != 0);
444 }
445 
446 
447 /* supply one route from the table
448  */
449 /* ARGSUSED */
450 static int
451 walk_supply(struct radix_node *rn,
452 	    struct walkarg *argp UNUSED)
453 {
454 #define RT ((struct rt_entry *)rn)
455 	u_short ags;
456 	char metric, pref;
457 	naddr dst, nhop;
458 	struct rt_spare *rts;
459 	int i;
460 
461 
462 	/* Do not advertise external remote interfaces or passive interfaces.
463 	 */
464 	if ((RT->rt_state & RS_IF)
465 	    && RT->rt_ifp != 0
466 	    && (RT->rt_ifp->int_state & IS_PASSIVE)
467 	    && !(RT->rt_state & RS_MHOME))
468 		return 0;
469 
470 	/* If we are being quiet about our ability to forward, then
471 	 * do not say anything unless responding to a query,
472 	 * except about our main interface.
473 	 */
474 	if (!supplier && !(ws.state & WS_ST_QUERY)
475 	    && !(RT->rt_state & RS_MHOME))
476 		return 0;
477 
478 	dst = RT->rt_dst;
479 
480 	/* do not collide with the fake default route */
481 	if (dst == RIP_DEFAULT
482 	    && (ws.state & WS_ST_DEFAULT))
483 		return 0;
484 
485 	if (RT->rt_state & RS_NET_SYN) {
486 		if (RT->rt_state & RS_NET_INT) {
487 			/* Do not send manual synthetic network routes
488 			 * into the subnet.
489 			 */
490 			if (on_net(ws.to.sin_addr.s_addr,
491 				   ntohl(dst), RT->rt_mask))
492 				return 0;
493 
494 		} else {
495 			/* Do not send automatic synthetic network routes
496 			 * if they are not needed because no RIPv1 listeners
497 			 * can hear them.
498 			 */
499 			if (ws.state & WS_ST_RIP2_ALL)
500 				return 0;
501 
502 			/* Do not send automatic synthetic network routes to
503 			 * the real subnet.
504 			 */
505 			if (on_net(ws.to.sin_addr.s_addr,
506 				   ntohl(dst), RT->rt_mask))
507 				return 0;
508 		}
509 		nhop = 0;
510 
511 	} else {
512 		/* Advertise the next hop if this is not a route for one
513 		 * of our interfaces and the next hop is on the same
514 		 * network as the target.
515 		 * The final determination is made by supply_out().
516 		 */
517 		if (!(RT->rt_state & RS_IF)
518 		    && RT->rt_gate != myaddr
519 		    && RT->rt_gate != loopaddr)
520 			nhop = RT->rt_gate;
521 		else
522 			nhop = 0;
523 	}
524 
525 	metric = RT->rt_metric;
526 	ags = 0;
527 
528 	if (RT->rt_state & RS_MHOME) {
529 		/* retain host route of multi-homed servers */
530 		;
531 
532 	} else if (RT_ISHOST(RT)) {
533 		/* We should always suppress (into existing network routes)
534 		 * the host routes for the local end of our point-to-point
535 		 * links.
536 		 * If we are suppressing host routes in general, then do so.
537 		 * Avoid advertising host routes onto their own network,
538 		 * where they should be handled by proxy-ARP.
539 		 */
540 		if ((RT->rt_state & RS_LOCAL)
541 		    || ridhosts
542 		    || on_net(dst, ws.to_net, ws.to_mask))
543 			ags |= AGS_SUPPRESS;
544 
545 		/* Aggregate stray host routes into network routes if allowed.
546 		 * We cannot aggregate host routes into small network routes
547 		 * without confusing RIPv1 listeners into thinking the
548 		 * network routes are host routes.
549 		 */
550 		if ((ws.state & WS_ST_AG)
551 		    && !(ws.state & WS_ST_RIP2_ALL))
552 			ags |= AGS_AGGREGATE;
553 
554 	} else {
555 		/* Always suppress network routes into other, existing
556 		 * network routes
557 		 */
558 		ags |= AGS_SUPPRESS;
559 
560 		/* Generate supernets if allowed.
561 		 * If we can be heard by RIPv1 systems, we will
562 		 * later convert back to ordinary nets.
563 		 * This unifies dealing with received supernets.
564 		 */
565 		if ((ws.state & WS_ST_AG)
566 		    && ((RT->rt_state & RS_SUBNET)
567 			|| (ws.state & WS_ST_SUPER_AG)))
568 			ags |= AGS_AGGREGATE;
569 	}
570 
571 	/* Do not send RIPv1 advertisements of subnets to other
572 	 * networks. If possible, multicast them by RIPv2.
573 	 */
574 	if ((RT->rt_state & RS_SUBNET)
575 	    && !(ws.state & WS_ST_RIP2_ALL)
576 	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
577 		ags |= AGS_RIPV2 | AGS_AGGREGATE;
578 
579 
580 	/* Do not send a route back to where it came from, except in
581 	 * response to a query.  This is "split-horizon".  That means not
582 	 * advertising back to the same network and so via the same interface.
583 	 *
584 	 * We want to suppress routes that might have been fragmented
585 	 * from this route by a RIPv1 router and sent back to us, and so we
586 	 * cannot forget this route here.  Let the split-horizon route
587 	 * suppress the fragmented routes and then itself be forgotten.
588 	 *
589 	 * Include the routes for both ends of point-to-point interfaces
590 	 * among those suppressed by split-horizon, since the other side
591 	 * should know them as well as we do.
592 	 *
593 	 * Notice spare routes with the same metric that we are about to
594 	 * advertise, to split the horizon on redundant, inactive paths.
595 	 */
596 	if (ws.ifp != 0
597 	    && !(ws.state & WS_ST_QUERY)
598 	    && (ws.state & WS_ST_TO_ON_NET)
599 	    && (!(RT->rt_state & RS_IF)
600 		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
601 		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
602 			if (rts->rts_metric > metric
603 			    || rts->rts_ifp != ws.ifp)
604 				continue;
605 
606 			/* If we do not mark the route with AGS_SPLIT_HZ here,
607 			 * it will be poisoned-reverse, or advertised back
608 			 * toward its source with an infinite metric.
609 			 * If we have recently advertised the route with a
610 			 * better metric than we now have, then we should
611 			 * poison-reverse the route before suppressing it for
612 			 * split-horizon.
613 			 *
614 			 * In almost all cases, if there is no spare for the
615 			 * route then it is either old and dead or a brand
616 			 * new route. If it is brand new, there is no need
617 			 * for poison-reverse. If it is old and dead, it
618 			 * is already poisoned.
619 			 */
620 			if (RT->rt_poison_time < now_expire
621 			    || RT->rt_poison_metric >= metric
622 			    || RT->rt_spares[1].rts_gate == 0) {
623 				ags |= AGS_SPLIT_HZ;
624 				ags &= ~AGS_SUPPRESS;
625 			}
626 			metric = HOPCNT_INFINITY;
627 			break;
628 		}
629 	}
630 
631 	/* Keep track of the best metric with which the
632 	 * route has been advertised recently.
633 	 */
634 	if (RT->rt_poison_metric >= metric
635 	    || RT->rt_poison_time < now_expire) {
636 		RT->rt_poison_time = now.tv_sec;
637 		RT->rt_poison_metric = metric;
638 	}
639 
640 	/* Adjust the outgoing metric by the cost of the link.
641 	 * Avoid aggregation when a route is counting to infinity.
642 	 */
643 	pref = RT->rt_poison_metric + ws.metric;
644 	metric += ws.metric;
645 
646 	/* Do not advertise stable routes that will be ignored,
647 	 * unless we are answering a query.
648 	 * If the route recently was advertised with a metric that
649 	 * would have been less than infinity through this interface,
650 	 * we need to continue to advertise it in order to poison it.
651 	 */
652 	if (metric >= HOPCNT_INFINITY) {
653 		if (!(ws.state & WS_ST_QUERY)
654 		    && (pref >= HOPCNT_INFINITY
655 			|| RT->rt_poison_time < now_garbage))
656 			return 0;
657 
658 		metric = HOPCNT_INFINITY;
659 	}
660 
661 	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
662 		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
663 	return 0;
664 #undef RT
665 }
666 
667 
668 /* Supply dst with the contents of the routing tables.
669  * If this won't fit in one packet, chop it up into several.
670  */
671 void
672 supply(struct sockaddr_in *dst,
673        struct interface *ifp,		/* output interface */
674        enum output_type type,
675        int flash,			/* 1=flash update */
676        int vers,			/* RIP version */
677        int passwd_ok)			/* OK to include cleartext password */
678 {
679 	struct rt_entry *rt;
680 	int def_metric;
681 
682 
683 	ws.state = 0;
684 	ws.gen_limit = 1024;
685 
686 	ws.to = *dst;
687 	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
688 	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;
689 
690 	if (ifp != 0) {
691 		ws.to_mask = ifp->int_mask;
692 		ws.to_net = ifp->int_net;
693 		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
694 			ws.state |= WS_ST_TO_ON_NET;
695 
696 	} else {
697 		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
698 		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
699 		rt = rtfind(dst->sin_addr.s_addr);
700 		if (rt)
701 			ifp = rt->rt_ifp;
702 	}
703 
704 	ws.npackets = 0;
705 	if (flash)
706 		ws.state |= WS_ST_FLASH;
707 
708 	if ((ws.ifp = ifp) == 0) {
709 		ws.metric = 1;
710 	} else {
711 		/* Adjust the advertised metric by the outgoing interface
712 		 * metric.
713 		 */
714 		ws.metric = ifp->int_metric+1;
715 	}
716 
717 	ripv12_buf.rip.rip_vers = vers;
718 
719 	switch (type) {
720 	case OUT_MULTICAST:
721 		if (ifp->int_if_flags & IFF_MULTICAST)
722 			v2buf.type = OUT_MULTICAST;
723 		else
724 			v2buf.type = NO_OUT_MULTICAST;
725 		v12buf.type = OUT_BROADCAST;
726 		break;
727 
728 	case OUT_QUERY:
729 		ws.state |= WS_ST_QUERY;
730 		/* fall through */
731 	case OUT_BROADCAST:
732 	case OUT_UNICAST:
733 		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
734 		v12buf.type = type;
735 		break;
736 
737 	case NO_OUT_MULTICAST:
738 	case NO_OUT_RIPV2:
739 		break;			/* no output */
740 	}
741 
742 	if (vers == RIPv2) {
743 		/* full RIPv2 only if it cannot be heard by RIPv1 listeners */
744 		if (type != OUT_BROADCAST)
745 			ws.state |= WS_ST_RIP2_ALL;
746 		if ((ws.state & WS_ST_QUERY)
747 		    || !(ws.state & WS_ST_TO_ON_NET)) {
748 			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
749 		} else if (ifp == 0 || !(ifp->int_state & IS_NO_AG)) {
750 			ws.state |= WS_ST_AG;
751 			if (type != OUT_BROADCAST
752 			    && (ifp == 0
753 				|| !(ifp->int_state & IS_NO_SUPER_AG)))
754 				ws.state |= WS_ST_SUPER_AG;
755 		}
756 	}
757 
758 	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
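	/* Drop a cleartext password if the caller has not authorized
	 * sending one; MD5 keys are unaffected.
	 */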
759 	if (!passwd_ok && ws.a != 0 && ws.a->type == RIP_AUTH_PW)
760 		ws.a = 0;
761 	clr_ws_buf(&v12buf,ws.a);
762 	clr_ws_buf(&v2buf,ws.a);
763 
764 	/*  Fake a default route if asked and if there is not already
765 	 * a better, real default route.
766 	 */
767 	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
768 		if (0 == (rt = rtget(RIP_DEFAULT, 0))
769 		    || rt->rt_metric+ws.metric >= def_metric) {
770 			ws.state |= WS_ST_DEFAULT;
771 			ag_check(0, 0, 0, 0, def_metric, def_metric,
772 				 0, 0, 0, supply_out);
773 		} else {
774 			def_metric = rt->rt_metric+ws.metric;
775 		}
776 
777 		/* If both RIPv2 and the poor-man's router discovery
778 		 * kludge are on, arrange to advertise an extra
779 		 * default route via RIPv1.
780 		 */
781 		if ((ws.state & WS_ST_RIP2_ALL)
782 		    && (ifp->int_state & IS_PM_RDISC)) {
783 			ripv12_buf.rip.rip_vers = RIPv1;
784 			v12buf.n->n_family = RIP_AF_INET;
785 			v12buf.n->n_dst = htonl(RIP_DEFAULT);
786 			v12buf.n->n_metric = htonl(def_metric);
787 			v12buf.n++;
788 		}
789 	}
790 
791 	rn_walktree(rhead, walk_supply, 0);
792 	ag_flush(0,0,supply_out);
793 
794 	/* Flush the packet buffers, provided they are not empty and
795 	 * do not contain only the password.
796 	 */
797 	if (v12buf.n != v12buf.base
798 	    && (v12buf.n > v12buf.base+1
799 		|| v12buf.base->n_family != RIP_AF_AUTH))
800 		supply_write(&v12buf);
801 	if (v2buf.n != v2buf.base
802 	    && (v2buf.n > v2buf.base+1
803 		|| v2buf.base->n_family != RIP_AF_AUTH))
804 		supply_write(&v2buf);
805 
806 	/* If we sent nothing and this is an answer to a query, send
807 	 * an empty buffer.
808 	 */
809 	if (ws.npackets == 0
810 	    && (ws.state & WS_ST_QUERY))
811 		supply_write(&v12buf);
812 }
813 
814 
815 /* send all of the routing table or just do a flash update
816  */
817 void
818 rip_bcast(int flash)
819 {
820 #ifdef _HAVE_SIN_LEN
821 	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
822 #else
823 	static struct sockaddr_in dst = {AF_INET};
824 #endif
825 	struct interface *ifp;
826 	enum output_type type;
827 	int vers;
828 	struct timeval rtime;
829 
830 
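	/* Clear the pending-flash flag and pick a random delay during
	 * which further dynamic (flash) updates are inhibited, as the
	 * trace message below reports.
	 */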
831 	need_flash = 0;
832 	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
833 	no_flash = rtime;
834 	timevaladd(&no_flash, &now);
835 
836 	if (rip_sock < 0)
837 		return;
838 
839 	trace_act("send %s and inhibit dynamic updates for %.3f sec",
840 		  flash ? "dynamic update" : "all routes",
841 		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);
842 
843 	for (ifp = ifnet; ifp != 0; ifp = ifp->int_next) {
844 		/* Skip interfaces not doing RIP.
845 		 * Do try broken interfaces to see if they have healed.
846 		 */
847 		if (IS_RIP_OUT_OFF(ifp->int_state))
848 			continue;
849 
850 		/* skip turned off interfaces */
851 		if (!iff_up(ifp->int_if_flags))
852 			continue;
853 
854 		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;
855 
856 		if (ifp->int_if_flags & IFF_BROADCAST) {
857 			/* ordinary, hardware interface */
858 			dst.sin_addr.s_addr = ifp->int_brdaddr;
859 
860 			if (vers == RIPv2
861 			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
862 				type = OUT_MULTICAST;
863 			} else {
864 				type = OUT_BROADCAST;
865 			}
866 
867 		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
868 			/* point-to-point hardware interface */
869 			dst.sin_addr.s_addr = ifp->int_dstaddr;
870 			type = OUT_UNICAST;
871 
872 		} else if (ifp->int_state & IS_REMOTE) {
873 			/* remote interface */
874 			dst.sin_addr.s_addr = ifp->int_addr;
875 			type = OUT_UNICAST;
876 
877 		} else {
878 			/* ATM, HIPPI, etc. */
879 			continue;
880 		}
881 
882 		supply(&dst, ifp, type, flash, vers, 1);
883 	}
884 
885 	update_seqno++;			/* all routes are up to date */
886 }
887 
888 
889 /* Ask for routes
890  * Do it only once to an interface, and not even after the interface
891  * was broken and recovered.
892  */
893 void
894 rip_query(void)
895 {
896 #ifdef _HAVE_SIN_LEN
897 	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
898 #else
899 	static struct sockaddr_in dst = {AF_INET};
900 #endif
901 	struct interface *ifp;
902 	struct rip buf;
903 	enum output_type type;
904 
905 
906 	if (rip_sock < 0)
907 		return;
908 
909 	memset(&buf, 0, sizeof(buf));
910 
911 	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
912 		/* Skip interfaces that have already been queried.
913 		 * Do not ask via interfaces through which we don't
914 		 * accept input.  Do not ask via interfaces that cannot
915 		 * send RIP packets.
916 		 * Do try broken interfaces to see if they have healed.
917 		 */
918 		if (IS_RIP_IN_OFF(ifp->int_state)
919 		    || ifp->int_query_time != NEVER)
920 			continue;
921 
922 		/* skip turned off interfaces */
923 		if (!iff_up(ifp->int_if_flags))
924 			continue;
925 
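		/* A request whose single entry has family RIP_AF_UNSPEC and
		 * an infinite metric asks the receiver for its complete
		 * routing table (RFC 1058 section 3.4.1).
		 */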
926 		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
927 		buf.rip_cmd = RIPCMD_REQUEST;
928 		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
929 		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);
930 
931 		/* Send a RIPv1 query only if allowed and if we will
932 		 * listen to RIPv1 routers.
933 		 */
934 		if ((ifp->int_state & IS_NO_RIPV1_OUT)
935 		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
936 			buf.rip_vers = RIPv2;
937 		} else {
938 			buf.rip_vers = RIPv1;
939 		}
940 
941 		if (ifp->int_if_flags & IFF_BROADCAST) {
942 			/* ordinary, hardware interface */
943 			dst.sin_addr.s_addr = ifp->int_brdaddr;
944 
945 			/* Broadcast RIPv1 queries and RIPv2 queries
946 			 * when the hardware cannot multicast.
947 			 */
948 			if (buf.rip_vers == RIPv2
949 			    && (ifp->int_if_flags & IFF_MULTICAST)
950 			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
951 				type = OUT_MULTICAST;
952 			} else {
953 				type = OUT_BROADCAST;
954 			}
955 
956 		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
957 			/* point-to-point hardware interface */
958 			dst.sin_addr.s_addr = ifp->int_dstaddr;
959 			type = OUT_UNICAST;
960 
961 		} else if (ifp->int_state & IS_REMOTE) {
962 			/* remote interface */
963 			dst.sin_addr.s_addr = ifp->int_addr;
964 			type = OUT_UNICAST;
965 
966 		} else {
967 			/* ATM, HIPPI, etc. */
968 			continue;
969 		}
970 
971 		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
972 		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
973 			if_sick(ifp);
974 	}
975 }
976