/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "defs.h"

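/* Sequence number of routing table updates: supply_out() skips routes
 * whose ag_seqno is older than this during flash ("triggered") updates,
 * and rip_bcast() advances it once every route has been advertised.
 */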
u_int update_seqno;


/* walk the tree of routes with this for output
 */
static struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_QUERY	    0x010	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x040	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
static union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
static struct ws_buf v2buf;
static union pkt_buf rip_v2_buf;



void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,		/* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)			/* this many bytes */
{
	struct sockaddr_in osin;
	int flags;
	const char *msg;
	int res;
	int soc;
	int serrno;

	assert(ifp != NULL);
	osin = *dst;
	if (osin.sin_port == 0)
		osin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (osin.sin_len == 0)
		osin.sin_len = sizeof(osin);
#endif

	soc = rip_sock;
	flags = 0;

	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if ((ifp->int_if_flags & (IFF_POINTOPOINT|IFF_MULTICAST)) ==
		    IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
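			/* Switch the kernel's outgoing multicast interface
			 * only when it differs from the one used last, so
			 * we do not pay for a setsockopt() on every packet.
			 */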
			if (rip_sock_mcast != ifp) {
				struct ip_mreqn mreqn;

				memset(&mreqn, 0, sizeof(struct ip_mreqn));
				mreqn.imr_ifindex = ifp->int_index;
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP,
						   IP_MULTICAST_IF,
						   &mreqn,
						   sizeof(mreqn))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock, "
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = NULL;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			osin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
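		/* These pseudo-types only mark buffers that must not be
		 * transmitted; they should never reach output(), so crash
		 * in debug builds to expose the bad caller.
		 */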
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &osin, ifp, buf, size);

	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&osin, sizeof(osin));
	if (res < 0
	    && (ifp == NULL || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != NULL ? ifp->int_name : "",
		       ifp != NULL ? ", " : "",
		       inet_ntoa(osin.sin_addr),
		       ntohs(osin.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;


	if (ifp == NULL)
		return 0;

	res = NULL;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == NULL || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == NULL || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}
	return res;
}


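/* Reset a walk-state buffer so it holds no routes and, when authentication
 * is in use, consume the first entry for the RIP_AF_AUTH header; for MD5
 * the last entry is also reserved for the trailer added by end_md5_auth().
 */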
void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

	/* (start to) install authentication if appropriate
	 */
	if (ap == NULL)
		return;

	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type ==  RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_KEY_LEN;
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}


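/* Finish keyed-MD5 authentication (in the style of RFC 2082): append a
 * trailer entry, record the packet length in the authentication header,
 * and fill the trailer with the MD5 digest of the packet followed by the
 * secret key.
 */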
void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;


	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = htons(1);
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)wb->buf, len + RIP_AUTH_MD5_HASH_XTRA);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_KEY_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != NULL)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;


	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;
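			/* For example (hypothetically advertising
			 * 192.0.0.0/8, where the RIPv1 classful mask would
			 * be 255.255.255.0): ddst_h is 0x100 and i is 65535,
			 * far more component routes than gen_limit allows,
			 * so the check below punts.
			 */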

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
						   " instead of %d routes",
						   addrname(htonl(dst_h), mask,
							1),
						   naddr_ntoa(ws.to.sin_addr
							.s_addr),
						   i+1);
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}

	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		wb->n->n_metric = htonl(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
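/* Each radix node in the routing table is embedded at the start of its
 * struct rt_entry, so the cast above recovers the full route.
 */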
	u_short ags;
	char metric, pref;
	naddr dst, nhop;
	struct rt_spare *rts;
	int i;


	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/* Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;

	} else {
		/* Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG)
		    && ((RT->rt_state & RS_SUBNET)
			|| (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;
	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks. If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;


	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 *
	 * Do not suppress advertisements of interface-related addresses on
	 * non-point-to-point interfaces.  This ensures that we have something
	 * to say every 30 seconds to help detect broken Ethernets or
	 * other interfaces where one packet every 30 seconds costs nothing.
	 */
	if (ws.ifp != NULL
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
			if (rts->rts_metric > metric
			    || rts->rts_ifp != ws.ifp)
				continue;

			/* If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route. If it is brand new, there is no need
			 * for poison-reverse. If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire
			    || RT->rt_poison_metric >= metric
			    || RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/* Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric
	    || RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/* Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;

	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != NULL) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;

	if ((ws.ifp = ifp) == NULL) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric + 1 + ifp->int_adj_outmetric;
	}

	ripv12_buf.rip.rip_vers = vers;

	switch (type) {
	case OUT_MULTICAST:
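		/* RIPv1 listeners cannot hear a RIPv2 multicast, so the
		 * shared RIPv1/RIPv2 buffer is broadcast instead.
		 */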
		if (ifp != NULL && ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* FALLTHROUGH */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		break;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
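		/* Aggregation is unrestricted when answering a query or when
		 * not sending onto one of our own networks; otherwise it is
		 * governed by the interface's IS_NO_AG and IS_NO_SUPER_AG
		 * state bits.
		 */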
		if ((ws.state & WS_ST_QUERY)
		    || !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == NULL
				|| !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
		ws.a = NULL;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/*  Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && ifp && (def_metric = ifp->int_d_metric) != 0) {
		if ((rt = rtget(RIP_DEFAULT, 0)) == NULL
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

	(void)rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;


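	/* Hold off further flash (triggered) updates for a random interval
	 * so that routers on the net do not synchronize their updates.
	 */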
	need_flash = 0;
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	LIST_FOREACH(ifp, &ifnet, int_list) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			if (vers == RIPv2
			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			if (vers == RIPv2 &&
			    ifp->int_if_flags & IFF_MULTICAST &&
			    !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_UNICAST;
			}

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;


	if (rip_sock < 0)
		return;

	memset(&buf, 0, sizeof(buf));

	LIST_FOREACH(ifp, &ifnet, int_list) {
		/* Skip interfaces already queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/* Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT)
		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* Broadcast RIPv1 queries and RIPv2 queries
			 * when the hardware cannot multicast.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_if_flags & IFF_MULTICAST)
			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}
965