xref: /dragonfly/sbin/routed/output.c (revision 0212bfce)
1 /*
2  * Copyright (c) 1983, 1988, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * @(#)output.c	8.1 (Berkeley) 6/5/93
30  * $FreeBSD: src/sbin/routed/output.c,v 1.5.2.1 2000/08/14 17:00:03 sheldonh Exp $
31  */
32 
33 #include "defs.h"
34 
35 u_int update_seqno;
36 
37 
38 /* walk the tree of routes with this for output
39  */
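/* supply() fills this in before each walk of the route table;
 * walk_supply() and supply_out() consult it for every route examined.
 */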
40 struct {
41 	struct sockaddr_in to;
42 	naddr	to_mask;
43 	naddr	to_net;
44 	naddr	to_std_mask;
45 	naddr	to_std_net;
46 	struct interface *ifp;		/* usually output interface */
47 	struct auth *a;
48 	char	metric;			/* adjust metrics by interface */
49 	int	npackets;
50 	int	gen_limit;
51 	u_int	state;
52 #define	    WS_ST_FLASH	    0x001	/* send only changed routes */
53 #define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
54 #define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
55 #define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
56 #define	    WS_ST_QUERY	    0x010	/* responding to a query */
57 #define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
58 #define	    WS_ST_DEFAULT   0x040	/* faking a default */
59 } ws;
60 
61 /* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
62 struct ws_buf v12buf;
63 union pkt_buf ripv12_buf;
64 
65 /* Another for only RIPv2 listeners */
66 struct ws_buf v2buf;
67 union pkt_buf rip_v2_buf;
68 
69 
70 
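/* Initialize the output buffers: point each ws_buf at its packet, mark both
 * packets as RIP responses, and stamp the RIPv2-only buffer with version 2.
 * The shared buffer's version is set per destination in supply().
 */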
71 void
72 bufinit(void)
73 {
74 	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
75 	v12buf.buf = &ripv12_buf.rip;
76 	v12buf.base = &v12buf.buf->rip_nets[0];
77 
78 	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
79 	rip_v2_buf.rip.rip_vers = RIPv2;
80 	v2buf.buf = &rip_v2_buf.rip;
81 	v2buf.base = &v2buf.buf->rip_nets[0];
82 }
83 
84 
85 /* Send a RIP packet to the given destination via the appropriate socket
86  */
87 int					/* <0 on failure */
88 output(enum output_type type,
89        struct sockaddr_in *dst,		/* send to here */
90        struct interface *ifp,
91        struct rip *buf,
92        int size)			/* this many bytes */
93 {
94 	struct sockaddr_in in;
95 	int flags;
96 	const char *msg;
97 	int res;
98 	naddr tgt_mcast;
99 	int soc;
100 	int serrno;
101 
102 	in = *dst;
103 	if (in.sin_port == 0)
104 		in.sin_port = htons(RIP_PORT);
105 #ifdef _HAVE_SIN_LEN
106 	if (in.sin_len == 0)
107 		in.sin_len = sizeof(in);
108 #endif
109 
110 	soc = rip_sock;
111 	flags = 0;
112 
113 	switch (type) {
114 	case OUT_QUERY:
115 		msg = "Answer Query";
116 		if (soc < 0)
117 			soc = ifp->int_rip_sock;
118 		break;
119 	case OUT_UNICAST:
120 		msg = "Send";
121 		if (soc < 0)
122 			soc = ifp->int_rip_sock;
123 		flags = MSG_DONTROUTE;
124 		break;
125 	case OUT_BROADCAST:
126 		if (ifp->int_if_flags & IFF_POINTOPOINT) {
127 			msg = "Send";
128 		} else {
129 			msg = "Send bcast";
130 		}
131 		flags = MSG_DONTROUTE;
132 		break;
133 	case OUT_MULTICAST:
134 		if (ifp->int_if_flags & IFF_POINTOPOINT) {
135 			msg = "Send pt-to-pt";
136 		} else if (ifp->int_state & IS_DUP) {
137 			trace_act("abort multicast output via %s"
138 				  " with duplicate address",
139 				  ifp->int_name);
140 			return 0;
141 		} else {
142 			msg = "Send mcast";
143 			if (rip_sock_mcast != ifp) {
144 #ifdef MCAST_PPP_BUG
145 				/* Do not specify the primary interface
146 				 * explicitly if we have the multicast
147 				 * point-to-point kernel bug, since the
148 				 * kernel will do the wrong thing if the
149 				 * local address of a point-to-point link
150 				 * is the same as the address of an ordinary
151 				 * interface.
152 				 */
153 				if (ifp->int_addr == myaddr) {
154 					tgt_mcast = 0;
155 				} else
156 #endif
157 				tgt_mcast = ifp->int_addr;
158 				if (0 > setsockopt(rip_sock,
159 						   IPPROTO_IP, IP_MULTICAST_IF,
160 						   &tgt_mcast,
161 						   sizeof(tgt_mcast))) {
162 					serrno = errno;
163 					LOGERR("setsockopt(rip_sock,"
164 					       "IP_MULTICAST_IF)");
165 					errno = serrno;
166 					ifp = NULL;
167 					return -1;
168 				}
169 				rip_sock_mcast = ifp;
170 			}
171 			in.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
172 		}
173 		break;
174 
175 	case NO_OUT_MULTICAST:
176 	case NO_OUT_RIPV2:
177 	default:
178 #ifdef DEBUG
179 		abort();
180 #endif
181 		return -1;
182 	}
183 
184 	trace_rip(msg, "to", &in, ifp, buf, size);
185 
186 	res = sendto(soc, buf, size, flags,
187 		     (struct sockaddr *)&in, sizeof(in));
188 	if (res < 0
189 	    && (ifp == NULL || !(ifp->int_state & IS_BROKE))) {
190 		serrno = errno;
191 		msglog("%s sendto(%s%s%s.%d): %s", msg,
192 		       ifp != NULL ? ifp->int_name : "",
193 		       ifp != NULL ? ", " : "",
194 		       inet_ntoa(in.sin_addr),
195 		       ntohs(in.sin_port),
196 		       strerror(errno));
197 		errno = serrno;
198 	}
199 
200 	return res;
201 }
202 
203 
204 /* Find the first key for a packet to send.
205  * Try for a key that is eligible and has not expired, but settle for
206  * the key that expired most recently if they have all expired.
207  * If no key is ready yet, give up.
208  */
209 struct auth *
210 find_auth(struct interface *ifp)
211 {
212 	struct auth *ap, *res;
213 	int i;
214 
215 
216 	if (ifp == NULL)
217 		return 0;
218 
219 	res = NULL;
220 	ap = ifp->int_auth;
221 	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
222 		/* stop looking after the last key */
223 		if (ap->type == RIP_AUTH_NONE)
224 			break;
225 
226 		/* ignore keys that are not ready yet */
227 		if ((u_long)ap->start > (u_long)clk.tv_sec)
228 			continue;
229 
230 		if ((u_long)ap->end < (u_long)clk.tv_sec) {
231 			/* note best expired password as a fall-back */
232 			if (res == NULL || (u_long)ap->end > (u_long)res->end)
233 				res = ap;
234 			continue;
235 		}
236 
237 		/* note key with the best future */
238 		if (res == NULL || (u_long)res->end < (u_long)ap->end)
239 			res = ap;
240 	}
241 	return res;
242 }
243 
244 
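/* Reset a route buffer to empty.  If an authentication key is supplied,
 * consume the first route entry for the password or MD5 header; for MD5,
 * also reserve the final slot for the digest trailer added by end_md5_auth().
 */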
245 void
246 clr_ws_buf(struct ws_buf *wb,
247 	   struct auth *ap)
248 {
249 	struct netauth *na;
250 
251 	wb->lim = wb->base + NETS_LEN;
252 	wb->n = wb->base;
253 	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));
254 
255 	/* (start to) install authentication if appropriate
256 	 */
257 	if (ap == NULL)
258 		return;
259 
260 	na = (struct netauth*)wb->n;
261 	if (ap->type == RIP_AUTH_PW) {
262 		na->a_family = RIP_AF_AUTH;
263 		na->a_type = RIP_AUTH_PW;
264 		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
265 		wb->n++;
266 
267 	} else if (ap->type ==  RIP_AUTH_MD5) {
268 		na->a_family = RIP_AF_AUTH;
269 		na->a_type = RIP_AUTH_MD5;
270 		na->au.a_md5.md5_keyid = ap->keyid;
271 		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_LEN;
272 		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
273 		wb->n++;
274 		wb->lim--;		/* make room for trailer */
275 	}
276 }
277 
278 
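/* Complete a packet that uses keyed-MD5 authentication: record the packet
 * length in the leading authentication entry, then append a trailer entry
 * carrying the MD5 digest computed over the packet followed by the secret key.
 */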
279 void
280 end_md5_auth(struct ws_buf *wb,
281 	     struct auth *ap)
282 {
283 	struct netauth *na, *na2;
284 	MD5_CTX md5_ctx;
285 	int len;
286 
287 
288 	na = (struct netauth*)wb->base;
289 	na2 = (struct netauth*)wb->n;
290 	len = (char *)na2-(char *)wb->buf;
291 	na2->a_family = RIP_AF_AUTH;
292 	na2->a_type = htons(1);		/* type 1 marks the digest trailer */
293 	na->au.a_md5.md5_pkt_len = htons(len);
294 	MD5_Init(&md5_ctx);
295 	MD5_Update(&md5_ctx, (u_char *)wb->buf, len);
296 	MD5_Update(&md5_ctx, ap->key, RIP_AUTH_MD5_LEN);
297 	MD5_Final(na2->au.au_pw, &md5_ctx);
298 	wb->n++;
299 }
300 
301 
302 /* Send the buffer
303  */
304 static void
305 supply_write(struct ws_buf *wb)
306 {
307 	/* Output multicast only if legal.
308 	 * If we would multicast and it would be illegal, then discard the
309 	 * packet.
310 	 */
311 	switch (wb->type) {
312 	case NO_OUT_MULTICAST:
313 		trace_pkt("skip multicast to %s because impossible",
314 			  naddr_ntoa(ws.to.sin_addr.s_addr));
315 		break;
316 	case NO_OUT_RIPV2:
317 		break;
318 	default:
319 		if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
320 			end_md5_auth(wb,ws.a);
321 		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
322 			   ((char *)wb->n - (char*)wb->buf)) < 0
323 		    && ws.ifp != NULL)
324 			if_sick(ws.ifp);
325 		ws.npackets++;
326 		break;
327 	}
328 
329 	clr_ws_buf(wb,ws.a);
330 }
331 
332 
333 /* put an entry into the packet
334  */
335 static void
336 supply_out(struct ag_info *ag)
337 {
338 	int i;
339 	naddr mask, v1_mask, dst_h, ddst_h = 0;
340 	struct ws_buf *wb;
341 
342 
343 	/* Skip this route during a flash update if neither it nor the routes
344 	 * it aggregates have changed recently.
345 	 */
346 	if (ag->ag_seqno < update_seqno
347 	    && (ws.state & WS_ST_FLASH))
348 		return;
349 
350 	dst_h = ag->ag_dst_h;
351 	mask = ag->ag_mask;
352 	v1_mask = ripv1_mask_host(htonl(dst_h),
353 				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
354 	i = 0;
355 
356 	/* If we are sending RIPv2 packets that cannot (or must not) be
357 	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
358 	 * Subnets (from other networks) can only be sent via multicast.
359 	 * A pair of subnet routes might have been promoted so that they
360 	 * are legal to send by RIPv1.
361 	 * If RIPv1 is off, use the multicast buffer.
362 	 */
363 	if ((ws.state & WS_ST_RIP2_ALL)
364 	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
365 		/* use the RIPv2-only buffer */
366 		wb = &v2buf;
367 
368 	} else {
369 		/* use the RIPv1-or-RIPv2 buffer */
370 		wb = &v12buf;
371 
372 		/* Convert supernet route into corresponding set of network
373 		 * routes for RIPv1, but leave non-contiguous netmasks
374 		 * to ag_check().
375 		 */
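		/* (mask & -mask) isolates the lowest set bit of the mask, so the
		 * sum below wraps to zero only for a contiguous netmask, e.g.
		 * 0xffffff00 + 0x00000100 == 0 (mod 2^32) but 0xffff00ff + 1 != 0.
		 */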
376 		if (v1_mask > mask
377 		    && mask + (mask & -mask) == 0) {
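			/* ddst_h is the stride between the generated RIPv1 networks
			 * and i is the number of extra routes needed, e.g. a /22
			 * aggregate of class-C space expands into 4 /24 routes (i == 3).
			 */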
378 			ddst_h = v1_mask & -v1_mask;
379 			i = (v1_mask & ~mask)/ddst_h;
380 
381 			if (i > ws.gen_limit) {
382 				/* Punt if we would have to generate an
383 				 * unreasonable number of routes.
384 				 */
385 				if (TRACECONTENTS)
386 					trace_misc("sending %s-->%s as 1"
387 						   " instead of %d routes",
388 						   addrname(htonl(dst_h), mask,
389 							1),
390 						   naddr_ntoa(ws.to.sin_addr
391 							.s_addr),
392 						   i+1);
393 				i = 0;
394 
395 			} else {
396 				mask = v1_mask;
397 				ws.gen_limit -= i;
398 			}
399 		}
400 	}
401 
402 	do {
403 		wb->n->n_family = RIP_AF_INET;
404 		wb->n->n_dst = htonl(dst_h);
405 		/* If the route is from router-discovery or we are
406 		 * shutting down, admit only a bad metric.
407 		 */
408 		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
409 				   ? HOPCNT_INFINITY
410 				   : ag->ag_metric);
411 		wb->n->n_metric = htonl(wb->n->n_metric);
412 		/* Any non-zero bits in the supposedly unused RIPv1 fields
413 		 * cause the old `routed` to ignore the route.
414 		 * That means the mask and so forth cannot be sent
415 		 * in the hybrid RIPv1/RIPv2 mode.
416 		 */
417 		if (ws.state & WS_ST_RIP2_ALL) {
418 			if (ag->ag_nhop != 0
419 			    && ((ws.state & WS_ST_QUERY)
420 				|| (ag->ag_nhop != ws.ifp->int_addr
421 				    && on_net(ag->ag_nhop,
422 					      ws.ifp->int_net,
423 					      ws.ifp->int_mask))))
424 				wb->n->n_nhop = ag->ag_nhop;
425 			wb->n->n_mask = htonl(mask);
426 			wb->n->n_tag = ag->ag_tag;
427 		}
428 		dst_h += ddst_h;
429 
430 		if (++wb->n >= wb->lim)
431 			supply_write(wb);
432 	} while (i-- != 0);
433 }
434 
435 
436 /* supply one route from the table
437  */
438 /* ARGSUSED */
439 static int
440 walk_supply(struct radix_node *rn, __unused struct walkarg *argp)
441 {
442 #define RT ((struct rt_entry *)rn)
443 	u_short ags;
444 	char metric, pref;
445 	naddr dst, nhop;
446 	struct rt_spare *rts;
447 	int i;
448 
449 
450 	/* Do not advertise external remote interfaces or passive interfaces.
451 	 */
452 	if ((RT->rt_state & RS_IF)
453 	    && RT->rt_ifp != 0
454 	    && (RT->rt_ifp->int_state & IS_PASSIVE)
455 	    && !(RT->rt_state & RS_MHOME))
456 		return 0;
457 
458 	/* If being quiet about our ability to forward, then
459 	 * do not say anything unless responding to a query,
460 	 * except about our main interface.
461 	 */
462 	if (!supplier && !(ws.state & WS_ST_QUERY)
463 	    && !(RT->rt_state & RS_MHOME))
464 		return 0;
465 
466 	dst = RT->rt_dst;
467 
468 	/* do not collide with the fake default route */
469 	if (dst == RIP_DEFAULT
470 	    && (ws.state & WS_ST_DEFAULT))
471 		return 0;
472 
473 	if (RT->rt_state & RS_NET_SYN) {
474 		if (RT->rt_state & RS_NET_INT) {
475 			/* Do not send manual synthetic network routes
476 			 * into the subnet.
477 			 */
478 			if (on_net(ws.to.sin_addr.s_addr,
479 				   ntohl(dst), RT->rt_mask))
480 				return 0;
481 
482 		} else {
483 			/* Do not send automatic synthetic network routes
484 			 * if they are not needed because no RIPv1 listeners
485 			 * can hear them.
486 			 */
487 			if (ws.state & WS_ST_RIP2_ALL)
488 				return 0;
489 
490 			/* Do not send automatic synthetic network routes to
491 			 * the real subnet.
492 			 */
493 			if (on_net(ws.to.sin_addr.s_addr,
494 				   ntohl(dst), RT->rt_mask))
495 				return 0;
496 		}
497 		nhop = 0;
498 
499 	} else {
500 		/* Advertise the next hop if this is not a route for one
501 		 * of our interfaces and the next hop is on the same
502 		 * network as the target.
503 		 * The final determination is made by supply_out().
504 		 */
505 		if (!(RT->rt_state & RS_IF)
506 		    && RT->rt_gate != myaddr
507 		    && RT->rt_gate != loopaddr)
508 			nhop = RT->rt_gate;
509 		else
510 			nhop = 0;
511 	}
512 
513 	metric = RT->rt_metric;
514 	ags = 0;
515 
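	/* Decide how this route may be suppressed or aggregated. */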
516 	if (RT->rt_state & RS_MHOME) {
517 		/* retain host route of multi-homed servers */
518 		;
519 
520 	} else if (RT_ISHOST(RT)) {
521 		/* We should always suppress (into existing network routes)
522 		 * the host routes for the local end of our point-to-point
523 		 * links.
524 		 * If we are suppressing host routes in general, then do so.
525 		 * Avoid advertising host routes onto their own network,
526 		 * where they should be handled by proxy-ARP.
527 		 */
528 		if ((RT->rt_state & RS_LOCAL)
529 		    || ridhosts
530 		    || on_net(dst, ws.to_net, ws.to_mask))
531 			ags |= AGS_SUPPRESS;
532 
533 		/* Aggregate stray host routes into network routes if allowed.
534 		 * We cannot aggregate host routes into small network routes
535 		 * without confusing RIPv1 listeners into thinking the
536 		 * network routes are host routes.
537 		 */
538 		if ((ws.state & WS_ST_AG)
539 		    && !(ws.state & WS_ST_RIP2_ALL))
540 			ags |= AGS_AGGREGATE;
541 
542 	} else {
543 		/* Always suppress network routes into other, existing
544 		 * network routes
545 		 */
546 		ags |= AGS_SUPPRESS;
547 
548 		/* Generate supernets if allowed.
549 		 * If we can be heard by RIPv1 systems, we will
550 		 * later convert back to ordinary nets.
551 		 * This unifies dealing with received supernets.
552 		 */
553 		if ((ws.state & WS_ST_AG)
554 		    && ((RT->rt_state & RS_SUBNET)
555 			|| (ws.state & WS_ST_SUPER_AG)))
556 			ags |= AGS_AGGREGATE;
557 	}
558 
559 	/* Do not send RIPv1 advertisements of subnets to other
560 	 * networks. If possible, multicast them by RIPv2.
561 	 */
562 	if ((RT->rt_state & RS_SUBNET)
563 	    && !(ws.state & WS_ST_RIP2_ALL)
564 	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
565 		ags |= AGS_RIPV2 | AGS_AGGREGATE;
566 
567 
568 	/* Do not send a route back to where it came from, except in
569 	 * response to a query.  This is "split-horizon".  That means not
570  * advertising back to the same network and so via the same interface.
571 	 *
572 	 * We want to suppress routes that might have been fragmented
573 	 * from this route by a RIPv1 router and sent back to us, and so we
574 	 * cannot forget this route here.  Let the split-horizon route
575 	 * suppress the fragmented routes and then itself be forgotten.
576 	 *
577 	 * Include the routes for both ends of point-to-point interfaces
578 	 * among those suppressed by split-horizon, since the other side
579  * should know them as well as we do.
580 	 *
581 	 * Notice spare routes with the same metric that we are about to
582 	 * advertise, to split the horizon on redundant, inactive paths.
583 	 */
584 	if (ws.ifp != NULL
585 	    && !(ws.state & WS_ST_QUERY)
586 	    && (ws.state & WS_ST_TO_ON_NET)
587 	    && (!(RT->rt_state & RS_IF)
588 		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
589 		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
590 			if (rts->rts_metric > metric
591 			    || rts->rts_ifp != ws.ifp)
592 				continue;
593 
594 			/* If we do not mark the route with AGS_SPLIT_HZ here,
595 			 * it will be poisoned-reverse, or advertised back
596 			 * toward its source with an infinite metric.
597 			 * If we have recently advertised the route with a
598 			 * better metric than we now have, then we should
599 			 * poison-reverse the route before suppressing it for
600 			 * split-horizon.
601 			 *
602 			 * In almost all cases, if there is no spare for the
603 			 * route then it is either old and dead or a brand
604 			 * new route. If it is brand new, there is no need
605 			 * for poison-reverse. If it is old and dead, it
606 			 * is already poisoned.
607 			 */
608 			if (RT->rt_poison_time < now_expire
609 			    || RT->rt_poison_metric >= metric
610 			    || RT->rt_spares[1].rts_gate == 0) {
611 				ags |= AGS_SPLIT_HZ;
612 				ags &= ~AGS_SUPPRESS;
613 			}
614 			metric = HOPCNT_INFINITY;
615 			break;
616 		}
617 	}
618 
619 	/* Keep track of the best metric with which the
620 	 * route has been advertised recently.
621 	 */
622 	if (RT->rt_poison_metric >= metric
623 	    || RT->rt_poison_time < now_expire) {
624 		RT->rt_poison_time = now.tv_sec;
625 		RT->rt_poison_metric = metric;
626 	}
627 
628 	/* Adjust the outgoing metric by the cost of the link.
629 	 * Avoid aggregation when a route is counting to infinity.
630 	 */
631 	pref = RT->rt_poison_metric + ws.metric;
632 	metric += ws.metric;
633 
634 	/* Do not advertise stable routes that will be ignored,
635 	 * unless we are answering a query.
636 	 * If the route recently was advertised with a metric that
637 	 * would have been less than infinity through this interface,
638 	 * we need to continue to advertise it in order to poison it.
639 	 */
640 	if (metric >= HOPCNT_INFINITY) {
641 		if (!(ws.state & WS_ST_QUERY)
642 		    && (pref >= HOPCNT_INFINITY
643 			|| RT->rt_poison_time < now_garbage))
644 			return 0;
645 
646 		metric = HOPCNT_INFINITY;
647 	}
648 
649 	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
650 		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
651 	return 0;
652 #undef RT
653 }
654 
655 
656 /* Supply dst with the contents of the routing tables.
657  * If this won't fit in one packet, chop it up into several.
658  */
659 void
660 supply(struct sockaddr_in *dst,
661        struct interface *ifp,		/* output interface */
662        enum output_type type,
663        int flash,			/* 1=flash update */
664        int vers,			/* RIP version */
665        int passwd_ok)			/* OK to include cleartext password */
666 {
667 	struct rt_entry *rt;
668 	int def_metric;
669 
670 
671 	ws.state = 0;
672 	ws.gen_limit = 1024;
673 
674 	ws.to = *dst;
675 	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
676 	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;
677 
678 	if (ifp != NULL) {
679 		ws.to_mask = ifp->int_mask;
680 		ws.to_net = ifp->int_net;
681 		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
682 			ws.state |= WS_ST_TO_ON_NET;
683 
684 	} else {
685 		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
686 		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
687 		rt = rtfind(dst->sin_addr.s_addr);
688 		if (rt)
689 			ifp = rt->rt_ifp;
690 	}
691 
692 	ws.npackets = 0;
693 	if (flash)
694 		ws.state |= WS_ST_FLASH;
695 
696 	if ((ws.ifp = ifp) == NULL) {
697 		ws.metric = 1;
698 	} else {
699 		/* Adjust the advertised metric by the outgoing interface
700 		 * metric.
701 		 */
702 		ws.metric = ifp->int_metric+1;
703 	}
704 
705 	ripv12_buf.rip.rip_vers = vers;
706 
707 	switch (type) {
708 	case OUT_MULTICAST:
709 		if (ifp->int_if_flags & IFF_MULTICAST)
710 			v2buf.type = OUT_MULTICAST;
711 		else
712 			v2buf.type = NO_OUT_MULTICAST;
713 		v12buf.type = OUT_BROADCAST;
714 		break;
715 
716 	case OUT_QUERY:
717 		ws.state |= WS_ST_QUERY;
718 		/* fall through */
719 	case OUT_BROADCAST:
720 	case OUT_UNICAST:
721 		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
722 		v12buf.type = type;
723 		break;
724 
725 	case NO_OUT_MULTICAST:
726 	case NO_OUT_RIPV2:
727 		break;			/* no output */
728 	}
729 
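	/* Decide how aggressively routes may be aggregated for this destination:
	 * queries and off-net targets allow both subnet and supernet aggregation,
	 * while on-net targets are limited by the interface's IS_NO_AG and
	 * IS_NO_SUPER_AG settings.
	 */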
730 	if (vers == RIPv2) {
731 		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
732 		if (type != OUT_BROADCAST)
733 			ws.state |= WS_ST_RIP2_ALL;
734 		if ((ws.state & WS_ST_QUERY)
735 		    || !(ws.state & WS_ST_TO_ON_NET)) {
736 			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
737 		} else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
738 			ws.state |= WS_ST_AG;
739 			if (type != OUT_BROADCAST
740 			    && (ifp == NULL
741 				|| !(ifp->int_state & IS_NO_SUPER_AG)))
742 				ws.state |= WS_ST_SUPER_AG;
743 		}
744 	}
745 
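	/* Authentication is a RIPv2 feature; drop a cleartext password key if
	 * the caller said one must not be included.
	 */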
746 	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
747 	if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
748 		ws.a = NULL;
749 	clr_ws_buf(&v12buf,ws.a);
750 	clr_ws_buf(&v2buf,ws.a);
751 
752 	/*  Fake a default route if asked and if there is not already
753 	 * a better, real default route.
754 	 */
755 	if (supplier && (def_metric = ifp->int_d_metric) != 0) {
756 		if (NULL == (rt = rtget(RIP_DEFAULT, 0))
757 		    || rt->rt_metric+ws.metric >= def_metric) {
758 			ws.state |= WS_ST_DEFAULT;
759 			ag_check(0, 0, 0, 0, def_metric, def_metric,
760 				 0, 0, 0, supply_out);
761 		} else {
762 			def_metric = rt->rt_metric+ws.metric;
763 		}
764 
765 		/* If both RIPv2 and the poor-man's router discovery
766 		 * kludge are on, arrange to advertise an extra
767 		 * default route via RIPv1.
768 		 */
769 		if ((ws.state & WS_ST_RIP2_ALL)
770 		    && (ifp->int_state & IS_PM_RDISC)) {
771 			ripv12_buf.rip.rip_vers = RIPv1;
772 			v12buf.n->n_family = RIP_AF_INET;
773 			v12buf.n->n_dst = htonl(RIP_DEFAULT);
774 			v12buf.n->n_metric = htonl(def_metric);
775 			v12buf.n++;
776 		}
777 	}
778 
779 	rn_walktree(rhead, walk_supply, 0);
780 	ag_flush(0,0,supply_out);
781 
782 	/* Flush the packet buffers, provided they are not empty and
783 	 * do not contain only the password.
784 	 */
785 	if (v12buf.n != v12buf.base
786 	    && (v12buf.n > v12buf.base+1
787 		|| v12buf.base->n_family != RIP_AF_AUTH))
788 		supply_write(&v12buf);
789 	if (v2buf.n != v2buf.base
790 	    && (v2buf.n > v2buf.base+1
791 		|| v2buf.base->n_family != RIP_AF_AUTH))
792 		supply_write(&v2buf);
793 
794 	/* If we sent nothing and this is an answer to a query, send
795 	 * an empty buffer.
796 	 */
797 	if (ws.npackets == 0
798 	    && (ws.state & WS_ST_QUERY))
799 		supply_write(&v12buf);
800 }
801 
802 
803 /* send all of the routing table or just do a flash update
804  */
805 void
806 rip_bcast(int flash)
807 {
808 #ifdef _HAVE_SIN_LEN
809 	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
810 #else
811 	static struct sockaddr_in dst = {AF_INET};
812 #endif
813 	struct interface *ifp;
814 	enum output_type type;
815 	int vers;
816 	struct timeval rtime;
817 
818 
819 	need_flash = 0;
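	/* Pick a random quiet period during which further flash updates
	 * are suppressed.
	 */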
820 	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
821 	no_flash = rtime;
822 	timevaladd(&no_flash, &now);
823 
824 	if (rip_sock < 0)
825 		return;
826 
827 	trace_act("send %s and inhibit dynamic updates for %.3f sec",
828 		  flash ? "dynamic update" : "all routes",
829 		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);
830 
831 	for (ifp = ifnet; ifp != NULL; ifp = ifp->int_next) {
832 		/* Skip interfaces not doing RIP.
833 		 * Do try broken interfaces to see if they have healed.
834 		 */
835 		if (IS_RIP_OUT_OFF(ifp->int_state))
836 			continue;
837 
838 		/* skip turned off interfaces */
839 		if (!iff_up(ifp->int_if_flags))
840 			continue;
841 
842 		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;
843 
844 		if (ifp->int_if_flags & IFF_BROADCAST) {
845 			/* ordinary, hardware interface */
846 			dst.sin_addr.s_addr = ifp->int_brdaddr;
847 
848 			if (vers == RIPv2
849 			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
850 				type = OUT_MULTICAST;
851 			} else {
852 				type = OUT_BROADCAST;
853 			}
854 
855 		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
856 			/* point-to-point hardware interface */
857 			dst.sin_addr.s_addr = ifp->int_dstaddr;
858 			type = OUT_UNICAST;
859 
860 		} else if (ifp->int_state & IS_REMOTE) {
861 			/* remote interface */
862 			dst.sin_addr.s_addr = ifp->int_addr;
863 			type = OUT_UNICAST;
864 
865 		} else {
866 			/* ATM, HIPPI, etc. */
867 			continue;
868 		}
869 
870 		supply(&dst, ifp, type, flash, vers, 1);
871 	}
872 
873 	update_seqno++;			/* all routes are up to date */
874 }
875 
876 
877 /* Ask for routes
878  * Do it only once per interface, and do not ask again even after the
879  * interface has broken and recovered.
880  */
881 void
882 rip_query(void)
883 {
884 #ifdef _HAVE_SIN_LEN
885 	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
886 #else
887 	static struct sockaddr_in dst = {AF_INET};
888 #endif
889 	struct interface *ifp;
890 	struct rip buf;
891 	enum output_type type;
892 
893 
894 	if (rip_sock < 0)
895 		return;
896 
897 	memset(&buf, 0, sizeof(buf));
898 
899 	for (ifp = ifnet; ifp; ifp = ifp->int_next) {
900 		/* Skip interfaces already queried.
901 		 * Do not ask via interfaces through which we don't
902 		 * accept input.  Do not ask via interfaces that cannot
903 		 * send RIP packets.
904 		 * Do try broken interfaces to see if they have healed.
905 		 */
906 		if (IS_RIP_IN_OFF(ifp->int_state)
907 		    || ifp->int_query_time != NEVER)
908 			continue;
909 
910 		/* skip turned off interfaces */
911 		if (!iff_up(ifp->int_if_flags))
912 			continue;
913 
914 		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
915 		buf.rip_cmd = RIPCMD_REQUEST;
916 		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
917 		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);
918 
919 		/* Send a RIPv1 query only if allowed and if we will
920 		 * listen to RIPv1 routers.
921 		 */
922 		if ((ifp->int_state & IS_NO_RIPV1_OUT)
923 		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
924 			buf.rip_vers = RIPv2;
925 		} else {
926 			buf.rip_vers = RIPv1;
927 		}
928 
929 		if (ifp->int_if_flags & IFF_BROADCAST) {
930 			/* ordinary, hardware interface */
931 			dst.sin_addr.s_addr = ifp->int_brdaddr;
932 
933 			/* Broadcast RIPv1 queries and RIPv2 queries
934 			 * when the hardware cannot multicast.
935 			 */
936 			if (buf.rip_vers == RIPv2
937 			    && (ifp->int_if_flags & IFF_MULTICAST)
938 			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
939 				type = OUT_MULTICAST;
940 			} else {
941 				type = OUT_BROADCAST;
942 			}
943 
944 		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
945 			/* point-to-point hardware interface */
946 			dst.sin_addr.s_addr = ifp->int_dstaddr;
947 			type = OUT_UNICAST;
948 
949 		} else if (ifp->int_state & IS_REMOTE) {
950 			/* remote interface */
951 			dst.sin_addr.s_addr = ifp->int_addr;
952 			type = OUT_UNICAST;
953 
954 		} else {
955 			/* ATM, HIPPI, etc. */
956 			continue;
957 		}
958 
959 		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
960 		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
961 			if_sick(ifp);
962 	}
963 }
964