1 /* $NetBSD: prune.c,v 1.3 1995/12/10 10:07:09 mycroft Exp $ */
2
3 /*
4 * The mrouted program is covered by the license in the accompanying file
5 * named "LICENSE". Use of the mrouted program represents acceptance of
6 * the terms and conditions listed in that file.
7 *
8 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
9 * Leland Stanford Junior University.
10 */
11
12
13 #include "defs.h"
14
15 extern int cache_lifetime;
16 extern int max_prune_lifetime;
17 extern struct rtentry *routing_table;
18
19 extern int phys_vif;
20
21 /*
22 * dither cache lifetime to obtain a value between x and 2*x
23 */
24 #define CACHE_LIFETIME(x) ((x) + (arc4random_uniform(x)))
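/* e.g. with cache_lifetime == 300, this yields a timer in [300, 599] */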
25
26 struct gtable *kernel_table; /* ptr to list of kernel grp entries*/
27 static struct gtable *kernel_no_route; /* list of grp entries w/o routes */
28 struct gtable *gtp; /* pointer for kernel rt entries */
29 unsigned int kroutes; /* current number of cache entries */
30
31 /****************************************************************************
32 Functions that are local to prune.c
33 ****************************************************************************/
34 static void prun_add_ttls(struct gtable *gt);
35 static int pruning_neighbor(vifi_t vifi, u_int32_t addr);
36 static int can_mtrace(vifi_t vifi, u_int32_t addr);
37 static struct ptable * find_prune_entry(u_int32_t vr, struct ptable *pt);
38 static void expire_prune(vifi_t vifi, struct gtable *gt);
39 static void send_prune(struct gtable *gt);
40 static void send_graft(struct gtable *gt);
41 static void send_graft_ack(u_int32_t src, u_int32_t dst,
42 u_int32_t origin, u_int32_t grp);
43 static void update_kernel(struct gtable *g);
44 static char * scaletime(time_t t);
45
46 /*
47 * Updates the ttl values for each vif.
48 */
49 static void
50 prun_add_ttls(struct gtable *gt)
51 {
52 struct uvif *v;
53 vifi_t vifi;
54
55 for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
56 if (VIFM_ISSET(vifi, gt->gt_grpmems))
57 gt->gt_ttls[vifi] = v->uv_threshold;
58 else
59 gt->gt_ttls[vifi] = 0;
60 }
61 }
62
63 /*
64 * checks for scoped multicast addresses
65 */
66 #define GET_SCOPE(gt) { \
67 vifi_t _i; \
68 if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
69 for (_i = 0; _i < numvifs; _i++) \
70 if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
71 VIFM_SET(_i, (gt)->gt_scope); \
72 }
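/*
 * The 0xef000000 test above limits the ACL check to groups in
 * 239.0.0.0/8, the administratively scoped multicast range.
 */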
73
74 int
75 scoped_addr(vifi_t vifi, u_int32_t addr)
76 {
77 struct vif_acl *acl;
78
79 for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
80 if ((addr & acl->acl_mask) == acl->acl_addr)
81 return 1;
82
83 return 0;
84 }
85
86 /*
87 * Determine if mcastgrp has a listener on vifi
88 */
89 int
90 grplst_mem(vifi_t vifi, u_int32_t mcastgrp)
91 {
92 struct listaddr *g;
93 struct uvif *v;
94
95 v = &uvifs[vifi];
96
97 for (g = v->uv_groups; g != NULL; g = g->al_next)
98 if (mcastgrp == g->al_addr)
99 return 1;
100
101 return 0;
102 }
103
104 /*
105 * Finds the group entry with the specified source and netmask.
106 * If netmask is 0, it uses the route's netmask.
107 *
108 * Returns TRUE if a match is found, and the global variable gtp is left
109 * pointing to the entry before the found entry.
110 * Returns FALSE if no exact match is found; gtp is left pointing to the
111 * entry before where the entry in question belongs, or is NULL if it
112 * belongs at the head of the list.
113 */
114 int
115 find_src_grp(u_int32_t src, u_int32_t mask, u_int32_t grp)
116 {
117 struct gtable *gt;
118
119 gtp = NULL;
120 gt = kernel_table;
121 while (gt != NULL) {
122 if (grp == gt->gt_mcastgrp &&
123 (mask ? (gt->gt_route->rt_origin == src &&
124 gt->gt_route->rt_originmask == mask) :
125 ((src & gt->gt_route->rt_originmask) ==
126 gt->gt_route->rt_origin)))
127 return TRUE;
128 if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
129 (grp == gt->gt_mcastgrp &&
130 (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
131 (mask == gt->gt_route->rt_originmask &&
132 (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
133 gtp = gt;
134 gt = gt->gt_gnext;
135 }
136 else break;
137 }
138 return FALSE;
139 }
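/*
 * Typical caller pattern, relying on the global gtp set above:
 *
 *	if (find_src_grp(src, mask, grp)) {
 *		g = gtp ? gtp->gt_gnext : kernel_table;
 *		...
 *	}
 */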
140
141 /*
142 * Check if the neighbor supports pruning
143 */
144 static int
145 pruning_neighbor(vifi_t vifi, u_int32_t addr)
146 {
147 struct listaddr *n = neighbor_info(vifi, addr);
148 int vers;
149
150 if (n == NULL)
151 return 0;
152
153 if (n->al_flags & NF_PRUNE)
154 return 1;
155
156 /*
157 * Versions from 3.0 to 3.4 relied on the version number to identify
158 * that they could handle pruning.
159 */
160 vers = NBR_VERS(n);
161 return (vers >= 0x0300 && vers <= 0x0304);
162 }
163
164 /*
165 * Can the neighbor in question handle multicast traceroute?
166 */
167 static int
168 can_mtrace(vifi_t vifi, u_int32_t addr)
169 {
170 struct listaddr *n = neighbor_info(vifi, addr);
171 int vers;
172
173 if (n == NULL)
174 return 0;
175
176 if (n->al_flags & NF_MTRACE)
177 return 1;
178
179 /*
180 * Versions 3.3 and 3.4 relied on the version number to identify
181 * that they could handle traceroute.
182 */
183 vers = NBR_VERS(n);
184 return (vers >= 0x0303 && vers <= 0x0304);
185 }
186
187 /*
188 * Returns the prune entry of the router, or NULL if none exists
189 */
190 static struct ptable *
191 find_prune_entry(u_int32_t vr, struct ptable *pt)
192 {
193 while (pt) {
194 if (pt->pt_router == vr)
195 return pt;
196 pt = pt->pt_next;
197 }
198
199 return NULL;
200 }
201
202 /*
203 * Send a prune message to the dominant router for
204 * this source.
205 *
206 * Record an entry that a prune was sent for this group
207 */
208 static void
209 send_prune(struct gtable *gt)
210 {
211 struct ptable *pt;
212 char *p;
213 int i;
214 int datalen;
215 u_int32_t src;
216 u_int32_t dst;
217 u_int32_t tmp;
218
219 /* Don't process any prunes if router is not pruning */
220 if (pruning == 0)
221 return;
222
223 /* Can't process a prune if we don't have an associated route */
224 if (gt->gt_route == NULL)
225 return;
226
227 /* Don't send a prune to a non-pruning router */
228 if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
229 return;
230
231 /*
232 * sends a prune message to the router upstream.
233 */
234 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
235 dst = gt->gt_route->rt_gateway;
236
237 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
238 datalen = 0;
239
240 /*
241 * determine prune lifetime
242 */
243 gt->gt_prsent_timer = gt->gt_timer;
244 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
245 if (pt->pt_timer < gt->gt_prsent_timer)
246 gt->gt_prsent_timer = pt->pt_timer;
247
248 /*
249 * If we have a graft pending, cancel graft retransmission
250 */
251 gt->gt_grftsnt = 0;
252
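/*
 * DVMRP prune payload: 4-byte origin, 4-byte group and 4-byte
 * prune lifetime (network byte order), 12 bytes in all.
 */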
253 for (i = 0; i < 4; i++)
254 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
255 for (i = 0; i < 4; i++)
256 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
257 tmp = htonl(gt->gt_prsent_timer);
258 for (i = 0; i < 4; i++)
259 *p++ = ((char *)&(tmp))[i];
260 datalen += 12;
261
262 send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
263 htonl(MROUTED_LEVEL), datalen);
264
265 logit(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
266 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
267 inet_fmt(gt->gt_mcastgrp, s2),
268 gt->gt_prsent_timer, gt->gt_route->rt_parent,
269 inet_fmt(gt->gt_route->rt_gateway, s3));
270 }
271
272 /*
273 * A prune was sent upstream, so a graft has to be sent
274 * to annul the prune.
275 * Set up a graft timer so that if an ack is not
276 * heard within that time, another graft request
277 * is sent out.
278 */
279 static void
280 send_graft(struct gtable *gt)
281 {
282 char *p;
283 int i;
284 int datalen;
285 u_int32_t src;
286 u_int32_t dst;
287
288 /* Can't send a graft without an associated route */
289 if (gt->gt_route == NULL)
290 return;
291
292 src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
293 dst = gt->gt_route->rt_gateway;
294
295 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
296 datalen = 0;
297
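/*
 * DVMRP graft payload: 4-byte origin followed by 4-byte group,
 * 8 bytes in all.
 */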
298 for (i = 0; i < 4; i++)
299 *p++ = ((char *)&(gt->gt_route->rt_origin))[i];
300 for (i = 0; i < 4; i++)
301 *p++ = ((char *)&(gt->gt_mcastgrp))[i];
302 datalen += 8;
303
304 if (datalen != 0) {
305 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
306 htonl(MROUTED_LEVEL), datalen);
307 }
308 logit(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
309 inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
310 inet_fmt(gt->gt_mcastgrp, s2),
311 inet_fmt(gt->gt_route->rt_gateway, s3), gt->gt_route->rt_parent);
312 }
313
314 /*
315 * Send an ack that a graft was received
316 */
317 static void
318 send_graft_ack(u_int32_t src, u_int32_t dst, u_int32_t origin, u_int32_t grp)
319 {
320 char *p;
321 int i;
322 int datalen;
323
324 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
325 datalen = 0;
326
327 for (i = 0; i < 4; i++)
328 *p++ = ((char *)&(origin))[i];
329 for (i = 0; i < 4; i++)
330 *p++ = ((char *)&(grp))[i];
331 datalen += 8;
332
333 send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
334 htonl(MROUTED_LEVEL), datalen);
335
336 logit(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
337 inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3));
338 }
339
340 /*
341 * Update the kernel cache with all the routes hanging off the group entry
342 */
343 static void
344 update_kernel(struct gtable *g)
345 {
346 struct stable *st;
347
348 for (st = g->gt_srctbl; st; st = st->st_next)
349 k_add_rg(st->st_origin, g);
350 }
351
352 /****************************************************************************
353 Functions that are used externally
354 ****************************************************************************/
355
356 /*
357 * Initialize the kernel table structure
358 */
359 void
360 init_ktable(void)
361 {
362 kernel_table = NULL;
363 kernel_no_route = NULL;
364 kroutes = 0;
365 }
366
367 /*
368 * Add a new table entry for (origin, mcastgrp)
369 */
370 void
371 add_table_entry(u_int32_t origin, u_int32_t mcastgrp)
372 {
373 struct rtentry *r;
374 struct gtable *gt,**gtnp,*prev_gt;
375 struct stable *st,**stnp;
376 vifi_t i;
377
378 #ifdef DEBUG_MFC
379 md_logit(MD_MISS, origin, mcastgrp);
380 #endif
381
382 r = determine_route(origin);
383 prev_gt = NULL;
384 if (r == NULL) {
385 /*
386 * Look for it on the no_route table; if it is found then
387 * it will be detected as a duplicate below.
388 */
389 for (gt = kernel_no_route; gt; gt = gt->gt_next)
390 if (mcastgrp == gt->gt_mcastgrp &&
391 gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
392 break;
393 gtnp = &kernel_no_route;
394 } else {
395 gtnp = &r->rt_groups;
396 while ((gt = *gtnp) != NULL) {
397 if (gt->gt_mcastgrp >= mcastgrp)
398 break;
399 gtnp = &gt->gt_next;
400 prev_gt = gt;
401 }
402 }
403
404 if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
405 gt = malloc(sizeof(struct gtable));
406 if (gt == NULL)
407 logit(LOG_ERR, 0, "ran out of memory");
408
409 gt->gt_mcastgrp = mcastgrp;
410 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
411 time(&gt->gt_ctime);
412 gt->gt_grpmems = 0;
413 gt->gt_scope = 0;
414 gt->gt_prsent_timer = 0;
415 gt->gt_grftsnt = 0;
416 gt->gt_srctbl = NULL;
417 gt->gt_pruntbl = NULL;
418 gt->gt_route = r;
419 #ifdef RSRR
420 gt->gt_rsrr_cache = NULL;
421 #endif
422
423 if (r != NULL) {
424 /* obtain the multicast group membership list */
425 for (i = 0; i < numvifs; i++) {
426 if (VIFM_ISSET(i, r->rt_children) &&
427 !(VIFM_ISSET(i, r->rt_leaves)))
428 VIFM_SET(i, gt->gt_grpmems);
429
430 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
431 VIFM_SET(i, gt->gt_grpmems);
432 }
433 GET_SCOPE(gt);
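/*
 * If the incoming (parent) vif is scoped for this group, scope the
 * entry on all vifs so that it is never forwarded.
 */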
434 if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
435 gt->gt_scope = -1;
436 gt->gt_grpmems &= ~gt->gt_scope;
437 } else {
438 gt->gt_scope = -1;
439 gt->gt_grpmems = 0;
440 }
441
442 /* update ttls */
443 prun_add_ttls(gt);
444
445 gt->gt_next = *gtnp;
446 *gtnp = gt;
447 if (gt->gt_next)
448 gt->gt_next->gt_prev = gt;
449 gt->gt_prev = prev_gt;
450
451 if (r) {
452 if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
453 struct gtable *g;
454
455 g = gtp ? gtp->gt_gnext : kernel_table;
456 logit(LOG_WARNING, 0, "Entry for (%s %s) (rt:%x) exists (rt:%x)",
457 inet_fmts(r->rt_origin, r->rt_originmask, s1),
458 inet_fmt(g->gt_mcastgrp, s2),
459 r, g->gt_route);
460 } else {
461 if (gtp) {
462 gt->gt_gnext = gtp->gt_gnext;
463 gt->gt_gprev = gtp;
464 gtp->gt_gnext = gt;
465 } else {
466 gt->gt_gnext = kernel_table;
467 gt->gt_gprev = NULL;
468 kernel_table = gt;
469 }
470 if (gt->gt_gnext)
471 gt->gt_gnext->gt_gprev = gt;
472 }
473 } else {
474 gt->gt_gnext = gt->gt_gprev = NULL;
475 }
476 }
477
478 stnp = &gt->gt_srctbl;
479 while ((st = *stnp) != NULL) {
480 if (ntohl(st->st_origin) >= ntohl(origin))
481 break;
482 stnp = &st->st_next;
483 }
484
485 if (st == NULL || st->st_origin != origin) {
486 st = malloc(sizeof(struct stable));
487 if (st == NULL)
488 logit(LOG_ERR, 0, "ran out of memory");
489
490 st->st_origin = origin;
491 st->st_pktcnt = 0;
492 st->st_next = *stnp;
493 *stnp = st;
494 } else {
495 #ifdef DEBUG_MFC
496 md_logit(MD_DUPE, origin, mcastgrp);
497 #endif
498 logit(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
499 inet_fmt(origin, s1), inet_fmt(mcastgrp, s2));
500 /* XXX Doing this should cause no harm, and may ensure
501 * kernel<>mrouted synchronization */
502 k_add_rg(origin, gt);
503 return;
504 }
505
506 kroutes++;
507 k_add_rg(origin, gt);
508
509 logit(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
510 inet_fmt(origin, s1),
511 inet_fmt(mcastgrp, s2),
512 gt->gt_grpmems, r ? r->rt_parent : -1);
513
514 /* If no vif is forwarding this group, then
515 * mark this src-grp as a prune candidate
516 * and send a prune upstream.
517 */
518 if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
519 send_prune(gt);
520 }
521
522 /*
523 * An mrouter has gone down and come up on an interface.
524 * Forward on that interface immediately.
525 */
526 void
527 reset_neighbor_state(vifi_t vifi, u_int32_t addr)
528 {
529 struct rtentry *r;
530 struct gtable *g;
531 struct ptable *pt, **ptnp;
532 struct stable *st;
533
534 for (g = kernel_table; g; g = g->gt_gnext) {
535 r = g->gt_route;
536
537 /*
538 * If neighbor was the parent, remove the prune sent state
539 * and all of the source cache info so that prunes get
540 * regenerated.
541 */
542 if (vifi == r->rt_parent) {
543 if (addr == r->rt_gateway) {
544 logit(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
545 inet_fmts(r->rt_origin, r->rt_originmask, s1),
546 inet_fmt(g->gt_mcastgrp, s2));
547
548 g->gt_prsent_timer = 0;
549 g->gt_grftsnt = 0;
550 while ((st = g->gt_srctbl)) {
551 g->gt_srctbl = st->st_next;
552 k_del_rg(st->st_origin, g);
553 kroutes--;
554 free(st);
555 }
556 }
557 } else {
558 /*
559 * Neighbor was not the parent, send grafts to join the groups
560 */
561 if (g->gt_prsent_timer) {
562 g->gt_grftsnt = 1;
563 send_graft(g);
564 g->gt_prsent_timer = 0;
565 }
566
567 /*
568 * Remove any prunes that this router has sent us.
569 */
570 ptnp = &g->gt_pruntbl;
571 while ((pt = *ptnp) != NULL) {
572 if (pt->pt_vifi == vifi && pt->pt_router == addr) {
573 *ptnp = pt->pt_next;
574 free(pt);
575 } else
576 ptnp = &pt->pt_next;
577 }
578
579 /*
580 * And see if we want to forward again.
581 */
582 if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
583 if (VIFM_ISSET(vifi, r->rt_children) &&
584 !(VIFM_ISSET(vifi, r->rt_leaves)))
585 VIFM_SET(vifi, g->gt_grpmems);
586
587 if (VIFM_ISSET(vifi, r->rt_leaves) &&
588 grplst_mem(vifi, g->gt_mcastgrp))
589 VIFM_SET(vifi, g->gt_grpmems);
590
591 g->gt_grpmems &= ~g->gt_scope;
592 prun_add_ttls(g);
593
594 /* Update kernel state */
595 update_kernel(g);
596 #ifdef RSRR
597 /* Send route change notification to reservation protocol. */
598 rsrr_cache_send(g,1);
599 #endif /* RSRR */
600
601 logit(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
602 inet_fmts(r->rt_origin, r->rt_originmask, s1),
603 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
604 }
605 }
606 }
607 }
608
609 /*
610 * Delete table entry from the kernel
611 * del_flag determines how many entries to delete
612 */
613 void
614 del_table_entry(struct rtentry *r, u_int32_t mcastgrp, u_int del_flag)
615 {
616 struct gtable *g, *prev_g;
617 struct stable *st, *prev_st;
618 struct ptable *pt, *prev_pt;
619
620 if (del_flag == DEL_ALL_ROUTES) {
621 g = r->rt_groups;
622 while (g) {
623 logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
624 inet_fmts(r->rt_origin, r->rt_originmask, s1),
625 inet_fmt(g->gt_mcastgrp, s2));
626 st = g->gt_srctbl;
627 while (st) {
628 if (k_del_rg(st->st_origin, g) < 0) {
629 logit(LOG_WARNING, errno,
630 "del_table_entry trying to delete (%s, %s)",
631 inet_fmt(st->st_origin, s1),
632 inet_fmt(g->gt_mcastgrp, s2));
633 }
634 kroutes--;
635 prev_st = st;
636 st = st->st_next;
637 free(prev_st);
638 }
639 g->gt_srctbl = NULL;
640
641 pt = g->gt_pruntbl;
642 while (pt) {
643 prev_pt = pt;
644 pt = pt->pt_next;
645 free(prev_pt);
646 }
647 g->gt_pruntbl = NULL;
648
649 if (g->gt_gnext)
650 g->gt_gnext->gt_gprev = g->gt_gprev;
651 if (g->gt_gprev)
652 g->gt_gprev->gt_gnext = g->gt_gnext;
653 else
654 kernel_table = g->gt_gnext;
655
656 #ifdef RSRR
657 /* Send route change notification to reservation protocol. */
658 rsrr_cache_send(g,0);
659 rsrr_cache_clean(g);
660 #endif /* RSRR */
661 prev_g = g;
662 g = g->gt_next;
663 free(prev_g);
664 }
665 r->rt_groups = NULL;
666 }
667
668 /*
669 * Dummy routine - someday this may be needed, so it is just there
670 */
671 if (del_flag == DEL_RTE_GROUP) {
672 prev_g = (struct gtable *)&r->rt_groups;
673 for (g = r->rt_groups; g; g = g->gt_next) {
674 if (g->gt_mcastgrp == mcastgrp) {
675 logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
676 inet_fmts(r->rt_origin, r->rt_originmask, s1),
677 inet_fmt(g->gt_mcastgrp, s2));
678 st = g->gt_srctbl;
679 while (st) {
680 if (k_del_rg(st->st_origin, g) < 0) {
681 logit(LOG_WARNING, errno,
682 "del_table_entry trying to delete (%s, %s)",
683 inet_fmt(st->st_origin, s1),
684 inet_fmt(g->gt_mcastgrp, s2));
685 }
686 kroutes--;
687 prev_st = st;
688 st = st->st_next;
689 free(prev_st);
690 }
691 g->gt_srctbl = NULL;
692
693 pt = g->gt_pruntbl;
694 while (pt) {
695 prev_pt = pt;
696 pt = pt->pt_next;
697 free(prev_pt);
698 }
699 g->gt_pruntbl = NULL;
700
701 if (g->gt_gnext)
702 g->gt_gnext->gt_gprev = g->gt_gprev;
703 if (g->gt_gprev)
704 g->gt_gprev->gt_gnext = g->gt_gnext;
705 else
706 kernel_table = g->gt_gnext;
707
708 if (prev_g != (struct gtable *)&r->rt_groups)
709 g->gt_next->gt_prev = prev_g;
710 else
711 g->gt_next->gt_prev = NULL;
712 prev_g->gt_next = g->gt_next;
713
714 #ifdef RSRR
715 /* Send route change notification to reservation protocol. */
716 rsrr_cache_send(g,0);
717 rsrr_cache_clean(g);
718 #endif /* RSRR */
719 free(g);
720 g = prev_g;
721 } else {
722 prev_g = g;
723 }
724 }
725 }
726 }
727
728 /*
729 * update kernel table entry when a route entry changes
730 */
731 void
732 update_table_entry(struct rtentry *r)
733 {
734 struct gtable *g;
735 struct ptable *pt, *prev_pt;
736 vifi_t i;
737
738 for (g = r->rt_groups; g; g = g->gt_next) {
739 pt = g->gt_pruntbl;
740 while (pt) {
741 prev_pt = pt->pt_next;
742 free(pt);
743 pt = prev_pt;
744 }
745 g->gt_pruntbl = NULL;
746
747 g->gt_grpmems = 0;
748
749 /* obtain the multicast group membership list */
750 for (i = 0; i < numvifs; i++) {
751 if (VIFM_ISSET(i, r->rt_children) &&
752 !(VIFM_ISSET(i, r->rt_leaves)))
753 VIFM_SET(i, g->gt_grpmems);
754
755 if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
756 VIFM_SET(i, g->gt_grpmems);
757 }
758 if (VIFM_ISSET(r->rt_parent, g->gt_scope))
759 g->gt_scope = -1;
760 g->gt_grpmems &= ~g->gt_scope;
761
762 logit(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
763 inet_fmts(r->rt_origin, r->rt_originmask, s1),
764 inet_fmt(g->gt_mcastgrp, s2),
765 g->gt_grpmems);
766
767 if (g->gt_grpmems && g->gt_prsent_timer) {
768 g->gt_grftsnt = 1;
769 send_graft(g);
770 g->gt_prsent_timer = 0;
771 }
772
773 /* update ttls and add entry into kernel */
774 prun_add_ttls(g);
775 update_kernel(g);
776 #ifdef RSRR
777 /* Send route change notification to reservation protocol. */
778 rsrr_cache_send(g,1);
779 #endif /* RSRR */
780
781 /* Check if we want to prune this group */
782 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
783 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
784 send_prune(g);
785 }
786 }
787 }
788
789 /*
790 * set the forwarding flag for all mcastgrps on this vifi
791 */
792 void
793 update_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
794 {
795 struct rtentry *r;
796 struct gtable *g;
797
798 logit(LOG_DEBUG, 0, "group %s joined on vif %d",
799 inet_fmt(mcastgrp, s1), vifi);
800
801 for (g = kernel_table; g; g = g->gt_gnext) {
802 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
803 break;
804
805 r = g->gt_route;
806 if (g->gt_mcastgrp == mcastgrp &&
807 VIFM_ISSET(vifi, r->rt_children)) {
808
809 VIFM_SET(vifi, g->gt_grpmems);
810 g->gt_grpmems &= ~g->gt_scope;
811 if (g->gt_grpmems == 0)
812 continue;
813
814 prun_add_ttls(g);
815 logit(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
816 inet_fmts(r->rt_origin, r->rt_originmask, s1),
817 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
818
819 update_kernel(g);
820 #ifdef RSRR
821 /* Send route change notification to reservation protocol. */
822 rsrr_cache_send(g,1);
823 #endif /* RSRR */
824 }
825 }
826 }
827
828 /*
829 * reset forwarding flag for all mcastgrps on this vifi
830 */
831 void
832 delete_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
833 {
834 struct rtentry *r;
835 struct gtable *g;
836
837 logit(LOG_DEBUG, 0, "group %s left on vif %d",
838 inet_fmt(mcastgrp, s1), vifi);
839
840 for (g = kernel_table; g; g = g->gt_gnext) {
841 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
842 break;
843
844 if (g->gt_mcastgrp == mcastgrp) {
845 int stop_sending = 1;
846
847 r = g->gt_route;
848 /*
849 * If this is not a leaf, then we have router neighbors on this
850 * vif. Only turn off forwarding if they have all pruned.
851 */
852 if (!VIFM_ISSET(vifi, r->rt_leaves)) {
853 struct listaddr *vr;
854
855 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
856 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
857 stop_sending = 0;
858 break;
859 }
860 }
861
862 if (stop_sending) {
863 VIFM_CLR(vifi, g->gt_grpmems);
864 logit(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
865 inet_fmts(r->rt_origin, r->rt_originmask, s1),
866 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
867
868 prun_add_ttls(g);
869 update_kernel(g);
870 #ifdef RSRR
871 /* Send route change notification to reservation protocol. */
872 rsrr_cache_send(g,1);
873 #endif /* RSRR */
874
875 /*
876 * If there are no more members of this particular group,
877 * send prune upstream
878 */
879 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
880 send_prune(g);
881 }
882 }
883 }
884 }
885
886 /*
887 * Takes the prune message received and parses it to
888 * determine the (src, grp) pair to be pruned.
889 *
890 * Adds the router to the prune list for that (src, grp) entry.
891 *
892 * Determines if further packets have to be sent down that vif.
893 *
894 * Determines if a corresponding prune message has to be generated.
895 */
896 void
897 accept_prune(u_int32_t src, u_int32_t dst, char *p, int datalen)
898 {
899 u_int32_t prun_src;
900 u_int32_t prun_grp;
901 u_int32_t prun_tmr;
902 vifi_t vifi;
903 int i;
904 int stop_sending;
905 struct rtentry *r;
906 struct gtable *g;
907 struct ptable *pt;
908 struct listaddr *vr;
909
910 /* Don't process any prunes if router is not pruning */
911 if (pruning == 0)
912 return;
913
914 if ((vifi = find_vif(src, dst)) == NO_VIF) {
915 logit(LOG_INFO, 0,
916 "ignoring prune report from non-neighbor %s",
917 inet_fmt(src, s1));
918 return;
919 }
920
921 /* Check if enough data is present */
922 if (datalen < 12)
923 {
924 logit(LOG_WARNING, 0,
925 "non-decipherable prune from %s",
926 inet_fmt(src, s1));
927 return;
928 }
929
930 for (i = 0; i< 4; i++)
931 ((char *)&prun_src)[i] = *p++;
932 for (i = 0; i< 4; i++)
933 ((char *)&prun_grp)[i] = *p++;
934 for (i = 0; i< 4; i++)
935 ((char *)&prun_tmr)[i] = *p++;
936 prun_tmr = ntohl(prun_tmr);
937
938 logit(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
939 inet_fmt(src, s1), vifi,
940 inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);
941
942 /*
943 * Find the subnet for the prune
944 */
945 if (find_src_grp(prun_src, 0, prun_grp)) {
946 g = gtp ? gtp->gt_gnext : kernel_table;
947 r = g->gt_route;
948
949 if (!VIFM_ISSET(vifi, r->rt_children)) {
950 logit(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
951 inet_fmt(src, s1), inet_fmt(prun_src, s2),
952 inet_fmt(prun_grp, s3));
953 return;
954 }
955 if (VIFM_ISSET(vifi, g->gt_scope)) {
956 logit(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
957 inet_fmt(src, s1), inet_fmt(prun_src, s2),
958 inet_fmt(prun_grp, s3));
959 return;
960 }
961 if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
962 /*
963 * If it's about to expire, then it's only still around because
964 * of timer granularity, so don't warn about it.
965 */
966 if (pt->pt_timer > 10) {
967 logit(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
968 "duplicate prune received on vif",
969 vifi, inet_fmt(src, s1), inet_fmt(prun_src, s2),
970 inet_fmt(prun_grp, s3), prun_tmr,
971 "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
972 }
973 pt->pt_timer = prun_tmr;
974 } else {
975 /* allocate space for the prune structure */
976 pt = malloc(sizeof(struct ptable));
977 if (pt == NULL)
978 logit(LOG_ERR, 0, "pt: ran out of memory");
979
980 pt->pt_vifi = vifi;
981 pt->pt_router = src;
982 pt->pt_timer = prun_tmr;
983
984 pt->pt_next = g->gt_pruntbl;
985 g->gt_pruntbl = pt;
986 }
987
988 /* Refresh the group's lifetime */
989 g->gt_timer = CACHE_LIFETIME(cache_lifetime);
990 if (g->gt_timer < prun_tmr)
991 g->gt_timer = prun_tmr;
992
993 /*
994 * check if any more packets need to be sent on the
995 * vif which sent this message
996 */
997 stop_sending = 1;
998 for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
999 if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
1000 stop_sending = 0;
1001 break;
1002 }
1003
1004 if (stop_sending && !grplst_mem(vifi, prun_grp)) {
1005 VIFM_CLR(vifi, g->gt_grpmems);
1006 logit(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
1007 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1008 inet_fmt(g->gt_mcastgrp, s2), vifi, g->gt_grpmems);
1009
1010 prun_add_ttls(g);
1011 update_kernel(g);
1012 #ifdef RSRR
1013 /* Send route change notification to reservation protocol. */
1014 rsrr_cache_send(g,1);
1015 #endif /* RSRR */
1016 }
1017
1018 /*
1019 * check if all the child routers have expressed no interest
1020 * in this group and if this group does not exist in the
1021 * interface
1022 * Send a prune message then upstream
1023 */
1024 if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1025 send_prune(g);
1026 }
1027 } else {
1028 /*
1029 * There is no kernel entry for this group. Therefore, we can
1030 * simply ignore the prune, as we are not forwarding this traffic
1031 * downstream.
1032 */
1033 logit(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
1034 "prune message received with no kernel entry for",
1035 inet_fmt(prun_src, s1), inet_fmt(prun_grp, s2),
1036 prun_tmr, inet_fmt(src, s3));
1037 return;
1038 }
1039 }
1040
1041 /*
1042 * Checks if this mcastgrp is present in the kernel table.
1043 * If so, and if a prune was sent, it sends a graft upwards.
1044 */
1045 void
1046 chkgrp_graft(vifi_t vifi, u_int32_t mcastgrp)
1047 {
1048 struct rtentry *r;
1049 struct gtable *g;
1050
1051 for (g = kernel_table; g; g = g->gt_gnext) {
1052 if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1053 break;
1054
1055 r = g->gt_route;
1056 if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1057 if (g->gt_prsent_timer) {
1058 VIFM_SET(vifi, g->gt_grpmems);
1059
1060 /*
1061 * If the vif that was joined was a scoped vif,
1062 * ignore it ; don't graft back
1063 */
1064 g->gt_grpmems &= ~g->gt_scope;
1065 if (g->gt_grpmems == 0)
1066 continue;
1067
1068 /* set the flag for graft retransmission */
1069 g->gt_grftsnt = 1;
1070
1071 /* send graft upwards */
1072 send_graft(g);
1073
1074 /* reset the prune timer and update cache timer*/
1075 g->gt_prsent_timer = 0;
1076 g->gt_timer = max_prune_lifetime;
1077
1078 logit(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1079 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1080 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1081
1082 prun_add_ttls(g);
1083 update_kernel(g);
1084 #ifdef RSRR
1085 /* Send route change notification to reservation protocol. */
1086 rsrr_cache_send(g,1);
1087 #endif /* RSRR */
1088 }
1089 }
1090 }
1091
1092 /* Determine the multicast group and src of the graft, and
1093 * check whether an entry exists for them.
1094 *
1095 * If it does, then determine if a prune was sent
1096 * upstream.
1097 * If a prune was sent upstream, send a graft upstream and
1098 * send an ack downstream.
1099 * If no prune was sent upstream, change the forwarding bit
1100 * for this interface and send an ack downstream.
1101 *
1102 * If no entry exists for this group, send an ack downstream.
1103 */
1104 void
1105 accept_graft(u_int32_t src, u_int32_t dst, char *p, int datalen)
1106 {
1107 vifi_t vifi;
1108 u_int32_t graft_src;
1109 u_int32_t graft_grp;
1110 int i;
1111 struct rtentry *r;
1112 struct gtable *g;
1113 struct ptable *pt, **ptnp;
1114
1115 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1116 logit(LOG_INFO, 0,
1117 "ignoring graft from non-neighbor %s",
1118 inet_fmt(src, s1));
1119 return;
1120 }
1121
1122 if (datalen < 8) {
1123 logit(LOG_WARNING, 0,
1124 "received non-decipherable graft from %s",
1125 inet_fmt(src, s1));
1126 return;
1127 }
1128
1129 for (i = 0; i< 4; i++)
1130 ((char *)&graft_src)[i] = *p++;
1131 for (i = 0; i< 4; i++)
1132 ((char *)&graft_grp)[i] = *p++;
1133
1134 logit(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1135 inet_fmt(src, s1), vifi,
1136 inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3));
1137
1138 /*
1139 * Find the subnet for the graft
1140 */
1141 if (find_src_grp(graft_src, 0, graft_grp)) {
1142 g = gtp ? gtp->gt_gnext : kernel_table;
1143 r = g->gt_route;
1144
1145 if (VIFM_ISSET(vifi, g->gt_scope)) {
1146 logit(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1147 inet_fmt(src, s1), inet_fmt(graft_src, s2),
1148 inet_fmt(graft_grp, s3));
1149 return;
1150 }
1151
1152 ptnp = &g->gt_pruntbl;
1153 while ((pt = *ptnp) != NULL) {
1154 if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1155 *ptnp = pt->pt_next;
1156 free(pt);
1157
1158 VIFM_SET(vifi, g->gt_grpmems);
1159 logit(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1160 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1161 inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1162
1163 prun_add_ttls(g);
1164 update_kernel(g);
1165 #ifdef RSRR
1166 /* Send route change notification to reservation protocol. */
1167 rsrr_cache_send(g,1);
1168 #endif /* RSRR */
1169 break;
1170 } else {
1171 ptnp = &pt->pt_next;
1172 }
1173 }
1174
1175 /* send ack downstream */
1176 send_graft_ack(dst, src, graft_src, graft_grp);
1177 g->gt_timer = max_prune_lifetime;
1178
1179 if (g->gt_prsent_timer) {
1180 /* set the flag for graft retransmission */
1181 g->gt_grftsnt = 1;
1182
1183 /* send graft upwards */
1184 send_graft(g);
1185
1186 /* reset the prune sent timer */
1187 g->gt_prsent_timer = 0;
1188 }
1189 } else {
1190 /*
1191 * We have no state for the source and group in question.
1192 * We can simply acknowledge the graft, since we know
1193 * that we have no prune state, and grafts are requests
1194 * to remove prune state.
1195 */
1196 send_graft_ack(dst, src, graft_src, graft_grp);
1197 logit(LOG_DEBUG, 0, "%s (%s %s) from %s",
1198 "graft received with no kernel entry for",
1199 inet_fmt(graft_src, s1), inet_fmt(graft_grp, s2),
1200 inet_fmt(src, s3));
1201 return;
1202 }
1203 }
1204
1205 /*
1206 * Find out which group is involved first of all,
1207 * then determine if a graft was sent.
1208 * If no graft was sent, ignore the message.
1209 * If a graft was sent and the ack is from the right
1210 * source, remove the graft timer so that we don't
1211 * have to send a graft again.
1212 */
1213 void
1214 accept_g_ack(u_int32_t src, u_int32_t dst, char *p, int datalen)
1215 {
1216 struct gtable *g;
1217 vifi_t vifi;
1218 u_int32_t grft_src;
1219 u_int32_t grft_grp;
1220 int i;
1221
1222 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1223 logit(LOG_INFO, 0,
1224 "ignoring graft ack from non-neighbor %s",
1225 inet_fmt(src, s1));
1226 return;
1227 }
1228
1229 if (datalen < 0 || datalen > 8) {
1230 logit(LOG_WARNING, 0,
1231 "received non-decipherable graft ack from %s",
1232 inet_fmt(src, s1));
1233 return;
1234 }
1235
1236 for (i = 0; i< 4; i++)
1237 ((char *)&grft_src)[i] = *p++;
1238 for (i = 0; i< 4; i++)
1239 ((char *)&grft_grp)[i] = *p++;
1240
1241 logit(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1242 inet_fmt(src, s1), vifi,
1243 inet_fmt(grft_src, s2), inet_fmt(grft_grp, s3));
1244
1245 /*
1246 * Find the subnet for the graft ack
1247 */
1248 if (find_src_grp(grft_src, 0, grft_grp)) {
1249 g = gtp ? gtp->gt_gnext : kernel_table;
1250 g->gt_grftsnt = 0;
1251 } else {
1252 logit(LOG_WARNING, 0, "%s (%s, %s) from %s",
1253 "rcvd graft ack with no kernel entry for",
1254 inet_fmt(grft_src, s1), inet_fmt(grft_grp, s2),
1255 inet_fmt(src, s3));
1256 return;
1257 }
1258 }
1259
1260
1261 /*
1262 * Free all prune entries and kernel routes.
1263 * Normally, this should inform the kernel that all of its routes
1264 * are going away, but this is only called by restart(), which is
1265 * about to call MRT_DONE, which does that anyway.
1266 */
1267 void
1268 free_all_prunes(void)
1269 {
1270 struct rtentry *r;
1271 struct gtable *g, *prev_g;
1272 struct stable *s, *prev_s;
1273 struct ptable *p, *prev_p;
1274
1275 for (r = routing_table; r; r = r->rt_next) {
1276 g = r->rt_groups;
1277 while (g) {
1278 s = g->gt_srctbl;
1279 while (s) {
1280 prev_s = s;
1281 s = s->st_next;
1282 free(prev_s);
1283 }
1284
1285 p = g->gt_pruntbl;
1286 while (p) {
1287 prev_p = p;
1288 p = p->pt_next;
1289 free(prev_p);
1290 }
1291
1292 prev_g = g;
1293 g = g->gt_next;
1294 free(prev_g);
1295 }
1296 r->rt_groups = NULL;
1297 }
1298 kernel_table = NULL;
1299
1300 g = kernel_no_route;
1301 while (g) {
1302 free(g->gt_srctbl);
1303
1304 prev_g = g;
1305 g = g->gt_next;
1306 free(prev_g);
1307 }
1308 kernel_no_route = NULL;
1309 }
1310
1311 /*
1312 * When a new route is created, search
1313 * a) The less-specific part of the routing table
1314 * b) The route-less kernel table
1315 * for sources that the new route might want to handle.
1316 *
1317 * "Inheriting" these sources might be cleanest, but simply deleting
1318 * them is easier, and letting the kernel re-request them.
1319 */
1320 void
1321 steal_sources(struct rtentry *rt)
1322 {
1323 struct rtentry *rp;
1324 struct gtable *gt, **gtnp;
1325 struct stable *st, **stnp;
1326
1327 for (rp = rt->rt_next; rp; rp = rp->rt_next) {
1328 if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
1329 logit(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
1330 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1331 inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1332 for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
1333 stnp = &gt->gt_srctbl;
1334 while ((st = *stnp) != NULL) {
1335 if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
1336 logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1337 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1338 inet_fmt(st->st_origin, s3),
1339 inet_fmt(gt->gt_mcastgrp, s4),
1340 inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1341 if (k_del_rg(st->st_origin, gt) < 0) {
1342 logit(LOG_WARNING, errno, "%s (%s, %s)",
1343 "steal_sources trying to delete",
1344 inet_fmt(st->st_origin, s1),
1345 inet_fmt(gt->gt_mcastgrp, s2));
1346 }
1347 *stnp = st->st_next;
1348 kroutes--;
1349 free(st);
1350 } else {
1351 stnp = &st->st_next;
1352 }
1353 }
1354 }
1355 }
1356 }
1357
1358 gtnp = &kernel_no_route;
1359 while ((gt = *gtnp) != NULL) {
1360 if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
1361 == rt->rt_origin)) {
1362 logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1363 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1364 inet_fmt(gt->gt_srctbl->st_origin, s3),
1365 inet_fmt(gt->gt_mcastgrp, s4),
1366 "no_route table");
1367 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1368 logit(LOG_WARNING, errno, "%s (%s %s)",
1369 "steal_sources trying to delete",
1370 inet_fmt(gt->gt_srctbl->st_origin, s1),
1371 inet_fmt(gt->gt_mcastgrp, s2));
1372 }
1373 kroutes--;
1374 free(gt->gt_srctbl);
1375 *gtnp = gt->gt_next;
1376 if (gt->gt_next)
1377 gt->gt_next->gt_prev = gt->gt_prev;
1378 free(gt);
1379 } else {
1380 gtnp = &gt->gt_next;
1381 }
1382 }
1383 }
1384
1385 /*
1386 * Advance the timers on all the cache entries.
1387 * If there are any entries whose timers have expired,
1388 * remove these entries from the kernel cache.
1389 */
1390 void
1391 age_table_entry(void)
1392 {
1393 struct rtentry *r;
1394 struct gtable *gt, **gtnptr;
1395 struct stable *st, **stnp;
1396 struct ptable *pt, **ptnp;
1397 struct sioc_sg_req sg_req;
1398
1399 logit(LOG_DEBUG, 0, "ageing entries");
1400
1401 gtnptr = &kernel_table;
1402 while ((gt = *gtnptr) != NULL) {
1403 r = gt->gt_route;
1404
1405 /* advance the timer for the kernel entry */
1406 gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1407
1408 /* decrement prune timer if need be */
1409 if (gt->gt_prsent_timer > 0) {
1410 gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
1411 if (gt->gt_prsent_timer <= 0) {
1412 logit(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
1413 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1414 inet_fmt(gt->gt_mcastgrp, s2));
1415 gt->gt_prsent_timer = -1;
1416 }
1417 }
1418
1419 /* retransmit graft if graft sent flag is still set */
1420 if (gt->gt_grftsnt) {
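/* retransmit on an exponential backoff: passes 2, 4, 8, ... 128 */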
1421 switch(gt->gt_grftsnt++) {
1422 case 2:
1423 case 4:
1424 case 8:
1425 case 16:
1426 case 32:
1427 case 64:
1428 case 128:
1429 send_graft(gt);
1430 break;
1431 default:
1432 break;
1433 }
1434 }
1435
1436 /*
1437 * Age prunes
1438 *
1439 * If a prune expires, forward again on that vif.
1440 */
1441 ptnp = &gt->gt_pruntbl;
1442 while ((pt = *ptnp) != NULL) {
1443 if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
1444 logit(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
1445 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1446 inet_fmt(gt->gt_mcastgrp, s2),
1447 inet_fmt(pt->pt_router, s3),
1448 pt->pt_vifi);
1449
1450 expire_prune(pt->pt_vifi, gt);
1451
1452 /* remove the router's prune entry and await new one */
1453 *ptnp = pt->pt_next;
1454 free(pt);
1455 } else {
1456 ptnp = &pt->pt_next;
1457 }
1458 }
1459
1460 /*
1461 * If the cache entry has expired, delete source table entries for
1462 * silent sources. If there are no source entries left, and there
1463 * are no downstream prunes, then the entry is deleted.
1464 * Otherwise, the cache entry's timer is refreshed.
1465 */
1466 if (gt->gt_timer <= 0) {
1467 /* Check for traffic before deleting source entries */
1468 sg_req.grp.s_addr = gt->gt_mcastgrp;
1469 stnp = &gt->gt_srctbl;
1470 while ((st = *stnp) != NULL) {
1471 sg_req.src.s_addr = st->st_origin;
1472 if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) == -1) {
1473 logit(LOG_WARNING, errno, "%s (%s %s)",
1474 "age_table_entry: SIOCGETSGCNT failing for",
1475 inet_fmt(st->st_origin, s1),
1476 inet_fmt(gt->gt_mcastgrp, s2));
1477 /* Make sure it gets deleted below */
1478 sg_req.pktcnt = st->st_pktcnt;
1479 }
1480 if (sg_req.pktcnt == st->st_pktcnt) {
1481 *stnp = st->st_next;
1482 logit(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
1483 inet_fmt(st->st_origin, s1),
1484 inet_fmt(gt->gt_mcastgrp, s2));
1485 if (k_del_rg(st->st_origin, gt) < 0) {
1486 logit(LOG_WARNING, errno,
1487 "age_table_entry trying to delete (%s %s)",
1488 inet_fmt(st->st_origin, s1),
1489 inet_fmt(gt->gt_mcastgrp, s2));
1490 }
1491 kroutes--;
1492 free(st);
1493 } else {
1494 st->st_pktcnt = sg_req.pktcnt;
1495 stnp = &st->st_next;
1496 }
1497 }
1498
1499 /*
1500 * Retain the group entry if we have downstream prunes or if
1501 * there is at least one source in the list that still has
1502 * traffic, or if our upstream prune timer is running.
1503 */
1504 if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
1505 gt->gt_prsent_timer > 0) {
1506 gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
1507 if (gt->gt_prsent_timer == -1) {
1508 if (gt->gt_grpmems == 0)
1509 send_prune(gt);
1510 else
1511 gt->gt_prsent_timer = 0;
1512 }
1513 gtnptr = &gt->gt_gnext;
1514 continue;
1515 }
1516
1517 logit(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
1518 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1519 inet_fmt(gt->gt_mcastgrp, s2));
1520
1521 if (gt->gt_prev)
1522 gt->gt_prev->gt_next = gt->gt_next;
1523 else
1524 gt->gt_route->rt_groups = gt->gt_next;
1525 if (gt->gt_next)
1526 gt->gt_next->gt_prev = gt->gt_prev;
1527
1528 if (gt->gt_gprev) {
1529 gt->gt_gprev->gt_gnext = gt->gt_gnext;
1530 gtnptr = &gt->gt_gprev->gt_gnext;
1531 } else {
1532 kernel_table = gt->gt_gnext;
1533 gtnptr = &kernel_table;
1534 }
1535 if (gt->gt_gnext)
1536 gt->gt_gnext->gt_gprev = gt->gt_gprev;
1537
1538 #ifdef RSRR
1539 /* Send route change notification to reservation protocol. */
1540 rsrr_cache_send(gt,0);
1541 rsrr_cache_clean(gt);
1542 #endif /* RSRR */
1543 free((char *)gt);
1544 } else {
1545 if (gt->gt_prsent_timer == -1) {
1546 if (gt->gt_grpmems == 0)
1547 send_prune(gt);
1548 else
1549 gt->gt_prsent_timer = 0;
1550 }
1551 gtnptr = &gt->gt_gnext;
1552 }
1553 }
1554
1555 /*
1556 * When traversing the no_route table, the decision is much easier.
1557 * Just delete it if it has timed out.
1558 */
1559 gtnptr = &kernel_no_route;
1560 while ((gt = *gtnptr) != NULL) {
1561 /* advance the timer for the kernel entry */
1562 gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1563
1564 if (gt->gt_timer < 0) {
1565 if (gt->gt_srctbl) {
1566 if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1567 logit(LOG_WARNING, errno, "%s (%s %s)",
1568 "age_table_entry trying to delete no-route",
1569 inet_fmt(gt->gt_srctbl->st_origin, s1),
1570 inet_fmt(gt->gt_mcastgrp, s2));
1571 }
1572 free(gt->gt_srctbl);
1573 }
1574 *gtnptr = gt->gt_next;
1575 if (gt->gt_next)
1576 gt->gt_next->gt_prev = gt->gt_prev;
1577
1578 free((char *)gt);
1579 } else {
1580 gtnptr = &gt->gt_next;
1581 }
1582 }
1583 }
1584
1585 /*
1586 * Modify the kernel to forward packets when one or multiple prunes that
1587 * were received on the vif given by vifi, for the group given by gt,
1588 * have expired.
1589 */
1590 static void
1591 expire_prune(vifi_t vifi, struct gtable *gt)
1592 {
1593 /*
1594 * No need to send a graft, any prunes that we sent
1595 * will expire before any prunes that we have received.
1596 */
1597 if (gt->gt_prsent_timer > 0) {
1598 logit(LOG_DEBUG, 0, "prune expired with %d left on %s",
1599 gt->gt_prsent_timer, "prsent_timer");
1600 gt->gt_prsent_timer = 0;
1601 }
1602
1603 /* modify the kernel entry to forward packets */
1604 if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1605 struct rtentry *rt = gt->gt_route;
1606 VIFM_SET(vifi, gt->gt_grpmems);
1607 logit(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1608 inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1609 inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems, vifi);
1610
1611 prun_add_ttls(gt);
1612 update_kernel(gt);
1613 #ifdef RSRR
1614 /* Send route change notification to reservation protocol. */
1615 rsrr_cache_send(gt,1);
1616 #endif /* RSRR */
1617 }
1618 }
1619
1620
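/*
 * Format a time value into a small static buffer.  Two buffers are
 * alternated so that two scaletime() results can appear in a single
 * fprintf() call (see dump_cache() below).
 */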
1621 static char *
1622 scaletime(time_t t)
1623 {
1624 static char buf1[5];
1625 static char buf2[5];
1626 static char *buf=buf1;
1627 char s;
1628 char *p;
1629
1630 p = buf;
1631 if (buf == buf1)
1632 buf = buf2;
1633 else
1634 buf = buf1;
1635
1636 if (t < 120) {
1637 s = 's';
1638 } else if (t < 3600) {
1639 t /= 60;
1640 s = 'm';
1641 } else if (t < 86400) {
1642 t /= 3600;
1643 s = 'h';
1644 } else if (t < 864000) {
1645 t /= 86400;
1646 s = 'd';
1647 } else {
1648 t /= 604800;
1649 s = 'w';
1650 }
1651 if (t > 999)
1652 return "*** ";
1653
1654 snprintf(p, 5, "%3d%c", (int)t, s);
1655
1656 return p;
1657 }
1658
1659 /*
1660 * Print the contents of the cache table on file 'fp2'.
1661 */
1662 void
1663 dump_cache(FILE *fp2)
1664 {
1665 struct rtentry *r;
1666 struct gtable *gt;
1667 struct stable *st;
1668 vifi_t i;
1669 time_t thyme = time(NULL);
1670
1671 fprintf(fp2,
1672 "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1673 " Origin Mcast-group CTmr Age Ptmr IVif Forwvifs\n");
1674
1675 for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1676 if (gt->gt_srctbl) {
1677 fprintf(fp2, " %-18s %-15s %-4s %-4s - -1\n",
1678 inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff, s1),
1679 inet_fmt(gt->gt_mcastgrp, s2), scaletime(gt->gt_timer),
1680 scaletime(thyme - gt->gt_ctime));
1681 fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin, s1));
1682 }
1683 }
1684
1685 for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1686 r = gt->gt_route;
1687 fprintf(fp2, " %-18s %-15s",
1688 inet_fmts(r->rt_origin, r->rt_originmask, s1),
1689 inet_fmt(gt->gt_mcastgrp, s2));
1690
1691 fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1692
1693 fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1694 gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1695 " -");
1696
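/*
 * Incoming-vif flags: 'P' if a prune has been sent upstream,
 * 'B' if the parent vif is a scope boundary for this group.
 * In the Forwvifs column, vifs tagged 'b' (scoped) or 'p'
 * (pruned) are not currently being forwarded on.
 */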
1697 fprintf(fp2, "%2u%c%c ", r->rt_parent,
1698 gt->gt_prsent_timer ? 'P' : ' ',
1699 VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1700
1701 for (i = 0; i < numvifs; ++i) {
1702 if (VIFM_ISSET(i, gt->gt_grpmems))
1703 fprintf(fp2, " %u ", i);
1704 else if (VIFM_ISSET(i, r->rt_children) &&
1705 !VIFM_ISSET(i, r->rt_leaves))
1706 fprintf(fp2, " %u%c", i,
1707 VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1708 }
1709 fprintf(fp2, "\n");
1710 for (st = gt->gt_srctbl; st; st = st->st_next) {
1711 fprintf(fp2, ">%s\n", inet_fmt(st->st_origin, s1));
1712 }
1713 #ifdef DEBUG_PRUNES
1714 for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1715 fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router, s1),
1716 pt->pt_vifi, pt->pt_timer);
1717 }
1718 #endif
1719 }
1720 }
1721
1722 /*
1723 * Traceroute function which returns traceroute replies to the requesting
1724 * router. Also forwards the request to downstream routers.
1725 * NOTE: u_int no is narrowed to u_char
1726 */
1727 void
1728 accept_mtrace(u_int32_t src, u_int32_t dst, u_int32_t group,
1729 char *data, u_int no, int datalen)
1730 {
1731 u_char type;
1732 struct rtentry *rt;
1733 struct gtable *gt;
1734 struct tr_query *qry;
1735 struct tr_resp *resp;
1736 int vifi;
1737 char *p;
1738 int rcount;
1739 int errcode = TR_NO_ERR;
1740 int resptype;
1741 struct timeval tp;
1742 struct sioc_vif_req v_req;
1743 struct sioc_sg_req sg_req;
1744
1745 /* Remember qid across invocations */
1746 static u_int32_t oqid = 0;
1747
1748 /* timestamp the request/response */
1749 gettimeofday(&tp, 0);
1750
1751 /*
1752 * Check if it is a query or a response
1753 */
1754 if (datalen == QLEN) {
1755 type = QUERY;
1756 logit(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
1757 inet_fmt(src, s1), inet_fmt(dst, s2));
1758 }
1759 else if ((datalen - QLEN) % RLEN == 0) {
1760 type = RESP;
1761 logit(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
1762 inet_fmt(src, s1), inet_fmt(dst, s2));
1763 if (IN_MULTICAST(ntohl(dst))) {
1764 logit(LOG_DEBUG, 0, "Dropping multicast response");
1765 return;
1766 }
1767 }
1768 else {
1769 logit(LOG_WARNING, 0, "%s from %s to %s",
1770 "Non decipherable traceroute request received",
1771 inet_fmt(src, s1), inet_fmt(dst, s2));
1772 return;
1773 }
1774
1775 qry = (struct tr_query *)data;
1776
1777 /*
1778 * if it is a packet with all reports filled, drop it
1779 */
1780 if ((rcount = (datalen - QLEN)/RLEN) == no) {
1781 logit(LOG_DEBUG, 0, "packet with all reports filled in");
1782 return;
1783 }
1784
1785 logit(LOG_DEBUG, 0, "s: %s g: %s d: %s ", inet_fmt(qry->tr_src, s1),
1786 inet_fmt(group, s2), inet_fmt(qry->tr_dst, s3));
1787 logit(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
1788 inet_fmt(qry->tr_raddr, s1));
1789 logit(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);
1790
1791 /* determine the routing table entry for this traceroute */
1792 rt = determine_route(qry->tr_src);
1793 if (rt) {
1794 logit(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
1795 rt->rt_parent, inet_fmt(rt->rt_gateway, s1), rt->rt_metric);
1796 logit(LOG_DEBUG, 0, "rt origin %s",
1797 inet_fmts(rt->rt_origin, rt->rt_originmask, s1));
1798 } else
1799 logit(LOG_DEBUG, 0, "...no route");
1800
1801 /*
1802 * Query type packet - check if a route entry exists.
1803 * Check if the query destination is on a vif connected to me,
1804 * and if so, whether I should start the response back.
1805 */
1806 if (type == QUERY) {
1807 if (oqid == qry->tr_qid) {
1808 /*
1809 * If the multicast router is a member of the group being
1810 * queried, and the query is multicasted, then the router can
1811 * receive multiple copies of the same query. If we have already
1812 * replied to this traceroute, just ignore it this time.
1813 *
1814 * This is not a total solution, but since if this fails you
1815 * only get N copies, N <= the number of interfaces on the router,
1816 * it is not fatal.
1817 */
1818 logit(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
1819 return;
1820 }
1821
1822 if (rt == NULL) {
1823 logit(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
1824 inet_fmt(qry->tr_src, s1));
1825 if (IN_MULTICAST(ntohl(dst)))
1826 return;
1827 }
1828 vifi = find_vif(qry->tr_dst, 0);
1829
1830 if (vifi == NO_VIF) {
1831 /* The traceroute destination is not on one of my subnet vifs. */
1832 logit(LOG_DEBUG, 0, "Destination %s not an interface",
1833 inet_fmt(qry->tr_dst, s1));
1834 if (IN_MULTICAST(ntohl(dst)))
1835 return;
1836 errcode = TR_WRONG_IF;
1837 } else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
1838 logit(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
1839 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
1840 if (IN_MULTICAST(ntohl(dst)))
1841 return;
1842 errcode = TR_WRONG_IF;
1843 }
1844 }
1845 else {
1846 /*
1847 * determine which interface the packet came in on
1848 * RESP packets travel hop-by-hop so this either traversed
1849 * a tunnel or came from a directly attached mrouter.
1850 */
1851 if ((vifi = find_vif(src, dst)) == NO_VIF) {
1852 logit(LOG_DEBUG, 0, "Wrong interface for packet");
1853 errcode = TR_WRONG_IF;
1854 }
1855 }
1856
1857 /* Now that we've decided to send a response, save the qid */
1858 oqid = qry->tr_qid;
1859
1860 logit(LOG_DEBUG, 0, "Sending traceroute response");
1861
1862 /* copy the packet to the sending buffer */
1863 p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
1864
1865 bcopy(data, p, datalen);
1866
1867 p += datalen;
1868
1869 /*
1870 * If there is no room to insert our reply, coopt the previous hop
1871 * error indication to relay this fact.
1872 */
1873 if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
1874 resp = (struct tr_resp *)p - 1;
1875 resp->tr_rflags = TR_NO_SPACE;
1876 rt = NULL;
1877 goto sendit;
1878 }
1879
1880 /*
1881 * fill in initial response fields
1882 */
1883 resp = (struct tr_resp *)p;
1884 bzero(resp, sizeof(struct tr_resp));
1885 datalen += RLEN;
1886
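/*
 * Query arrival time, packed NTP-style: the low 16 bits of the
 * seconds since 1900 in the upper half and an approximate
 * fractional second (usec >> 4) in the lower half.
 */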
1887 resp->tr_qarr = htonl((tp.tv_sec + JAN_1970) << 16) +
1888 ((tp.tv_usec >> 4) & 0xffff);
1889
1890 resp->tr_rproto = PROTO_DVMRP;
1891 if (errcode != TR_NO_ERR) {
1892 resp->tr_rflags = errcode;
1893 rt = NULL; /* hack to enforce send straight to requestor */
1894 goto sendit;
1895 }
1896 resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
1897 resp->tr_fttl = uvifs[vifi].uv_threshold;
1898 resp->tr_rflags = TR_NO_ERR;
1899
1900 /*
1901 * obtain # of packets out on interface
1902 */
1903 v_req.vifi = vifi;
1904 if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) == 0)
1905 resp->tr_vifout = htonl(v_req.ocount);
1906
1907 /*
1908 * fill in scoping & pruning information
1909 */
1910 if (rt)
1911 for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
1912 if (gt->gt_mcastgrp >= group)
1913 break;
1914 }
1915 else
1916 gt = NULL;
1917
1918 if (gt && gt->gt_mcastgrp == group) {
1919 sg_req.src.s_addr = qry->tr_src;
1920 sg_req.grp.s_addr = group;
1921 if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) == 0)
1922 resp->tr_pktcnt = htonl(sg_req.pktcnt);
1923
1924 if (VIFM_ISSET(vifi, gt->gt_scope))
1925 resp->tr_rflags = TR_SCOPED;
1926 else if (gt->gt_prsent_timer)
1927 resp->tr_rflags = TR_PRUNED;
1928 else if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1929 if (VIFM_ISSET(vifi, rt->rt_children) &&
1930 !VIFM_ISSET(vifi, rt->rt_leaves))
1931 resp->tr_rflags = TR_OPRUNED;
1932 else
1933 resp->tr_rflags = TR_NO_FWD;
1934 }
1935 } else {
1936 if (scoped_addr(vifi, group))
1937 resp->tr_rflags = TR_SCOPED;
1938 else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
1939 resp->tr_rflags = TR_NO_FWD;
1940 }
1941
1942 /*
1943 * if no rte exists, set NO_RTE error
1944 */
1945 if (rt == NULL) {
1946 src = dst; /* the dst address of resp. pkt */
1947 resp->tr_inaddr = 0;
1948 resp->tr_rflags = TR_NO_RTE;
1949 resp->tr_rmtaddr = 0;
1950 } else {
1951 /* get # of packets in on interface */
1952 v_req.vifi = rt->rt_parent;
1953 if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) == 0)
1954 resp->tr_vifin = htonl(v_req.icount);
1955
1956 MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
1957 src = uvifs[rt->rt_parent].uv_lcl_addr;
1958 resp->tr_inaddr = src;
1959 resp->tr_rmtaddr = rt->rt_gateway;
1960 if (!VIFM_ISSET(vifi, rt->rt_children)) {
1961 logit(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
1962 inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
1963 resp->tr_rflags = TR_WRONG_IF;
1964 }
1965 if (rt->rt_metric >= UNREACHABLE) {
1966 resp->tr_rflags = TR_NO_RTE;
1967 /* Hack to send reply directly */
1968 rt = NULL;
1969 }
1970 }
1971
1972 sendit:
1973 /*
1974 * if metric is 1 or no. of reports is 1, send response to requestor
1975 * else send to upstream router. If the upstream router can't handle
1976 * mtrace, set an error code and send to requestor anyway.
1977 */
1978 logit(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);
1979
1980 if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
1981 resptype = IGMP_MTRACE_REPLY;
1982 dst = qry->tr_raddr;
1983 } else
1984 if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
1985 dst = qry->tr_raddr;
1986 resp->tr_rflags = TR_OLD_ROUTER;
1987 resptype = IGMP_MTRACE_REPLY;
1988 } else {
1989 dst = rt->rt_gateway;
1990 resptype = IGMP_MTRACE_QUERY;
1991 }
1992
1993 if (IN_MULTICAST(ntohl(dst))) {
1994 /*
1995 * Send the reply on a known multicast capable vif.
1996 * If we don't have one, we can't source any multicasts anyway.
1997 */
1998 if (phys_vif != -1) {
1999 logit(LOG_DEBUG, 0, "Sending reply to %s from %s",
2000 inet_fmt(dst, s1), inet_fmt(uvifs[phys_vif].uv_lcl_addr, s2));
2001 k_set_ttl(qry->tr_rttl);
2002 send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
2003 resptype, no, group,
2004 datalen);
2005 k_set_ttl(1);
2006 } else
2007 logit(LOG_INFO, 0, "No enabled phyints -- %s",
2008 "dropping traceroute reply");
2009 } else {
2010 logit(LOG_DEBUG, 0, "Sending %s to %s from %s",
2011 resptype == IGMP_MTRACE_REPLY ? "reply" : "request on",
2012 inet_fmt(dst, s1), inet_fmt(src, s2));
2013
2014 send_igmp(src, dst,
2015 resptype, no, group,
2016 datalen);
2017 }
2018 return;
2019 }
2020