1 /*
2  * Copyright (c) 2010 Christiano F. Haesbaert <haesbaert@haesbaert.org>
3  *
4  * Permission to use, copy, modify, and distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <sys/time.h>
18 #include <sys/types.h>
19 #include <sys/socket.h>
20 #include <sys/utsname.h>
21 
22 #include <netinet/in.h>
23 #include <arpa/inet.h>
24 
25 #include <err.h>
26 #include <unistd.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 
30 #include "mdnsd.h"
31 #include "log.h"
32 
33 int		 question_cmp(struct question *, struct question *);
34 struct question *question_lookup(struct rrset *);
35 
36 RB_HEAD(cache_tree, cache_node) cache_tree;
37 RB_PROTOTYPE(cache_tree, cache_node, entry, cache_node_cmp);
38 RB_GENERATE(cache_tree, cache_node, entry, cache_node_cmp);
39 
40 RB_HEAD(question_tree, question);
41 RB_PROTOTYPE(question_tree, question, qst_entry, question_cmp);
42 RB_GENERATE(question_tree, question, qst_entry, question_cmp);
43 
44 extern struct mdnsd_conf	*conf;
45 struct question_tree		 question_tree;
46 struct cache_tree		 cache_tree;
47 
48 pg_q pg_queue;
49 pge_q pge_queue;
50 
51 /*
52  * RR cache
53  */
54 
/*
 * Initialize the (empty) resource record cache tree.
 */
void
cache_init(void)
{
	RB_INIT(&cache_tree);
}
60 
/*
 * Red-black tree comparator for cache nodes: order by domain name.
 */
int
cache_node_cmp(struct cache_node *a, struct cache_node *b)
{
	return (strcmp(a->dname, b->dname));
}
66 
67 struct rr *
cache_next_by_rrs(struct rr * rr)68 cache_next_by_rrs(struct rr *rr)
69 {
70 	struct rr *rr_aux;
71 
72 	for (rr_aux = LIST_NEXT(rr, centry);
73 	     rr_aux != NULL;
74 	     rr_aux = LIST_NEXT(rr_aux, centry)) {
75 		if (rrset_cmp(&rr->rrs, &rr_aux->rrs) == 0)
76 			return (rr_aux);
77 	}
78 
79 	return (NULL);
80 }
81 
82 struct cache_node *
cache_lookup_dname(const char * dname)83 cache_lookup_dname(const char *dname)
84 {
85 	struct cache_node s;
86 
87 	bzero(&s, sizeof(s));
88 	strlcpy(s.dname, dname, sizeof(s.dname));
89 
90 	return (RB_FIND(cache_tree, &cache_tree, &s));
91 }
92 
93 struct rr *
cache_lookup(struct rrset * rrs)94 cache_lookup(struct rrset *rrs)
95 {
96 	struct cache_node	*cn;
97 	struct rr		*rr;
98 
99 	cn = cache_lookup_dname(rrs->dname);
100 	if (cn == NULL)
101 		return (NULL);
102 	LIST_FOREACH(rr, &cn->rr_list, centry) {
103 		if (rrset_cmp(&rr->rrs, rrs) == 0)
104 			return (rr);
105 	}
106 
107 	return (NULL);
108 }
109 
/* Process an external rr */
/*
 * Integrate an RR received from the network into the cache.  Takes
 * ownership of rr: it is either inserted via cache_insert() or freed
 * here.  Returns 0 on success, -1 on error or conflict.
 */
int
cache_process(struct rr *rr)
{
	struct cache_node *cn = NULL;
	struct rr *rr_aux, *next;

	/* Sanity check */
	if (RR_AUTH(rr)) {
		log_warnx("cache_process on auth rr");
		return (-1);
	}

	/* Consider received RR as published */
	rr->flags |= RR_FLAG_PUBLISHED;

	/* Record receiving time */
	evtimer_set(&rr->timer, cache_rev, rr);
	if (clock_gettime(CLOCK_MONOTONIC, &rr->age) == -1)
		fatal("clock_gettime");

	/*
	 * If no entries go forward and insert
	 */
	if ((cn = cache_lookup_dname(rr->rrs.dname)) == NULL)
		return (cache_insert(rr));

	/*
	 * Check if we already have a matching RR which is ours.
	 */
	for (rr_aux = cache_lookup(&rr->rrs);
	     rr_aux != NULL;
	     rr_aux = next) {
		/* Save the successor first: cache_delete() may free rr_aux. */
		next = cache_next_by_rrs(rr_aux);
		if (rrset_cmp(&rr->rrs, &rr_aux->rrs) != 0)
			continue;

		if (RR_AUTH(rr_aux)) {
			/* Same rdata */
			if (rr_rdata_cmp(rr, rr_aux) == 0) {
				/*
				 * This may be a goodbye, defend our RR batman.
				 */
				if (rr->ttl <= rr_aux->ttl / 2) {
					log_warnx("cache_process: defending %s",
					    rrs_str(&rr->rrs));
					rr_send_an(rr_aux);
				} else {
					/* TODO Cancel possible deletion */
					log_warnx("cache_process: recover %s",
					    rrs_str(&rr->rrs));
					free(rr);
					return (0);
				}
				free(rr);
				return (0);
			}
			/*
			 * RDATA isn't equal, if either we, or they are unique,
			 * this is a conflict.
			 */
			if (RR_UNIQ(rr) || RR_UNIQ(rr_aux)) {
				log_warnx("cache_process: conflict for %s",
				    rrs_str(&rr->rrs));
				conflict_resolve_by_rr(rr_aux);
				free(rr);
				return (-1);
			}
		}
		else { /* Not ours */
			/* Same rdata */
			if (rr_rdata_cmp(rr, rr_aux) == 0) {
				/* A goodbye RR */
				if (rr->ttl == 0) {
					log_warnx("cache_process: goodbye %s",
					    rrs_str(&rr->rrs));
					cache_delete(rr_aux);
					free(rr);
					return (0);
				}
				/* Cache refresh */
				log_warnx("cache_process: refresh %s",
				    rrs_str(&rr->rrs));
				rr_aux->ttl = rr->ttl;
				rr_aux->revision = 0;
				cache_schedrev(rr_aux);
				free(rr);

				return (0);
			}
		}
	}
	/* Got a goodbye for a record we don't have */
	if (rr->ttl == 0) {
		free(rr);
		return (0);
	}

	return (cache_insert(rr));
}
210 
/*
 * Link rr into the cache, creating its cache_node when needed.  For a
 * unique (cache-flush) record, every previously known record of the
 * same RRSet is deleted first.  Published records trigger controller
 * notification; external (non-auth) records get a revision timer.
 * Always returns 0.
 */
int
cache_insert(struct rr *rr)
{
	struct cache_node *cn;
	struct rr *rr_aux, *next;
	/*
	 * If no entries, make a new node
	 */
	if ((cn = cache_lookup_dname(rr->rrs.dname)) == NULL) {
		if ((cn = calloc(1, sizeof(*cn))) == NULL)
			fatal("calloc");
		(void)strlcpy(cn->dname, rr->rrs.dname,
		    sizeof(cn->dname));
		rr->cn = cn;
		LIST_INIT(&cn->rr_list);
		if (RB_INSERT(cache_tree, &cache_tree, cn) != NULL)
			fatal("cache_process: RB_INSERT");
	}

	/*
	 * If this is a unique record, we must disregard everything we know so
	 * far about that RRSet.
	 */
	if (RR_UNIQ(rr)) {
		/* Clean up all records and substitute */
		for (rr_aux = cache_lookup(&rr->rrs);
		     rr_aux != NULL;
		     rr_aux = next) {
			/* Save successor: cache_delete() frees rr_aux. */
			next = cache_next_by_rrs(rr_aux);
			if (rrset_cmp(&rr->rrs, &rr_aux->rrs) != 0)
				continue;
			/* This should not happen */
			if (RR_AUTH(rr))
				fatalx("cache_process: Unexpected auth");
			log_debug("cache_delete 1");
			if (cache_delete(rr_aux) == 1)
				break;
		}
		/* cache_delete() may free cn, so we need to lookup again */
		/* XXX make a function for this */
		if ((cn = cache_lookup_dname(rr->rrs.dname)) == NULL) {
			if ((cn = calloc(1, sizeof(*cn))) == NULL)
				fatal("calloc");
			(void)strlcpy(cn->dname, rr->rrs.dname,
			    sizeof(cn->dname));
			LIST_INIT(&cn->rr_list);
			if (RB_INSERT(cache_tree, &cache_tree, cn) != NULL)
				fatal("cache_process: RB_INSERT");
		}
		log_debug("cache_insert: (new, cleaned up) (%p) %s",
		    rr, rrs_str(&rr->rrs));
		/* Go on, cn is fine now. */
	}

	if (cn == NULL)
		fatalx("cache_insert: cn is NULL !");
	rr->cn = cn;
	LIST_INSERT_HEAD(&cn->rr_list, rr, centry);
	if (rr->flags & RR_FLAG_PUBLISHED)
		rr_notify_in(rr);

	/* Only do revisions for external RR */
	if (!RR_AUTH(rr))
		cache_schedrev(rr);

	return (0);
}
278 
/*
 * Unlink rr from the cache and free it.  Published authoritative
 * records send a goodbye first; published records of any kind notify
 * interested controllers.  Returns 1 when the enclosing cache_node was
 * also freed (its RR list became empty), 0 otherwise.
 */
int
cache_delete(struct rr *rr)
{
	struct cache_node *cn;

	log_debug("cache_delete: %s", rrs_str(&rr->rrs));
	if (rr->flags & RR_FLAG_PUBLISHED &&
	    RR_AUTH(rr))
		rr_send_goodbye(rr);
	if (rr->flags & RR_FLAG_PUBLISHED)
		rr_notify_out(rr);
	cn = rr->cn;
	if (evtimer_pending(&rr->timer, NULL))
		evtimer_del(&rr->timer);
	LIST_REMOVE(rr, centry);
	free(rr);
	/* Last record on the node: drop the node from the tree too. */
	if (LIST_EMPTY(&cn->rr_list)) {
		RB_REMOVE(cache_tree, &cache_tree, cn);
		free(cn);

		return (1);	/* cache_node freed */
	}

	return (0);
}
304 
/*
 * Schedule the next cache revision for an external RR.  Revisions fire
 * at roughly 80%, 90% and 95% of the TTL (each with a small random
 * spread), expressed as deltas from the previous revision; the fourth
 * revision deletes the record one second later.  Bumps rr->revision.
 */
void
cache_schedrev(struct rr *rr)
{
	struct timeval tv;
	u_int32_t var;

	timerclear(&tv);

	switch (rr->revision) {
	case 0:
		/* Expire at 80%-82% of ttl */
		var = 80 + arc4random_uniform(3);
		tv.tv_sec = ((10 * rr->ttl) * var) / 1000;
		break;
	case 1:
		/* Expire at 90%-92% of ttl */
		var = 90 + arc4random_uniform(3);
		tv.tv_sec  = ((10 * rr->ttl) * var) / 1000;
		tv.tv_sec -= ((10 * rr->ttl) * 80)  / 1000;
		break;
	case 2:
		/* Expire at 95%-97% of ttl */
		var = 95 + arc4random_uniform(3);
		tv.tv_sec  = ((10 * rr->ttl) * var) / 1000;
		tv.tv_sec -= ((10 * rr->ttl) * 90)  / 1000;
		break;
	case 3:	/* expired, delete from cache in 1 sec */
		tv.tv_sec = 1;
		break;
	}
/*	log_debug("cache_schedrev: schedule rr type: %s, name: %s (%d)", */
/*	    rr_type_name(rr->type), rr->dname, tv.tv_sec); */

	rr->revision++;

	if (evtimer_pending(&rr->timer, NULL))
		evtimer_del(&rr->timer);
	if (evtimer_add(&rr->timer, &tv) == -1)
		fatal("rrt_sched_rev");
}
345 
/*
 * Revision-timer callback for a cached external RR.  If someone still
 * has an active question for this RRSet, reissue the query to try to
 * refresh the answer; then either schedule the next revision or, after
 * the final one, drop the record from the cache.
 */
void
cache_rev(int unused, short event, void *v_rr)
{
	struct rr	*rr = v_rr;
	struct question	*qst;
	struct pkt	 pkt;

/*	log_debug("cache_rev: timeout rr type: %s, name: %s (%u)", */
/*	    rr_type_name(rr->type), rr->dname, rr->ttl); */

	/* If we have an active question, try to renew the answer */
	if ((qst = question_lookup(&rr->rrs)) != NULL) {
		pkt_init(&pkt);
		pkt.h.qr = MDNS_QUERY;
		pkt_add_question(&pkt, qst);
		if (pkt_send(&pkt, ALL_IFACE) == -1)
			log_warnx("can't send packet to all interfaces");
	}

	if (rr->revision <= 3)
		cache_schedrev(rr);
	else
		cache_delete(rr);
}
370 
371 void
auth_release(struct rr * rr)372 auth_release(struct rr *rr)
373 {
374 	/* Sanity check */
375 	if (!RR_AUTH(rr))
376 		fatalx("auth_release on non auth rr");
377 	if (rr->auth_refcount == 1)
378 		cache_delete(rr);
379 	else
380 		rr->auth_refcount--;
381 }
382 
/*
 * Obtain an authoritative reference to a record equivalent to rr.  If a
 * matching record (same RRSet and rdata) is already cached, bump its
 * refcount and return it; otherwise duplicate rr and insert it with
 * refcount 1.  Returns NULL on conflict (differing rdata where either
 * side is unique) or if insertion fails.
 */
struct rr *
auth_get(struct rr *rr)
{
	struct rr *rr_cache;

	CACHE_FOREACH_RRS(rr_cache, &rr->rrs) {
		/* Have an entry already */
		if (rr_rdata_cmp(rr, rr_cache) == 0) {
			rr_cache->auth_refcount++;
			return (rr_cache);
		}
		/*
		 * Not the same, only ok if not UNIQ.
		 */
		if (RR_UNIQ(rr_cache) || RR_UNIQ(rr)) {
			log_warnx("auth_get: conflict for %s (1)",
			    rrs_str(&rr->rrs));
			return (NULL);
		}
	}

	/* Duplicate and insert */
	rr_cache = rr_dup(rr);
	rr_cache->auth_refcount = 1;

	if (cache_insert(rr_cache) == 0)
		return (rr_cache);

	return (NULL);
}
413 
414 int
rrset_cmp(struct rrset * a,struct rrset * b)415 rrset_cmp(struct rrset *a, struct rrset *b)
416 {
417 	if (a->class < b->class)
418 		return (-1);
419 	if (a->class > b->class)
420 		return (1);
421 	if (a->type < b->type)
422 		return (-1);
423 	if (a->type > b->type)
424 		return (1);
425 
426 	return (strcmp(a->dname, b->dname));
427 }
428 
429 /*
430  * Querier
431  */
432 
/*
 * Initialize the (empty) active-question tree.
 */
void
query_init(void)
{
	RB_INIT(&question_tree);
}
438 
439 struct question *
question_lookup(struct rrset * rrs)440 question_lookup(struct rrset *rrs)
441 {
442 	struct question qst;
443 
444 	bzero(&qst, sizeof(qst));
445 	qst.rrs = *rrs;
446 
447 	return (RB_FIND(question_tree, &question_tree, &qst));
448 }
449 
450 struct question *
question_add(struct rrset * rrs)451 question_add(struct rrset *rrs)
452 {
453 	struct question *qst;
454 
455 	qst = question_lookup(rrs);
456 	if (qst != NULL) {
457 		qst->active++;
458 		log_debug("existing question for %s (%s) active = %d",
459 		    rrs->dname, rr_type_name(rrs->type), qst->active);
460 		return (qst);
461 	}
462 	if ((qst = calloc(1, sizeof(*qst))) == NULL)
463 		fatal("calloc");
464 	qst->active++;
465 	qst->rrs = *rrs;
466 	if (RB_INSERT(question_tree, &question_tree, qst) != NULL)
467 		fatal("question_add: RB_INSERT");
468 
469 	return (qst);
470 }
471 
472 void
question_remove(struct rrset * rrs)473 question_remove(struct rrset *rrs)
474 {
475 	struct question *qst;
476 
477 	qst = question_lookup(rrs);
478 	if (qst == NULL) {
479 		log_warnx("trying to remove non existant question");
480 		return;
481 	}
482 	if (--qst->active == 0) {
483 		RB_REMOVE(question_tree, &question_tree, qst);
484 		free(qst);
485 	}
486 }
487 
488 void
query_remove(struct query * q)489 query_remove(struct query *q)
490 {
491 	struct rrset *rrs;
492 
493 	LIST_REMOVE(q, entry);
494 	while ((rrs = LIST_FIRST(&q->rrslist)) != NULL) {
495 		question_remove(rrs);
496 		LIST_REMOVE(rrs, entry);
497 		log_debug("question_remove %s", rrs_str(rrs));
498 		free(rrs);
499 	}
500 	if (evtimer_pending(&q->timer, NULL))
501 		evtimer_del(&q->timer);
502 	free(q);
503 }
504 
/*
 * Query state machine, driven by the query timer.  Each run sends the
 * query's outstanding questions (with exponentially increasing spacing,
 * capped at MAXQUERYTIME), applies known-answer suppression for browse
 * queries, and reports failure to the controller when a lookup/resolve
 * has been retried too many times.
 */
void
query_fsm(int unused, short event, void *v_query)
{
	struct pkt		 pkt;
	struct mdns_service	 nullms;
	struct query		*q;
	struct question		*qst;
	struct rr		*rraux, nullrr;
	struct rrset		*rrs;
	struct timespec		 tnow;
	struct timeval		 tv;
	time_t			 tosleep;

	q = v_query;
	pkt_init(&pkt);
	pkt.h.qr = MDNS_QUERY;

	/* This will send at seconds 0, 1, 2, 4, 8, 16... */
	/* (2 << count) - (1 << count) == 1 << count */
	tosleep = (2 << q->count) - (1 << q->count);
	if (tosleep > MAXQUERYTIME)
		tosleep = MAXQUERYTIME;
	timerclear(&tv);
	tv.tv_sec = tosleep;

	if (clock_gettime(CLOCK_MONOTONIC, &tnow) == -1)
		fatal("clock_gettime");

	/*
	 * If we're in our third call and we're still alive,
	 * consider a failure.
	 */
	if (q->style == QUERY_LOOKUP && q->count == 2) {
		rrs = LIST_FIRST(&q->rrslist);
		bzero(&nullrr, sizeof(nullrr));
		nullrr.rrs = *rrs;
		control_send_rr(q->ctl, &nullrr, IMSG_CTL_LOOKUP_FAILURE);
		query_remove(q);
		return;
	}

	if (q->style == QUERY_RESOLVE && q->count == 3) {
		log_debug("query_resolve failed");
		bzero(&nullms, sizeof(nullms));
		strlcpy(nullms.name, q->ms_srv->dname, sizeof(nullms.name));
		control_send_ms(q->ctl, &nullms, IMSG_CTL_RESOLVE_FAILURE);
		query_remove(q);
		return;
	}

	LIST_FOREACH(rrs, &q->rrslist, entry) {
		/* Resolve queries don't re-ask what the cache can answer. */
		if (q->style == QUERY_RESOLVE && cache_lookup(rrs)) {
			log_debug("question for %s supressed, have answer",
			    rrs_str(rrs));
			continue;
		}
		if ((qst = question_lookup(rrs)) == NULL) {
			log_warnx("Can't find question in query_fsm for %s",
			    rrs_str(rrs));
			/* XXX: we leak memory */
			return;
		}

		/* Can't send question before schedule */
		if (timespeccmp(&tnow, &qst->sched, <)) {
			log_debug("question for %s before schedule",
			    rrs_str(rrs));
			continue;
		}

		pkt_add_question(&pkt, qst);
		qst->sent++;
		qst->lastsent = tnow;
		qst->sched = tnow;
		qst->sched.tv_sec += tosleep;
		if (q->style == QUERY_BROWSE) {
			/* Known Answer Supression */
			CACHE_FOREACH_RRS(rraux, rrs) {
				/* Don't include rr if it's too old */
				if (rr_ttl_left(rraux) < rraux->ttl / 2)
					continue;
				pkt_add_anrr(&pkt, rraux);
			}
		}
	}

	if (pkt.h.qdcount > 0)
		if (pkt_send(&pkt, ALL_IFACE) == -1)
			log_warnx("can't send packet to all interfaces");
	q->count++;
	if (evtimer_pending(&q->timer, NULL))
		evtimer_del(&q->timer);
	evtimer_add(&q->timer, &tv);
}
598 
/*
 * Red-black tree comparator for questions: order by their RRSet.
 */
int
question_cmp(struct question *a, struct question *b)
{
	return (rrset_cmp(&a->rrs, &b->rrs));
}
604 
605 struct question *
question_dup(struct question * qst)606 question_dup(struct question *qst)
607 {
608 	struct question *qdup;
609 
610 	if ((qdup = malloc(sizeof(*qdup))) == NULL)
611 		fatal("malloc");
612 	memcpy(qdup, qst, sizeof(*qdup));
613 
614 	return (qdup);
615 }
616 
/*
 * A new RR entered the cache: deliver it to every controller query that
 * matches its RRSet.  Lookup queries are one-shot, browse queries get an
 * ADD notification, and resolve queries may additionally start asking
 * for the SRV target's address.  Returns 0 on success, -1 on an
 * unknown query style.
 */
int
rr_notify_in(struct rr *rr)
{
	struct ctl_conn		*c;
	struct query		*q, *nq;
	struct question		*qst;
	struct rrset		*rrs;
	int			 query_done;

	/* See if we have a question matching this rr */
	if ((qst = question_lookup(&rr->rrs)) == NULL)
		return (0);

	/* Loop through controllers and check who wants it */
	TAILQ_FOREACH(c, &ctl_conns, entry) {
		/* Save successor: query_remove() may free q. */
		for (q = LIST_FIRST(&c->qlist); q != NULL; q = nq) {
			nq = LIST_NEXT(q, entry);
			query_done = 0;
			LIST_FOREACH(rrs, &q->rrslist, entry) {

				if (rrset_cmp(rrs, &rr->rrs) != 0)
					continue;
				/*
				 * Notify controller with full RR.
				 */
				switch (q->style) {
				case QUERY_LOOKUP:
					/*
					 * NOTE(review): the unbraced if below
					 * means query_remove() only runs when
					 * control_send_rr() fails, while the
					 * indentation suggests it was meant
					 * unconditionally -- confirm intent
					 * before changing.
					 */
					if (control_send_rr(c, rr,
					    IMSG_CTL_LOOKUP) == -1)
					query_remove(q);
					query_done = 1;
					break;
				case QUERY_BROWSE:
					if (control_send_rr(c, rr,
					    IMSG_CTL_BROWSE_ADD) == -1)
						log_warnx("control_send_rr error");
					break;
				case QUERY_RESOLVE:
					/*
					 * If this is a SRV, make sure we're
					 * asking for the target
					 */
					if (rr->rrs.type == T_SRV &&
					    q->ms_a == NULL) {
						if ((q->ms_a = calloc(1,
						    sizeof(*q->ms_a))) == NULL)
							err(1, "calloc");
						strlcpy(q->ms_a->dname,
						    rr->rdata.SRV.target,
						    sizeof(q->ms_a->dname));
						q->ms_a->class = C_IN;
						q->ms_a->type = T_A;
						LIST_INSERT_HEAD(&q->rrslist,
						    q->ms_a, entry);
						if (question_add(q->ms_a) ==
						    NULL)
							log_warnx("Can't add "
							    "question");
					}
					if (control_try_answer_ms(c,
					    q->ms_srv->dname) == 1) {
						query_remove(q);
						query_done = 1;
					}
					break;
				default:
					log_warnx("Unknown query style");
					return (-1);
				}

				if (query_done)
					break;
			}
		}
	}

	return (0);
}
695 
696 int
rr_notify_out(struct rr * rr)697 rr_notify_out(struct rr *rr)
698 {
699 	struct ctl_conn *c;
700 	struct query	*q;
701 	struct question *qst;
702 	struct rrset	*rrs;
703 
704 	if ((qst = question_lookup(&rr->rrs)) == NULL)
705 		return (0);
706 
707 	TAILQ_FOREACH(c, &ctl_conns, entry) {
708 		LIST_FOREACH(q, &c->qlist, entry) {
709 			if (q->style != QUERY_BROWSE)
710 				continue;
711 			LIST_FOREACH(rrs, &q->rrslist, entry) {
712 				if (rrset_cmp(rrs, &rr->rrs) != 0)
713 					continue;
714 				/*
715 				 * Notify controller with full RR.
716 				 */
717 				if (control_send_rr(c, rr, IMSG_CTL_BROWSE_DEL)
718 				    == -1)
719 					log_warnx("control_send_rr error");
720 			}
721 		}
722 	}
723 
724 	return (0);
725 }
726 
/*
 * Build a publish-group entry for a service: its SRV, TXT and the two
 * PTR records (service instance and service-type enumeration) plus the
 * probe question, all referenced through auth_get().  On success the
 * pge is linked into the global pge queue (and into pg's list when pg
 * is non-NULL).  Returns NULL on overlong names or record conflicts,
 * releasing any references already taken.
 */
struct pge *
pge_from_ms(struct pg *pg, struct mdns_service *ms, struct iface *iface)
{
	struct pge	*pge;
	struct rr	srv, txt, ptr_proto, ptr_services;
	struct question *qst;
	char		 servname[MAXHOSTNAMELEN], proto[MAXHOSTNAMELEN];


	qst = NULL;
	bzero(&srv, sizeof(srv));
	bzero(&txt, sizeof(txt));
	bzero(&ptr_proto, sizeof(ptr_proto));
	bzero(&ptr_services, sizeof(ptr_services));
	bzero(proto, sizeof(proto));
	bzero(servname, sizeof(servname));

	if (snprintf(servname, sizeof(servname),
	    "%s._%s._%s.local",  ms->name, ms->app, ms->proto)
	    >= (int)sizeof(servname)) {
		log_warnx("pge_from_ms: name too long");
		return (NULL);
	}
	if (snprintf(proto, sizeof(proto),
	    "_%s._%s.local",  ms->app, ms->proto)
	    >= (int)sizeof(proto)) {
		log_warnx("pge_from_ms: proto too long");
		return (NULL);
	}
	/* T_SRV */
	(void)rr_set(&srv, servname, T_SRV, C_IN, TTL_SRV,
	    RR_FLAG_CACHEFLUSH, NULL, 0);
	(void)strlcpy(srv.rdata.SRV.target, ms->target,
	    sizeof(srv.rdata.SRV.target));
	srv.rdata.SRV.priority = ms->priority;
	srv.rdata.SRV.weight = ms->weight;
	srv.rdata.SRV.port = ms->port;
	/* T_TXT */
	(void)rr_set(&txt, servname, T_TXT, C_IN, TTL_TXT,
	    RR_FLAG_CACHEFLUSH,
	    ms->txt, sizeof(ms->txt));
	/* T_PTR proto */
	(void)rr_set(&ptr_proto, proto, T_PTR, C_IN, TTL_PTR,
	    0, servname, sizeof(servname));
	/* T_PTR services */
	(void)rr_set(&ptr_services, "_services._dns-sd._udp.local",
	    T_PTR, C_IN, TTL_PTR, 0, proto, sizeof(proto));

	/* Question used while probing for the service name */
	if ((qst = calloc(1, sizeof(*qst))) == NULL)
		fatal("calloc");
	qst->rrs.type  = T_ANY;
	qst->rrs.class = C_IN;
	(void)strlcpy(qst->rrs.dname, srv.rrs.dname,
	    sizeof(qst->rrs.dname));

	/* Alloc and init pge structure */
	if ((pge = calloc(1, sizeof(*pge))) == NULL)
		fatal("calloc");
	pge->pg		= pg;
	pge->pge_type   = PGE_TYPE_SERVICE;
	pge->pge_flags |= PGE_FLAG_INC_A;
	pge->iface	= iface;
	pge->state	= PGE_STA_UNPUBLISHED;
	evtimer_set(&pge->timer, pge_fsm, pge);
	pge->pqst = qst;
	/* Insert everyone in auth; unwind earlier refs on failure */
	if ((pge->rr[0] = auth_get(&srv)) == NULL)
		goto bad;
	pge->nrr++;
	if ((pge->rr[1] = auth_get(&txt)) == NULL) {
		auth_release(pge->rr[0]);
		goto bad;
	}
	pge->nrr++;
	if ((pge->rr[2] = auth_get(&ptr_proto)) == NULL) {
		auth_release(pge->rr[0]);
		auth_release(pge->rr[1]);
		goto bad;
	}
	pge->nrr++;
	if ((pge->rr[3] = auth_get(&ptr_services)) == NULL) {
		auth_release(pge->rr[0]);
		auth_release(pge->rr[1]);
		auth_release(pge->rr[2]);
		goto bad;
	}
	pge->nrr++;

	/*
	 * If we got here, all is fine and we can link pge
	 */
	TAILQ_INSERT_TAIL(&pge_queue, pge, entry);
	if (pg != NULL)
		LIST_INSERT_HEAD(&pg->pge_list, pge, pge_entry);

	return (pge);

bad:
	free(pge->pqst);
	free(pge);

	return (NULL);
}
831 
/*
 * (Re)arm the pge state-machine timer, cancelling any pending run.
 */
void
pge_fsm_restart(struct pge *pge, struct timeval *tv)
{
	if (evtimer_pending(&pge->timer, NULL))
		evtimer_del(&pge->timer);
	evtimer_add(&pge->timer, tv);
}
839 
/*
 * Publish-group-entry state machine, driven by the pge timer.
 * UNPUBLISHED -> PROBING (3 probes, INTERVAL_PROBETIME apart) ->
 * ANNOUNCING (3 announcements, 1/2/3s apart) -> PUBLISHED.  Service
 * entries wait until the primary address entry has at least reached
 * the announcing state before starting.
 */
void
pge_fsm(int unused, short event, void *v_pge)
{
	struct pg	*pg;
	struct pge	*pge, *pge_primary;
	struct timeval	 tv;
	struct pkt	 pkt;
	int		 i;

	pge = v_pge;
	pg  = pge->pg;

	/*
	 * In order to publish services and addresses we must first make sure
	 * our primary address has been sucessfully published, if not, we delay
	 * publication for a second. We don't really need this if the service is
	 * not on our local address.
	 */
	if (pge->pge_type == PGE_TYPE_SERVICE) {
		pge_primary = conf->pge_primary;
		if (pge_primary->state < PGE_STA_ANNOUNCING) {
			timerclear(&tv);
			tv.tv_sec = 1;
			pge_fsm_restart(pge, &tv);
			return;
		}
	}

	switch (pge->state){
	case PGE_STA_UNPUBLISHED:
		pge->state = PGE_STA_PROBING;
		/* FALLTHROUGH */
	case PGE_STA_PROBING:
		/* Notify controller on the first probe of a user group */
		if ((pge->pge_flags & PGE_FLAG_INTERNAL) == 0 &&
		    pge->sent == 0)
			control_notify_pg(pg->c, pg,
			    IMSG_CTL_GROUP_PROBING);
		/* Build up our probe packet */
		pkt_init(&pkt);
		pkt.h.qr = MDNS_QUERY;
		if (pge->pqst != NULL) {
			/* Unicast question ? */
			if (pge->sent < 2)
				pge->pqst->flags |= QST_FLAG_UNIRESP;
			else
				pge->pqst->flags &= ~QST_FLAG_UNIRESP;
			pkt_add_question(&pkt, pge->pqst);
		}
		/* Add the RRs in the NS section */
		for (i = 0; i < pge->nrr; i++)
			pkt_add_nsrr(&pkt, pge->rr[i]);
		/* Always probe for all interfaces, this is safer */
		if (pkt_send(&pkt, ALL_IFACE) == -1)
			log_warnx("can't send probe packet");
		/* Probing done, start announcing */
		if (++pge->sent == 3) {
			/* sent is re-used by PGE_STA_ANNOUNCING */
			pge->sent  = 0;
			pge->state = PGE_STA_ANNOUNCING;
			/*
			 * Consider records published
			 */
			for (i = 0; i < pge->nrr; i++) {
				if ((pge->rr[i]->flags & RR_FLAG_PUBLISHED) == 0)
					rr_notify_in(pge->rr[i]);
				pge->rr[i]->flags |= RR_FLAG_PUBLISHED;
			}
		}
		timerclear(&tv);
		tv.tv_usec = INTERVAL_PROBETIME;
		pge_fsm_restart(pge, &tv);
		break;
	case PGE_STA_ANNOUNCING:
		/* Notify controller on the first announce of a user group */
		if ((pge->pge_flags & PGE_FLAG_INTERNAL) == 0 &&
		    pge->sent == 0)
			control_notify_pg(pg->c, pg,
			    IMSG_CTL_GROUP_ANNOUNCING);
		/* Build up our announcing packet */
		pkt_init(&pkt);
		pkt.h.qr = MDNS_RESPONSE;
		/* Add the RRs in the AN secion */
		for (i = 0; i < pge->nrr; i++)
			pkt_add_anrr(&pkt, pge->rr[i]);
		/*
		 * PGE_FLAG_INC_A, we should add our primary A resource record
		 * to the packet.
		 */
		if (pge->pge_flags & PGE_FLAG_INC_A)
			pkt_add_anrr(&pkt, conf->pge_primary->rr[PGE_RR_PRIM]);

		if (pkt_send(&pkt, ALL_IFACE) == -1) {
			log_warnx("can't send announce packet");
			return;
		}

		/* Undo the borrowed primary A record's packet linkage */
		if (pge->pge_flags & PGE_FLAG_INC_A) {
			LIST_REMOVE(conf->pge_primary->rr[PGE_RR_PRIM], pentry);
			pkt.h.ancount--; /* XXX */
		}

		if (++pge->sent < 3)  {
			timerclear(&tv);
			tv.tv_sec = pge->sent;
			pge_fsm_restart(pge, &tv);
			break;
		}
		pge->state = PGE_STA_PUBLISHED;
		/* FALLTHROUGH */
	case PGE_STA_PUBLISHED:
		if ((pge->pge_flags & PGE_FLAG_INTERNAL) == 0)
			log_debug("group %s published", pg->name);
		/*
		 * Check if every pge in pg is published, if it is
		 * we'll consider the group as published, notify controller
		 */
		if ((pge->pge_flags & PGE_FLAG_INTERNAL) == 0 &&
		    pg_published(pg))
			control_notify_pg(pg->c, pg, IMSG_CTL_GROUP_PUBLISHED);
		break;
	default:
		fatalx("invalid group state");
	}
}
963 
964 void
pge_kill(struct pge * pge)965 pge_kill(struct pge *pge)
966 {
967 	int i;
968 	struct rr *rr;
969 
970 	/* Stop pge machine */
971 	if (evtimer_pending(&pge->timer, NULL))
972 		evtimer_del(&pge->timer);
973 
974 	/*
975 	 * Release our records.
976 	 */
977 	for (i = 0; i < pge->nrr; i++) {
978 		rr = pge->rr[i];
979 		if (rr == NULL)
980 			continue;
981 		auth_release(rr);
982 		pge->rr[i] = NULL;
983 	}
984 	/*
985 	 * Unlink pge
986 	 */
987 	TAILQ_REMOVE(&pge_queue, pge, entry);
988 	if ((pge->pge_flags & PGE_FLAG_INTERNAL) == 0) {
989 		LIST_REMOVE(pge, pge_entry);
990 	}
991 	free(pge);
992 }
993 
/*
 * Initialize the (empty) publish-group and group-entry queues.
 */
void
pg_init(void)
{
	TAILQ_INIT(&pg_queue);
	TAILQ_INIT(&pge_queue);
}
1000 
1001 void
pg_publish_byiface(struct iface * iface)1002 pg_publish_byiface(struct iface *iface)
1003 {
1004 	struct pge	*pge;
1005 
1006 	TAILQ_FOREACH(pge, &pge_queue, entry) {
1007 		/* XXX this is so wrong.... */
1008 		if (pge->iface == ALL_IFACE || pge->iface == iface) {
1009 			/* XXX must be a random probe time */
1010 			pge_revert_probe(pge);
1011 		}
1012 	}
1013 }
1014 
1015 struct pg *
pg_get(int alloc,char name[MAXHOSTNAMELEN],struct ctl_conn * c)1016 pg_get(int alloc, char name[MAXHOSTNAMELEN], struct ctl_conn *c)
1017 {
1018 	struct pg *pg;
1019 
1020 	TAILQ_FOREACH(pg, &pg_queue, entry) {
1021 		if (pg->c == c && strcmp(pg->name, name) == 0)
1022 			return (pg);
1023 	}
1024 
1025 	if (!alloc)
1026 		return (NULL);
1027 	if ((pg = calloc(1, sizeof(*pg))) == NULL)
1028 		err(1, "calloc");
1029 	(void)strlcpy(pg->name, name, sizeof(pg->name));
1030 	pg->c	  = c;
1031 	pg->flags = 0;
1032 	LIST_INIT(&pg->pge_list);
1033 	TAILQ_INSERT_TAIL(&pg_queue, pg, entry);
1034 
1035 	return (pg);
1036 }
1037 
/*
 * Build the internal primary publish-group entry that carries our host
 * records: one T_A, one reverse T_PTR per configured interface, and one
 * T_HINFO, plus the T_ANY probe question for our own name.  Fatal on
 * any failure.
 */
void
pge_initprimary(void)
{
	struct pge	*pge;
	struct question	*qst;
	struct iface	*iface;
	struct rr	 rr;
	char		 revaddr[MAXHOSTNAMELEN];
	struct in_addr	 inaddrany;

	if ((conf->pge_primary = calloc(1, sizeof(*pge))) == NULL)
		fatal("calloc");
	pge	       = conf->pge_primary;
	pge->pge_flags = PGE_FLAG_INTERNAL;
	pge->pg	       = NULL;
	pge->sent      = 0;
	pge->state     = PGE_STA_UNPUBLISHED;
	pge->iface     = ALL_IFACE;
	evtimer_set(&pge->timer, pge_fsm, pge);
	/* Link to global pge */
	TAILQ_INSERT_TAIL(&pge_queue, pge, entry);
	/* Set up primary question */
	if ((qst = calloc(1, sizeof(*qst))) == NULL)
		fatal("calloc");
	(void)strlcpy(qst->rrs.dname, conf->myname, sizeof(qst->rrs.dname));
	qst->rrs.type  = T_ANY;
	qst->rrs.class = C_IN;
	pge->pqst = qst;
	/* Must add T_A, T_PTR(rev) and T_HINFO */
	/* T_A record, NOTE: must be first to match PGE_RR_PRIM */
	inaddrany.s_addr = INADDR_ANY;
	bzero(&rr, sizeof(rr));
	rr_set(&rr, conf->myname, T_A, C_IN, TTL_HNAME,
	    RR_FLAG_CACHEFLUSH,
	    &inaddrany, sizeof(inaddrany));
	if ((pge->rr[pge->nrr++] = auth_get(&rr)) == NULL)
		goto bad;
	/* T_PTR record reverse address, one for every address */
	/*
	 * NOTE(review): assumes pge->rr[] has room for every interface
	 * plus the A and HINFO records -- verify the array bound against
	 * the struct pge declaration.
	 */
	LIST_FOREACH(iface, &conf->iface_list, entry) {
		bzero(&rr, sizeof(rr));
		reversstr(revaddr, &iface->addr);
		rr_set(&rr, revaddr, T_PTR, C_IN, TTL_HNAME,
		    RR_FLAG_CACHEFLUSH,
		    conf->myname, sizeof(conf->myname));
		if ((pge->rr[pge->nrr++] = auth_get(&rr)) == NULL)
			goto bad;
	}
	/* T_HINFO record */
	bzero(&rr, sizeof(rr));
	rr_set(&rr, conf->myname, T_HINFO, C_IN, TTL_HNAME,
	    RR_FLAG_CACHEFLUSH,
	    &conf->hi, sizeof(conf->hi));
	if ((pge->rr[pge->nrr++] = auth_get(&rr)) == NULL)
		goto bad;

	return;
bad:
	log_warnx("Can't init primary addresses");
	fatalx("internal error");
}
1098 
1099 
1100 struct pge *
pge_new_workstation(struct iface * iface)1101 pge_new_workstation(struct iface *iface)
1102 {
1103 	struct mdns_service	 ms;
1104 	struct pge		*pge;
1105 	char			 myname[MAXLABELLEN], *cp;
1106 
1107 	/* Build our service */
1108 	bzero(&ms, sizeof(ms));
1109 	ms.port = 9;	/* workstation stuff */
1110 	(void)strlcpy(ms.app, "workstation", sizeof(ms.app));
1111 	(void)strlcpy(ms.proto, "tcp", sizeof(ms.proto));
1112 	(void)strlcpy(myname, conf->myname, sizeof(myname));
1113 	/* Chomp .local suffix */
1114 	if ((cp = strchr(myname, '.')) != NULL)
1115 		*cp = '\0';
1116 	if (snprintf(ms.name, sizeof(ms.name),
1117 	    "%s [%s:%s]", myname, iface->name,
1118 	    ether_ntoa(&iface->ea)) >= (int)sizeof(ms.name))
1119 		log_warnx("Workstation name too long");
1120 	strlcpy(ms.target, conf->myname, sizeof(ms.target));
1121 
1122 	pge = pge_from_ms(NULL, &ms, iface);
1123 	pge->pge_flags |= PGE_FLAG_INTERNAL;
1124 
1125 	return (pge);
1126 }
1127 
1128 /*
1129  * Reset/Cleanup/Unpublish a group.
1130  */
1131 void
pg_kill(struct pg * pg)1132 pg_kill(struct pg *pg)
1133 {
1134 	struct pge *pge;
1135 
1136 	while ((pge = LIST_FIRST(&pg->pge_list)) != NULL)
1137 		pge_kill(pge);
1138 
1139 	TAILQ_REMOVE(&pg_queue, pg, entry);
1140 	log_debug("group %s unpublished", pg->name);
1141 	free(pg);
1142 }
1143 
1144 /*
1145  * True if group is published
1146  */
1147 int
pg_published(struct pg * pg)1148 pg_published(struct pg *pg)
1149 {
1150 	struct pge	*pge;
1151 
1152 	LIST_FOREACH(pge, &pg->pge_list, pge_entry) {
1153 		if (pge->state != PGE_STA_PUBLISHED)
1154 			return (0);
1155 	}
1156 
1157 	return (1);
1158 }
1159 
1160 int
rr_send_goodbye(struct rr * rr)1161 rr_send_goodbye(struct rr *rr)
1162 {
1163 	u_int32_t	old_ttl;
1164 	int		r = 0;
1165 
1166 	if ((rr->flags & RR_FLAG_PUBLISHED) == 0)
1167 		return (0);
1168 	old_ttl = rr->ttl;
1169 	/*
1170 	 * Send a goodbye for published records
1171 	 */
1172 	rr->ttl = 0;
1173 	r	= rr_send_an(rr);
1174 	rr->ttl = old_ttl;
1175 
1176 	return (r);
1177 }
1178 
1179 int
rr_send_an(struct rr * rr)1180 rr_send_an(struct rr *rr)
1181 {
1182 	struct pkt pkt;
1183 
1184 	pkt_init(&pkt);
1185 	pkt.h.qr = MDNS_RESPONSE;
1186 	pkt_add_anrr(&pkt, rr);
1187 	if (pkt_send(&pkt, ALL_IFACE) == -1) {
1188 		log_warnx("rr_send_an error %s", rrs_str(&rr->rrs));
1189 
1190 		return (-1);
1191 	}
1192 
1193 	return (0);
1194 }
1195 
1196 void
conflict_resolve_by_rr(struct rr * rr)1197 conflict_resolve_by_rr(struct rr *rr)
1198 {
1199 	struct pge *pge, *next;
1200 	int i;
1201 
1202 	for (pge = TAILQ_FIRST(&pge_queue); pge != NULL; pge = next) {
1203 		next = TAILQ_NEXT(pge, entry);
1204 		for (i = 0; i < pge->nrr; i++) {
1205 			if (rr != pge->rr[i])
1206 				continue;
1207 			/*
1208 			 * If unpublished or probing, give up !
1209 			 */
1210 			if (pge->state < PGE_STA_ANNOUNCING)
1211 				pge_conflict_drop(pge);
1212 			else {/* Reset to probing state */
1213 				log_warnx("Got a conflict revert to probe, "
1214 				    "HIGHLY experimental");
1215 				pge_revert_probe(pge);
1216 			}
1217 		}
1218 	}
1219 }
1220 
1221 /*
1222  * Drop this pge
1223  */
void
pge_conflict_drop(struct pge *pge)
{
	struct pg *pg;

	log_debug("pge_conflict_drop: %p", pge);

	/* Internal entries (primary/workstation) have no owning group. */
	if (pge->pge_flags & PGE_FLAG_INTERNAL) {
		log_warnx("conflict for internal pge, unimplemented");
		return;
	}

	/* Report the collision and tear down the whole group. */
	pg = pge->pg;
	control_notify_pg(pg->c, pg, IMSG_CTL_GROUP_ERR_COLLISION);
	pg_kill(pg);
}
1240 
1241 void
pge_revert_probe(struct pge * pge)1242 pge_revert_probe(struct pge *pge)
1243 {
1244 	struct timeval tv;
1245 	struct rr *rr;
1246 	int i;
1247 
1248 	timerclear(&tv);
1249 	pge->state	= PGE_STA_PROBING;
1250 	pge->sent	= 0;
1251 
1252 	for (i = 0; i < pge->nrr; i++) {
1253 		rr = pge->rr[i];
1254 		/* Stop answering for these RR */
1255 		/* XXX not sure about this */
1256 		rr->flags &= ~RR_FLAG_PUBLISHED;
1257 	}
1258 	/* Restart the machine */
1259 	pge_fsm_restart(pge, &tv);
1260 }
1261