/* $OpenBSD: pfe.c,v 1.91 2024/06/17 08:36:56 sashan Exp $ */

/*
 * Copyright (c) 2006 Pierre-Yves Ritschard <pyr@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/pfvar.h>

#include <event.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <imsg.h>

#include "relayd.h"

void	 pfe_init(struct privsep *, struct privsep_proc *, void *);
void	 pfe_shutdown(void);
void	 pfe_setup_events(void);
void	 pfe_disable_events(void);
void	 pfe_sync(void);
void	 pfe_statistics(int, short, void *);

int	 pfe_dispatch_parent(int, struct privsep_proc *, struct imsg *);
int	 pfe_dispatch_hce(int, struct privsep_proc *, struct imsg *);
int	 pfe_dispatch_relay(int, struct privsep_proc *, struct imsg *);

static struct relayd	*env = NULL;

static struct privsep_proc procs[] = {
	{ "parent",	PROC_PARENT,	pfe_dispatch_parent },
	{ "relay",	PROC_RELAY,	pfe_dispatch_relay },
	{ "hce",	PROC_HCE,	pfe_dispatch_hce }
};

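/*
 * pfe - pf engine process entry point.
 *
 * Opens the pf(4) device, verifies that the packet filter is enabled and
 * then hands control to proc_run() with pfe_init() as the initializer.
 */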
void
pfe(struct privsep *ps, struct privsep_proc *p)
{
	int			 s;
	struct pf_status	 status;

	env = ps->ps_env;

	if ((s = open(PF_SOCKET, O_RDWR)) == -1) {
		fatal("%s: cannot open pf socket", __func__);
	}
	if (env->sc_pf == NULL) {
		if ((env->sc_pf = calloc(1, sizeof(*(env->sc_pf)))) == NULL)
			fatal("calloc");
		env->sc_pf->dev = s;
	}
	if (ioctl(env->sc_pf->dev, DIOCGETSTATUS, &status) == -1)
		fatal("%s: DIOCGETSTATUS", __func__);
	if (!status.running)
		fatalx("%s: pf is disabled", __func__);
	log_debug("%s: filter init done", __func__);

	proc_run(ps, p, procs, nitems(procs), pfe_init, NULL);
}

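/*
 * Per-process initialization run from proc_run(): set up the configuration
 * state, drop to a minimal pledge(2) promise set and register the shutdown
 * handler.
 */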
void
pfe_init(struct privsep *ps, struct privsep_proc *p, void *arg)
{
	if (config_init(ps->ps_env) == -1)
		fatal("failed to initialize configuration");

	if (pledge("stdio recvfd unix pf", NULL) == -1)
		fatal("pledge");

	p->p_shutdown = pfe_shutdown;
}

void
pfe_shutdown(void)
{
	flush_rulesets(env);
	config_purge(env, CONFIG_ALL);
}

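/*
 * Statistics timer handling: pfe_setup_events() arms the periodic
 * pfe_statistics() timer once, pfe_disable_events() removes it again.
 */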
void
pfe_setup_events(void)
{
	struct timeval tv;

	/* Schedule statistics timer */
	if (!event_initialized(&env->sc_statev)) {
		evtimer_set(&env->sc_statev, pfe_statistics, NULL);
		bcopy(&env->sc_conf.statinterval, &tv, sizeof(tv));
		evtimer_add(&env->sc_statev, &tv);
	}
}

void
pfe_disable_events(void)
{
	event_del(&env->sc_statev);
}

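/*
 * Handle imsgs from the host check engine (hce): host status updates are
 * applied to the local host/table state, forwarded to the relay processes
 * and, when a host changes between up and down, followed by a pfe_sync().
 */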
int
pfe_dispatch_hce(int fd, struct privsep_proc *p, struct imsg *imsg)
{
	struct host *host;
	struct table *table;
	struct ctl_status st;

	control_imsg_forward(p->p_ps, imsg);

	switch (imsg->hdr.type) {
	case IMSG_HOST_STATUS:
		IMSG_SIZE_CHECK(imsg, &st);
		memcpy(&st, imsg->data, sizeof(st));
		if ((host = host_find(env, st.id)) == NULL)
			fatalx("%s: invalid host id", __func__);
		host->he = st.he;
		if (host->flags & F_DISABLE)
			break;
		host->retry_cnt = st.retry_cnt;
		if (st.up != HOST_UNKNOWN) {
			host->check_cnt++;
			if (st.up == HOST_UP)
				host->up_cnt++;
		}
		if (host->check_cnt != st.check_cnt) {
			log_debug("%s: host %d => %d", __func__,
			    host->conf.id, host->up);
			fatalx("%s: desynchronized", __func__);
		}

		if (host->up == st.up)
			break;

		/* Forward to relay engine(s) */
		proc_compose(env->sc_ps, PROC_RELAY,
		    IMSG_HOST_STATUS, &st, sizeof(st));

		if ((table = table_find(env, host->conf.tableid))
		    == NULL)
			fatalx("%s: invalid table id", __func__);

		log_debug("%s: state %d for host %u %s", __func__,
		    st.up, host->conf.id, host->conf.name);

		/* XXX Readd hosttrap code later */
#if 0
		snmp_hosttrap(env, table, host);
#endif

		/*
		 * Do not change the table state when the host
		 * state switches between UNKNOWN and DOWN.
		 */
		if (HOST_ISUP(st.up)) {
			table->conf.flags |= F_CHANGED;
			table->up++;
			host->flags |= F_ADD;
			host->flags &= ~(F_DEL);
		} else if (HOST_ISUP(host->up)) {
			table->up--;
			table->conf.flags |= F_CHANGED;
			host->flags |= F_DEL;
			host->flags &= ~(F_ADD);
			host->up = st.up;
			pfe_sync();
		}

		host->up = st.up;
		break;
	case IMSG_SYNC:
		pfe_sync();
		break;
	default:
		return (-1);
	}

	return (0);
}

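/*
 * Handle imsgs from the parent process: receive the configuration
 * (tables, hosts, redirections, routers, relays, protocols) and start
 * operation once IMSG_CTL_START arrives.
 */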
int
pfe_dispatch_parent(int fd, struct privsep_proc *p, struct imsg *imsg)
{
	switch (imsg->hdr.type) {
	case IMSG_CFG_TABLE:
		config_gettable(env, imsg);
		break;
	case IMSG_CFG_HOST:
		config_gethost(env, imsg);
		break;
	case IMSG_CFG_RDR:
		config_getrdr(env, imsg);
		break;
	case IMSG_CFG_VIRT:
		config_getvirt(env, imsg);
		break;
	case IMSG_CFG_ROUTER:
		config_getrt(env, imsg);
		break;
	case IMSG_CFG_ROUTE:
		config_getroute(env, imsg);
		break;
	case IMSG_CFG_PROTO:
		config_getproto(env, imsg);
		break;
	case IMSG_CFG_RELAY:
		config_getrelay(env, imsg);
		break;
	case IMSG_CFG_RELAY_TABLE:
		config_getrelaytable(env, imsg);
		break;
	case IMSG_CFG_DONE:
		config_getcfg(env, imsg);
		init_tables(env);
		agentx_init(env);
		break;
	case IMSG_CTL_START:
		pfe_setup_events();
		pfe_sync();
		break;
	case IMSG_CTL_RESET:
		config_getreset(env, imsg);
		break;
	case IMSG_AGENTXSOCK:
		agentx_getsock(imsg);
		break;
	default:
		return (-1);
	}

	return (0);
}

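/*
 * Handle imsgs from the relay processes: pf NAT lookups, per-relay
 * statistics, control session listings and the published session list.
 */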
int
pfe_dispatch_relay(int fd, struct privsep_proc *p, struct imsg *imsg)
{
	struct ctl_natlook cnl;
	struct ctl_stats crs;
	struct relay *rlay;
	struct ctl_conn *c;
	struct rsession con, *s, *t;
	int cid;
	objid_t sid;

	switch (imsg->hdr.type) {
	case IMSG_NATLOOK:
		IMSG_SIZE_CHECK(imsg, &cnl);
		bcopy(imsg->data, &cnl, sizeof(cnl));
		if (cnl.proc > env->sc_conf.prefork_relay)
			fatalx("%s: invalid relay proc", __func__);
		if (natlook(env, &cnl) != 0)
			cnl.in = -1;
		proc_compose_imsg(env->sc_ps, PROC_RELAY, cnl.proc,
		    IMSG_NATLOOK, -1, -1, &cnl, sizeof(cnl));
		break;
	case IMSG_STATISTICS:
		IMSG_SIZE_CHECK(imsg, &crs);
		bcopy(imsg->data, &crs, sizeof(crs));
		if (crs.proc > env->sc_conf.prefork_relay)
			fatalx("%s: invalid relay proc", __func__);
		if ((rlay = relay_find(env, crs.id)) == NULL)
			fatalx("%s: invalid relay id", __func__);
		bcopy(&crs, &rlay->rl_stats[crs.proc], sizeof(crs));
		rlay->rl_stats[crs.proc].interval =
		    env->sc_conf.statinterval.tv_sec;
		break;
	case IMSG_CTL_SESSION:
		IMSG_SIZE_CHECK(imsg, &con);
		memcpy(&con, imsg->data, sizeof(con));
		if ((c = control_connbyfd(con.se_cid)) == NULL) {
			log_debug("%s: control connection %d not found",
			    __func__, con.se_cid);
			return (0);
		}
		imsg_compose_event(&c->iev,
		    IMSG_CTL_SESSION, 0, 0, -1,
		    &con, sizeof(con));
		break;
	case IMSG_CTL_END:
		IMSG_SIZE_CHECK(imsg, &cid);
		memcpy(&cid, imsg->data, sizeof(cid));
		if ((c = control_connbyfd(cid)) == NULL) {
			log_debug("%s: control connection %d not found",
			    __func__, cid);
			return (0);
		}
		if (c->waiting == 0) {
			log_debug("%s: no pending control requests", __func__);
			return (0);
		} else if (--c->waiting == 0) {
			/* Last ack for a previous request */
			imsg_compose_event(&c->iev, IMSG_CTL_END,
			    0, 0, -1, NULL, 0);
		}
		break;
	case IMSG_SESS_PUBLISH:
		IMSG_SIZE_CHECK(imsg, s);
		if ((s = calloc(1, sizeof(*s))) == NULL)
			return (0);	/* XXX */
		memcpy(s, imsg->data, sizeof(*s));
		TAILQ_FOREACH(t, &env->sc_sessions, se_entry) {
			/* duplicate registration */
			if (t->se_id == s->se_id) {
				free(s);
				return (0);
			}
			if (t->se_id > s->se_id)
				break;
		}
		if (t)
			TAILQ_INSERT_BEFORE(t, s, se_entry);
		else
			TAILQ_INSERT_TAIL(&env->sc_sessions, s, se_entry);
		break;
	case IMSG_SESS_UNPUBLISH:
		IMSG_SIZE_CHECK(imsg, &sid);
		memcpy(&sid, imsg->data, sizeof(sid));
		TAILQ_FOREACH(s, &env->sc_sessions, se_entry)
			if (s->se_id == sid)
				break;
		if (s) {
			TAILQ_REMOVE(&env->sc_sessions, s, se_entry);
			free(s);
		} else {
			DPRINTF("removal of unpublished session %i", sid);
		}
		break;
	default:
		return (-1);
	}

	return (0);
}

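/*
 * Dump the running state (redirections, relays, routers and their tables
 * and hosts) to a control client, terminated by IMSG_CTL_END.
 */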
void
show(struct ctl_conn *c)
{
	struct rdr *rdr;
	struct host *host;
	struct relay *rlay;
	struct router *rt;
	struct netroute *nr;
	struct relay_table *rlt;

	if (env->sc_rdrs == NULL)
		goto relays;
	TAILQ_FOREACH(rdr, env->sc_rdrs, entry) {
		imsg_compose_event(&c->iev, IMSG_CTL_RDR, 0, 0, -1,
		    rdr, sizeof(*rdr));
		if (rdr->conf.flags & F_DISABLE)
			continue;

		imsg_compose_event(&c->iev, IMSG_CTL_RDR_STATS, 0, 0, -1,
		    &rdr->stats, sizeof(rdr->stats));

		imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
		    rdr->table, sizeof(*rdr->table));
		if (!(rdr->table->conf.flags & F_DISABLE))
			TAILQ_FOREACH(host, &rdr->table->hosts, entry)
				imsg_compose_event(&c->iev, IMSG_CTL_HOST,
				    0, 0, -1, host, sizeof(*host));

		if (rdr->backup->conf.id == EMPTY_TABLE)
			continue;
		imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
		    rdr->backup, sizeof(*rdr->backup));
		if (!(rdr->backup->conf.flags & F_DISABLE))
			TAILQ_FOREACH(host, &rdr->backup->hosts, entry)
				imsg_compose_event(&c->iev, IMSG_CTL_HOST,
				    0, 0, -1, host, sizeof(*host));
	}
relays:
	if (env->sc_relays == NULL)
		goto routers;
	TAILQ_FOREACH(rlay, env->sc_relays, rl_entry) {
		rlay->rl_stats[env->sc_conf.prefork_relay].id = EMPTY_ID;
		imsg_compose_event(&c->iev, IMSG_CTL_RELAY, 0, 0, -1,
		    rlay, sizeof(*rlay));
		imsg_compose_event(&c->iev, IMSG_CTL_RELAY_STATS, 0, 0, -1,
		    &rlay->rl_stats, sizeof(rlay->rl_stats));

		TAILQ_FOREACH(rlt, &rlay->rl_tables, rlt_entry) {
			imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
			    rlt->rlt_table, sizeof(*rlt->rlt_table));
			if (!(rlt->rlt_table->conf.flags & F_DISABLE))
				TAILQ_FOREACH(host,
				    &rlt->rlt_table->hosts, entry)
					imsg_compose_event(&c->iev,
					    IMSG_CTL_HOST, 0, 0, -1,
					    host, sizeof(*host));
		}
	}

routers:
	if (env->sc_rts == NULL)
		goto end;
	TAILQ_FOREACH(rt, env->sc_rts, rt_entry) {
		imsg_compose_event(&c->iev, IMSG_CTL_ROUTER, 0, 0, -1,
		    rt, sizeof(*rt));
		if (rt->rt_conf.flags & F_DISABLE)
			continue;

		TAILQ_FOREACH(nr, &rt->rt_netroutes, nr_entry)
			imsg_compose_event(&c->iev, IMSG_CTL_NETROUTE,
			    0, 0, -1, nr, sizeof(*nr));
		imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
		    rt->rt_gwtable, sizeof(*rt->rt_gwtable));
		if (!(rt->rt_gwtable->conf.flags & F_DISABLE))
			TAILQ_FOREACH(host, &rt->rt_gwtable->hosts, entry)
				imsg_compose_event(&c->iev, IMSG_CTL_HOST,
				    0, 0, -1, host, sizeof(*host));
	}

end:
	imsg_compose_event(&c->iev, IMSG_CTL_END, 0, 0, -1, NULL, 0);
}

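/*
 * Ask every relay process for its active sessions on behalf of a control
 * client; c->waiting tracks the outstanding replies (see IMSG_CTL_END in
 * pfe_dispatch_relay()).
 */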
void
show_sessions(struct ctl_conn *c)
{
	int proc, cid;

	for (proc = 0; proc < env->sc_conf.prefork_relay; proc++) {
		cid = c->iev.ibuf.fd;

		/*
		 * Request all the running sessions from the process
		 */
		proc_compose_imsg(env->sc_ps, PROC_RELAY, proc,
		    IMSG_CTL_SESSION, -1, -1, &cid, sizeof(cid));
		c->waiting++;
	}
}

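/*
 * Control commands to disable/enable a redirection by name or id.
 */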
int
disable_rdr(struct ctl_conn *c, struct ctl_id *id)
{
	struct rdr *rdr;

	if (id->id == EMPTY_ID)
		rdr = rdr_findbyname(env, id->name);
	else
		rdr = rdr_find(env, id->id);
	if (rdr == NULL)
		return (-1);
	id->id = rdr->conf.id;

	if (rdr->conf.flags & F_DISABLE)
		return (0);

	rdr->conf.flags |= F_DISABLE;
	rdr->conf.flags &= ~(F_ADD);
	rdr->conf.flags |= F_DEL;
	rdr->table->conf.flags |= F_DISABLE;
	log_debug("%s: redirect %d", __func__, rdr->conf.id);
	pfe_sync();
	return (0);
}

int
enable_rdr(struct ctl_conn *c, struct ctl_id *id)
{
	struct rdr *rdr;
	struct ctl_id eid;

	if (id->id == EMPTY_ID)
		rdr = rdr_findbyname(env, id->name);
	else
		rdr = rdr_find(env, id->id);
	if (rdr == NULL)
		return (-1);
	id->id = rdr->conf.id;

	if (!(rdr->conf.flags & F_DISABLE))
		return (0);

	rdr->conf.flags &= ~(F_DISABLE);
	rdr->conf.flags &= ~(F_DEL);
	rdr->conf.flags |= F_ADD;
	log_debug("%s: redirect %d", __func__, rdr->conf.id);

	bzero(&eid, sizeof(eid));

	/* XXX: we're syncing twice */
	eid.id = rdr->table->conf.id;
	if (enable_table(c, &eid) == -1)
		return (-1);
	if (rdr->backup->conf.id == EMPTY_ID)
		return (0);
	eid.id = rdr->backup->conf.id;
	if (enable_table(c, &eid) == -1)
		return (-1);
	return (0);
}

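/*
 * Control commands to disable/enable a table; host states are reset and
 * the change is announced to the hce and relay processes.
 */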
int
disable_table(struct ctl_conn *c, struct ctl_id *id)
{
	struct table *table;
	struct host *host;

	if (id->id == EMPTY_ID)
		table = table_findbyname(env, id->name);
	else
		table = table_find(env, id->id);
	if (table == NULL)
		return (-1);
	id->id = table->conf.id;
	if (table->conf.rdrid > 0 && rdr_find(env, table->conf.rdrid) == NULL)
		fatalx("%s: desynchronised", __func__);

	if (table->conf.flags & F_DISABLE)
		return (0);
	table->conf.flags |= (F_DISABLE|F_CHANGED);
	table->up = 0;
	TAILQ_FOREACH(host, &table->hosts, entry)
		host->up = HOST_UNKNOWN;
	proc_compose(env->sc_ps, PROC_HCE, IMSG_TABLE_DISABLE,
	    &table->conf.id, sizeof(table->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_TABLE_DISABLE,
	    &table->conf.id, sizeof(table->conf.id));

	log_debug("%s: table %d", __func__, table->conf.id);
	pfe_sync();
	return (0);
}

int
enable_table(struct ctl_conn *c, struct ctl_id *id)
{
	struct table *table;
	struct host *host;

	if (id->id == EMPTY_ID)
		table = table_findbyname(env, id->name);
	else
		table = table_find(env, id->id);
	if (table == NULL)
		return (-1);
	id->id = table->conf.id;

	if (table->conf.rdrid > 0 && rdr_find(env, table->conf.rdrid) == NULL)
		fatalx("%s: desynchronised", __func__);

	if (!(table->conf.flags & F_DISABLE))
		return (0);
	table->conf.flags &= ~(F_DISABLE);
	table->conf.flags |= F_CHANGED;
	table->up = 0;
	TAILQ_FOREACH(host, &table->hosts, entry)
		host->up = HOST_UNKNOWN;
	proc_compose(env->sc_ps, PROC_HCE, IMSG_TABLE_ENABLE,
	    &table->conf.id, sizeof(table->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_TABLE_ENABLE,
	    &table->conf.id, sizeof(table->conf.id));

	log_debug("%s: table %d", __func__, table->conf.id);
	pfe_sync();
	return (0);
}

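/*
 * Control commands to disable/enable a host; the change recurses into
 * child hosts and, for lookups by name, into same-named hosts in other
 * tables, and is announced to the hce and relay processes.
 */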
int
disable_host(struct ctl_conn *c, struct ctl_id *id, struct host *host)
{
	struct host *h;
	struct table *table, *t;
	int host_byname = 0;

	if (host == NULL) {
		if (id->id == EMPTY_ID) {
			host = host_findbyname(env, id->name);
			host_byname = 1;
		}
		else
			host = host_find(env, id->id);
		if (host == NULL || host->conf.parentid)
			return (-1);
	}
	id->id = host->conf.id;

	if (host->flags & F_DISABLE)
		return (0);

	if (host->up == HOST_UP) {
		if ((table = table_find(env, host->conf.tableid)) == NULL)
			fatalx("%s: invalid table id", __func__);
		table->up--;
		table->conf.flags |= F_CHANGED;
	}

	host->up = HOST_UNKNOWN;
	host->flags |= F_DISABLE;
	host->flags |= F_DEL;
	host->flags &= ~(F_ADD);
	host->check_cnt = 0;
	host->up_cnt = 0;

	proc_compose(env->sc_ps, PROC_HCE, IMSG_HOST_DISABLE,
	    &host->conf.id, sizeof(host->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_HOST_DISABLE,
	    &host->conf.id, sizeof(host->conf.id));
	log_debug("%s: host %d", __func__, host->conf.id);

	if (!host->conf.parentid) {
		/* Disable all children */
		SLIST_FOREACH(h, &host->children, child)
			disable_host(c, id, h);

		/* Disable hosts with same name on all tables */
		if (host_byname)
			TAILQ_FOREACH(t, env->sc_tables, entry)
				TAILQ_FOREACH(h, &t->hosts, entry)
					if (strcmp(h->conf.name,
					    host->conf.name) == 0 &&
					    h->conf.id != host->conf.id &&
					    !h->conf.parentid)
						disable_host(c, id, h);
		pfe_sync();
	}
	return (0);
}

int
enable_host(struct ctl_conn *c, struct ctl_id *id, struct host *host)
{
	struct host *h;
	struct table *t;
	int host_byname = 0;


	if (host == NULL) {
		if (id->id == EMPTY_ID) {
			host = host_findbyname(env, id->name);
			host_byname = 1;
		}
		else
			host = host_find(env, id->id);
		if (host == NULL || host->conf.parentid)
			return (-1);
	}
	id->id = host->conf.id;

	if (!(host->flags & F_DISABLE))
		return (0);

	host->up = HOST_UNKNOWN;
	host->flags &= ~(F_DISABLE);
	host->flags &= ~(F_DEL);
	host->flags &= ~(F_ADD);

	proc_compose(env->sc_ps, PROC_HCE, IMSG_HOST_ENABLE,
	    &host->conf.id, sizeof (host->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_HOST_ENABLE,
	    &host->conf.id, sizeof(host->conf.id));

	log_debug("%s: host %d", __func__, host->conf.id);

	if (!host->conf.parentid) {
		/* Enable all children */
		SLIST_FOREACH(h, &host->children, child)
			enable_host(c, id, h);

		/* Enable hosts with same name on all tables */
		if (host_byname)
			TAILQ_FOREACH(t, env->sc_tables, entry)
				TAILQ_FOREACH(h, &t->hosts, entry)
					if (strcmp(h->conf.name,
					    host->conf.name) == 0 &&
					    h->conf.id != host->conf.id &&
					    !h->conf.parentid)
						enable_host(c, id, h);
		pfe_sync();
	}
	return (0);
}

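/*
 * Synchronize the state of all redirections, routers and tables with pf:
 * pick the active or backup table, update the pf tables/rulesets and
 * notify control clients and the parent (for demotion) of any changes.
 */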
void
pfe_sync(void)
{
	struct rdr *rdr;
	struct table *active;
	struct table *table;
	struct ctl_id id;
	struct imsg imsg;
	struct ctl_demote demote;
	struct router *rt;

	bzero(&id, sizeof(id));
	bzero(&imsg, sizeof(imsg));
	TAILQ_FOREACH(rdr, env->sc_rdrs, entry) {
		rdr->conf.flags &= ~(F_BACKUP);
		rdr->conf.flags &= ~(F_DOWN);

		if (rdr->conf.flags & F_DISABLE ||
		    (rdr->table->up == 0 && rdr->backup->up == 0)) {
			rdr->conf.flags |= F_DOWN;
			active = NULL;
		} else if (rdr->table->up == 0 && rdr->backup->up > 0) {
			rdr->conf.flags |= F_BACKUP;
			active = rdr->backup;
			active->conf.flags |=
			    rdr->table->conf.flags & F_CHANGED;
			active->conf.flags |=
			    rdr->backup->conf.flags & F_CHANGED;
		} else
			active = rdr->table;

		if (active != NULL && active->conf.flags & F_CHANGED) {
			id.id = active->conf.id;
			imsg.hdr.type = IMSG_CTL_TABLE_CHANGED;
			imsg.hdr.len = sizeof(id) + IMSG_HEADER_SIZE;
			imsg.data = &id;
			sync_table(env, rdr, active);
			control_imsg_forward(env->sc_ps, &imsg);
		}

		if (rdr->conf.flags & F_DOWN) {
			if (rdr->conf.flags & F_ACTIVE_RULESET) {
				flush_table(env, rdr);
				log_debug("%s: disabling ruleset", __func__);
				rdr->conf.flags &= ~(F_ACTIVE_RULESET);
				id.id = rdr->conf.id;
				imsg.hdr.type = IMSG_CTL_PULL_RULESET;
				imsg.hdr.len = sizeof(id) + IMSG_HEADER_SIZE;
				imsg.data = &id;
				sync_ruleset(env, rdr, 0);
				control_imsg_forward(env->sc_ps, &imsg);
			}
		} else if (!(rdr->conf.flags & F_ACTIVE_RULESET)) {
			log_debug("%s: enabling ruleset", __func__);
			rdr->conf.flags |= F_ACTIVE_RULESET;
			id.id = rdr->conf.id;
			imsg.hdr.type = IMSG_CTL_PUSH_RULESET;
			imsg.hdr.len = sizeof(id) + IMSG_HEADER_SIZE;
			imsg.data = &id;
			sync_ruleset(env, rdr, 1);
			control_imsg_forward(env->sc_ps, &imsg);
		}
	}

	TAILQ_FOREACH(rt, env->sc_rts, rt_entry) {
		rt->rt_conf.flags &= ~(F_BACKUP);
		rt->rt_conf.flags &= ~(F_DOWN);

		if ((rt->rt_gwtable->conf.flags & F_CHANGED))
			sync_routes(env, rt);
	}

	TAILQ_FOREACH(table, env->sc_tables, entry) {
		if (table->conf.check == CHECK_NOCHECK)
			continue;

		/*
		 * clean up change flag.
		 */
		table->conf.flags &= ~(F_CHANGED);

		/*
		 * handle demotion.
		 */
		if ((table->conf.flags & F_DEMOTE) == 0)
			continue;
		demote.level = 0;
		if (table->up && table->conf.flags & F_DEMOTED) {
			demote.level = -1;
			table->conf.flags &= ~F_DEMOTED;
		}
		else if (!table->up && !(table->conf.flags & F_DEMOTED)) {
			demote.level = 1;
			table->conf.flags |= F_DEMOTED;
		}
		if (demote.level == 0)
			continue;
		log_debug("%s: demote %d table '%s' group '%s'", __func__,
		    demote.level, table->conf.name, table->conf.demote_group);
		(void)strlcpy(demote.group, table->conf.demote_group,
		    sizeof(demote.group));
		proc_compose(env->sc_ps, PROC_PARENT, IMSG_DEMOTE,
		    &demote, sizeof(demote));
	}
}

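/*
 * Periodic statistics timer: sample the table counters for every
 * redirection, maintain running averages per interval, hour and day, and
 * re-arm the timer with the configured interval.
 */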
void
pfe_statistics(int fd, short events, void *arg)
{
	struct rdr *rdr;
	struct ctl_stats *cur;
	struct timeval tv, tv_now;
	int resethour, resetday;
	u_long cnt;

	timerclear(&tv);
	getmonotime(&tv_now);

	TAILQ_FOREACH(rdr, env->sc_rdrs, entry) {
		cnt = check_table(env, rdr, rdr->table);
		if (rdr->conf.backup_id != EMPTY_TABLE)
			cnt += check_table(env, rdr, rdr->backup);

		resethour = resetday = 0;

		cur = &rdr->stats;
		cur->last = cnt > cur->cnt ? cnt - cur->cnt : 0;

		cur->cnt = cnt;
		cur->tick++;
		cur->avg = (cur->last + cur->avg) / 2;
		cur->last_hour += cur->last;
		if ((cur->tick %
		    (3600 / env->sc_conf.statinterval.tv_sec)) == 0) {
			cur->avg_hour = (cur->last_hour + cur->avg_hour) / 2;
			resethour++;
		}
		cur->last_day += cur->last;
		if ((cur->tick %
		    (86400 / env->sc_conf.statinterval.tv_sec)) == 0) {
			cur->avg_day = (cur->last_day + cur->avg_day) / 2;
			resetday++;
		}
		if (resethour)
			cur->last_hour = 0;
		if (resetday)
			cur->last_day = 0;

		rdr->stats.interval = env->sc_conf.statinterval.tv_sec;
	}

	/* Schedule statistics timer */
	evtimer_set(&env->sc_statev, pfe_statistics, NULL);
	bcopy(&env->sc_conf.statinterval, &tv, sizeof(tv));
	evtimer_add(&env->sc_statev, &tv);
}