/*	$OpenBSD: pfe.c,v 1.90 2020/09/14 11:30:25 martijn Exp $	*/

/*
 * Copyright (c) 2006 Pierre-Yves Ritschard <pyr@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <net/pfvar.h>

#include <event.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <imsg.h>

#include "relayd.h"

void	 pfe_init(struct privsep *, struct privsep_proc *p, void *);
void	 pfe_shutdown(void);
void	 pfe_setup_events(void);
void	 pfe_disable_events(void);
void	 pfe_sync(void);
void	 pfe_statistics(int, short, void *);

int	 pfe_dispatch_parent(int, struct privsep_proc *, struct imsg *);
int	 pfe_dispatch_hce(int, struct privsep_proc *, struct imsg *);
int	 pfe_dispatch_relay(int, struct privsep_proc *, struct imsg *);

static struct relayd *env = NULL;

static struct privsep_proc procs[] = {
	{ "parent",	PROC_PARENT,	pfe_dispatch_parent },
	{ "relay",	PROC_RELAY,	pfe_dispatch_relay },
	{ "hce",	PROC_HCE,	pfe_dispatch_hce }
};

void
pfe(struct privsep *ps, struct privsep_proc *p)
{
	int s;
	struct pf_status status;

	env = ps->ps_env;

	if ((s = open(PF_SOCKET, O_RDWR)) == -1) {
		fatal("%s: cannot open pf socket", __func__);
	}
	if (env->sc_pf == NULL) {
		if ((env->sc_pf = calloc(1, sizeof(*(env->sc_pf)))) == NULL)
			fatal("calloc");
		env->sc_pf->dev = s;
	}
	if (ioctl(env->sc_pf->dev, DIOCGETSTATUS, &status) == -1)
		fatal("%s: DIOCGETSTATUS", __func__);
	if (!status.running)
		fatalx("%s: pf is disabled", __func__);
	log_debug("%s: filter init done", __func__);

	proc_run(ps, p, procs, nitems(procs), pfe_init, NULL);
}

void
pfe_init(struct privsep *ps, struct privsep_proc *p, void *arg)
{
	if (config_init(ps->ps_env) == -1)
		fatal("failed to initialize configuration");

	if (pledge("stdio recvfd unix pf", NULL) == -1)
		fatal("pledge");

	p->p_shutdown = pfe_shutdown;
}

void
pfe_shutdown(void)
{
	flush_rulesets(env);
	config_purge(env, CONFIG_ALL);
}

void
pfe_setup_events(void)
{
	struct timeval tv;

	/* Schedule statistics timer */
	if (!event_initialized(&env->sc_statev)) {
		evtimer_set(&env->sc_statev, pfe_statistics, NULL);
		bcopy(&env->sc_conf.statinterval, &tv, sizeof(tv));
		evtimer_add(&env->sc_statev, &tv);
	}
}

void
pfe_disable_events(void)
{
	event_del(&env->sc_statev);
}

int
pfe_dispatch_hce(int fd, struct privsep_proc *p, struct imsg *imsg)
{
	struct host		*host;
	struct table		*table;
	struct ctl_status	 st;

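	/*
	 * Status updates from the health check engine (hce).  The
	 * message is also forwarded to any control clients that have
	 * subscribed to notifications (relayctl monitor).
	 */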
	control_imsg_forward(p->p_ps, imsg);

	switch (imsg->hdr.type) {
	case IMSG_HOST_STATUS:
		IMSG_SIZE_CHECK(imsg, &st);
		memcpy(&st, imsg->data, sizeof(st));
		if ((host = host_find(env, st.id)) == NULL)
			fatalx("%s: invalid host id", __func__);
		host->he = st.he;
		if (host->flags & F_DISABLE)
			break;
		host->retry_cnt = st.retry_cnt;
		if (st.up != HOST_UNKNOWN) {
			host->check_cnt++;
			if (st.up == HOST_UP)
				host->up_cnt++;
		}
		if (host->check_cnt != st.check_cnt) {
			log_debug("%s: host %d => %d", __func__,
			    host->conf.id, host->up);
			fatalx("%s: desynchronized", __func__);
		}

		if (host->up == st.up)
			break;

		/* Forward to relay engine(s) */
		proc_compose(env->sc_ps, PROC_RELAY,
		    IMSG_HOST_STATUS, &st, sizeof(st));

		if ((table = table_find(env, host->conf.tableid))
		    == NULL)
			fatalx("%s: invalid table id", __func__);

		log_debug("%s: state %d for host %u %s", __func__,
		    st.up, host->conf.id, host->conf.name);

		/* XXX Readd hosttrap code later */
#if 0
		snmp_hosttrap(env, table, host);
#endif

		/*
		 * Do not change the table state when the host
		 * state switches between UNKNOWN and DOWN.
		 */
		if (HOST_ISUP(st.up)) {
			table->conf.flags |= F_CHANGED;
			table->up++;
			host->flags |= F_ADD;
			host->flags &= ~(F_DEL);
		} else if (HOST_ISUP(host->up)) {
			table->up--;
			table->conf.flags |= F_CHANGED;
			host->flags |= F_DEL;
			host->flags &= ~(F_ADD);
			host->up = st.up;
			pfe_sync();
		}

		host->up = st.up;
		break;
	case IMSG_SYNC:
		pfe_sync();
		break;
	default:
		return (-1);
	}

	return (0);
}

int
pfe_dispatch_parent(int fd, struct privsep_proc *p, struct imsg *imsg)
{
	switch (imsg->hdr.type) {
	case IMSG_CFG_TABLE:
		config_gettable(env, imsg);
		break;
	case IMSG_CFG_HOST:
		config_gethost(env, imsg);
		break;
	case IMSG_CFG_RDR:
		config_getrdr(env, imsg);
		break;
	case IMSG_CFG_VIRT:
		config_getvirt(env, imsg);
		break;
	case IMSG_CFG_ROUTER:
		config_getrt(env, imsg);
		break;
	case IMSG_CFG_ROUTE:
		config_getroute(env, imsg);
		break;
	case IMSG_CFG_PROTO:
		config_getproto(env, imsg);
		break;
	case IMSG_CFG_RELAY:
		config_getrelay(env, imsg);
		break;
	case IMSG_CFG_RELAY_TABLE:
		config_getrelaytable(env, imsg);
		break;
	case IMSG_CFG_DONE:
		config_getcfg(env, imsg);
		init_tables(env);
		agentx_init(env);
		break;
	case IMSG_CTL_START:
		pfe_setup_events();
		pfe_sync();
		break;
	case IMSG_CTL_RESET:
		config_getreset(env, imsg);
		break;
	case IMSG_AGENTXSOCK:
		agentx_getsock(imsg);
		break;
	default:
		return (-1);
	}

	return (0);
}

int
pfe_dispatch_relay(int fd, struct privsep_proc *p, struct imsg *imsg)
{
	struct ctl_natlook	 cnl;
	struct ctl_stats	 crs;
	struct relay		*rlay;
	struct ctl_conn		*c;
	struct rsession		 con, *s, *t;
	int			 cid;
	objid_t			 sid;

	switch (imsg->hdr.type) {
	case IMSG_NATLOOK:
		IMSG_SIZE_CHECK(imsg, &cnl);
		bcopy(imsg->data, &cnl, sizeof(cnl));
		if (cnl.proc > env->sc_conf.prefork_relay)
			fatalx("%s: invalid relay proc", __func__);
		if (natlook(env, &cnl) != 0)
			cnl.in = -1;
		proc_compose_imsg(env->sc_ps, PROC_RELAY, cnl.proc,
		    IMSG_NATLOOK, -1, -1, &cnl, sizeof(cnl));
		break;
	case IMSG_STATISTICS:
		IMSG_SIZE_CHECK(imsg, &crs);
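		/*
		 * Periodic statistics snapshot reported by one relay
		 * process; it is stored in that process' slot below.
		 */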
		bcopy(imsg->data, &crs, sizeof(crs));
		if (crs.proc > env->sc_conf.prefork_relay)
			fatalx("%s: invalid relay proc", __func__);
		if ((rlay = relay_find(env, crs.id)) == NULL)
			fatalx("%s: invalid relay id", __func__);
		bcopy(&crs, &rlay->rl_stats[crs.proc], sizeof(crs));
		rlay->rl_stats[crs.proc].interval =
		    env->sc_conf.statinterval.tv_sec;
		break;
	case IMSG_CTL_SESSION:
		IMSG_SIZE_CHECK(imsg, &con);
		memcpy(&con, imsg->data, sizeof(con));
		if ((c = control_connbyfd(con.se_cid)) == NULL) {
			log_debug("%s: control connection %d not found",
			    __func__, con.se_cid);
			return (0);
		}
		imsg_compose_event(&c->iev,
		    IMSG_CTL_SESSION, 0, 0, -1,
		    &con, sizeof(con));
		break;
	case IMSG_CTL_END:
		IMSG_SIZE_CHECK(imsg, &cid);
		memcpy(&cid, imsg->data, sizeof(cid));
		if ((c = control_connbyfd(cid)) == NULL) {
			log_debug("%s: control connection %d not found",
			    __func__, cid);
			return (0);
		}
		if (c->waiting == 0) {
			log_debug("%s: no pending control requests", __func__);
			return (0);
		} else if (--c->waiting == 0) {
			/* Last ack for a previous request */
			imsg_compose_event(&c->iev, IMSG_CTL_END,
			    0, 0, -1, NULL, 0);
		}
		break;
	case IMSG_SESS_PUBLISH:
		IMSG_SIZE_CHECK(imsg, s);
		if ((s = calloc(1, sizeof(*s))) == NULL)
			return (0);	/* XXX */
		memcpy(s, imsg->data, sizeof(*s));
		TAILQ_FOREACH(t, &env->sc_sessions, se_entry) {
			/* duplicate registration */
			if (t->se_id == s->se_id) {
				free(s);
				return (0);
			}
			if (t->se_id > s->se_id)
				break;
		}
		if (t)
			TAILQ_INSERT_BEFORE(t, s, se_entry);
		else
			TAILQ_INSERT_TAIL(&env->sc_sessions, s, se_entry);
		break;
	case IMSG_SESS_UNPUBLISH:
		IMSG_SIZE_CHECK(imsg, &sid);
		memcpy(&sid, imsg->data, sizeof(sid));
		TAILQ_FOREACH(s, &env->sc_sessions, se_entry)
			if (s->se_id == sid)
				break;
		if (s) {
			TAILQ_REMOVE(&env->sc_sessions, s, se_entry);
			free(s);
		} else {
			DPRINTF("removal of unpublished session %i", sid);
		}
		break;
	default:
		return (-1);
	}

	return (0);
}

void
show(struct ctl_conn *c)
{
	struct rdr		*rdr;
	struct host		*host;
	struct relay		*rlay;
	struct router		*rt;
	struct netroute		*nr;
	struct relay_table	*rlt;

	if (env->sc_rdrs == NULL)
		goto relays;
	TAILQ_FOREACH(rdr, env->sc_rdrs, entry) {
		imsg_compose_event(&c->iev, IMSG_CTL_RDR, 0, 0, -1,
		    rdr, sizeof(*rdr));
		if (rdr->conf.flags & F_DISABLE)
			continue;

		imsg_compose_event(&c->iev, IMSG_CTL_RDR_STATS, 0, 0, -1,
		    &rdr->stats, sizeof(rdr->stats));

		imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
		    rdr->table, sizeof(*rdr->table));
		if (!(rdr->table->conf.flags & F_DISABLE))
			TAILQ_FOREACH(host, &rdr->table->hosts, entry)
				imsg_compose_event(&c->iev, IMSG_CTL_HOST,
				    0, 0, -1, host, sizeof(*host));

		if (rdr->backup->conf.id == EMPTY_TABLE)
			continue;
		imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
		    rdr->backup, sizeof(*rdr->backup));
		if (!(rdr->backup->conf.flags & F_DISABLE))
			TAILQ_FOREACH(host, &rdr->backup->hosts, entry)
				imsg_compose_event(&c->iev, IMSG_CTL_HOST,
				    0, 0, -1, host, sizeof(*host));
	}
relays:
	if (env->sc_relays == NULL)
		goto routers;
	TAILQ_FOREACH(rlay, env->sc_relays, rl_entry) {
		rlay->rl_stats[env->sc_conf.prefork_relay].id = EMPTY_ID;
		imsg_compose_event(&c->iev, IMSG_CTL_RELAY, 0, 0, -1,
		    rlay, sizeof(*rlay));
		imsg_compose_event(&c->iev, IMSG_CTL_RELAY_STATS, 0, 0, -1,
		    &rlay->rl_stats, sizeof(rlay->rl_stats));

		TAILQ_FOREACH(rlt, &rlay->rl_tables, rlt_entry) {
			imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
			    rlt->rlt_table, sizeof(*rlt->rlt_table));
			if (!(rlt->rlt_table->conf.flags & F_DISABLE))
				TAILQ_FOREACH(host,
				    &rlt->rlt_table->hosts, entry)
					imsg_compose_event(&c->iev,
					    IMSG_CTL_HOST, 0, 0, -1,
					    host, sizeof(*host));
		}
	}

routers:
	if (env->sc_rts == NULL)
		goto end;
	TAILQ_FOREACH(rt, env->sc_rts, rt_entry) {
		imsg_compose_event(&c->iev, IMSG_CTL_ROUTER, 0, 0, -1,
		    rt, sizeof(*rt));
		if (rt->rt_conf.flags & F_DISABLE)
			continue;

		TAILQ_FOREACH(nr, &rt->rt_netroutes, nr_entry)
			imsg_compose_event(&c->iev, IMSG_CTL_NETROUTE,
			    0, 0, -1, nr, sizeof(*nr));
		imsg_compose_event(&c->iev, IMSG_CTL_TABLE, 0, 0, -1,
		    rt->rt_gwtable, sizeof(*rt->rt_gwtable));
		if (!(rt->rt_gwtable->conf.flags & F_DISABLE))
			TAILQ_FOREACH(host, &rt->rt_gwtable->hosts, entry)
				imsg_compose_event(&c->iev, IMSG_CTL_HOST,
				    0, 0, -1, host, sizeof(*host));
	}

end:
	imsg_compose_event(&c->iev, IMSG_CTL_END, 0, 0, -1, NULL, 0);
}

void
show_sessions(struct ctl_conn *c)
{
	int proc, cid;

	for (proc = 0; proc < env->sc_conf.prefork_relay; proc++) {
		cid = c->iev.ibuf.fd;

		/*
		 * Request all the running sessions from the process
		 */
		proc_compose_imsg(env->sc_ps, PROC_RELAY, proc,
		    IMSG_CTL_SESSION, -1, -1, &cid, sizeof(cid));
		c->waiting++;
	}
}

int
disable_rdr(struct ctl_conn *c, struct ctl_id *id)
{
	struct rdr *rdr;

	if (id->id == EMPTY_ID)
		rdr = rdr_findbyname(env, id->name);
	else
		rdr = rdr_find(env, id->id);
	if (rdr == NULL)
		return (-1);
	id->id = rdr->conf.id;

	if (rdr->conf.flags & F_DISABLE)
		return (0);

	rdr->conf.flags |= F_DISABLE;
	rdr->conf.flags &= ~(F_ADD);
	rdr->conf.flags |= F_DEL;
	rdr->table->conf.flags |= F_DISABLE;
	log_debug("%s: redirect %d", __func__, rdr->conf.id);
	pfe_sync();
	return (0);
}

int
enable_rdr(struct ctl_conn *c, struct ctl_id *id)
{
	struct rdr *rdr;
	struct ctl_id eid;

	if (id->id == EMPTY_ID)
		rdr = rdr_findbyname(env, id->name);
	else
		rdr = rdr_find(env, id->id);
	if (rdr == NULL)
		return (-1);
	id->id = rdr->conf.id;

	if (!(rdr->conf.flags & F_DISABLE))
		return (0);

	rdr->conf.flags &= ~(F_DISABLE);
	rdr->conf.flags &= ~(F_DEL);
	rdr->conf.flags |= F_ADD;
	log_debug("%s: redirect %d", __func__, rdr->conf.id);

	bzero(&eid, sizeof(eid));

	/* XXX: we're syncing twice */
	eid.id = rdr->table->conf.id;
	if (enable_table(c, &eid) == -1)
		return (-1);
	if (rdr->backup->conf.id == EMPTY_ID)
		return (0);
	eid.id = rdr->backup->conf.id;
	if (enable_table(c, &eid) == -1)
		return (-1);
	return (0);
}

int
disable_table(struct ctl_conn *c, struct ctl_id *id)
{
	struct table *table;
	struct host *host;

	if (id->id == EMPTY_ID)
		table = table_findbyname(env, id->name);
	else
		table = table_find(env, id->id);
	if (table == NULL)
		return (-1);
	id->id = table->conf.id;
	if (table->conf.rdrid > 0 && rdr_find(env, table->conf.rdrid) == NULL)
		fatalx("%s: desynchronised", __func__);

	if (table->conf.flags & F_DISABLE)
		return (0);
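	/*
	 * Mark the table as disabled and changed, reset its hosts to
	 * unknown and notify hce and the relay processes; pfe_sync()
	 * below pushes the resulting state to pf.
	 */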
	table->conf.flags |= (F_DISABLE|F_CHANGED);
	table->up = 0;
	TAILQ_FOREACH(host, &table->hosts, entry)
		host->up = HOST_UNKNOWN;
	proc_compose(env->sc_ps, PROC_HCE, IMSG_TABLE_DISABLE,
	    &table->conf.id, sizeof(table->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_TABLE_DISABLE,
	    &table->conf.id, sizeof(table->conf.id));

	log_debug("%s: table %d", __func__, table->conf.id);
	pfe_sync();
	return (0);
}

int
enable_table(struct ctl_conn *c, struct ctl_id *id)
{
	struct table *table;
	struct host *host;

	if (id->id == EMPTY_ID)
		table = table_findbyname(env, id->name);
	else
		table = table_find(env, id->id);
	if (table == NULL)
		return (-1);
	id->id = table->conf.id;

	if (table->conf.rdrid > 0 && rdr_find(env, table->conf.rdrid) == NULL)
		fatalx("%s: desynchronised", __func__);

	if (!(table->conf.flags & F_DISABLE))
		return (0);
	table->conf.flags &= ~(F_DISABLE);
	table->conf.flags |= F_CHANGED;
	table->up = 0;
	TAILQ_FOREACH(host, &table->hosts, entry)
		host->up = HOST_UNKNOWN;
	proc_compose(env->sc_ps, PROC_HCE, IMSG_TABLE_ENABLE,
	    &table->conf.id, sizeof(table->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_TABLE_ENABLE,
	    &table->conf.id, sizeof(table->conf.id));

	log_debug("%s: table %d", __func__, table->conf.id);
	pfe_sync();
	return (0);
}

int
disable_host(struct ctl_conn *c, struct ctl_id *id, struct host *host)
{
	struct host *h;
	struct table *table;

	if (host == NULL) {
		if (id->id == EMPTY_ID)
			host = host_findbyname(env, id->name);
		else
			host = host_find(env, id->id);
		if (host == NULL || host->conf.parentid)
			return (-1);
	}
	id->id = host->conf.id;

	if (host->flags & F_DISABLE)
		return (0);

	if (host->up == HOST_UP) {
		if ((table = table_find(env, host->conf.tableid)) == NULL)
			fatalx("%s: invalid table id", __func__);
		table->up--;
		table->conf.flags |= F_CHANGED;
	}

	host->up = HOST_UNKNOWN;
	host->flags |= F_DISABLE;
	host->flags |= F_DEL;
	host->flags &= ~(F_ADD);
	host->check_cnt = 0;
	host->up_cnt = 0;

	proc_compose(env->sc_ps, PROC_HCE, IMSG_HOST_DISABLE,
	    &host->conf.id, sizeof(host->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_HOST_DISABLE,
	    &host->conf.id, sizeof(host->conf.id));
	log_debug("%s: host %d", __func__, host->conf.id);

	if (!host->conf.parentid) {
		/* Disable all children */
		SLIST_FOREACH(h, &host->children, child)
			disable_host(c, id, h);
		pfe_sync();
	}
	return (0);
}

int
enable_host(struct ctl_conn *c, struct ctl_id *id, struct host *host)
{
	struct host *h;

	if (host == NULL) {
		if (id->id == EMPTY_ID)
			host = host_findbyname(env, id->name);
		else
			host = host_find(env, id->id);
		if (host == NULL || host->conf.parentid)
			return (-1);
	}
	id->id = host->conf.id;

	if (!(host->flags & F_DISABLE))
		return (0);

	host->up = HOST_UNKNOWN;
	host->flags &= ~(F_DISABLE);
	host->flags &= ~(F_DEL);
	host->flags &= ~(F_ADD);

	proc_compose(env->sc_ps, PROC_HCE, IMSG_HOST_ENABLE,
	    &host->conf.id, sizeof(host->conf.id));

	/* Forward to relay engine(s) */
	proc_compose(env->sc_ps, PROC_RELAY, IMSG_HOST_ENABLE,
	    &host->conf.id, sizeof(host->conf.id));

	log_debug("%s: host %d", __func__, host->conf.id);

	if (!host->conf.parentid) {
		/* Enable all children */
		SLIST_FOREACH(h, &host->children, child)
			enable_host(c, id, h);
		pfe_sync();
	}
	return (0);
}

void
pfe_sync(void)
{
	struct rdr		*rdr;
	struct table		*active;
	struct table		*table;
	struct ctl_id		 id;
	struct imsg		 imsg;
	struct ctl_demote	 demote;
	struct router		*rt;

	bzero(&id, sizeof(id));
	bzero(&imsg, sizeof(imsg));
	TAILQ_FOREACH(rdr, env->sc_rdrs, entry) {
		rdr->conf.flags &= ~(F_BACKUP);
		rdr->conf.flags &= ~(F_DOWN);

		if (rdr->conf.flags & F_DISABLE ||
		    (rdr->table->up == 0 && rdr->backup->up == 0)) {
			rdr->conf.flags |= F_DOWN;
			active = NULL;
		} else if (rdr->table->up == 0 && rdr->backup->up > 0) {
			rdr->conf.flags |= F_BACKUP;
			active = rdr->backup;
			active->conf.flags |=
			    rdr->table->conf.flags & F_CHANGED;
			active->conf.flags |=
			    rdr->backup->conf.flags & F_CHANGED;
		} else
			active = rdr->table;

		if (active != NULL && active->conf.flags & F_CHANGED) {
			id.id = active->conf.id;
			imsg.hdr.type = IMSG_CTL_TABLE_CHANGED;
			imsg.hdr.len = sizeof(id) + IMSG_HEADER_SIZE;
			imsg.data = &id;
			sync_table(env, rdr, active);
			control_imsg_forward(env->sc_ps, &imsg);
		}

		if (rdr->conf.flags & F_DOWN) {
			if (rdr->conf.flags & F_ACTIVE_RULESET) {
				flush_table(env, rdr);
				log_debug("%s: disabling ruleset", __func__);
				rdr->conf.flags &= ~(F_ACTIVE_RULESET);
				id.id = rdr->conf.id;
				imsg.hdr.type = IMSG_CTL_PULL_RULESET;
				imsg.hdr.len = sizeof(id) + IMSG_HEADER_SIZE;
				imsg.data = &id;
				sync_ruleset(env, rdr, 0);
				control_imsg_forward(env->sc_ps, &imsg);
			}
		} else if (!(rdr->conf.flags & F_ACTIVE_RULESET)) {
			log_debug("%s: enabling ruleset", __func__);
			rdr->conf.flags |= F_ACTIVE_RULESET;
			id.id = rdr->conf.id;
			imsg.hdr.type = IMSG_CTL_PUSH_RULESET;
			imsg.hdr.len = sizeof(id) + IMSG_HEADER_SIZE;
			imsg.data = &id;
			sync_ruleset(env, rdr, 1);
			control_imsg_forward(env->sc_ps, &imsg);
		}
	}

	TAILQ_FOREACH(rt, env->sc_rts, rt_entry) {
		rt->rt_conf.flags &= ~(F_BACKUP);
		rt->rt_conf.flags &= ~(F_DOWN);

		if ((rt->rt_gwtable->conf.flags & F_CHANGED))
			sync_routes(env, rt);
	}

	TAILQ_FOREACH(table, env->sc_tables, entry) {
		if (table->conf.check == CHECK_NOCHECK)
			continue;

		/*
		 * clean up change flag.
		 */
		table->conf.flags &= ~(F_CHANGED);

		/*
		 * handle demotion.
		 */
		if ((table->conf.flags & F_DEMOTE) == 0)
			continue;
		demote.level = 0;
		if (table->up && table->conf.flags & F_DEMOTED) {
			demote.level = -1;
			table->conf.flags &= ~F_DEMOTED;
		}
		else if (!table->up && !(table->conf.flags & F_DEMOTED)) {
			demote.level = 1;
			table->conf.flags |= F_DEMOTED;
		}
		if (demote.level == 0)
			continue;
		log_debug("%s: demote %d table '%s' group '%s'", __func__,
		    demote.level, table->conf.name, table->conf.demote_group);
		(void)strlcpy(demote.group, table->conf.demote_group,
		    sizeof(demote.group));
		proc_compose(env->sc_ps, PROC_PARENT, IMSG_DEMOTE,
		    &demote, sizeof(demote));
	}
}

void
pfe_statistics(int fd, short events, void *arg)
{
	struct rdr		*rdr;
	struct ctl_stats	*cur;
	struct timeval		 tv, tv_now;
	int			 resethour, resetday;
	u_long			 cnt;

	timerclear(&tv);
	getmonotime(&tv_now);

	TAILQ_FOREACH(rdr, env->sc_rdrs, entry) {
		cnt = check_table(env, rdr, rdr->table);
		if (rdr->conf.backup_id != EMPTY_TABLE)
			cnt += check_table(env, rdr, rdr->backup);

		resethour = resetday = 0;

		cur = &rdr->stats;
		cur->last = cnt > cur->cnt ? cnt - cur->cnt : 0;

		cur->cnt = cnt;
		cur->tick++;
		cur->avg = (cur->last + cur->avg) / 2;
		cur->last_hour += cur->last;
		if ((cur->tick %
		    (3600 / env->sc_conf.statinterval.tv_sec)) == 0) {
			cur->avg_hour = (cur->last_hour + cur->avg_hour) / 2;
			resethour++;
		}
		cur->last_day += cur->last;
		if ((cur->tick %
		    (86400 / env->sc_conf.statinterval.tv_sec)) == 0) {
			cur->avg_day = (cur->last_day + cur->avg_day) / 2;
			resetday++;
		}
		if (resethour)
			cur->last_hour = 0;
		if (resetday)
			cur->last_day = 0;

		rdr->stats.interval = env->sc_conf.statinterval.tv_sec;
	}

	/* Schedule statistics timer */
	evtimer_set(&env->sc_statev, pfe_statistics, NULL);
	bcopy(&env->sc_conf.statinterval, &tv, sizeof(tv));
	evtimer_add(&env->sc_statev, &tv);
}