1 /* $FreeBSD: src/sys/contrib/pf/net/pf_ioctl.c,v 1.12 2004/08/12 14:15:42 mlaier Exp $ */ 2 /* $OpenBSD: pf_ioctl.c,v 1.112.2.2 2004/07/24 18:28:12 brad Exp $ */ 3 /* $DragonFly: src/sys/net/pf/pf_ioctl.c,v 1.5 2005/06/15 16:32:58 joerg Exp $ */ 4 5 /* 6 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 7 * 8 * Copyright (c) 2001 Daniel Hartmeier 9 * Copyright (c) 2002,2003 Henning Brauer 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 16 * - Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials provided 21 * with the distribution. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 31 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 33 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Effort sponsored in part by the Defense Advanced Research Projects 37 * Agency (DARPA) and Air Force Research Laboratory, Air Force 38 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
39 * 40 */ 41 42 #include "opt_inet.h" 43 #include "opt_inet6.h" 44 #include "use_pfsync.h" 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/mbuf.h> 49 #include <sys/filio.h> 50 #include <sys/fcntl.h> 51 #include <sys/socket.h> 52 #include <sys/socketvar.h> 53 #include <sys/kernel.h> 54 #include <sys/thread2.h> 55 #include <sys/time.h> 56 #include <sys/malloc.h> 57 #include <sys/module.h> 58 #include <sys/conf.h> 59 #include <vm/vm_zone.h> 60 61 #include <net/if.h> 62 #include <net/if_types.h> 63 #include <net/route.h> 64 65 #include <netinet/in.h> 66 #include <netinet/in_var.h> 67 #include <netinet/in_systm.h> 68 #include <netinet/ip.h> 69 #include <netinet/ip_var.h> 70 #include <netinet/ip_icmp.h> 71 72 #include <net/pf/pfvar.h> 73 74 #if NPFSYNC > 0 75 #include <net/pf/if_pfsync.h> 76 #endif /* NPFSYNC > 0 */ 77 78 #ifdef INET6 79 #include <netinet/ip6.h> 80 #include <netinet/in_pcb.h> 81 #endif /* INET6 */ 82 83 #ifdef ALTQ 84 #include <net/altq/altq.h> 85 #endif 86 87 #include <machine/limits.h> 88 #include <net/pfil.h> 89 void init_zone_var(void); 90 void cleanup_pf_zone(void); 91 int pfattach(void); 92 int pfopen(dev_t, int, int, struct thread *); 93 int pfclose(dev_t, int, int, struct thread *); 94 struct pf_pool *pf_get_pool(char *, char *, u_int32_t, 95 u_int8_t, u_int32_t, u_int8_t, u_int8_t, u_int8_t); 96 int pf_get_ruleset_number(u_int8_t); 97 void pf_init_ruleset(struct pf_ruleset *); 98 void pf_mv_pool(struct pf_palist *, struct pf_palist *); 99 void pf_empty_pool(struct pf_palist *); 100 int pfioctl(dev_t, u_long, caddr_t, int, struct thread *); 101 #ifdef ALTQ 102 int pf_begin_altq(u_int32_t *); 103 int pf_rollback_altq(u_int32_t); 104 int pf_commit_altq(u_int32_t); 105 #endif /* ALTQ */ 106 int pf_begin_rules(u_int32_t *, int, char *, char *); 107 int pf_rollback_rules(u_int32_t, int, char *, char *); 108 int pf_commit_rules(u_int32_t, int, char *, char *); 109 110 extern struct callout pf_expire_to; 111 112 struct pf_rule 
pf_default_rule;

#define	TAGID_MAX	50000

/*
 * Two shared tag-name registries: pf_tags for rule tags, pf_qids for
 * ALTQ queue ids.  Both map small integer ids to reference-counted names
 * via tagname2tag()/tag2tagname()/tag_unref() below.
 */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
	pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

/* pf_qids reuses the tag machinery, so queue and tag names must match size. */
#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
static u_int16_t	 tagname2tag(struct pf_tags *, char *);
static void		 tag2tagname(struct pf_tags *, u_int16_t, char *);
static void		 tag_unref(struct pf_tags *, u_int16_t);

/* Debug printf gated on the current pf debug level. */
#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x

static dev_t		 pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_states(void);
static int		 pf_clear_tables(void);
static void		 pf_clear_srcnodes(void);
/*
 * XXX - These are new and need to be checked when moving to a new version
 */

/*
 * Wrapper functions for pfil(9) hooks
 */
static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir);
static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir);
#ifdef INET6
static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir);
static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
    int dir);
#endif

static int		 hook_pf(void);
static int		 dehook_pf(void);
static int		 shutdown_pf(void);
static int		 pf_load(void);
static int		 pf_unload(void);

static struct cdevsw pf_cdevsw = {	/* XXX convert to port model */
	.d_name = PF_NAME,
	.d_maj = 73,		/* XXX */
	.old_open = pfopen,
	.old_close = pfclose,
	.old_ioctl = pfioctl
};

/* Nonzero once the pfil(9) hooks are registered (see hook_pf/dehook_pf). */
static volatile int pf_pfil_hooked = 0;

/*
 * Reset every zone (pool) pointer to NULL so cleanup_pf_zone() can be
 * called safely even when some zones were never created.
 */
void
init_zone_var(void)
{
	pf_src_tree_pl = pf_rule_pl = NULL;
	pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
	pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
	pf_state_scrub_pl = NULL;
	pfr_ktable_pl = pfr_kentry_pl = NULL;
}

/*
 * Tear down all pf memory zones.  Counterpart of the ZONE_CREATE calls
 * in pfattach(); called on attach failure and on module unload.
 */
void
cleanup_pf_zone(void)
{
	ZONE_DESTROY(pf_src_tree_pl);
	ZONE_DESTROY(pf_rule_pl);
	ZONE_DESTROY(pf_state_pl);
	ZONE_DESTROY(pf_altq_pl);
	ZONE_DESTROY(pf_pooladdr_pl);
	ZONE_DESTROY(pf_frent_pl);
	ZONE_DESTROY(pf_frag_pl);
	ZONE_DESTROY(pf_cache_pl);
	ZONE_DESTROY(pf_cent_pl);
	ZONE_DESTROY(pfr_ktable_pl);
	ZONE_DESTROY(pfr_kentry_pl);
	ZONE_DESTROY(pf_state_scrub_pl);
	ZONE_DESTROY(pfi_addr_pl);
}

/*
 * One-time pf initialization: create memory zones, initialize the table,
 * interface and OS-fingerprint subsystems, set up the default rule and
 * default state timeouts, and start the purge timer.
 *
 * Returns 0 on success or an errno-style value on failure (all partially
 * created zones are destroyed before returning an error).
 */
int
pfattach(void)
{
	u_int32_t *my_timeout = pf_default_rule.timeout;
	int error = 1;

	/*
	 * NOTE(review): error starts at 1 and is only cleared at the end of
	 * the do/while; presumably the ZONE_CREATE macro breaks out of the
	 * loop on allocation failure, leaving error set — confirm against
	 * the macro definition.
	 */
	do {
		ZONE_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl");
		ZONE_CREATE(pf_rule_pl,    struct pf_rule, "pfrulepl");
		ZONE_CREATE(pf_state_pl,   struct pf_state, "pfstatepl");
		ZONE_CREATE(pf_altq_pl,    struct pf_altq, "pfaltqpl");
		ZONE_CREATE(pf_pooladdr_pl,struct pf_pooladdr, "pfpooladdrpl");
		ZONE_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
		ZONE_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
		ZONE_CREATE(pf_frent_pl,   struct pf_frent, "pffrent");
		ZONE_CREATE(pf_frag_pl,    struct pf_fragment, "pffrag");
		ZONE_CREATE(pf_cache_pl,   struct pf_fragment, "pffrcache");
		ZONE_CREATE(pf_cent_pl,    struct pf_frcache, "pffrcent");
		ZONE_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
		    "pfstatescrub");
		ZONE_CREATE(pfi_addr_pl,   struct pfi_dynaddr, "pfiaddrpl");
		error = 0;
	} while(0);
	if (error) {
		cleanup_pf_zone();
		return (error);
	}
	pfr_initialize();
	pfi_initialize();
	error = pf_osfp_initialize();
	if (error) {
		cleanup_pf_zone();
		pf_osfp_cleanup();
		return (error);
	}

	/* Hard limits enforced via the pool limit table. */
	pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
	pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
	pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	/* XXX uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
		pf_pool_limits[PF_LIMIT_STATES].limit);
	*/

	RB_INIT(&tree_src_tracking);
	TAILQ_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	TAILQ_INIT(&pf_altqs[0]);
	TAILQ_INIT(&pf_altqs[1]);
	TAILQ_INIT(&pf_pabuf);
	pf_altqs_active = &pf_altqs[0];
	pf_altqs_inactive = &pf_altqs[1];
	TAILQ_INIT(&state_updates);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (uint32_t)(-1);	/* sentinel: not a real rule number */

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = 120;	/* First TCP packet */
	my_timeout[PFTM_TCP_OPENING] = 30;		/* No response yet */
	my_timeout[PFTM_TCP_ESTABLISHED] = 24*60*60;	/* Established */
	my_timeout[PFTM_TCP_CLOSING] = 15 * 60;		/* Half closed */
	my_timeout[PFTM_TCP_FIN_WAIT] = 45;		/* Got both FINs */
	my_timeout[PFTM_TCP_CLOSED] = 90;		/* Got a RST */
	my_timeout[PFTM_UDP_FIRST_PACKET] = 60;		/* First UDP packet */
	my_timeout[PFTM_UDP_SINGLE] = 30;		/* Unidirectional */
	my_timeout[PFTM_UDP_MULTIPLE] = 60;		/* Bidirectional */
	my_timeout[PFTM_ICMP_FIRST_PACKET] = 20;	/* First ICMP packet */
	my_timeout[PFTM_ICMP_ERROR_REPLY] = 10;		/* Got error response */
	my_timeout[PFTM_OTHER_FIRST_PACKET] = 60;	/* First packet */
	my_timeout[PFTM_OTHER_SINGLE] = 30;		/* Unidirectional */
	my_timeout[PFTM_OTHER_MULTIPLE] = 60;		/* Bidirectional */
	my_timeout[PFTM_FRAG] = 30;			/* Fragment expire */
	my_timeout[PFTM_INTERVAL] = 10;			/* Expire interval */

	/* Kick off the periodic state/fragment purge. */
	callout_init(&pf_expire_to);
	callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz,
	    pf_purge_timeout, &pf_expire_to);

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = PF_DEBUG_URGENT;
	pf_pfil_hooked = 0;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	return (error);
}

int
pfopen(dev_t dev, int flags, int devtype, struct thread *td)
{
	/* Only minor 0 (/dev/pf itself) exists. */
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct thread *td)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

/*
 * Look up the address pool of a single rule, identified by anchor/ruleset
 * name, rule action (mapped to a ruleset number) and rule number.
 *
 * active       selects the active vs. inactive rule list.
 * check_ticket when set, the caller's ticket must match the list's ticket.
 * r_last       when set, return the pool of the *last* rule in the list
 *              instead of searching by rule_number.
 *
 * Returns a pointer to the rule's pool, or NULL if anything does not match.
 */
struct pf_pool *
pf_get_pool(char *anchorname, char *rulesetname, u_int32_t ticket,
    u_int8_t rule_action, u_int32_t rule_number, u_int8_t r_last,
    u_int8_t active, u_int8_t check_ticket)
{
	struct pf_ruleset	*ruleset;
	struct pf_rule		*rule;
	int			 rs_num;

	ruleset = pf_find_ruleset(anchorname, rulesetname);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_rulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		/* Linear scan for the rule with the requested number. */
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	return (&rule->rpool);
}

/*
 * Map a rule action to the ruleset (rule list) it belongs in.
 * Returns PF_RULESET_MAX for unknown actions; callers treat any value
 * >= PF_RULESET_MAX as invalid.
 */
int
pf_get_ruleset_number(u_int8_t action)
{
	switch (action) {
	case PF_SCRUB:
		return (PF_RULESET_SCRUB);
		break;
	case PF_PASS:
	case PF_DROP:
		return (PF_RULESET_FILTER);
		break;
	case PF_NAT:
	case PF_NONAT:
		return (PF_RULESET_NAT);
		break;
	case PF_BINAT:
	case PF_NOBINAT:
		return (PF_RULESET_BINAT);
		break;
	case PF_RDR:
	case PF_NORDR:
		return (PF_RULESET_RDR);
		break;
	default:
		return (PF_RULESET_MAX);
		break;
	}
}

/*
 * Zero a ruleset and wire up its active/inactive rule-list pointers to
 * the two embedded queues.  Must be called before the ruleset is used.
 */
void
pf_init_ruleset(struct pf_ruleset *ruleset)
{
	int	i;

	memset(ruleset, 0, sizeof(struct pf_ruleset));
	for (i = 0; i < PF_RULESET_MAX; i++) {
		TAILQ_INIT(&ruleset->rules[i].queues[0]);
		TAILQ_INIT(&ruleset->rules[i].queues[1]);
		ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
		ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
	}
}

/*
 * Find an anchor by name.  pf_anchors is kept sorted by name, so the scan
 * stops as soon as the list entries compare greater than the target.
 * Returns the anchor or NULL if not present.
 */
struct pf_anchor *
pf_find_anchor(const char *anchorname)
{
	struct pf_anchor	*anchor;
	int			 n = -1;

	anchor = TAILQ_FIRST(&pf_anchors);
	while (anchor != NULL && (n = strcmp(anchor->name, anchorname)) < 0)
		anchor = TAILQ_NEXT(anchor, entries);
	if (n == 0)
		return (anchor);
	else
		return (NULL);
}

/*
 * Find a ruleset by anchor/ruleset name.  Empty names for both select the
 * main ruleset; exactly one empty name is invalid.  Note: the name buffers
 * are forcibly NUL-terminated in place (they come from userland ioctls).
 * Returns the ruleset or NULL.
 */
struct pf_ruleset *
pf_find_ruleset(char *anchorname, char *rulesetname)
{
	struct pf_anchor	*anchor;
	struct pf_ruleset	*ruleset;

	if (!anchorname[0] && !rulesetname[0])
		return (&pf_main_ruleset);
	if (!anchorname[0] || !rulesetname[0])
		return (NULL);
	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
	rulesetname[PF_RULESET_NAME_SIZE-1] = 0;
	anchor = pf_find_anchor(anchorname);
	if (anchor == NULL)
		return (NULL);
	/* anchor->rulesets is sorted by name, same scheme as pf_find_anchor. */
	ruleset = TAILQ_FIRST(&anchor->rulesets);
	while (ruleset != NULL && strcmp(ruleset->name, rulesetname) < 0)
		ruleset = TAILQ_NEXT(ruleset, entries);
	if (ruleset != NULL && !strcmp(ruleset->name, rulesetname))
		return (ruleset);
	else
		return (NULL);
}

/*
 * As pf_find_ruleset(), but create (and sorted-insert) the anchor and/or
 * ruleset when missing.  Returns NULL on allocation failure or invalid
 * name combination.
 */
struct pf_ruleset *
pf_find_or_create_ruleset(char anchorname[PF_ANCHOR_NAME_SIZE],
    char rulesetname[PF_RULESET_NAME_SIZE])
{
	struct pf_anchor	*anchor, *a;
	struct pf_ruleset	*ruleset, *r;

	if (!anchorname[0] && !rulesetname[0])
		return (&pf_main_ruleset);
	if (!anchorname[0] || !rulesetname[0])
		return (NULL);
	anchorname[PF_ANCHOR_NAME_SIZE-1] = 0;
	rulesetname[PF_RULESET_NAME_SIZE-1]
	    = 0;
	/* Locate the insertion point in the sorted anchor list. */
	a = TAILQ_FIRST(&pf_anchors);
	while (a != NULL && strcmp(a->name, anchorname) < 0)
		a = TAILQ_NEXT(a, entries);
	if (a != NULL && !strcmp(a->name, anchorname))
		anchor = a;
	else {
		/* Anchor does not exist yet: allocate and sorted-insert. */
		anchor = (struct pf_anchor *)malloc(sizeof(struct pf_anchor),
		    M_TEMP, M_NOWAIT);
		if (anchor == NULL)
			return (NULL);
		memset(anchor, 0, sizeof(struct pf_anchor));
		bcopy(anchorname, anchor->name, sizeof(anchor->name));
		TAILQ_INIT(&anchor->rulesets);
		if (a != NULL)
			TAILQ_INSERT_BEFORE(a, anchor, entries);
		else
			TAILQ_INSERT_TAIL(&pf_anchors, anchor, entries);
	}
	/* Same dance for the ruleset inside the anchor. */
	r = TAILQ_FIRST(&anchor->rulesets);
	while (r != NULL && strcmp(r->name, rulesetname) < 0)
		r = TAILQ_NEXT(r, entries);
	if (r != NULL && !strcmp(r->name, rulesetname))
		return (r);
	ruleset = (struct pf_ruleset *)malloc(sizeof(struct pf_ruleset),
	    M_TEMP, M_NOWAIT);
	if (ruleset != NULL) {
		pf_init_ruleset(ruleset);
		bcopy(rulesetname, ruleset->name, sizeof(ruleset->name));
		ruleset->anchor = anchor;
		if (r != NULL)
			TAILQ_INSERT_BEFORE(r, ruleset, entries);
		else
			TAILQ_INSERT_TAIL(&anchor->rulesets, ruleset, entries);
	}
	/* NULL here means the ruleset allocation failed. */
	return (ruleset);
}

/*
 * Garbage-collect a ruleset (and its anchor, if that becomes empty) once
 * it holds no rules, tables or open transactions.  The main ruleset is
 * never removed (its anchor pointer is NULL).
 */
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*anchor;
	int			 i;

	if (ruleset == NULL || ruleset->anchor == NULL || ruleset->tables > 0 ||
	    ruleset->topen)
		return;
	for (i = 0; i < PF_RULESET_MAX; ++i)
		if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
		    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
		    ruleset->rules[i].inactive.open)
			return;

	anchor = ruleset->anchor;
	TAILQ_REMOVE(&anchor->rulesets, ruleset, entries);
	free(ruleset, M_TEMP);

	if (TAILQ_EMPTY(&anchor->rulesets)) {
		TAILQ_REMOVE(&pf_anchors, anchor, entries);
		free(anchor, M_TEMP);
		pf_update_anchor_rules();
	}
}

/*
 * Move every pool address from poola to the tail of poolb, preserving
 * order.  poola is empty afterwards.
 */
void
pf_mv_pool(struct pf_palist *poola, struct pf_palist
    *poolb)
{
	struct pf_pooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

/*
 * Release every address in a pool list: detach its dynamic-address and
 * table references and its interface binding, then return the entry to
 * the pooladdr zone.
 */
void
pf_empty_pool(struct pf_palist *poola)
{
	struct pf_pooladdr	*empty_pool_pa;

	while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		pfi_dynaddr_remove(&empty_pool_pa->addr);
		pf_tbladdr_remove(&empty_pool_pa->addr);
		pfi_detach_rule(empty_pool_pa->kif);
		TAILQ_REMOVE(poola, empty_pool_pa, entries);
		pool_put(&pf_pooladdr_pl, empty_pool_pa);
	}
}

/*
 * Unlink a rule from its queue (when rulequeue != NULL) and free it once
 * nothing references it anymore.  A rule still pinned by states or source
 * nodes is only unlinked here; the final free happens on a later call
 * with rulequeue == NULL when the counts have dropped.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states <= 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* tqe_prev == NULL marks the rule as unlinked (checked below). */
		rule->entries.tqe_prev = NULL;
		rule->nr = (uint32_t)(-1);
	}

	/* Still referenced, or never unlinked: keep the rule alive. */
	if (rule->states > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	if (rulequeue == NULL) {
		/* Deferred free path: table refs were not dropped above. */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
	}
	pfi_detach_rule(rule->kif);
	pf_empty_pool(&rule->rpool.list);
	pool_put(&pf_rule_pl, rule);
}

/*
 * Translate a tag name into a small numeric id, creating the entry if it
 * does not exist yet.  The returned tag is reference-counted; 0 means
 * failure (out of ids or memory).  The list is kept sorted by tag id.
 */
static u_int16_t
tagname2tag(struct pf_tags *head, char *tagname)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	/* Existing name: bump the refcount and reuse its id. */
	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return
			    (tag->tag);
		}

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
	    M_TEMP, M_NOWAIT);
	if (tag == NULL)
		return (0);
	bzero(tag, sizeof(struct pf_tagname));
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/*
 * Copy the name registered for tagid into p (PF_TAG_NAME_SIZE buffer).
 * Leaves p untouched when the id is unknown.
 */
static void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/*
 * Drop one reference on a tag id; the entry is removed and freed when the
 * count reaches zero.  Tag 0 means "no tag" and is ignored.
 */
static void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_TEMP);
			}
			break;
		}
	}
}

/* Public wrappers operating on the rule-tag registry. */
u_int16_t
pf_tagname2tag(char *tagname)
{
	return (tagname2tag(&pf_tags, tagname));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	return (tag2tagname(&pf_tags, tagid, p));
}

void
pf_tag_unref(u_int16_t tag)
{
	return (tag_unref(&pf_tags, tag));
}

#ifdef ALTQ
u_int32_t
pf_qname2qid(char *qname)
{
	/* Queue ids share the tag machinery, keyed on the pf_qids list. */
	return ((u_int32_t)tagname2tag(&pf_qids, qname));
}

void
pf_qid2qname(u_int32_t qid, char *p)
{
	return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
}

void
pf_qid_unref(u_int32_t qid)
{
	return (tag_unref(&pf_qids, (u_int16_t)qid));
}

/*
 * Start an ALTQ transaction: discard whatever is left on the inactive
 * altq list, hand the caller a fresh ticket and mark the inactive list
 * open.  Returns 0 or the error from altq_remove().
 */
int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		/* An empty qname marks a discipline entry, not a queue. */
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	if (error)
		return (error);
	*ticket = ++ticket_altqs_inactive;
	altqs_inactive_open = 1;
	return (0);
}

/*
 * Abort an ALTQ transaction identified by ticket: purge the inactive list
 * and close it.  A stale or unopened ticket is silently accepted (0).
 */
int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq;
	int		 error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (0);
	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	altqs_inactive_open = 0;
	return (error);
}

/*
 * Commit an ALTQ transaction: swap the active and inactive altq lists,
 * attach the new disciplines and purge the now-inactive old list.
 * Returns EBUSY on a ticket mismatch, else 0 or the first altq error.
 */
int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs;
	struct pf_altq		*altq;
	int			 err, error = 0;

	if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old.
	 */
	crit_enter();		/* the swap must be atomic w.r.t. packet processing */
	old_altqs = pf_altqs_active;
	pf_altqs_active = pf_altqs_inactive;
	pf_altqs_inactive = old_altqs;
	ticket_altqs_active = ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, pf_altqs_active, entries) {
		if (altq->qname[0] == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error) {
				crit_exit();
				return (error);
			}
		}
	}

	/* Purge the old altq list */
	while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
		TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
		if (altq->qname[0] == 0) {
			/* detach and destroy the discipline */
			err = altq_pfdetach(altq);
			/* remember the first error but keep purging */
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		} else
			pf_qid_unref(altq->qid);
		pool_put(&pf_altq_pl, altq);
	}
	crit_exit();

	altqs_inactive_open = 0;
	return (error);
}
#endif /* ALTQ */

/*
 * Start a rule transaction on one ruleset: empty the inactive rule list,
 * hand back a fresh ticket and mark the list open.  Creates the ruleset
 * if needed.  Returns EINVAL for a bad rs_num or name pair.
 */
int
pf_begin_rules(u_int32_t *ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_or_create_ruleset(anchor, ruleset);
	if (rs == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

/*
 * Abort a rule transaction: discard the inactive rule list and close it.
 * Stale tickets and unknown rulesets are silently accepted (0).
 */
int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor, ruleset);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule =
	    TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
		pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

/*
 * Commit a rule transaction: atomically swap the active and inactive rule
 * lists of one ruleset, recompute skip steps, then purge the old rules.
 * Returns EINVAL for a bad rs_num, EBUSY on a ticket/ruleset mismatch.
 */
int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor, char *ruleset)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;
	struct pf_rulequeue	*old_rules;

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_ruleset(anchor, ruleset);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Swap rules, keep the old. */
	crit_enter();		/* swap must be atomic w.r.t. packet processing */
	old_rules = rs->rules[rs_num].active.ptr;
	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	pf_update_anchor_rules();
	crit_exit();
	return (0);
}

/*
 * The /dev/pf ioctl entry point.  Performs two permission gates first:
 * under securelevel > 1 only read-ish (and dummy) operations are allowed,
 * and without FWRITE on the descriptor only get-style operations pass.
 * The big switch below then dispatches the individual DIOC* commands.
 */
int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct pf_pooladdr	*pa = NULL;
	struct pf_pool		*pool = NULL;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETADDRS:
		case DIOCGETADDR:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETALTQS:
		case DIOCGETALTQ:
		case DIOCGETQSTATS:
		case DIOCGETANCHORS:
		case DIOCGETANCHOR:
		case DIOCGETRULESETS:
		case
DIOCGETRULESET: 882 case DIOCRGETTABLES: 883 case DIOCRGETTSTATS: 884 case DIOCRCLRTSTATS: 885 case DIOCRCLRADDRS: 886 case DIOCRADDADDRS: 887 case DIOCRDELADDRS: 888 case DIOCRSETADDRS: 889 case DIOCRGETADDRS: 890 case DIOCRGETASTATS: 891 case DIOCRCLRASTATS: 892 case DIOCRTSTADDRS: 893 case DIOCOSFPGET: 894 case DIOCGETSRCNODES: 895 case DIOCCLRSRCNODES: 896 case DIOCIGETIFACES: 897 case DIOCICLRISTATS: 898 case DIOCGIFSPEED: 899 break; 900 case DIOCRCLRTABLES: 901 case DIOCRADDTABLES: 902 case DIOCRDELTABLES: 903 case DIOCRSETTFLAGS: 904 if (((struct pfioc_table *)addr)->pfrio_flags & 905 PFR_FLAG_DUMMY) 906 break; /* dummy operation ok */ 907 return (EPERM); 908 default: 909 return (EPERM); 910 } 911 912 if (!(flags & FWRITE)) 913 switch (cmd) { 914 case DIOCGETRULES: 915 case DIOCGETRULE: 916 case DIOCGETADDRS: 917 case DIOCGETADDR: 918 case DIOCGETSTATE: 919 case DIOCGETSTATUS: 920 case DIOCGETSTATES: 921 case DIOCGETTIMEOUT: 922 case DIOCGETLIMIT: 923 case DIOCGETALTQS: 924 case DIOCGETALTQ: 925 case DIOCGETQSTATS: 926 case DIOCGETANCHORS: 927 case DIOCGETANCHOR: 928 case DIOCGETRULESETS: 929 case DIOCGETRULESET: 930 case DIOCRGETTABLES: 931 case DIOCRGETTSTATS: 932 case DIOCRGETADDRS: 933 case DIOCRGETASTATS: 934 case DIOCRTSTADDRS: 935 case DIOCOSFPGET: 936 case DIOCGETSRCNODES: 937 case DIOCIGETIFACES: 938 case DIOCGIFSPEED: 939 break; 940 case DIOCRCLRTABLES: 941 case DIOCRADDTABLES: 942 case DIOCRDELTABLES: 943 case DIOCRCLRTSTATS: 944 case DIOCRCLRADDRS: 945 case DIOCRADDADDRS: 946 case DIOCRDELADDRS: 947 case DIOCRSETADDRS: 948 case DIOCRSETTFLAGS: 949 if (((struct pfioc_table *)addr)->pfrio_flags & 950 PFR_FLAG_DUMMY) 951 break; /* dummy operation ok */ 952 return (EACCES); 953 default: 954 return (EACCES); 955 } 956 957 switch (cmd) { 958 959 case DIOCSTART: 960 if (pf_status.running) 961 error = EEXIST; 962 else { 963 error = hook_pf(); 964 if (error) { 965 DPFPRINTF(PF_DEBUG_MISC, 966 ("pf: pfil registeration fail\n")); 967 break; 968 } 969 
pf_status.running = 1; 970 pf_status.since = time_second; 971 if (pf_status.stateid == 0) { 972 pf_status.stateid = time_second; 973 pf_status.stateid = pf_status.stateid << 32; 974 } 975 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n")); 976 } 977 break; 978 979 case DIOCSTOP: 980 if (!pf_status.running) 981 error = ENOENT; 982 else { 983 pf_status.running = 0; 984 error = dehook_pf(); 985 if (error) { 986 pf_status.running = 1; 987 DPFPRINTF(PF_DEBUG_MISC, 988 ("pf: pfil unregisteration failed\n")); 989 } 990 pf_status.since = time_second; 991 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n")); 992 } 993 break; 994 995 case DIOCBEGINRULES: { 996 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 997 998 error = pf_begin_rules(&pr->ticket, pf_get_ruleset_number( 999 pr->rule.action), pr->anchor, pr->ruleset); 1000 break; 1001 } 1002 1003 case DIOCADDRULE: { 1004 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1005 struct pf_ruleset *ruleset; 1006 struct pf_rule *rule, *tail; 1007 struct pf_pooladdr *pa; 1008 int rs_num; 1009 1010 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset); 1011 if (ruleset == NULL) { 1012 error = EINVAL; 1013 break; 1014 } 1015 rs_num = pf_get_ruleset_number(pr->rule.action); 1016 if (rs_num >= PF_RULESET_MAX) { 1017 error = EINVAL; 1018 break; 1019 } 1020 if (pr->rule.anchorname[0] && ruleset != &pf_main_ruleset) { 1021 error = EINVAL; 1022 break; 1023 } 1024 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1025 error = EINVAL; 1026 break; 1027 } 1028 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) { 1029 error = EBUSY; 1030 break; 1031 } 1032 if (pr->pool_ticket != ticket_pabuf) { 1033 error = EBUSY; 1034 break; 1035 } 1036 rule = pool_get(&pf_rule_pl, PR_NOWAIT); 1037 if (rule == NULL) { 1038 error = ENOMEM; 1039 break; 1040 } 1041 bcopy(&pr->rule, rule, sizeof(struct pf_rule)); 1042 rule->anchor = NULL; 1043 rule->kif = NULL; 1044 TAILQ_INIT(&rule->rpool.list); 1045 /* initialize refcounting */ 1046 rule->states = 0; 1047 
rule->src_nodes = 0; 1048 rule->entries.tqe_prev = NULL; 1049 #ifndef INET 1050 if (rule->af == AF_INET) { 1051 pool_put(&pf_rule_pl, rule); 1052 error = EAFNOSUPPORT; 1053 break; 1054 } 1055 #endif /* INET */ 1056 #ifndef INET6 1057 if (rule->af == AF_INET6) { 1058 pool_put(&pf_rule_pl, rule); 1059 error = EAFNOSUPPORT; 1060 break; 1061 } 1062 #endif /* INET6 */ 1063 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, 1064 pf_rulequeue); 1065 if (tail) 1066 rule->nr = tail->nr + 1; 1067 else 1068 rule->nr = 0; 1069 if (rule->ifname[0]) { 1070 rule->kif = pfi_attach_rule(rule->ifname); 1071 if (rule->kif == NULL) { 1072 pool_put(&pf_rule_pl, rule); 1073 error = EINVAL; 1074 break; 1075 } 1076 } 1077 1078 #ifdef ALTQ 1079 /* set queue IDs */ 1080 if (rule->qname[0] != 0) { 1081 if ((rule->qid = pf_qname2qid(rule->qname)) == 0) 1082 error = EBUSY; 1083 else if (rule->pqname[0] != 0) { 1084 if ((rule->pqid = 1085 pf_qname2qid(rule->pqname)) == 0) 1086 error = EBUSY; 1087 } else 1088 rule->pqid = rule->qid; 1089 } 1090 #endif 1091 if (rule->tagname[0]) 1092 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) 1093 error = EBUSY; 1094 if (rule->match_tagname[0]) 1095 if ((rule->match_tag = 1096 pf_tagname2tag(rule->match_tagname)) == 0) 1097 error = EBUSY; 1098 if (rule->rt && !rule->direction) 1099 error = EINVAL; 1100 if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) 1101 error = EINVAL; 1102 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) 1103 error = EINVAL; 1104 if (pf_tbladdr_setup(ruleset, &rule->src.addr)) 1105 error = EINVAL; 1106 if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) 1107 error = EINVAL; 1108 TAILQ_FOREACH(pa, &pf_pabuf, entries) 1109 if (pf_tbladdr_setup(ruleset, &pa->addr)) 1110 error = EINVAL; 1111 1112 pf_mv_pool(&pf_pabuf, &rule->rpool.list); 1113 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) || 1114 (rule->action == PF_BINAT)) && !rule->anchorname[0]) || 1115 (rule->rt > PF_FASTROUTE)) && 1116 (TAILQ_FIRST(&rule->rpool.list) 
== NULL)) 1117 error = EINVAL; 1118 1119 if (error) { 1120 pf_rm_rule(NULL, rule); 1121 break; 1122 } 1123 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list); 1124 rule->evaluations = rule->packets = rule->bytes = 0; 1125 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, 1126 rule, entries); 1127 break; 1128 } 1129 1130 case DIOCCOMMITRULES: { 1131 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1132 1133 error = pf_commit_rules(pr->ticket, pf_get_ruleset_number( 1134 pr->rule.action), pr->anchor, pr->ruleset); 1135 break; 1136 } 1137 1138 case DIOCGETRULES: { 1139 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1140 struct pf_ruleset *ruleset; 1141 struct pf_rule *tail; 1142 int rs_num; 1143 1144 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset); 1145 if (ruleset == NULL) { 1146 error = EINVAL; 1147 break; 1148 } 1149 rs_num = pf_get_ruleset_number(pr->rule.action); 1150 if (rs_num >= PF_RULESET_MAX) { 1151 error = EINVAL; 1152 break; 1153 } 1154 crit_enter(); 1155 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, 1156 pf_rulequeue); 1157 if (tail) 1158 pr->nr = tail->nr + 1; 1159 else 1160 pr->nr = 0; 1161 pr->ticket = ruleset->rules[rs_num].active.ticket; 1162 crit_exit(); 1163 break; 1164 } 1165 1166 case DIOCGETRULE: { 1167 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1168 struct pf_ruleset *ruleset; 1169 struct pf_rule *rule; 1170 int rs_num, i; 1171 1172 ruleset = pf_find_ruleset(pr->anchor, pr->ruleset); 1173 if (ruleset == NULL) { 1174 error = EINVAL; 1175 break; 1176 } 1177 rs_num = pf_get_ruleset_number(pr->rule.action); 1178 if (rs_num >= PF_RULESET_MAX) { 1179 error = EINVAL; 1180 break; 1181 } 1182 if (pr->ticket != ruleset->rules[rs_num].active.ticket) { 1183 error = EBUSY; 1184 break; 1185 } 1186 crit_enter(); 1187 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); 1188 while ((rule != NULL) && (rule->nr != pr->nr)) 1189 rule = TAILQ_NEXT(rule, entries); 1190 if (rule == NULL) { 1191 error = EBUSY; 1192 crit_exit(); 1193 
break; 1194 } 1195 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1196 pfi_dynaddr_copyout(&pr->rule.src.addr); 1197 pfi_dynaddr_copyout(&pr->rule.dst.addr); 1198 pf_tbladdr_copyout(&pr->rule.src.addr); 1199 pf_tbladdr_copyout(&pr->rule.dst.addr); 1200 for (i = 0; i < PF_SKIP_COUNT; ++i) 1201 if (rule->skip[i].ptr == NULL) 1202 pr->rule.skip[i].nr = (uint32_t)(-1); 1203 else 1204 pr->rule.skip[i].nr = 1205 rule->skip[i].ptr->nr; 1206 crit_exit(); 1207 break; 1208 } 1209 1210 case DIOCCHANGERULE: { 1211 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1212 struct pf_ruleset *ruleset; 1213 struct pf_rule *oldrule = NULL, *newrule = NULL; 1214 u_int32_t nr = 0; 1215 int rs_num; 1216 1217 if (!(pcr->action == PF_CHANGE_REMOVE || 1218 pcr->action == PF_CHANGE_GET_TICKET) && 1219 pcr->pool_ticket != ticket_pabuf) { 1220 error = EBUSY; 1221 break; 1222 } 1223 1224 if (pcr->action < PF_CHANGE_ADD_HEAD || 1225 pcr->action > PF_CHANGE_GET_TICKET) { 1226 error = EINVAL; 1227 break; 1228 } 1229 ruleset = pf_find_ruleset(pcr->anchor, pcr->ruleset); 1230 if (ruleset == NULL) { 1231 error = EINVAL; 1232 break; 1233 } 1234 rs_num = pf_get_ruleset_number(pcr->rule.action); 1235 if (rs_num >= PF_RULESET_MAX) { 1236 error = EINVAL; 1237 break; 1238 } 1239 1240 if (pcr->action == PF_CHANGE_GET_TICKET) { 1241 pcr->ticket = ++ruleset->rules[rs_num].active.ticket; 1242 break; 1243 } else { 1244 if (pcr->ticket != 1245 ruleset->rules[rs_num].active.ticket) { 1246 error = EINVAL; 1247 break; 1248 } 1249 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1250 error = EINVAL; 1251 break; 1252 } 1253 } 1254 1255 if (pcr->action != PF_CHANGE_REMOVE) { 1256 newrule = pool_get(&pf_rule_pl, PR_NOWAIT); 1257 if (newrule == NULL) { 1258 error = ENOMEM; 1259 break; 1260 } 1261 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule)); 1262 TAILQ_INIT(&newrule->rpool.list); 1263 /* initialize refcounting */ 1264 newrule->states = 0; 1265 newrule->entries.tqe_prev = NULL; 1266 #ifndef INET 1267 if 
(newrule->af == AF_INET) { 1268 pool_put(&pf_rule_pl, newrule); 1269 error = EAFNOSUPPORT; 1270 break; 1271 } 1272 #endif /* INET */ 1273 #ifndef INET6 1274 if (newrule->af == AF_INET6) { 1275 pool_put(&pf_rule_pl, newrule); 1276 error = EAFNOSUPPORT; 1277 break; 1278 } 1279 #endif /* INET6 */ 1280 if (newrule->ifname[0]) { 1281 newrule->kif = pfi_attach_rule(newrule->ifname); 1282 if (newrule->kif == NULL) { 1283 pool_put(&pf_rule_pl, newrule); 1284 error = EINVAL; 1285 break; 1286 } 1287 } else 1288 newrule->kif = NULL; 1289 1290 #ifdef ALTQ 1291 /* set queue IDs */ 1292 if (newrule->qname[0] != 0) { 1293 if ((newrule->qid = 1294 pf_qname2qid(newrule->qname)) == 0) 1295 error = EBUSY; 1296 else if (newrule->pqname[0] != 0) { 1297 if ((newrule->pqid = 1298 pf_qname2qid(newrule->pqname)) == 0) 1299 error = EBUSY; 1300 } else 1301 newrule->pqid = newrule->qid; 1302 } 1303 #endif 1304 if (newrule->tagname[0]) 1305 if ((newrule->tag = 1306 pf_tagname2tag(newrule->tagname)) == 0) 1307 error = EBUSY; 1308 if (newrule->match_tagname[0]) 1309 if ((newrule->match_tag = pf_tagname2tag( 1310 newrule->match_tagname)) == 0) 1311 error = EBUSY; 1312 1313 if (newrule->rt && !newrule->direction) 1314 error = EINVAL; 1315 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) 1316 error = EINVAL; 1317 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) 1318 error = EINVAL; 1319 if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) 1320 error = EINVAL; 1321 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) 1322 error = EINVAL; 1323 1324 pf_mv_pool(&pf_pabuf, &newrule->rpool.list); 1325 if (((((newrule->action == PF_NAT) || 1326 (newrule->action == PF_RDR) || 1327 (newrule->action == PF_BINAT) || 1328 (newrule->rt > PF_FASTROUTE)) && 1329 !newrule->anchorname[0])) && 1330 (TAILQ_FIRST(&newrule->rpool.list) == NULL)) 1331 error = EINVAL; 1332 1333 if (error) { 1334 pf_rm_rule(NULL, newrule); 1335 break; 1336 } 1337 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list); 1338 
newrule->evaluations = newrule->packets = 0; 1339 newrule->bytes = 0; 1340 } 1341 pf_empty_pool(&pf_pabuf); 1342 1343 crit_enter(); 1344 1345 if (pcr->action == PF_CHANGE_ADD_HEAD) 1346 oldrule = TAILQ_FIRST( 1347 ruleset->rules[rs_num].active.ptr); 1348 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1349 oldrule = TAILQ_LAST( 1350 ruleset->rules[rs_num].active.ptr, pf_rulequeue); 1351 else { 1352 oldrule = TAILQ_FIRST( 1353 ruleset->rules[rs_num].active.ptr); 1354 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1355 oldrule = TAILQ_NEXT(oldrule, entries); 1356 if (oldrule == NULL) { 1357 pf_rm_rule(NULL, newrule); 1358 error = EINVAL; 1359 crit_exit(); 1360 break; 1361 } 1362 } 1363 1364 if (pcr->action == PF_CHANGE_REMOVE) 1365 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); 1366 else { 1367 if (oldrule == NULL) 1368 TAILQ_INSERT_TAIL( 1369 ruleset->rules[rs_num].active.ptr, 1370 newrule, entries); 1371 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1372 pcr->action == PF_CHANGE_ADD_BEFORE) 1373 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1374 else 1375 TAILQ_INSERT_AFTER( 1376 ruleset->rules[rs_num].active.ptr, 1377 oldrule, newrule, entries); 1378 } 1379 1380 nr = 0; 1381 TAILQ_FOREACH(oldrule, 1382 ruleset->rules[rs_num].active.ptr, entries) 1383 oldrule->nr = nr++; 1384 1385 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr); 1386 pf_remove_if_empty_ruleset(ruleset); 1387 pf_update_anchor_rules(); 1388 1389 ruleset->rules[rs_num].active.ticket++; 1390 crit_exit(); 1391 break; 1392 } 1393 1394 case DIOCCLRSTATES: { 1395 struct pf_state *state; 1396 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1397 int killed = 0; 1398 1399 crit_enter(); 1400 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1401 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1402 state->u.s.kif->pfik_name)) { 1403 state->timeout = PFTM_PURGE; 1404 #if NPFSYNC 1405 /* don't send out individual delete messages */ 1406 state->sync_flags = PFSTATE_NOSYNC; 
1407 #endif 1408 killed++; 1409 } 1410 } 1411 pf_purge_expired_states(); 1412 pf_status.states = 0; 1413 psk->psk_af = killed; 1414 #if NPFSYNC 1415 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1416 #endif 1417 crit_exit(); 1418 break; 1419 } 1420 1421 case DIOCKILLSTATES: { 1422 struct pf_state *state; 1423 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1424 int killed = 0; 1425 1426 crit_enter(); 1427 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1428 if ((!psk->psk_af || state->af == psk->psk_af) 1429 && (!psk->psk_proto || psk->psk_proto == 1430 state->proto) && 1431 PF_MATCHA(psk->psk_src.not, 1432 &psk->psk_src.addr.v.a.addr, 1433 &psk->psk_src.addr.v.a.mask, 1434 &state->lan.addr, state->af) && 1435 PF_MATCHA(psk->psk_dst.not, 1436 &psk->psk_dst.addr.v.a.addr, 1437 &psk->psk_dst.addr.v.a.mask, 1438 &state->ext.addr, state->af) && 1439 (psk->psk_src.port_op == 0 || 1440 pf_match_port(psk->psk_src.port_op, 1441 psk->psk_src.port[0], psk->psk_src.port[1], 1442 state->lan.port)) && 1443 (psk->psk_dst.port_op == 0 || 1444 pf_match_port(psk->psk_dst.port_op, 1445 psk->psk_dst.port[0], psk->psk_dst.port[1], 1446 state->ext.port)) && 1447 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1448 state->u.s.kif->pfik_name))) { 1449 state->timeout = PFTM_PURGE; 1450 killed++; 1451 } 1452 } 1453 pf_purge_expired_states(); 1454 crit_exit(); 1455 psk->psk_af = killed; 1456 break; 1457 } 1458 1459 case DIOCADDSTATE: { 1460 struct pfioc_state *ps = (struct pfioc_state *)addr; 1461 struct pf_state *state; 1462 struct pfi_kif *kif; 1463 1464 if (ps->state.timeout >= PFTM_MAX && 1465 ps->state.timeout != PFTM_UNTIL_PACKET) { 1466 error = EINVAL; 1467 break; 1468 } 1469 state = pool_get(&pf_state_pl, PR_NOWAIT); 1470 if (state == NULL) { 1471 error = ENOMEM; 1472 break; 1473 } 1474 crit_enter(); 1475 kif = pfi_lookup_create(ps->state.u.ifname); 1476 if (kif == NULL) { 1477 pool_put(&pf_state_pl, state); 1478 error = ENOENT; 1479 crit_exit(); 1480 
break; 1481 } 1482 bcopy(&ps->state, state, sizeof(struct pf_state)); 1483 bzero(&state->u, sizeof(state->u)); 1484 state->rule.ptr = &pf_default_rule; 1485 state->nat_rule.ptr = NULL; 1486 state->anchor.ptr = NULL; 1487 state->rt_kif = NULL; 1488 state->creation = time_second; 1489 state->pfsync_time = 0; 1490 state->packets[0] = state->packets[1] = 0; 1491 state->bytes[0] = state->bytes[1] = 0; 1492 1493 if (pf_insert_state(kif, state)) { 1494 pfi_maybe_destroy(kif); 1495 pool_put(&pf_state_pl, state); 1496 error = ENOMEM; 1497 } 1498 crit_exit(); 1499 break; 1500 } 1501 1502 case DIOCGETSTATE: { 1503 struct pfioc_state *ps = (struct pfioc_state *)addr; 1504 struct pf_state *state; 1505 u_int32_t nr; 1506 1507 nr = 0; 1508 crit_enter(); 1509 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 1510 if (nr >= ps->nr) 1511 break; 1512 nr++; 1513 } 1514 if (state == NULL) { 1515 error = EBUSY; 1516 crit_exit(); 1517 break; 1518 } 1519 bcopy(state, &ps->state, sizeof(struct pf_state)); 1520 ps->state.rule.nr = state->rule.ptr->nr; 1521 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ? 1522 (uint32_t)(-1) : state->nat_rule.ptr->nr; 1523 ps->state.anchor.nr = (state->anchor.ptr == NULL) ? 
1524 (uint32_t)(-1) : state->anchor.ptr->nr; 1525 crit_exit(); 1526 ps->state.expire = pf_state_expires(state); 1527 if (ps->state.expire > time_second) 1528 ps->state.expire -= time_second; 1529 else 1530 ps->state.expire = 0; 1531 break; 1532 } 1533 1534 case DIOCGETSTATES: { 1535 struct pfioc_states *ps = (struct pfioc_states *)addr; 1536 struct pf_state *state; 1537 struct pf_state *p, pstore; 1538 struct pfi_kif *kif; 1539 u_int32_t nr = 0; 1540 int space = ps->ps_len; 1541 1542 if (space == 0) { 1543 crit_enter(); 1544 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 1545 nr += kif->pfik_states; 1546 crit_exit(); 1547 ps->ps_len = sizeof(struct pf_state) * nr; 1548 return (0); 1549 } 1550 1551 crit_enter(); 1552 p = ps->ps_states; 1553 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states) 1554 RB_FOREACH(state, pf_state_tree_ext_gwy, 1555 &kif->pfik_ext_gwy) { 1556 int secs = time_second; 1557 1558 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 1559 break; 1560 1561 bcopy(state, &pstore, sizeof(pstore)); 1562 strlcpy(pstore.u.ifname, kif->pfik_name, 1563 sizeof(pstore.u.ifname)); 1564 pstore.rule.nr = state->rule.ptr->nr; 1565 pstore.nat_rule.nr = (state->nat_rule.ptr == 1566 NULL) ? (uint32_t)(-1) 1567 : state->nat_rule.ptr->nr; 1568 pstore.anchor.nr = (state->anchor.ptr == 1569 NULL) ? 
(uint32_t)(-1) 1570 : state->anchor.ptr->nr; 1571 pstore.creation = secs - pstore.creation; 1572 pstore.expire = pf_state_expires(state); 1573 if (pstore.expire > secs) 1574 pstore.expire -= secs; 1575 else 1576 pstore.expire = 0; 1577 error = copyout(&pstore, p, sizeof(*p)); 1578 if (error) { 1579 crit_exit(); 1580 goto fail; 1581 } 1582 p++; 1583 nr++; 1584 } 1585 ps->ps_len = sizeof(struct pf_state) * nr; 1586 crit_exit(); 1587 break; 1588 } 1589 1590 case DIOCGETSTATUS: { 1591 struct pf_status *s = (struct pf_status *)addr; 1592 bcopy(&pf_status, s, sizeof(struct pf_status)); 1593 pfi_fill_oldstatus(s); 1594 break; 1595 } 1596 1597 case DIOCSETSTATUSIF: { 1598 struct pfioc_if *pi = (struct pfioc_if *)addr; 1599 1600 if (pi->ifname[0] == 0) { 1601 bzero(pf_status.ifname, IFNAMSIZ); 1602 break; 1603 } 1604 if (ifunit(pi->ifname) == NULL) { 1605 error = EINVAL; 1606 break; 1607 } 1608 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ); 1609 break; 1610 } 1611 1612 case DIOCCLRSTATUS: { 1613 bzero(pf_status.counters, sizeof(pf_status.counters)); 1614 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 1615 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 1616 if (*pf_status.ifname) 1617 pfi_clr_istats(pf_status.ifname, NULL, 1618 PFI_FLAG_INSTANCE); 1619 break; 1620 } 1621 1622 case DIOCNATLOOK: { 1623 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1624 struct pf_state *state; 1625 struct pf_state key; 1626 int m = 0, direction = pnl->direction; 1627 1628 key.af = pnl->af; 1629 key.proto = pnl->proto; 1630 1631 if (!pnl->proto || 1632 PF_AZERO(&pnl->saddr, pnl->af) || 1633 PF_AZERO(&pnl->daddr, pnl->af) || 1634 !pnl->dport || !pnl->sport) 1635 error = EINVAL; 1636 else { 1637 crit_enter(); 1638 1639 /* 1640 * userland gives us source and dest of connection, 1641 * reverse the lookup so we ask for what happens with 1642 * the return traffic, enabling us to find it in the 1643 * state tree. 
1644 */ 1645 if (direction == PF_IN) { 1646 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af); 1647 key.ext.port = pnl->dport; 1648 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); 1649 key.gwy.port = pnl->sport; 1650 state = pf_find_state_all(&key, PF_EXT_GWY, &m); 1651 } else { 1652 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); 1653 key.lan.port = pnl->dport; 1654 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af); 1655 key.ext.port = pnl->sport; 1656 state = pf_find_state_all(&key, PF_LAN_EXT, &m); 1657 } 1658 if (m > 1) 1659 error = E2BIG; /* more than one state */ 1660 else if (state != NULL) { 1661 if (direction == PF_IN) { 1662 PF_ACPY(&pnl->rsaddr, &state->lan.addr, 1663 state->af); 1664 pnl->rsport = state->lan.port; 1665 PF_ACPY(&pnl->rdaddr, &pnl->daddr, 1666 pnl->af); 1667 pnl->rdport = pnl->dport; 1668 } else { 1669 PF_ACPY(&pnl->rdaddr, &state->gwy.addr, 1670 state->af); 1671 pnl->rdport = state->gwy.port; 1672 PF_ACPY(&pnl->rsaddr, &pnl->saddr, 1673 pnl->af); 1674 pnl->rsport = pnl->sport; 1675 } 1676 } else 1677 error = ENOENT; 1678 crit_exit(); 1679 } 1680 break; 1681 } 1682 1683 case DIOCSETTIMEOUT: { 1684 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1685 int old; 1686 1687 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1688 pt->seconds < 0) { 1689 error = EINVAL; 1690 goto fail; 1691 } 1692 old = pf_default_rule.timeout[pt->timeout]; 1693 pf_default_rule.timeout[pt->timeout] = pt->seconds; 1694 pt->seconds = old; 1695 break; 1696 } 1697 1698 case DIOCGETTIMEOUT: { 1699 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1700 1701 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1702 error = EINVAL; 1703 goto fail; 1704 } 1705 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1706 break; 1707 } 1708 1709 case DIOCGETLIMIT: { 1710 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1711 1712 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1713 error = EINVAL; 1714 goto fail; 1715 } 1716 pl->limit = pf_pool_limits[pl->index].limit; 1717 break; 1718 } 1719 
1720 case DIOCSETLIMIT: { 1721 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1722 int old_limit; 1723 1724 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1725 pf_pool_limits[pl->index].pp == NULL) { 1726 error = EINVAL; 1727 goto fail; 1728 } 1729 1730 /* XXX Get an API to set limits on the zone/pool */ 1731 old_limit = pf_pool_limits[pl->index].limit; 1732 pf_pool_limits[pl->index].limit = pl->limit; 1733 pl->limit = old_limit; 1734 break; 1735 } 1736 1737 case DIOCSETDEBUG: { 1738 u_int32_t *level = (u_int32_t *)addr; 1739 1740 pf_status.debug = *level; 1741 break; 1742 } 1743 1744 case DIOCCLRRULECTRS: { 1745 struct pf_ruleset *ruleset = &pf_main_ruleset; 1746 struct pf_rule *rule; 1747 1748 crit_enter(); 1749 TAILQ_FOREACH(rule, 1750 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) 1751 rule->evaluations = rule->packets = 1752 rule->bytes = 0; 1753 crit_exit(); 1754 break; 1755 } 1756 1757 case DIOCGIFSPEED: { 1758 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr; 1759 struct pf_ifspeed ps; 1760 struct ifnet *ifp; 1761 1762 if (psp->ifname[0] != 0) { 1763 /* Can we completely trust user-land? 
*/ 1764 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ); 1765 ifp = ifunit(ps.ifname); 1766 if (ifp ) 1767 psp->baudrate = ifp->if_baudrate; 1768 else 1769 error = EINVAL; 1770 } else 1771 error = EINVAL; 1772 break; 1773 } 1774 #ifdef ALTQ 1775 case DIOCSTARTALTQ: { 1776 struct pf_altq *altq; 1777 struct ifnet *ifp; 1778 struct tb_profile tb; 1779 1780 /* enable all altq interfaces on active list */ 1781 crit_enter(); 1782 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 1783 if (altq->qname[0] == 0) { 1784 if ((ifp = ifunit(altq->ifname)) == NULL) { 1785 error = EINVAL; 1786 break; 1787 } 1788 if (ifp->if_snd.altq_type != ALTQT_NONE) 1789 error = altq_enable(&ifp->if_snd); 1790 if (error != 0) 1791 break; 1792 /* set tokenbucket regulator */ 1793 tb.rate = altq->ifbandwidth; 1794 tb.depth = altq->tbrsize; 1795 error = tbr_set(&ifp->if_snd, &tb); 1796 if (error != 0) 1797 break; 1798 } 1799 } 1800 crit_exit(); 1801 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n")); 1802 break; 1803 } 1804 1805 case DIOCSTOPALTQ: { 1806 struct pf_altq *altq; 1807 struct ifnet *ifp; 1808 struct tb_profile tb; 1809 int err; 1810 1811 /* disable all altq interfaces on active list */ 1812 crit_enter(); 1813 TAILQ_FOREACH(altq, pf_altqs_active, entries) { 1814 if (altq->qname[0] == 0) { 1815 if ((ifp = ifunit(altq->ifname)) == NULL) { 1816 error = EINVAL; 1817 break; 1818 } 1819 if (ifp->if_snd.altq_type != ALTQT_NONE) { 1820 err = altq_disable(&ifp->if_snd); 1821 if (err != 0 && error == 0) 1822 error = err; 1823 } 1824 /* clear tokenbucket regulator */ 1825 tb.rate = 0; 1826 err = tbr_set(&ifp->if_snd, &tb); 1827 if (err != 0 && error == 0) 1828 error = err; 1829 } 1830 } 1831 crit_exit(); 1832 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n")); 1833 break; 1834 } 1835 1836 case DIOCBEGINALTQS: { 1837 u_int32_t *ticket = (u_int32_t *)addr; 1838 1839 error = pf_begin_altq(ticket); 1840 break; 1841 } 1842 1843 case DIOCADDALTQ: { 1844 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 1845 struct 
pf_altq *altq, *a; 1846 1847 if (pa->ticket != ticket_altqs_inactive) { 1848 error = EBUSY; 1849 break; 1850 } 1851 altq = pool_get(&pf_altq_pl, PR_NOWAIT); 1852 if (altq == NULL) { 1853 error = ENOMEM; 1854 break; 1855 } 1856 bcopy(&pa->altq, altq, sizeof(struct pf_altq)); 1857 1858 /* 1859 * if this is for a queue, find the discipline and 1860 * copy the necessary fields 1861 */ 1862 if (altq->qname[0] != 0) { 1863 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) { 1864 error = EBUSY; 1865 pool_put(&pf_altq_pl, altq); 1866 break; 1867 } 1868 TAILQ_FOREACH(a, pf_altqs_inactive, entries) { 1869 if (strncmp(a->ifname, altq->ifname, 1870 IFNAMSIZ) == 0 && a->qname[0] == 0) { 1871 altq->altq_disc = a->altq_disc; 1872 break; 1873 } 1874 } 1875 } 1876 1877 error = altq_add(altq); 1878 if (error) { 1879 pool_put(&pf_altq_pl, altq); 1880 break; 1881 } 1882 1883 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries); 1884 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 1885 break; 1886 } 1887 1888 case DIOCCOMMITALTQS: { 1889 u_int32_t ticket = *(u_int32_t *)addr; 1890 1891 error = pf_commit_altq(ticket); 1892 break; 1893 } 1894 1895 case DIOCGETALTQS: { 1896 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 1897 struct pf_altq *altq; 1898 1899 pa->nr = 0; 1900 crit_enter(); 1901 TAILQ_FOREACH(altq, pf_altqs_active, entries) 1902 pa->nr++; 1903 pa->ticket = ticket_altqs_active; 1904 crit_exit(); 1905 break; 1906 } 1907 1908 case DIOCGETALTQ: { 1909 struct pfioc_altq *pa = (struct pfioc_altq *)addr; 1910 struct pf_altq *altq; 1911 u_int32_t nr; 1912 1913 if (pa->ticket != ticket_altqs_active) { 1914 error = EBUSY; 1915 break; 1916 } 1917 nr = 0; 1918 crit_enter(); 1919 altq = TAILQ_FIRST(pf_altqs_active); 1920 while ((altq != NULL) && (nr < pa->nr)) { 1921 altq = TAILQ_NEXT(altq, entries); 1922 nr++; 1923 } 1924 if (altq == NULL) { 1925 error = EBUSY; 1926 crit_exit(); 1927 break; 1928 } 1929 bcopy(altq, &pa->altq, sizeof(struct pf_altq)); 1930 crit_exit(); 1931 break; 1932 
} 1933 1934 case DIOCCHANGEALTQ: 1935 /* CHANGEALTQ not supported yet! */ 1936 error = ENODEV; 1937 break; 1938 1939 case DIOCGETQSTATS: { 1940 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 1941 struct pf_altq *altq; 1942 u_int32_t nr; 1943 int nbytes; 1944 1945 if (pq->ticket != ticket_altqs_active) { 1946 error = EBUSY; 1947 break; 1948 } 1949 nbytes = pq->nbytes; 1950 nr = 0; 1951 crit_enter(); 1952 altq = TAILQ_FIRST(pf_altqs_active); 1953 while ((altq != NULL) && (nr < pq->nr)) { 1954 altq = TAILQ_NEXT(altq, entries); 1955 nr++; 1956 } 1957 if (altq == NULL) { 1958 error = EBUSY; 1959 crit_exit(); 1960 break; 1961 } 1962 error = altq_getqstats(altq, pq->buf, &nbytes); 1963 crit_exit(); 1964 if (error == 0) { 1965 pq->scheduler = altq->scheduler; 1966 pq->nbytes = nbytes; 1967 } 1968 break; 1969 } 1970 #endif /* ALTQ */ 1971 1972 case DIOCBEGINADDRS: { 1973 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 1974 1975 pf_empty_pool(&pf_pabuf); 1976 pp->ticket = ++ticket_pabuf; 1977 break; 1978 } 1979 1980 case DIOCADDADDR: { 1981 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 1982 1983 #ifndef INET 1984 if (pp->af == AF_INET) { 1985 error = EAFNOSUPPORT; 1986 break; 1987 } 1988 #endif /* INET */ 1989 #ifndef INET6 1990 if (pp->af == AF_INET6) { 1991 error = EAFNOSUPPORT; 1992 break; 1993 } 1994 #endif /* INET6 */ 1995 if (pp->addr.addr.type != PF_ADDR_ADDRMASK && 1996 pp->addr.addr.type != PF_ADDR_DYNIFTL && 1997 pp->addr.addr.type != PF_ADDR_TABLE) { 1998 error = EINVAL; 1999 break; 2000 } 2001 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2002 if (pa == NULL) { 2003 error = ENOMEM; 2004 break; 2005 } 2006 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr)); 2007 if (pa->ifname[0]) { 2008 pa->kif = pfi_attach_rule(pa->ifname); 2009 if (pa->kif == NULL) { 2010 pool_put(&pf_pooladdr_pl, pa); 2011 error = EINVAL; 2012 break; 2013 } 2014 } 2015 if (pfi_dynaddr_setup(&pa->addr, pp->af)) { 2016 pfi_dynaddr_remove(&pa->addr); 2017 
pfi_detach_rule(pa->kif); 2018 pool_put(&pf_pooladdr_pl, pa); 2019 error = EINVAL; 2020 break; 2021 } 2022 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries); 2023 break; 2024 } 2025 2026 case DIOCGETADDRS: { 2027 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2028 2029 pp->nr = 0; 2030 crit_enter(); 2031 pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket, 2032 pp->r_action, pp->r_num, 0, 1, 0); 2033 if (pool == NULL) { 2034 error = EBUSY; 2035 crit_exit(); 2036 break; 2037 } 2038 TAILQ_FOREACH(pa, &pool->list, entries) 2039 pp->nr++; 2040 crit_exit(); 2041 break; 2042 } 2043 2044 case DIOCGETADDR: { 2045 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr; 2046 u_int32_t nr = 0; 2047 2048 crit_enter(); 2049 pool = pf_get_pool(pp->anchor, pp->ruleset, pp->ticket, 2050 pp->r_action, pp->r_num, 0, 1, 1); 2051 if (pool == NULL) { 2052 error = EBUSY; 2053 crit_exit(); 2054 break; 2055 } 2056 pa = TAILQ_FIRST(&pool->list); 2057 while ((pa != NULL) && (nr < pp->nr)) { 2058 pa = TAILQ_NEXT(pa, entries); 2059 nr++; 2060 } 2061 if (pa == NULL) { 2062 error = EBUSY; 2063 crit_exit(); 2064 break; 2065 } 2066 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr)); 2067 pfi_dynaddr_copyout(&pp->addr.addr); 2068 pf_tbladdr_copyout(&pp->addr.addr); 2069 crit_exit(); 2070 break; 2071 } 2072 2073 case DIOCCHANGEADDR: { 2074 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr; 2075 struct pf_pooladdr *oldpa = NULL, *newpa = NULL; 2076 struct pf_ruleset *ruleset; 2077 2078 if (pca->action < PF_CHANGE_ADD_HEAD || 2079 pca->action > PF_CHANGE_REMOVE) { 2080 error = EINVAL; 2081 break; 2082 } 2083 if (pca->addr.addr.type != PF_ADDR_ADDRMASK && 2084 pca->addr.addr.type != PF_ADDR_DYNIFTL && 2085 pca->addr.addr.type != PF_ADDR_TABLE) { 2086 error = EINVAL; 2087 break; 2088 } 2089 2090 ruleset = pf_find_ruleset(pca->anchor, pca->ruleset); 2091 if (ruleset == NULL) { 2092 error = EBUSY; 2093 break; 2094 } 2095 pool = pf_get_pool(pca->anchor, pca->ruleset, pca->ticket, 2096 
pca->r_action, pca->r_num, pca->r_last, 1, 1); 2097 if (pool == NULL) { 2098 error = EBUSY; 2099 break; 2100 } 2101 if (pca->action != PF_CHANGE_REMOVE) { 2102 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT); 2103 if (newpa == NULL) { 2104 error = ENOMEM; 2105 break; 2106 } 2107 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr)); 2108 #ifndef INET 2109 if (pca->af == AF_INET) { 2110 pool_put(&pf_pooladdr_pl, newpa); 2111 error = EAFNOSUPPORT; 2112 break; 2113 } 2114 #endif /* INET */ 2115 #ifndef INET6 2116 if (pca->af == AF_INET6) { 2117 pool_put(&pf_pooladdr_pl, newpa); 2118 error = EAFNOSUPPORT; 2119 break; 2120 } 2121 #endif /* INET6 */ 2122 if (newpa->ifname[0]) { 2123 newpa->kif = pfi_attach_rule(newpa->ifname); 2124 if (newpa->kif == NULL) { 2125 pool_put(&pf_pooladdr_pl, newpa); 2126 error = EINVAL; 2127 break; 2128 } 2129 } else 2130 newpa->kif = NULL; 2131 if (pfi_dynaddr_setup(&newpa->addr, pca->af) || 2132 pf_tbladdr_setup(ruleset, &newpa->addr)) { 2133 pfi_dynaddr_remove(&newpa->addr); 2134 pfi_detach_rule(newpa->kif); 2135 pool_put(&pf_pooladdr_pl, newpa); 2136 error = EINVAL; 2137 break; 2138 } 2139 } 2140 2141 crit_enter(); 2142 2143 if (pca->action == PF_CHANGE_ADD_HEAD) 2144 oldpa = TAILQ_FIRST(&pool->list); 2145 else if (pca->action == PF_CHANGE_ADD_TAIL) 2146 oldpa = TAILQ_LAST(&pool->list, pf_palist); 2147 else { 2148 int i = 0; 2149 2150 oldpa = TAILQ_FIRST(&pool->list); 2151 while ((oldpa != NULL) && (i < pca->nr)) { 2152 oldpa = TAILQ_NEXT(oldpa, entries); 2153 i++; 2154 } 2155 if (oldpa == NULL) { 2156 error = EINVAL; 2157 crit_exit(); 2158 break; 2159 } 2160 } 2161 2162 if (pca->action == PF_CHANGE_REMOVE) { 2163 TAILQ_REMOVE(&pool->list, oldpa, entries); 2164 pfi_dynaddr_remove(&oldpa->addr); 2165 pf_tbladdr_remove(&oldpa->addr); 2166 pfi_detach_rule(oldpa->kif); 2167 pool_put(&pf_pooladdr_pl, oldpa); 2168 } else { 2169 if (oldpa == NULL) 2170 TAILQ_INSERT_TAIL(&pool->list, newpa, entries); 2171 else if (pca->action == PF_CHANGE_ADD_HEAD 
|| 2172 pca->action == PF_CHANGE_ADD_BEFORE) 2173 TAILQ_INSERT_BEFORE(oldpa, newpa, entries); 2174 else 2175 TAILQ_INSERT_AFTER(&pool->list, oldpa, 2176 newpa, entries); 2177 } 2178 2179 pool->cur = TAILQ_FIRST(&pool->list); 2180 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, 2181 pca->af); 2182 crit_exit(); 2183 break; 2184 } 2185 2186 case DIOCGETANCHORS: { 2187 struct pfioc_anchor *pa = (struct pfioc_anchor *)addr; 2188 struct pf_anchor *anchor; 2189 2190 pa->nr = 0; 2191 TAILQ_FOREACH(anchor, &pf_anchors, entries) 2192 pa->nr++; 2193 break; 2194 } 2195 2196 case DIOCGETANCHOR: { 2197 struct pfioc_anchor *pa = (struct pfioc_anchor *)addr; 2198 struct pf_anchor *anchor; 2199 u_int32_t nr = 0; 2200 2201 anchor = TAILQ_FIRST(&pf_anchors); 2202 while (anchor != NULL && nr < pa->nr) { 2203 anchor = TAILQ_NEXT(anchor, entries); 2204 nr++; 2205 } 2206 if (anchor == NULL) 2207 error = EBUSY; 2208 else 2209 bcopy(anchor->name, pa->name, sizeof(pa->name)); 2210 break; 2211 } 2212 2213 case DIOCGETRULESETS: { 2214 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2215 struct pf_anchor *anchor; 2216 struct pf_ruleset *ruleset; 2217 2218 pr->anchor[PF_ANCHOR_NAME_SIZE-1] = 0; 2219 if ((anchor = pf_find_anchor(pr->anchor)) == NULL) { 2220 error = EINVAL; 2221 break; 2222 } 2223 pr->nr = 0; 2224 TAILQ_FOREACH(ruleset, &anchor->rulesets, entries) 2225 pr->nr++; 2226 break; 2227 } 2228 2229 case DIOCGETRULESET: { 2230 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 2231 struct pf_anchor *anchor; 2232 struct pf_ruleset *ruleset; 2233 u_int32_t nr = 0; 2234 2235 if ((anchor = pf_find_anchor(pr->anchor)) == NULL) { 2236 error = EINVAL; 2237 break; 2238 } 2239 ruleset = TAILQ_FIRST(&anchor->rulesets); 2240 while (ruleset != NULL && nr < pr->nr) { 2241 ruleset = TAILQ_NEXT(ruleset, entries); 2242 nr++; 2243 } 2244 if (ruleset == NULL) 2245 error = EBUSY; 2246 else 2247 bcopy(ruleset->name, pr->name, sizeof(pr->name)); 2248 break; 2249 } 2250 2251 case 
DIOCRCLRTABLES: { 2252 struct pfioc_table *io = (struct pfioc_table *)addr; 2253 2254 if (io->pfrio_esize != 0) { 2255 error = ENODEV; 2256 break; 2257 } 2258 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 2259 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2260 break; 2261 } 2262 2263 case DIOCRADDTABLES: { 2264 struct pfioc_table *io = (struct pfioc_table *)addr; 2265 2266 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2267 error = ENODEV; 2268 break; 2269 } 2270 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 2271 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2272 break; 2273 } 2274 2275 case DIOCRDELTABLES: { 2276 struct pfioc_table *io = (struct pfioc_table *)addr; 2277 2278 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2279 error = ENODEV; 2280 break; 2281 } 2282 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 2283 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2284 break; 2285 } 2286 2287 case DIOCRGETTABLES: { 2288 struct pfioc_table *io = (struct pfioc_table *)addr; 2289 2290 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2291 error = ENODEV; 2292 break; 2293 } 2294 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 2295 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2296 break; 2297 } 2298 2299 case DIOCRGETTSTATS: { 2300 struct pfioc_table *io = (struct pfioc_table *)addr; 2301 2302 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 2303 error = ENODEV; 2304 break; 2305 } 2306 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 2307 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2308 break; 2309 } 2310 2311 case DIOCRCLRTSTATS: { 2312 struct pfioc_table *io = (struct pfioc_table *)addr; 2313 2314 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2315 error = ENODEV; 2316 break; 2317 } 2318 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 2319 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2320 break; 2321 } 2322 2323 case DIOCRSETTFLAGS: { 2324 
struct pfioc_table *io = (struct pfioc_table *)addr; 2325 2326 if (io->pfrio_esize != sizeof(struct pfr_table)) { 2327 error = ENODEV; 2328 break; 2329 } 2330 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 2331 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 2332 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2333 break; 2334 } 2335 2336 case DIOCRCLRADDRS: { 2337 struct pfioc_table *io = (struct pfioc_table *)addr; 2338 2339 if (io->pfrio_esize != 0) { 2340 error = ENODEV; 2341 break; 2342 } 2343 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 2344 io->pfrio_flags | PFR_FLAG_USERIOCTL); 2345 break; 2346 } 2347 2348 case DIOCRADDADDRS: { 2349 struct pfioc_table *io = (struct pfioc_table *)addr; 2350 2351 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2352 error = ENODEV; 2353 break; 2354 } 2355 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 2356 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 2357 PFR_FLAG_USERIOCTL); 2358 break; 2359 } 2360 2361 case DIOCRDELADDRS: { 2362 struct pfioc_table *io = (struct pfioc_table *)addr; 2363 2364 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2365 error = ENODEV; 2366 break; 2367 } 2368 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 2369 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 2370 PFR_FLAG_USERIOCTL); 2371 break; 2372 } 2373 2374 case DIOCRSETADDRS: { 2375 struct pfioc_table *io = (struct pfioc_table *)addr; 2376 2377 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2378 error = ENODEV; 2379 break; 2380 } 2381 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 2382 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd, 2383 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 2384 PFR_FLAG_USERIOCTL); 2385 break; 2386 } 2387 2388 case DIOCRGETADDRS: { 2389 struct pfioc_table *io = (struct pfioc_table *)addr; 2390 2391 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2392 error = ENODEV; 2393 break; 2394 } 2395 error = 
pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 2396 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2397 break; 2398 } 2399 2400 case DIOCRGETASTATS: { 2401 struct pfioc_table *io = (struct pfioc_table *)addr; 2402 2403 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 2404 error = ENODEV; 2405 break; 2406 } 2407 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 2408 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2409 break; 2410 } 2411 2412 case DIOCRCLRASTATS: { 2413 struct pfioc_table *io = (struct pfioc_table *)addr; 2414 2415 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2416 error = ENODEV; 2417 break; 2418 } 2419 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 2420 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 2421 PFR_FLAG_USERIOCTL); 2422 break; 2423 } 2424 2425 case DIOCRTSTADDRS: { 2426 struct pfioc_table *io = (struct pfioc_table *)addr; 2427 2428 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2429 error = ENODEV; 2430 break; 2431 } 2432 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 2433 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 2434 PFR_FLAG_USERIOCTL); 2435 break; 2436 } 2437 2438 case DIOCRINABEGIN: { 2439 struct pfioc_table *io = (struct pfioc_table *)addr; 2440 2441 if (io->pfrio_esize != 0) { 2442 error = ENODEV; 2443 break; 2444 } 2445 error = pfr_ina_begin(&io->pfrio_table, &io->pfrio_ticket, 2446 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2447 break; 2448 } 2449 2450 case DIOCRINACOMMIT: { 2451 struct pfioc_table *io = (struct pfioc_table *)addr; 2452 2453 if (io->pfrio_esize != 0) { 2454 error = ENODEV; 2455 break; 2456 } 2457 error = pfr_ina_commit(&io->pfrio_table, io->pfrio_ticket, 2458 &io->pfrio_nadd, &io->pfrio_nchange, io->pfrio_flags | 2459 PFR_FLAG_USERIOCTL); 2460 break; 2461 } 2462 2463 case DIOCRINADEFINE: { 2464 struct pfioc_table *io = (struct pfioc_table *)addr; 2465 2466 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 2467 error = 
ENODEV; 2468 break; 2469 } 2470 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2471 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2472 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2473 break; 2474 } 2475 2476 case DIOCOSFPADD: { 2477 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2478 crit_enter(); 2479 error = pf_osfp_add(io); 2480 crit_exit(); 2481 break; 2482 } 2483 2484 case DIOCOSFPGET: { 2485 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2486 crit_enter(); 2487 error = pf_osfp_get(io); 2488 crit_exit(); 2489 break; 2490 } 2491 2492 case DIOCXBEGIN: { 2493 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2494 struct pfioc_trans_e ioe; 2495 struct pfr_table table; 2496 int i; 2497 2498 if (io->esize != sizeof(ioe)) { 2499 error = ENODEV; 2500 goto fail; 2501 } 2502 for (i = 0; i < io->size; i++) { 2503 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2504 error = EFAULT; 2505 goto fail; 2506 } 2507 switch (ioe.rs_num) { 2508 #ifdef ALTQ 2509 case PF_RULESET_ALTQ: 2510 if (ioe.anchor[0] || ioe.ruleset[0]) { 2511 error = EINVAL; 2512 goto fail; 2513 } 2514 if ((error = pf_begin_altq(&ioe.ticket))) 2515 goto fail; 2516 break; 2517 #endif /* ALTQ */ 2518 case PF_RULESET_TABLE: 2519 bzero(&table, sizeof(table)); 2520 strlcpy(table.pfrt_anchor, ioe.anchor, 2521 sizeof(table.pfrt_anchor)); 2522 strlcpy(table.pfrt_ruleset, ioe.ruleset, 2523 sizeof(table.pfrt_ruleset)); 2524 if ((error = pfr_ina_begin(&table, 2525 &ioe.ticket, NULL, 0))) 2526 goto fail; 2527 break; 2528 default: 2529 if ((error = pf_begin_rules(&ioe.ticket, 2530 ioe.rs_num, ioe.anchor, ioe.ruleset))) 2531 goto fail; 2532 break; 2533 } 2534 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) { 2535 error = EFAULT; 2536 goto fail; 2537 } 2538 } 2539 break; 2540 } 2541 2542 case DIOCXROLLBACK: { 2543 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2544 struct pfioc_trans_e ioe; 2545 struct pfr_table table; 2546 int i; 2547 2548 if (io->esize != 
sizeof(ioe)) { 2549 error = ENODEV; 2550 goto fail; 2551 } 2552 for (i = 0; i < io->size; i++) { 2553 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2554 error = EFAULT; 2555 goto fail; 2556 } 2557 switch (ioe.rs_num) { 2558 #ifdef ALTQ 2559 case PF_RULESET_ALTQ: 2560 if (ioe.anchor[0] || ioe.ruleset[0]) { 2561 error = EINVAL; 2562 goto fail; 2563 } 2564 if ((error = pf_rollback_altq(ioe.ticket))) 2565 goto fail; /* really bad */ 2566 break; 2567 #endif /* ALTQ */ 2568 case PF_RULESET_TABLE: 2569 bzero(&table, sizeof(table)); 2570 strlcpy(table.pfrt_anchor, ioe.anchor, 2571 sizeof(table.pfrt_anchor)); 2572 strlcpy(table.pfrt_ruleset, ioe.ruleset, 2573 sizeof(table.pfrt_ruleset)); 2574 if ((error = pfr_ina_rollback(&table, 2575 ioe.ticket, NULL, 0))) 2576 goto fail; /* really bad */ 2577 break; 2578 default: 2579 if ((error = pf_rollback_rules(ioe.ticket, 2580 ioe.rs_num, ioe.anchor, ioe.ruleset))) 2581 goto fail; /* really bad */ 2582 break; 2583 } 2584 } 2585 break; 2586 } 2587 2588 case DIOCXCOMMIT: { 2589 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2590 struct pfioc_trans_e ioe; 2591 struct pfr_table table; 2592 struct pf_ruleset *rs; 2593 int i; 2594 2595 if (io->esize != sizeof(ioe)) { 2596 error = ENODEV; 2597 goto fail; 2598 } 2599 /* first makes sure everything will succeed */ 2600 for (i = 0; i < io->size; i++) { 2601 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2602 error = EFAULT; 2603 goto fail; 2604 } 2605 switch (ioe.rs_num) { 2606 #ifdef ALTQ 2607 case PF_RULESET_ALTQ: 2608 if (ioe.anchor[0] || ioe.ruleset[0]) { 2609 error = EINVAL; 2610 goto fail; 2611 } 2612 if (!altqs_inactive_open || ioe.ticket != 2613 ticket_altqs_inactive) { 2614 error = EBUSY; 2615 goto fail; 2616 } 2617 break; 2618 #endif /* ALTQ */ 2619 case PF_RULESET_TABLE: 2620 rs = pf_find_ruleset(ioe.anchor, ioe.ruleset); 2621 if (rs == NULL || !rs->topen || ioe.ticket != 2622 rs->tticket) { 2623 error = EBUSY; 2624 goto fail; 2625 } 2626 break; 2627 default: 2628 if 
(ioe.rs_num < 0 || ioe.rs_num >= 2629 PF_RULESET_MAX) { 2630 error = EINVAL; 2631 goto fail; 2632 } 2633 rs = pf_find_ruleset(ioe.anchor, ioe.ruleset); 2634 if (rs == NULL || 2635 !rs->rules[ioe.rs_num].inactive.open || 2636 rs->rules[ioe.rs_num].inactive.ticket != 2637 ioe.ticket) { 2638 error = EBUSY; 2639 goto fail; 2640 } 2641 break; 2642 } 2643 } 2644 /* now do the commit - no errors should happen here */ 2645 for (i = 0; i < io->size; i++) { 2646 if (copyin(io->array+i, &ioe, sizeof(ioe))) { 2647 error = EFAULT; 2648 goto fail; 2649 } 2650 switch (ioe.rs_num) { 2651 #ifdef ALTQ 2652 case PF_RULESET_ALTQ: 2653 if ((error = pf_commit_altq(ioe.ticket))) 2654 goto fail; /* really bad */ 2655 break; 2656 #endif /* ALTQ */ 2657 case PF_RULESET_TABLE: 2658 bzero(&table, sizeof(table)); 2659 strlcpy(table.pfrt_anchor, ioe.anchor, 2660 sizeof(table.pfrt_anchor)); 2661 strlcpy(table.pfrt_ruleset, ioe.ruleset, 2662 sizeof(table.pfrt_ruleset)); 2663 if ((error = pfr_ina_commit(&table, ioe.ticket, 2664 NULL, NULL, 0))) 2665 goto fail; /* really bad */ 2666 break; 2667 default: 2668 if ((error = pf_commit_rules(ioe.ticket, 2669 ioe.rs_num, ioe.anchor, ioe.ruleset))) 2670 goto fail; /* really bad */ 2671 break; 2672 } 2673 } 2674 break; 2675 } 2676 2677 case DIOCGETSRCNODES: { 2678 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 2679 struct pf_src_node *n; 2680 struct pf_src_node *p, pstore; 2681 u_int32_t nr = 0; 2682 int space = psn->psn_len; 2683 2684 if (space == 0) { 2685 crit_enter(); 2686 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2687 nr++; 2688 crit_exit(); 2689 psn->psn_len = sizeof(struct pf_src_node) * nr; 2690 return (0); 2691 } 2692 2693 crit_enter(); 2694 p = psn->psn_src_nodes; 2695 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2696 int secs = time_second; 2697 2698 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 2699 break; 2700 2701 bcopy(n, &pstore, sizeof(pstore)); 2702 if (n->rule.ptr != NULL) 2703 pstore.rule.nr = 
n->rule.ptr->nr; 2704 pstore.creation = secs - pstore.creation; 2705 if (pstore.expire > secs) 2706 pstore.expire -= secs; 2707 else 2708 pstore.expire = 0; 2709 error = copyout(&pstore, p, sizeof(*p)); 2710 if (error) { 2711 crit_exit(); 2712 goto fail; 2713 } 2714 p++; 2715 nr++; 2716 } 2717 psn->psn_len = sizeof(struct pf_src_node) * nr; 2718 crit_exit(); 2719 break; 2720 } 2721 2722 case DIOCCLRSRCNODES: { 2723 struct pf_src_node *n; 2724 struct pf_state *state; 2725 2726 crit_enter(); 2727 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2728 state->src_node = NULL; 2729 state->nat_src_node = NULL; 2730 } 2731 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2732 n->expire = 1; 2733 n->states = 0; 2734 } 2735 pf_purge_expired_src_nodes(); 2736 pf_status.src_nodes = 0; 2737 crit_exit(); 2738 break; 2739 } 2740 2741 case DIOCSETHOSTID: { 2742 u_int32_t *hostid = (u_int32_t *)addr; 2743 2744 if (*hostid == 0) { 2745 error = EINVAL; 2746 goto fail; 2747 } 2748 pf_status.hostid = *hostid; 2749 break; 2750 } 2751 2752 case DIOCOSFPFLUSH: 2753 crit_enter(); 2754 pf_osfp_flush(); 2755 crit_exit(); 2756 break; 2757 2758 case DIOCIGETIFACES: { 2759 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2760 2761 if (io->pfiio_esize != sizeof(struct pfi_if)) { 2762 error = ENODEV; 2763 break; 2764 } 2765 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 2766 &io->pfiio_size, io->pfiio_flags); 2767 break; 2768 } 2769 2770 case DIOCICLRISTATS: { 2771 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2772 2773 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero, 2774 io->pfiio_flags); 2775 break; 2776 } 2777 2778 default: 2779 error = ENODEV; 2780 break; 2781 } 2782 fail: 2783 return (error); 2784 } 2785 2786 /* 2787 * XXX - Check for version missmatch!!! 
2788 */ 2789 static void 2790 pf_clear_states(void) 2791 { 2792 struct pf_state *state; 2793 2794 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2795 state->timeout = PFTM_PURGE; 2796 #if NPFSYNC 2797 /* don't send out individual delete messages */ 2798 state->sync_flags = PFSTATE_NOSYNC; 2799 #endif 2800 } 2801 pf_purge_expired_states(); 2802 pf_status.states = 0; 2803 #if 0 /* NPFSYNC */ 2804 /* 2805 * XXX This is called on module unload, we do not want to sync that over? */ 2806 */ 2807 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 2808 #endif 2809 } 2810 2811 static int 2812 pf_clear_tables(void) 2813 { 2814 struct pfioc_table io; 2815 int error; 2816 2817 bzero(&io, sizeof(io)); 2818 2819 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel, 2820 io.pfrio_flags); 2821 2822 return (error); 2823 } 2824 2825 static void 2826 pf_clear_srcnodes(void) 2827 { 2828 struct pf_src_node *n; 2829 struct pf_state *state; 2830 2831 RB_FOREACH(state, pf_state_tree_id, &tree_id) { 2832 state->src_node = NULL; 2833 state->nat_src_node = NULL; 2834 } 2835 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2836 n->expire = 1; 2837 n->states = 0; 2838 } 2839 pf_purge_expired_src_nodes(); 2840 pf_status.src_nodes = 0; 2841 } 2842 /* 2843 * XXX - Check for version missmatch!!! 2844 */ 2845 2846 /* 2847 * Duplicate pfctl -Fa operation to get rid of as much as we can. 2848 */ 2849 static int 2850 shutdown_pf(void) 2851 { 2852 int error = 0; 2853 u_int32_t t[5]; 2854 char nn = '\0'; 2855 2856 callout_stop(&pf_expire_to); 2857 2858 pf_status.running = 0; 2859 do { 2860 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn, 2861 &nn)) != 0) { 2862 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n")); 2863 break; 2864 } 2865 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn, 2866 &nn)) != 0) { 2867 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n")); 2868 break; /* XXX: rollback? 
*/ 2869 } 2870 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn, &nn)) 2871 != 0) { 2872 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n")); 2873 break; /* XXX: rollback? */ 2874 } 2875 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn, &nn)) 2876 != 0) { 2877 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n")); 2878 break; /* XXX: rollback? */ 2879 } 2880 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn, &nn)) 2881 != 0) { 2882 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n")); 2883 break; /* XXX: rollback? */ 2884 } 2885 2886 /* XXX: these should always succeed here */ 2887 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn, &nn); 2888 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn, &nn); 2889 pf_commit_rules(t[2], PF_RULESET_NAT, &nn, &nn); 2890 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn, &nn); 2891 pf_commit_rules(t[4], PF_RULESET_RDR, &nn, &nn); 2892 2893 if ((error = pf_clear_tables()) != 0) 2894 break; 2895 2896 #ifdef ALTQ 2897 if ((error = pf_begin_altq(&t[0])) != 0) { 2898 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n")); 2899 break; 2900 } 2901 pf_commit_altq(t[0]); 2902 #endif 2903 2904 pf_clear_states(); 2905 2906 pf_clear_srcnodes(); 2907 2908 /* status does not use malloced mem so no need to cleanup */ 2909 /* fingerprints and interfaces have their own cleanup code */ 2910 } while(0); 2911 2912 return (error); 2913 } 2914 2915 static int 2916 pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 2917 { 2918 /* 2919 * DragonFly's version of pf uses FreeBSD's native host byte ordering 2920 * for ip_len/ip_off. This is why we don't have to change byte order 2921 * like the FreeBSD-5 version does. 
2922 */ 2923 int chk; 2924 2925 chk = pf_test(PF_IN, ifp, m); 2926 if (chk && *m) { 2927 m_freem(*m); 2928 *m = NULL; 2929 } 2930 return chk; 2931 } 2932 2933 static int 2934 pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 2935 { 2936 /* 2937 * DragonFly's version of pf uses FreeBSD's native host byte ordering 2938 * for ip_len/ip_off. This is why we don't have to change byte order 2939 * like the FreeBSD-5 version does. 2940 */ 2941 int chk; 2942 2943 /* We need a proper CSUM befor we start (s. OpenBSD ip_output) */ 2944 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 2945 in_delayed_cksum(*m); 2946 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 2947 } 2948 chk = pf_test(PF_OUT, ifp, m); 2949 if (chk && *m) { 2950 m_freem(*m); 2951 *m = NULL; 2952 } 2953 return chk; 2954 } 2955 2956 #ifdef INET6 2957 static int 2958 pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 2959 { 2960 /* 2961 * IPv6 is not affected by ip_len/ip_off byte order changes. 2962 */ 2963 int chk; 2964 2965 chk = pf_test6(PF_IN, ifp, m); 2966 if (chk && *m) { 2967 m_freem(*m); 2968 *m = NULL; 2969 } 2970 return chk; 2971 } 2972 2973 static int 2974 pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir) 2975 { 2976 /* 2977 * IPv6 is not affected by ip_len/ip_off byte order changes. 2978 */ 2979 int chk; 2980 2981 /* We need a proper CSUM befor we start (s. 
OpenBSD ip_output) */ 2982 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { 2983 in_delayed_cksum(*m); 2984 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 2985 } 2986 chk = pf_test6(PF_OUT, ifp, m); 2987 if (chk && *m) { 2988 m_freem(*m); 2989 *m = NULL; 2990 } 2991 return chk; 2992 } 2993 #endif /* INET6 */ 2994 2995 static int 2996 hook_pf(void) 2997 { 2998 struct pfil_head *pfh_inet; 2999 #ifdef INET6 3000 struct pfil_head *pfh_inet6; 3001 #endif 3002 3003 if (pf_pfil_hooked) 3004 return (0); 3005 3006 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3007 if (pfh_inet == NULL) 3008 return (ENODEV); 3009 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet); 3010 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet); 3011 #ifdef INET6 3012 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3013 if (pfh_inet6 == NULL) { 3014 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, 3015 pfh_inet); 3016 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, 3017 pfh_inet); 3018 return (ENODEV); 3019 } 3020 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6); 3021 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6); 3022 #endif 3023 3024 pf_pfil_hooked = 1; 3025 return (0); 3026 } 3027 3028 static int 3029 dehook_pf(void) 3030 { 3031 struct pfil_head *pfh_inet; 3032 #ifdef INET6 3033 struct pfil_head *pfh_inet6; 3034 #endif 3035 3036 if (pf_pfil_hooked == 0) 3037 return (0); 3038 3039 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET); 3040 if (pfh_inet == NULL) 3041 return (ENODEV); 3042 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, 3043 pfh_inet); 3044 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, 3045 pfh_inet); 3046 #ifdef INET6 3047 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6); 3048 if (pfh_inet6 == NULL) 3049 return (ENODEV); 3050 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, 3051 pfh_inet6); 3052 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | 
PFIL_WAITOK, 3053 pfh_inet6); 3054 #endif 3055 3056 pf_pfil_hooked = 0; 3057 return (0); 3058 } 3059 3060 static int 3061 pf_load(void) 3062 { 3063 int error; 3064 3065 init_zone_var(); 3066 error = cdevsw_add(&pf_cdevsw, 0, 0); 3067 if (error) 3068 return (error); 3069 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME); 3070 error = pfattach(); 3071 if (error) { 3072 cdevsw_remove(&pf_cdevsw, 0, 0); 3073 return (error); 3074 } 3075 return (0); 3076 } 3077 3078 static int 3079 pf_unload(void) 3080 { 3081 int error; 3082 3083 pf_status.running = 0; 3084 error = dehook_pf(); 3085 if (error) { 3086 /* 3087 * Should not happen! 3088 * XXX Due to error code ESRCH, kldunload will show 3089 * a message like 'No such process'. 3090 */ 3091 printf("pfil unregisteration fail\n"); 3092 return error; 3093 } 3094 shutdown_pf(); 3095 pfi_cleanup(); 3096 pf_osfp_flush(); 3097 pf_osfp_cleanup(); 3098 cleanup_pf_zone(); 3099 cdevsw_remove(&pf_cdevsw, 0, 0); 3100 return 0; 3101 } 3102 3103 static int 3104 pf_modevent(module_t mod, int type, void *data) 3105 { 3106 int error = 0; 3107 3108 switch(type) { 3109 case MOD_LOAD: 3110 error = pf_load(); 3111 break; 3112 3113 case MOD_UNLOAD: 3114 error = pf_unload(); 3115 break; 3116 default: 3117 error = EINVAL; 3118 break; 3119 } 3120 return error; 3121 } 3122 3123 static moduledata_t pf_mod = { 3124 "pf", 3125 pf_modevent, 3126 0 3127 }; 3128 3129 DECLARE_MODULE(pf, pf_mod, SI_SUB_PSEUDO, SI_ORDER_FIRST); 3130 MODULE_VERSION(pf, PF_MODVER); 3131