1 /* $OpenBSD: pf_ioctl.c,v 1.297 2015/12/03 13:30:18 claudio Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Daniel Hartmeier 5 * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * - Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * - Redistributions in binary form must reproduce the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer in the documentation and/or other materials provided 17 * with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 * 32 * Effort sponsored in part by the Defense Advanced Research Projects 33 * Agency (DARPA) and Air Force Research Laboratory, Air Force 34 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
 *
 */

#include "pfsync.h"
#include "pflog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/rwlock.h>
#include <sys/syslog.h>
#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/hfsc.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>

#include <crypto/md5.h>
#include <net/pfvar.h>

#if NPFSYNC > 0
#include <netinet/ip_ipsp.h>
#include <net/if_pfsync.h>
#endif /* NPFSYNC > 0 */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#endif /* INET6 */

void			 pfattach(int);
void			 pf_thread_create(void *);
int			 pfopen(dev_t, int, int, struct proc *);
int			 pfclose(dev_t, int, int, struct proc *);
int			 pfioctl(dev_t, u_long, caddr_t, int, struct proc *);
int			 pf_begin_rules(u_int32_t *, const char *);
int			 pf_rollback_rules(u_int32_t, char *);
int			 pf_enable_queues(void);
void			 pf_remove_queues(void);
int			 pf_commit_queues(void);
void			 pf_free_queues(struct pf_queuehead *);
int			 pf_setup_pfsync_matching(struct pf_ruleset *);
void			 pf_hash_rule(MD5_CTX *, struct pf_rule *);
void			 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
int			 pf_commit_rules(u_int32_t, char *);
int			 pf_addr_setup(struct pf_ruleset *,
			    struct pf_addr_wrap *, sa_family_t);
int			 pf_kif_setup(char *, struct pfi_kif **);
void			 pf_addr_copyout(struct pf_addr_wrap *);
void			 pf_trans_set_commit(void);
void			 pf_pool_copyin(struct pf_pool *, struct pf_pool *);
int			 pf_rule_copyin(struct pf_rule *, struct pf_rule *,
			    struct pf_ruleset *);
u_int16_t		 pf_qname2qid(char *, int);
void			 pf_qid2qname(u_int16_t, char *);
void			 pf_qid_unref(u_int16_t);

struct pf_rule		 pf_default_rule, pf_default_rule_new;
/* serializes ioctl access: writers (FWRITE) exclusive, readers shared */
struct rwlock		 pf_consistency_lock = RWLOCK_INITIALIZER("pfcnslk");

/*
 * Pending "transaction" of global settings staged via DIOCX* commit
 * path; `mask' records which PF_TSET_* fields were set.
 */
struct {
	char		statusif[IFNAMSIZ];
	u_int32_t	debug;
	u_int32_t	hostid;
	u_int32_t	reass;
	u_int32_t	mask;
} pf_trans_set;

#define	PF_TSET_STATUSIF	0x01
#define	PF_TSET_DEBUG		0x02
#define	PF_TSET_HOSTID		0x04
#define	PF_TSET_REASS		0x08

#define	TAGID_MAX	 50000
/* shared tag-name registries: one for packet tags, one for queue ids */
TAILQ_HEAD(pf_tags, pf_tagname)	pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
				pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif
u_int16_t		 tagname2tag(struct pf_tags *, char *, int);
void			 tag2tagname(struct pf_tags *, u_int16_t, char *);
void			 tag_unref(struct pf_tags *, u_int16_t);
int			 pf_rtlabel_add(struct pf_addr_wrap *);
void			 pf_rtlabel_remove(struct pf_addr_wrap *);
void			 pf_rtlabel_copyout(struct pf_addr_wrap *);


/*
 * pfattach: one-time initialization at attach time.  Sets up the
 * memory pools, subsystem state (hfsc/pfr/pfi/osfp), the never-freed
 * default rule and its timeouts, global pf_status, and defers the
 * purge-thread creation until kthreads can be spawned.
 */
void
pfattach(int num)
{
	u_int32_t *timeout = pf_default_rule.timeout;

	pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrule",
	    NULL);
	pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
	    "pfsrctr", NULL);
	pool_init(&pf_sn_item_pl, sizeof(struct pf_sn_item), 0, 0, 0,
	    "pfsnitem", NULL);
	pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstate",
	    NULL);
	pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0,
	    "pfstkey", NULL);
	pool_init(&pf_state_item_pl, sizeof(struct pf_state_item), 0, 0, 0,
	    "pfstitem", NULL);
	pool_init(&pf_rule_item_pl, sizeof(struct pf_rule_item), 0, 0, 0,
	    "pfruleitem", NULL);
	pool_init(&pf_queue_pl, sizeof(struct pf_queuespec), 0, 0, 0,
	    "pfqueue", NULL);
	hfsc_initialize();
	pfr_initialize();
	pfi_initialize();
	pf_osfp_initialize();

	pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
	    pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);

	/* use the smaller table-entry limit on boxes with <= 100MB RAM */
	if (physmem <= atop(100*1024*1024))
		pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit =
		    PFR_KENTRY_HIWAT_SMALL;

	RB_INIT(&tree_src_tracking);
	RB_INIT(&pf_anchors);
	pf_init_ruleset(&pf_main_ruleset);
	/* two queue lists, swapped on commit (active/inactive) */
	TAILQ_INIT(&pf_queues[0]);
	TAILQ_INIT(&pf_queues[1]);
	pf_queues_active = &pf_queues[0];
	pf_queues_inactive = &pf_queues[1];
	TAILQ_INIT(&state_list);

	/* default rule should never be garbage collected */
	pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
	pf_default_rule.action = PF_PASS;
	pf_default_rule.nr = (u_int32_t)-1;
	pf_default_rule.rtableid = -1;

	/* initialize default timeouts */
	timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	pf_default_rule.src.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.dst.addr.type = PF_ADDR_ADDRMASK;
	pf_default_rule.rdr.addr.type = PF_ADDR_NONE;
	pf_default_rule.nat.addr.type = PF_ADDR_NONE;
	pf_default_rule.route.addr.type = PF_ADDR_NONE;

	pf_normalize_init();
	bzero(&pf_status, sizeof(pf_status));
	pf_status.debug = LOG_ERR;
	pf_status.reass = PF_REASS_ENABLED;

	/* XXX do our best to avoid a conflict */
	pf_status.hostid = arc4random();

	/* require process context to purge states, so perform in a thread */
	kthread_create_deferred(pf_thread_create, NULL);
}

/* deferred kthread bootstrap: spawn the state-purge thread */
void
pf_thread_create(void *v)
{
	if (kthread_create(pf_purge_thread, NULL, NULL, "pfpurge"))
		panic("pfpurge thread");
}

/* only minor device 0 exists */
int
pfopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

int
pfclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	if (minor(dev) >= 1)
		return (ENXIO);
	return (0);
}

/*
 * pf_rm_rule: unlink `rule' from `rulequeue' (if non-NULL) and release
 * all references it holds; the rule itself is only freed once no states
 * or source nodes still point at it and it is off its list.  With
 * rulequeue == NULL the caller is destroying a rule that was never (or
 * is no longer) linked, so the table references are dropped here instead.
 */
void
pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
{
	if (rulequeue != NULL) {
		if (rule->states_cur == 0 && rule->src_nodes == 0) {
			/*
			 * XXX - we need to remove the table *before* detaching
			 * the rule to make sure the table code does not delete
			 * the anchor under our feet.
			 */
			pf_tbladdr_remove(&rule->src.addr);
			pf_tbladdr_remove(&rule->dst.addr);
			pf_tbladdr_remove(&rule->rdr.addr);
			pf_tbladdr_remove(&rule->nat.addr);
			pf_tbladdr_remove(&rule->route.addr);
			if (rule->overload_tbl)
				pfr_detach_table(rule->overload_tbl);
		}
		TAILQ_REMOVE(rulequeue, rule, entries);
		/* tqe_prev == NULL marks the rule as unlinked */
		rule->entries.tqe_prev = NULL;
		rule->nr = (u_int32_t)-1;
	}

	/* still referenced by states/src nodes, or still linked: keep it */
	if (rule->states_cur > 0 || rule->src_nodes > 0 ||
	    rule->entries.tqe_prev != NULL)
		return;
	pf_tag_unref(rule->tag);
	pf_tag_unref(rule->match_tag);
	pf_rtlabel_remove(&rule->src.addr);
	pf_rtlabel_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->src.addr);
	pfi_dynaddr_remove(&rule->dst.addr);
	pfi_dynaddr_remove(&rule->rdr.addr);
	pfi_dynaddr_remove(&rule->nat.addr);
	pfi_dynaddr_remove(&rule->route.addr);
	if (rulequeue == NULL) {
		/* never-linked rule: drop table refs here instead */
		pf_tbladdr_remove(&rule->src.addr);
		pf_tbladdr_remove(&rule->dst.addr);
		pf_tbladdr_remove(&rule->rdr.addr);
		pf_tbladdr_remove(&rule->nat.addr);
		pf_tbladdr_remove(&rule->route.addr);
		if (rule->overload_tbl)
			pfr_detach_table(rule->overload_tbl);
	}
	pfi_kif_unref(rule->rcv_kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->rdr.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->nat.kif, PFI_KIF_REF_RULE);
	pfi_kif_unref(rule->route.kif, PFI_KIF_REF_RULE);
	pf_anchor_remove(rule);
	pool_put(&pf_rule_pl, rule);
}

/*
 * pf_purge_rule: remove `rule' from the active ruleset, renumber and
 * re-derive skip steps; if the ruleset became empty, also remove the
 * parent anchor rule (arule/aruleset) that pointed into it.
 */
void
pf_purge_rule(struct pf_ruleset *ruleset, struct pf_rule *rule,
    struct pf_ruleset *aruleset, struct pf_rule *arule)
{
	u_int32_t		 nr = 0;

	KASSERT(ruleset != NULL && rule != NULL);

	pf_rm_rule(ruleset->rules.active.ptr, rule);
	ruleset->rules.active.rcount--;
	TAILQ_FOREACH(rule, ruleset->rules.active.ptr, entries)
		rule->nr = nr++;
	ruleset->rules.active.ticket++;
	pf_calc_skip_steps(ruleset->rules.active.ptr);

	/* remove the parent anchor rule */
	if (nr == 0 && arule && aruleset) {
		pf_rm_rule(aruleset->rules.active.ptr, arule);
		aruleset->rules.active.rcount--;
		TAILQ_FOREACH(rule, aruleset->rules.active.ptr, entries)
			rule->nr = nr++;
		aruleset->rules.active.ticket++;
		pf_calc_skip_steps(aruleset->rules.active.ptr);
	}
}

/*
 * tagname2tag: look up `tagname' in `head', adding a reference; when
 * `create' is set and the name is unknown, allocate the lowest free
 * numeric tag (<= TAGID_MAX).  Returns 0 on failure / unknown name.
 * The list is kept sorted by tag id so the first gap can be reused.
 */
u_int16_t
tagname2tag(struct pf_tags *head, char *tagname, int create)
{
	struct pf_tagname	*tag, *p = NULL;
	u_int16_t		 new_tagid = 1;

	TAILQ_FOREACH(tag, head, entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	if (!create)
		return (0);

	/*
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find. if there is none or the list
	 * is empty, append a new entry at the end.
	 */

	/* new entry */
	if (!TAILQ_EMPTY(head))
		for (p = TAILQ_FIRST(head); p != NULL &&
		    p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
			new_tagid = p->tag + 1;

	if (new_tagid > TAGID_MAX)
		return (0);

	/* allocate and fill new struct pf_tagname */
	tag = malloc(sizeof(*tag), M_RTABLE, M_NOWAIT|M_ZERO);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref++;

	if (p != NULL)	/* insert new entry before p */
		TAILQ_INSERT_BEFORE(p, tag, entries);
	else		/* either list empty or no free slot in between */
		TAILQ_INSERT_TAIL(head, tag, entries);

	return (tag->tag);
}

/* tag2tagname: copy the name for `tagid' into `p'; no-op if unknown */
void
tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
{
	struct pf_tagname	*tag;

	TAILQ_FOREACH(tag, head, entries)
		if (tag->tag == tagid) {
			strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
			return;
		}
}

/* tag_unref: drop one reference on `tag'; free the entry at zero */
void
tag_unref(struct pf_tags *head, u_int16_t tag)
{
	struct pf_tagname	*p, *next;

	if (tag == 0)
		return;

	for (p = TAILQ_FIRST(head); p != NULL; p = next) {
		next = TAILQ_NEXT(p, entries);
		if (tag == p->tag) {
			if (--p->ref == 0) {
				TAILQ_REMOVE(head, p, entries);
				free(p, M_RTABLE, sizeof(*p));
			}
			break;
		}
	}
}

u_int16_t
pf_tagname2tag(char *tagname, int create)
{
	return (tagname2tag(&pf_tags, tagname, create));
}

void
pf_tag2tagname(u_int16_t tagid, char *p)
{
	tag2tagname(&pf_tags, tagid, p);
}

/* take an extra reference on an already-existing tag id */
void
pf_tag_ref(u_int16_t tag)
{
	struct pf_tagname *t;

	TAILQ_FOREACH(t, &pf_tags, entries)
		if (t->tag == tag)
			break;
	if (t != NULL)
		t->ref++;
}

void
pf_tag_unref(u_int16_t tag)
{
	tag_unref(&pf_tags, tag);
}

/* resolve a route-label name to its id; -1 if the label is unknown */
int
pf_rtlabel_add(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL &&
	    (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
		return (-1);
	return (0);
}

void
pf_rtlabel_remove(struct pf_addr_wrap *a)
{
	if (a->type == PF_ADDR_RTLABEL)
		rtlabel_unref(a->v.rtlabel);
}

/* fill in the textual label name for userland; "?" if id unresolvable */
void
pf_rtlabel_copyout(struct pf_addr_wrap *a)
{
	const char	*name;

	if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
		if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
			strlcpy(a->v.rtlabelname, "?",
			    sizeof(a->v.rtlabelname));
		else
			strlcpy(a->v.rtlabelname, name,
			    sizeof(a->v.rtlabelname));
	}
}

/* queue ids share the generic tag machinery, on their own list */
u_int16_t
pf_qname2qid(char *qname, int create)
{
	return (tagname2tag(&pf_qids, qname, create));
}

void
pf_qid2qname(u_int16_t qid, char *p)
{
	tag2tagname(&pf_qids, qid, p);
}

void
pf_qid_unref(u_int16_t qid)
{
	tag_unref(&pf_qids, (u_int16_t)qid);
}

/*
 * pf_begin_rules: open an inactive ruleset for `anchor' (creating the
 * ruleset if needed), flush any stale inactive rules, and hand back a
 * fresh ticket that pf_commit_rules() will later require.
 */
int
pf_begin_rules(u_int32_t *ticket, const char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	if ((rs = pf_find_or_create_ruleset(anchor)) == NULL)
		return (EINVAL);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	*ticket = ++rs->rules.inactive.ticket;
	rs->rules.inactive.open = 1;
	return (0);
}

/*
 * pf_rollback_rules: abandon an open inactive ruleset identified by
 * `ticket'.  Silently succeeds if there is nothing to roll back.
 */
int
pf_rollback_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    rs->rules.inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules.inactive.ptr)) != NULL) {
		pf_rm_rule(rs->rules.inactive.ptr, rule);
		rs->rules.inactive.rcount--;
	}
	rs->rules.inactive.open = 0;

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);

	pf_free_queues(pf_queues_inactive);

	return (0);
}

/* release every queuespec on `where' along with its interface ref */
void
pf_free_queues(struct pf_queuehead *where)
{
	struct pf_queuespec	*q, *qtmp;

	TAILQ_FOREACH_SAFE(q, where, entries, qtmp) {
		TAILQ_REMOVE(where, q, entries);
		pfi_kif_unref(q->kif, PFI_KIF_REF_RULE);
		pool_put(&pf_queue_pl, q);
	}
}

/* detach hfsc from every root-queue interface (back to priq) */
void
pf_remove_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;

	/* put back interfaces in normal queueing mode */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		KASSERT(HFSC_ENABLED(&ifp->if_snd));

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}
}

/* transient ifp -> hfsc_if mapping used while building new queues */
struct pf_hfsc_queue {
	struct ifnet		*ifp;
	struct hfsc_if		*hif;
	struct pf_hfsc_queue	*next;
};

/* linear lookup of `ifp' in the singly-linked build list */
static inline struct pf_hfsc_queue *
pf_hfsc_ifp2q(struct pf_hfsc_queue *list, struct ifnet *ifp)
{
	struct pf_hfsc_queue *phq = list;

	while (phq != NULL) {
		if (phq->ifp == ifp)
			return (phq);

		phq = phq->next;
	}

	return (phq);
}

/*
 * pf_create_queues: build hfsc state for all queues on the active list
 * and attach it to the interfaces; interfaces that only appear on the
 * old (inactive) list fall back to priq.  On error the partially built
 * hfsc state is torn down and nothing is attached.
 */
int
pf_create_queues(void)
{
	struct pf_queuespec	*q;
	struct ifnet		*ifp;
	struct pf_hfsc_queue	*list = NULL, *phq;
	int			 error;

	/* find root queues and alloc hfsc for these interfaces */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		phq = malloc(sizeof(*phq), M_TEMP, M_WAITOK);
		phq->ifp = ifp;
		phq->hif = hfsc_pf_alloc(ifp);

		phq->next = list;
		list = phq;
	}

	/* and now everything */
	TAILQ_FOREACH(q, pf_queues_active, entries) {
		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		phq = pf_hfsc_ifp2q(list, ifp);
		KASSERT(phq != NULL);

		error = hfsc_pf_addqueue(phq->hif, q);
		if (error != 0)
			goto error;
	}

	/* find root queues in old list to disable them if necessary */
	TAILQ_FOREACH(q, pf_queues_inactive, entries) {
		if (q->parent_qid != 0)
			continue;

		ifp = q->kif->pfik_ifp;
		if (ifp == NULL)
			continue;

		phq = pf_hfsc_ifp2q(list, ifp);
		if (phq != NULL)
			continue;

		ifq_attach(&ifp->if_snd, ifq_priq_ops, NULL);
	}

	/* commit the new queues */
	while (list != NULL) {
		phq = list;
		list = phq->next;

		ifp = phq->ifp;

		ifq_attach(&ifp->if_snd, ifq_hfsc_ops, phq->hif);
		free(phq, M_TEMP, sizeof(*phq));
	}

	return (0);

error:
	while (list != NULL) {
		phq = list;
		list = phq->next;

		hfsc_pf_free(phq->hif);
		free(phq, M_TEMP, sizeof(*phq));
	}

	return (error);
}

/*
 * pf_commit_queues: swap the active/inactive queue lists and realize
 * the new active set; on failure the swap is undone so the previous
 * configuration stays in effect.
 */
int
pf_commit_queues(void)
{
	struct pf_queuehead	*qswap;
	int			 error;

	/* swap */
	qswap = pf_queues_active;
	pf_queues_active = pf_queues_inactive;
	pf_queues_inactive = qswap;

	error = pf_create_queues();
	if (error != 0) {
		pf_queues_inactive = pf_queues_active;
		pf_queues_active = qswap;
		return (error);
	}

	pf_free_queues(pf_queues_inactive);

	return (0);
}

/* MD5-fold a fixed-size member / NUL-terminated string of *st */
#define PF_MD5_UPD(st, elm)						\
	MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
	MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

/* fold integers in network byte order so the hash is endian-stable */
#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
	(stor) = htonl((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
	(stor) = htons((st)->elm);					\
	MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)

/* fold one rule address (variant by addr.type) into the ruleset hash */
void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
	case PF_ADDR_DYNIFTL:
		PF_MD5_UPD(pfr, addr.v.ifname);
		PF_MD5_UPD(pfr, addr.iflags);
		break;
	case PF_ADDR_TABLE:
		PF_MD5_UPD(pfr, addr.v.tblname);
		break;
	case PF_ADDR_ADDRMASK:
		/* XXX ignore af? */
		PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
		PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
		break;
	case PF_ADDR_RTLABEL:
		PF_MD5_UPD(pfr, addr.v.rtlabelname);
		break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

/*
 * pf_hash_rule: fold the match-relevant fields of `rule' into `ctx';
 * used to compute the ruleset checksum pfsync peers compare.
 */
void
pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	PF_MD5_UPD_STR(rule, label);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, rcvifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
}

/*
 * pf_commit_rules: atomically (under splsoftnet) swap the inactive
 * ruleset in as active, purge the replaced rules, and for the main
 * ruleset recompute the pfsync checksum and commit the queues.
 * Returns EBUSY when the ticket does not match the open transaction.
 */
int
pf_commit_rules(u_int32_t ticket, char *anchor)
{
	struct pf_ruleset	*rs;
	struct pf_rule		*rule, **old_array;
	struct pf_rulequeue	*old_rules;
	int			 s, error;
	u_int32_t		 old_rcount;

	rs = pf_find_ruleset(anchor);
	if (rs == NULL || !rs->rules.inactive.open ||
	    ticket != rs->rules.inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	s = splsoftnet();
	old_rules = rs->rules.active.ptr;
	old_rcount = rs->rules.active.rcount;
	old_array = rs->rules.active.ptr_array;

	rs->rules.active.ptr = rs->rules.inactive.ptr;
	rs->rules.active.ptr_array = rs->rules.inactive.ptr_array;
	rs->rules.active.rcount = rs->rules.inactive.rcount;
	rs->rules.inactive.ptr = old_rules;
	rs->rules.inactive.ptr_array = old_array;
	rs->rules.inactive.rcount = old_rcount;

	rs->rules.active.ticket = rs->rules.inactive.ticket;
	pf_calc_skip_steps(rs->rules.active.ptr);


	/* Purge the old rule list. */
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_rm_rule(old_rules, rule);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;
	rs->rules.inactive.rcount = 0;
	rs->rules.inactive.open = 0;
	pf_remove_if_empty_ruleset(rs);
	splx(s);

	/* queue defs only in the main ruleset */
	if (anchor[0])
		return (0);
	return (pf_commit_queues());
}

/*
 * pf_setup_pfsync_matching: hash all inactive rules into the global
 * pf_status.pf_chksum and (re)build the nr-indexed ptr_array for them.
 */
int
pf_setup_pfsync_matching(struct pf_ruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_rule		*rule;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	if (rs->rules.inactive.ptr_array)
		free(rs->rules.inactive.ptr_array, M_TEMP, 0);
	rs->rules.inactive.ptr_array = NULL;

	if (rs->rules.inactive.rcount) {
		rs->rules.inactive.ptr_array =
		    mallocarray(rs->rules.inactive.rcount, sizeof(caddr_t),
		    M_TEMP, M_NOWAIT);

		if (!rs->rules.inactive.ptr_array)
			return (ENOMEM);

		TAILQ_FOREACH(rule, rs->rules.inactive.ptr, entries) {
			pf_hash_rule(&ctx, rule);
			(rs->rules.inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum));
	return (0);
}

/* resolve dynamic-interface / table / route-label pieces of an address */
int
pf_addr_setup(struct pf_ruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	if (pfi_dynaddr_setup(addr, af) ||
	    pf_tbladdr_setup(ruleset, addr) ||
	    pf_rtlabel_add(addr))
		return (EINVAL);

	return (0);
}

/* resolve `ifname' to a referenced kif; empty name yields *kif = NULL */
int
pf_kif_setup(char *ifname, struct pfi_kif **kif)
{
	if (ifname[0]) {
		*kif = pfi_kif_get(ifname);
		if (*kif == NULL)
			return (EINVAL);

		pfi_kif_ref(*kif, PFI_KIF_REF_RULE);
	} else
		*kif = NULL;

	return (0);
}

/* translate kernel address refs back to their userland (name) form */
void
pf_addr_copyout(struct pf_addr_wrap *addr)
{
	pfi_dynaddr_copyout(addr);
	pf_tbladdr_copyout(addr);
	pf_rtlabel_copyout(addr);
}

int
pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
{
	int			 s;
	int			 error = 0;

	/* XXX keep in sync with switch() below */
	if (securelevel > 1)
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETRULE:
		case DIOCGETSTATE:
		case DIOCSETSTATUSIF:
		case DIOCGETSTATUS:
		case DIOCCLRSTATUS:
		case DIOCNATLOOK:
		case DIOCSETDEBUG:
		case DIOCGETSTATES:
		case DIOCGETTIMEOUT:
		case DIOCCLRRULECTRS:
		case DIOCGETLIMIT:
		case DIOCGETRULESETS:
		case DIOCGETRULESET:
		case DIOCGETQUEUES:
		case DIOCGETQUEUE:
		case DIOCGETQSTATS:
		case DIOCRGETTABLES:
		case DIOCRGETTSTATS:
		case DIOCRCLRTSTATS:
		case DIOCRCLRADDRS:
		case DIOCRADDADDRS:
		case DIOCRDELADDRS:
		case DIOCRSETADDRS:
		case DIOCRGETASTATS:
		case DIOCRCLRASTATS:
		case DIOCRTSTADDRS:
		case DIOCOSFPGET:
		case DIOCGETSRCNODES:
		case DIOCCLRSRCNODES:
		case DIOCIGETIFACES:
		case DIOCSETIFFLAG:
		case DIOCCLRIFFLAG:
			break;
		case DIOCRCLRTABLES:
		case DIOCRADDTABLES:
		case DIOCRDELTABLES:
		case DIOCRSETTFLAGS:
			if (((struct pfioc_table *)addr)->pfrio_flags &
			    PFR_FLAG_DUMMY)
				break; /* dummy operation ok */
			return (EPERM);
		default:
			return (EPERM);
		}

	if (!(flags & FWRITE))
		switch (cmd) {
		case DIOCGETRULES:
		case DIOCGETSTATE:
		case DIOCGETSTATUS:
		case
DIOCGETSTATES: 951 case DIOCGETTIMEOUT: 952 case DIOCGETLIMIT: 953 case DIOCGETRULESETS: 954 case DIOCGETRULESET: 955 case DIOCGETQUEUES: 956 case DIOCGETQUEUE: 957 case DIOCGETQSTATS: 958 case DIOCNATLOOK: 959 case DIOCRGETTABLES: 960 case DIOCRGETTSTATS: 961 case DIOCRGETADDRS: 962 case DIOCRGETASTATS: 963 case DIOCRTSTADDRS: 964 case DIOCOSFPGET: 965 case DIOCGETSRCNODES: 966 case DIOCIGETIFACES: 967 break; 968 case DIOCRCLRTABLES: 969 case DIOCRADDTABLES: 970 case DIOCRDELTABLES: 971 case DIOCRCLRTSTATS: 972 case DIOCRCLRADDRS: 973 case DIOCRADDADDRS: 974 case DIOCRDELADDRS: 975 case DIOCRSETADDRS: 976 case DIOCRSETTFLAGS: 977 if (((struct pfioc_table *)addr)->pfrio_flags & 978 PFR_FLAG_DUMMY) { 979 flags |= FWRITE; /* need write lock for dummy */ 980 break; /* dummy operation ok */ 981 } 982 return (EACCES); 983 case DIOCGETRULE: 984 if (((struct pfioc_rule *)addr)->action == 985 PF_GET_CLR_CNTR) 986 return (EACCES); 987 break; 988 default: 989 return (EACCES); 990 } 991 992 if (flags & FWRITE) 993 rw_enter_write(&pf_consistency_lock); 994 else 995 rw_enter_read(&pf_consistency_lock); 996 997 s = splsoftnet(); 998 switch (cmd) { 999 1000 case DIOCSTART: 1001 if (pf_status.running) 1002 error = EEXIST; 1003 else { 1004 pf_status.running = 1; 1005 pf_status.since = time_second; 1006 if (pf_status.stateid == 0) { 1007 pf_status.stateid = time_second; 1008 pf_status.stateid = pf_status.stateid << 32; 1009 } 1010 pf_create_queues(); 1011 DPFPRINTF(LOG_NOTICE, "pf: started"); 1012 } 1013 break; 1014 1015 case DIOCSTOP: 1016 if (!pf_status.running) 1017 error = ENOENT; 1018 else { 1019 pf_status.running = 0; 1020 pf_status.since = time_second; 1021 pf_remove_queues(); 1022 DPFPRINTF(LOG_NOTICE, "pf: stopped"); 1023 } 1024 break; 1025 1026 case DIOCGETQUEUES: { 1027 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1028 struct pf_queuespec *qs; 1029 u_int32_t nr = 0; 1030 1031 pq->ticket = pf_main_ruleset.rules.active.ticket; 1032 1033 /* save state to not run over 
them all each time? */ 1034 qs = TAILQ_FIRST(pf_queues_active); 1035 while (qs != NULL) { 1036 qs = TAILQ_NEXT(qs, entries); 1037 nr++; 1038 } 1039 pq->nr = nr; 1040 break; 1041 } 1042 1043 case DIOCGETQUEUE: { 1044 struct pfioc_queue *pq = (struct pfioc_queue *)addr; 1045 struct pf_queuespec *qs; 1046 u_int32_t nr = 0; 1047 1048 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1049 error = EBUSY; 1050 break; 1051 } 1052 1053 /* save state to not run over them all each time? */ 1054 qs = TAILQ_FIRST(pf_queues_active); 1055 while ((qs != NULL) && (nr++ < pq->nr)) 1056 qs = TAILQ_NEXT(qs, entries); 1057 if (qs == NULL) { 1058 error = EBUSY; 1059 break; 1060 } 1061 bcopy(qs, &pq->queue, sizeof(pq->queue)); 1062 break; 1063 } 1064 1065 case DIOCGETQSTATS: { 1066 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr; 1067 struct pf_queuespec *qs; 1068 u_int32_t nr; 1069 int nbytes; 1070 1071 if (pq->ticket != pf_main_ruleset.rules.active.ticket) { 1072 error = EBUSY; 1073 break; 1074 } 1075 nbytes = pq->nbytes; 1076 nr = 0; 1077 1078 /* save state to not run over them all each time? 
*/ 1079 qs = TAILQ_FIRST(pf_queues_active); 1080 while ((qs != NULL) && (nr++ < pq->nr)) 1081 qs = TAILQ_NEXT(qs, entries); 1082 if (qs == NULL) { 1083 error = EBUSY; 1084 break; 1085 } 1086 bcopy(qs, &pq->queue, sizeof(pq->queue)); 1087 error = hfsc_pf_qstats(qs, pq->buf, &nbytes); 1088 if (error == 0) 1089 pq->nbytes = nbytes; 1090 break; 1091 } 1092 1093 case DIOCADDQUEUE: { 1094 struct pfioc_queue *q = (struct pfioc_queue *)addr; 1095 struct pf_queuespec *qs; 1096 1097 if (q->ticket != pf_main_ruleset.rules.inactive.ticket) { 1098 error = EBUSY; 1099 break; 1100 } 1101 qs = pool_get(&pf_queue_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1102 if (qs == NULL) { 1103 error = ENOMEM; 1104 break; 1105 } 1106 bcopy(&q->queue, qs, sizeof(*qs)); 1107 qs->qid = pf_qname2qid(qs->qname, 1); 1108 if (qs->parent[0] && (qs->parent_qid = 1109 pf_qname2qid(qs->parent, 0)) == 0) { 1110 pool_put(&pf_queue_pl, qs); 1111 error = ESRCH; 1112 break; 1113 } 1114 qs->kif = pfi_kif_get(qs->ifname); 1115 if (qs->kif == NULL) { 1116 pool_put(&pf_queue_pl, qs); 1117 error = ESRCH; 1118 break; 1119 } 1120 /* XXX resolve bw percentage specs */ 1121 pfi_kif_ref(qs->kif, PFI_KIF_REF_RULE); 1122 if (qs->qlimit == 0) 1123 qs->qlimit = HFSC_DEFAULT_QLIMIT; 1124 TAILQ_INSERT_TAIL(pf_queues_inactive, qs, entries); 1125 1126 break; 1127 } 1128 1129 case DIOCADDRULE: { 1130 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1131 struct pf_ruleset *ruleset; 1132 struct pf_rule *rule, *tail; 1133 1134 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1135 ruleset = pf_find_ruleset(pr->anchor); 1136 if (ruleset == NULL) { 1137 error = EINVAL; 1138 break; 1139 } 1140 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1141 error = EINVAL; 1142 break; 1143 } 1144 if (pr->ticket != ruleset->rules.inactive.ticket) { 1145 error = EBUSY; 1146 break; 1147 } 1148 rule = pool_get(&pf_rule_pl, PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1149 if (rule == NULL) { 1150 error = ENOMEM; 1151 break; 1152 } 1153 if ((error = pf_rule_copyin(&pr->rule, 
rule, ruleset))) { 1154 pf_rm_rule(NULL, rule); 1155 rule = NULL; 1156 break; 1157 } 1158 rule->cuid = p->p_ucred->cr_ruid; 1159 rule->cpid = p->p_p->ps_pid; 1160 1161 switch (rule->af) { 1162 case 0: 1163 break; 1164 case AF_INET: 1165 break; 1166 #ifdef INET6 1167 case AF_INET6: 1168 break; 1169 #endif /* INET6 */ 1170 default: 1171 pf_rm_rule(NULL, rule); 1172 rule = NULL; 1173 error = EAFNOSUPPORT; 1174 goto fail; 1175 } 1176 tail = TAILQ_LAST(ruleset->rules.inactive.ptr, 1177 pf_rulequeue); 1178 if (tail) 1179 rule->nr = tail->nr + 1; 1180 else 1181 rule->nr = 0; 1182 1183 if (rule->src.addr.type == PF_ADDR_NONE || 1184 rule->dst.addr.type == PF_ADDR_NONE) 1185 error = EINVAL; 1186 1187 if (pf_addr_setup(ruleset, &rule->src.addr, rule->af)) 1188 error = EINVAL; 1189 if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af)) 1190 error = EINVAL; 1191 if (pf_addr_setup(ruleset, &rule->rdr.addr, rule->af)) 1192 error = EINVAL; 1193 if (pf_addr_setup(ruleset, &rule->nat.addr, rule->af)) 1194 error = EINVAL; 1195 if (pf_addr_setup(ruleset, &rule->route.addr, rule->af)) 1196 error = EINVAL; 1197 if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) 1198 error = EINVAL; 1199 if (rule->rt && !rule->direction) 1200 error = EINVAL; 1201 if (rule->scrub_flags & PFSTATE_SETPRIO && 1202 (rule->set_prio[0] > IFQ_MAXPRIO || 1203 rule->set_prio[1] > IFQ_MAXPRIO)) 1204 error = EINVAL; 1205 1206 if (error) { 1207 pf_rm_rule(NULL, rule); 1208 break; 1209 } 1210 TAILQ_INSERT_TAIL(ruleset->rules.inactive.ptr, 1211 rule, entries); 1212 ruleset->rules.inactive.rcount++; 1213 break; 1214 } 1215 1216 case DIOCGETRULES: { 1217 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1218 struct pf_ruleset *ruleset; 1219 struct pf_rule *tail; 1220 1221 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1222 ruleset = pf_find_ruleset(pr->anchor); 1223 if (ruleset == NULL) { 1224 error = EINVAL; 1225 break; 1226 } 1227 tail = TAILQ_LAST(ruleset->rules.active.ptr, pf_rulequeue); 1228 if (tail) 1229 pr->nr = 
tail->nr + 1; 1230 else 1231 pr->nr = 0; 1232 pr->ticket = ruleset->rules.active.ticket; 1233 break; 1234 } 1235 1236 case DIOCGETRULE: { 1237 struct pfioc_rule *pr = (struct pfioc_rule *)addr; 1238 struct pf_ruleset *ruleset; 1239 struct pf_rule *rule; 1240 int i; 1241 1242 pr->anchor[sizeof(pr->anchor) - 1] = 0; 1243 ruleset = pf_find_ruleset(pr->anchor); 1244 if (ruleset == NULL) { 1245 error = EINVAL; 1246 break; 1247 } 1248 if (pr->ticket != ruleset->rules.active.ticket) { 1249 error = EBUSY; 1250 break; 1251 } 1252 rule = TAILQ_FIRST(ruleset->rules.active.ptr); 1253 while ((rule != NULL) && (rule->nr != pr->nr)) 1254 rule = TAILQ_NEXT(rule, entries); 1255 if (rule == NULL) { 1256 error = EBUSY; 1257 break; 1258 } 1259 bcopy(rule, &pr->rule, sizeof(struct pf_rule)); 1260 bzero(&pr->rule.entries, sizeof(pr->rule.entries)); 1261 pr->rule.kif = NULL; 1262 pr->rule.nat.kif = NULL; 1263 pr->rule.rdr.kif = NULL; 1264 pr->rule.route.kif = NULL; 1265 pr->rule.rcv_kif = NULL; 1266 pr->rule.anchor = NULL; 1267 pr->rule.overload_tbl = NULL; 1268 if (pf_anchor_copyout(ruleset, rule, pr)) { 1269 error = EBUSY; 1270 break; 1271 } 1272 pf_addr_copyout(&pr->rule.src.addr); 1273 pf_addr_copyout(&pr->rule.dst.addr); 1274 pf_addr_copyout(&pr->rule.rdr.addr); 1275 pf_addr_copyout(&pr->rule.nat.addr); 1276 pf_addr_copyout(&pr->rule.route.addr); 1277 for (i = 0; i < PF_SKIP_COUNT; ++i) 1278 if (rule->skip[i].ptr == NULL) 1279 pr->rule.skip[i].nr = (u_int32_t)-1; 1280 else 1281 pr->rule.skip[i].nr = 1282 rule->skip[i].ptr->nr; 1283 1284 if (pr->action == PF_GET_CLR_CNTR) { 1285 rule->evaluations = 0; 1286 rule->packets[0] = rule->packets[1] = 0; 1287 rule->bytes[0] = rule->bytes[1] = 0; 1288 rule->states_tot = 0; 1289 } 1290 break; 1291 } 1292 1293 case DIOCCHANGERULE: { 1294 struct pfioc_rule *pcr = (struct pfioc_rule *)addr; 1295 struct pf_ruleset *ruleset; 1296 struct pf_rule *oldrule = NULL, *newrule = NULL; 1297 u_int32_t nr = 0; 1298 1299 if (pcr->action < PF_CHANGE_ADD_HEAD 
|| 1300 pcr->action > PF_CHANGE_GET_TICKET) { 1301 error = EINVAL; 1302 break; 1303 } 1304 ruleset = pf_find_ruleset(pcr->anchor); 1305 if (ruleset == NULL) { 1306 error = EINVAL; 1307 break; 1308 } 1309 1310 if (pcr->action == PF_CHANGE_GET_TICKET) { 1311 pcr->ticket = ++ruleset->rules.active.ticket; 1312 break; 1313 } else { 1314 if (pcr->ticket != 1315 ruleset->rules.active.ticket) { 1316 error = EINVAL; 1317 break; 1318 } 1319 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { 1320 error = EINVAL; 1321 break; 1322 } 1323 } 1324 1325 if (pcr->action != PF_CHANGE_REMOVE) { 1326 newrule = pool_get(&pf_rule_pl, 1327 PR_WAITOK|PR_LIMITFAIL|PR_ZERO); 1328 if (newrule == NULL) { 1329 error = ENOMEM; 1330 break; 1331 } 1332 pf_rule_copyin(&pcr->rule, newrule, ruleset); 1333 newrule->cuid = p->p_ucred->cr_ruid; 1334 newrule->cpid = p->p_p->ps_pid; 1335 1336 switch (newrule->af) { 1337 case 0: 1338 break; 1339 case AF_INET: 1340 break; 1341 #ifdef INET6 1342 case AF_INET6: 1343 break; 1344 #endif /* INET6 */ 1345 default: 1346 pool_put(&pf_rule_pl, newrule); 1347 error = EAFNOSUPPORT; 1348 goto fail; 1349 } 1350 1351 if (newrule->rt && !newrule->direction) 1352 error = EINVAL; 1353 if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af)) 1354 error = EINVAL; 1355 if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af)) 1356 error = EINVAL; 1357 if (pf_addr_setup(ruleset, &newrule->rdr.addr, newrule->af)) 1358 error = EINVAL; 1359 if (pf_addr_setup(ruleset, &newrule->nat.addr, newrule->af)) 1360 error = EINVAL; 1361 if (pf_addr_setup(ruleset, &newrule->route.addr, newrule->af)) 1362 error = EINVAL; 1363 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) 1364 error = EINVAL; 1365 1366 if (error) { 1367 pf_rm_rule(NULL, newrule); 1368 break; 1369 } 1370 } 1371 1372 if (pcr->action == PF_CHANGE_ADD_HEAD) 1373 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1374 else if (pcr->action == PF_CHANGE_ADD_TAIL) 1375 oldrule = TAILQ_LAST(ruleset->rules.active.ptr, 1376 
pf_rulequeue); 1377 else { 1378 oldrule = TAILQ_FIRST(ruleset->rules.active.ptr); 1379 while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) 1380 oldrule = TAILQ_NEXT(oldrule, entries); 1381 if (oldrule == NULL) { 1382 if (newrule != NULL) 1383 pf_rm_rule(NULL, newrule); 1384 error = EINVAL; 1385 break; 1386 } 1387 } 1388 1389 if (pcr->action == PF_CHANGE_REMOVE) { 1390 pf_rm_rule(ruleset->rules.active.ptr, oldrule); 1391 ruleset->rules.active.rcount--; 1392 } else { 1393 if (oldrule == NULL) 1394 TAILQ_INSERT_TAIL( 1395 ruleset->rules.active.ptr, 1396 newrule, entries); 1397 else if (pcr->action == PF_CHANGE_ADD_HEAD || 1398 pcr->action == PF_CHANGE_ADD_BEFORE) 1399 TAILQ_INSERT_BEFORE(oldrule, newrule, entries); 1400 else 1401 TAILQ_INSERT_AFTER( 1402 ruleset->rules.active.ptr, 1403 oldrule, newrule, entries); 1404 ruleset->rules.active.rcount++; 1405 } 1406 1407 nr = 0; 1408 TAILQ_FOREACH(oldrule, ruleset->rules.active.ptr, entries) 1409 oldrule->nr = nr++; 1410 1411 ruleset->rules.active.ticket++; 1412 1413 pf_calc_skip_steps(ruleset->rules.active.ptr); 1414 pf_remove_if_empty_ruleset(ruleset); 1415 1416 break; 1417 } 1418 1419 case DIOCCLRSTATES: { 1420 struct pf_state *s, *nexts; 1421 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1422 u_int killed = 0; 1423 1424 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { 1425 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1426 1427 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1428 s->kif->pfik_name)) { 1429 #if NPFSYNC > 0 1430 /* don't send out individual delete messages */ 1431 SET(s->state_flags, PFSTATE_NOSYNC); 1432 #endif /* NPFSYNC > 0 */ 1433 pf_remove_state(s); 1434 killed++; 1435 } 1436 } 1437 psk->psk_killed = killed; 1438 #if NPFSYNC > 0 1439 pfsync_clear_states(pf_status.hostid, psk->psk_ifname); 1440 #endif /* NPFSYNC > 0 */ 1441 break; 1442 } 1443 1444 case DIOCKILLSTATES: { 1445 struct pf_state *s, *nexts; 1446 struct pf_state_key *sk; 1447 struct pf_addr *srcaddr, 
*dstaddr; 1448 u_int16_t srcport, dstport; 1449 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr; 1450 u_int killed = 0; 1451 1452 if (psk->psk_pfcmp.id) { 1453 if (psk->psk_pfcmp.creatorid == 0) 1454 psk->psk_pfcmp.creatorid = pf_status.hostid; 1455 if ((s = pf_find_state_byid(&psk->psk_pfcmp))) { 1456 pf_remove_state(s); 1457 psk->psk_killed = 1; 1458 } 1459 break; 1460 } 1461 1462 for (s = RB_MIN(pf_state_tree_id, &tree_id); s; 1463 s = nexts) { 1464 nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); 1465 1466 if (s->direction == PF_OUT) { 1467 sk = s->key[PF_SK_STACK]; 1468 srcaddr = &sk->addr[1]; 1469 dstaddr = &sk->addr[0]; 1470 srcport = sk->port[1]; 1471 dstport = sk->port[0]; 1472 } else { 1473 sk = s->key[PF_SK_WIRE]; 1474 srcaddr = &sk->addr[0]; 1475 dstaddr = &sk->addr[1]; 1476 srcport = sk->port[0]; 1477 dstport = sk->port[1]; 1478 } 1479 if ((!psk->psk_af || sk->af == psk->psk_af) 1480 && (!psk->psk_proto || psk->psk_proto == 1481 sk->proto) && psk->psk_rdomain == sk->rdomain && 1482 PF_MATCHA(psk->psk_src.neg, 1483 &psk->psk_src.addr.v.a.addr, 1484 &psk->psk_src.addr.v.a.mask, 1485 srcaddr, sk->af) && 1486 PF_MATCHA(psk->psk_dst.neg, 1487 &psk->psk_dst.addr.v.a.addr, 1488 &psk->psk_dst.addr.v.a.mask, 1489 dstaddr, sk->af) && 1490 (psk->psk_src.port_op == 0 || 1491 pf_match_port(psk->psk_src.port_op, 1492 psk->psk_src.port[0], psk->psk_src.port[1], 1493 srcport)) && 1494 (psk->psk_dst.port_op == 0 || 1495 pf_match_port(psk->psk_dst.port_op, 1496 psk->psk_dst.port[0], psk->psk_dst.port[1], 1497 dstport)) && 1498 (!psk->psk_label[0] || (s->rule.ptr->label[0] && 1499 !strcmp(psk->psk_label, s->rule.ptr->label))) && 1500 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname, 1501 s->kif->pfik_name))) { 1502 pf_remove_state(s); 1503 killed++; 1504 } 1505 } 1506 psk->psk_killed = killed; 1507 break; 1508 } 1509 1510 #if NPFSYNC > 0 1511 case DIOCADDSTATE: { 1512 struct pfioc_state *ps = (struct pfioc_state *)addr; 1513 struct pfsync_state *sp = 
&ps->state; 1514 1515 if (sp->timeout >= PFTM_MAX) { 1516 error = EINVAL; 1517 break; 1518 } 1519 error = pfsync_state_import(sp, PFSYNC_SI_IOCTL); 1520 break; 1521 } 1522 #endif /* NPFSYNC > 0 */ 1523 1524 case DIOCGETSTATE: { 1525 struct pfioc_state *ps = (struct pfioc_state *)addr; 1526 struct pf_state *s; 1527 struct pf_state_cmp id_key; 1528 1529 bzero(&id_key, sizeof(id_key)); 1530 id_key.id = ps->state.id; 1531 id_key.creatorid = ps->state.creatorid; 1532 1533 s = pf_find_state_byid(&id_key); 1534 if (s == NULL) { 1535 error = ENOENT; 1536 break; 1537 } 1538 1539 pf_state_export(&ps->state, s); 1540 break; 1541 } 1542 1543 case DIOCGETSTATES: { 1544 struct pfioc_states *ps = (struct pfioc_states *)addr; 1545 struct pf_state *state; 1546 struct pfsync_state *p, *pstore; 1547 u_int32_t nr = 0; 1548 1549 if (ps->ps_len == 0) { 1550 nr = pf_status.states; 1551 ps->ps_len = sizeof(struct pfsync_state) * nr; 1552 break; 1553 } 1554 1555 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 1556 1557 p = ps->ps_states; 1558 1559 state = TAILQ_FIRST(&state_list); 1560 while (state) { 1561 if (state->timeout != PFTM_UNLINKED) { 1562 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len) 1563 break; 1564 pf_state_export(pstore, state); 1565 error = copyout(pstore, p, sizeof(*p)); 1566 if (error) { 1567 free(pstore, M_TEMP, sizeof(*pstore)); 1568 goto fail; 1569 } 1570 p++; 1571 nr++; 1572 } 1573 state = TAILQ_NEXT(state, entry_list); 1574 } 1575 1576 ps->ps_len = sizeof(struct pfsync_state) * nr; 1577 1578 free(pstore, M_TEMP, sizeof(*pstore)); 1579 break; 1580 } 1581 1582 case DIOCGETSTATUS: { 1583 struct pf_status *s = (struct pf_status *)addr; 1584 bcopy(&pf_status, s, sizeof(struct pf_status)); 1585 pfi_update_status(s->ifname, s); 1586 break; 1587 } 1588 1589 case DIOCSETSTATUSIF: { 1590 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1591 1592 if (pi->pfiio_name[0] == 0) { 1593 bzero(pf_status.ifname, IFNAMSIZ); 1594 break; 1595 } 1596 strlcpy(pf_trans_set.statusif, 
pi->pfiio_name, IFNAMSIZ); 1597 pf_trans_set.mask |= PF_TSET_STATUSIF; 1598 break; 1599 } 1600 1601 case DIOCCLRSTATUS: { 1602 struct pfioc_iface *pi = (struct pfioc_iface *)addr; 1603 1604 /* if ifname is specified, clear counters there only */ 1605 if (pi->pfiio_name[0]) { 1606 pfi_update_status(pi->pfiio_name, NULL); 1607 break; 1608 } 1609 1610 bzero(pf_status.counters, sizeof(pf_status.counters)); 1611 bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); 1612 bzero(pf_status.scounters, sizeof(pf_status.scounters)); 1613 pf_status.since = time_second; 1614 1615 break; 1616 } 1617 1618 case DIOCNATLOOK: { 1619 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr; 1620 struct pf_state_key *sk; 1621 struct pf_state *state; 1622 struct pf_state_key_cmp key; 1623 int m = 0, direction = pnl->direction; 1624 int sidx, didx; 1625 1626 /* NATLOOK src and dst are reversed, so reverse sidx/didx */ 1627 sidx = (direction == PF_IN) ? 1 : 0; 1628 didx = (direction == PF_IN) ? 0 : 1; 1629 1630 if (!pnl->proto || 1631 PF_AZERO(&pnl->saddr, pnl->af) || 1632 PF_AZERO(&pnl->daddr, pnl->af) || 1633 ((pnl->proto == IPPROTO_TCP || 1634 pnl->proto == IPPROTO_UDP) && 1635 (!pnl->dport || !pnl->sport)) || 1636 pnl->rdomain > RT_TABLEID_MAX) 1637 error = EINVAL; 1638 else { 1639 key.af = pnl->af; 1640 key.proto = pnl->proto; 1641 key.rdomain = pnl->rdomain; 1642 PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af); 1643 key.port[sidx] = pnl->sport; 1644 PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af); 1645 key.port[didx] = pnl->dport; 1646 1647 state = pf_find_state_all(&key, direction, &m); 1648 1649 if (m > 1) 1650 error = E2BIG; /* more than one state */ 1651 else if (state != NULL) { 1652 sk = state->key[sidx]; 1653 PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af); 1654 pnl->rsport = sk->port[sidx]; 1655 PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af); 1656 pnl->rdport = sk->port[didx]; 1657 pnl->rrdomain = sk->rdomain; 1658 } else 1659 error = ENOENT; 1660 } 1661 break; 1662 } 1663 
1664 case DIOCSETTIMEOUT: { 1665 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1666 1667 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX || 1668 pt->seconds < 0) { 1669 error = EINVAL; 1670 goto fail; 1671 } 1672 if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) 1673 pt->seconds = 1; 1674 pf_default_rule_new.timeout[pt->timeout] = pt->seconds; 1675 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1676 break; 1677 } 1678 1679 case DIOCGETTIMEOUT: { 1680 struct pfioc_tm *pt = (struct pfioc_tm *)addr; 1681 1682 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) { 1683 error = EINVAL; 1684 goto fail; 1685 } 1686 pt->seconds = pf_default_rule.timeout[pt->timeout]; 1687 break; 1688 } 1689 1690 case DIOCGETLIMIT: { 1691 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1692 1693 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { 1694 error = EINVAL; 1695 goto fail; 1696 } 1697 pl->limit = pf_pool_limits[pl->index].limit; 1698 break; 1699 } 1700 1701 case DIOCSETLIMIT: { 1702 struct pfioc_limit *pl = (struct pfioc_limit *)addr; 1703 1704 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX || 1705 pf_pool_limits[pl->index].pp == NULL) { 1706 error = EINVAL; 1707 goto fail; 1708 } 1709 if (((struct pool *)pf_pool_limits[pl->index].pp)->pr_nout > 1710 pl->limit) { 1711 error = EBUSY; 1712 goto fail; 1713 } 1714 /* Fragments reference mbuf clusters. 
*/ 1715 if (pl->index == PF_LIMIT_FRAGS && pl->limit > nmbclust) { 1716 error = EINVAL; 1717 goto fail; 1718 } 1719 1720 pf_pool_limits[pl->index].limit_new = pl->limit; 1721 pl->limit = pf_pool_limits[pl->index].limit; 1722 break; 1723 } 1724 1725 case DIOCSETDEBUG: { 1726 u_int32_t *level = (u_int32_t *)addr; 1727 1728 pf_trans_set.debug = *level; 1729 pf_trans_set.mask |= PF_TSET_DEBUG; 1730 break; 1731 } 1732 1733 case DIOCCLRRULECTRS: { 1734 /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ 1735 struct pf_ruleset *ruleset = &pf_main_ruleset; 1736 struct pf_rule *rule; 1737 1738 TAILQ_FOREACH(rule, 1739 ruleset->rules.active.ptr, entries) { 1740 rule->evaluations = 0; 1741 rule->packets[0] = rule->packets[1] = 0; 1742 rule->bytes[0] = rule->bytes[1] = 0; 1743 } 1744 break; 1745 } 1746 1747 case DIOCGETRULESETS: { 1748 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1749 struct pf_ruleset *ruleset; 1750 struct pf_anchor *anchor; 1751 1752 pr->path[sizeof(pr->path) - 1] = 0; 1753 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1754 error = EINVAL; 1755 break; 1756 } 1757 pr->nr = 0; 1758 if (ruleset->anchor == NULL) { 1759 /* XXX kludge for pf_main_ruleset */ 1760 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1761 if (anchor->parent == NULL) 1762 pr->nr++; 1763 } else { 1764 RB_FOREACH(anchor, pf_anchor_node, 1765 &ruleset->anchor->children) 1766 pr->nr++; 1767 } 1768 break; 1769 } 1770 1771 case DIOCGETRULESET: { 1772 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr; 1773 struct pf_ruleset *ruleset; 1774 struct pf_anchor *anchor; 1775 u_int32_t nr = 0; 1776 1777 pr->path[sizeof(pr->path) - 1] = 0; 1778 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { 1779 error = EINVAL; 1780 break; 1781 } 1782 pr->name[0] = 0; 1783 if (ruleset->anchor == NULL) { 1784 /* XXX kludge for pf_main_ruleset */ 1785 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) 1786 if (anchor->parent == NULL && nr++ == pr->nr) { 1787 strlcpy(pr->name, 
anchor->name, 1788 sizeof(pr->name)); 1789 break; 1790 } 1791 } else { 1792 RB_FOREACH(anchor, pf_anchor_node, 1793 &ruleset->anchor->children) 1794 if (nr++ == pr->nr) { 1795 strlcpy(pr->name, anchor->name, 1796 sizeof(pr->name)); 1797 break; 1798 } 1799 } 1800 if (!pr->name[0]) 1801 error = EBUSY; 1802 break; 1803 } 1804 1805 case DIOCRCLRTABLES: { 1806 struct pfioc_table *io = (struct pfioc_table *)addr; 1807 1808 if (io->pfrio_esize != 0) { 1809 error = ENODEV; 1810 break; 1811 } 1812 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel, 1813 io->pfrio_flags | PFR_FLAG_USERIOCTL); 1814 break; 1815 } 1816 1817 case DIOCRADDTABLES: { 1818 struct pfioc_table *io = (struct pfioc_table *)addr; 1819 1820 if (io->pfrio_esize != sizeof(struct pfr_table)) { 1821 error = ENODEV; 1822 break; 1823 } 1824 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size, 1825 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1826 break; 1827 } 1828 1829 case DIOCRDELTABLES: { 1830 struct pfioc_table *io = (struct pfioc_table *)addr; 1831 1832 if (io->pfrio_esize != sizeof(struct pfr_table)) { 1833 error = ENODEV; 1834 break; 1835 } 1836 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size, 1837 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1838 break; 1839 } 1840 1841 case DIOCRGETTABLES: { 1842 struct pfioc_table *io = (struct pfioc_table *)addr; 1843 1844 if (io->pfrio_esize != sizeof(struct pfr_table)) { 1845 error = ENODEV; 1846 break; 1847 } 1848 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer, 1849 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1850 break; 1851 } 1852 1853 case DIOCRGETTSTATS: { 1854 struct pfioc_table *io = (struct pfioc_table *)addr; 1855 1856 if (io->pfrio_esize != sizeof(struct pfr_tstats)) { 1857 error = ENODEV; 1858 break; 1859 } 1860 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer, 1861 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1862 break; 1863 } 1864 1865 case DIOCRCLRTSTATS: { 1866 
struct pfioc_table *io = (struct pfioc_table *)addr; 1867 1868 if (io->pfrio_esize != sizeof(struct pfr_table)) { 1869 error = ENODEV; 1870 break; 1871 } 1872 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size, 1873 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1874 break; 1875 } 1876 1877 case DIOCRSETTFLAGS: { 1878 struct pfioc_table *io = (struct pfioc_table *)addr; 1879 1880 if (io->pfrio_esize != sizeof(struct pfr_table)) { 1881 error = ENODEV; 1882 break; 1883 } 1884 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size, 1885 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange, 1886 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1887 break; 1888 } 1889 1890 case DIOCRCLRADDRS: { 1891 struct pfioc_table *io = (struct pfioc_table *)addr; 1892 1893 if (io->pfrio_esize != 0) { 1894 error = ENODEV; 1895 break; 1896 } 1897 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel, 1898 io->pfrio_flags | PFR_FLAG_USERIOCTL); 1899 break; 1900 } 1901 1902 case DIOCRADDADDRS: { 1903 struct pfioc_table *io = (struct pfioc_table *)addr; 1904 1905 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 1906 error = ENODEV; 1907 break; 1908 } 1909 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer, 1910 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags | 1911 PFR_FLAG_USERIOCTL); 1912 break; 1913 } 1914 1915 case DIOCRDELADDRS: { 1916 struct pfioc_table *io = (struct pfioc_table *)addr; 1917 1918 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 1919 error = ENODEV; 1920 break; 1921 } 1922 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer, 1923 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags | 1924 PFR_FLAG_USERIOCTL); 1925 break; 1926 } 1927 1928 case DIOCRSETADDRS: { 1929 struct pfioc_table *io = (struct pfioc_table *)addr; 1930 1931 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 1932 error = ENODEV; 1933 break; 1934 } 1935 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer, 1936 io->pfrio_size, &io->pfrio_size2, 
&io->pfrio_nadd, 1937 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags | 1938 PFR_FLAG_USERIOCTL, 0); 1939 break; 1940 } 1941 1942 case DIOCRGETADDRS: { 1943 struct pfioc_table *io = (struct pfioc_table *)addr; 1944 1945 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 1946 error = ENODEV; 1947 break; 1948 } 1949 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer, 1950 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1951 break; 1952 } 1953 1954 case DIOCRGETASTATS: { 1955 struct pfioc_table *io = (struct pfioc_table *)addr; 1956 1957 if (io->pfrio_esize != sizeof(struct pfr_astats)) { 1958 error = ENODEV; 1959 break; 1960 } 1961 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer, 1962 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL); 1963 break; 1964 } 1965 1966 case DIOCRCLRASTATS: { 1967 struct pfioc_table *io = (struct pfioc_table *)addr; 1968 1969 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 1970 error = ENODEV; 1971 break; 1972 } 1973 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer, 1974 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags | 1975 PFR_FLAG_USERIOCTL); 1976 break; 1977 } 1978 1979 case DIOCRTSTADDRS: { 1980 struct pfioc_table *io = (struct pfioc_table *)addr; 1981 1982 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 1983 error = ENODEV; 1984 break; 1985 } 1986 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer, 1987 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags | 1988 PFR_FLAG_USERIOCTL); 1989 break; 1990 } 1991 1992 case DIOCRINADEFINE: { 1993 struct pfioc_table *io = (struct pfioc_table *)addr; 1994 1995 if (io->pfrio_esize != sizeof(struct pfr_addr)) { 1996 error = ENODEV; 1997 break; 1998 } 1999 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer, 2000 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr, 2001 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL); 2002 break; 2003 } 2004 2005 case DIOCOSFPADD: { 2006 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2007 
error = pf_osfp_add(io); 2008 break; 2009 } 2010 2011 case DIOCOSFPGET: { 2012 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr; 2013 error = pf_osfp_get(io); 2014 break; 2015 } 2016 2017 case DIOCXBEGIN: { 2018 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2019 struct pfioc_trans_e *ioe; 2020 struct pfr_table *table; 2021 int i; 2022 2023 if (io->esize != sizeof(*ioe)) { 2024 error = ENODEV; 2025 goto fail; 2026 } 2027 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2028 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2029 pf_default_rule_new = pf_default_rule; 2030 bzero(&pf_trans_set, sizeof(pf_trans_set)); 2031 for (i = 0; i < io->size; i++) { 2032 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2033 free(table, M_TEMP, sizeof(*table)); 2034 free(ioe, M_TEMP, sizeof(*ioe)); 2035 error = EFAULT; 2036 goto fail; 2037 } 2038 switch (ioe->type) { 2039 case PF_TRANS_TABLE: 2040 bzero(table, sizeof(*table)); 2041 strlcpy(table->pfrt_anchor, ioe->anchor, 2042 sizeof(table->pfrt_anchor)); 2043 if ((error = pfr_ina_begin(table, 2044 &ioe->ticket, NULL, 0))) { 2045 free(table, M_TEMP, sizeof(*table)); 2046 free(ioe, M_TEMP, sizeof(*ioe)); 2047 goto fail; 2048 } 2049 break; 2050 default: 2051 if ((error = pf_begin_rules(&ioe->ticket, 2052 ioe->anchor))) { 2053 free(table, M_TEMP, sizeof(*table)); 2054 free(ioe, M_TEMP, sizeof(*ioe)); 2055 goto fail; 2056 } 2057 break; 2058 } 2059 if (copyout(ioe, io->array+i, sizeof(io->array[i]))) { 2060 free(table, M_TEMP, sizeof(*table)); 2061 free(ioe, M_TEMP, sizeof(*ioe)); 2062 error = EFAULT; 2063 goto fail; 2064 } 2065 } 2066 free(table, M_TEMP, sizeof(*table)); 2067 free(ioe, M_TEMP, sizeof(*ioe)); 2068 break; 2069 } 2070 2071 case DIOCXROLLBACK: { 2072 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2073 struct pfioc_trans_e *ioe; 2074 struct pfr_table *table; 2075 int i; 2076 2077 if (io->esize != sizeof(*ioe)) { 2078 error = ENODEV; 2079 goto fail; 2080 } 2081 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2082 
table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2083 for (i = 0; i < io->size; i++) { 2084 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2085 free(table, M_TEMP, sizeof(*table)); 2086 free(ioe, M_TEMP, sizeof(*ioe)); 2087 error = EFAULT; 2088 goto fail; 2089 } 2090 switch (ioe->type) { 2091 case PF_TRANS_TABLE: 2092 bzero(table, sizeof(*table)); 2093 strlcpy(table->pfrt_anchor, ioe->anchor, 2094 sizeof(table->pfrt_anchor)); 2095 if ((error = pfr_ina_rollback(table, 2096 ioe->ticket, NULL, 0))) { 2097 free(table, M_TEMP, sizeof(*table)); 2098 free(ioe, M_TEMP, sizeof(*ioe)); 2099 goto fail; /* really bad */ 2100 } 2101 break; 2102 default: 2103 if ((error = pf_rollback_rules(ioe->ticket, 2104 ioe->anchor))) { 2105 free(table, M_TEMP, sizeof(*table)); 2106 free(ioe, M_TEMP, sizeof(*ioe)); 2107 goto fail; /* really bad */ 2108 } 2109 break; 2110 } 2111 } 2112 free(table, M_TEMP, sizeof(*table)); 2113 free(ioe, M_TEMP, sizeof(*ioe)); 2114 break; 2115 } 2116 2117 case DIOCXCOMMIT: { 2118 struct pfioc_trans *io = (struct pfioc_trans *)addr; 2119 struct pfioc_trans_e *ioe; 2120 struct pfr_table *table; 2121 struct pf_ruleset *rs; 2122 int i; 2123 2124 if (io->esize != sizeof(*ioe)) { 2125 error = ENODEV; 2126 goto fail; 2127 } 2128 ioe = malloc(sizeof(*ioe), M_TEMP, M_WAITOK); 2129 table = malloc(sizeof(*table), M_TEMP, M_WAITOK); 2130 /* first makes sure everything will succeed */ 2131 for (i = 0; i < io->size; i++) { 2132 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2133 free(table, M_TEMP, sizeof(*table)); 2134 free(ioe, M_TEMP, sizeof(*ioe)); 2135 error = EFAULT; 2136 goto fail; 2137 } 2138 switch (ioe->type) { 2139 case PF_TRANS_TABLE: 2140 rs = pf_find_ruleset(ioe->anchor); 2141 if (rs == NULL || !rs->topen || ioe->ticket != 2142 rs->tticket) { 2143 free(table, M_TEMP, sizeof(*table)); 2144 free(ioe, M_TEMP, sizeof(*ioe)); 2145 error = EBUSY; 2146 goto fail; 2147 } 2148 break; 2149 default: 2150 rs = pf_find_ruleset(ioe->anchor); 2151 if (rs == NULL || 2152 
!rs->rules.inactive.open || 2153 rs->rules.inactive.ticket != 2154 ioe->ticket) { 2155 free(table, M_TEMP, sizeof(*table)); 2156 free(ioe, M_TEMP, sizeof(*ioe)); 2157 error = EBUSY; 2158 goto fail; 2159 } 2160 break; 2161 } 2162 } 2163 2164 /* 2165 * Checked already in DIOCSETLIMIT, but check again as the 2166 * situation might have changed. 2167 */ 2168 for (i = 0; i < PF_LIMIT_MAX; i++) { 2169 if (((struct pool *)pf_pool_limits[i].pp)->pr_nout > 2170 pf_pool_limits[i].limit_new) { 2171 free(table, M_TEMP, sizeof(*table)); 2172 free(ioe, M_TEMP, sizeof(*ioe)); 2173 error = EBUSY; 2174 goto fail; 2175 } 2176 } 2177 /* now do the commit - no errors should happen here */ 2178 for (i = 0; i < io->size; i++) { 2179 if (copyin(io->array+i, ioe, sizeof(*ioe))) { 2180 free(table, M_TEMP, sizeof(*table)); 2181 free(ioe, M_TEMP, sizeof(*ioe)); 2182 error = EFAULT; 2183 goto fail; 2184 } 2185 switch (ioe->type) { 2186 case PF_TRANS_TABLE: 2187 bzero(table, sizeof(*table)); 2188 strlcpy(table->pfrt_anchor, ioe->anchor, 2189 sizeof(table->pfrt_anchor)); 2190 if ((error = pfr_ina_commit(table, ioe->ticket, 2191 NULL, NULL, 0))) { 2192 free(table, M_TEMP, sizeof(*table)); 2193 free(ioe, M_TEMP, sizeof(*ioe)); 2194 goto fail; /* really bad */ 2195 } 2196 break; 2197 default: 2198 if ((error = pf_commit_rules(ioe->ticket, 2199 ioe->anchor))) { 2200 free(table, M_TEMP, sizeof(*table)); 2201 free(ioe, M_TEMP, sizeof(*ioe)); 2202 goto fail; /* really bad */ 2203 } 2204 break; 2205 } 2206 } 2207 for (i = 0; i < PF_LIMIT_MAX; i++) { 2208 if (pf_pool_limits[i].limit_new != 2209 pf_pool_limits[i].limit && 2210 pool_sethardlimit(pf_pool_limits[i].pp, 2211 pf_pool_limits[i].limit_new, NULL, 0) != 0) { 2212 free(table, M_TEMP, sizeof(*table)); 2213 free(ioe, M_TEMP, sizeof(*ioe)); 2214 error = EBUSY; 2215 goto fail; /* really bad */ 2216 } 2217 pf_pool_limits[i].limit = pf_pool_limits[i].limit_new; 2218 } 2219 for (i = 0; i < PFTM_MAX; i++) { 2220 int old = pf_default_rule.timeout[i]; 2221 
2222 pf_default_rule.timeout[i] = 2223 pf_default_rule_new.timeout[i]; 2224 if (pf_default_rule.timeout[i] == PFTM_INTERVAL && 2225 pf_default_rule.timeout[i] < old) 2226 wakeup(pf_purge_thread); 2227 } 2228 pfi_xcommit(); 2229 pf_trans_set_commit(); 2230 free(table, M_TEMP, sizeof(*table)); 2231 free(ioe, M_TEMP, sizeof(*ioe)); 2232 break; 2233 } 2234 2235 case DIOCGETSRCNODES: { 2236 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr; 2237 struct pf_src_node *n, *p, *pstore; 2238 u_int32_t nr = 0; 2239 int space = psn->psn_len; 2240 2241 if (space == 0) { 2242 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2243 nr++; 2244 psn->psn_len = sizeof(struct pf_src_node) * nr; 2245 break; 2246 } 2247 2248 pstore = malloc(sizeof(*pstore), M_TEMP, M_WAITOK); 2249 2250 p = psn->psn_src_nodes; 2251 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { 2252 int secs = time_uptime, diff; 2253 2254 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len) 2255 break; 2256 2257 bcopy(n, pstore, sizeof(*pstore)); 2258 bzero(&pstore->entry, sizeof(pstore->entry)); 2259 pstore->rule.ptr = NULL; 2260 pstore->kif = NULL; 2261 pstore->rule.nr = n->rule.ptr->nr; 2262 pstore->creation = secs - pstore->creation; 2263 if (pstore->expire > secs) 2264 pstore->expire -= secs; 2265 else 2266 pstore->expire = 0; 2267 2268 /* adjust the connection rate estimate */ 2269 diff = secs - n->conn_rate.last; 2270 if (diff >= n->conn_rate.seconds) 2271 pstore->conn_rate.count = 0; 2272 else 2273 pstore->conn_rate.count -= 2274 n->conn_rate.count * diff / 2275 n->conn_rate.seconds; 2276 2277 error = copyout(pstore, p, sizeof(*p)); 2278 if (error) { 2279 free(pstore, M_TEMP, sizeof(*pstore)); 2280 goto fail; 2281 } 2282 p++; 2283 nr++; 2284 } 2285 psn->psn_len = sizeof(struct pf_src_node) * nr; 2286 2287 free(pstore, M_TEMP, sizeof(*pstore)); 2288 break; 2289 } 2290 2291 case DIOCCLRSRCNODES: { 2292 struct pf_src_node *n; 2293 struct pf_state *state; 2294 2295 RB_FOREACH(state, pf_state_tree_id, &tree_id) 
2296 pf_src_tree_remove_state(state); 2297 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) 2298 n->expire = 1; 2299 pf_purge_expired_src_nodes(1); 2300 break; 2301 } 2302 2303 case DIOCKILLSRCNODES: { 2304 struct pf_src_node *sn; 2305 struct pf_state *s; 2306 struct pfioc_src_node_kill *psnk = 2307 (struct pfioc_src_node_kill *)addr; 2308 u_int killed = 0; 2309 2310 RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { 2311 if (PF_MATCHA(psnk->psnk_src.neg, 2312 &psnk->psnk_src.addr.v.a.addr, 2313 &psnk->psnk_src.addr.v.a.mask, 2314 &sn->addr, sn->af) && 2315 PF_MATCHA(psnk->psnk_dst.neg, 2316 &psnk->psnk_dst.addr.v.a.addr, 2317 &psnk->psnk_dst.addr.v.a.mask, 2318 &sn->raddr, sn->af)) { 2319 /* Handle state to src_node linkage */ 2320 if (sn->states != 0) 2321 RB_FOREACH(s, pf_state_tree_id, 2322 &tree_id) 2323 pf_state_rm_src_node(s, sn); 2324 sn->expire = 1; 2325 killed++; 2326 } 2327 } 2328 2329 if (killed > 0) 2330 pf_purge_expired_src_nodes(1); 2331 2332 psnk->psnk_killed = killed; 2333 break; 2334 } 2335 2336 case DIOCSETHOSTID: { 2337 u_int32_t *hostid = (u_int32_t *)addr; 2338 2339 if (*hostid == 0) 2340 pf_trans_set.hostid = arc4random(); 2341 else 2342 pf_trans_set.hostid = *hostid; 2343 pf_trans_set.mask |= PF_TSET_HOSTID; 2344 break; 2345 } 2346 2347 case DIOCOSFPFLUSH: 2348 pf_osfp_flush(); 2349 break; 2350 2351 case DIOCIGETIFACES: { 2352 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2353 2354 if (io->pfiio_esize != sizeof(struct pfi_kif)) { 2355 error = ENODEV; 2356 break; 2357 } 2358 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer, 2359 &io->pfiio_size); 2360 break; 2361 } 2362 2363 case DIOCSETIFFLAG: { 2364 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2365 2366 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags); 2367 break; 2368 } 2369 2370 case DIOCCLRIFFLAG: { 2371 struct pfioc_iface *io = (struct pfioc_iface *)addr; 2372 2373 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags); 2374 break; 2375 } 2376 2377 case 
DIOCSETREASS: {
		u_int32_t	*reass = (u_int32_t *)addr;

		/*
		 * Stage the reassembly setting; it takes effect only when
		 * pf_trans_set_commit() is run (see DIOCXCOMMIT above).
		 */
		pf_trans_set.reass = *reass;
		pf_trans_set.mask |= PF_TSET_REASS;
		break;
	}

	default:
		error = ENODEV;
		break;
	}
fail:
	/*
	 * Common exit path for all ioctl cases.
	 * NOTE(review): `s` (spl level) and pf_consistency_lock are taken
	 * in the function prologue, which is outside this chunk — the
	 * read/write choice below mirrors how the lock was acquired there.
	 */
	splx(s);
	if (flags & FWRITE)
		rw_exit_write(&pf_consistency_lock);
	else
		rw_exit_read(&pf_consistency_lock);
	return (error);
}

/*
 * Commit the global settings staged in pf_trans_set by the
 * DIOCSETSTATUSIF, DIOCSETDEBUG, DIOCSETHOSTID and DIOCSETREASS ioctls.
 * Only the fields whose PF_TSET_* bit is set in pf_trans_set.mask were
 * actually staged; they are copied into the live pf_status here.
 */
void
pf_trans_set_commit(void)
{
	if (pf_trans_set.mask & PF_TSET_STATUSIF)
		strlcpy(pf_status.ifname, pf_trans_set.statusif, IFNAMSIZ);
	if (pf_trans_set.mask & PF_TSET_DEBUG)
		pf_status.debug = pf_trans_set.debug;
	if (pf_trans_set.mask & PF_TSET_HOSTID)
		pf_status.hostid = pf_trans_set.hostid;
	if (pf_trans_set.mask & PF_TSET_REASS)
		pf_status.reass = pf_trans_set.reass;
}

/*
 * Copy a pool from a userland-supplied rule into a kernel rule.
 * The kif pointer is kernel-private and must not be trusted from
 * userland: it is cleared here and resolved later by pf_kif_setup()
 * in pf_rule_copyin().
 */
void
pf_pool_copyin(struct pf_pool *from, struct pf_pool *to)
{
	bcopy(from, to, sizeof(*to));
	to->kif = NULL;
}

/*
 * Copy a rule supplied by userland into a freshly allocated kernel
 * rule and resolve the kernel-side references (interface kifs,
 * overload table, rtables, queues, tags).  Returns nonzero errno on
 * failure (EINVAL for unresolvable references, EBUSY for missing
 * queue/tag/rtable resources); the ioctl handler then frees the rule.
 */
int
pf_rule_copyin(struct pf_rule *from, struct pf_rule *to,
    struct pf_ruleset *ruleset)
{
	int i;

	to->src = from->src;
	to->dst = from->dst;

	/* XXX union skip[] */

	strlcpy(to->label, from->label, sizeof(to->label));
	strlcpy(to->ifname, from->ifname, sizeof(to->ifname));
	strlcpy(to->rcv_ifname, from->rcv_ifname, sizeof(to->rcv_ifname));
	strlcpy(to->qname, from->qname, sizeof(to->qname));
	strlcpy(to->pqname, from->pqname, sizeof(to->pqname));
	strlcpy(to->tagname, from->tagname, sizeof(to->tagname));
	strlcpy(to->match_tagname, from->match_tagname,
	    sizeof(to->match_tagname));
	strlcpy(to->overload_tblname, from->overload_tblname,
	    sizeof(to->overload_tblname));

	/* pool kifs are cleared here and set up further below */
	pf_pool_copyin(&from->nat, &to->nat);
	pf_pool_copyin(&from->rdr, &to->rdr);
	pf_pool_copyin(&from->route, &to->route);

	if (pf_kif_setup(to->ifname, &to->kif))
		return (EINVAL);
	if (pf_kif_setup(to->rcv_ifname,
&to->rcv_kif)) 2447 return (EINVAL); 2448 if (to->overload_tblname[0]) { 2449 if ((to->overload_tbl = pfr_attach_table(ruleset, 2450 to->overload_tblname, 0)) == NULL) 2451 return (EINVAL); 2452 else 2453 to->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; 2454 } 2455 2456 if (pf_kif_setup(to->rdr.ifname, &to->rdr.kif)) 2457 return (EINVAL); 2458 if (pf_kif_setup(to->nat.ifname, &to->nat.kif)) 2459 return (EINVAL); 2460 if (pf_kif_setup(to->route.ifname, &to->route.kif)) 2461 return (EINVAL); 2462 2463 to->os_fingerprint = from->os_fingerprint; 2464 2465 to->rtableid = from->rtableid; 2466 if (to->rtableid >= 0 && !rtable_exists(to->rtableid)) 2467 return (EBUSY); 2468 to->onrdomain = from->onrdomain; 2469 if (to->onrdomain >= 0 && !rtable_exists(to->onrdomain)) 2470 return (EBUSY); 2471 if (to->onrdomain >= 0) /* make sure it is a real rdomain */ 2472 to->onrdomain = rtable_l2(to->onrdomain); 2473 2474 for (i = 0; i < PFTM_MAX; i++) 2475 to->timeout[i] = from->timeout[i]; 2476 to->states_tot = from->states_tot; 2477 to->max_states = from->max_states; 2478 to->max_src_nodes = from->max_src_nodes; 2479 to->max_src_states = from->max_src_states; 2480 to->max_src_conn = from->max_src_conn; 2481 to->max_src_conn_rate.limit = from->max_src_conn_rate.limit; 2482 to->max_src_conn_rate.seconds = from->max_src_conn_rate.seconds; 2483 2484 if (to->qname[0] != 0) { 2485 if ((to->qid = pf_qname2qid(to->qname, 0)) == 0) 2486 return (EBUSY); 2487 if (to->pqname[0] != 0) { 2488 if ((to->pqid = pf_qname2qid(to->pqname, 0)) == 0) 2489 return (EBUSY); 2490 } else 2491 to->pqid = to->qid; 2492 } 2493 to->rt_listid = from->rt_listid; 2494 to->prob = from->prob; 2495 to->return_icmp = from->return_icmp; 2496 to->return_icmp6 = from->return_icmp6; 2497 to->max_mss = from->max_mss; 2498 if (to->tagname[0]) 2499 if ((to->tag = pf_tagname2tag(to->tagname, 1)) == 0) 2500 return (EBUSY); 2501 if (to->match_tagname[0]) 2502 if ((to->match_tag = pf_tagname2tag(to->match_tagname, 1)) == 0) 2503 
return (EBUSY); 2504 to->scrub_flags = from->scrub_flags; 2505 to->uid = from->uid; 2506 to->gid = from->gid; 2507 to->rule_flag = from->rule_flag; 2508 to->action = from->action; 2509 to->direction = from->direction; 2510 to->log = from->log; 2511 to->logif = from->logif; 2512 #if NPFLOG > 0 2513 if (!to->log) 2514 to->logif = 0; 2515 #endif /* NPFLOG > 0 */ 2516 to->quick = from->quick; 2517 to->ifnot = from->ifnot; 2518 to->rcvifnot = from->rcvifnot; 2519 to->match_tag_not = from->match_tag_not; 2520 to->keep_state = from->keep_state; 2521 to->af = from->af; 2522 to->naf = from->naf; 2523 to->proto = from->proto; 2524 to->type = from->type; 2525 to->code = from->code; 2526 to->flags = from->flags; 2527 to->flagset = from->flagset; 2528 to->min_ttl = from->min_ttl; 2529 to->allow_opts = from->allow_opts; 2530 to->rt = from->rt; 2531 to->return_ttl = from->return_ttl; 2532 to->tos = from->tos; 2533 to->set_tos = from->set_tos; 2534 to->anchor_relative = from->anchor_relative; /* XXX */ 2535 to->anchor_wildcard = from->anchor_wildcard; /* XXX */ 2536 to->flush = from->flush; 2537 to->divert.addr = from->divert.addr; 2538 to->divert.port = from->divert.port; 2539 to->divert_packet.addr = from->divert_packet.addr; 2540 to->divert_packet.port = from->divert_packet.port; 2541 to->prio = from->prio; 2542 to->set_prio[0] = from->set_prio[0]; 2543 to->set_prio[1] = from->set_prio[1]; 2544 2545 return (0); 2546 } 2547