1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
5 * Copyright (c) 2014 Yandex LLC
6 * Copyright (c) 2014 Alexander V. Chernikov
7 *
8 * Supported by: Valeria Paoli
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 /*
34 * Control socket and rule management routines for ipfw.
35 * Control is currently implemented via IP_FW3 setsockopt() code.
36 */
37
38 #include "opt_ipfw.h"
39 #include "opt_inet.h"
40 #ifndef INET
41 #error IPFIREWALL requires INET.
42 #endif /* INET */
43 #include "opt_inet6.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h> /* struct m_tag used by nested headers */
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/rmlock.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sysctl.h>
58 #include <sys/syslog.h>
59 #include <sys/fnv_hash.h>
60 #include <net/if.h>
61 #include <net/route.h>
62 #include <net/vnet.h>
63 #include <vm/vm.h>
64 #include <vm/vm_extern.h>
65
66 #include <netinet/in.h>
67 #include <netinet/ip_var.h> /* hooks */
68 #include <netinet/ip_fw.h>
69
70 #include <netpfil/ipfw/ip_fw_private.h>
71 #include <netpfil/ipfw/ip_fw_table.h>
72
73 #ifdef MAC
74 #include <security/mac/mac_framework.h>
75 #endif
76
77 static int ipfw_ctl(struct sockopt *sopt);
78 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
79 struct rule_check_info *ci);
80 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size,
81 struct rule_check_info *ci);
82 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
83 struct rule_check_info *ci);
84 static int rewrite_rule_uidx(struct ip_fw_chain *chain,
85 struct rule_check_info *ci);
86
87 #define NAMEDOBJ_HASH_SIZE 32
88
89 struct namedobj_instance {
90 struct namedobjects_head *names;
91 struct namedobjects_head *values;
92 uint32_t nn_size; /* names hash size */
93 uint32_t nv_size; /* number hash size */
94 u_long *idx_mask; /* used items bitmask */
95 uint32_t max_blocks; /* number of "long" blocks in bitmask */
96 uint32_t count; /* number of items */
97 uint16_t free_off[IPFW_MAX_SETS]; /* first possible free offset */
98 objhash_hash_f *hash_f;
99 objhash_cmp_f *cmp_f;
100 };
101 #define BLOCK_ITEMS (8 * sizeof(u_long)) /* Number of items for ffsl() */
102
103 static uint32_t objhash_hash_name(struct namedobj_instance *ni,
104 const void *key, uint32_t kopt);
105 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
106 static int objhash_cmp_name(struct named_object *no, const void *name,
107 uint32_t set);
108
109 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
110
111 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
112 struct sockopt_data *sd);
113 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
114 struct sockopt_data *sd);
115 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
116 struct sockopt_data *sd);
117 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
118 struct sockopt_data *sd);
119 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
120 struct sockopt_data *sd);
121 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
122 struct sockopt_data *sd);
123 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
124 struct sockopt_data *sd);
125 static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
126 struct sockopt_data *sd);
127
128 /* ctl3 handler data */
129 struct mtx ctl3_lock;
130 #define CTL3_LOCK_INIT() mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF)
131 #define CTL3_LOCK_DESTROY() mtx_destroy(&ctl3_lock)
132 #define CTL3_LOCK() mtx_lock(&ctl3_lock)
133 #define CTL3_UNLOCK() mtx_unlock(&ctl3_lock)
134
135 static struct ipfw_sopt_handler *ctl3_handlers;
136 static size_t ctl3_hsize;
137 static uint64_t ctl3_refct, ctl3_gencnt;
138 #define CTL3_SMALLBUF 4096 /* small page-size write buffer */
139 #define CTL3_LARGEBUF (16 * 1024 * 1024) /* handle large rulesets */
140
141 static int ipfw_flush_sopt_data(struct sockopt_data *sd);
142
143 static struct ipfw_sopt_handler scodes[] = {
144 { IP_FW_XGET, 0, HDIR_GET, dump_config },
145 { IP_FW_XADD, 0, HDIR_BOTH, add_rules },
146 { IP_FW_XDEL, 0, HDIR_BOTH, del_rules },
147 { IP_FW_XZERO, 0, HDIR_SET, clear_rules },
148 { IP_FW_XRESETLOG, 0, HDIR_SET, clear_rules },
149 { IP_FW_XMOVE, 0, HDIR_SET, move_rules },
150 { IP_FW_SET_SWAP, 0, HDIR_SET, manage_sets },
151 { IP_FW_SET_MOVE, 0, HDIR_SET, manage_sets },
152 { IP_FW_SET_ENABLE, 0, HDIR_SET, manage_sets },
153 { IP_FW_DUMP_SOPTCODES, 0, HDIR_GET, dump_soptcodes },
154 { IP_FW_DUMP_SRVOBJECTS,0, HDIR_GET, dump_srvobjects },
155 };
156
157 static int
158 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule);
159 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd,
160 uint16_t *puidx, uint8_t *ptype);
161 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
162 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti);
163 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd,
164 struct tid_info *ti, struct obj_idx *pidx, int *unresolved);
165 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule);
166 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd,
167 struct obj_idx *oib, struct obj_idx *end);
168 static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
169 struct sockopt_data *sd);
170
171 /*
172 * Opcode object rewriter variables
173 */
174 struct opcode_obj_rewrite *ctl3_rewriters;
175 static size_t ctl3_rsize;
176
177 /*
178 * static variables followed by global ones
179 */
180
181 VNET_DEFINE_STATIC(uma_zone_t, ipfw_cntr_zone);
182 #define V_ipfw_cntr_zone VNET(ipfw_cntr_zone)
183
184 void
185 ipfw_init_counters(void)
186 {
187
188 V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
189 IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL,
190 UMA_ALIGN_PTR, UMA_ZONE_PCPU);
191 }
192
193 void
194 ipfw_destroy_counters(void)
195 {
196
197 uma_zdestroy(V_ipfw_cntr_zone);
198 }
199
200 struct ip_fw *
201 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
202 {
203 struct ip_fw *rule;
204
205 rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
206 rule->cntr = uma_zalloc_pcpu(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
207 rule->refcnt = 1;
208
209 return (rule);
210 }
211
212 void
213 ipfw_free_rule(struct ip_fw *rule)
214 {
215
216 /*
217 * We don't release refcnt here, since this function
218 * can be called without any locks held. The caller
219 * must release reference under IPFW_UH_WLOCK, and then
220 * call this function if refcount becomes 1.
221 */
222 if (rule->refcnt > 1)
223 return;
224 uma_zfree_pcpu(V_ipfw_cntr_zone, rule->cntr);
225 free(rule, M_IPFW);
226 }
227
228 /*
229 * Find the smallest rule >= key, id.
230 * We could use bsearch but it is so simple that we code it directly
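 * E.g., with rules numbered 100, 200 and 300, a key of 150 returns the
 * index of rule 200, the first rule with rulenum >= 150.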
231 */
232 int
233 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
234 {
235 int i, lo, hi;
236 struct ip_fw *r;
237
238 for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
239 i = (lo + hi) / 2;
240 r = chain->map[i];
241 if (r->rulenum < key)
242 lo = i + 1; /* continue from the next one */
243 else if (r->rulenum > key)
244 hi = i; /* this might be good */
245 else if (r->id < id)
246 lo = i + 1; /* continue from the next one */
247 else /* r->id >= id */
248 hi = i; /* this might be good */
249 }
250 return hi;
251 }
252
253 /*
254 * Builds skipto cache on rule set @map.
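 * The cache maps every possible rule number (0..65535) to the index in
 * @map of the first rule whose rulenum is greater than or equal to it,
 * so O_SKIPTO can jump to its target without a linear scan.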
255 */
256 static void
257 update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
258 {
259 int *smap, rulenum;
260 int i, mi;
261
262 IPFW_UH_WLOCK_ASSERT(chain);
263
264 mi = 0;
265 rulenum = map[mi]->rulenum;
266 smap = chain->idxmap_back;
267
268 if (smap == NULL)
269 return;
270
271 for (i = 0; i < 65536; i++) {
272 smap[i] = mi;
273 /* Use the same rule index while i < rulenum */
274 if (i != rulenum || i == 65535)
275 continue;
276 /* Find next rule with num > i */
277 rulenum = map[++mi]->rulenum;
278 while (rulenum == i)
279 rulenum = map[++mi]->rulenum;
280 }
281 }
282
283 /*
284 * Swaps prepared (backup) index with current one.
285 */
286 static void
287 swap_skipto_cache(struct ip_fw_chain *chain)
288 {
289 int *map;
290
291 IPFW_UH_WLOCK_ASSERT(chain);
292 IPFW_WLOCK_ASSERT(chain);
293
294 map = chain->idxmap;
295 chain->idxmap = chain->idxmap_back;
296 chain->idxmap_back = map;
297 }
298
299 /*
300 * Allocate and initialize skipto cache.
301 */
302 void
303 ipfw_init_skipto_cache(struct ip_fw_chain *chain)
304 {
305 int *idxmap, *idxmap_back;
306
307 idxmap = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK | M_ZERO);
308 idxmap_back = malloc(65536 * sizeof(int), M_IPFW, M_WAITOK);
309
310 /*
311 * Note we may be called at any time after initialization,
312 * for example, on first skipto rule, so we need to
313 * provide valid chain->idxmap on return
314 */
315
316 IPFW_UH_WLOCK(chain);
317 if (chain->idxmap != NULL) {
318 IPFW_UH_WUNLOCK(chain);
319 free(idxmap, M_IPFW);
320 free(idxmap_back, M_IPFW);
321 return;
322 }
323
324 /* Set backup pointer first to permit building cache */
325 chain->idxmap_back = idxmap_back;
326 update_skipto_cache(chain, chain->map);
327 IPFW_WLOCK(chain);
328 /* It is now safe to set chain->idxmap ptr */
329 chain->idxmap = idxmap;
330 swap_skipto_cache(chain);
331 IPFW_WUNLOCK(chain);
332 IPFW_UH_WUNLOCK(chain);
333 }
334
335 /*
336 * Destroys skipto cache.
337 */
338 void
339 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
340 {
341
342 if (chain->idxmap != NULL)
343 free(chain->idxmap, M_IPFW);
344 if (chain->idxmap_back != NULL)
345 free(chain->idxmap_back, M_IPFW);
346 }
347
348 /*
349 * allocate a new map, returns the chain locked. extra is the number
350 * of entries to add or delete.
351 */
352 static struct ip_fw **
353 get_map(struct ip_fw_chain *chain, int extra, int locked)
354 {
355
356 for (;;) {
357 struct ip_fw **map;
358 u_int i, mflags;
359
360 mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
361
362 i = chain->n_rules + extra;
363 map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
364 if (map == NULL) {
365 printf("%s: cannot allocate map\n", __FUNCTION__);
366 return NULL;
367 }
368 if (!locked)
369 IPFW_UH_WLOCK(chain);
370 if (i >= chain->n_rules + extra) /* good */
371 return map;
372 /* otherwise we lost the race, free and retry */
373 if (!locked)
374 IPFW_UH_WUNLOCK(chain);
375 free(map, M_IPFW);
376 }
377 }
378
379 /*
380 * swap the maps. It is supposed to be called with IPFW_UH_WLOCK held
381 */
382 static struct ip_fw **
383 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
384 {
385 struct ip_fw **old_map;
386
387 IPFW_WLOCK(chain);
388 chain->id++;
389 chain->n_rules = new_len;
390 old_map = chain->map;
391 chain->map = new_map;
392 swap_skipto_cache(chain);
393 IPFW_WUNLOCK(chain);
394 return old_map;
395 }
396
397 static void
398 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
399 {
400 struct timeval boottime;
401
402 cntr->size = sizeof(*cntr);
403
404 if (krule->cntr != NULL) {
405 cntr->pcnt = counter_u64_fetch(krule->cntr);
406 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
407 cntr->timestamp = krule->timestamp;
408 }
409 if (cntr->timestamp > 0) {
410 getboottime(&boottime);
411 cntr->timestamp += boottime.tv_sec;
412 }
413 }
414
415 static void
416 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
417 {
418 struct timeval boottime;
419
420 if (krule->cntr != NULL) {
421 cntr->pcnt = counter_u64_fetch(krule->cntr);
422 cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
423 cntr->timestamp = krule->timestamp;
424 }
425 if (cntr->timestamp > 0) {
426 getboottime(&boottime);
427 cntr->timestamp += boottime.tv_sec;
428 }
429 }
430
431 /*
432 * Copies rule @urule from v1 userland format (current)
433 * to kernel @krule.
434 * Assume @krule is zeroed.
435 */
436 static void
437 import_rule1(struct rule_check_info *ci)
438 {
439 struct ip_fw_rule *urule;
440 struct ip_fw *krule;
441
442 urule = (struct ip_fw_rule *)ci->urule;
443 krule = (struct ip_fw *)ci->krule;
444
445 /* copy header */
446 krule->act_ofs = urule->act_ofs;
447 krule->cmd_len = urule->cmd_len;
448 krule->rulenum = urule->rulenum;
449 krule->set = urule->set;
450 krule->flags = urule->flags;
451
452 /* Save rulenum offset */
453 ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
454
455 /* Copy opcodes */
456 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
457 }
458
459 /*
460 * Export rule into v1 format (Current).
461 * Layout:
462 * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
463 * [ ip_fw_rule ] OR
464 * [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
465 * ]
466 * Assume @data is zeroed.
467 */
468 static void
469 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
470 {
471 struct ip_fw_bcounter *cntr;
472 struct ip_fw_rule *urule;
473 ipfw_obj_tlv *tlv;
474
475 /* Fill in TLV header */
476 tlv = (ipfw_obj_tlv *)data;
477 tlv->type = IPFW_TLV_RULE_ENT;
478 tlv->length = len;
479
480 if (rcntrs != 0) {
481 /* Copy counters */
482 cntr = (struct ip_fw_bcounter *)(tlv + 1);
483 urule = (struct ip_fw_rule *)(cntr + 1);
484 export_cntr1_base(krule, cntr);
485 } else
486 urule = (struct ip_fw_rule *)(tlv + 1);
487
488 /* copy header */
489 urule->act_ofs = krule->act_ofs;
490 urule->cmd_len = krule->cmd_len;
491 urule->rulenum = krule->rulenum;
492 urule->set = krule->set;
493 urule->flags = krule->flags;
494 urule->id = krule->id;
495
496 /* Copy opcodes */
497 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
498 }
499
500 /*
501 * Copies rule @urule from FreeBSD8 userland format (v0)
502 * to kernel @krule.
503 * Assume @krule is zeroed.
504 */
505 static void
506 import_rule0(struct rule_check_info *ci)
507 {
508 struct ip_fw_rule0 *urule;
509 struct ip_fw *krule;
510 int cmdlen, l;
511 ipfw_insn *cmd;
512 ipfw_insn_limit *lcmd;
513 ipfw_insn_if *cmdif;
514
515 urule = (struct ip_fw_rule0 *)ci->urule;
516 krule = (struct ip_fw *)ci->krule;
517
518 /* copy header */
519 krule->act_ofs = urule->act_ofs;
520 krule->cmd_len = urule->cmd_len;
521 krule->rulenum = urule->rulenum;
522 krule->set = urule->set;
523 if ((urule->_pad & 1) != 0)
524 krule->flags |= IPFW_RULE_NOOPT;
525
526 /* Save rulenum offset */
527 ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);
528
529 /* Copy opcodes */
530 memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
531
532 /*
533 * Alter opcodes:
534 * 1) convert tablearg value from 65535 to 0
535 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room
536 * for targ).
537 * 3) convert table number in iface opcodes to u16
538 * 4) convert old `nat global` into new 65535
539 */
540 l = krule->cmd_len;
541 cmd = krule->cmd;
542 cmdlen = 0;
543
544 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
545 cmdlen = F_LEN(cmd);
546
547 switch (cmd->opcode) {
548 /* Opcodes supporting tablearg */
549 case O_TAG:
550 case O_TAGGED:
551 case O_PIPE:
552 case O_QUEUE:
553 case O_DIVERT:
554 case O_TEE:
555 case O_SKIPTO:
556 case O_CALLRETURN:
557 case O_NETGRAPH:
558 case O_NGTEE:
559 case O_NAT:
560 if (cmd->arg1 == IP_FW_TABLEARG)
561 cmd->arg1 = IP_FW_TARG;
562 else if (cmd->arg1 == 0)
563 cmd->arg1 = IP_FW_NAT44_GLOBAL;
564 break;
565 case O_SETFIB:
566 case O_SETDSCP:
567 case O_SETMARK:
568 case O_MARK:
569 if (cmd->arg1 == IP_FW_TABLEARG)
570 cmd->arg1 = IP_FW_TARG;
571 else
572 cmd->arg1 |= 0x8000;
573 break;
574 case O_LIMIT:
575 lcmd = (ipfw_insn_limit *)cmd;
576 if (lcmd->conn_limit == IP_FW_TABLEARG)
577 lcmd->conn_limit = IP_FW_TARG;
578 break;
579 /* Interface tables */
580 case O_XMIT:
581 case O_RECV:
582 case O_VIA:
583 /* Interface table, possibly */
584 cmdif = (ipfw_insn_if *)cmd;
585 if (cmdif->name[0] != '\1')
586 break;
587
588 cmdif->p.kidx = (uint16_t)cmdif->p.glob;
589 break;
590 }
591 }
592 }
593
594 /*
595 * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
596 */
597 static void
598 export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
599 {
600 int cmdlen, l;
601 ipfw_insn *cmd;
602 ipfw_insn_limit *lcmd;
603 ipfw_insn_if *cmdif;
604
605 /* copy header */
606 memset(urule, 0, len);
607 urule->act_ofs = krule->act_ofs;
608 urule->cmd_len = krule->cmd_len;
609 urule->rulenum = krule->rulenum;
610 urule->set = krule->set;
611 if ((krule->flags & IPFW_RULE_NOOPT) != 0)
612 urule->_pad |= 1;
613
614 /* Copy opcodes */
615 memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
616
617 /* Export counters */
618 export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);
619
620 /*
621 * Alter opcodes:
622 * 1) convert tablearg value from 0 to 65535
623 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
624 * 3) convert table number in iface opcodes to int
625 */
626 l = urule->cmd_len;
627 cmd = urule->cmd;
628 cmdlen = 0;
629
630 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
631 cmdlen = F_LEN(cmd);
632
633 switch (cmd->opcode) {
634 /* Opcodes supporting tablearg */
635 case O_TAG:
636 case O_TAGGED:
637 case O_PIPE:
638 case O_QUEUE:
639 case O_DIVERT:
640 case O_TEE:
641 case O_SKIPTO:
642 case O_CALLRETURN:
643 case O_NETGRAPH:
644 case O_NGTEE:
645 case O_NAT:
646 if (cmd->arg1 == IP_FW_TARG)
647 cmd->arg1 = IP_FW_TABLEARG;
648 else if (cmd->arg1 == IP_FW_NAT44_GLOBAL)
649 cmd->arg1 = 0;
650 break;
651 case O_SETFIB:
652 case O_SETDSCP:
653 case O_SETMARK:
654 case O_MARK:
655 if (cmd->arg1 == IP_FW_TARG)
656 cmd->arg1 = IP_FW_TABLEARG;
657 else
658 cmd->arg1 &= ~0x8000;
659 break;
660 case O_LIMIT:
661 lcmd = (ipfw_insn_limit *)cmd;
662 if (lcmd->conn_limit == IP_FW_TARG)
663 lcmd->conn_limit = IP_FW_TABLEARG;
664 break;
665 /* Interface tables */
666 case O_XMIT:
667 case O_RECV:
668 case O_VIA:
669 /* Interface table, possibly */
670 cmdif = (ipfw_insn_if *)cmd;
671 if (cmdif->name[0] != '\1')
672 break;
673
674 cmdif->p.glob = cmdif->p.kidx;
675 break;
676 }
677 }
678 }
679
680 /*
681 * Add new rule(s) to the list possibly creating rule number for each.
682 * Update the rule_number in the input struct so the caller knows it as well.
683 * Must be called without IPFW_UH held
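 * On failure, any kernel objects already referenced for the new rules
 * are released again before returning.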
684 */
685 static int
686 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
687 {
688 int error, i, insert_before, tcount;
689 uint16_t rulenum, *pnum;
690 struct rule_check_info *ci;
691 struct ip_fw *krule;
692 struct ip_fw **map; /* the new array of pointers */
693
694 /* Check if we need to do table/obj index remap */
695 tcount = 0;
696 for (ci = rci, i = 0; i < count; ci++, i++) {
697 if (ci->object_opcodes == 0)
698 continue;
699
700 /*
701 * Rule has some object opcodes.
702 * We need to find (and create non-existing)
703 * kernel objects, and reference existing ones.
704 */
705 error = rewrite_rule_uidx(chain, ci);
706 if (error != 0) {
707 /*
708 * rewrite failed, state for current rule
709 * has been reverted. Check if we need to
710 * revert more.
711 */
712 if (tcount > 0) {
713 /*
714 * We have some more table rules
715 * we need to rollback.
716 */
717
718 IPFW_UH_WLOCK(chain);
719 while (ci != rci) {
720 ci--;
721 if (ci->object_opcodes == 0)
722 continue;
723 unref_rule_objects(chain,ci->krule);
724 }
725 IPFW_UH_WUNLOCK(chain);
726 }
727
728 return (error);
729 }
730
731 tcount++;
732 }
733
734 /* get_map returns with IPFW_UH_WLOCK held if successful */
735 map = get_map(chain, count, 0 /* not locked */);
736 if (map == NULL) {
737 if (tcount > 0) {
738 /* Unbind tables */
739 IPFW_UH_WLOCK(chain);
740 for (ci = rci, i = 0; i < count; ci++, i++) {
741 if (ci->object_opcodes == 0)
742 continue;
743
744 unref_rule_objects(chain, ci->krule);
745 }
746 IPFW_UH_WUNLOCK(chain);
747 }
748
749 return (ENOSPC);
750 }
751
752 if (V_autoinc_step < 1)
753 V_autoinc_step = 1;
754 else if (V_autoinc_step > 1000)
755 V_autoinc_step = 1000;
756
757 /* FIXME: Handle count > 1 */
758 ci = rci;
759 krule = ci->krule;
760 rulenum = krule->rulenum;
761
762 /* find the insertion point, we will insert before */
763 insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
764 i = ipfw_find_rule(chain, insert_before, 0);
765 /* duplicate first part */
766 if (i > 0)
767 bcopy(chain->map, map, i * sizeof(struct ip_fw *));
768 map[i] = krule;
769 /* duplicate remaining part, we always have the default rule */
770 bcopy(chain->map + i, map + i + 1,
771 sizeof(struct ip_fw *) *(chain->n_rules - i));
772 if (rulenum == 0) {
773 /* Compute rule number and write it back */
774 rulenum = i > 0 ? map[i-1]->rulenum : 0;
775 if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
776 rulenum += V_autoinc_step;
777 krule->rulenum = rulenum;
778 /* Save number to userland rule */
779 pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff);
780 *pnum = rulenum;
781 }
782
783 krule->id = chain->id + 1;
784 update_skipto_cache(chain, map);
785 map = swap_map(chain, map, chain->n_rules + 1);
786 chain->static_len += RULEUSIZE0(krule);
787 IPFW_UH_WUNLOCK(chain);
788 if (map)
789 free(map, M_IPFW);
790 return (0);
791 }
792
793 int
794 ipfw_add_protected_rule(struct ip_fw_chain *chain, struct ip_fw *rule,
795 int locked)
796 {
797 struct ip_fw **map;
798
799 map = get_map(chain, 1, locked);
800 if (map == NULL)
801 return (ENOMEM);
802 if (chain->n_rules > 0)
803 bcopy(chain->map, map,
804 chain->n_rules * sizeof(struct ip_fw *));
805 map[chain->n_rules] = rule;
806 rule->rulenum = IPFW_DEFAULT_RULE;
807 rule->set = RESVD_SET;
808 rule->id = chain->id + 1;
809 /* We add the rule at the end of the chain, so no need to update the skipto cache */
810 map = swap_map(chain, map, chain->n_rules + 1);
811 chain->static_len += RULEUSIZE0(rule);
812 IPFW_UH_WUNLOCK(chain);
813 free(map, M_IPFW);
814 return (0);
815 }
816
817 /*
818 * Adds @rule to the list of rules to reap
819 */
820 void
821 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
822 struct ip_fw *rule)
823 {
824
825 IPFW_UH_WLOCK_ASSERT(chain);
826
827 /* Unlink rule from everywhere */
828 unref_rule_objects(chain, rule);
829
830 rule->next = *head;
831 *head = rule;
832 }
833
834 /*
835 * Reclaim storage associated with a list of rules. This is
836 * typically the list created using remove_rule.
837 * A NULL pointer on input is handled correctly.
838 */
839 void
840 ipfw_reap_rules(struct ip_fw *head)
841 {
842 struct ip_fw *rule;
843
844 while ((rule = head) != NULL) {
845 head = head->next;
846 ipfw_free_rule(rule);
847 }
848 }
849
850 /*
851 * Rules to keep are
852 * (default || reserved || !match_set || !match_number)
853 * where
854 * default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
855 * // the default rule is always protected
856 *
857 * reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
858 * // RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
859 *
860 * match_set ::= (cmd == 0 || rule->set == set)
861 * // set number is ignored for cmd == 0
862 *
863 * match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
864 * // number is ignored for cmd == 1 or n == 0
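 * Example: a request with IPFW_RCFLAG_RANGE set and
 * start_rule == end_rule == 100 matches only rules numbered exactly 100;
 * the default rule never matches unless IPFW_RCFLAG_DEFAULT is set.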
865 *
866 */
867 int
868 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
869 {
870
871 /* Don't match default rule for modification queries */
872 if (rule->rulenum == IPFW_DEFAULT_RULE &&
873 (rt->flags & IPFW_RCFLAG_DEFAULT) == 0)
874 return (0);
875
876 /* Don't match rules in reserved set for flush requests */
877 if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
878 return (0);
879
880 /* If we're filtering by set, don't match other sets */
881 if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
882 return (0);
883
884 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
885 (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
886 return (0);
887
888 return (1);
889 }
890
891 struct manage_sets_args {
892 uint16_t set;
893 uint8_t new_set;
894 };
895
896 static int
897 swap_sets_cb(struct namedobj_instance *ni, struct named_object *no,
898 void *arg)
899 {
900 struct manage_sets_args *args;
901
902 args = (struct manage_sets_args *)arg;
903 if (no->set == (uint8_t)args->set)
904 no->set = args->new_set;
905 else if (no->set == args->new_set)
906 no->set = (uint8_t)args->set;
907 return (0);
908 }
909
910 static int
911 move_sets_cb(struct namedobj_instance *ni, struct named_object *no,
912 void *arg)
913 {
914 struct manage_sets_args *args;
915
916 args = (struct manage_sets_args *)arg;
917 if (no->set == (uint8_t)args->set)
918 no->set = args->new_set;
919 return (0);
920 }
921
922 static int
923 test_sets_cb(struct namedobj_instance *ni, struct named_object *no,
924 void *arg)
925 {
926 struct manage_sets_args *args;
927
928 args = (struct manage_sets_args *)arg;
929 if (no->set != (uint8_t)args->set)
930 return (0);
931 if (ipfw_objhash_lookup_name_type(ni, args->new_set,
932 no->etlv, no->name) != NULL)
933 return (EEXIST);
934 return (0);
935 }
936
937 /*
938 * Generic function to handle moving and swapping sets.
939 */
940 int
941 ipfw_obj_manage_sets(struct namedobj_instance *ni, uint16_t type,
942 uint16_t set, uint8_t new_set, enum ipfw_sets_cmd cmd)
943 {
944 struct manage_sets_args args;
945 struct named_object *no;
946
947 args.set = set;
948 args.new_set = new_set;
949 switch (cmd) {
950 case SWAP_ALL:
951 return (ipfw_objhash_foreach_type(ni, swap_sets_cb,
952 &args, type));
953 case TEST_ALL:
954 return (ipfw_objhash_foreach_type(ni, test_sets_cb,
955 &args, type));
956 case MOVE_ALL:
957 return (ipfw_objhash_foreach_type(ni, move_sets_cb,
958 &args, type));
959 case COUNT_ONE:
960 /*
961 * @set used to pass kidx.
962 * When @new_set is zero - reset object counter,
963 * otherwise increment it.
964 */
965 no = ipfw_objhash_lookup_kidx(ni, set);
966 if (new_set != 0)
967 no->ocnt++;
968 else
969 no->ocnt = 0;
970 return (0);
971 case TEST_ONE:
972 /* @set used to pass kidx */
973 no = ipfw_objhash_lookup_kidx(ni, set);
974 /*
975 * First check the number of references:
976 * when it differs, this means other rules are holding
977 * a reference to the given object, so it is not possible to
978 * change its set. Note that refcnt may account for references
979 * to some not-yet-added rules. Since we don't know
980 * their numbers (or even whether they will be added) it is
981 * perfectly OK to return an error here.
982 */
983 if (no->ocnt != no->refcnt)
984 return (EBUSY);
985 if (ipfw_objhash_lookup_name_type(ni, new_set, type,
986 no->name) != NULL)
987 return (EEXIST);
988 return (0);
989 case MOVE_ONE:
990 /* @set used to pass kidx */
991 no = ipfw_objhash_lookup_kidx(ni, set);
992 no->set = new_set;
993 return (0);
994 }
995 return (EINVAL);
996 }
997
998 /*
999 * Delete rules matching range @rt.
1000 * Saves number of deleted rules in @ndel.
1001 *
1002 * Returns 0 on success.
1003 */
1004 static int
1005 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
1006 {
1007 struct ip_fw *reap, *rule, **map;
1008 int end, start;
1009 int i, n, ndyn, ofs;
1010
1011 reap = NULL;
1012 IPFW_UH_WLOCK(chain); /* arbitrate writers */
1013
1014 /*
1015 * Stage 1: Determine range to inspect.
1016 * Range is half-open, i.e. [start, end).
1017 */
1018 start = 0;
1019 end = chain->n_rules - 1;
1020
1021 if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
1022 start = ipfw_find_rule(chain, rt->start_rule, 0);
1023
1024 if (rt->end_rule >= IPFW_DEFAULT_RULE)
1025 rt->end_rule = IPFW_DEFAULT_RULE - 1;
1026 end = ipfw_find_rule(chain, rt->end_rule, UINT32_MAX);
1027 }
1028
1029 if (rt->flags & IPFW_RCFLAG_DYNAMIC) {
1030 /*
1031 * Deletion was requested only for dynamic states.
1032 */
1033 *ndel = 0;
1034 ipfw_expire_dyn_states(chain, rt);
1035 IPFW_UH_WUNLOCK(chain);
1036 return (0);
1037 }
1038
1039 /* Allocate new map of the same size */
1040 map = get_map(chain, 0, 1 /* locked */);
1041 if (map == NULL) {
1042 IPFW_UH_WUNLOCK(chain);
1043 return (ENOMEM);
1044 }
1045
1046 n = 0;
1047 ndyn = 0;
1048 ofs = start;
1049 /* 1. bcopy the initial part of the map */
1050 if (start > 0)
1051 bcopy(chain->map, map, start * sizeof(struct ip_fw *));
1052 /* 2. copy active rules between start and end */
1053 for (i = start; i < end; i++) {
1054 rule = chain->map[i];
1055 if (ipfw_match_range(rule, rt) == 0) {
1056 map[ofs++] = rule;
1057 continue;
1058 }
1059
1060 n++;
1061 if (ipfw_is_dyn_rule(rule) != 0)
1062 ndyn++;
1063 }
1064 /* 3. copy the final part of the map */
1065 bcopy(chain->map + end, map + ofs,
1066 (chain->n_rules - end) * sizeof(struct ip_fw *));
1067 /* 4. recalculate skipto cache */
1068 update_skipto_cache(chain, map);
1069 /* 5. swap the maps (under UH_WLOCK + WLOCK) */
1070 map = swap_map(chain, map, chain->n_rules - n);
1071 /* 6. Remove all dynamic states originated by deleted rules */
1072 if (ndyn > 0)
1073 ipfw_expire_dyn_states(chain, rt);
1074 /* 7. now remove the rules deleted from the old map */
1075 for (i = start; i < end; i++) {
1076 rule = map[i];
1077 if (ipfw_match_range(rule, rt) == 0)
1078 continue;
1079 chain->static_len -= RULEUSIZE0(rule);
1080 ipfw_reap_add(chain, &reap, rule);
1081 }
1082 IPFW_UH_WUNLOCK(chain);
1083
1084 ipfw_reap_rules(reap);
1085 if (map != NULL)
1086 free(map, M_IPFW);
1087 *ndel = n;
1088 return (0);
1089 }
1090
1091 static int
1092 move_objects(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
1093 {
1094 struct opcode_obj_rewrite *rw;
1095 struct ip_fw *rule;
1096 ipfw_insn *cmd;
1097 int cmdlen, i, l, c;
1098 uint16_t kidx;
1099
1100 IPFW_UH_WLOCK_ASSERT(ch);
1101
1102 /* Stage 1: count number of references by given rules */
1103 for (c = 0, i = 0; i < ch->n_rules - 1; i++) {
1104 rule = ch->map[i];
1105 if (ipfw_match_range(rule, rt) == 0)
1106 continue;
1107 if (rule->set == rt->new_set) /* nothing to do */
1108 continue;
1109 /* Search opcodes with named objects */
1110 for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1111 l > 0; l -= cmdlen, cmd += cmdlen) {
1112 cmdlen = F_LEN(cmd);
1113 rw = find_op_rw(cmd, &kidx, NULL);
1114 if (rw == NULL || rw->manage_sets == NULL)
1115 continue;
1116 /*
1117 * When manage_sets() returns a non-zero value for the
1118 * COUNT_ONE command, consider it an indication that the object
1119 * doesn't support sets (e.g. it is disabled via sysctl).
1120 * So, skip checks for this object.
1121 */
1122 if (rw->manage_sets(ch, kidx, 1, COUNT_ONE) != 0)
1123 continue;
1124 c++;
1125 }
1126 }
1127 if (c == 0) /* No objects found */
1128 return (0);
1129 /* Stage 2: verify "ownership" */
1130 for (c = 0, i = 0; (i < ch->n_rules - 1) && c == 0; i++) {
1131 rule = ch->map[i];
1132 if (ipfw_match_range(rule, rt) == 0)
1133 continue;
1134 if (rule->set == rt->new_set) /* nothing to do */
1135 continue;
1136 /* Search opcodes with named objects */
1137 for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1138 l > 0 && c == 0; l -= cmdlen, cmd += cmdlen) {
1139 cmdlen = F_LEN(cmd);
1140 rw = find_op_rw(cmd, &kidx, NULL);
1141 if (rw == NULL || rw->manage_sets == NULL)
1142 continue;
1143 /* Test for ownership and conflicting names */
1144 c = rw->manage_sets(ch, kidx,
1145 (uint8_t)rt->new_set, TEST_ONE);
1146 }
1147 }
1148 /* Stage 3: change set and cleanup */
1149 for (i = 0; i < ch->n_rules - 1; i++) {
1150 rule = ch->map[i];
1151 if (ipfw_match_range(rule, rt) == 0)
1152 continue;
1153 if (rule->set == rt->new_set) /* nothing to do */
1154 continue;
1155 /* Search opcodes with named objects */
1156 for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1157 l > 0; l -= cmdlen, cmd += cmdlen) {
1158 cmdlen = F_LEN(cmd);
1159 rw = find_op_rw(cmd, &kidx, NULL);
1160 if (rw == NULL || rw->manage_sets == NULL)
1161 continue;
1162 /* cleanup object counter */
1163 rw->manage_sets(ch, kidx,
1164 0 /* reset counter */, COUNT_ONE);
1165 if (c != 0)
1166 continue;
1167 /* change set */
1168 rw->manage_sets(ch, kidx,
1169 (uint8_t)rt->new_set, MOVE_ONE);
1170 }
1171 }
1172 return (c);
1173 }
1174
1175 /*
1176 * Moves rules matching range @rt
1177 * into set @rt->new_set.
1178 *
1179 * Returns 0 on success.
1180 */
1181 static int
1182 move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1183 {
1184 struct ip_fw *rule;
1185 int i;
1186
1187 IPFW_UH_WLOCK(chain);
1188
1189 /*
1190 * Move rules with matching parameters to a new set.
1191 * This one is much more complex. We have to ensure
1192 * that all referenced tables (if any) are referenced
1193 * by given rule subset only. Otherwise, we can't move
1194 * them to new set and have to return error.
1195 */
1196 if ((i = move_objects(chain, rt)) != 0) {
1197 IPFW_UH_WUNLOCK(chain);
1198 return (i);
1199 }
1200
1201 /* XXX: We have to do swap holding WLOCK */
1202 for (i = 0; i < chain->n_rules; i++) {
1203 rule = chain->map[i];
1204 if (ipfw_match_range(rule, rt) == 0)
1205 continue;
1206 rule->set = rt->new_set;
1207 }
1208
1209 IPFW_UH_WUNLOCK(chain);
1210
1211 return (0);
1212 }
1213
1214 /*
1215 * Returns pointer to action instruction, skips all possible rule
1216 * modifiers like O_LOG, O_TAG, O_ALTQ.
1217 */
1218 ipfw_insn *
1219 ipfw_get_action(struct ip_fw *rule)
1220 {
1221 ipfw_insn *cmd;
1222 int l, cmdlen;
1223
1224 cmd = ACTION_PTR(rule);
1225 l = rule->cmd_len - rule->act_ofs;
1226 while (l > 0) {
1227 switch (cmd->opcode) {
1228 case O_ALTQ:
1229 case O_LOG:
1230 case O_TAG:
1231 break;
1232 default:
1233 return (cmd);
1234 }
1235 cmdlen = F_LEN(cmd);
1236 l -= cmdlen;
1237 cmd += cmdlen;
1238 }
1239 panic("%s: rule (%p) has no action opcode", __func__, rule);
1240 return (NULL);
1241 }
1242
1243 /*
1244 * Clear counters for a specific rule.
1245 * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
1246 * so we only care that rules do not disappear.
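 * If the first opcode in the rule's action block is O_LOG, its
 * remaining log budget (log_left) is also reset to max_log.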
1247 */
1248 static void
1249 clear_counters(struct ip_fw *rule, int log_only)
1250 {
1251 ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
1252
1253 if (log_only == 0)
1254 IPFW_ZERO_RULE_COUNTER(rule);
1255 if (l->o.opcode == O_LOG)
1256 l->log_left = l->max_log;
1257 }
1258
1259 /*
1260 * Flushes rule counters and/or log values on matching range.
1261 *
1262 * Returns number of items cleared.
1263 */
1264 static int
1265 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
1266 {
1267 struct ip_fw *rule;
1268 int num;
1269 int i;
1270
1271 num = 0;
1272 rt->flags |= IPFW_RCFLAG_DEFAULT;
1273
1274 IPFW_UH_WLOCK(chain); /* arbitrate writers */
1275 for (i = 0; i < chain->n_rules; i++) {
1276 rule = chain->map[i];
1277 if (ipfw_match_range(rule, rt) == 0)
1278 continue;
1279 clear_counters(rule, log_only);
1280 num++;
1281 }
1282 IPFW_UH_WUNLOCK(chain);
1283
1284 return (num);
1285 }
1286
1287 static int
1288 check_range_tlv(ipfw_range_tlv *rt)
1289 {
1290
1291 if (rt->head.length != sizeof(*rt))
1292 return (1);
1293 if (rt->start_rule > rt->end_rule)
1294 return (1);
1295 if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
1296 return (1);
1297
1298 if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
1299 return (1);
1300
1301 return (0);
1302 }
1303
1304 /*
1305 * Delete rules matching specified parameters
1306 * Data layout (v0)(current):
1307 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1308 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1309 *
1310 * Saves number of deleted rules in ipfw_range_tlv->new_set.
1311 *
1312 * Returns 0 on success.
1313 */
1314 static int
1315 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1316 struct sockopt_data *sd)
1317 {
1318 ipfw_range_header *rh;
1319 int error, ndel;
1320
1321 if (sd->valsize != sizeof(*rh))
1322 return (EINVAL);
1323
1324 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1325
1326 if (check_range_tlv(&rh->range) != 0)
1327 return (EINVAL);
1328
1329 ndel = 0;
1330 if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1331 return (error);
1332
1333 /* Save number of rules deleted */
1334 rh->range.new_set = ndel;
1335 return (0);
1336 }
1337
1338 /*
1339 * Move rules/sets matching specified parameters
1340 * Data layout (v0)(current):
1341 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1342 *
1343 * Returns 0 on success.
1344 */
1345 static int
1346 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1347 struct sockopt_data *sd)
1348 {
1349 ipfw_range_header *rh;
1350
1351 if (sd->valsize != sizeof(*rh))
1352 return (EINVAL);
1353
1354 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1355
1356 if (check_range_tlv(&rh->range) != 0)
1357 return (EINVAL);
1358
1359 return (move_range(chain, &rh->range));
1360 }
1361
1362 /*
1363 * Clear rule accounting data matching specified parameters
1364 * Data layout (v0)(current):
1365 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1366 * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1367 *
1368 * Saves number of cleared rules in ipfw_range_tlv->new_set.
1369 *
1370 * Returns 0 on success.
1371 */
1372 static int
1373 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1374 struct sockopt_data *sd)
1375 {
1376 ipfw_range_header *rh;
1377 int log_only, num;
1378 char *msg;
1379
1380 if (sd->valsize != sizeof(*rh))
1381 return (EINVAL);
1382
1383 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1384
1385 if (check_range_tlv(&rh->range) != 0)
1386 return (EINVAL);
1387
1388 log_only = (op3->opcode == IP_FW_XRESETLOG);
1389
1390 num = clear_range(chain, &rh->range, log_only);
1391
1392 if (rh->range.flags & IPFW_RCFLAG_ALL)
1393 msg = log_only ? "All logging counts reset" :
1394 "Accounting cleared";
1395 else
1396 msg = log_only ? "logging count reset" : "cleared";
1397
1398 if (V_fw_verbose) {
1399 int lev = LOG_SECURITY | LOG_NOTICE;
1400 log(lev, "ipfw: %s.\n", msg);
1401 }
1402
1403 /* Save number of rules cleared */
1404 rh->range.new_set = num;
1405 return (0);
1406 }
1407
1408 static void
1409 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1410 {
1411 uint32_t v_set;
1412
1413 IPFW_UH_WLOCK_ASSERT(chain);
1414
1415 /* Change enabled/disabled sets mask */
1416 v_set = (V_set_disable | rt->set) & ~rt->new_set;
1417 v_set &= ~(1 << RESVD_SET); /* keep RESVD_SET always enabled */
1418 IPFW_WLOCK(chain);
1419 V_set_disable = v_set;
1420 IPFW_WUNLOCK(chain);
1421 }
1422
1423 static int
1424 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
1425 {
1426 struct opcode_obj_rewrite *rw;
1427 struct ip_fw *rule;
1428 int i;
1429
1430 IPFW_UH_WLOCK_ASSERT(chain);
1431
1432 if (rt->set == rt->new_set) /* nothing to do */
1433 return (0);
1434
1435 if (mv != 0) {
1436 /*
1437 * Before moving the rules we need to check that
1438 * there aren't any conflicting named objects.
1439 */
1440 for (rw = ctl3_rewriters;
1441 rw < ctl3_rewriters + ctl3_rsize; rw++) {
1442 if (rw->manage_sets == NULL)
1443 continue;
1444 i = rw->manage_sets(chain, (uint8_t)rt->set,
1445 (uint8_t)rt->new_set, TEST_ALL);
1446 if (i != 0)
1447 return (EEXIST);
1448 }
1449 }
1450 /* Swap or move two sets */
1451 for (i = 0; i < chain->n_rules - 1; i++) {
1452 rule = chain->map[i];
1453 if (rule->set == (uint8_t)rt->set)
1454 rule->set = (uint8_t)rt->new_set;
1455 else if (rule->set == (uint8_t)rt->new_set && mv == 0)
1456 rule->set = (uint8_t)rt->set;
1457 }
1458 for (rw = ctl3_rewriters; rw < ctl3_rewriters + ctl3_rsize; rw++) {
1459 if (rw->manage_sets == NULL)
1460 continue;
1461 rw->manage_sets(chain, (uint8_t)rt->set,
1462 (uint8_t)rt->new_set, mv != 0 ? MOVE_ALL: SWAP_ALL);
1463 }
1464 return (0);
1465 }
1466
1467 /*
1468 * Swaps or moves set
1469 * Data layout (v0)(current):
1470 * Request: [ ipfw_obj_header ipfw_range_tlv ]
1471 *
1472 * Returns 0 on success.
1473 */
1474 static int
1475 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1476 struct sockopt_data *sd)
1477 {
1478 ipfw_range_header *rh;
1479 int ret;
1480
1481 if (sd->valsize != sizeof(*rh))
1482 return (EINVAL);
1483
1484 rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1485
1486 if (rh->range.head.length != sizeof(ipfw_range_tlv))
1487 return (1);
1488 /* enable_sets() expects bitmasks. */
1489 if (op3->opcode != IP_FW_SET_ENABLE &&
1490 (rh->range.set >= IPFW_MAX_SETS ||
1491 rh->range.new_set >= IPFW_MAX_SETS))
1492 return (EINVAL);
1493
1494 ret = 0;
1495 IPFW_UH_WLOCK(chain);
1496 switch (op3->opcode) {
1497 case IP_FW_SET_SWAP:
1498 case IP_FW_SET_MOVE:
1499 ret = swap_sets(chain, &rh->range,
1500 op3->opcode == IP_FW_SET_MOVE);
1501 break;
1502 case IP_FW_SET_ENABLE:
1503 enable_sets(chain, &rh->range);
1504 break;
1505 }
1506 IPFW_UH_WUNLOCK(chain);
1507
1508 return (ret);
1509 }
1510
1511 /**
1512 * Remove all rules with given number, or do set manipulation.
1513 * Assumes chain != NULL && *chain != NULL.
1514 *
1515 * The argument is an uint32_t. The low 16 bit are the rule or set number;
1516 * the next 8 bits are the new set; the top 8 bits indicate the command:
1517 *
1518 * 0 delete rules numbered "rulenum"
1519 * 1 delete rules in set "rulenum"
1520 * 2 move rules "rulenum" to set "new_set"
1521 * 3 move rules from set "rulenum" to set "new_set"
1522 * 4 swap sets "rulenum" and "new_set"
1523 * 5 delete rules "rulenum" and set "new_set"
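 *
 * For example, arg = (2 << 24) | (5 << 16) | 100 encodes command 2,
 * i.e. move the rules numbered 100 into set 5.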
1524 */
1525 static int
1526 del_entry(struct ip_fw_chain *chain, uint32_t arg)
1527 {
1528 uint32_t num; /* rule number or old_set */
1529 uint8_t cmd, new_set;
1530 int do_del, ndel;
1531 int error = 0;
1532 ipfw_range_tlv rt;
1533
1534 num = arg & 0xffff;
1535 cmd = (arg >> 24) & 0xff;
1536 new_set = (arg >> 16) & 0xff;
1537
1538 if (cmd > 5 || new_set > RESVD_SET)
1539 return EINVAL;
1540 if (cmd == 0 || cmd == 2 || cmd == 5) {
1541 if (num >= IPFW_DEFAULT_RULE)
1542 return EINVAL;
1543 } else {
1544 if (num > RESVD_SET) /* old_set */
1545 return EINVAL;
1546 }
1547
1548 /* Convert old requests into new representation */
1549 memset(&rt, 0, sizeof(rt));
1550 rt.start_rule = num;
1551 rt.end_rule = num;
1552 rt.set = num;
1553 rt.new_set = new_set;
1554 do_del = 0;
1555
1556 switch (cmd) {
1557 case 0: /* delete rules numbered "rulenum" */
1558 if (num == 0)
1559 rt.flags |= IPFW_RCFLAG_ALL;
1560 else
1561 rt.flags |= IPFW_RCFLAG_RANGE;
1562 do_del = 1;
1563 break;
1564 case 1: /* delete rules in set "rulenum" */
1565 rt.flags |= IPFW_RCFLAG_SET;
1566 do_del = 1;
1567 break;
1568 case 5: /* delete rules "rulenum" and set "new_set" */
1569 rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
1570 rt.set = new_set;
1571 rt.new_set = 0;
1572 do_del = 1;
1573 break;
1574 case 2: /* move rules "rulenum" to set "new_set" */
1575 rt.flags |= IPFW_RCFLAG_RANGE;
1576 break;
1577 case 3: /* move rules from set "rulenum" to set "new_set" */
1578 IPFW_UH_WLOCK(chain);
1579 error = swap_sets(chain, &rt, 1);
1580 IPFW_UH_WUNLOCK(chain);
1581 return (error);
1582 case 4: /* swap sets "rulenum" and "new_set" */
1583 IPFW_UH_WLOCK(chain);
1584 error = swap_sets(chain, &rt, 0);
1585 IPFW_UH_WUNLOCK(chain);
1586 return (error);
1587 default:
1588 return (ENOTSUP);
1589 }
1590
1591 if (do_del != 0) {
1592 if ((error = delete_range(chain, &rt, &ndel)) != 0)
1593 return (error);
1594
1595 if (ndel == 0 && (cmd != 1 && num != 0))
1596 return (EINVAL);
1597
1598 return (0);
1599 }
1600
1601 return (move_range(chain, &rt));
1602 }
1603
1604 /**
1605 * Reset some or all counters on firewall rules.
1606 * The argument `arg' is an u_int32_t. The low 16 bit are the rule number,
1607 * the next 8 bits are the set number, the top 8 bits are the command:
1608 * 0 work with rules from all sets;
1609 * 1 work with rules only from specified set.
1610 * Specified rule number is zero if we want to clear all entries.
1611 * log_only is 1 if we only want to reset logs, zero otherwise.
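 * For example, arg = (1 << 24) | (3 << 16) | 200 clears the counters
 * of rules numbered 200 that belong to set 3 only.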
1612 */
1613 static int
1614 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
1615 {
1616 struct ip_fw *rule;
1617 char *msg;
1618 int i;
1619
1620 uint16_t rulenum = arg & 0xffff;
1621 uint8_t set = (arg >> 16) & 0xff;
1622 uint8_t cmd = (arg >> 24) & 0xff;
1623
1624 if (cmd > 1)
1625 return (EINVAL);
1626 if (cmd == 1 && set > RESVD_SET)
1627 return (EINVAL);
1628
1629 IPFW_UH_RLOCK(chain);
1630 if (rulenum == 0) {
1631 V_norule_counter = 0;
1632 for (i = 0; i < chain->n_rules; i++) {
1633 rule = chain->map[i];
1634 /* Skip rules not in our set. */
1635 if (cmd == 1 && rule->set != set)
1636 continue;
1637 clear_counters(rule, log_only);
1638 }
1639 msg = log_only ? "All logging counts reset" :
1640 "Accounting cleared";
1641 } else {
1642 int cleared = 0;
1643 for (i = 0; i < chain->n_rules; i++) {
1644 rule = chain->map[i];
1645 if (rule->rulenum == rulenum) {
1646 if (cmd == 0 || rule->set == set)
1647 clear_counters(rule, log_only);
1648 cleared = 1;
1649 }
1650 if (rule->rulenum > rulenum)
1651 break;
1652 }
1653 if (!cleared) { /* we did not find any matching rules */
1654 IPFW_UH_RUNLOCK(chain);
1655 return (EINVAL);
1656 }
1657 msg = log_only ? "logging count reset" : "cleared";
1658 }
1659 IPFW_UH_RUNLOCK(chain);
1660
1661 if (V_fw_verbose) {
1662 int lev = LOG_SECURITY | LOG_NOTICE;
1663
1664 if (rulenum)
1665 log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
1666 else
1667 log(lev, "ipfw: %s.\n", msg);
1668 }
1669 return (0);
1670 }
1671
1672 /*
1673 * Check rule head in FreeBSD11 format
1674 *
1675 */
1676 static int
1677 check_ipfw_rule1(struct ip_fw_rule *rule, int size,
1678 struct rule_check_info *ci)
1679 {
1680 int l;
1681
1682 if (size < sizeof(*rule)) {
1683 printf("ipfw: rule too short\n");
1684 return (EINVAL);
1685 }
1686
1687 /* Check for valid cmd_len */
1688 l = roundup2(RULESIZE(rule), sizeof(uint64_t));
1689 if (l != size) {
1690 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1691 return (EINVAL);
1692 }
1693 if (rule->act_ofs >= rule->cmd_len) {
1694 printf("ipfw: bogus action offset (%u > %u)\n",
1695 rule->act_ofs, rule->cmd_len - 1);
1696 return (EINVAL);
1697 }
1698
1699 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1700 return (EINVAL);
1701
1702 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1703 }
1704
1705 /*
1706 * Check rule head in FreeBSD8 format
1707 *
1708 */
1709 static int
1710 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
1711 struct rule_check_info *ci)
1712 {
1713 int l;
1714
1715 if (size < sizeof(*rule)) {
1716 printf("ipfw: rule too short\n");
1717 return (EINVAL);
1718 }
1719
1720 /* Check for valid cmd_len */
1721 l = sizeof(*rule) + rule->cmd_len * 4 - 4;
1722 if (l != size) {
1723 printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1724 return (EINVAL);
1725 }
1726 if (rule->act_ofs >= rule->cmd_len) {
1727 printf("ipfw: bogus action offset (%u > %u)\n",
1728 rule->act_ofs, rule->cmd_len - 1);
1729 return (EINVAL);
1730 }
1731
1732 if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1733 return (EINVAL);
1734
1735 return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1736 }
1737
1738 static int
1739 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
1740 {
1741 int cmdlen, l;
1742 int have_action;
1743
1744 have_action = 0;
1745
1746 /*
1747 * Now go for the individual checks. Very simple ones, basically only
1748 * instruction sizes.
1749 */
1750 for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
1751 cmdlen = F_LEN(cmd);
1752 if (cmdlen > l) {
1753 printf("ipfw: opcode %d size truncated\n",
1754 cmd->opcode);
1755 return EINVAL;
1756 }
1757 switch (cmd->opcode) {
1758 case O_PROBE_STATE:
1759 case O_KEEP_STATE:
1760 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1761 goto bad_size;
1762 ci->object_opcodes++;
1763 break;
1764 case O_PROTO:
1765 case O_IP_SRC_ME:
1766 case O_IP_DST_ME:
1767 case O_LAYER2:
1768 case O_IN:
1769 case O_FRAG:
1770 case O_DIVERTED:
1771 case O_IPOPT:
1772 case O_IPTOS:
1773 case O_IPPRECEDENCE:
1774 case O_IPVER:
1775 case O_SOCKARG:
1776 case O_TCPFLAGS:
1777 case O_TCPOPTS:
1778 case O_ESTAB:
1779 case O_VERREVPATH:
1780 case O_VERSRCREACH:
1781 case O_ANTISPOOF:
1782 case O_IPSEC:
1783 #ifdef INET6
1784 case O_IP6_SRC_ME:
1785 case O_IP6_DST_ME:
1786 case O_EXT_HDR:
1787 case O_IP6:
1788 #endif
1789 case O_IP4:
1790 case O_TAG:
1791 case O_SKIP_ACTION:
1792 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1793 goto bad_size;
1794 break;
1795
1796 case O_EXTERNAL_ACTION:
1797 if (cmd->arg1 == 0 ||
1798 cmdlen != F_INSN_SIZE(ipfw_insn)) {
1799 printf("ipfw: invalid external "
1800 "action opcode\n");
1801 return (EINVAL);
1802 }
1803 ci->object_opcodes++;
1804 /*
1805 * Do we have O_EXTERNAL_INSTANCE or O_EXTERNAL_DATA
1806 * opcode?
1807 */
1808 if (l != cmdlen) {
1809 l -= cmdlen;
1810 cmd += cmdlen;
1811 cmdlen = F_LEN(cmd);
1812 if (cmd->opcode == O_EXTERNAL_DATA)
1813 goto check_action;
1814 if (cmd->opcode != O_EXTERNAL_INSTANCE) {
1815 printf("ipfw: invalid opcode "
1816 "next to external action %u\n",
1817 cmd->opcode);
1818 return (EINVAL);
1819 }
1820 if (cmd->arg1 == 0 ||
1821 cmdlen != F_INSN_SIZE(ipfw_insn)) {
1822 printf("ipfw: invalid external "
1823 "action instance opcode\n");
1824 return (EINVAL);
1825 }
1826 ci->object_opcodes++;
1827 }
1828 goto check_action;
1829
1830 case O_FIB:
1831 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1832 goto bad_size;
1833 if (cmd->arg1 >= rt_numfibs) {
1834 printf("ipfw: invalid fib number %d\n",
1835 cmd->arg1);
1836 return EINVAL;
1837 }
1838 break;
1839
1840 case O_SETFIB:
1841 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1842 goto bad_size;
1843 if ((cmd->arg1 != IP_FW_TARG) &&
1844 ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) {
1845 printf("ipfw: invalid fib number %d\n",
1846 cmd->arg1 & 0x7FFF);
1847 return EINVAL;
1848 }
1849 goto check_action;
1850
1851 case O_UID:
1852 case O_GID:
1853 case O_JAIL:
1854 case O_IP_SRC:
1855 case O_IP_DST:
1856 case O_TCPSEQ:
1857 case O_TCPACK:
1858 case O_PROB:
1859 case O_ICMPTYPE:
1860 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1861 goto bad_size;
1862 break;
1863
1864 case O_LIMIT:
1865 if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
1866 goto bad_size;
1867 ci->object_opcodes++;
1868 break;
1869
1870 case O_LOG:
1871 if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
1872 goto bad_size;
1873
1874 ((ipfw_insn_log *)cmd)->log_left =
1875 ((ipfw_insn_log *)cmd)->max_log;
1876
1877 break;
1878
1879 case O_IP_SRC_MASK:
1880 case O_IP_DST_MASK:
1881 /* only odd command lengths */
1882 if ((cmdlen & 1) == 0)
1883 goto bad_size;
1884 break;
1885
1886 case O_IP_SRC_SET:
1887 case O_IP_DST_SET:
1888 if (cmd->arg1 == 0 || cmd->arg1 > 256) {
1889 printf("ipfw: invalid set size %d\n",
1890 cmd->arg1);
1891 return EINVAL;
1892 }
1893 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1894 (cmd->arg1+31)/32 )
1895 goto bad_size;
1896 break;
1897
1898 case O_IP_SRC_LOOKUP:
1899 if (cmdlen > F_INSN_SIZE(ipfw_insn_u32))
1900 goto bad_size;
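/* FALLTHROUGH: also subject to the table-number check below */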
1901 case O_IP_DST_LOOKUP:
1902 if (cmd->arg1 >= V_fw_tables_max) {
1903 printf("ipfw: invalid table number %d\n",
1904 cmd->arg1);
1905 return (EINVAL);
1906 }
1907 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1908 cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
1909 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1910 goto bad_size;
1911 ci->object_opcodes++;
1912 break;
1913 case O_IP_FLOW_LOOKUP:
1914 case O_MAC_DST_LOOKUP:
1915 case O_MAC_SRC_LOOKUP:
1916 if (cmd->arg1 >= V_fw_tables_max) {
1917 printf("ipfw: invalid table number %d\n",
1918 cmd->arg1);
1919 return (EINVAL);
1920 }
1921 if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1922 cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1923 goto bad_size;
1924 ci->object_opcodes++;
1925 break;
1926 case O_MACADDR2:
1927 if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
1928 goto bad_size;
1929 break;
1930
1931 case O_NOP:
1932 case O_IPID:
1933 case O_IPTTL:
1934 case O_IPLEN:
1935 case O_TCPDATALEN:
1936 case O_TCPMSS:
1937 case O_TCPWIN:
1938 case O_TAGGED:
1939 if (cmdlen < 1 || cmdlen > 31)
1940 goto bad_size;
1941 break;
1942
1943 case O_DSCP:
1944 case O_MARK:
1945 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
1946 goto bad_size;
1947 break;
1948
1949 case O_MAC_TYPE:
1950 case O_IP_SRCPORT:
1951 case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
1952 if (cmdlen < 2 || cmdlen > 31)
1953 goto bad_size;
1954 break;
1955
1956 case O_RECV:
1957 case O_XMIT:
1958 case O_VIA:
1959 if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
1960 goto bad_size;
1961 ci->object_opcodes++;
1962 break;
1963
1964 case O_ALTQ:
1965 if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
1966 goto bad_size;
1967 break;
1968
1969 case O_PIPE:
1970 case O_QUEUE:
1971 if (cmdlen != F_INSN_SIZE(ipfw_insn))
1972 goto bad_size;
1973 goto check_action;
1974
1975 case O_FORWARD_IP:
1976 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
1977 goto bad_size;
1978 goto check_action;
1979 #ifdef INET6
1980 case O_FORWARD_IP6:
1981 if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
1982 goto bad_size;
1983 goto check_action;
1984 #endif /* INET6 */
1985
1986 case O_DIVERT:
1987 case O_TEE:
1988 if (ip_divert_ptr == NULL)
1989 return EINVAL;
1990 else
1991 goto check_size;
1992 case O_NETGRAPH:
1993 case O_NGTEE:
1994 if (ng_ipfw_input_p == NULL)
1995 return EINVAL;
1996 else
1997 goto check_size;
1998 case O_NAT:
1999 if (!IPFW_NAT_LOADED)
2000 return EINVAL;
2001 if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
2002 goto bad_size;
2003 goto check_action;
2004 case O_CHECK_STATE:
2005 ci->object_opcodes++;
2006 goto check_size;
2007 case O_SETMARK:
2008 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
2009 goto bad_size;
2010 goto check_action;
2011 case O_REJECT:
2012 /* "unreach needfrag" has variable len. */
2013 if ((cmdlen == F_INSN_SIZE(ipfw_insn) ||
2014 cmdlen == F_INSN_SIZE(ipfw_insn_u16)))
2015 goto check_action;
2016 /* FALLTHROUGH */
2017 case O_FORWARD_MAC: /* XXX not implemented yet */
2018 case O_COUNT:
2019 case O_ACCEPT:
2020 case O_DENY:
2021 case O_SETDSCP:
2022 #ifdef INET6
2023 case O_UNREACH6:
2024 #endif
2025 case O_SKIPTO:
2026 case O_REASS:
2027 case O_CALLRETURN:
2028 check_size:
2029 if (cmdlen != F_INSN_SIZE(ipfw_insn))
2030 goto bad_size;
2031 check_action:
2032 if (have_action) {
2033 printf("ipfw: opcode %d, multiple actions"
2034 " not allowed\n",
2035 cmd->opcode);
2036 return (EINVAL);
2037 }
2038 have_action = 1;
2039 if (l != cmdlen) {
2040 printf("ipfw: opcode %d, action must be"
2041 " last opcode\n",
2042 cmd->opcode);
2043 return (EINVAL);
2044 }
2045 break;
2046 #ifdef INET6
2047 case O_IP6_SRC:
2048 case O_IP6_DST:
2049 if (cmdlen != F_INSN_SIZE(struct in6_addr) +
2050 F_INSN_SIZE(ipfw_insn))
2051 goto bad_size;
2052 break;
2053
2054 case O_FLOW6ID:
2055 if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
2056 ((ipfw_insn_u32 *)cmd)->o.arg1)
2057 goto bad_size;
2058 break;
2059
2060 case O_IP6_SRC_MASK:
2061 case O_IP6_DST_MASK:
2062 			if (!(cmdlen & 1) || cmdlen > 127)
2063 goto bad_size;
2064 break;
2065 case O_ICMP6TYPE:
2066 			if (cmdlen != F_INSN_SIZE(ipfw_insn_icmp6))
2067 goto bad_size;
2068 break;
2069 #endif
2070
2071 default:
2072 switch (cmd->opcode) {
2073 #ifndef INET6
2074 case O_IP6_SRC_ME:
2075 case O_IP6_DST_ME:
2076 case O_EXT_HDR:
2077 case O_IP6:
2078 case O_UNREACH6:
2079 case O_IP6_SRC:
2080 case O_IP6_DST:
2081 case O_FLOW6ID:
2082 case O_IP6_SRC_MASK:
2083 case O_IP6_DST_MASK:
2084 case O_ICMP6TYPE:
2085 printf("ipfw: no IPv6 support in kernel\n");
2086 return (EPROTONOSUPPORT);
2087 #endif
2088 default:
2089 printf("ipfw: opcode %d, unknown opcode\n",
2090 cmd->opcode);
2091 return (EINVAL);
2092 }
2093 }
2094 }
2095 if (have_action == 0) {
2096 printf("ipfw: missing action\n");
2097 return (EINVAL);
2098 }
2099 return 0;
2100
2101 bad_size:
2102 printf("ipfw: opcode %d size %d wrong\n",
2103 cmd->opcode, cmdlen);
2104 return (EINVAL);
2105 }
2106
2107 /*
2108 * Translation of requests for compatibility with FreeBSD 7.2/8.
2109 * A static variable tells us if we have an old client from userland,
2110 * and if necessary we translate requests and responses between the
2111 * two formats.
2112 */
2113 static int is7 = 0;
2114
2115 struct ip_fw7 {
2116 struct ip_fw7 *next; /* linked list of rules */
2117 struct ip_fw7 *next_rule; /* ptr to next [skipto] rule */
2118 /* 'next_rule' is used to pass up 'set_disable' status */
2119
2120 uint16_t act_ofs; /* offset of action in 32-bit units */
2121 uint16_t cmd_len; /* # of 32-bit words in cmd */
2122 uint16_t rulenum; /* rule number */
2123 uint8_t set; /* rule set (0..31) */
2124 // #define RESVD_SET 31 /* set for default and persistent rules */
2125 uint8_t _pad; /* padding */
2126 // uint32_t id; /* rule id, only in v.8 */
2127 /* These fields are present in all rules. */
2128 uint64_t pcnt; /* Packet counter */
2129 uint64_t bcnt; /* Byte counter */
2130 uint32_t timestamp; /* tv_sec of last match */
2131
2132 ipfw_insn cmd[1]; /* storage for commands */
2133 };
2134
2135 static int convert_rule_to_7(struct ip_fw_rule0 *rule);
2136 static int convert_rule_to_8(struct ip_fw_rule0 *rule);
2137
2138 #ifndef RULESIZE7
2139 #define RULESIZE7(rule) (sizeof(struct ip_fw7) + \
2140 ((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
2141 #endif
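
/*
 * Worked example (illustrative): for a 7.2-format rule whose cmd_len is 3,
 * RULESIZE7() yields sizeof(struct ip_fw7) + 3 * 4 - 4 bytes, i.e. the
 * structure itself plus two extra 32-bit instruction words, since cmd[1]
 * already accounts for the first word.
 */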
2142
2143 /*
2144 * Copy the static and dynamic rules to the supplied buffer
2145 * and return the amount of space actually used.
2146 * Must be run under IPFW_UH_RLOCK
2147 */
2148 static size_t
2149 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
2150 {
2151 char *bp = buf;
2152 char *ep = bp + space;
2153 struct ip_fw *rule;
2154 struct ip_fw_rule0 *dst;
2155 struct timeval boottime;
2156 int error, i, l, warnflag;
2157 time_t boot_seconds;
2158
2159 warnflag = 0;
2160
2161 getboottime(&boottime);
2162 boot_seconds = boottime.tv_sec;
2163 for (i = 0; i < chain->n_rules; i++) {
2164 rule = chain->map[i];
2165
2166 if (is7) {
2167 			/* Convert rule to FreeBSD 7.2 format */
2168 l = RULESIZE7(rule);
2169 if (bp + l + sizeof(uint32_t) <= ep) {
2170 bcopy(rule, bp, l + sizeof(uint32_t));
2171 error = set_legacy_obj_kidx(chain,
2172 (struct ip_fw_rule0 *)bp);
2173 if (error != 0)
2174 return (0);
2175 error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
2176 if (error)
2177 return 0; /*XXX correct? */
2178 /*
2179 * XXX HACK. Store the disable mask in the "next"
2180 * pointer in a wild attempt to keep the ABI the same.
2181 * Why do we do this on EVERY rule?
2182 */
2183 bcopy(&V_set_disable,
2184 &(((struct ip_fw7 *)bp)->next_rule),
2185 sizeof(V_set_disable));
2186 if (((struct ip_fw7 *)bp)->timestamp)
2187 ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
2188 bp += l;
2189 }
2190 continue; /* go to next rule */
2191 }
2192
2193 l = RULEUSIZE0(rule);
2194 if (bp + l > ep) { /* should not happen */
2195 printf("overflow dumping static rules\n");
2196 break;
2197 }
2198 dst = (struct ip_fw_rule0 *)bp;
2199 export_rule0(rule, dst, l);
2200 error = set_legacy_obj_kidx(chain, dst);
2201
2202 /*
2203 * XXX HACK. Store the disable mask in the "next"
2204 * pointer in a wild attempt to keep the ABI the same.
2205 * Why do we do this on EVERY rule?
2206 *
2207 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
2208 * so we need to fail _after_ saving at least one mask.
2209 */
2210 bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
2211 if (dst->timestamp)
2212 dst->timestamp += boot_seconds;
2213 bp += l;
2214
2215 if (error != 0) {
2216 if (error == 2) {
2217 /* Non-fatal table rewrite error. */
2218 warnflag = 1;
2219 continue;
2220 }
2221 printf("Stop on rule %d. Fail to convert table\n",
2222 rule->rulenum);
2223 break;
2224 }
2225 }
2226 if (warnflag != 0)
2227 printf("ipfw: process %s is using legacy interfaces,"
2228 " consider rebuilding\n", "");
2229 ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
2230 return (bp - (char *)buf);
2231 }
2232
2233 struct dump_args {
2234 uint32_t b; /* start rule */
2235 uint32_t e; /* end rule */
2236 uint32_t rcount; /* number of rules */
2237 uint32_t rsize; /* rules size */
2238 uint32_t tcount; /* number of tables */
2239 int rcounters; /* counters */
2240 uint32_t *bmask; /* index bitmask of used named objects */
2241 };
2242
2243 void
2244 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
2245 {
2246
2247 ntlv->head.type = no->etlv;
2248 ntlv->head.length = sizeof(*ntlv);
2249 ntlv->idx = no->kidx;
2250 strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
2251 }
2252
2253 /*
2254 * Export named object info in instance @ni, identified by @kidx
2255 * to ipfw_obj_ntlv. TLV is allocated from @sd space.
2256 *
2257 * Returns 0 on success.
2258 */
2259 static int
2260 export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
2261 struct sockopt_data *sd)
2262 {
2263 struct named_object *no;
2264 ipfw_obj_ntlv *ntlv;
2265
2266 no = ipfw_objhash_lookup_kidx(ni, kidx);
2267 KASSERT(no != NULL, ("invalid object kernel index passed"));
2268
2269 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2270 if (ntlv == NULL)
2271 return (ENOMEM);
2272
2273 ipfw_export_obj_ntlv(no, ntlv);
2274 return (0);
2275 }
2276
2277 static int
2278 export_named_objects(struct namedobj_instance *ni, struct dump_args *da,
2279 struct sockopt_data *sd)
2280 {
2281 int error, i;
2282
2283 for (i = 0; i < IPFW_TABLES_MAX && da->tcount > 0; i++) {
2284 if ((da->bmask[i / 32] & (1 << (i % 32))) == 0)
2285 continue;
2286 if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
2287 return (error);
2288 da->tcount--;
2289 }
2290 return (0);
2291 }
2292
2293 static int
2294 dump_named_objects(struct ip_fw_chain *ch, struct dump_args *da,
2295 struct sockopt_data *sd)
2296 {
2297 ipfw_obj_ctlv *ctlv;
2298 int error;
2299
2300 MPASS(da->tcount > 0);
2301 /* Header first */
2302 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2303 if (ctlv == NULL)
2304 return (ENOMEM);
2305 ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
2306 ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
2307 sizeof(*ctlv);
2308 ctlv->count = da->tcount;
2309 ctlv->objsize = sizeof(ipfw_obj_ntlv);
2310
2311 /* Dump table names first (if any) */
2312 error = export_named_objects(ipfw_get_table_objhash(ch), da, sd);
2313 if (error != 0)
2314 return (error);
2315 	/* Then dump other named objects */
2316 da->bmask += IPFW_TABLES_MAX / 32;
2317 return (export_named_objects(CHAIN_TO_SRV(ch), da, sd));
2318 }
2319
2320 /*
2321 * Dumps static rules with table TLVs in buffer @sd.
2322 *
2323 * Returns 0 on success.
2324 */
2325 static int
2326 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
2327 struct sockopt_data *sd)
2328 {
2329 ipfw_obj_ctlv *ctlv;
2330 struct ip_fw *krule;
2331 caddr_t dst;
2332 int i, l;
2333
2334 /* Dump rules */
2335 ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2336 if (ctlv == NULL)
2337 return (ENOMEM);
2338 ctlv->head.type = IPFW_TLV_RULE_LIST;
2339 ctlv->head.length = da->rsize + sizeof(*ctlv);
2340 ctlv->count = da->rcount;
2341
2342 for (i = da->b; i < da->e; i++) {
2343 krule = chain->map[i];
2344
2345 l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
2346 if (da->rcounters != 0)
2347 l += sizeof(struct ip_fw_bcounter);
2348 dst = (caddr_t)ipfw_get_sopt_space(sd, l);
2349 if (dst == NULL)
2350 return (ENOMEM);
2351
2352 export_rule1(krule, dst, l, da->rcounters);
2353 }
2354
2355 return (0);
2356 }
2357
2358 int
2359 ipfw_mark_object_kidx(uint32_t *bmask, uint16_t etlv, uint16_t kidx)
2360 {
2361 uint32_t bidx;
2362
2363 /*
2364 * Maintain separate bitmasks for table and non-table objects.
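	 * For example (illustrative): a table object with kidx 40 sets bit
	 * 40 % 32 = 8 in word 40 / 32 = 1 of @bmask, while a non-table object
	 * with the same kidx sets the same bit offset by IPFW_TABLES_MAX / 32
	 * words into the second half of the mask.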
2365 */
2366 bidx = (etlv == IPFW_TLV_TBL_NAME) ? 0: IPFW_TABLES_MAX / 32;
2367 bidx += kidx / 32;
2368 if ((bmask[bidx] & (1 << (kidx % 32))) != 0)
2369 return (0);
2370
2371 bmask[bidx] |= 1 << (kidx % 32);
2372 return (1);
2373 }
2374
2375 /*
2376 * Marks every object index used in @rule with bit in @bmask.
2377 * Used to generate bitmask of referenced tables/objects for given ruleset
2378 * or its part.
2379 */
2380 static void
2381 mark_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
2382 struct dump_args *da)
2383 {
2384 struct opcode_obj_rewrite *rw;
2385 ipfw_insn *cmd;
2386 int cmdlen, l;
2387 uint16_t kidx;
2388 uint8_t subtype;
2389
2390 l = rule->cmd_len;
2391 cmd = rule->cmd;
2392 cmdlen = 0;
2393 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2394 cmdlen = F_LEN(cmd);
2395
2396 rw = find_op_rw(cmd, &kidx, &subtype);
2397 if (rw == NULL)
2398 continue;
2399
2400 if (ipfw_mark_object_kidx(da->bmask, rw->etlv, kidx))
2401 da->tcount++;
2402 }
2403 }
2404
2405 /*
2406 * Dumps requested objects data
2407 * Data layout (version 0)(current):
2408 * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
2409 * size = ipfw_cfg_lheader.size
2410 * Reply: [ ipfw_cfg_lheader
2411 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2412 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
2413 * ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
2414 * ] (optional)
2415 * [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
2416 * ]
2417 * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize.
2418 * The rest (size, count) are set to zero and should be ignored.
2419 *
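 * A minimal userland sketch of the two-pass sizing this handler supports
 * (illustrative only; the dump opcode constant and the exact way it is set
 * in the embedded ip_fw3_opheader are assumptions, not taken from this file):
 *
 *	ipfw_cfg_lheader *cfg = calloc(1, bufsize);
 *	// fill the embedded ip_fw3_opheader with the dump opcode, then:
 *	cfg->flags = IPFW_CFG_GET_STATIC | IPFW_CFG_GET_COUNTERS;
 *	socklen_t len = bufsize;
 *	if (getsockopt(s, IPPROTO_IP, IP_FW3, cfg, &len) != 0 && errno == ENOMEM)
 *		bufsize = cfg->size;	/* kernel reported the required size */
 *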
2420 * Returns 0 on success.
2421 */
2422 static int
2423 dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2424 struct sockopt_data *sd)
2425 {
2426 struct dump_args da;
2427 ipfw_cfg_lheader *hdr;
2428 struct ip_fw *rule;
2429 size_t sz, rnum;
2430 uint32_t hdr_flags, *bmask;
2431 int error, i;
2432
2433 hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2434 if (hdr == NULL)
2435 return (EINVAL);
2436
2437 error = 0;
2438 bmask = NULL;
2439 memset(&da, 0, sizeof(da));
2440 /*
2441 * Allocate needed state.
2442 	 * Note we allocate a 2x-sized mask, for table & srv objects
2443 */
2444 if (hdr->flags & (IPFW_CFG_GET_STATIC | IPFW_CFG_GET_STATES))
2445 da.bmask = bmask = malloc(
2446 sizeof(uint32_t) * IPFW_TABLES_MAX * 2 / 32, M_TEMP,
2447 M_WAITOK | M_ZERO);
2448 IPFW_UH_RLOCK(chain);
2449
2450 /*
2451 * STAGE 1: Determine size/count for objects in range.
2452 * Prepare used tables bitmask.
2453 */
2454 sz = sizeof(ipfw_cfg_lheader);
2455 da.e = chain->n_rules;
2456
2457 if (hdr->end_rule != 0) {
2458 /* Handle custom range */
2459 if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
2460 rnum = IPFW_DEFAULT_RULE;
2461 da.b = ipfw_find_rule(chain, rnum, 0);
2462 rnum = (hdr->end_rule < IPFW_DEFAULT_RULE) ?
2463 hdr->end_rule + 1: IPFW_DEFAULT_RULE;
2464 da.e = ipfw_find_rule(chain, rnum, UINT32_MAX) + 1;
2465 }
2466
2467 if (hdr->flags & IPFW_CFG_GET_STATIC) {
2468 for (i = da.b; i < da.e; i++) {
2469 rule = chain->map[i];
2470 da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
2471 da.rcount++;
2472 /* Update bitmask of used objects for given range */
2473 mark_rule_objects(chain, rule, &da);
2474 }
2475 /* Add counters if requested */
2476 if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
2477 da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
2478 da.rcounters = 1;
2479 }
2480 sz += da.rsize + sizeof(ipfw_obj_ctlv);
2481 }
2482
2483 if (hdr->flags & IPFW_CFG_GET_STATES) {
2484 sz += sizeof(ipfw_obj_ctlv) +
2485 ipfw_dyn_get_count(bmask, &i) * sizeof(ipfw_obj_dyntlv);
2486 da.tcount += i;
2487 }
2488
2489 if (da.tcount > 0)
2490 sz += da.tcount * sizeof(ipfw_obj_ntlv) +
2491 sizeof(ipfw_obj_ctlv);
2492
2493 /*
2494 * Fill header anyway.
2495 	 * Note we have to save header fields to stable storage since the
2496 	 * buffer inside @sd can be flushed after dumping rules.
2497 */
2498 hdr->size = sz;
2499 hdr->set_mask = ~V_set_disable;
2500 hdr_flags = hdr->flags;
2501 hdr = NULL;
2502
2503 if (sd->valsize < sz) {
2504 error = ENOMEM;
2505 goto cleanup;
2506 }
2507
2508 /* STAGE2: Store actual data */
2509 if (da.tcount > 0) {
2510 error = dump_named_objects(chain, &da, sd);
2511 if (error != 0)
2512 goto cleanup;
2513 }
2514
2515 if (hdr_flags & IPFW_CFG_GET_STATIC) {
2516 error = dump_static_rules(chain, &da, sd);
2517 if (error != 0)
2518 goto cleanup;
2519 }
2520
2521 if (hdr_flags & IPFW_CFG_GET_STATES)
2522 error = ipfw_dump_states(chain, sd);
2523
2524 cleanup:
2525 IPFW_UH_RUNLOCK(chain);
2526
2527 if (bmask != NULL)
2528 free(bmask, M_TEMP);
2529
2530 return (error);
2531 }
2532
2533 int
2534 ipfw_check_object_name_generic(const char *name)
2535 {
2536 int nsize;
2537
2538 nsize = sizeof(((ipfw_obj_ntlv *)0)->name);
2539 if (strnlen(name, nsize) == nsize)
2540 return (EINVAL);
2541 if (name[0] == '\0')
2542 return (EINVAL);
2543 return (0);
2544 }
2545
2546 /*
2547 * Creates non-existent objects referenced by rule.
2548 *
2549 * Return 0 on success.
2550 */
2551 int
2552 create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd,
2553 struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti)
2554 {
2555 struct opcode_obj_rewrite *rw;
2556 struct obj_idx *p;
2557 uint16_t kidx;
2558 int error;
2559
2560 /*
2561 * Compatibility stuff: do actual creation for non-existing,
2562 * but referenced objects.
2563 */
2564 for (p = oib; p < pidx; p++) {
2565 if (p->kidx != 0)
2566 continue;
2567
2568 ti->uidx = p->uidx;
2569 ti->type = p->type;
2570 ti->atype = 0;
2571
2572 rw = find_op_rw(cmd + p->off, NULL, NULL);
2573 KASSERT(rw != NULL, ("Unable to find handler for op %d",
2574 (cmd + p->off)->opcode));
2575
2576 if (rw->create_object == NULL)
2577 error = EOPNOTSUPP;
2578 else
2579 error = rw->create_object(ch, ti, &kidx);
2580 if (error == 0) {
2581 p->kidx = kidx;
2582 continue;
2583 }
2584
2585 /*
2586 * Error happened. We have to rollback everything.
2587 * Drop all already acquired references.
2588 */
2589 IPFW_UH_WLOCK(ch);
2590 unref_oib_objects(ch, cmd, oib, pidx);
2591 IPFW_UH_WUNLOCK(ch);
2592
2593 return (error);
2594 }
2595
2596 return (0);
2597 }
2598
2599 /*
2600 * Compatibility function for old ipfw(8) binaries.
2601 * Rewrites table/nat kernel indices with userland ones.
2602 * Convert tables matching '/^\d+$/' to their atoi() value.
2603 * Use number 65535 for other tables.
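 * For example, a table named "12" is reported to such clients as table 12,
 * while a table named "badhosts" (a hypothetical name) is reported as 65535
 * and the non-fatal error 2 is returned, so ipfw_getrules() can warn about
 * the legacy interface instead of failing.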
2604 *
2605 * Returns 0 on success.
2606 */
2607 static int
2608 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule)
2609 {
2610 struct opcode_obj_rewrite *rw;
2611 struct named_object *no;
2612 ipfw_insn *cmd;
2613 char *end;
2614 long val;
2615 int cmdlen, error, l;
2616 uint16_t kidx, uidx;
2617 uint8_t subtype;
2618
2619 error = 0;
2620
2621 l = rule->cmd_len;
2622 cmd = rule->cmd;
2623 cmdlen = 0;
2624 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2625 cmdlen = F_LEN(cmd);
2626
2627 		/* Check if there is an index in the given opcode */
2628 rw = find_op_rw(cmd, &kidx, &subtype);
2629 if (rw == NULL)
2630 continue;
2631
2632 /* Try to find referenced kernel object */
2633 no = rw->find_bykidx(ch, kidx);
2634 if (no == NULL)
2635 continue;
2636
2637 val = strtol(no->name, &end, 10);
2638 if (*end == '\0' && val < 65535) {
2639 uidx = val;
2640 } else {
2641 /*
2642 * We are called via legacy opcode.
2643 			 * Save the error and show the table as a fake number
2644 			 * so that ipfw(8) does not hang.
2645 */
2646 uidx = 65535;
2647 error = 2;
2648 }
2649
2650 rw->update(cmd, uidx);
2651 }
2652
2653 return (error);
2654 }
2655
2656 /*
2657 * Unreferences all already-referenced objects in given @cmd rule,
2658 * using information in @oib.
2659 *
2660 * Used to rollback partially converted rule on error.
2661 */
2662 static void
2663 unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib,
2664 struct obj_idx *end)
2665 {
2666 struct opcode_obj_rewrite *rw;
2667 struct named_object *no;
2668 struct obj_idx *p;
2669
2670 IPFW_UH_WLOCK_ASSERT(ch);
2671
2672 for (p = oib; p < end; p++) {
2673 if (p->kidx == 0)
2674 continue;
2675
2676 rw = find_op_rw(cmd + p->off, NULL, NULL);
2677 KASSERT(rw != NULL, ("Unable to find handler for op %d",
2678 (cmd + p->off)->opcode));
2679
2680 /* Find & unref by existing idx */
2681 no = rw->find_bykidx(ch, p->kidx);
2682 KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx));
2683 no->refcnt--;
2684 }
2685 }
2686
2687 /*
2688 * Remove references from every object used in @rule.
2689 * Used by the rule removal code.
2690 */
2691 static void
2692 unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
2693 {
2694 struct opcode_obj_rewrite *rw;
2695 struct named_object *no;
2696 ipfw_insn *cmd;
2697 int cmdlen, l;
2698 uint16_t kidx;
2699 uint8_t subtype;
2700
2701 IPFW_UH_WLOCK_ASSERT(ch);
2702
2703 l = rule->cmd_len;
2704 cmd = rule->cmd;
2705 cmdlen = 0;
2706 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2707 cmdlen = F_LEN(cmd);
2708
2709 rw = find_op_rw(cmd, &kidx, &subtype);
2710 if (rw == NULL)
2711 continue;
2712 no = rw->find_bykidx(ch, kidx);
2713
2714 KASSERT(no != NULL, ("object id %d not found", kidx));
2715 KASSERT(no->subtype == subtype,
2716 ("wrong type %d (%d) for object id %d",
2717 no->subtype, subtype, kidx));
2718 KASSERT(no->refcnt > 0, ("refcount for object %d is %d",
2719 kidx, no->refcnt));
2720
2721 if (no->refcnt == 1 && rw->destroy_object != NULL)
2722 rw->destroy_object(ch, no);
2723 else
2724 no->refcnt--;
2725 }
2726 }
2727
2728 /*
2729 * Find and reference object (if any) stored in instruction @cmd.
2730 *
2731 * Saves object info in @pidx; sets
2732 * @unresolved to 1 if the object should exist but was not found.
2733 *
2734 * Returns non-zero value in case of error.
2735 */
2736 static int
2737 ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti,
2738 struct obj_idx *pidx, int *unresolved)
2739 {
2740 struct named_object *no;
2741 struct opcode_obj_rewrite *rw;
2742 int error;
2743
2744 /* Check if this opcode is candidate for rewrite */
2745 rw = find_op_rw(cmd, &ti->uidx, &ti->type);
2746 if (rw == NULL)
2747 return (0);
2748
2749 /* Need to rewrite. Save necessary fields */
2750 pidx->uidx = ti->uidx;
2751 pidx->type = ti->type;
2752
2753 /* Try to find referenced kernel object */
2754 error = rw->find_byname(ch, ti, &no);
2755 if (error != 0)
2756 return (error);
2757 if (no == NULL) {
2758 /*
2759 		 * Report the unresolved object for automatic
2760 * creation.
2761 */
2762 *unresolved = 1;
2763 return (0);
2764 }
2765
2766 /*
2767 	 * Object already exists.
2768 	 * Its subtype should match the expected value.
2769 */
2770 if (ti->type != no->subtype)
2771 return (EINVAL);
2772
2773 /* Bump refcount and update kidx. */
2774 no->refcnt++;
2775 rw->update(cmd, no->kidx);
2776 return (0);
2777 }
2778
2779 /*
2780 * Finds and bumps refcount for objects referenced by given @rule.
2781 * Auto-creates non-existing tables.
2782 * Fills in @oib array with userland/kernel indexes.
2783 *
2784 * Returns 0 on success.
2785 */
2786 static int
2787 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
2788 struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti)
2789 {
2790 struct obj_idx *pidx;
2791 ipfw_insn *cmd;
2792 int cmdlen, error, l, unresolved;
2793
2794 pidx = oib;
2795 l = rule->cmd_len;
2796 cmd = rule->cmd;
2797 cmdlen = 0;
2798 error = 0;
2799
2800 IPFW_UH_WLOCK(ch);
2801
2802 /* Increase refcount on each existing referenced table. */
2803 for ( ; l > 0 ; l -= cmdlen, cmd += cmdlen) {
2804 cmdlen = F_LEN(cmd);
2805 unresolved = 0;
2806
2807 error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved);
2808 if (error != 0)
2809 break;
2810 /*
2811 * Compatibility stuff for old clients:
2812 		 * prepare to automatically create non-existing objects.
2813 */
2814 if (unresolved != 0) {
2815 pidx->off = rule->cmd_len - l;
2816 pidx++;
2817 }
2818 }
2819
2820 if (error != 0) {
2821 /* Unref everything we have already done */
2822 unref_oib_objects(ch, rule->cmd, oib, pidx);
2823 IPFW_UH_WUNLOCK(ch);
2824 return (error);
2825 }
2826 IPFW_UH_WUNLOCK(ch);
2827
2828 /* Perform auto-creation for non-existing objects */
2829 if (pidx != oib)
2830 error = create_objects_compat(ch, rule->cmd, oib, pidx, ti);
2831
2832 /* Calculate real number of dynamic objects */
2833 ci->object_opcodes = (uint16_t)(pidx - oib);
2834
2835 return (error);
2836 }
2837
2838 /*
2839 * Checks whether an opcode references a table of the appropriate type.
2840 * Adds a reference to the found table if it does.
2841 * Rewrites user-supplied opcode values with kernel ones.
2842 *
2843 * Returns 0 on success and appropriate error code otherwise.
2844 */
2845 static int
2846 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci)
2847 {
2848 int error;
2849 ipfw_insn *cmd;
2850 struct obj_idx *p, *pidx_first, *pidx_last;
2851 struct tid_info ti;
2852
2853 /*
2854 * Prepare an array for storing opcode indices.
2855 * Use stack allocation by default.
2856 */
2857 if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
2858 /* Stack */
2859 pidx_first = ci->obuf;
2860 } else
2861 pidx_first = malloc(
2862 ci->object_opcodes * sizeof(struct obj_idx),
2863 M_IPFW, M_WAITOK | M_ZERO);
2864
2865 error = 0;
2866 memset(&ti, 0, sizeof(ti));
2867
2868 	/* Use the set the rule is assigned to. */
2869 ti.set = ci->krule->set;
2870 if (ci->ctlv != NULL) {
2871 ti.tlvs = (void *)(ci->ctlv + 1);
2872 ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
2873 }
2874
2875 /* Reference all used tables and other objects */
2876 error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti);
2877 if (error != 0)
2878 goto free;
2879 /*
2880 * Note that ref_rule_objects() might have updated ci->object_opcodes
2881 * to reflect actual number of object opcodes.
2882 */
2883
2884 /* Perform rewrite of remaining opcodes */
2885 p = pidx_first;
2886 pidx_last = pidx_first + ci->object_opcodes;
2887 for (p = pidx_first; p < pidx_last; p++) {
2888 cmd = ci->krule->cmd + p->off;
2889 update_opcode_kidx(cmd, p->kidx);
2890 }
2891
2892 free:
2893 if (pidx_first != ci->obuf)
2894 free(pidx_first, M_IPFW);
2895
2896 return (error);
2897 }
2898
2899 /*
2900 * Adds one or more rules to ipfw @chain.
2901 * Data layout (version 0)(current):
2902 * Request:
2903 * [
2904 * ip_fw3_opheader
2905 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
2906 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
2907 * ]
2908 * Reply:
2909 * [
2910 * ip_fw3_opheader
2911 * [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2912 * [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
2913 * ]
2914 *
2915 * Rules in reply are modified to store their actual ruleset number.
2916 *
2917 * (*1) TLVs inside IPFW_TLV_TBL_LIST need to be sorted ascending
2918 * according to their idx field and there must be no duplicates.
2919 * (*2) Numbered rules inside IPFW_TLV_RULE_LIST need to be sorted ascending.
2920 * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
2921 *
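 * A schematic single-rule request under this layout (illustrative values):
 *
 *	[ ip_fw3_opheader                                    ]
 *	[ ipfw_obj_ctlv   type=IPFW_TLV_TBLNAME_LIST         ] \ optional,
 *	[ ipfw_obj_ntlv   idx=1 name="hosts"                 ] / 8-byte multiple
 *	[ ipfw_obj_ctlv   type=IPFW_TLV_RULE_LIST  count=1   ]
 *	[ ip_fw_rule      padded to roundup2(RULESIZE(r), 8) ]
 *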
2922 * Returns 0 on success.
2923 */
2924 static int
2925 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2926 struct sockopt_data *sd)
2927 {
2928 ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
2929 ipfw_obj_ntlv *ntlv;
2930 int clen, error, idx;
2931 uint32_t count, read;
2932 struct ip_fw_rule *r;
2933 struct rule_check_info rci, *ci, *cbuf;
2934 int i, rsize;
2935
2936 op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
2937 ctlv = (ipfw_obj_ctlv *)(op3 + 1);
2938
2939 read = sizeof(ip_fw3_opheader);
2940 rtlv = NULL;
2941 tstate = NULL;
2942 cbuf = NULL;
2943 memset(&rci, 0, sizeof(struct rule_check_info));
2944
2945 if (read + sizeof(*ctlv) > sd->valsize)
2946 return (EINVAL);
2947
2948 if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
2949 clen = ctlv->head.length;
2950 /* Check size and alignment */
2951 if (clen > sd->valsize || clen < sizeof(*ctlv))
2952 return (EINVAL);
2953 if ((clen % sizeof(uint64_t)) != 0)
2954 return (EINVAL);
2955
2956 /*
2957 * Some table names or other named objects.
2958 		 * Check them for validity.
2959 */
2960 count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
2961 if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
2962 return (EINVAL);
2963
2964 /*
2965 * Check each TLV.
2966 * Ensure TLVs are sorted ascending and
2967 * there are no duplicates.
2968 */
2969 idx = -1;
2970 ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
2971 while (count > 0) {
2972 if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
2973 return (EINVAL);
2974
2975 error = ipfw_check_object_name_generic(ntlv->name);
2976 if (error != 0)
2977 return (error);
2978
2979 if (ntlv->idx <= idx)
2980 return (EINVAL);
2981
2982 idx = ntlv->idx;
2983 count--;
2984 ntlv++;
2985 }
2986
2987 tstate = ctlv;
2988 read += ctlv->head.length;
2989 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2990 }
2991
2992 if (read + sizeof(*ctlv) > sd->valsize)
2993 return (EINVAL);
2994
2995 if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
2996 clen = ctlv->head.length;
2997 if (clen + read > sd->valsize || clen < sizeof(*ctlv))
2998 return (EINVAL);
2999 if ((clen % sizeof(uint64_t)) != 0)
3000 return (EINVAL);
3001
3002 /*
3003 * TODO: Permit adding multiple rules at once
3004 */
3005 if (ctlv->count != 1)
3006 return (ENOTSUP);
3007
3008 clen -= sizeof(*ctlv);
3009
3010 if (ctlv->count > clen / sizeof(struct ip_fw_rule))
3011 return (EINVAL);
3012
3013 /* Allocate state for each rule or use stack */
3014 if (ctlv->count == 1) {
3015 memset(&rci, 0, sizeof(struct rule_check_info));
3016 cbuf = &rci;
3017 } else
3018 cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
3019 M_WAITOK | M_ZERO);
3020 ci = cbuf;
3021
3022 /*
3023 		 * Check each rule for validity.
3024 		 * Ensure numbered rules are sorted ascending
3025 		 * and properly aligned.
3026 */
3027 idx = 0;
3028 r = (struct ip_fw_rule *)(ctlv + 1);
3029 count = 0;
3030 error = 0;
3031 while (clen > 0) {
3032 rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
3033 if (rsize > clen || ctlv->count <= count) {
3034 error = EINVAL;
3035 break;
3036 }
3037
3038 ci->ctlv = tstate;
3039 error = check_ipfw_rule1(r, rsize, ci);
3040 if (error != 0)
3041 break;
3042
3043 /* Check sorting */
3044 if (r->rulenum != 0 && r->rulenum < idx) {
3045 printf("rulenum %d idx %d\n", r->rulenum, idx);
3046 error = EINVAL;
3047 break;
3048 }
3049 idx = r->rulenum;
3050
3051 ci->urule = (caddr_t)r;
3052
3053 rsize = roundup2(rsize, sizeof(uint64_t));
3054 clen -= rsize;
3055 r = (struct ip_fw_rule *)((caddr_t)r + rsize);
3056 count++;
3057 ci++;
3058 }
3059
3060 if (ctlv->count != count || error != 0) {
3061 if (cbuf != &rci)
3062 free(cbuf, M_TEMP);
3063 return (EINVAL);
3064 }
3065
3066 rtlv = ctlv;
3067 read += ctlv->head.length;
3068 ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
3069 }
3070
3071 if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
3072 if (cbuf != NULL && cbuf != &rci)
3073 free(cbuf, M_TEMP);
3074 return (EINVAL);
3075 }
3076
3077 /*
3078 	 * The rules passed seem to be valid.
3079 	 * Allocate storage and try to add them to the chain.
3080 */
3081 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
3082 clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
3083 ci->krule = ipfw_alloc_rule(chain, clen);
3084 import_rule1(ci);
3085 }
3086
3087 if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
3088 		/* Free allocated krules */
3089 for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
3090 ipfw_free_rule(ci->krule);
3091 }
3092
3093 if (cbuf != NULL && cbuf != &rci)
3094 free(cbuf, M_TEMP);
3095
3096 return (error);
3097 }
3098
3099 /*
3100 * Lists all sopts currently registered.
3101 * Data layout (v0)(current):
3102 * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
3103 * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
3104 *
3105 * Returns 0 on success
3106 */
3107 static int
3108 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3109 struct sockopt_data *sd)
3110 {
3111 struct _ipfw_obj_lheader *olh;
3112 ipfw_sopt_info *i;
3113 struct ipfw_sopt_handler *sh;
3114 uint32_t count, n, size;
3115
3116 olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
3117 if (olh == NULL)
3118 return (EINVAL);
3119 if (sd->valsize < olh->size)
3120 return (EINVAL);
3121
3122 CTL3_LOCK();
3123 count = ctl3_hsize;
3124 size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);
3125
3126 	/* Fill in header regardless of buffer size */
3127 olh->count = count;
3128 olh->objsize = sizeof(ipfw_sopt_info);
3129
3130 if (size > olh->size) {
3131 olh->size = size;
3132 CTL3_UNLOCK();
3133 return (ENOMEM);
3134 }
3135 olh->size = size;
3136
3137 for (n = 1; n <= count; n++) {
3138 i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
3139 KASSERT(i != NULL, ("previously checked buffer is not enough"));
3140 sh = &ctl3_handlers[n];
3141 i->opcode = sh->opcode;
3142 i->version = sh->version;
3143 i->refcnt = sh->refcnt;
3144 }
3145 CTL3_UNLOCK();
3146
3147 return (0);
3148 }
3149
3150 /*
3151 * Compares two opcodes.
3152 * Used both in qsort() and bsearch().
3153 *
3154 * Returns 0 if match is found.
3155 */
3156 static int
3157 compare_opcodes(const void *_a, const void *_b)
3158 {
3159 const struct opcode_obj_rewrite *a, *b;
3160
3161 a = (const struct opcode_obj_rewrite *)_a;
3162 b = (const struct opcode_obj_rewrite *)_b;
3163
3164 if (a->opcode < b->opcode)
3165 return (-1);
3166 else if (a->opcode > b->opcode)
3167 return (1);
3168
3169 return (0);
3170 }
3171
3172 /*
3173 * XXX: Rewrite bsearch()
3174 */
3175 static int
3176 find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo,
3177 struct opcode_obj_rewrite **phi)
3178 {
3179 struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw;
3180
3181 memset(&h, 0, sizeof(h));
3182 h.opcode = op;
3183
3184 rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters,
3185 ctl3_rsize, sizeof(h), compare_opcodes);
3186 if (rw == NULL)
3187 return (1);
3188
3189 /* Find the first element matching the same opcode */
3190 lo = rw;
3191 for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--)
3192 ;
3193
3194 /* Find the last element matching the same opcode */
3195 hi = rw;
3196 ctl3_max = ctl3_rewriters + ctl3_rsize;
3197 for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++)
3198 ;
3199
3200 *plo = lo;
3201 *phi = hi;
3202
3203 return (0);
3204 }
3205
3206 /*
3207 * Finds opcode object rewriter based on @code.
3208 *
3209 * Returns pointer to handler or NULL.
3210 */
3211 static struct opcode_obj_rewrite *
3212 find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
3213 {
3214 struct opcode_obj_rewrite *rw, *lo, *hi;
3215 uint16_t uidx;
3216 uint8_t subtype;
3217
3218 if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0)
3219 return (NULL);
3220
3221 for (rw = lo; rw <= hi; rw++) {
3222 if (rw->classifier(cmd, &uidx, &subtype) == 0) {
3223 if (puidx != NULL)
3224 *puidx = uidx;
3225 if (ptype != NULL)
3226 *ptype = subtype;
3227 return (rw);
3228 }
3229 }
3230
3231 return (NULL);
3232 }
3233 int
3234 classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx)
3235 {
3236
3237 if (find_op_rw(cmd, puidx, NULL) == NULL)
3238 return (1);
3239 return (0);
3240 }
3241
3242 void
3243 update_opcode_kidx(ipfw_insn *cmd, uint16_t idx)
3244 {
3245 struct opcode_obj_rewrite *rw;
3246
3247 rw = find_op_rw(cmd, NULL, NULL);
3248 KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode));
3249 rw->update(cmd, idx);
3250 }
3251
3252 void
3253 ipfw_init_obj_rewriter(void)
3254 {
3255
3256 ctl3_rewriters = NULL;
3257 ctl3_rsize = 0;
3258 }
3259
3260 void
3261 ipfw_destroy_obj_rewriter(void)
3262 {
3263
3264 if (ctl3_rewriters != NULL)
3265 free(ctl3_rewriters, M_IPFW);
3266 ctl3_rewriters = NULL;
3267 ctl3_rsize = 0;
3268 }
3269
3270 /*
3271 * Adds one or more opcode object rewrite handlers to the global array.
3272 * Function may sleep.
3273 */
3274 void
3275 ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3276 {
3277 size_t sz;
3278 struct opcode_obj_rewrite *tmp;
3279
3280 CTL3_LOCK();
3281
3282 for (;;) {
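		/*
		 * M_WAITOK allocation may sleep, so sample the required size,
		 * drop the lock for malloc() and re-check under the lock: if
		 * more rewriters were registered meanwhile, the buffer may be
		 * too small and the allocation is retried.
		 */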
3283 sz = ctl3_rsize + count;
3284 CTL3_UNLOCK();
3285 tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
3286 CTL3_LOCK();
3287 if (ctl3_rsize + count <= sz)
3288 break;
3289
3290 /* Retry */
3291 free(tmp, M_IPFW);
3292 }
3293
3294 /* Merge old & new arrays */
3295 sz = ctl3_rsize + count;
3296 memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
3297 memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
3298 qsort(tmp, sz, sizeof(*rw), compare_opcodes);
3299 /* Switch new and free old */
3300 if (ctl3_rewriters != NULL)
3301 free(ctl3_rewriters, M_IPFW);
3302 ctl3_rewriters = tmp;
3303 ctl3_rsize = sz;
3304
3305 CTL3_UNLOCK();
3306 }
3307
3308 /*
3309 * Removes one or more object rewrite handlers from the global array.
3310 */
3311 int
3312 ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3313 {
3314 size_t sz;
3315 struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi;
3316 int i;
3317
3318 CTL3_LOCK();
3319
3320 for (i = 0; i < count; i++) {
3321 if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0)
3322 continue;
3323
3324 for (ktmp = lo; ktmp <= hi; ktmp++) {
3325 if (ktmp->classifier != rw[i].classifier)
3326 continue;
3327
3328 ctl3_max = ctl3_rewriters + ctl3_rsize;
3329 sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp);
3330 memmove(ktmp, ktmp + 1, sz);
3331 ctl3_rsize--;
3332 break;
3333 }
3334 }
3335
3336 if (ctl3_rsize == 0) {
3337 if (ctl3_rewriters != NULL)
3338 free(ctl3_rewriters, M_IPFW);
3339 ctl3_rewriters = NULL;
3340 }
3341
3342 CTL3_UNLOCK();
3343
3344 return (0);
3345 }
3346
3347 static int
3348 export_objhash_ntlv_internal(struct namedobj_instance *ni,
3349 struct named_object *no, void *arg)
3350 {
3351 struct sockopt_data *sd;
3352 ipfw_obj_ntlv *ntlv;
3353
3354 sd = (struct sockopt_data *)arg;
3355 ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
3356 if (ntlv == NULL)
3357 return (ENOMEM);
3358 ipfw_export_obj_ntlv(no, ntlv);
3359 return (0);
3360 }
3361
3362 /*
3363 * Lists all service objects.
3364 * Data layout (v0)(current):
3365 * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size
3366 * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
3367 * Returns 0 on success
3368 */
3369 static int
3370 dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3371 struct sockopt_data *sd)
3372 {
3373 ipfw_obj_lheader *hdr;
3374 int count;
3375
3376 hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
3377 if (hdr == NULL)
3378 return (EINVAL);
3379
3380 IPFW_UH_RLOCK(chain);
3381 count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
3382 hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
3383 if (sd->valsize < hdr->size) {
3384 IPFW_UH_RUNLOCK(chain);
3385 return (ENOMEM);
3386 }
3387 hdr->count = count;
3388 hdr->objsize = sizeof(ipfw_obj_ntlv);
3389 if (count > 0)
3390 ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
3391 export_objhash_ntlv_internal, sd);
3392 IPFW_UH_RUNLOCK(chain);
3393 return (0);
3394 }
3395
3396 /*
3397 * Compares two sopt handlers (code, version and handler ptr).
3398 * Used both in qsort() and bsearch().
3399 * Does not compare the handler pointer in the latter case.
3400 *
3401 * Returns 0 if match is found.
3402 */
3403 static int
3404 compare_sh(const void *_a, const void *_b)
3405 {
3406 const struct ipfw_sopt_handler *a, *b;
3407
3408 a = (const struct ipfw_sopt_handler *)_a;
3409 b = (const struct ipfw_sopt_handler *)_b;
3410
3411 if (a->opcode < b->opcode)
3412 return (-1);
3413 else if (a->opcode > b->opcode)
3414 return (1);
3415
3416 if (a->version < b->version)
3417 return (-1);
3418 else if (a->version > b->version)
3419 return (1);
3420
3421 /* bsearch helper */
3422 if (a->handler == NULL)
3423 return (0);
3424
3425 if ((uintptr_t)a->handler < (uintptr_t)b->handler)
3426 return (-1);
3427 else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
3428 return (1);
3429
3430 return (0);
3431 }
3432
3433 /*
3434 * Finds sopt handler based on @code and @version.
3435 *
3436 * Returns pointer to handler or NULL.
3437 */
3438 static struct ipfw_sopt_handler *
3439 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
3440 {
3441 struct ipfw_sopt_handler *sh, h;
3442
3443 memset(&h, 0, sizeof(h));
3444 h.opcode = code;
3445 h.version = version;
3446 h.handler = handler;
3447
3448 sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
3449 ctl3_hsize, sizeof(h), compare_sh);
3450
3451 return (sh);
3452 }
3453
3454 static int
3455 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
3456 {
3457 struct ipfw_sopt_handler *sh;
3458
3459 CTL3_LOCK();
3460 if ((sh = find_sh(opcode, version, NULL)) == NULL) {
3461 CTL3_UNLOCK();
3462 printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n",
3463 opcode, version);
3464 return (EINVAL);
3465 }
3466 sh->refcnt++;
3467 ctl3_refct++;
3468 /* Copy handler data to requested buffer */
3469 *psh = *sh;
3470 CTL3_UNLOCK();
3471
3472 return (0);
3473 }
3474
3475 static void
3476 find_unref_sh(struct ipfw_sopt_handler *psh)
3477 {
3478 struct ipfw_sopt_handler *sh;
3479
3480 CTL3_LOCK();
3481 sh = find_sh(psh->opcode, psh->version, NULL);
3482 KASSERT(sh != NULL, ("ctl3 handler disappeared"));
3483 sh->refcnt--;
3484 ctl3_refct--;
3485 CTL3_UNLOCK();
3486 }
3487
3488 void
3489 ipfw_init_sopt_handler(void)
3490 {
3491
3492 CTL3_LOCK_INIT();
3493 IPFW_ADD_SOPT_HANDLER(1, scodes);
3494 }
3495
3496 void
3497 ipfw_destroy_sopt_handler(void)
3498 {
3499
3500 IPFW_DEL_SOPT_HANDLER(1, scodes);
3501 CTL3_LOCK_DESTROY();
3502 }
3503
3504 /*
3505 * Adds one or more sockopt handlers to the global array.
3506 * Function may sleep.
3507 */
3508 void
3509 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3510 {
3511 size_t sz;
3512 struct ipfw_sopt_handler *tmp;
3513
3514 CTL3_LOCK();
3515
3516 for (;;) {
3517 sz = ctl3_hsize + count;
3518 CTL3_UNLOCK();
3519 tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
3520 CTL3_LOCK();
3521 if (ctl3_hsize + count <= sz)
3522 break;
3523
3524 /* Retry */
3525 free(tmp, M_IPFW);
3526 }
3527
3528 /* Merge old & new arrays */
3529 sz = ctl3_hsize + count;
3530 memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
3531 memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
3532 qsort(tmp, sz, sizeof(*sh), compare_sh);
3533 /* Switch new and free old */
3534 if (ctl3_handlers != NULL)
3535 free(ctl3_handlers, M_IPFW);
3536 ctl3_handlers = tmp;
3537 ctl3_hsize = sz;
3538 ctl3_gencnt++;
3539
3540 CTL3_UNLOCK();
3541 }
3542
3543 /*
3544 * Removes one or more sockopt handlers from the global array.
3545 */
3546 int
3547 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3548 {
3549 size_t sz;
3550 struct ipfw_sopt_handler *tmp, *h;
3551 int i;
3552
3553 CTL3_LOCK();
3554
3555 for (i = 0; i < count; i++) {
3556 tmp = &sh[i];
3557 h = find_sh(tmp->opcode, tmp->version, tmp->handler);
3558 if (h == NULL)
3559 continue;
3560
3561 sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h);
3562 memmove(h, h + 1, sz);
3563 ctl3_hsize--;
3564 }
3565
3566 if (ctl3_hsize == 0) {
3567 if (ctl3_handlers != NULL)
3568 free(ctl3_handlers, M_IPFW);
3569 ctl3_handlers = NULL;
3570 }
3571
3572 ctl3_gencnt++;
3573
3574 CTL3_UNLOCK();
3575
3576 return (0);
3577 }
3578
3579 /*
3580 * Writes data accumulated in @sd to sockopt buffer.
3581 * Zeroes internal @sd buffer.
3582 */
3583 static int
3584 ipfw_flush_sopt_data(struct sockopt_data *sd)
3585 {
3586 struct sockopt *sopt;
3587 int error;
3588 size_t sz;
3589
3590 sz = sd->koff;
3591 if (sz == 0)
3592 return (0);
3593
3594 sopt = sd->sopt;
3595
3596 if (sopt->sopt_dir == SOPT_GET) {
3597 error = copyout(sd->kbuf, sopt->sopt_val, sz);
3598 if (error != 0)
3599 return (error);
3600 }
3601
3602 memset(sd->kbuf, 0, sd->ksize);
3603 sd->ktotal += sz;
3604 sd->koff = 0;
3605 if (sd->ktotal + sd->ksize < sd->valsize)
3606 sd->kavail = sd->ksize;
3607 else
3608 sd->kavail = sd->valsize - sd->ktotal;
3609
3610 /* Update sopt buffer data */
3611 sopt->sopt_valsize = sd->ktotal;
3612 sopt->sopt_val = sd->sopt_val + sd->ktotal;
3613
3614 return (0);
3615 }
3616
3617 /*
3618 * Ensures that @sd buffer has a contiguous @needed number of
3619 * bytes.
3620 *
3621 * Returns pointer to requested space or NULL.
3622 */
3623 caddr_t
3624 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
3625 {
3626 int error;
3627 caddr_t addr;
3628
3629 if (sd->kavail < needed) {
3630 /*
3631 * Flush data and try another time.
3632 */
3633 error = ipfw_flush_sopt_data(sd);
3634
3635 if (sd->kavail < needed || error != 0)
3636 return (NULL);
3637 }
3638
3639 addr = sd->kbuf + sd->koff;
3640 sd->koff += needed;
3641 sd->kavail -= needed;
3642 return (addr);
3643 }
3644
3645 /*
3646 * Requests @needed contiguous bytes from @sd buffer.
3647 * Function is used to notify the subsystem that we are
3648 * interested in the first @needed bytes (the request header)
3649 * and the rest of the buffer can be safely zeroed.
3650 *
3651 * Returns pointer to requested space or NULL.
3652 */
3653 caddr_t
3654 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
3655 {
3656 caddr_t addr;
3657
3658 if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
3659 return (NULL);
3660
3661 if (sd->kavail > 0)
3662 memset(sd->kbuf + sd->koff, 0, sd->kavail);
3663
3664 return (addr);
3665 }
3666
3667 /*
3668 * New sockopt handler.
3669 */
3670 int
3671 ipfw_ctl3(struct sockopt *sopt)
3672 {
3673 int error, locked;
3674 size_t size, valsize;
3675 struct ip_fw_chain *chain;
3676 char xbuf[256];
3677 struct sockopt_data sdata;
3678 struct ipfw_sopt_handler h;
3679 ip_fw3_opheader *op3 = NULL;
3680
3681 error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
3682 if (error != 0)
3683 return (error);
3684
3685 if (sopt->sopt_name != IP_FW3)
3686 return (ipfw_ctl(sopt));
3687
3688 chain = &V_layer3_chain;
3689 error = 0;
3690
3691 /* Save original valsize before it is altered via sooptcopyin() */
3692 valsize = sopt->sopt_valsize;
3693 memset(&sdata, 0, sizeof(sdata));
3694 /* Read op3 header first to determine actual operation */
3695 op3 = (ip_fw3_opheader *)xbuf;
3696 error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
3697 if (error != 0)
3698 return (error);
3699 sopt->sopt_valsize = valsize;
3700
3701 /*
3702 * Find and reference command.
3703 */
3704 error = find_ref_sh(op3->opcode, op3->version, &h);
3705 if (error != 0)
3706 return (error);
3707
3708 /*
3709 * Disallow modifications in really-really secure mode, but still allow
3710 * the logging counters to be reset.
3711 */
3712 if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) {
3713 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3714 if (error != 0) {
3715 find_unref_sh(&h);
3716 return (error);
3717 }
3718 }
3719
3720 /*
3721 * Fill in sockopt_data structure that may be useful for
3722 * IP_FW3 get requests.
3723 */
3724 locked = 0;
3725 if (valsize <= sizeof(xbuf)) {
3726 /* use on-stack buffer */
3727 sdata.kbuf = xbuf;
3728 sdata.ksize = sizeof(xbuf);
3729 sdata.kavail = valsize;
3730 } else {
3731 /*
3732 * Determine opcode type/buffer size:
3733 * allocate sliding-window buf for data export or
3734 * contiguous buffer for special ops.
3735 */
3736 if ((h.dir & HDIR_SET) != 0) {
3737 			/* Set request. Allocate contiguous buffer. */
3738 if (valsize > CTL3_LARGEBUF) {
3739 find_unref_sh(&h);
3740 return (EFBIG);
3741 }
3742
3743 size = valsize;
3744 } else {
3745 /* Get request. Allocate sliding window buffer */
3746 size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF;
3747
3748 if (size < valsize) {
3749 /* We have to wire user buffer */
3750 error = vslock(sopt->sopt_val, valsize);
3751 if (error != 0)
3752 return (error);
3753 locked = 1;
3754 }
3755 }
3756
3757 sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3758 sdata.ksize = size;
3759 sdata.kavail = size;
3760 }
3761
3762 sdata.sopt = sopt;
3763 sdata.sopt_val = sopt->sopt_val;
3764 sdata.valsize = valsize;
3765
3766 /*
3767 	 * Copy either the whole request (if valsize < bsize_max)
3768 	 * or the first bsize_max bytes, to guarantee to most consumers
3769 	 * that all necessary data has been copied.
3770 	 * In any case, copy not less than sizeof(ip_fw3_opheader).
3771 */
3772 if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
3773 sizeof(ip_fw3_opheader))) != 0)
3774 return (error);
3775 op3 = (ip_fw3_opheader *)sdata.kbuf;
3776
3777 /* Finally, run handler */
3778 error = h.handler(chain, op3, &sdata);
3779 find_unref_sh(&h);
3780
3781 /* Flush state and free buffers */
3782 if (error == 0)
3783 error = ipfw_flush_sopt_data(&sdata);
3784 else
3785 ipfw_flush_sopt_data(&sdata);
3786
3787 if (locked != 0)
3788 vsunlock(sdata.sopt_val, valsize);
3789
3790 /* Restore original pointer and set number of bytes written */
3791 sopt->sopt_val = sdata.sopt_val;
3792 sopt->sopt_valsize = sdata.ktotal;
3793 if (sdata.kbuf != xbuf)
3794 free(sdata.kbuf, M_TEMP);
3795
3796 return (error);
3797 }
3798
3799 /**
3800 * {set|get}sockopt parser.
3801 */
3802 int
3803 ipfw_ctl(struct sockopt *sopt)
3804 {
3805 #define RULE_MAXSIZE (512*sizeof(u_int32_t))
3806 int error;
3807 size_t size;
3808 struct ip_fw *buf;
3809 struct ip_fw_rule0 *rule;
3810 struct ip_fw_chain *chain;
3811 u_int32_t rulenum[2];
3812 uint32_t opt;
3813 struct rule_check_info ci;
3814 IPFW_RLOCK_TRACKER;
3815
3816 chain = &V_layer3_chain;
3817 error = 0;
3818
3819 opt = sopt->sopt_name;
3820
3821 /*
3822 * Disallow modifications in really-really secure mode, but still allow
3823 * the logging counters to be reset.
3824 */
3825 if (opt == IP_FW_ADD ||
3826 (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) {
3827 error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3828 if (error != 0)
3829 return (error);
3830 }
3831
3832 switch (opt) {
3833 case IP_FW_GET:
3834 /*
3835 		 * Pass up a copy of the current rules. Static rules
3836 		 * come first (the last of which has number IPFW_DEFAULT_RULE),
3837 		 * followed by a possibly empty list of dynamic rules.
3838 * The last dynamic rule has NULL in the "next" field.
3839 *
3840 * Note that the calculated size is used to bound the
3841 * amount of data returned to the user. The rule set may
3842 * change between calculating the size and returning the
3843 * data in which case we'll just return what fits.
3844 */
3845 for (;;) {
3846 int len = 0, want;
3847
3848 size = chain->static_len;
3849 size += ipfw_dyn_len();
3850 if (size >= sopt->sopt_valsize)
3851 break;
3852 buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3853 IPFW_UH_RLOCK(chain);
3854 /* check again how much space we need */
3855 want = chain->static_len + ipfw_dyn_len();
3856 if (size >= want)
3857 len = ipfw_getrules(chain, buf, size);
3858 IPFW_UH_RUNLOCK(chain);
3859 if (size >= want)
3860 error = sooptcopyout(sopt, buf, len);
3861 free(buf, M_TEMP);
3862 if (size >= want)
3863 break;
3864 }
3865 break;
3866
3867 case IP_FW_FLUSH:
3868 /* locking is done within del_entry() */
3869 error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
3870 break;
3871
3872 case IP_FW_ADD:
3873 rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
3874 error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
3875 sizeof(struct ip_fw7) );
3876
3877 memset(&ci, 0, sizeof(struct rule_check_info));
3878
3879 /*
3880 * If the size of commands equals RULESIZE7 then we assume
3881 * a FreeBSD7.2 binary is talking to us (set is7=1).
3882 * is7 is persistent so the next 'ipfw list' command
3883 * will use this format.
3884 		 * NOTE: If the wrong version is guessed (this can happen if
3885 		 * the first ipfw command is 'ipfw [pipe] list')
3886 		 * the ipfw binary may crash or loop infinitely...
3887 */
3888 size = sopt->sopt_valsize;
3889 if (size == RULESIZE7(rule)) {
3890 is7 = 1;
3891 error = convert_rule_to_8(rule);
3892 if (error) {
3893 free(rule, M_TEMP);
3894 return error;
3895 }
3896 size = RULESIZE(rule);
3897 } else
3898 is7 = 0;
3899 if (error == 0)
3900 error = check_ipfw_rule0(rule, size, &ci);
3901 if (error == 0) {
3902 /* locking is done within add_rule() */
3903 struct ip_fw *krule;
3904 krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule));
3905 ci.urule = (caddr_t)rule;
3906 ci.krule = krule;
3907 import_rule0(&ci);
3908 error = commit_rules(chain, &ci, 1);
3909 if (error != 0)
3910 ipfw_free_rule(ci.krule);
3911 else if (sopt->sopt_dir == SOPT_GET) {
3912 if (is7) {
3913 error = convert_rule_to_7(rule);
3914 size = RULESIZE7(rule);
3915 if (error) {
3916 free(rule, M_TEMP);
3917 return error;
3918 }
3919 }
3920 error = sooptcopyout(sopt, rule, size);
3921 }
3922 }
3923 free(rule, M_TEMP);
3924 break;
3925
3926 case IP_FW_DEL:
3927 /*
3928 * IP_FW_DEL is used for deleting single rules or sets,
3929 * and (ab)used to atomically manipulate sets. Argument size
3930 * is used to distinguish between the two:
3931 * sizeof(u_int32_t)
3932 * delete single rule or set of rules,
3933 * or reassign rules (or sets) to a different set.
3934 * 2*sizeof(u_int32_t)
3935 * atomic disable/enable sets.
3936 * first u_int32_t contains sets to be disabled,
3937 * second u_int32_t contains sets to be enabled.
3938 */
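		/*
		 * Illustrative userland calls for the two forms (a sketch,
		 * not taken from this file):
		 *
		 *	u_int32_t num = 100;
		 *	setsockopt(s, IPPROTO_IP, IP_FW_DEL, &num, sizeof(num));
		 *
		 *	u_int32_t sets[2] = { 1 << 3, 1 << 4 };
		 *	setsockopt(s, IPPROTO_IP, IP_FW_DEL, sets, sizeof(sets));
		 *	// the second form disables set 3 and enables set 4
		 */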
3939 error = sooptcopyin(sopt, rulenum,
3940 2*sizeof(u_int32_t), sizeof(u_int32_t));
3941 if (error)
3942 break;
3943 size = sopt->sopt_valsize;
3944 if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
3945 /* delete or reassign, locking done in del_entry() */
3946 error = del_entry(chain, rulenum[0]);
3947 } else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
3948 IPFW_UH_WLOCK(chain);
3949 V_set_disable =
3950 (V_set_disable | rulenum[0]) & ~rulenum[1] &
3951 ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
3952 IPFW_UH_WUNLOCK(chain);
3953 } else
3954 error = EINVAL;
3955 break;
3956
3957 case IP_FW_ZERO:
3958 	case IP_FW_RESETLOG: /* argument is a u_int32_t, the rule number */
3959 rulenum[0] = 0;
3960 if (sopt->sopt_val != 0) {
3961 error = sooptcopyin(sopt, rulenum,
3962 sizeof(u_int32_t), sizeof(u_int32_t));
3963 if (error)
3964 break;
3965 }
3966 error = zero_entry(chain, rulenum[0],
3967 sopt->sopt_name == IP_FW_RESETLOG);
3968 break;
3969
3970 /*--- TABLE opcodes ---*/
3971 case IP_FW_TABLE_ADD:
3972 case IP_FW_TABLE_DEL:
3973 {
3974 ipfw_table_entry ent;
3975 struct tentry_info tei;
3976 struct tid_info ti;
3977 struct table_value v;
3978
3979 error = sooptcopyin(sopt, &ent,
3980 sizeof(ent), sizeof(ent));
3981 if (error)
3982 break;
3983
3984 memset(&tei, 0, sizeof(tei));
3985 tei.paddr = &ent.addr;
3986 tei.subtype = AF_INET;
3987 tei.masklen = ent.masklen;
3988 ipfw_import_table_value_legacy(ent.value, &v);
3989 tei.pvalue = &v;
3990 memset(&ti, 0, sizeof(ti));
3991 ti.uidx = ent.tbl;
3992 ti.type = IPFW_TABLE_CIDR;
3993
3994 error = (opt == IP_FW_TABLE_ADD) ?
3995 add_table_entry(chain, &ti, &tei, 0, 1) :
3996 del_table_entry(chain, &ti, &tei, 0, 1);
3997 }
3998 break;
3999
4000 case IP_FW_TABLE_FLUSH:
4001 {
4002 u_int16_t tbl;
4003 struct tid_info ti;
4004
4005 error = sooptcopyin(sopt, &tbl,
4006 sizeof(tbl), sizeof(tbl));
4007 if (error)
4008 break;
4009 memset(&ti, 0, sizeof(ti));
4010 ti.uidx = tbl;
4011 error = flush_table(chain, &ti);
4012 }
4013 break;
4014
4015 case IP_FW_TABLE_GETSIZE:
4016 {
4017 u_int32_t tbl, cnt;
4018 struct tid_info ti;
4019
4020 if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
4021 sizeof(tbl))))
4022 break;
4023 memset(&ti, 0, sizeof(ti));
4024 ti.uidx = tbl;
4025 IPFW_RLOCK(chain);
4026 error = ipfw_count_table(chain, &ti, &cnt);
4027 IPFW_RUNLOCK(chain);
4028 if (error)
4029 break;
4030 error = sooptcopyout(sopt, &cnt, sizeof(cnt));
4031 }
4032 break;
4033
4034 case IP_FW_TABLE_LIST:
4035 {
4036 ipfw_table *tbl;
4037 struct tid_info ti;
4038
4039 if (sopt->sopt_valsize < sizeof(*tbl)) {
4040 error = EINVAL;
4041 break;
4042 }
4043 size = sopt->sopt_valsize;
4044 tbl = malloc(size, M_TEMP, M_WAITOK);
4045 error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
4046 if (error) {
4047 free(tbl, M_TEMP);
4048 break;
4049 }
4050 tbl->size = (size - sizeof(*tbl)) /
4051 sizeof(ipfw_table_entry);
4052 memset(&ti, 0, sizeof(ti));
4053 ti.uidx = tbl->tbl;
4054 IPFW_RLOCK(chain);
4055 error = ipfw_dump_table_legacy(chain, &ti, tbl);
4056 IPFW_RUNLOCK(chain);
4057 if (error) {
4058 free(tbl, M_TEMP);
4059 break;
4060 }
4061 error = sooptcopyout(sopt, tbl, size);
4062 free(tbl, M_TEMP);
4063 }
4064 break;
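
		/*
		 * Example (sketch): the usual userland sequence for dumping
		 * a legacy table is a two-step getsockopt() exchange, with
		 * the IP_FW_TABLE_LIST buffer sized from the
		 * IP_FW_TABLE_GETSIZE result.  Structure fields follow the
		 * legacy ipfw_table layout and this is an illustration only;
		 * the same u_int32_t carries the table number in and the
		 * entry count back out of GETSIZE.
		 *
		 *	u_int32_t arg = 1;
		 *	socklen_t l = sizeof(arg);
		 *	ipfw_table *tbl;
		 *
		 *	getsockopt(s, IPPROTO_IP, IP_FW_TABLE_GETSIZE, &arg, &l);
		 *	l = sizeof(*tbl) + arg * sizeof(ipfw_table_entry);
		 *	tbl = calloc(1, l);
		 *	tbl->tbl = 1;
		 *	getsockopt(s, IPPROTO_IP, IP_FW_TABLE_LIST, tbl, &l);
		 */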
4065
4066 /*--- NAT operations are protected by the IPFW_LOCK ---*/
4067 case IP_FW_NAT_CFG:
4068 if (IPFW_NAT_LOADED)
4069 error = ipfw_nat_cfg_ptr(sopt);
4070 else {
4071 printf("IP_FW_NAT_CFG: %s\n",
4072 "ipfw_nat not present, please load it");
4073 error = EINVAL;
4074 }
4075 break;
4076
4077 case IP_FW_NAT_DEL:
4078 if (IPFW_NAT_LOADED)
4079 error = ipfw_nat_del_ptr(sopt);
4080 else {
4081 printf("IP_FW_NAT_DEL: %s\n",
4082 "ipfw_nat not present, please load it");
4083 error = EINVAL;
4084 }
4085 break;
4086
4087 case IP_FW_NAT_GET_CONFIG:
4088 if (IPFW_NAT_LOADED)
4089 error = ipfw_nat_get_cfg_ptr(sopt);
4090 else {
4091 printf("IP_FW_NAT_GET_CFG: %s\n",
4092 "ipfw_nat not present, please load it");
4093 error = EINVAL;
4094 }
4095 break;
4096
4097 case IP_FW_NAT_GET_LOG:
4098 if (IPFW_NAT_LOADED)
4099 error = ipfw_nat_get_log_ptr(sopt);
4100 else {
4101 printf("IP_FW_NAT_GET_LOG: %s\n",
4102 "ipfw_nat not present, please load it");
4103 error = EINVAL;
4104 }
4105 break;
4106
4107 default:
4108 printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
4109 error = EINVAL;
4110 }
4111
4112 return (error);
4113 #undef RULE_MAXSIZE
4114 }
4115 #define RULE_MAXSIZE (256*sizeof(u_int32_t))
4116
4117 /* Functions to convert rules 7.2 <==> 8.0 */
4118 static int
4119 convert_rule_to_7(struct ip_fw_rule0 *rule)
4120 {
4121 /* Used to modify original rule */
4122 struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
4123 /* copy of original rule, version 8 */
4124 struct ip_fw_rule0 *tmp;
4125
4126 /* Used to copy commands */
4127 ipfw_insn *ccmd, *dst;
4128 int ll = 0, ccmdlen = 0;
4129
4130 tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4131 if (tmp == NULL) {
4132 		return (1);	/* XXX error */
4133 }
4134 bcopy(rule, tmp, RULE_MAXSIZE);
4135
4136 /* Copy fields */
4137 //rule7->_pad = tmp->_pad;
4138 rule7->set = tmp->set;
4139 rule7->rulenum = tmp->rulenum;
4140 rule7->cmd_len = tmp->cmd_len;
4141 rule7->act_ofs = tmp->act_ofs;
4142 rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
4144 rule7->pcnt = tmp->pcnt;
4145 rule7->bcnt = tmp->bcnt;
4146 rule7->timestamp = tmp->timestamp;
4147
4148 /* Copy commands */
4149 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
4150 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4151 ccmdlen = F_LEN(ccmd);
4152
4153 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4154
4155 if (dst->opcode > O_NAT)
4156 			/* O_REASS does not exist in the 7.2 version, so
4157 			 * decrement opcodes that follow O_NAT.
4158 			 */
4159 dst->opcode--;
4160
4161 		if (ccmdlen > ll) {
4162 			printf("ipfw: opcode %d size truncated\n", ccmd->opcode);
4163 			free(tmp, M_TEMP);
4164 			return (EINVAL);
4165 		}
4166 }
4167 free(tmp, M_TEMP);
4168
4169 return 0;
4170 }
4171
4172 static int
4173 convert_rule_to_8(struct ip_fw_rule0 *rule)
4174 {
4175 /* Used to modify original rule */
4176 struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
4177
4178 /* Used to copy commands */
4179 ipfw_insn *ccmd, *dst;
4180 int ll = 0, ccmdlen = 0;
4181
4182 /* Copy of original rule */
4183 struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4184 if (tmp == NULL) {
4185 		return (1);	/* XXX error */
4186 }
4187
4188 bcopy(rule7, tmp, RULE_MAXSIZE);
4189
4190 for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
4191 ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4192 ccmdlen = F_LEN(ccmd);
4193
4194 bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4195
4196 if (dst->opcode > O_NAT)
4197 			/* O_REASS does not exist in the 7.2 version, so
4198 			 * increment opcodes that follow O_NAT.
4199 			 */
4200 dst->opcode++;
4201
4202 		if (ccmdlen > ll) {
4203 			printf("ipfw: opcode %d size truncated\n", ccmd->opcode);
4204 			free(tmp, M_TEMP);
4205 			return (EINVAL);
4206 		}
4207 }
4208
4209 rule->_pad = tmp->_pad;
4210 rule->set = tmp->set;
4211 rule->rulenum = tmp->rulenum;
4212 rule->cmd_len = tmp->cmd_len;
4213 rule->act_ofs = tmp->act_ofs;
4214 rule->next_rule = (struct ip_fw *)tmp->next_rule;
4216 	rule->id = 0; /* XXX check whether 0 is OK here */
4217 rule->pcnt = tmp->pcnt;
4218 rule->bcnt = tmp->bcnt;
4219 rule->timestamp = tmp->timestamp;
4220
4221 	free(tmp, M_TEMP);
4222 return 0;
4223 }
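
/*
 * Opcode remapping note (sketch): in the 8.0 numbering O_REASS sits right
 * after O_NAT, so convert_rule_to_7() shifts every opcode greater than
 * O_NAT down by one (e.g. a value of O_NAT + 2 is stored as O_NAT + 1 in
 * the 7.2 image), and convert_rule_to_8() applies the inverse shift.
 */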
4224
4225 /*
4226  * Named object API.
4227  */
4229
4230 void
4231 ipfw_init_srv(struct ip_fw_chain *ch)
4232 {
4233
4234 ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
4235 ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT,
4236 M_IPFW, M_WAITOK | M_ZERO);
4237 }
4238
4239 void
4240 ipfw_destroy_srv(struct ip_fw_chain *ch)
4241 {
4242
4243 free(ch->srvstate, M_IPFW);
4244 ipfw_objhash_destroy(ch->srvmap);
4245 }
4246
4247 /*
4248 * Allocate new bitmask which can be used to enlarge/shrink
4249 * named instance index.
4250 */
4251 void
4252 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
4253 {
4254 size_t size;
4255 int max_blocks;
4256 u_long *idx_mask;
4257
4258 KASSERT((items % BLOCK_ITEMS) == 0,
4259 	    ("bitmask size must be a power of 2 and at least %zu",
4260 BLOCK_ITEMS));
4261
4262 max_blocks = items / BLOCK_ITEMS;
4263 size = items / 8;
4264 idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
4265 /* Mark all as free */
4266 memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
4267 *idx_mask &= ~(u_long)1; /* Skip index 0 */
4268
4269 *idx = idx_mask;
4270 *pblocks = max_blocks;
4271 }
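
/*
 * Sizing example (sketch, assuming a 64-bit u_long): BLOCK_ITEMS is then
 * 64, so a request for items = 256 gives max_blocks = 256 / 64 = 4 and
 * size = 256 / 8 = 32 bytes per set, i.e. 32 * IPFW_MAX_SETS bytes in
 * total.  A set bit means "index free"; bit 0 is cleared above so index 0
 * is never handed out.
 */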
4272
4273 /*
4274 * Copy current bitmask index to new one.
4275 */
4276 void
4277 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
4278 {
4279 int old_blocks, new_blocks;
4280 u_long *old_idx, *new_idx;
4281 int i;
4282
4283 old_idx = ni->idx_mask;
4284 old_blocks = ni->max_blocks;
4285 new_idx = *idx;
4286 new_blocks = *blocks;
4287
4288 for (i = 0; i < IPFW_MAX_SETS; i++) {
4289 memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
4290 old_blocks * sizeof(u_long));
4291 }
4292 }
4293
4294 /*
4295 * Swaps current @ni index with new one.
4296 */
4297 void
4298 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
4299 {
4300 int old_blocks;
4301 u_long *old_idx;
4302
4303 old_idx = ni->idx_mask;
4304 old_blocks = ni->max_blocks;
4305
4306 ni->idx_mask = *idx;
4307 ni->max_blocks = *blocks;
4308
4309 /* Save old values */
4310 *idx = old_idx;
4311 *blocks = old_blocks;
4312 }
4313
4314 void
4315 ipfw_objhash_bitmap_free(void *idx, int blocks)
4316 {
4317
4318 free(idx, M_IPFW);
4319 }
4320
4321 /*
4322 * Creates named hash instance.
4323 * Must be called without holding any locks.
4324  * Returns pointer to the new instance.
4325 */
4326 struct namedobj_instance *
4327 ipfw_objhash_create(uint32_t items)
4328 {
4329 struct namedobj_instance *ni;
4330 int i;
4331 size_t size;
4332
4333 size = sizeof(struct namedobj_instance) +
4334 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
4335 sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;
4336
4337 ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
4338 ni->nn_size = NAMEDOBJ_HASH_SIZE;
4339 ni->nv_size = NAMEDOBJ_HASH_SIZE;
4340
4341 	ni->names = (struct namedobjects_head *)(ni + 1);
4342 ni->values = &ni->names[ni->nn_size];
4343
4344 for (i = 0; i < ni->nn_size; i++)
4345 TAILQ_INIT(&ni->names[i]);
4346
4347 for (i = 0; i < ni->nv_size; i++)
4348 TAILQ_INIT(&ni->values[i]);
4349
4350 /* Set default hashing/comparison functions */
4351 ni->hash_f = objhash_hash_name;
4352 ni->cmp_f = objhash_cmp_name;
4353
4354 /* Allocate bitmask separately due to possible resize */
4355 ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
4356
4357 return (ni);
4358 }
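
/*
 * Usage sketch for the API below: a consumer typically creates an
 * instance, allocates a runtime index for its object and links the object
 * by name and kidx.  This is an illustration under the assumption that
 * "no" is embedded in some module-private structure and that its name,
 * set and kidx fields have already been filled in appropriately.
 *
 *	struct namedobj_instance *ni;
 *	struct named_object *no;
 *	uint16_t kidx;
 *
 *	ni = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
 *	ipfw_objhash_alloc_idx(ni, &kidx);
 *	no->kidx = kidx;
 *	ipfw_objhash_add(ni, no);
 *	...
 *	ipfw_objhash_del(ni, no);
 *	ipfw_objhash_free_idx(ni, no->kidx);
 *	ipfw_objhash_destroy(ni);
 */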
4359
4360 void
4361 ipfw_objhash_destroy(struct namedobj_instance *ni)
4362 {
4363
4364 free(ni->idx_mask, M_IPFW);
4365 free(ni, M_IPFW);
4366 }
4367
4368 void
4369 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
4370 objhash_cmp_f *cmp_f)
4371 {
4372
4373 ni->hash_f = hash_f;
4374 ni->cmp_f = cmp_f;
4375 }
4376
4377 static uint32_t
4378 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set)
4379 {
4380
4381 return (fnv_32_str((const char *)name, FNV1_32_INIT));
4382 }
4383
4384 static int
4385 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set)
4386 {
4387
4388 if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set))
4389 return (0);
4390
4391 return (1);
4392 }
4393
4394 static uint32_t
4395 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
4396 {
4397 uint32_t v;
4398
4399 v = val % (ni->nv_size - 1);
4400
4401 return (v);
4402 }
4403
4404 struct named_object *
4405 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set,
4406 const char *name)
4407 {
4408 struct named_object *no;
4409 uint32_t hash;
4410
4411 hash = ni->hash_f(ni, name, set) % ni->nn_size;
4412
4413 TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4414 if (ni->cmp_f(no, name, set) == 0)
4415 return (no);
4416 }
4417
4418 return (NULL);
4419 }
4420
4421 /*
4422  * Finds the name TLV for object index @uidx.
4423  * Checks @tlvs for valid data inside.
4424  *
4425  * Returns pointer to the found TLV or NULL.
4426 */
4427 ipfw_obj_ntlv *
4428 ipfw_find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv)
4429 {
4430 ipfw_obj_ntlv *ntlv;
4431 uintptr_t pa, pe;
4432 int l;
4433
4434 pa = (uintptr_t)tlvs;
4435 pe = pa + len;
4436 l = 0;
4437 for (; pa < pe; pa += l) {
4438 ntlv = (ipfw_obj_ntlv *)pa;
4439 l = ntlv->head.length;
4440
4441 if (l != sizeof(*ntlv))
4442 return (NULL);
4443
4444 if (ntlv->idx != uidx)
4445 continue;
4446 /*
4447 		 * When userland has specified a zero TLV type, do
4448 		 * not compare it with @etlv. In some cases userland
4449 		 * does not know what type it should have. Use only
4450 		 * uidx and name to search for the named_object.
4451 */
4452 if (ntlv->head.type != 0 &&
4453 ntlv->head.type != (uint16_t)etlv)
4454 continue;
4455
4456 if (ipfw_check_object_name_generic(ntlv->name) != 0)
4457 return (NULL);
4458
4459 return (ntlv);
4460 }
4461
4462 return (NULL);
4463 }
4464
4465 /*
4466 * Finds object config based on either legacy index
4467 * or name in ntlv.
4468 * Note @ti structure contains unchecked data from userland.
4469 *
4470  * Returns 0 on success and fills in @pno with the found config.
4471 */
4472 int
4473 ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti,
4474 uint32_t etlv, struct named_object **pno)
4475 {
4476 char *name;
4477 ipfw_obj_ntlv *ntlv;
4478 uint32_t set;
4479
4480 if (ti->tlvs == NULL)
4481 return (EINVAL);
4482
4483 ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv);
4484 if (ntlv == NULL)
4485 return (EINVAL);
4486 name = ntlv->name;
4487
4488 /*
4489 	 * Use the set provided by @ti instead of the one in @ntlv.
4490 	 * This is needed because set handling differs depending on
4491 	 * V_fw_tables_sets.
4492 */
4493 set = ti->set;
4494 *pno = ipfw_objhash_lookup_name(ni, set, name);
4495 if (*pno == NULL)
4496 return (ESRCH);
4497 return (0);
4498 }
4499
4500 /*
4501 * Find named object by name, considering also its TLV type.
4502 */
4503 struct named_object *
4504 ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
4505 uint32_t type, const char *name)
4506 {
4507 struct named_object *no;
4508 uint32_t hash;
4509
4510 hash = ni->hash_f(ni, name, set) % ni->nn_size;
4511
4512 TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4513 if (ni->cmp_f(no, name, set) == 0 &&
4514 no->etlv == (uint16_t)type)
4515 return (no);
4516 }
4517
4518 return (NULL);
4519 }
4520
4521 struct named_object *
4522 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
4523 {
4524 struct named_object *no;
4525 uint32_t hash;
4526
4527 hash = objhash_hash_idx(ni, kidx);
4528
4529 TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
4530 if (no->kidx == kidx)
4531 return (no);
4532 }
4533
4534 return (NULL);
4535 }
4536
4537 int
4538 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
4539 struct named_object *b)
4540 {
4541
4542 if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
4543 return (1);
4544
4545 return (0);
4546 }
4547
4548 void
4549 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
4550 {
4551 uint32_t hash;
4552
4553 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4554 TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
4555
4556 hash = objhash_hash_idx(ni, no->kidx);
4557 TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
4558
4559 ni->count++;
4560 }
4561
4562 void
4563 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
4564 {
4565 uint32_t hash;
4566
4567 hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4568 TAILQ_REMOVE(&ni->names[hash], no, nn_next);
4569
4570 hash = objhash_hash_idx(ni, no->kidx);
4571 TAILQ_REMOVE(&ni->values[hash], no, nv_next);
4572
4573 ni->count--;
4574 }
4575
4576 uint32_t
4577 ipfw_objhash_count(struct namedobj_instance *ni)
4578 {
4579
4580 return (ni->count);
4581 }
4582
4583 uint32_t
4584 ipfw_objhash_count_type(struct namedobj_instance *ni, uint16_t type)
4585 {
4586 struct named_object *no;
4587 uint32_t count;
4588 int i;
4589
4590 count = 0;
4591 for (i = 0; i < ni->nn_size; i++) {
4592 TAILQ_FOREACH(no, &ni->names[i], nn_next) {
4593 if (no->etlv == type)
4594 count++;
4595 }
4596 }
4597 return (count);
4598 }
4599
4600 /*
4601  * Runs @f for each named object found.
4602  * It is safe to delete objects from the callback.
4603 */
4604 int
4605 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
4606 {
4607 struct named_object *no, *no_tmp;
4608 int i, ret;
4609
4610 for (i = 0; i < ni->nn_size; i++) {
4611 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4612 ret = f(ni, no, arg);
4613 if (ret != 0)
4614 return (ret);
4615 }
4616 }
4617 return (0);
4618 }
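
/*
 * Callback sketch for ipfw_objhash_foreach(): count the objects that
 * belong to a given set.  The helper and its cookie structure are purely
 * illustrative; only the objhash_cb_t signature is taken from this file.
 *
 *	struct set_counter {
 *		uint32_t set;
 *		uint32_t count;
 *	};
 *
 *	static int
 *	count_set_cb(struct namedobj_instance *ni, struct named_object *no,
 *	    void *arg)
 *	{
 *		struct set_counter *sc = arg;
 *
 *		if (no->set == sc->set)
 *			sc->count++;
 *		return (0);
 *	}
 *
 *	struct set_counter sc = { .set = 0, .count = 0 };
 *	ipfw_objhash_foreach(ni, count_set_cb, &sc);
 */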
4619
4620 /*
4621  * Runs @f for each named object of type @type.
4622  * It is safe to delete objects from the callback.
4623 */
4624 int
4625 ipfw_objhash_foreach_type(struct namedobj_instance *ni, objhash_cb_t *f,
4626 void *arg, uint16_t type)
4627 {
4628 struct named_object *no, *no_tmp;
4629 int i, ret;
4630
4631 for (i = 0; i < ni->nn_size; i++) {
4632 TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4633 if (no->etlv != type)
4634 continue;
4635 ret = f(ni, no, arg);
4636 if (ret != 0)
4637 return (ret);
4638 }
4639 }
4640 return (0);
4641 }
4642
4643 /*
4644  * Removes (frees) index @idx from the given instance.
4645 * Returns 0 on success.
4646 */
4647 int
4648 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx)
4649 {
4650 u_long *mask;
4651 int i, v;
4652
4653 i = idx / BLOCK_ITEMS;
4654 v = idx % BLOCK_ITEMS;
4655
4656 if (i >= ni->max_blocks)
4657 return (1);
4658
4659 mask = &ni->idx_mask[i];
4660
4661 if ((*mask & ((u_long)1 << v)) != 0)
4662 return (1);
4663
4664 /* Mark as free */
4665 *mask |= (u_long)1 << v;
4666
4667 /* Update free offset */
4668 if (ni->free_off[0] > i)
4669 ni->free_off[0] = i;
4670
4671 return (0);
4672 }
4673
4674 /*
4675  * Allocates a new index in the given instance and stores it in @pidx.
4676 * Returns 0 on success.
4677 */
4678 int
4679 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx)
4680 {
4681 struct namedobj_instance *ni;
4682 u_long *mask;
4683 int i, off, v;
4684
4685 ni = (struct namedobj_instance *)n;
4686
4687 off = ni->free_off[0];
4688 mask = &ni->idx_mask[off];
4689
4690 for (i = off; i < ni->max_blocks; i++, mask++) {
4691 if ((v = ffsl(*mask)) == 0)
4692 continue;
4693
4694 /* Mark as busy */
4695 		*mask &= ~((u_long)1 << (v - 1));
4696
4697 ni->free_off[0] = i;
4698
4699 v = BLOCK_ITEMS * i + v - 1;
4700
4701 *pidx = v;
4702 return (0);
4703 }
4704
4705 return (1);
4706 }
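
/*
 * Index math example (sketch, assuming a 64-bit u_long): if block i = 2
 * has its lowest free (set) bit at position 5, ffsl() returns 6 and the
 * allocated index is 64 * 2 + 6 - 1 = 133.  Handing 133 back to
 * ipfw_objhash_free_idx() sets that bit again, making the index available
 * for reuse.
 */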
4707
4708 /* end of file */
4709