xref: /freebsd/sys/netpfil/ipfw/ip_fw_sockopt.c (revision 512bd18d)
1 /*-
2  * Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
3  * Copyright (c) 2014 Yandex LLC
4  * Copyright (c) 2014 Alexander V. Chernikov
5  *
6  * Supported by: Valeria Paoli
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Control socket and rule management routines for ipfw.
35  * Control is currently implemented via IP_FW3 setsockopt() code.
36  */
37 
38 #include "opt_ipfw.h"
39 #include "opt_inet.h"
40 #ifndef INET
41 #error IPFIREWALL requires INET.
42 #endif /* INET */
43 #include "opt_inet6.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>	/* struct m_tag used by nested headers */
49 #include <sys/kernel.h>
50 #include <sys/lock.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/rmlock.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sysctl.h>
58 #include <sys/syslog.h>
59 #include <sys/fnv_hash.h>
60 #include <net/if.h>
61 #include <net/pfil.h>
62 #include <net/route.h>
63 #include <net/vnet.h>
64 #include <vm/vm.h>
65 #include <vm/vm_extern.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/ip_var.h> /* hooks */
69 #include <netinet/ip_fw.h>
70 
71 #include <netpfil/ipfw/ip_fw_private.h>
72 #include <netpfil/ipfw/ip_fw_table.h>
73 
74 #ifdef MAC
75 #include <security/mac/mac_framework.h>
76 #endif
77 
78 static int ipfw_ctl(struct sockopt *sopt);
79 static int check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len,
80     struct rule_check_info *ci);
81 static int check_ipfw_rule1(struct ip_fw_rule *rule, int size,
82     struct rule_check_info *ci);
83 static int check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
84     struct rule_check_info *ci);
85 static int rewrite_rule_uidx(struct ip_fw_chain *chain,
86     struct rule_check_info *ci);
87 
88 #define	NAMEDOBJ_HASH_SIZE	32
89 
90 struct namedobj_instance {
91 	struct namedobjects_head	*names;
92 	struct namedobjects_head	*values;
93 	uint32_t nn_size;		/* names hash size */
94 	uint32_t nv_size;		/* number hash size */
95 	u_long *idx_mask;		/* used items bitmask */
96 	uint32_t max_blocks;		/* number of "long" blocks in bitmask */
97 	uint32_t count;			/* number of items */
98 	uint16_t free_off[IPFW_MAX_SETS];	/* first possible free offset */
99 	objhash_hash_f	*hash_f;
100 	objhash_cmp_f	*cmp_f;
101 };
102 #define	BLOCK_ITEMS	(8 * sizeof(u_long))	/* Number of items for ffsl() */
103 
104 static uint32_t objhash_hash_name(struct namedobj_instance *ni,
105     const void *key, uint32_t kopt);
106 static uint32_t objhash_hash_idx(struct namedobj_instance *ni, uint32_t val);
107 static int objhash_cmp_name(struct named_object *no, const void *name,
108     uint32_t set);
109 
110 MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
111 
112 static int dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
113     struct sockopt_data *sd);
114 static int add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
115     struct sockopt_data *sd);
116 static int del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
117     struct sockopt_data *sd);
118 static int clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
119     struct sockopt_data *sd);
120 static int move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
121     struct sockopt_data *sd);
122 static int manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
123     struct sockopt_data *sd);
124 static int dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
125     struct sockopt_data *sd);
126 static int dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
127     struct sockopt_data *sd);
128 
129 /* ctl3 handler data */
130 struct mtx ctl3_lock;
131 #define	CTL3_LOCK_INIT()	mtx_init(&ctl3_lock, "ctl3_lock", NULL, MTX_DEF)
132 #define	CTL3_LOCK_DESTROY()	mtx_destroy(&ctl3_lock)
133 #define	CTL3_LOCK()		mtx_lock(&ctl3_lock)
134 #define	CTL3_UNLOCK()		mtx_unlock(&ctl3_lock)
135 
136 static struct ipfw_sopt_handler *ctl3_handlers;
137 static size_t ctl3_hsize;
138 static uint64_t ctl3_refct, ctl3_gencnt;
139 #define	CTL3_SMALLBUF	4096			/* small page-size write buffer */
140 #define	CTL3_LARGEBUF	16 * 1024 * 1024	/* handle large rulesets */
141 
142 static int ipfw_flush_sopt_data(struct sockopt_data *sd);
143 
144 static struct ipfw_sopt_handler	scodes[] = {
145 	{ IP_FW_XGET,		0,	HDIR_GET,	dump_config },
146 	{ IP_FW_XADD,		0,	HDIR_BOTH,	add_rules },
147 	{ IP_FW_XDEL,		0,	HDIR_BOTH,	del_rules },
148 	{ IP_FW_XZERO,		0,	HDIR_SET,	clear_rules },
149 	{ IP_FW_XRESETLOG,	0,	HDIR_SET,	clear_rules },
150 	{ IP_FW_XMOVE,		0,	HDIR_SET,	move_rules },
151 	{ IP_FW_SET_SWAP,	0,	HDIR_SET,	manage_sets },
152 	{ IP_FW_SET_MOVE,	0,	HDIR_SET,	manage_sets },
153 	{ IP_FW_SET_ENABLE,	0,	HDIR_SET,	manage_sets },
154 	{ IP_FW_DUMP_SOPTCODES,	0,	HDIR_GET,	dump_soptcodes },
155 	{ IP_FW_DUMP_SRVOBJECTS,0,	HDIR_GET,	dump_srvobjects },
156 };
157 
158 static int
159 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule);
160 static struct opcode_obj_rewrite *find_op_rw(ipfw_insn *cmd,
161     uint16_t *puidx, uint8_t *ptype);
162 static int mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
163     uint32_t *bmask);
164 static int ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
165     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti);
166 static int ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd,
167     struct tid_info *ti, struct obj_idx *pidx, int *unresolved);
168 static void unref_rule_objects(struct ip_fw_chain *chain, struct ip_fw *rule);
169 static void unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd,
170     struct obj_idx *oib, struct obj_idx *end);
171 static int export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
172     struct sockopt_data *sd);
173 
174 /*
175  * Opcode object rewriter variables
176  */
177 struct opcode_obj_rewrite *ctl3_rewriters;
178 static size_t ctl3_rsize;
179 
180 /*
181  * static variables followed by global ones
182  */
183 
184 static VNET_DEFINE(uma_zone_t, ipfw_cntr_zone);
185 #define	V_ipfw_cntr_zone		VNET(ipfw_cntr_zone)
186 
187 void
188 ipfw_init_counters()
189 {
190 
191 	V_ipfw_cntr_zone = uma_zcreate("IPFW counters",
192 	    IPFW_RULE_CNTR_SIZE, NULL, NULL, NULL, NULL,
193 	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
194 }
195 
196 void
197 ipfw_destroy_counters()
198 {
199 
200 	uma_zdestroy(V_ipfw_cntr_zone);
201 }
202 
203 struct ip_fw *
204 ipfw_alloc_rule(struct ip_fw_chain *chain, size_t rulesize)
205 {
206 	struct ip_fw *rule;
207 
208 	rule = malloc(rulesize, M_IPFW, M_WAITOK | M_ZERO);
209 	rule->cntr = uma_zalloc(V_ipfw_cntr_zone, M_WAITOK | M_ZERO);
210 
211 	return (rule);
212 }
213 
214 static void
215 free_rule(struct ip_fw *rule)
216 {
217 
218 	uma_zfree(V_ipfw_cntr_zone, rule->cntr);
219 	free(rule, M_IPFW);
220 }
221 
222 
223 /*
224  * Find the smallest rule >= key, id.
225  * We could use bsearch but it is so simple that we code it directly
226  */
227 int
228 ipfw_find_rule(struct ip_fw_chain *chain, uint32_t key, uint32_t id)
229 {
230 	int i, lo, hi;
231 	struct ip_fw *r;
232 
233   	for (lo = 0, hi = chain->n_rules - 1; lo < hi;) {
234 		i = (lo + hi) / 2;
235 		r = chain->map[i];
236 		if (r->rulenum < key)
237 			lo = i + 1;	/* continue from the next one */
238 		else if (r->rulenum > key)
239 			hi = i;		/* this might be good */
240 		else if (r->id < id)
241 			lo = i + 1;	/* continue from the next one */
242 		else /* r->id >= id */
243 			hi = i;		/* this might be good */
244 	}
245 	return hi;
246 }
247 
248 /*
249  * Builds skipto cache on rule set @map.
250  */
251 static void
252 update_skipto_cache(struct ip_fw_chain *chain, struct ip_fw **map)
253 {
254 	int *smap, rulenum;
255 	int i, mi;
256 
257 	IPFW_UH_WLOCK_ASSERT(chain);
258 
259 	mi = 0;
260 	rulenum = map[mi]->rulenum;
261 	smap = chain->idxmap_back;
262 
263 	if (smap == NULL)
264 		return;
265 
266 	for (i = 0; i < 65536; i++) {
267 		smap[i] = mi;
268 		/* Use the same rule index until i < rulenum */
269 		if (i != rulenum || i == 65535)
270 			continue;
271 		/* Find next rule with num > i */
272 		rulenum = map[++mi]->rulenum;
273 		while (rulenum == i)
274 			rulenum = map[++mi]->rulenum;
275 	}
276 }
277 
278 /*
279  * Swaps prepared (backup) index with current one.
280  */
281 static void
282 swap_skipto_cache(struct ip_fw_chain *chain)
283 {
284 	int *map;
285 
286 	IPFW_UH_WLOCK_ASSERT(chain);
287 	IPFW_WLOCK_ASSERT(chain);
288 
289 	map = chain->idxmap;
290 	chain->idxmap = chain->idxmap_back;
291 	chain->idxmap_back = map;
292 }
293 
294 /*
295  * Allocate and initialize skipto cache.
296  */
297 void
298 ipfw_init_skipto_cache(struct ip_fw_chain *chain)
299 {
300 	int *idxmap, *idxmap_back;
301 
302 	idxmap = malloc(65536 * sizeof(uint32_t *), M_IPFW,
303 	    M_WAITOK | M_ZERO);
304 	idxmap_back = malloc(65536 * sizeof(uint32_t *), M_IPFW,
305 	    M_WAITOK | M_ZERO);
306 
307 	/*
308 	 * Note we may be called at any time after initialization,
309 	 * for example, on first skipto rule, so we need to
310 	 * provide valid chain->idxmap on return
311 	 */
312 
313 	IPFW_UH_WLOCK(chain);
314 	if (chain->idxmap != NULL) {
315 		IPFW_UH_WUNLOCK(chain);
316 		free(idxmap, M_IPFW);
317 		free(idxmap_back, M_IPFW);
318 		return;
319 	}
320 
321 	/* Set backup pointer first to permit building cache */
322 	chain->idxmap_back = idxmap_back;
323 	update_skipto_cache(chain, chain->map);
324 	IPFW_WLOCK(chain);
325 	/* It is now safe to set chain->idxmap ptr */
326 	chain->idxmap = idxmap;
327 	swap_skipto_cache(chain);
328 	IPFW_WUNLOCK(chain);
329 	IPFW_UH_WUNLOCK(chain);
330 }
331 
332 /*
333  * Destroys skipto cache.
334  */
335 void
336 ipfw_destroy_skipto_cache(struct ip_fw_chain *chain)
337 {
338 
339 	if (chain->idxmap != NULL)
340 		free(chain->idxmap, M_IPFW);
341 	if (chain->idxmap != NULL)
342 		free(chain->idxmap_back, M_IPFW);
343 }
344 
345 
346 /*
347  * allocate a new map, returns the chain locked. extra is the number
348  * of entries to add or delete.
349  */
350 static struct ip_fw **
351 get_map(struct ip_fw_chain *chain, int extra, int locked)
352 {
353 
354 	for (;;) {
355 		struct ip_fw **map;
356 		int i, mflags;
357 
358 		mflags = M_ZERO | ((locked != 0) ? M_NOWAIT : M_WAITOK);
359 
360 		i = chain->n_rules + extra;
361 		map = malloc(i * sizeof(struct ip_fw *), M_IPFW, mflags);
362 		if (map == NULL) {
363 			printf("%s: cannot allocate map\n", __FUNCTION__);
364 			return NULL;
365 		}
366 		if (!locked)
367 			IPFW_UH_WLOCK(chain);
368 		if (i >= chain->n_rules + extra) /* good */
369 			return map;
370 		/* otherwise we lost the race, free and retry */
371 		if (!locked)
372 			IPFW_UH_WUNLOCK(chain);
373 		free(map, M_IPFW);
374 	}
375 }
376 
377 /*
378  * swap the maps. It is supposed to be called with IPFW_UH_WLOCK
379  */
380 static struct ip_fw **
381 swap_map(struct ip_fw_chain *chain, struct ip_fw **new_map, int new_len)
382 {
383 	struct ip_fw **old_map;
384 
385 	IPFW_WLOCK(chain);
386 	chain->id++;
387 	chain->n_rules = new_len;
388 	old_map = chain->map;
389 	chain->map = new_map;
390 	swap_skipto_cache(chain);
391 	IPFW_WUNLOCK(chain);
392 	return old_map;
393 }
394 
395 
396 static void
397 export_cntr1_base(struct ip_fw *krule, struct ip_fw_bcounter *cntr)
398 {
399 	struct timeval boottime;
400 
401 	cntr->size = sizeof(*cntr);
402 
403 	if (krule->cntr != NULL) {
404 		cntr->pcnt = counter_u64_fetch(krule->cntr);
405 		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
406 		cntr->timestamp = krule->timestamp;
407 	}
408 	if (cntr->timestamp > 0) {
409 		getboottime(&boottime);
410 		cntr->timestamp += boottime.tv_sec;
411 	}
412 }
413 
414 static void
415 export_cntr0_base(struct ip_fw *krule, struct ip_fw_bcounter0 *cntr)
416 {
417 	struct timeval boottime;
418 
419 	if (krule->cntr != NULL) {
420 		cntr->pcnt = counter_u64_fetch(krule->cntr);
421 		cntr->bcnt = counter_u64_fetch(krule->cntr + 1);
422 		cntr->timestamp = krule->timestamp;
423 	}
424 	if (cntr->timestamp > 0) {
425 		getboottime(&boottime);
426 		cntr->timestamp += boottime.tv_sec;
427 	}
428 }
429 
430 /*
431  * Copies rule @urule from v1 userland format (current).
432  * to kernel @krule.
433  * Assume @krule is zeroed.
434  */
435 static void
436 import_rule1(struct rule_check_info *ci)
437 {
438 	struct ip_fw_rule *urule;
439 	struct ip_fw *krule;
440 
441 	urule = (struct ip_fw_rule *)ci->urule;
442 	krule = (struct ip_fw *)ci->krule;
443 
444 	/* copy header */
445 	krule->act_ofs = urule->act_ofs;
446 	krule->cmd_len = urule->cmd_len;
447 	krule->rulenum = urule->rulenum;
448 	krule->set = urule->set;
449 	krule->flags = urule->flags;
450 
451 	/* Save rulenum offset */
452 	ci->urule_numoff = offsetof(struct ip_fw_rule, rulenum);
453 
454 	/* Copy opcodes */
455 	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
456 }
457 
458 /*
459  * Export rule into v1 format (Current).
460  * Layout:
461  * [ ipfw_obj_tlv(IPFW_TLV_RULE_ENT)
462  *     [ ip_fw_rule ] OR
463  *     [ ip_fw_bcounter ip_fw_rule] (depends on rcntrs).
464  * ]
465  * Assume @data is zeroed.
466  */
467 static void
468 export_rule1(struct ip_fw *krule, caddr_t data, int len, int rcntrs)
469 {
470 	struct ip_fw_bcounter *cntr;
471 	struct ip_fw_rule *urule;
472 	ipfw_obj_tlv *tlv;
473 
474 	/* Fill in TLV header */
475 	tlv = (ipfw_obj_tlv *)data;
476 	tlv->type = IPFW_TLV_RULE_ENT;
477 	tlv->length = len;
478 
479 	if (rcntrs != 0) {
480 		/* Copy counters */
481 		cntr = (struct ip_fw_bcounter *)(tlv + 1);
482 		urule = (struct ip_fw_rule *)(cntr + 1);
483 		export_cntr1_base(krule, cntr);
484 	} else
485 		urule = (struct ip_fw_rule *)(tlv + 1);
486 
487 	/* copy header */
488 	urule->act_ofs = krule->act_ofs;
489 	urule->cmd_len = krule->cmd_len;
490 	urule->rulenum = krule->rulenum;
491 	urule->set = krule->set;
492 	urule->flags = krule->flags;
493 	urule->id = krule->id;
494 
495 	/* Copy opcodes */
496 	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
497 }
498 
499 
500 /*
501  * Copies rule @urule from FreeBSD8 userland format (v0)
502  * to kernel @krule.
503  * Assume @krule is zeroed.
504  */
505 static void
506 import_rule0(struct rule_check_info *ci)
507 {
508 	struct ip_fw_rule0 *urule;
509 	struct ip_fw *krule;
510 	int cmdlen, l;
511 	ipfw_insn *cmd;
512 	ipfw_insn_limit *lcmd;
513 	ipfw_insn_if *cmdif;
514 
515 	urule = (struct ip_fw_rule0 *)ci->urule;
516 	krule = (struct ip_fw *)ci->krule;
517 
518 	/* copy header */
519 	krule->act_ofs = urule->act_ofs;
520 	krule->cmd_len = urule->cmd_len;
521 	krule->rulenum = urule->rulenum;
522 	krule->set = urule->set;
523 	if ((urule->_pad & 1) != 0)
524 		krule->flags |= IPFW_RULE_NOOPT;
525 
526 	/* Save rulenum offset */
527 	ci->urule_numoff = offsetof(struct ip_fw_rule0, rulenum);
528 
529 	/* Copy opcodes */
530 	memcpy(krule->cmd, urule->cmd, krule->cmd_len * sizeof(uint32_t));
531 
532 	/*
533 	 * Alter opcodes:
534 	 * 1) convert tablearg value from 65535 to 0
535 	 * 2) Add high bit to O_SETFIB/O_SETDSCP values (to make room
536 	 *    for targ).
537 	 * 3) convert table number in iface opcodes to u16
538 	 * 4) convert old `nat global` into new 65535
539 	 */
540 	l = krule->cmd_len;
541 	cmd = krule->cmd;
542 	cmdlen = 0;
543 
544 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
545 		cmdlen = F_LEN(cmd);
546 
547 		switch (cmd->opcode) {
548 		/* Opcodes supporting tablearg */
549 		case O_TAG:
550 		case O_TAGGED:
551 		case O_PIPE:
552 		case O_QUEUE:
553 		case O_DIVERT:
554 		case O_TEE:
555 		case O_SKIPTO:
556 		case O_CALLRETURN:
557 		case O_NETGRAPH:
558 		case O_NGTEE:
559 		case O_NAT:
560 			if (cmd->arg1 == IP_FW_TABLEARG)
561 				cmd->arg1 = IP_FW_TARG;
562 			else if (cmd->arg1 == 0)
563 				cmd->arg1 = IP_FW_NAT44_GLOBAL;
564 			break;
565 		case O_SETFIB:
566 		case O_SETDSCP:
567 			if (cmd->arg1 == IP_FW_TABLEARG)
568 				cmd->arg1 = IP_FW_TARG;
569 			else
570 				cmd->arg1 |= 0x8000;
571 			break;
572 		case O_LIMIT:
573 			lcmd = (ipfw_insn_limit *)cmd;
574 			if (lcmd->conn_limit == IP_FW_TABLEARG)
575 				lcmd->conn_limit = IP_FW_TARG;
576 			break;
577 		/* Interface tables */
578 		case O_XMIT:
579 		case O_RECV:
580 		case O_VIA:
581 			/* Interface table, possibly */
582 			cmdif = (ipfw_insn_if *)cmd;
583 			if (cmdif->name[0] != '\1')
584 				break;
585 
586 			cmdif->p.kidx = (uint16_t)cmdif->p.glob;
587 			break;
588 		}
589 	}
590 }
591 
592 /*
593  * Copies rule @krule from kernel to FreeBSD8 userland format (v0)
594  */
595 static void
596 export_rule0(struct ip_fw *krule, struct ip_fw_rule0 *urule, int len)
597 {
598 	int cmdlen, l;
599 	ipfw_insn *cmd;
600 	ipfw_insn_limit *lcmd;
601 	ipfw_insn_if *cmdif;
602 
603 	/* copy header */
604 	memset(urule, 0, len);
605 	urule->act_ofs = krule->act_ofs;
606 	urule->cmd_len = krule->cmd_len;
607 	urule->rulenum = krule->rulenum;
608 	urule->set = krule->set;
609 	if ((krule->flags & IPFW_RULE_NOOPT) != 0)
610 		urule->_pad |= 1;
611 
612 	/* Copy opcodes */
613 	memcpy(urule->cmd, krule->cmd, krule->cmd_len * sizeof(uint32_t));
614 
615 	/* Export counters */
616 	export_cntr0_base(krule, (struct ip_fw_bcounter0 *)&urule->pcnt);
617 
618 	/*
619 	 * Alter opcodes:
620 	 * 1) convert tablearg value from 0 to 65535
621 	 * 2) Remove highest bit from O_SETFIB/O_SETDSCP values.
622 	 * 3) convert table number in iface opcodes to int
623 	 */
624 	l = urule->cmd_len;
625 	cmd = urule->cmd;
626 	cmdlen = 0;
627 
628 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
629 		cmdlen = F_LEN(cmd);
630 
631 		switch (cmd->opcode) {
632 		/* Opcodes supporting tablearg */
633 		case O_TAG:
634 		case O_TAGGED:
635 		case O_PIPE:
636 		case O_QUEUE:
637 		case O_DIVERT:
638 		case O_TEE:
639 		case O_SKIPTO:
640 		case O_CALLRETURN:
641 		case O_NETGRAPH:
642 		case O_NGTEE:
643 		case O_NAT:
644 			if (cmd->arg1 == IP_FW_TARG)
645 				cmd->arg1 = IP_FW_TABLEARG;
646 			else if (cmd->arg1 == IP_FW_NAT44_GLOBAL)
647 				cmd->arg1 = 0;
648 			break;
649 		case O_SETFIB:
650 		case O_SETDSCP:
651 			if (cmd->arg1 == IP_FW_TARG)
652 				cmd->arg1 = IP_FW_TABLEARG;
653 			else
654 				cmd->arg1 &= ~0x8000;
655 			break;
656 		case O_LIMIT:
657 			lcmd = (ipfw_insn_limit *)cmd;
658 			if (lcmd->conn_limit == IP_FW_TARG)
659 				lcmd->conn_limit = IP_FW_TABLEARG;
660 			break;
661 		/* Interface tables */
662 		case O_XMIT:
663 		case O_RECV:
664 		case O_VIA:
665 			/* Interface table, possibly */
666 			cmdif = (ipfw_insn_if *)cmd;
667 			if (cmdif->name[0] != '\1')
668 				break;
669 
670 			cmdif->p.glob = cmdif->p.kidx;
671 			break;
672 		}
673 	}
674 }
675 
676 /*
677  * Add new rule(s) to the list possibly creating rule number for each.
678  * Update the rule_number in the input struct so the caller knows it as well.
679  * Must be called without IPFW_UH held
680  */
681 static int
682 commit_rules(struct ip_fw_chain *chain, struct rule_check_info *rci, int count)
683 {
684 	int error, i, insert_before, tcount;
685 	uint16_t rulenum, *pnum;
686 	struct rule_check_info *ci;
687 	struct ip_fw *krule;
688 	struct ip_fw **map;	/* the new array of pointers */
689 
690 	/* Check if we need to do table/obj index remap */
691 	tcount = 0;
692 	for (ci = rci, i = 0; i < count; ci++, i++) {
693 		if (ci->object_opcodes == 0)
694 			continue;
695 
696 		/*
697 		 * Rule has some object opcodes.
698 		 * We need to find (and create non-existing)
699 		 * kernel objects, and reference existing ones.
700 		 */
701 		error = rewrite_rule_uidx(chain, ci);
702 		if (error != 0) {
703 
704 			/*
705 			 * rewrite failed, state for current rule
706 			 * has been reverted. Check if we need to
707 			 * revert more.
708 			 */
709 			if (tcount > 0) {
710 
711 				/*
712 				 * We have some more table rules
713 				 * we need to rollback.
714 				 */
715 
716 				IPFW_UH_WLOCK(chain);
717 				while (ci != rci) {
718 					ci--;
719 					if (ci->object_opcodes == 0)
720 						continue;
721 					unref_rule_objects(chain,ci->krule);
722 
723 				}
724 				IPFW_UH_WUNLOCK(chain);
725 
726 			}
727 
728 			return (error);
729 		}
730 
731 		tcount++;
732 	}
733 
734 	/* get_map returns with IPFW_UH_WLOCK if successful */
735 	map = get_map(chain, count, 0 /* not locked */);
736 	if (map == NULL) {
737 		if (tcount > 0) {
738 			/* Unbind tables */
739 			IPFW_UH_WLOCK(chain);
740 			for (ci = rci, i = 0; i < count; ci++, i++) {
741 				if (ci->object_opcodes == 0)
742 					continue;
743 
744 				unref_rule_objects(chain, ci->krule);
745 			}
746 			IPFW_UH_WUNLOCK(chain);
747 		}
748 
749 		return (ENOSPC);
750 	}
751 
752 	if (V_autoinc_step < 1)
753 		V_autoinc_step = 1;
754 	else if (V_autoinc_step > 1000)
755 		V_autoinc_step = 1000;
756 
757 	/* FIXME: Handle count > 1 */
758 	ci = rci;
759 	krule = ci->krule;
760 	rulenum = krule->rulenum;
761 
762 	/* find the insertion point, we will insert before */
763 	insert_before = rulenum ? rulenum + 1 : IPFW_DEFAULT_RULE;
764 	i = ipfw_find_rule(chain, insert_before, 0);
765 	/* duplicate first part */
766 	if (i > 0)
767 		bcopy(chain->map, map, i * sizeof(struct ip_fw *));
768 	map[i] = krule;
769 	/* duplicate remaining part, we always have the default rule */
770 	bcopy(chain->map + i, map + i + 1,
771 		sizeof(struct ip_fw *) *(chain->n_rules - i));
772 	if (rulenum == 0) {
773 		/* Compute rule number and write it back */
774 		rulenum = i > 0 ? map[i-1]->rulenum : 0;
775 		if (rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
776 			rulenum += V_autoinc_step;
777 		krule->rulenum = rulenum;
778 		/* Save number to userland rule */
779 		pnum = (uint16_t *)((caddr_t)ci->urule + ci->urule_numoff);
780 		*pnum = rulenum;
781 	}
782 
783 	krule->id = chain->id + 1;
784 	update_skipto_cache(chain, map);
785 	map = swap_map(chain, map, chain->n_rules + 1);
786 	chain->static_len += RULEUSIZE0(krule);
787 	IPFW_UH_WUNLOCK(chain);
788 	if (map)
789 		free(map, M_IPFW);
790 	return (0);
791 }
792 
793 int
794 ipfw_add_protected_rule(struct ip_fw_chain *chain, struct ip_fw *rule,
795     int locked)
796 {
797 	struct ip_fw **map;
798 
799 	map = get_map(chain, 1, locked);
800 	if (map == NULL)
801 		return (ENOMEM);
802 	if (chain->n_rules > 0)
803 		bcopy(chain->map, map,
804 		    chain->n_rules * sizeof(struct ip_fw *));
805 	map[chain->n_rules] = rule;
806 	rule->rulenum = IPFW_DEFAULT_RULE;
807 	rule->set = RESVD_SET;
808 	rule->id = chain->id + 1;
809 	/* We add rule in the end of chain, no need to update skipto cache */
810 	map = swap_map(chain, map, chain->n_rules + 1);
811 	chain->static_len += RULEUSIZE0(rule);
812 	IPFW_UH_WUNLOCK(chain);
813 	free(map, M_IPFW);
814 	return (0);
815 }
816 
817 /*
818  * Adds @rule to the list of rules to reap
819  */
820 void
821 ipfw_reap_add(struct ip_fw_chain *chain, struct ip_fw **head,
822     struct ip_fw *rule)
823 {
824 
825 	IPFW_UH_WLOCK_ASSERT(chain);
826 
827 	/* Unlink rule from everywhere */
828 	unref_rule_objects(chain, rule);
829 
830 	*((struct ip_fw **)rule) = *head;
831 	*head = rule;
832 }
833 
834 /*
835  * Reclaim storage associated with a list of rules.  This is
836  * typically the list created using remove_rule.
837  * A NULL pointer on input is handled correctly.
838  */
839 void
840 ipfw_reap_rules(struct ip_fw *head)
841 {
842 	struct ip_fw *rule;
843 
844 	while ((rule = head) != NULL) {
845 		head = *((struct ip_fw **)head);
846 		free_rule(rule);
847 	}
848 }
849 
850 /*
851  * Rules to keep are
852  *	(default || reserved || !match_set || !match_number)
853  * where
854  *   default ::= (rule->rulenum == IPFW_DEFAULT_RULE)
855  *	// the default rule is always protected
856  *
857  *   reserved ::= (cmd == 0 && n == 0 && rule->set == RESVD_SET)
858  *	// RESVD_SET is protected only if cmd == 0 and n == 0 ("ipfw flush")
859  *
860  *   match_set ::= (cmd == 0 || rule->set == set)
861  *	// set number is ignored for cmd == 0
862  *
863  *   match_number ::= (cmd == 1 || n == 0 || n == rule->rulenum)
864  *	// number is ignored for cmd == 1 or n == 0
865  *
866  */
867 int
868 ipfw_match_range(struct ip_fw *rule, ipfw_range_tlv *rt)
869 {
870 
871 	/* Don't match default rule for modification queries */
872 	if (rule->rulenum == IPFW_DEFAULT_RULE &&
873 	    (rt->flags & IPFW_RCFLAG_DEFAULT) == 0)
874 		return (0);
875 
876 	/* Don't match rules in reserved set for flush requests */
877 	if ((rt->flags & IPFW_RCFLAG_ALL) != 0 && rule->set == RESVD_SET)
878 		return (0);
879 
880 	/* If we're filtering by set, don't match other sets */
881 	if ((rt->flags & IPFW_RCFLAG_SET) != 0 && rule->set != rt->set)
882 		return (0);
883 
884 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0 &&
885 	    (rule->rulenum < rt->start_rule || rule->rulenum > rt->end_rule))
886 		return (0);
887 
888 	return (1);
889 }
890 
891 struct manage_sets_args {
892 	uint16_t	set;
893 	uint8_t		new_set;
894 };
895 
896 static int
897 swap_sets_cb(struct namedobj_instance *ni, struct named_object *no,
898     void *arg)
899 {
900 	struct manage_sets_args *args;
901 
902 	args = (struct manage_sets_args *)arg;
903 	if (no->set == (uint8_t)args->set)
904 		no->set = args->new_set;
905 	else if (no->set == args->new_set)
906 		no->set = (uint8_t)args->set;
907 	return (0);
908 }
909 
910 static int
911 move_sets_cb(struct namedobj_instance *ni, struct named_object *no,
912     void *arg)
913 {
914 	struct manage_sets_args *args;
915 
916 	args = (struct manage_sets_args *)arg;
917 	if (no->set == (uint8_t)args->set)
918 		no->set = args->new_set;
919 	return (0);
920 }
921 
922 static int
923 test_sets_cb(struct namedobj_instance *ni, struct named_object *no,
924     void *arg)
925 {
926 	struct manage_sets_args *args;
927 
928 	args = (struct manage_sets_args *)arg;
929 	if (no->set != (uint8_t)args->set)
930 		return (0);
931 	if (ipfw_objhash_lookup_name_type(ni, args->new_set,
932 	    no->etlv, no->name) != NULL)
933 		return (EEXIST);
934 	return (0);
935 }
936 
937 /*
938  * Generic function to handler moving and swapping sets.
939  */
940 int
941 ipfw_obj_manage_sets(struct namedobj_instance *ni, uint16_t type,
942     uint16_t set, uint8_t new_set, enum ipfw_sets_cmd cmd)
943 {
944 	struct manage_sets_args args;
945 	struct named_object *no;
946 
947 	args.set = set;
948 	args.new_set = new_set;
949 	switch (cmd) {
950 	case SWAP_ALL:
951 		return (ipfw_objhash_foreach_type(ni, swap_sets_cb,
952 		    &args, type));
953 	case TEST_ALL:
954 		return (ipfw_objhash_foreach_type(ni, test_sets_cb,
955 		    &args, type));
956 	case MOVE_ALL:
957 		return (ipfw_objhash_foreach_type(ni, move_sets_cb,
958 		    &args, type));
959 	case COUNT_ONE:
960 		/*
961 		 * @set used to pass kidx.
962 		 * When @new_set is zero - reset object counter,
963 		 * otherwise increment it.
964 		 */
965 		no = ipfw_objhash_lookup_kidx(ni, set);
966 		if (new_set != 0)
967 			no->ocnt++;
968 		else
969 			no->ocnt = 0;
970 		return (0);
971 	case TEST_ONE:
972 		/* @set used to pass kidx */
973 		no = ipfw_objhash_lookup_kidx(ni, set);
974 		/*
975 		 * First check number of references:
976 		 * when it differs, this mean other rules are holding
977 		 * reference to given object, so it is not possible to
978 		 * change its set. Note that refcnt may account references
979 		 * to some going-to-be-added rules. Since we don't know
980 		 * their numbers (and even if they will be added) it is
981 		 * perfectly OK to return error here.
982 		 */
983 		if (no->ocnt != no->refcnt)
984 			return (EBUSY);
985 		if (ipfw_objhash_lookup_name_type(ni, new_set, type,
986 		    no->name) != NULL)
987 			return (EEXIST);
988 		return (0);
989 	case MOVE_ONE:
990 		/* @set used to pass kidx */
991 		no = ipfw_objhash_lookup_kidx(ni, set);
992 		no->set = new_set;
993 		return (0);
994 	}
995 	return (EINVAL);
996 }
997 
998 /*
999  * Delete rules matching range @rt.
1000  * Saves number of deleted rules in @ndel.
1001  *
1002  * Returns 0 on success.
1003  */
1004 static int
1005 delete_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int *ndel)
1006 {
1007 	struct ip_fw *reap, *rule, **map;
1008 	int end, start;
1009 	int i, n, ndyn, ofs;
1010 
1011 	reap = NULL;
1012 	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
1013 
1014 	/*
1015 	 * Stage 1: Determine range to inspect.
1016 	 * Range is half-inclusive, e.g [start, end).
1017 	 */
1018 	start = 0;
1019 	end = chain->n_rules - 1;
1020 
1021 	if ((rt->flags & IPFW_RCFLAG_RANGE) != 0) {
1022 		start = ipfw_find_rule(chain, rt->start_rule, 0);
1023 
1024 		end = ipfw_find_rule(chain, rt->end_rule, 0);
1025 		if (rt->end_rule != IPFW_DEFAULT_RULE)
1026 			while (chain->map[end]->rulenum == rt->end_rule)
1027 				end++;
1028 	}
1029 
1030 	/* Allocate new map of the same size */
1031 	map = get_map(chain, 0, 1 /* locked */);
1032 	if (map == NULL) {
1033 		IPFW_UH_WUNLOCK(chain);
1034 		return (ENOMEM);
1035 	}
1036 
1037 	n = 0;
1038 	ndyn = 0;
1039 	ofs = start;
1040 	/* 1. bcopy the initial part of the map */
1041 	if (start > 0)
1042 		bcopy(chain->map, map, start * sizeof(struct ip_fw *));
1043 	/* 2. copy active rules between start and end */
1044 	for (i = start; i < end; i++) {
1045 		rule = chain->map[i];
1046 		if (ipfw_match_range(rule, rt) == 0) {
1047 			map[ofs++] = rule;
1048 			continue;
1049 		}
1050 
1051 		n++;
1052 		if (ipfw_is_dyn_rule(rule) != 0)
1053 			ndyn++;
1054 	}
1055 	/* 3. copy the final part of the map */
1056 	bcopy(chain->map + end, map + ofs,
1057 		(chain->n_rules - end) * sizeof(struct ip_fw *));
1058 	/* 4. recalculate skipto cache */
1059 	update_skipto_cache(chain, map);
1060 	/* 5. swap the maps (under UH_WLOCK + WHLOCK) */
1061 	map = swap_map(chain, map, chain->n_rules - n);
1062 	/* 6. Remove all dynamic states originated by deleted rules */
1063 	if (ndyn > 0)
1064 		ipfw_expire_dyn_rules(chain, rt);
1065 	/* 7. now remove the rules deleted from the old map */
1066 	for (i = start; i < end; i++) {
1067 		rule = map[i];
1068 		if (ipfw_match_range(rule, rt) == 0)
1069 			continue;
1070 		chain->static_len -= RULEUSIZE0(rule);
1071 		ipfw_reap_add(chain, &reap, rule);
1072 	}
1073 	IPFW_UH_WUNLOCK(chain);
1074 
1075 	ipfw_reap_rules(reap);
1076 	if (map != NULL)
1077 		free(map, M_IPFW);
1078 	*ndel = n;
1079 	return (0);
1080 }
1081 
1082 static int
1083 move_objects(struct ip_fw_chain *ch, ipfw_range_tlv *rt)
1084 {
1085 	struct opcode_obj_rewrite *rw;
1086 	struct ip_fw *rule;
1087 	ipfw_insn *cmd;
1088 	int cmdlen, i, l, c;
1089 	uint16_t kidx;
1090 
1091 	IPFW_UH_WLOCK_ASSERT(ch);
1092 
1093 	/* Stage 1: count number of references by given rules */
1094 	for (c = 0, i = 0; i < ch->n_rules - 1; i++) {
1095 		rule = ch->map[i];
1096 		if (ipfw_match_range(rule, rt) == 0)
1097 			continue;
1098 		if (rule->set == rt->new_set) /* nothing to do */
1099 			continue;
1100 		/* Search opcodes with named objects */
1101 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1102 		    l > 0; l -= cmdlen, cmd += cmdlen) {
1103 			cmdlen = F_LEN(cmd);
1104 			rw = find_op_rw(cmd, &kidx, NULL);
1105 			if (rw == NULL || rw->manage_sets == NULL)
1106 				continue;
1107 			/*
1108 			 * When manage_sets() returns non-zero value to
1109 			 * COUNT_ONE command, consider this as an object
1110 			 * doesn't support sets (e.g. disabled with sysctl).
1111 			 * So, skip checks for this object.
1112 			 */
1113 			if (rw->manage_sets(ch, kidx, 1, COUNT_ONE) != 0)
1114 				continue;
1115 			c++;
1116 		}
1117 	}
1118 	if (c == 0) /* No objects found */
1119 		return (0);
1120 	/* Stage 2: verify "ownership" */
1121 	for (c = 0, i = 0; (i < ch->n_rules - 1) && c == 0; i++) {
1122 		rule = ch->map[i];
1123 		if (ipfw_match_range(rule, rt) == 0)
1124 			continue;
1125 		if (rule->set == rt->new_set) /* nothing to do */
1126 			continue;
1127 		/* Search opcodes with named objects */
1128 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1129 		    l > 0 && c == 0; l -= cmdlen, cmd += cmdlen) {
1130 			cmdlen = F_LEN(cmd);
1131 			rw = find_op_rw(cmd, &kidx, NULL);
1132 			if (rw == NULL || rw->manage_sets == NULL)
1133 				continue;
1134 			/* Test for ownership and conflicting names */
1135 			c = rw->manage_sets(ch, kidx,
1136 			    (uint8_t)rt->new_set, TEST_ONE);
1137 		}
1138 	}
1139 	/* Stage 3: change set and cleanup */
1140 	for (i = 0; i < ch->n_rules - 1; i++) {
1141 		rule = ch->map[i];
1142 		if (ipfw_match_range(rule, rt) == 0)
1143 			continue;
1144 		if (rule->set == rt->new_set) /* nothing to do */
1145 			continue;
1146 		/* Search opcodes with named objects */
1147 		for (l = rule->cmd_len, cmdlen = 0, cmd = rule->cmd;
1148 		    l > 0; l -= cmdlen, cmd += cmdlen) {
1149 			cmdlen = F_LEN(cmd);
1150 			rw = find_op_rw(cmd, &kidx, NULL);
1151 			if (rw == NULL || rw->manage_sets == NULL)
1152 				continue;
1153 			/* cleanup object counter */
1154 			rw->manage_sets(ch, kidx,
1155 			    0 /* reset counter */, COUNT_ONE);
1156 			if (c != 0)
1157 				continue;
1158 			/* change set */
1159 			rw->manage_sets(ch, kidx,
1160 			    (uint8_t)rt->new_set, MOVE_ONE);
1161 		}
1162 	}
1163 	return (c);
1164 }/*
1165  * Changes set of given rule rannge @rt
1166  * with each other.
1167  *
1168  * Returns 0 on success.
1169  */
1170 static int
1171 move_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1172 {
1173 	struct ip_fw *rule;
1174 	int i;
1175 
1176 	IPFW_UH_WLOCK(chain);
1177 
1178 	/*
1179 	 * Move rules with matching paramenerts to a new set.
1180 	 * This one is much more complex. We have to ensure
1181 	 * that all referenced tables (if any) are referenced
1182 	 * by given rule subset only. Otherwise, we can't move
1183 	 * them to new set and have to return error.
1184 	 */
1185 	if ((i = move_objects(chain, rt)) != 0) {
1186 		IPFW_UH_WUNLOCK(chain);
1187 		return (i);
1188 	}
1189 
1190 	/* XXX: We have to do swap holding WLOCK */
1191 	for (i = 0; i < chain->n_rules; i++) {
1192 		rule = chain->map[i];
1193 		if (ipfw_match_range(rule, rt) == 0)
1194 			continue;
1195 		rule->set = rt->new_set;
1196 	}
1197 
1198 	IPFW_UH_WUNLOCK(chain);
1199 
1200 	return (0);
1201 }
1202 
1203 /*
1204  * Clear counters for a specific rule.
1205  * Normally run under IPFW_UH_RLOCK, but these are idempotent ops
1206  * so we only care that rules do not disappear.
1207  */
1208 static void
1209 clear_counters(struct ip_fw *rule, int log_only)
1210 {
1211 	ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule);
1212 
1213 	if (log_only == 0)
1214 		IPFW_ZERO_RULE_COUNTER(rule);
1215 	if (l->o.opcode == O_LOG)
1216 		l->log_left = l->max_log;
1217 }
1218 
1219 /*
1220  * Flushes rules counters and/or log values on matching range.
1221  *
1222  * Returns number of items cleared.
1223  */
1224 static int
1225 clear_range(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int log_only)
1226 {
1227 	struct ip_fw *rule;
1228 	int num;
1229 	int i;
1230 
1231 	num = 0;
1232 	rt->flags |= IPFW_RCFLAG_DEFAULT;
1233 
1234 	IPFW_UH_WLOCK(chain);	/* arbitrate writers */
1235 	for (i = 0; i < chain->n_rules; i++) {
1236 		rule = chain->map[i];
1237 		if (ipfw_match_range(rule, rt) == 0)
1238 			continue;
1239 		clear_counters(rule, log_only);
1240 		num++;
1241 	}
1242 	IPFW_UH_WUNLOCK(chain);
1243 
1244 	return (num);
1245 }
1246 
1247 static int
1248 check_range_tlv(ipfw_range_tlv *rt)
1249 {
1250 
1251 	if (rt->head.length != sizeof(*rt))
1252 		return (1);
1253 	if (rt->start_rule > rt->end_rule)
1254 		return (1);
1255 	if (rt->set >= IPFW_MAX_SETS || rt->new_set >= IPFW_MAX_SETS)
1256 		return (1);
1257 
1258 	if ((rt->flags & IPFW_RCFLAG_USER) != rt->flags)
1259 		return (1);
1260 
1261 	return (0);
1262 }
1263 
1264 /*
1265  * Delete rules matching specified parameters
1266  * Data layout (v0)(current):
1267  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1268  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1269  *
1270  * Saves number of deleted rules in ipfw_range_tlv->new_set.
1271  *
1272  * Returns 0 on success.
1273  */
1274 static int
1275 del_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1276     struct sockopt_data *sd)
1277 {
1278 	ipfw_range_header *rh;
1279 	int error, ndel;
1280 
1281 	if (sd->valsize != sizeof(*rh))
1282 		return (EINVAL);
1283 
1284 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1285 
1286 	if (check_range_tlv(&rh->range) != 0)
1287 		return (EINVAL);
1288 
1289 	ndel = 0;
1290 	if ((error = delete_range(chain, &rh->range, &ndel)) != 0)
1291 		return (error);
1292 
1293 	/* Save number of rules deleted */
1294 	rh->range.new_set = ndel;
1295 	return (0);
1296 }
1297 
1298 /*
1299  * Move rules/sets matching specified parameters
1300  * Data layout (v0)(current):
1301  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1302  *
1303  * Returns 0 on success.
1304  */
1305 static int
1306 move_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1307     struct sockopt_data *sd)
1308 {
1309 	ipfw_range_header *rh;
1310 
1311 	if (sd->valsize != sizeof(*rh))
1312 		return (EINVAL);
1313 
1314 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1315 
1316 	if (check_range_tlv(&rh->range) != 0)
1317 		return (EINVAL);
1318 
1319 	return (move_range(chain, &rh->range));
1320 }
1321 
1322 /*
1323  * Clear rule accounting data matching specified parameters
1324  * Data layout (v0)(current):
1325  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1326  * Reply: [ ipfw_obj_header ipfw_range_tlv ]
1327  *
1328  * Saves number of cleared rules in ipfw_range_tlv->new_set.
1329  *
1330  * Returns 0 on success.
1331  */
1332 static int
1333 clear_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1334     struct sockopt_data *sd)
1335 {
1336 	ipfw_range_header *rh;
1337 	int log_only, num;
1338 	char *msg;
1339 
1340 	if (sd->valsize != sizeof(*rh))
1341 		return (EINVAL);
1342 
1343 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1344 
1345 	if (check_range_tlv(&rh->range) != 0)
1346 		return (EINVAL);
1347 
1348 	log_only = (op3->opcode == IP_FW_XRESETLOG);
1349 
1350 	num = clear_range(chain, &rh->range, log_only);
1351 
1352 	if (rh->range.flags & IPFW_RCFLAG_ALL)
1353 		msg = log_only ? "All logging counts reset" :
1354 		    "Accounting cleared";
1355 	else
1356 		msg = log_only ? "logging count reset" : "cleared";
1357 
1358 	if (V_fw_verbose) {
1359 		int lev = LOG_SECURITY | LOG_NOTICE;
1360 		log(lev, "ipfw: %s.\n", msg);
1361 	}
1362 
1363 	/* Save number of rules cleared */
1364 	rh->range.new_set = num;
1365 	return (0);
1366 }
1367 
1368 static void
1369 enable_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt)
1370 {
1371 	uint32_t v_set;
1372 
1373 	IPFW_UH_WLOCK_ASSERT(chain);
1374 
1375 	/* Change enabled/disabled sets mask */
1376 	v_set = (V_set_disable | rt->set) & ~rt->new_set;
1377 	v_set &= ~(1 << RESVD_SET); /* set RESVD_SET always enabled */
1378 	IPFW_WLOCK(chain);
1379 	V_set_disable = v_set;
1380 	IPFW_WUNLOCK(chain);
1381 }
1382 
1383 static int
1384 swap_sets(struct ip_fw_chain *chain, ipfw_range_tlv *rt, int mv)
1385 {
1386 	struct opcode_obj_rewrite *rw;
1387 	struct ip_fw *rule;
1388 	int i;
1389 
1390 	IPFW_UH_WLOCK_ASSERT(chain);
1391 
1392 	if (rt->set == rt->new_set) /* nothing to do */
1393 		return (0);
1394 
1395 	if (mv != 0) {
1396 		/*
1397 		 * Berfore moving the rules we need to check that
1398 		 * there aren't any conflicting named objects.
1399 		 */
1400 		for (rw = ctl3_rewriters;
1401 		    rw < ctl3_rewriters + ctl3_rsize; rw++) {
1402 			if (rw->manage_sets == NULL)
1403 				continue;
1404 			i = rw->manage_sets(chain, (uint8_t)rt->set,
1405 			    (uint8_t)rt->new_set, TEST_ALL);
1406 			if (i != 0)
1407 				return (EEXIST);
1408 		}
1409 	}
1410 	/* Swap or move two sets */
1411 	for (i = 0; i < chain->n_rules - 1; i++) {
1412 		rule = chain->map[i];
1413 		if (rule->set == (uint8_t)rt->set)
1414 			rule->set = (uint8_t)rt->new_set;
1415 		else if (rule->set == (uint8_t)rt->new_set && mv == 0)
1416 			rule->set = (uint8_t)rt->set;
1417 	}
1418 	for (rw = ctl3_rewriters; rw < ctl3_rewriters + ctl3_rsize; rw++) {
1419 		if (rw->manage_sets == NULL)
1420 			continue;
1421 		rw->manage_sets(chain, (uint8_t)rt->set,
1422 		    (uint8_t)rt->new_set, mv != 0 ? MOVE_ALL: SWAP_ALL);
1423 	}
1424 	return (0);
1425 }
1426 
1427 /*
1428  * Swaps or moves set
1429  * Data layout (v0)(current):
1430  * Request: [ ipfw_obj_header ipfw_range_tlv ]
1431  *
1432  * Returns 0 on success.
1433  */
1434 static int
1435 manage_sets(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
1436     struct sockopt_data *sd)
1437 {
1438 	ipfw_range_header *rh;
1439 	int ret;
1440 
1441 	if (sd->valsize != sizeof(*rh))
1442 		return (EINVAL);
1443 
1444 	rh = (ipfw_range_header *)ipfw_get_sopt_space(sd, sd->valsize);
1445 
1446 	if (rh->range.head.length != sizeof(ipfw_range_tlv))
1447 		return (1);
1448 	/* enable_sets() expects bitmasks. */
1449 	if (op3->opcode != IP_FW_SET_ENABLE &&
1450 	    (rh->range.set >= IPFW_MAX_SETS ||
1451 	    rh->range.new_set >= IPFW_MAX_SETS))
1452 		return (EINVAL);
1453 
1454 	ret = 0;
1455 	IPFW_UH_WLOCK(chain);
1456 	switch (op3->opcode) {
1457 	case IP_FW_SET_SWAP:
1458 	case IP_FW_SET_MOVE:
1459 		ret = swap_sets(chain, &rh->range,
1460 		    op3->opcode == IP_FW_SET_MOVE);
1461 		break;
1462 	case IP_FW_SET_ENABLE:
1463 		enable_sets(chain, &rh->range);
1464 		break;
1465 	}
1466 	IPFW_UH_WUNLOCK(chain);
1467 
1468 	return (ret);
1469 }
1470 
1471 /**
1472  * Remove all rules with given number, or do set manipulation.
1473  * Assumes chain != NULL && *chain != NULL.
1474  *
1475  * The argument is an uint32_t. The low 16 bit are the rule or set number;
1476  * the next 8 bits are the new set; the top 8 bits indicate the command:
1477  *
1478  *	0	delete rules numbered "rulenum"
1479  *	1	delete rules in set "rulenum"
1480  *	2	move rules "rulenum" to set "new_set"
1481  *	3	move rules from set "rulenum" to set "new_set"
1482  *	4	swap sets "rulenum" and "new_set"
1483  *	5	delete rules "rulenum" and set "new_set"
1484  */
1485 static int
1486 del_entry(struct ip_fw_chain *chain, uint32_t arg)
1487 {
1488 	uint32_t num;	/* rule number or old_set */
1489 	uint8_t cmd, new_set;
1490 	int do_del, ndel;
1491 	int error = 0;
1492 	ipfw_range_tlv rt;
1493 
1494 	num = arg & 0xffff;
1495 	cmd = (arg >> 24) & 0xff;
1496 	new_set = (arg >> 16) & 0xff;
1497 
1498 	if (cmd > 5 || new_set > RESVD_SET)
1499 		return EINVAL;
1500 	if (cmd == 0 || cmd == 2 || cmd == 5) {
1501 		if (num >= IPFW_DEFAULT_RULE)
1502 			return EINVAL;
1503 	} else {
1504 		if (num > RESVD_SET)	/* old_set */
1505 			return EINVAL;
1506 	}
1507 
1508 	/* Convert old requests into new representation */
1509 	memset(&rt, 0, sizeof(rt));
1510 	rt.start_rule = num;
1511 	rt.end_rule = num;
1512 	rt.set = num;
1513 	rt.new_set = new_set;
1514 	do_del = 0;
1515 
1516 	switch (cmd) {
1517 	case 0: /* delete rules numbered "rulenum" */
1518 		if (num == 0)
1519 			rt.flags |= IPFW_RCFLAG_ALL;
1520 		else
1521 			rt.flags |= IPFW_RCFLAG_RANGE;
1522 		do_del = 1;
1523 		break;
1524 	case 1: /* delete rules in set "rulenum" */
1525 		rt.flags |= IPFW_RCFLAG_SET;
1526 		do_del = 1;
1527 		break;
1528 	case 5: /* delete rules "rulenum" and set "new_set" */
1529 		rt.flags |= IPFW_RCFLAG_RANGE | IPFW_RCFLAG_SET;
1530 		rt.set = new_set;
1531 		rt.new_set = 0;
1532 		do_del = 1;
1533 		break;
1534 	case 2: /* move rules "rulenum" to set "new_set" */
1535 		rt.flags |= IPFW_RCFLAG_RANGE;
1536 		break;
1537 	case 3: /* move rules from set "rulenum" to set "new_set" */
1538 		IPFW_UH_WLOCK(chain);
1539 		error = swap_sets(chain, &rt, 1);
1540 		IPFW_UH_WUNLOCK(chain);
1541 		return (error);
1542 	case 4: /* swap sets "rulenum" and "new_set" */
1543 		IPFW_UH_WLOCK(chain);
1544 		error = swap_sets(chain, &rt, 0);
1545 		IPFW_UH_WUNLOCK(chain);
1546 		return (error);
1547 	default:
1548 		return (ENOTSUP);
1549 	}
1550 
1551 	if (do_del != 0) {
1552 		if ((error = delete_range(chain, &rt, &ndel)) != 0)
1553 			return (error);
1554 
1555 		if (ndel == 0 && (cmd != 1 && num != 0))
1556 			return (EINVAL);
1557 
1558 		return (0);
1559 	}
1560 
1561 	return (move_range(chain, &rt));
1562 }
1563 
1564 /**
1565  * Reset some or all counters on firewall rules.
1566  * The argument `arg' is an u_int32_t. The low 16 bit are the rule number,
1567  * the next 8 bits are the set number, the top 8 bits are the command:
1568  *	0	work with rules from all set's;
1569  *	1	work with rules only from specified set.
1570  * Specified rule number is zero if we want to clear all entries.
1571  * log_only is 1 if we only want to reset logs, zero otherwise.
1572  */
1573 static int
1574 zero_entry(struct ip_fw_chain *chain, u_int32_t arg, int log_only)
1575 {
1576 	struct ip_fw *rule;
1577 	char *msg;
1578 	int i;
1579 
1580 	uint16_t rulenum = arg & 0xffff;
1581 	uint8_t set = (arg >> 16) & 0xff;
1582 	uint8_t cmd = (arg >> 24) & 0xff;
1583 
1584 	if (cmd > 1)
1585 		return (EINVAL);
1586 	if (cmd == 1 && set > RESVD_SET)
1587 		return (EINVAL);
1588 
1589 	IPFW_UH_RLOCK(chain);
1590 	if (rulenum == 0) {
1591 		V_norule_counter = 0;
1592 		for (i = 0; i < chain->n_rules; i++) {
1593 			rule = chain->map[i];
1594 			/* Skip rules not in our set. */
1595 			if (cmd == 1 && rule->set != set)
1596 				continue;
1597 			clear_counters(rule, log_only);
1598 		}
1599 		msg = log_only ? "All logging counts reset" :
1600 		    "Accounting cleared";
1601 	} else {
1602 		int cleared = 0;
1603 		for (i = 0; i < chain->n_rules; i++) {
1604 			rule = chain->map[i];
1605 			if (rule->rulenum == rulenum) {
1606 				if (cmd == 0 || rule->set == set)
1607 					clear_counters(rule, log_only);
1608 				cleared = 1;
1609 			}
1610 			if (rule->rulenum > rulenum)
1611 				break;
1612 		}
1613 		if (!cleared) {	/* we did not find any matching rules */
1614 			IPFW_UH_RUNLOCK(chain);
1615 			return (EINVAL);
1616 		}
1617 		msg = log_only ? "logging count reset" : "cleared";
1618 	}
1619 	IPFW_UH_RUNLOCK(chain);
1620 
1621 	if (V_fw_verbose) {
1622 		int lev = LOG_SECURITY | LOG_NOTICE;
1623 
1624 		if (rulenum)
1625 			log(lev, "ipfw: Entry %d %s.\n", rulenum, msg);
1626 		else
1627 			log(lev, "ipfw: %s.\n", msg);
1628 	}
1629 	return (0);
1630 }
1631 
1632 
1633 /*
1634  * Check rule head in FreeBSD11 format
1635  *
1636  */
1637 static int
1638 check_ipfw_rule1(struct ip_fw_rule *rule, int size,
1639     struct rule_check_info *ci)
1640 {
1641 	int l;
1642 
1643 	if (size < sizeof(*rule)) {
1644 		printf("ipfw: rule too short\n");
1645 		return (EINVAL);
1646 	}
1647 
1648 	/* Check for valid cmd_len */
1649 	l = roundup2(RULESIZE(rule), sizeof(uint64_t));
1650 	if (l != size) {
1651 		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1652 		return (EINVAL);
1653 	}
1654 	if (rule->act_ofs >= rule->cmd_len) {
1655 		printf("ipfw: bogus action offset (%u > %u)\n",
1656 		    rule->act_ofs, rule->cmd_len - 1);
1657 		return (EINVAL);
1658 	}
1659 
1660 	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1661 		return (EINVAL);
1662 
1663 	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1664 }
1665 
1666 /*
1667  * Check rule head in FreeBSD8 format
1668  *
1669  */
1670 static int
1671 check_ipfw_rule0(struct ip_fw_rule0 *rule, int size,
1672     struct rule_check_info *ci)
1673 {
1674 	int l;
1675 
1676 	if (size < sizeof(*rule)) {
1677 		printf("ipfw: rule too short\n");
1678 		return (EINVAL);
1679 	}
1680 
1681 	/* Check for valid cmd_len */
1682 	l = sizeof(*rule) + rule->cmd_len * 4 - 4;
1683 	if (l != size) {
1684 		printf("ipfw: size mismatch (have %d want %d)\n", size, l);
1685 		return (EINVAL);
1686 	}
1687 	if (rule->act_ofs >= rule->cmd_len) {
1688 		printf("ipfw: bogus action offset (%u > %u)\n",
1689 		    rule->act_ofs, rule->cmd_len - 1);
1690 		return (EINVAL);
1691 	}
1692 
1693 	if (rule->rulenum > IPFW_DEFAULT_RULE - 1)
1694 		return (EINVAL);
1695 
1696 	return (check_ipfw_rule_body(rule->cmd, rule->cmd_len, ci));
1697 }
1698 
1699 static int
1700 check_ipfw_rule_body(ipfw_insn *cmd, int cmd_len, struct rule_check_info *ci)
1701 {
1702 	int cmdlen, l;
1703 	int have_action;
1704 
1705 	have_action = 0;
1706 
1707 	/*
1708 	 * Now go for the individual checks. Very simple ones, basically only
1709 	 * instruction sizes.
1710 	 */
1711 	for (l = cmd_len; l > 0 ; l -= cmdlen, cmd += cmdlen) {
1712 		cmdlen = F_LEN(cmd);
1713 		if (cmdlen > l) {
1714 			printf("ipfw: opcode %d size truncated\n",
1715 			    cmd->opcode);
1716 			return EINVAL;
1717 		}
1718 		switch (cmd->opcode) {
1719 		case O_PROBE_STATE:
1720 		case O_KEEP_STATE:
1721 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1722 				goto bad_size;
1723 			ci->object_opcodes++;
1724 			break;
1725 		case O_PROTO:
1726 		case O_IP_SRC_ME:
1727 		case O_IP_DST_ME:
1728 		case O_LAYER2:
1729 		case O_IN:
1730 		case O_FRAG:
1731 		case O_DIVERTED:
1732 		case O_IPOPT:
1733 		case O_IPTOS:
1734 		case O_IPPRECEDENCE:
1735 		case O_IPVER:
1736 		case O_SOCKARG:
1737 		case O_TCPFLAGS:
1738 		case O_TCPOPTS:
1739 		case O_ESTAB:
1740 		case O_VERREVPATH:
1741 		case O_VERSRCREACH:
1742 		case O_ANTISPOOF:
1743 		case O_IPSEC:
1744 #ifdef INET6
1745 		case O_IP6_SRC_ME:
1746 		case O_IP6_DST_ME:
1747 		case O_EXT_HDR:
1748 		case O_IP6:
1749 #endif
1750 		case O_IP4:
1751 		case O_TAG:
1752 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1753 				goto bad_size;
1754 			break;
1755 
1756 		case O_EXTERNAL_ACTION:
1757 			if (cmd->arg1 == 0 ||
1758 			    cmdlen != F_INSN_SIZE(ipfw_insn)) {
1759 				printf("ipfw: invalid external "
1760 				    "action opcode\n");
1761 				return (EINVAL);
1762 			}
1763 			ci->object_opcodes++;
1764 			/*
1765 			 * Do we have O_EXTERNAL_INSTANCE or O_EXTERNAL_DATA
1766 			 * opcode?
1767 			 */
1768 			if (l != cmdlen) {
1769 				l -= cmdlen;
1770 				cmd += cmdlen;
1771 				cmdlen = F_LEN(cmd);
1772 				if (cmd->opcode == O_EXTERNAL_DATA)
1773 					goto check_action;
1774 				if (cmd->opcode != O_EXTERNAL_INSTANCE) {
1775 					printf("ipfw: invalid opcode "
1776 					    "next to external action %u\n",
1777 					    cmd->opcode);
1778 					return (EINVAL);
1779 				}
1780 				if (cmd->arg1 == 0 ||
1781 				    cmdlen != F_INSN_SIZE(ipfw_insn)) {
1782 					printf("ipfw: invalid external "
1783 					    "action instance opcode\n");
1784 					return (EINVAL);
1785 				}
1786 				ci->object_opcodes++;
1787 			}
1788 			goto check_action;
1789 
1790 		case O_FIB:
1791 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1792 				goto bad_size;
1793 			if (cmd->arg1 >= rt_numfibs) {
1794 				printf("ipfw: invalid fib number %d\n",
1795 					cmd->arg1);
1796 				return EINVAL;
1797 			}
1798 			break;
1799 
1800 		case O_SETFIB:
1801 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1802 				goto bad_size;
1803 			if ((cmd->arg1 != IP_FW_TARG) &&
1804 			    ((cmd->arg1 & 0x7FFF) >= rt_numfibs)) {
1805 				printf("ipfw: invalid fib number %d\n",
1806 					cmd->arg1 & 0x7FFF);
1807 				return EINVAL;
1808 			}
1809 			goto check_action;
1810 
1811 		case O_UID:
1812 		case O_GID:
1813 		case O_JAIL:
1814 		case O_IP_SRC:
1815 		case O_IP_DST:
1816 		case O_TCPSEQ:
1817 		case O_TCPACK:
1818 		case O_PROB:
1819 		case O_ICMPTYPE:
1820 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1821 				goto bad_size;
1822 			break;
1823 
1824 		case O_LIMIT:
1825 			if (cmdlen != F_INSN_SIZE(ipfw_insn_limit))
1826 				goto bad_size;
1827 			ci->object_opcodes++;
1828 			break;
1829 
1830 		case O_LOG:
1831 			if (cmdlen != F_INSN_SIZE(ipfw_insn_log))
1832 				goto bad_size;
1833 
1834 			((ipfw_insn_log *)cmd)->log_left =
1835 			    ((ipfw_insn_log *)cmd)->max_log;
1836 
1837 			break;
1838 
1839 		case O_IP_SRC_MASK:
1840 		case O_IP_DST_MASK:
1841 			/* only odd command lengths */
1842 			if ((cmdlen & 1) == 0)
1843 				goto bad_size;
1844 			break;
1845 
1846 		case O_IP_SRC_SET:
1847 		case O_IP_DST_SET:
1848 			if (cmd->arg1 == 0 || cmd->arg1 > 256) {
1849 				printf("ipfw: invalid set size %d\n",
1850 					cmd->arg1);
1851 				return EINVAL;
1852 			}
1853 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
1854 			    (cmd->arg1+31)/32 )
1855 				goto bad_size;
1856 			break;
1857 
1858 		case O_IP_SRC_LOOKUP:
1859 			if (cmdlen > F_INSN_SIZE(ipfw_insn_u32))
1860 				goto bad_size;
1861 		case O_IP_DST_LOOKUP:
1862 			if (cmd->arg1 >= V_fw_tables_max) {
1863 				printf("ipfw: invalid table number %d\n",
1864 				    cmd->arg1);
1865 				return (EINVAL);
1866 			}
1867 			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1868 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1 &&
1869 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1870 				goto bad_size;
1871 			ci->object_opcodes++;
1872 			break;
1873 		case O_IP_FLOW_LOOKUP:
1874 			if (cmd->arg1 >= V_fw_tables_max) {
1875 				printf("ipfw: invalid table number %d\n",
1876 				    cmd->arg1);
1877 				return (EINVAL);
1878 			}
1879 			if (cmdlen != F_INSN_SIZE(ipfw_insn) &&
1880 			    cmdlen != F_INSN_SIZE(ipfw_insn_u32))
1881 				goto bad_size;
1882 			ci->object_opcodes++;
1883 			break;
1884 		case O_MACADDR2:
1885 			if (cmdlen != F_INSN_SIZE(ipfw_insn_mac))
1886 				goto bad_size;
1887 			break;
1888 
1889 		case O_NOP:
1890 		case O_IPID:
1891 		case O_IPTTL:
1892 		case O_IPLEN:
1893 		case O_TCPDATALEN:
1894 		case O_TCPWIN:
1895 		case O_TAGGED:
1896 			if (cmdlen < 1 || cmdlen > 31)
1897 				goto bad_size;
1898 			break;
1899 
1900 		case O_DSCP:
1901 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + 1)
1902 				goto bad_size;
1903 			break;
1904 
1905 		case O_MAC_TYPE:
1906 		case O_IP_SRCPORT:
1907 		case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */
1908 			if (cmdlen < 2 || cmdlen > 31)
1909 				goto bad_size;
1910 			break;
1911 
1912 		case O_RECV:
1913 		case O_XMIT:
1914 		case O_VIA:
1915 			if (cmdlen != F_INSN_SIZE(ipfw_insn_if))
1916 				goto bad_size;
1917 			ci->object_opcodes++;
1918 			break;
1919 
1920 		case O_ALTQ:
1921 			if (cmdlen != F_INSN_SIZE(ipfw_insn_altq))
1922 				goto bad_size;
1923 			break;
1924 
1925 		case O_PIPE:
1926 		case O_QUEUE:
1927 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1928 				goto bad_size;
1929 			goto check_action;
1930 
1931 		case O_FORWARD_IP:
1932 			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa))
1933 				goto bad_size;
1934 			goto check_action;
1935 #ifdef INET6
1936 		case O_FORWARD_IP6:
1937 			if (cmdlen != F_INSN_SIZE(ipfw_insn_sa6))
1938 				goto bad_size;
1939 			goto check_action;
1940 #endif /* INET6 */
1941 
1942 		case O_DIVERT:
1943 		case O_TEE:
1944 			if (ip_divert_ptr == NULL)
1945 				return EINVAL;
1946 			else
1947 				goto check_size;
1948 		case O_NETGRAPH:
1949 		case O_NGTEE:
1950 			if (ng_ipfw_input_p == NULL)
1951 				return EINVAL;
1952 			else
1953 				goto check_size;
1954 		case O_NAT:
1955 			if (!IPFW_NAT_LOADED)
1956 				return EINVAL;
1957 			if (cmdlen != F_INSN_SIZE(ipfw_insn_nat))
1958  				goto bad_size;
1959  			goto check_action;
1960 		case O_CHECK_STATE:
1961 			ci->object_opcodes++;
1962 			/* FALLTHROUGH */
1963 		case O_FORWARD_MAC: /* XXX not implemented yet */
1964 		case O_COUNT:
1965 		case O_ACCEPT:
1966 		case O_DENY:
1967 		case O_REJECT:
1968 		case O_SETDSCP:
1969 #ifdef INET6
1970 		case O_UNREACH6:
1971 #endif
1972 		case O_SKIPTO:
1973 		case O_REASS:
1974 		case O_CALLRETURN:
1975 check_size:
1976 			if (cmdlen != F_INSN_SIZE(ipfw_insn))
1977 				goto bad_size;
1978 check_action:
1979 			if (have_action) {
1980 				printf("ipfw: opcode %d, multiple actions"
1981 					" not allowed\n",
1982 					cmd->opcode);
1983 				return (EINVAL);
1984 			}
1985 			have_action = 1;
1986 			if (l != cmdlen) {
1987 				printf("ipfw: opcode %d, action must be"
1988 					" last opcode\n",
1989 					cmd->opcode);
1990 				return (EINVAL);
1991 			}
1992 			break;
1993 #ifdef INET6
1994 		case O_IP6_SRC:
1995 		case O_IP6_DST:
1996 			if (cmdlen != F_INSN_SIZE(struct in6_addr) +
1997 			    F_INSN_SIZE(ipfw_insn))
1998 				goto bad_size;
1999 			break;
2000 
2001 		case O_FLOW6ID:
2002 			if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) +
2003 			    ((ipfw_insn_u32 *)cmd)->o.arg1)
2004 				goto bad_size;
2005 			break;
2006 
2007 		case O_IP6_SRC_MASK:
2008 		case O_IP6_DST_MASK:
2009 			if ( !(cmdlen & 1) || cmdlen > 127)
2010 				goto bad_size;
2011 			break;
2012 		case O_ICMP6TYPE:
2013 			if( cmdlen != F_INSN_SIZE( ipfw_insn_icmp6 ) )
2014 				goto bad_size;
2015 			break;
2016 #endif
2017 
2018 		default:
2019 			switch (cmd->opcode) {
2020 #ifndef INET6
2021 			case O_IP6_SRC_ME:
2022 			case O_IP6_DST_ME:
2023 			case O_EXT_HDR:
2024 			case O_IP6:
2025 			case O_UNREACH6:
2026 			case O_IP6_SRC:
2027 			case O_IP6_DST:
2028 			case O_FLOW6ID:
2029 			case O_IP6_SRC_MASK:
2030 			case O_IP6_DST_MASK:
2031 			case O_ICMP6TYPE:
2032 				printf("ipfw: no IPv6 support in kernel\n");
2033 				return (EPROTONOSUPPORT);
2034 #endif
2035 			default:
2036 				printf("ipfw: opcode %d, unknown opcode\n",
2037 					cmd->opcode);
2038 				return (EINVAL);
2039 			}
2040 		}
2041 	}
2042 	if (have_action == 0) {
2043 		printf("ipfw: missing action\n");
2044 		return (EINVAL);
2045 	}
2046 	return 0;
2047 
2048 bad_size:
2049 	printf("ipfw: opcode %d size %d wrong\n",
2050 		cmd->opcode, cmdlen);
2051 	return (EINVAL);
2052 }
2053 
2054 
2055 /*
2056  * Translation of requests for compatibility with FreeBSD 7.2/8.
2057  * a static variable tells us if we have an old client from userland,
2058  * and if necessary we translate requests and responses between the
2059  * two formats.
2060  */
2061 static int is7 = 0;
2062 
2063 struct ip_fw7 {
2064 	struct ip_fw7	*next;		/* linked list of rules     */
2065 	struct ip_fw7	*next_rule;	/* ptr to next [skipto] rule    */
2066 	/* 'next_rule' is used to pass up 'set_disable' status      */
2067 
2068 	uint16_t	act_ofs;	/* offset of action in 32-bit units */
2069 	uint16_t	cmd_len;	/* # of 32-bit words in cmd */
2070 	uint16_t	rulenum;	/* rule number          */
2071 	uint8_t		set;		/* rule set (0..31)     */
2072 	// #define RESVD_SET   31  /* set for default and persistent rules */
2073 	uint8_t		_pad;		/* padding          */
2074 	// uint32_t        id;             /* rule id, only in v.8 */
2075 	/* These fields are present in all rules.           */
2076 	uint64_t	pcnt;		/* Packet counter       */
2077 	uint64_t	bcnt;		/* Byte counter         */
2078 	uint32_t	timestamp;	/* tv_sec of last match     */
2079 
2080 	ipfw_insn	cmd[1];		/* storage for commands     */
2081 };
2082 
2083 static int convert_rule_to_7(struct ip_fw_rule0 *rule);
2084 static int convert_rule_to_8(struct ip_fw_rule0 *rule);
2085 
2086 #ifndef RULESIZE7
2087 #define RULESIZE7(rule)  (sizeof(struct ip_fw7) + \
2088 	((struct ip_fw7 *)(rule))->cmd_len * 4 - 4)
2089 #endif
2090 
2091 
2092 /*
2093  * Copy the static and dynamic rules to the supplied buffer
2094  * and return the amount of space actually used.
2095  * Must be run under IPFW_UH_RLOCK
2096  */
2097 static size_t
2098 ipfw_getrules(struct ip_fw_chain *chain, void *buf, size_t space)
2099 {
2100 	char *bp = buf;
2101 	char *ep = bp + space;
2102 	struct ip_fw *rule;
2103 	struct ip_fw_rule0 *dst;
2104 	struct timeval boottime;
2105 	int error, i, l, warnflag;
2106 	time_t	boot_seconds;
2107 
2108 	warnflag = 0;
2109 
2110 	getboottime(&boottime);
2111         boot_seconds = boottime.tv_sec;
2112 	for (i = 0; i < chain->n_rules; i++) {
2113 		rule = chain->map[i];
2114 
2115 		if (is7) {
2116 		    /* Convert rule to FreeBSd 7.2 format */
2117 		    l = RULESIZE7(rule);
2118 		    if (bp + l + sizeof(uint32_t) <= ep) {
2119 			bcopy(rule, bp, l + sizeof(uint32_t));
2120 			error = set_legacy_obj_kidx(chain,
2121 			    (struct ip_fw_rule0 *)bp);
2122 			if (error != 0)
2123 				return (0);
2124 			error = convert_rule_to_7((struct ip_fw_rule0 *) bp);
2125 			if (error)
2126 				return 0; /*XXX correct? */
2127 			/*
2128 			 * XXX HACK. Store the disable mask in the "next"
2129 			 * pointer in a wild attempt to keep the ABI the same.
2130 			 * Why do we do this on EVERY rule?
2131 			 */
2132 			bcopy(&V_set_disable,
2133 				&(((struct ip_fw7 *)bp)->next_rule),
2134 				sizeof(V_set_disable));
2135 			if (((struct ip_fw7 *)bp)->timestamp)
2136 			    ((struct ip_fw7 *)bp)->timestamp += boot_seconds;
2137 			bp += l;
2138 		    }
2139 		    continue; /* go to next rule */
2140 		}
2141 
2142 		l = RULEUSIZE0(rule);
2143 		if (bp + l > ep) { /* should not happen */
2144 			printf("overflow dumping static rules\n");
2145 			break;
2146 		}
2147 		dst = (struct ip_fw_rule0 *)bp;
2148 		export_rule0(rule, dst, l);
2149 		error = set_legacy_obj_kidx(chain, dst);
2150 
2151 		/*
2152 		 * XXX HACK. Store the disable mask in the "next"
2153 		 * pointer in a wild attempt to keep the ABI the same.
2154 		 * Why do we do this on EVERY rule?
2155 		 *
2156 		 * XXX: "ipfw set show" (ab)uses IP_FW_GET to read disabled mask
2157 		 * so we need to fail _after_ saving at least one mask.
2158 		 */
2159 		bcopy(&V_set_disable, &dst->next_rule, sizeof(V_set_disable));
2160 		if (dst->timestamp)
2161 			dst->timestamp += boot_seconds;
2162 		bp += l;
2163 
2164 		if (error != 0) {
2165 			if (error == 2) {
2166 				/* Non-fatal table rewrite error. */
2167 				warnflag = 1;
2168 				continue;
2169 			}
2170 			printf("Stop on rule %d. Fail to convert table\n",
2171 			    rule->rulenum);
2172 			break;
2173 		}
2174 	}
2175 	if (warnflag != 0)
2176 		printf("ipfw: process %s is using legacy interfaces,"
2177 		    " consider rebuilding\n", "");
2178 	ipfw_get_dynamic(chain, &bp, ep); /* protected by the dynamic lock */
2179 	return (bp - (char *)buf);
2180 }
2181 
2182 
2183 struct dump_args {
2184 	uint32_t	b;	/* start rule */
2185 	uint32_t	e;	/* end rule */
2186 	uint32_t	rcount;	/* number of rules */
2187 	uint32_t	rsize;	/* rules size */
2188 	uint32_t	tcount;	/* number of tables */
2189 	int		rcounters;	/* counters */
2190 };
2191 
2192 void
2193 ipfw_export_obj_ntlv(struct named_object *no, ipfw_obj_ntlv *ntlv)
2194 {
2195 
2196 	ntlv->head.type = no->etlv;
2197 	ntlv->head.length = sizeof(*ntlv);
2198 	ntlv->idx = no->kidx;
2199 	strlcpy(ntlv->name, no->name, sizeof(ntlv->name));
2200 }
2201 
2202 /*
2203  * Export named object info in instance @ni, identified by @kidx
2204  * to ipfw_obj_ntlv. TLV is allocated from @sd space.
2205  *
2206  * Returns 0 on success.
2207  */
2208 static int
2209 export_objhash_ntlv(struct namedobj_instance *ni, uint16_t kidx,
2210     struct sockopt_data *sd)
2211 {
2212 	struct named_object *no;
2213 	ipfw_obj_ntlv *ntlv;
2214 
2215 	no = ipfw_objhash_lookup_kidx(ni, kidx);
2216 	KASSERT(no != NULL, ("invalid object kernel index passed"));
2217 
2218 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
2219 	if (ntlv == NULL)
2220 		return (ENOMEM);
2221 
2222 	ipfw_export_obj_ntlv(no, ntlv);
2223 	return (0);
2224 }
2225 
2226 /*
2227  * Dumps static rules with table TLVs in buffer @sd.
2228  *
2229  * Returns 0 on success.
2230  */
2231 static int
2232 dump_static_rules(struct ip_fw_chain *chain, struct dump_args *da,
2233     uint32_t *bmask, struct sockopt_data *sd)
2234 {
2235 	int error;
2236 	int i, l;
2237 	uint32_t tcount;
2238 	ipfw_obj_ctlv *ctlv;
2239 	struct ip_fw *krule;
2240 	struct namedobj_instance *ni;
2241 	caddr_t dst;
2242 
2243 	/* Dump table names first (if any) */
2244 	if (da->tcount > 0) {
2245 		/* Header first */
2246 		ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2247 		if (ctlv == NULL)
2248 			return (ENOMEM);
2249 		ctlv->head.type = IPFW_TLV_TBLNAME_LIST;
2250 		ctlv->head.length = da->tcount * sizeof(ipfw_obj_ntlv) +
2251 		    sizeof(*ctlv);
2252 		ctlv->count = da->tcount;
2253 		ctlv->objsize = sizeof(ipfw_obj_ntlv);
2254 	}
2255 
2256 	i = 0;
2257 	tcount = da->tcount;
2258 	ni = ipfw_get_table_objhash(chain);
2259 	while (tcount > 0) {
2260 		if ((bmask[i / 32] & (1 << (i % 32))) == 0) {
2261 			i++;
2262 			continue;
2263 		}
2264 
2265 		/* Jump to shared named object bitmask */
2266 		if (i >= IPFW_TABLES_MAX) {
2267 			ni = CHAIN_TO_SRV(chain);
2268 			i -= IPFW_TABLES_MAX;
2269 			bmask += IPFW_TABLES_MAX / 32;
2270 		}
2271 
2272 		if ((error = export_objhash_ntlv(ni, i, sd)) != 0)
2273 			return (error);
2274 
2275 		i++;
2276 		tcount--;
2277 	}
2278 
2279 	/* Dump rules */
2280 	ctlv = (ipfw_obj_ctlv *)ipfw_get_sopt_space(sd, sizeof(*ctlv));
2281 	if (ctlv == NULL)
2282 		return (ENOMEM);
2283 	ctlv->head.type = IPFW_TLV_RULE_LIST;
2284 	ctlv->head.length = da->rsize + sizeof(*ctlv);
2285 	ctlv->count = da->rcount;
2286 
2287 	for (i = da->b; i < da->e; i++) {
2288 		krule = chain->map[i];
2289 
2290 		l = RULEUSIZE1(krule) + sizeof(ipfw_obj_tlv);
2291 		if (da->rcounters != 0)
2292 			l += sizeof(struct ip_fw_bcounter);
2293 		dst = (caddr_t)ipfw_get_sopt_space(sd, l);
2294 		if (dst == NULL)
2295 			return (ENOMEM);
2296 
2297 		export_rule1(krule, dst, l, da->rcounters);
2298 	}
2299 
2300 	return (0);
2301 }
2302 
2303 /*
2304  * Marks every object index used in @rule with bit in @bmask.
2305  * Used to generate bitmask of referenced tables/objects for given ruleset
2306  * or its part.
2307  *
2308  * Returns number of newly-referenced objects.
2309  */
2310 static int
2311 mark_object_kidx(struct ip_fw_chain *ch, struct ip_fw *rule,
2312     uint32_t *bmask)
2313 {
2314 	struct opcode_obj_rewrite *rw;
2315 	ipfw_insn *cmd;
2316 	int bidx, cmdlen, l, count;
2317 	uint16_t kidx;
2318 	uint8_t subtype;
2319 
2320 	l = rule->cmd_len;
2321 	cmd = rule->cmd;
2322 	cmdlen = 0;
2323 	count = 0;
2324 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2325 		cmdlen = F_LEN(cmd);
2326 
2327 		rw = find_op_rw(cmd, &kidx, &subtype);
2328 		if (rw == NULL)
2329 			continue;
2330 
2331 		bidx = kidx / 32;
2332 		/*
2333 		 * Maintain separate bitmasks for table and
2334 		 * non-table objects.
2335 		 */
2336 		if (rw->etlv != IPFW_TLV_TBL_NAME)
2337 			bidx += IPFW_TABLES_MAX / 32;
2338 
2339 		if ((bmask[bidx] & (1 << (kidx % 32))) == 0)
2340 			count++;
2341 
2342 		bmask[bidx] |= 1 << (kidx % 32);
2343 	}
2344 
2345 	return (count);
2346 }
2347 
2348 /*
2349  * Dumps requested objects data
2350  * Data layout (version 0)(current):
2351  * Request: [ ipfw_cfg_lheader ] + IPFW_CFG_GET_* flags
2352  *   size = ipfw_cfg_lheader.size
2353  * Reply: [ ipfw_cfg_lheader
2354  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2355  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST)
2356  *     ipfw_obj_tlv(IPFW_TLV_RULE_ENT) [ ip_fw_bcounter (optional) ip_fw_rule ]
2357  *   ] (optional)
2358  *   [ ipfw_obj_ctlv(IPFW_TLV_STATE_LIST) ipfw_obj_dyntlv x N ] (optional)
2359  * ]
2360  * * NOTE IPFW_TLV_STATE_LIST has the single valid field: objsize.
2361  * The rest (size, count) are set to zero and needs to be ignored.
2362  *
2363  * Returns 0 on success.
2364  */
2365 static int
2366 dump_config(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2367     struct sockopt_data *sd)
2368 {
2369 	ipfw_cfg_lheader *hdr;
2370 	struct ip_fw *rule;
2371 	size_t sz, rnum;
2372 	uint32_t hdr_flags;
2373 	int error, i;
2374 	struct dump_args da;
2375 	uint32_t *bmask;
2376 
2377 	hdr = (ipfw_cfg_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
2378 	if (hdr == NULL)
2379 		return (EINVAL);
2380 
2381 	error = 0;
2382 	bmask = NULL;
2383 	/* Allocate needed state. Note we allocate 2xspace mask, for table&srv  */
2384 	if (hdr->flags & IPFW_CFG_GET_STATIC)
2385 		bmask = malloc(IPFW_TABLES_MAX / 4, M_TEMP, M_WAITOK | M_ZERO);
2386 
2387 	IPFW_UH_RLOCK(chain);
2388 
2389 	/*
2390 	 * STAGE 1: Determine size/count for objects in range.
2391 	 * Prepare used tables bitmask.
2392 	 */
2393 	sz = sizeof(ipfw_cfg_lheader);
2394 	memset(&da, 0, sizeof(da));
2395 
2396 	da.b = 0;
2397 	da.e = chain->n_rules;
2398 
2399 	if (hdr->end_rule != 0) {
2400 		/* Handle custom range */
2401 		if ((rnum = hdr->start_rule) > IPFW_DEFAULT_RULE)
2402 			rnum = IPFW_DEFAULT_RULE;
2403 		da.b = ipfw_find_rule(chain, rnum, 0);
2404 		rnum = hdr->end_rule;
2405 		rnum = (rnum < IPFW_DEFAULT_RULE) ? rnum+1 : IPFW_DEFAULT_RULE;
2406 		da.e = ipfw_find_rule(chain, rnum, 0) + 1;
2407 	}
2408 
2409 	if (hdr->flags & IPFW_CFG_GET_STATIC) {
2410 		for (i = da.b; i < da.e; i++) {
2411 			rule = chain->map[i];
2412 			da.rsize += RULEUSIZE1(rule) + sizeof(ipfw_obj_tlv);
2413 			da.rcount++;
2414 			/* Update bitmask of used objects for given range */
2415 			da.tcount += mark_object_kidx(chain, rule, bmask);
2416 		}
2417 		/* Add counters if requested */
2418 		if (hdr->flags & IPFW_CFG_GET_COUNTERS) {
2419 			da.rsize += sizeof(struct ip_fw_bcounter) * da.rcount;
2420 			da.rcounters = 1;
2421 		}
2422 
2423 		if (da.tcount > 0)
2424 			sz += da.tcount * sizeof(ipfw_obj_ntlv) +
2425 			    sizeof(ipfw_obj_ctlv);
2426 		sz += da.rsize + sizeof(ipfw_obj_ctlv);
2427 	}
2428 
2429 	if (hdr->flags & IPFW_CFG_GET_STATES)
2430 		sz += ipfw_dyn_get_count() * sizeof(ipfw_obj_dyntlv) +
2431 		     sizeof(ipfw_obj_ctlv);
2432 
2433 
2434 	/*
2435 	 * Fill header anyway.
2436 	 * Note we have to save header fields to stable storage
2437 	 * buffer inside @sd can be flushed after dumping rules
2438 	 */
2439 	hdr->size = sz;
2440 	hdr->set_mask = ~V_set_disable;
2441 	hdr_flags = hdr->flags;
2442 	hdr = NULL;
2443 
2444 	if (sd->valsize < sz) {
2445 		error = ENOMEM;
2446 		goto cleanup;
2447 	}
2448 
2449 	/* STAGE2: Store actual data */
2450 	if (hdr_flags & IPFW_CFG_GET_STATIC) {
2451 		error = dump_static_rules(chain, &da, bmask, sd);
2452 		if (error != 0)
2453 			goto cleanup;
2454 	}
2455 
2456 	if (hdr_flags & IPFW_CFG_GET_STATES)
2457 		error = ipfw_dump_states(chain, sd);
2458 
2459 cleanup:
2460 	IPFW_UH_RUNLOCK(chain);
2461 
2462 	if (bmask != NULL)
2463 		free(bmask, M_TEMP);
2464 
2465 	return (error);
2466 }
2467 
2468 int
2469 ipfw_check_object_name_generic(const char *name)
2470 {
2471 	int nsize;
2472 
2473 	nsize = sizeof(((ipfw_obj_ntlv *)0)->name);
2474 	if (strnlen(name, nsize) == nsize)
2475 		return (EINVAL);
2476 	if (name[0] == '\0')
2477 		return (EINVAL);
2478 	return (0);
2479 }
2480 
2481 /*
2482  * Creates non-existent objects referenced by rule.
2483  *
2484  * Return 0 on success.
2485  */
2486 int
2487 create_objects_compat(struct ip_fw_chain *ch, ipfw_insn *cmd,
2488     struct obj_idx *oib, struct obj_idx *pidx, struct tid_info *ti)
2489 {
2490 	struct opcode_obj_rewrite *rw;
2491 	struct obj_idx *p;
2492 	uint16_t kidx;
2493 	int error;
2494 
2495 	/*
2496 	 * Compatibility stuff: do actual creation for non-existing,
2497 	 * but referenced objects.
2498 	 */
2499 	for (p = oib; p < pidx; p++) {
2500 		if (p->kidx != 0)
2501 			continue;
2502 
2503 		ti->uidx = p->uidx;
2504 		ti->type = p->type;
2505 		ti->atype = 0;
2506 
2507 		rw = find_op_rw(cmd + p->off, NULL, NULL);
2508 		KASSERT(rw != NULL, ("Unable to find handler for op %d",
2509 		    (cmd + p->off)->opcode));
2510 
2511 		if (rw->create_object == NULL)
2512 			error = EOPNOTSUPP;
2513 		else
2514 			error = rw->create_object(ch, ti, &kidx);
2515 		if (error == 0) {
2516 			p->kidx = kidx;
2517 			continue;
2518 		}
2519 
2520 		/*
2521 		 * Error happened. We have to rollback everything.
2522 		 * Drop all already acquired references.
2523 		 */
2524 		IPFW_UH_WLOCK(ch);
2525 		unref_oib_objects(ch, cmd, oib, pidx);
2526 		IPFW_UH_WUNLOCK(ch);
2527 
2528 		return (error);
2529 	}
2530 
2531 	return (0);
2532 }
2533 
2534 /*
2535  * Compatibility function for old ipfw(8) binaries.
2536  * Rewrites table/nat kernel indices with userland ones.
2537  * Convert tables matching '/^\d+$/' to their atoi() value.
2538  * Use number 65535 for other tables.
2539  *
2540  * Returns 0 on success.
2541  */
2542 static int
2543 set_legacy_obj_kidx(struct ip_fw_chain *ch, struct ip_fw_rule0 *rule)
2544 {
2545 	struct opcode_obj_rewrite *rw;
2546 	struct named_object *no;
2547 	ipfw_insn *cmd;
2548 	char *end;
2549 	long val;
2550 	int cmdlen, error, l;
2551 	uint16_t kidx, uidx;
2552 	uint8_t subtype;
2553 
2554 	error = 0;
2555 
2556 	l = rule->cmd_len;
2557 	cmd = rule->cmd;
2558 	cmdlen = 0;
2559 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2560 		cmdlen = F_LEN(cmd);
2561 
2562 		/* Check if is index in given opcode */
2563 		rw = find_op_rw(cmd, &kidx, &subtype);
2564 		if (rw == NULL)
2565 			continue;
2566 
2567 		/* Try to find referenced kernel object */
2568 		no = rw->find_bykidx(ch, kidx);
2569 		if (no == NULL)
2570 			continue;
2571 
2572 		val = strtol(no->name, &end, 10);
2573 		if (*end == '\0' && val < 65535) {
2574 			uidx = val;
2575 		} else {
2576 
2577 			/*
2578 			 * We are called via legacy opcode.
2579 			 * Save error and show table as fake number
2580 			 * not to make ipfw(8) hang.
2581 			 */
2582 			uidx = 65535;
2583 			error = 2;
2584 		}
2585 
2586 		rw->update(cmd, uidx);
2587 	}
2588 
2589 	return (error);
2590 }
2591 
2592 
2593 /*
2594  * Unreferences all already-referenced objects in given @cmd rule,
2595  * using information in @oib.
2596  *
2597  * Used to rollback partially converted rule on error.
2598  */
2599 static void
2600 unref_oib_objects(struct ip_fw_chain *ch, ipfw_insn *cmd, struct obj_idx *oib,
2601     struct obj_idx *end)
2602 {
2603 	struct opcode_obj_rewrite *rw;
2604 	struct named_object *no;
2605 	struct obj_idx *p;
2606 
2607 	IPFW_UH_WLOCK_ASSERT(ch);
2608 
2609 	for (p = oib; p < end; p++) {
2610 		if (p->kidx == 0)
2611 			continue;
2612 
2613 		rw = find_op_rw(cmd + p->off, NULL, NULL);
2614 		KASSERT(rw != NULL, ("Unable to find handler for op %d",
2615 		    (cmd + p->off)->opcode));
2616 
2617 		/* Find & unref by existing idx */
2618 		no = rw->find_bykidx(ch, p->kidx);
2619 		KASSERT(no != NULL, ("Ref'd object %d disappeared", p->kidx));
2620 		no->refcnt--;
2621 	}
2622 }
2623 
2624 /*
2625  * Remove references from every object used in @rule.
2626  * Used at rule removal code.
2627  */
2628 static void
2629 unref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule)
2630 {
2631 	struct opcode_obj_rewrite *rw;
2632 	struct named_object *no;
2633 	ipfw_insn *cmd;
2634 	int cmdlen, l;
2635 	uint16_t kidx;
2636 	uint8_t subtype;
2637 
2638 	IPFW_UH_WLOCK_ASSERT(ch);
2639 
2640 	l = rule->cmd_len;
2641 	cmd = rule->cmd;
2642 	cmdlen = 0;
2643 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2644 		cmdlen = F_LEN(cmd);
2645 
2646 		rw = find_op_rw(cmd, &kidx, &subtype);
2647 		if (rw == NULL)
2648 			continue;
2649 		no = rw->find_bykidx(ch, kidx);
2650 
2651 		KASSERT(no != NULL, ("object id %d not found", kidx));
2652 		KASSERT(no->subtype == subtype,
2653 		    ("wrong type %d (%d) for object id %d",
2654 		    no->subtype, subtype, kidx));
2655 		KASSERT(no->refcnt > 0, ("refcount for object %d is %d",
2656 		    kidx, no->refcnt));
2657 
2658 		if (no->refcnt == 1 && rw->destroy_object != NULL)
2659 			rw->destroy_object(ch, no);
2660 		else
2661 			no->refcnt--;
2662 	}
2663 }
2664 
2665 
2666 /*
2667  * Find and reference object (if any) stored in instruction @cmd.
2668  *
2669  * Saves object info in @pidx, sets
2670  *  - @unresolved to 1 if object should exists but not found
2671  *
2672  * Returns non-zero value in case of error.
2673  */
2674 static int
2675 ref_opcode_object(struct ip_fw_chain *ch, ipfw_insn *cmd, struct tid_info *ti,
2676     struct obj_idx *pidx, int *unresolved)
2677 {
2678 	struct named_object *no;
2679 	struct opcode_obj_rewrite *rw;
2680 	int error;
2681 
2682 	/* Check if this opcode is candidate for rewrite */
2683 	rw = find_op_rw(cmd, &ti->uidx, &ti->type);
2684 	if (rw == NULL)
2685 		return (0);
2686 
2687 	/* Need to rewrite. Save necessary fields */
2688 	pidx->uidx = ti->uidx;
2689 	pidx->type = ti->type;
2690 
2691 	/* Try to find referenced kernel object */
2692 	error = rw->find_byname(ch, ti, &no);
2693 	if (error != 0)
2694 		return (error);
2695 	if (no == NULL) {
2696 		/*
2697 		 * Report about unresolved object for automaic
2698 		 * creation.
2699 		 */
2700 		*unresolved = 1;
2701 		return (0);
2702 	}
2703 
2704 	/*
2705 	 * Object is already exist.
2706 	 * Its subtype should match with expected value.
2707 	 */
2708 	if (ti->type != no->subtype)
2709 		return (EINVAL);
2710 
2711 	/* Bump refcount and update kidx. */
2712 	no->refcnt++;
2713 	rw->update(cmd, no->kidx);
2714 	return (0);
2715 }
2716 
2717 /*
2718  * Finds and bumps refcount for objects referenced by given @rule.
2719  * Auto-creates non-existing tables.
2720  * Fills in @oib array with userland/kernel indexes.
2721  *
2722  * Returns 0 on success.
2723  */
2724 static int
2725 ref_rule_objects(struct ip_fw_chain *ch, struct ip_fw *rule,
2726     struct rule_check_info *ci, struct obj_idx *oib, struct tid_info *ti)
2727 {
2728 	struct obj_idx *pidx;
2729 	ipfw_insn *cmd;
2730 	int cmdlen, error, l, unresolved;
2731 
2732 	pidx = oib;
2733 	l = rule->cmd_len;
2734 	cmd = rule->cmd;
2735 	cmdlen = 0;
2736 	error = 0;
2737 
2738 	IPFW_UH_WLOCK(ch);
2739 
2740 	/* Increase refcount on each existing referenced table. */
2741 	for ( ;	l > 0 ; l -= cmdlen, cmd += cmdlen) {
2742 		cmdlen = F_LEN(cmd);
2743 		unresolved = 0;
2744 
2745 		error = ref_opcode_object(ch, cmd, ti, pidx, &unresolved);
2746 		if (error != 0)
2747 			break;
2748 		/*
2749 		 * Compatibility stuff for old clients:
2750 		 * prepare to automaitcally create non-existing objects.
2751 		 */
2752 		if (unresolved != 0) {
2753 			pidx->off = rule->cmd_len - l;
2754 			pidx++;
2755 		}
2756 	}
2757 
2758 	if (error != 0) {
2759 		/* Unref everything we have already done */
2760 		unref_oib_objects(ch, rule->cmd, oib, pidx);
2761 		IPFW_UH_WUNLOCK(ch);
2762 		return (error);
2763 	}
2764 	IPFW_UH_WUNLOCK(ch);
2765 
2766 	/* Perform auto-creation for non-existing objects */
2767 	if (pidx != oib)
2768 		error = create_objects_compat(ch, rule->cmd, oib, pidx, ti);
2769 
2770 	/* Calculate real number of dynamic objects */
2771 	ci->object_opcodes = (uint16_t)(pidx - oib);
2772 
2773 	return (error);
2774 }
2775 
2776 /*
2777  * Checks is opcode is referencing table of appropriate type.
2778  * Adds reference count for found table if true.
2779  * Rewrites user-supplied opcode values with kernel ones.
2780  *
2781  * Returns 0 on success and appropriate error code otherwise.
2782  */
2783 static int
2784 rewrite_rule_uidx(struct ip_fw_chain *chain, struct rule_check_info *ci)
2785 {
2786 	int error;
2787 	ipfw_insn *cmd;
2788 	uint8_t type;
2789 	struct obj_idx *p, *pidx_first, *pidx_last;
2790 	struct tid_info ti;
2791 
2792 	/*
2793 	 * Prepare an array for storing opcode indices.
2794 	 * Use stack allocation by default.
2795 	 */
2796 	if (ci->object_opcodes <= (sizeof(ci->obuf)/sizeof(ci->obuf[0]))) {
2797 		/* Stack */
2798 		pidx_first = ci->obuf;
2799 	} else
2800 		pidx_first = malloc(
2801 		    ci->object_opcodes * sizeof(struct obj_idx),
2802 		    M_IPFW, M_WAITOK | M_ZERO);
2803 
2804 	error = 0;
2805 	type = 0;
2806 	memset(&ti, 0, sizeof(ti));
2807 
2808 	/* Use set rule is assigned to. */
2809 	ti.set = ci->krule->set;
2810 	if (ci->ctlv != NULL) {
2811 		ti.tlvs = (void *)(ci->ctlv + 1);
2812 		ti.tlen = ci->ctlv->head.length - sizeof(ipfw_obj_ctlv);
2813 	}
2814 
2815 	/* Reference all used tables and other objects */
2816 	error = ref_rule_objects(chain, ci->krule, ci, pidx_first, &ti);
2817 	if (error != 0)
2818 		goto free;
2819 	/*
2820 	 * Note that ref_rule_objects() might have updated ci->object_opcodes
2821 	 * to reflect actual number of object opcodes.
2822 	 */
2823 
2824 	/* Perform rewrite of remaining opcodes */
2825 	p = pidx_first;
2826 	pidx_last = pidx_first + ci->object_opcodes;
2827 	for (p = pidx_first; p < pidx_last; p++) {
2828 		cmd = ci->krule->cmd + p->off;
2829 		update_opcode_kidx(cmd, p->kidx);
2830 	}
2831 
2832 free:
2833 	if (pidx_first != ci->obuf)
2834 		free(pidx_first, M_IPFW);
2835 
2836 	return (error);
2837 }
2838 
2839 /*
2840  * Adds one or more rules to ipfw @chain.
2841  * Data layout (version 0)(current):
2842  * Request:
2843  * [
2844  *   ip_fw3_opheader
2845  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional *1)
2846  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ] (*2) (*3)
2847  * ]
2848  * Reply:
2849  * [
2850  *   ip_fw3_opheader
2851  *   [ ipfw_obj_ctlv(IPFW_TLV_TBL_LIST) ipfw_obj_ntlv x N ] (optional)
2852  *   [ ipfw_obj_ctlv(IPFW_TLV_RULE_LIST) ip_fw x N ]
2853  * ]
2854  *
2855  * Rules in reply are modified to store their actual ruleset number.
2856  *
2857  * (*1) TLVs inside IPFW_TLV_TBL_LIST needs to be sorted ascending
2858  * according to their idx field and there has to be no duplicates.
2859  * (*2) Numbered rules inside IPFW_TLV_RULE_LIST needs to be sorted ascending.
2860  * (*3) Each ip_fw structure needs to be aligned to u64 boundary.
2861  *
2862  * Returns 0 on success.
2863  */
2864 static int
2865 add_rules(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
2866     struct sockopt_data *sd)
2867 {
2868 	ipfw_obj_ctlv *ctlv, *rtlv, *tstate;
2869 	ipfw_obj_ntlv *ntlv;
2870 	int clen, error, idx;
2871 	uint32_t count, read;
2872 	struct ip_fw_rule *r;
2873 	struct rule_check_info rci, *ci, *cbuf;
2874 	int i, rsize;
2875 
2876 	op3 = (ip_fw3_opheader *)ipfw_get_sopt_space(sd, sd->valsize);
2877 	ctlv = (ipfw_obj_ctlv *)(op3 + 1);
2878 
2879 	read = sizeof(ip_fw3_opheader);
2880 	rtlv = NULL;
2881 	tstate = NULL;
2882 	cbuf = NULL;
2883 	memset(&rci, 0, sizeof(struct rule_check_info));
2884 
2885 	if (read + sizeof(*ctlv) > sd->valsize)
2886 		return (EINVAL);
2887 
2888 	if (ctlv->head.type == IPFW_TLV_TBLNAME_LIST) {
2889 		clen = ctlv->head.length;
2890 		/* Check size and alignment */
2891 		if (clen > sd->valsize || clen < sizeof(*ctlv))
2892 			return (EINVAL);
2893 		if ((clen % sizeof(uint64_t)) != 0)
2894 			return (EINVAL);
2895 
2896 		/*
2897 		 * Some table names or other named objects.
2898 		 * Check for validness.
2899 		 */
2900 		count = (ctlv->head.length - sizeof(*ctlv)) / sizeof(*ntlv);
2901 		if (ctlv->count != count || ctlv->objsize != sizeof(*ntlv))
2902 			return (EINVAL);
2903 
2904 		/*
2905 		 * Check each TLV.
2906 		 * Ensure TLVs are sorted ascending and
2907 		 * there are no duplicates.
2908 		 */
2909 		idx = -1;
2910 		ntlv = (ipfw_obj_ntlv *)(ctlv + 1);
2911 		while (count > 0) {
2912 			if (ntlv->head.length != sizeof(ipfw_obj_ntlv))
2913 				return (EINVAL);
2914 
2915 			error = ipfw_check_object_name_generic(ntlv->name);
2916 			if (error != 0)
2917 				return (error);
2918 
2919 			if (ntlv->idx <= idx)
2920 				return (EINVAL);
2921 
2922 			idx = ntlv->idx;
2923 			count--;
2924 			ntlv++;
2925 		}
2926 
2927 		tstate = ctlv;
2928 		read += ctlv->head.length;
2929 		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
2930 	}
2931 
2932 	if (read + sizeof(*ctlv) > sd->valsize)
2933 		return (EINVAL);
2934 
2935 	if (ctlv->head.type == IPFW_TLV_RULE_LIST) {
2936 		clen = ctlv->head.length;
2937 		if (clen + read > sd->valsize || clen < sizeof(*ctlv))
2938 			return (EINVAL);
2939 		if ((clen % sizeof(uint64_t)) != 0)
2940 			return (EINVAL);
2941 
2942 		/*
2943 		 * TODO: Permit adding multiple rules at once
2944 		 */
2945 		if (ctlv->count != 1)
2946 			return (ENOTSUP);
2947 
2948 		clen -= sizeof(*ctlv);
2949 
2950 		if (ctlv->count > clen / sizeof(struct ip_fw_rule))
2951 			return (EINVAL);
2952 
2953 		/* Allocate state for each rule or use stack */
2954 		if (ctlv->count == 1) {
2955 			memset(&rci, 0, sizeof(struct rule_check_info));
2956 			cbuf = &rci;
2957 		} else
2958 			cbuf = malloc(ctlv->count * sizeof(*ci), M_TEMP,
2959 			    M_WAITOK | M_ZERO);
2960 		ci = cbuf;
2961 
2962 		/*
2963 		 * Check each rule for validness.
2964 		 * Ensure numbered rules are sorted ascending
2965 		 * and properly aligned
2966 		 */
2967 		idx = 0;
2968 		r = (struct ip_fw_rule *)(ctlv + 1);
2969 		count = 0;
2970 		error = 0;
2971 		while (clen > 0) {
2972 			rsize = roundup2(RULESIZE(r), sizeof(uint64_t));
2973 			if (rsize > clen || ctlv->count <= count) {
2974 				error = EINVAL;
2975 				break;
2976 			}
2977 
2978 			ci->ctlv = tstate;
2979 			error = check_ipfw_rule1(r, rsize, ci);
2980 			if (error != 0)
2981 				break;
2982 
2983 			/* Check sorting */
2984 			if (r->rulenum != 0 && r->rulenum < idx) {
2985 				printf("rulenum %d idx %d\n", r->rulenum, idx);
2986 				error = EINVAL;
2987 				break;
2988 			}
2989 			idx = r->rulenum;
2990 
2991 			ci->urule = (caddr_t)r;
2992 
2993 			rsize = roundup2(rsize, sizeof(uint64_t));
2994 			clen -= rsize;
2995 			r = (struct ip_fw_rule *)((caddr_t)r + rsize);
2996 			count++;
2997 			ci++;
2998 		}
2999 
3000 		if (ctlv->count != count || error != 0) {
3001 			if (cbuf != &rci)
3002 				free(cbuf, M_TEMP);
3003 			return (EINVAL);
3004 		}
3005 
3006 		rtlv = ctlv;
3007 		read += ctlv->head.length;
3008 		ctlv = (ipfw_obj_ctlv *)((caddr_t)ctlv + ctlv->head.length);
3009 	}
3010 
3011 	if (read != sd->valsize || rtlv == NULL || rtlv->count == 0) {
3012 		if (cbuf != NULL && cbuf != &rci)
3013 			free(cbuf, M_TEMP);
3014 		return (EINVAL);
3015 	}
3016 
3017 	/*
3018 	 * Passed rules seems to be valid.
3019 	 * Allocate storage and try to add them to chain.
3020 	 */
3021 	for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++) {
3022 		clen = RULEKSIZE1((struct ip_fw_rule *)ci->urule);
3023 		ci->krule = ipfw_alloc_rule(chain, clen);
3024 		import_rule1(ci);
3025 	}
3026 
3027 	if ((error = commit_rules(chain, cbuf, rtlv->count)) != 0) {
3028 		/* Free allocate krules */
3029 		for (i = 0, ci = cbuf; i < rtlv->count; i++, ci++)
3030 			free_rule(ci->krule);
3031 	}
3032 
3033 	if (cbuf != NULL && cbuf != &rci)
3034 		free(cbuf, M_TEMP);
3035 
3036 	return (error);
3037 }
3038 
3039 /*
3040  * Lists all sopts currently registered.
3041  * Data layout (v0)(current):
3042  * Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
3043  * Reply: [ ipfw_obj_lheader ipfw_sopt_info x N ]
3044  *
3045  * Returns 0 on success
3046  */
3047 static int
3048 dump_soptcodes(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3049     struct sockopt_data *sd)
3050 {
3051 	struct _ipfw_obj_lheader *olh;
3052 	ipfw_sopt_info *i;
3053 	struct ipfw_sopt_handler *sh;
3054 	uint32_t count, n, size;
3055 
3056 	olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
3057 	if (olh == NULL)
3058 		return (EINVAL);
3059 	if (sd->valsize < olh->size)
3060 		return (EINVAL);
3061 
3062 	CTL3_LOCK();
3063 	count = ctl3_hsize;
3064 	size = count * sizeof(ipfw_sopt_info) + sizeof(ipfw_obj_lheader);
3065 
3066 	/* Fill in header regadless of buffer size */
3067 	olh->count = count;
3068 	olh->objsize = sizeof(ipfw_sopt_info);
3069 
3070 	if (size > olh->size) {
3071 		olh->size = size;
3072 		CTL3_UNLOCK();
3073 		return (ENOMEM);
3074 	}
3075 	olh->size = size;
3076 
3077 	for (n = 1; n <= count; n++) {
3078 		i = (ipfw_sopt_info *)ipfw_get_sopt_space(sd, sizeof(*i));
3079 		KASSERT(i != NULL, ("previously checked buffer is not enough"));
3080 		sh = &ctl3_handlers[n];
3081 		i->opcode = sh->opcode;
3082 		i->version = sh->version;
3083 		i->refcnt = sh->refcnt;
3084 	}
3085 	CTL3_UNLOCK();
3086 
3087 	return (0);
3088 }
3089 
3090 /*
3091  * Compares two opcodes.
3092  * Used both in qsort() and bsearch().
3093  *
3094  * Returns 0 if match is found.
3095  */
3096 static int
3097 compare_opcodes(const void *_a, const void *_b)
3098 {
3099 	const struct opcode_obj_rewrite *a, *b;
3100 
3101 	a = (const struct opcode_obj_rewrite *)_a;
3102 	b = (const struct opcode_obj_rewrite *)_b;
3103 
3104 	if (a->opcode < b->opcode)
3105 		return (-1);
3106 	else if (a->opcode > b->opcode)
3107 		return (1);
3108 
3109 	return (0);
3110 }
3111 
3112 /*
3113  * XXX: Rewrite bsearch()
3114  */
3115 static int
3116 find_op_rw_range(uint16_t op, struct opcode_obj_rewrite **plo,
3117     struct opcode_obj_rewrite **phi)
3118 {
3119 	struct opcode_obj_rewrite *ctl3_max, *lo, *hi, h, *rw;
3120 
3121 	memset(&h, 0, sizeof(h));
3122 	h.opcode = op;
3123 
3124 	rw = (struct opcode_obj_rewrite *)bsearch(&h, ctl3_rewriters,
3125 	    ctl3_rsize, sizeof(h), compare_opcodes);
3126 	if (rw == NULL)
3127 		return (1);
3128 
3129 	/* Find the first element matching the same opcode */
3130 	lo = rw;
3131 	for ( ; lo > ctl3_rewriters && (lo - 1)->opcode == op; lo--)
3132 		;
3133 
3134 	/* Find the last element matching the same opcode */
3135 	hi = rw;
3136 	ctl3_max = ctl3_rewriters + ctl3_rsize;
3137 	for ( ; (hi + 1) < ctl3_max && (hi + 1)->opcode == op; hi++)
3138 		;
3139 
3140 	*plo = lo;
3141 	*phi = hi;
3142 
3143 	return (0);
3144 }
3145 
3146 /*
3147  * Finds opcode object rewriter based on @code.
3148  *
3149  * Returns pointer to handler or NULL.
3150  */
3151 static struct opcode_obj_rewrite *
3152 find_op_rw(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
3153 {
3154 	struct opcode_obj_rewrite *rw, *lo, *hi;
3155 	uint16_t uidx;
3156 	uint8_t subtype;
3157 
3158 	if (find_op_rw_range(cmd->opcode, &lo, &hi) != 0)
3159 		return (NULL);
3160 
3161 	for (rw = lo; rw <= hi; rw++) {
3162 		if (rw->classifier(cmd, &uidx, &subtype) == 0) {
3163 			if (puidx != NULL)
3164 				*puidx = uidx;
3165 			if (ptype != NULL)
3166 				*ptype = subtype;
3167 			return (rw);
3168 		}
3169 	}
3170 
3171 	return (NULL);
3172 }
3173 int
3174 classify_opcode_kidx(ipfw_insn *cmd, uint16_t *puidx)
3175 {
3176 
3177 	if (find_op_rw(cmd, puidx, NULL) == NULL)
3178 		return (1);
3179 	return (0);
3180 }
3181 
3182 void
3183 update_opcode_kidx(ipfw_insn *cmd, uint16_t idx)
3184 {
3185 	struct opcode_obj_rewrite *rw;
3186 
3187 	rw = find_op_rw(cmd, NULL, NULL);
3188 	KASSERT(rw != NULL, ("No handler to update opcode %d", cmd->opcode));
3189 	rw->update(cmd, idx);
3190 }
3191 
3192 void
3193 ipfw_init_obj_rewriter()
3194 {
3195 
3196 	ctl3_rewriters = NULL;
3197 	ctl3_rsize = 0;
3198 }
3199 
3200 void
3201 ipfw_destroy_obj_rewriter()
3202 {
3203 
3204 	if (ctl3_rewriters != NULL)
3205 		free(ctl3_rewriters, M_IPFW);
3206 	ctl3_rewriters = NULL;
3207 	ctl3_rsize = 0;
3208 }
3209 
3210 /*
3211  * Adds one or more opcode object rewrite handlers to the global array.
3212  * Function may sleep.
3213  */
3214 void
3215 ipfw_add_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3216 {
3217 	size_t sz;
3218 	struct opcode_obj_rewrite *tmp;
3219 
3220 	CTL3_LOCK();
3221 
3222 	for (;;) {
3223 		sz = ctl3_rsize + count;
3224 		CTL3_UNLOCK();
3225 		tmp = malloc(sizeof(*rw) * sz, M_IPFW, M_WAITOK | M_ZERO);
3226 		CTL3_LOCK();
3227 		if (ctl3_rsize + count <= sz)
3228 			break;
3229 
3230 		/* Retry */
3231 		free(tmp, M_IPFW);
3232 	}
3233 
3234 	/* Merge old & new arrays */
3235 	sz = ctl3_rsize + count;
3236 	memcpy(tmp, ctl3_rewriters, ctl3_rsize * sizeof(*rw));
3237 	memcpy(&tmp[ctl3_rsize], rw, count * sizeof(*rw));
3238 	qsort(tmp, sz, sizeof(*rw), compare_opcodes);
3239 	/* Switch new and free old */
3240 	if (ctl3_rewriters != NULL)
3241 		free(ctl3_rewriters, M_IPFW);
3242 	ctl3_rewriters = tmp;
3243 	ctl3_rsize = sz;
3244 
3245 	CTL3_UNLOCK();
3246 }
3247 
3248 /*
3249  * Removes one or more object rewrite handlers from the global array.
3250  */
3251 int
3252 ipfw_del_obj_rewriter(struct opcode_obj_rewrite *rw, size_t count)
3253 {
3254 	size_t sz;
3255 	struct opcode_obj_rewrite *ctl3_max, *ktmp, *lo, *hi;
3256 	int i;
3257 
3258 	CTL3_LOCK();
3259 
3260 	for (i = 0; i < count; i++) {
3261 		if (find_op_rw_range(rw[i].opcode, &lo, &hi) != 0)
3262 			continue;
3263 
3264 		for (ktmp = lo; ktmp <= hi; ktmp++) {
3265 			if (ktmp->classifier != rw[i].classifier)
3266 				continue;
3267 
3268 			ctl3_max = ctl3_rewriters + ctl3_rsize;
3269 			sz = (ctl3_max - (ktmp + 1)) * sizeof(*ktmp);
3270 			memmove(ktmp, ktmp + 1, sz);
3271 			ctl3_rsize--;
3272 			break;
3273 		}
3274 
3275 	}
3276 
3277 	if (ctl3_rsize == 0) {
3278 		if (ctl3_rewriters != NULL)
3279 			free(ctl3_rewriters, M_IPFW);
3280 		ctl3_rewriters = NULL;
3281 	}
3282 
3283 	CTL3_UNLOCK();
3284 
3285 	return (0);
3286 }
3287 
3288 static int
3289 export_objhash_ntlv_internal(struct namedobj_instance *ni,
3290     struct named_object *no, void *arg)
3291 {
3292 	struct sockopt_data *sd;
3293 	ipfw_obj_ntlv *ntlv;
3294 
3295 	sd = (struct sockopt_data *)arg;
3296 	ntlv = (ipfw_obj_ntlv *)ipfw_get_sopt_space(sd, sizeof(*ntlv));
3297 	if (ntlv == NULL)
3298 		return (ENOMEM);
3299 	ipfw_export_obj_ntlv(no, ntlv);
3300 	return (0);
3301 }
3302 
3303 /*
3304  * Lists all service objects.
3305  * Data layout (v0)(current):
3306  * Request: [ ipfw_obj_lheader ] size = ipfw_obj_lheader.size
3307  * Reply: [ ipfw_obj_lheader [ ipfw_obj_ntlv x N ] (optional) ]
3308  * Returns 0 on success
3309  */
3310 static int
3311 dump_srvobjects(struct ip_fw_chain *chain, ip_fw3_opheader *op3,
3312     struct sockopt_data *sd)
3313 {
3314 	ipfw_obj_lheader *hdr;
3315 	int count;
3316 
3317 	hdr = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*hdr));
3318 	if (hdr == NULL)
3319 		return (EINVAL);
3320 
3321 	IPFW_UH_RLOCK(chain);
3322 	count = ipfw_objhash_count(CHAIN_TO_SRV(chain));
3323 	hdr->size = sizeof(ipfw_obj_lheader) + count * sizeof(ipfw_obj_ntlv);
3324 	if (sd->valsize < hdr->size) {
3325 		IPFW_UH_RUNLOCK(chain);
3326 		return (ENOMEM);
3327 	}
3328 	hdr->count = count;
3329 	hdr->objsize = sizeof(ipfw_obj_ntlv);
3330 	if (count > 0)
3331 		ipfw_objhash_foreach(CHAIN_TO_SRV(chain),
3332 		    export_objhash_ntlv_internal, sd);
3333 	IPFW_UH_RUNLOCK(chain);
3334 	return (0);
3335 }
3336 
3337 /*
3338  * Compares two sopt handlers (code, version and handler ptr).
3339  * Used both as qsort() and bsearch().
3340  * Does not compare handler for latter case.
3341  *
3342  * Returns 0 if match is found.
3343  */
3344 static int
3345 compare_sh(const void *_a, const void *_b)
3346 {
3347 	const struct ipfw_sopt_handler *a, *b;
3348 
3349 	a = (const struct ipfw_sopt_handler *)_a;
3350 	b = (const struct ipfw_sopt_handler *)_b;
3351 
3352 	if (a->opcode < b->opcode)
3353 		return (-1);
3354 	else if (a->opcode > b->opcode)
3355 		return (1);
3356 
3357 	if (a->version < b->version)
3358 		return (-1);
3359 	else if (a->version > b->version)
3360 		return (1);
3361 
3362 	/* bsearch helper */
3363 	if (a->handler == NULL)
3364 		return (0);
3365 
3366 	if ((uintptr_t)a->handler < (uintptr_t)b->handler)
3367 		return (-1);
3368 	else if ((uintptr_t)a->handler > (uintptr_t)b->handler)
3369 		return (1);
3370 
3371 	return (0);
3372 }
3373 
3374 /*
3375  * Finds sopt handler based on @code and @version.
3376  *
3377  * Returns pointer to handler or NULL.
3378  */
3379 static struct ipfw_sopt_handler *
3380 find_sh(uint16_t code, uint8_t version, sopt_handler_f *handler)
3381 {
3382 	struct ipfw_sopt_handler *sh, h;
3383 
3384 	memset(&h, 0, sizeof(h));
3385 	h.opcode = code;
3386 	h.version = version;
3387 	h.handler = handler;
3388 
3389 	sh = (struct ipfw_sopt_handler *)bsearch(&h, ctl3_handlers,
3390 	    ctl3_hsize, sizeof(h), compare_sh);
3391 
3392 	return (sh);
3393 }
3394 
3395 static int
3396 find_ref_sh(uint16_t opcode, uint8_t version, struct ipfw_sopt_handler *psh)
3397 {
3398 	struct ipfw_sopt_handler *sh;
3399 
3400 	CTL3_LOCK();
3401 	if ((sh = find_sh(opcode, version, NULL)) == NULL) {
3402 		CTL3_UNLOCK();
3403 		printf("ipfw: ipfw_ctl3 invalid option %d""v""%d\n",
3404 		    opcode, version);
3405 		return (EINVAL);
3406 	}
3407 	sh->refcnt++;
3408 	ctl3_refct++;
3409 	/* Copy handler data to requested buffer */
3410 	*psh = *sh;
3411 	CTL3_UNLOCK();
3412 
3413 	return (0);
3414 }
3415 
3416 static void
3417 find_unref_sh(struct ipfw_sopt_handler *psh)
3418 {
3419 	struct ipfw_sopt_handler *sh;
3420 
3421 	CTL3_LOCK();
3422 	sh = find_sh(psh->opcode, psh->version, NULL);
3423 	KASSERT(sh != NULL, ("ctl3 handler disappeared"));
3424 	sh->refcnt--;
3425 	ctl3_refct--;
3426 	CTL3_UNLOCK();
3427 }
3428 
3429 void
3430 ipfw_init_sopt_handler()
3431 {
3432 
3433 	CTL3_LOCK_INIT();
3434 	IPFW_ADD_SOPT_HANDLER(1, scodes);
3435 }
3436 
3437 void
3438 ipfw_destroy_sopt_handler()
3439 {
3440 
3441 	IPFW_DEL_SOPT_HANDLER(1, scodes);
3442 	CTL3_LOCK_DESTROY();
3443 }
3444 
3445 /*
3446  * Adds one or more sockopt handlers to the global array.
3447  * Function may sleep.
3448  */
3449 void
3450 ipfw_add_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3451 {
3452 	size_t sz;
3453 	struct ipfw_sopt_handler *tmp;
3454 
3455 	CTL3_LOCK();
3456 
3457 	for (;;) {
3458 		sz = ctl3_hsize + count;
3459 		CTL3_UNLOCK();
3460 		tmp = malloc(sizeof(*sh) * sz, M_IPFW, M_WAITOK | M_ZERO);
3461 		CTL3_LOCK();
3462 		if (ctl3_hsize + count <= sz)
3463 			break;
3464 
3465 		/* Retry */
3466 		free(tmp, M_IPFW);
3467 	}
3468 
3469 	/* Merge old & new arrays */
3470 	sz = ctl3_hsize + count;
3471 	memcpy(tmp, ctl3_handlers, ctl3_hsize * sizeof(*sh));
3472 	memcpy(&tmp[ctl3_hsize], sh, count * sizeof(*sh));
3473 	qsort(tmp, sz, sizeof(*sh), compare_sh);
3474 	/* Switch new and free old */
3475 	if (ctl3_handlers != NULL)
3476 		free(ctl3_handlers, M_IPFW);
3477 	ctl3_handlers = tmp;
3478 	ctl3_hsize = sz;
3479 	ctl3_gencnt++;
3480 
3481 	CTL3_UNLOCK();
3482 }
3483 
3484 /*
3485  * Removes one or more sockopt handlers from the global array.
3486  */
3487 int
3488 ipfw_del_sopt_handler(struct ipfw_sopt_handler *sh, size_t count)
3489 {
3490 	size_t sz;
3491 	struct ipfw_sopt_handler *tmp, *h;
3492 	int i;
3493 
3494 	CTL3_LOCK();
3495 
3496 	for (i = 0; i < count; i++) {
3497 		tmp = &sh[i];
3498 		h = find_sh(tmp->opcode, tmp->version, tmp->handler);
3499 		if (h == NULL)
3500 			continue;
3501 
3502 		sz = (ctl3_handlers + ctl3_hsize - (h + 1)) * sizeof(*h);
3503 		memmove(h, h + 1, sz);
3504 		ctl3_hsize--;
3505 	}
3506 
3507 	if (ctl3_hsize == 0) {
3508 		if (ctl3_handlers != NULL)
3509 			free(ctl3_handlers, M_IPFW);
3510 		ctl3_handlers = NULL;
3511 	}
3512 
3513 	ctl3_gencnt++;
3514 
3515 	CTL3_UNLOCK();
3516 
3517 	return (0);
3518 }
3519 
3520 /*
3521  * Writes data accumulated in @sd to sockopt buffer.
3522  * Zeroes internal @sd buffer.
3523  */
3524 static int
3525 ipfw_flush_sopt_data(struct sockopt_data *sd)
3526 {
3527 	struct sockopt *sopt;
3528 	int error;
3529 	size_t sz;
3530 
3531 	sz = sd->koff;
3532 	if (sz == 0)
3533 		return (0);
3534 
3535 	sopt = sd->sopt;
3536 
3537 	if (sopt->sopt_dir == SOPT_GET) {
3538 		error = copyout(sd->kbuf, sopt->sopt_val, sz);
3539 		if (error != 0)
3540 			return (error);
3541 	}
3542 
3543 	memset(sd->kbuf, 0, sd->ksize);
3544 	sd->ktotal += sz;
3545 	sd->koff = 0;
3546 	if (sd->ktotal + sd->ksize < sd->valsize)
3547 		sd->kavail = sd->ksize;
3548 	else
3549 		sd->kavail = sd->valsize - sd->ktotal;
3550 
3551 	/* Update sopt buffer data */
3552 	sopt->sopt_valsize = sd->ktotal;
3553 	sopt->sopt_val = sd->sopt_val + sd->ktotal;
3554 
3555 	return (0);
3556 }
3557 
3558 /*
3559  * Ensures that @sd buffer has contiguous @neeeded number of
3560  * bytes.
3561  *
3562  * Returns pointer to requested space or NULL.
3563  */
3564 caddr_t
3565 ipfw_get_sopt_space(struct sockopt_data *sd, size_t needed)
3566 {
3567 	int error;
3568 	caddr_t addr;
3569 
3570 	if (sd->kavail < needed) {
3571 		/*
3572 		 * Flush data and try another time.
3573 		 */
3574 		error = ipfw_flush_sopt_data(sd);
3575 
3576 		if (sd->kavail < needed || error != 0)
3577 			return (NULL);
3578 	}
3579 
3580 	addr = sd->kbuf + sd->koff;
3581 	sd->koff += needed;
3582 	sd->kavail -= needed;
3583 	return (addr);
3584 }
3585 
3586 /*
3587  * Requests @needed contiguous bytes from @sd buffer.
3588  * Function is used to notify subsystem that we are
3589  * interesed in first @needed bytes (request header)
3590  * and the rest buffer can be safely zeroed.
3591  *
3592  * Returns pointer to requested space or NULL.
3593  */
3594 caddr_t
3595 ipfw_get_sopt_header(struct sockopt_data *sd, size_t needed)
3596 {
3597 	caddr_t addr;
3598 
3599 	if ((addr = ipfw_get_sopt_space(sd, needed)) == NULL)
3600 		return (NULL);
3601 
3602 	if (sd->kavail > 0)
3603 		memset(sd->kbuf + sd->koff, 0, sd->kavail);
3604 
3605 	return (addr);
3606 }
3607 
3608 /*
3609  * New sockopt handler.
3610  */
3611 int
3612 ipfw_ctl3(struct sockopt *sopt)
3613 {
3614 	int error, locked;
3615 	size_t size, valsize;
3616 	struct ip_fw_chain *chain;
3617 	char xbuf[256];
3618 	struct sockopt_data sdata;
3619 	struct ipfw_sopt_handler h;
3620 	ip_fw3_opheader *op3 = NULL;
3621 
3622 	error = priv_check(sopt->sopt_td, PRIV_NETINET_IPFW);
3623 	if (error != 0)
3624 		return (error);
3625 
3626 	if (sopt->sopt_name != IP_FW3)
3627 		return (ipfw_ctl(sopt));
3628 
3629 	chain = &V_layer3_chain;
3630 	error = 0;
3631 
3632 	/* Save original valsize before it is altered via sooptcopyin() */
3633 	valsize = sopt->sopt_valsize;
3634 	memset(&sdata, 0, sizeof(sdata));
3635 	/* Read op3 header first to determine actual operation */
3636 	op3 = (ip_fw3_opheader *)xbuf;
3637 	error = sooptcopyin(sopt, op3, sizeof(*op3), sizeof(*op3));
3638 	if (error != 0)
3639 		return (error);
3640 	sopt->sopt_valsize = valsize;
3641 
3642 	/*
3643 	 * Find and reference command.
3644 	 */
3645 	error = find_ref_sh(op3->opcode, op3->version, &h);
3646 	if (error != 0)
3647 		return (error);
3648 
3649 	/*
3650 	 * Disallow modifications in really-really secure mode, but still allow
3651 	 * the logging counters to be reset.
3652 	 */
3653 	if ((h.dir & HDIR_SET) != 0 && h.opcode != IP_FW_XRESETLOG) {
3654 		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3655 		if (error != 0) {
3656 			find_unref_sh(&h);
3657 			return (error);
3658 		}
3659 	}
3660 
3661 	/*
3662 	 * Fill in sockopt_data structure that may be useful for
3663 	 * IP_FW3 get requests.
3664 	 */
3665 	locked = 0;
3666 	if (valsize <= sizeof(xbuf)) {
3667 		/* use on-stack buffer */
3668 		sdata.kbuf = xbuf;
3669 		sdata.ksize = sizeof(xbuf);
3670 		sdata.kavail = valsize;
3671 	} else {
3672 
3673 		/*
3674 		 * Determine opcode type/buffer size:
3675 		 * allocate sliding-window buf for data export or
3676 		 * contiguous buffer for special ops.
3677 		 */
3678 		if ((h.dir & HDIR_SET) != 0) {
3679 			/* Set request. Allocate contigous buffer. */
3680 			if (valsize > CTL3_LARGEBUF) {
3681 				find_unref_sh(&h);
3682 				return (EFBIG);
3683 			}
3684 
3685 			size = valsize;
3686 		} else {
3687 			/* Get request. Allocate sliding window buffer */
3688 			size = (valsize<CTL3_SMALLBUF) ? valsize:CTL3_SMALLBUF;
3689 
3690 			if (size < valsize) {
3691 				/* We have to wire user buffer */
3692 				error = vslock(sopt->sopt_val, valsize);
3693 				if (error != 0)
3694 					return (error);
3695 				locked = 1;
3696 			}
3697 		}
3698 
3699 		sdata.kbuf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3700 		sdata.ksize = size;
3701 		sdata.kavail = size;
3702 	}
3703 
3704 	sdata.sopt = sopt;
3705 	sdata.sopt_val = sopt->sopt_val;
3706 	sdata.valsize = valsize;
3707 
3708 	/*
3709 	 * Copy either all request (if valsize < bsize_max)
3710 	 * or first bsize_max bytes to guarantee most consumers
3711 	 * that all necessary data has been copied).
3712 	 * Anyway, copy not less than sizeof(ip_fw3_opheader).
3713 	 */
3714 	if ((error = sooptcopyin(sopt, sdata.kbuf, sdata.ksize,
3715 	    sizeof(ip_fw3_opheader))) != 0)
3716 		return (error);
3717 	op3 = (ip_fw3_opheader *)sdata.kbuf;
3718 
3719 	/* Finally, run handler */
3720 	error = h.handler(chain, op3, &sdata);
3721 	find_unref_sh(&h);
3722 
3723 	/* Flush state and free buffers */
3724 	if (error == 0)
3725 		error = ipfw_flush_sopt_data(&sdata);
3726 	else
3727 		ipfw_flush_sopt_data(&sdata);
3728 
3729 	if (locked != 0)
3730 		vsunlock(sdata.sopt_val, valsize);
3731 
3732 	/* Restore original pointer and set number of bytes written */
3733 	sopt->sopt_val = sdata.sopt_val;
3734 	sopt->sopt_valsize = sdata.ktotal;
3735 	if (sdata.kbuf != xbuf)
3736 		free(sdata.kbuf, M_TEMP);
3737 
3738 	return (error);
3739 }
3740 
3741 /**
3742  * {set|get}sockopt parser.
3743  */
3744 int
3745 ipfw_ctl(struct sockopt *sopt)
3746 {
3747 #define	RULE_MAXSIZE	(512*sizeof(u_int32_t))
3748 	int error;
3749 	size_t size, valsize;
3750 	struct ip_fw *buf;
3751 	struct ip_fw_rule0 *rule;
3752 	struct ip_fw_chain *chain;
3753 	u_int32_t rulenum[2];
3754 	uint32_t opt;
3755 	struct rule_check_info ci;
3756 	IPFW_RLOCK_TRACKER;
3757 
3758 	chain = &V_layer3_chain;
3759 	error = 0;
3760 
3761 	/* Save original valsize before it is altered via sooptcopyin() */
3762 	valsize = sopt->sopt_valsize;
3763 	opt = sopt->sopt_name;
3764 
3765 	/*
3766 	 * Disallow modifications in really-really secure mode, but still allow
3767 	 * the logging counters to be reset.
3768 	 */
3769 	if (opt == IP_FW_ADD ||
3770 	    (sopt->sopt_dir == SOPT_SET && opt != IP_FW_RESETLOG)) {
3771 		error = securelevel_ge(sopt->sopt_td->td_ucred, 3);
3772 		if (error != 0)
3773 			return (error);
3774 	}
3775 
3776 	switch (opt) {
3777 	case IP_FW_GET:
3778 		/*
3779 		 * pass up a copy of the current rules. Static rules
3780 		 * come first (the last of which has number IPFW_DEFAULT_RULE),
3781 		 * followed by a possibly empty list of dynamic rule.
3782 		 * The last dynamic rule has NULL in the "next" field.
3783 		 *
3784 		 * Note that the calculated size is used to bound the
3785 		 * amount of data returned to the user.  The rule set may
3786 		 * change between calculating the size and returning the
3787 		 * data in which case we'll just return what fits.
3788 		 */
3789 		for (;;) {
3790 			int len = 0, want;
3791 
3792 			size = chain->static_len;
3793 			size += ipfw_dyn_len();
3794 			if (size >= sopt->sopt_valsize)
3795 				break;
3796 			buf = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
3797 			IPFW_UH_RLOCK(chain);
3798 			/* check again how much space we need */
3799 			want = chain->static_len + ipfw_dyn_len();
3800 			if (size >= want)
3801 				len = ipfw_getrules(chain, buf, size);
3802 			IPFW_UH_RUNLOCK(chain);
3803 			if (size >= want)
3804 				error = sooptcopyout(sopt, buf, len);
3805 			free(buf, M_TEMP);
3806 			if (size >= want)
3807 				break;
3808 		}
3809 		break;
3810 
3811 	case IP_FW_FLUSH:
3812 		/* locking is done within del_entry() */
3813 		error = del_entry(chain, 0); /* special case, rule=0, cmd=0 means all */
3814 		break;
3815 
3816 	case IP_FW_ADD:
3817 		rule = malloc(RULE_MAXSIZE, M_TEMP, M_WAITOK);
3818 		error = sooptcopyin(sopt, rule, RULE_MAXSIZE,
3819 			sizeof(struct ip_fw7) );
3820 
3821 		memset(&ci, 0, sizeof(struct rule_check_info));
3822 
3823 		/*
3824 		 * If the size of commands equals RULESIZE7 then we assume
3825 		 * a FreeBSD7.2 binary is talking to us (set is7=1).
3826 		 * is7 is persistent so the next 'ipfw list' command
3827 		 * will use this format.
3828 		 * NOTE: If wrong version is guessed (this can happen if
3829 		 *       the first ipfw command is 'ipfw [pipe] list')
3830 		 *       the ipfw binary may crash or loop infinitly...
3831 		 */
3832 		size = sopt->sopt_valsize;
3833 		if (size == RULESIZE7(rule)) {
3834 		    is7 = 1;
3835 		    error = convert_rule_to_8(rule);
3836 		    if (error) {
3837 			free(rule, M_TEMP);
3838 			return error;
3839 		    }
3840 		    size = RULESIZE(rule);
3841 		} else
3842 		    is7 = 0;
3843 		if (error == 0)
3844 			error = check_ipfw_rule0(rule, size, &ci);
3845 		if (error == 0) {
3846 			/* locking is done within add_rule() */
3847 			struct ip_fw *krule;
3848 			krule = ipfw_alloc_rule(chain, RULEKSIZE0(rule));
3849 			ci.urule = (caddr_t)rule;
3850 			ci.krule = krule;
3851 			import_rule0(&ci);
3852 			error = commit_rules(chain, &ci, 1);
3853 			if (error != 0)
3854 				free_rule(ci.krule);
3855 			else if (sopt->sopt_dir == SOPT_GET) {
3856 				if (is7) {
3857 					error = convert_rule_to_7(rule);
3858 					size = RULESIZE7(rule);
3859 					if (error) {
3860 						free(rule, M_TEMP);
3861 						return error;
3862 					}
3863 				}
3864 				error = sooptcopyout(sopt, rule, size);
3865 			}
3866 		}
3867 		free(rule, M_TEMP);
3868 		break;
3869 
3870 	case IP_FW_DEL:
3871 		/*
3872 		 * IP_FW_DEL is used for deleting single rules or sets,
3873 		 * and (ab)used to atomically manipulate sets. Argument size
3874 		 * is used to distinguish between the two:
3875 		 *    sizeof(u_int32_t)
3876 		 *	delete single rule or set of rules,
3877 		 *	or reassign rules (or sets) to a different set.
3878 		 *    2*sizeof(u_int32_t)
3879 		 *	atomic disable/enable sets.
3880 		 *	first u_int32_t contains sets to be disabled,
3881 		 *	second u_int32_t contains sets to be enabled.
3882 		 */
3883 		error = sooptcopyin(sopt, rulenum,
3884 			2*sizeof(u_int32_t), sizeof(u_int32_t));
3885 		if (error)
3886 			break;
3887 		size = sopt->sopt_valsize;
3888 		if (size == sizeof(u_int32_t) && rulenum[0] != 0) {
3889 			/* delete or reassign, locking done in del_entry() */
3890 			error = del_entry(chain, rulenum[0]);
3891 		} else if (size == 2*sizeof(u_int32_t)) { /* set enable/disable */
3892 			IPFW_UH_WLOCK(chain);
3893 			V_set_disable =
3894 			    (V_set_disable | rulenum[0]) & ~rulenum[1] &
3895 			    ~(1<<RESVD_SET); /* set RESVD_SET always enabled */
3896 			IPFW_UH_WUNLOCK(chain);
3897 		} else
3898 			error = EINVAL;
3899 		break;
3900 
3901 	case IP_FW_ZERO:
3902 	case IP_FW_RESETLOG: /* argument is an u_int_32, the rule number */
3903 		rulenum[0] = 0;
3904 		if (sopt->sopt_val != 0) {
3905 		    error = sooptcopyin(sopt, rulenum,
3906 			    sizeof(u_int32_t), sizeof(u_int32_t));
3907 		    if (error)
3908 			break;
3909 		}
3910 		error = zero_entry(chain, rulenum[0],
3911 			sopt->sopt_name == IP_FW_RESETLOG);
3912 		break;
3913 
3914 	/*--- TABLE opcodes ---*/
3915 	case IP_FW_TABLE_ADD:
3916 	case IP_FW_TABLE_DEL:
3917 		{
3918 			ipfw_table_entry ent;
3919 			struct tentry_info tei;
3920 			struct tid_info ti;
3921 			struct table_value v;
3922 
3923 			error = sooptcopyin(sopt, &ent,
3924 			    sizeof(ent), sizeof(ent));
3925 			if (error)
3926 				break;
3927 
3928 			memset(&tei, 0, sizeof(tei));
3929 			tei.paddr = &ent.addr;
3930 			tei.subtype = AF_INET;
3931 			tei.masklen = ent.masklen;
3932 			ipfw_import_table_value_legacy(ent.value, &v);
3933 			tei.pvalue = &v;
3934 			memset(&ti, 0, sizeof(ti));
3935 			ti.uidx = ent.tbl;
3936 			ti.type = IPFW_TABLE_CIDR;
3937 
3938 			error = (opt == IP_FW_TABLE_ADD) ?
3939 			    add_table_entry(chain, &ti, &tei, 0, 1) :
3940 			    del_table_entry(chain, &ti, &tei, 0, 1);
3941 		}
3942 		break;
3943 
3944 
3945 	case IP_FW_TABLE_FLUSH:
3946 		{
3947 			u_int16_t tbl;
3948 			struct tid_info ti;
3949 
3950 			error = sooptcopyin(sopt, &tbl,
3951 			    sizeof(tbl), sizeof(tbl));
3952 			if (error)
3953 				break;
3954 			memset(&ti, 0, sizeof(ti));
3955 			ti.uidx = tbl;
3956 			error = flush_table(chain, &ti);
3957 		}
3958 		break;
3959 
3960 	case IP_FW_TABLE_GETSIZE:
3961 		{
3962 			u_int32_t tbl, cnt;
3963 			struct tid_info ti;
3964 
3965 			if ((error = sooptcopyin(sopt, &tbl, sizeof(tbl),
3966 			    sizeof(tbl))))
3967 				break;
3968 			memset(&ti, 0, sizeof(ti));
3969 			ti.uidx = tbl;
3970 			IPFW_RLOCK(chain);
3971 			error = ipfw_count_table(chain, &ti, &cnt);
3972 			IPFW_RUNLOCK(chain);
3973 			if (error)
3974 				break;
3975 			error = sooptcopyout(sopt, &cnt, sizeof(cnt));
3976 		}
3977 		break;
3978 
3979 	case IP_FW_TABLE_LIST:
3980 		{
3981 			ipfw_table *tbl;
3982 			struct tid_info ti;
3983 
3984 			if (sopt->sopt_valsize < sizeof(*tbl)) {
3985 				error = EINVAL;
3986 				break;
3987 			}
3988 			size = sopt->sopt_valsize;
3989 			tbl = malloc(size, M_TEMP, M_WAITOK);
3990 			error = sooptcopyin(sopt, tbl, size, sizeof(*tbl));
3991 			if (error) {
3992 				free(tbl, M_TEMP);
3993 				break;
3994 			}
3995 			tbl->size = (size - sizeof(*tbl)) /
3996 			    sizeof(ipfw_table_entry);
3997 			memset(&ti, 0, sizeof(ti));
3998 			ti.uidx = tbl->tbl;
3999 			IPFW_RLOCK(chain);
4000 			error = ipfw_dump_table_legacy(chain, &ti, tbl);
4001 			IPFW_RUNLOCK(chain);
4002 			if (error) {
4003 				free(tbl, M_TEMP);
4004 				break;
4005 			}
4006 			error = sooptcopyout(sopt, tbl, size);
4007 			free(tbl, M_TEMP);
4008 		}
4009 		break;
4010 
4011 	/*--- NAT operations are protected by the IPFW_LOCK ---*/
4012 	case IP_FW_NAT_CFG:
4013 		if (IPFW_NAT_LOADED)
4014 			error = ipfw_nat_cfg_ptr(sopt);
4015 		else {
4016 			printf("IP_FW_NAT_CFG: %s\n",
4017 			    "ipfw_nat not present, please load it");
4018 			error = EINVAL;
4019 		}
4020 		break;
4021 
4022 	case IP_FW_NAT_DEL:
4023 		if (IPFW_NAT_LOADED)
4024 			error = ipfw_nat_del_ptr(sopt);
4025 		else {
4026 			printf("IP_FW_NAT_DEL: %s\n",
4027 			    "ipfw_nat not present, please load it");
4028 			error = EINVAL;
4029 		}
4030 		break;
4031 
4032 	case IP_FW_NAT_GET_CONFIG:
4033 		if (IPFW_NAT_LOADED)
4034 			error = ipfw_nat_get_cfg_ptr(sopt);
4035 		else {
4036 			printf("IP_FW_NAT_GET_CFG: %s\n",
4037 			    "ipfw_nat not present, please load it");
4038 			error = EINVAL;
4039 		}
4040 		break;
4041 
4042 	case IP_FW_NAT_GET_LOG:
4043 		if (IPFW_NAT_LOADED)
4044 			error = ipfw_nat_get_log_ptr(sopt);
4045 		else {
4046 			printf("IP_FW_NAT_GET_LOG: %s\n",
4047 			    "ipfw_nat not present, please load it");
4048 			error = EINVAL;
4049 		}
4050 		break;
4051 
4052 	default:
4053 		printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name);
4054 		error = EINVAL;
4055 	}
4056 
4057 	return (error);
4058 #undef RULE_MAXSIZE
4059 }
4060 #define	RULE_MAXSIZE	(256*sizeof(u_int32_t))
4061 
4062 /* Functions to convert rules 7.2 <==> 8.0 */
4063 static int
4064 convert_rule_to_7(struct ip_fw_rule0 *rule)
4065 {
4066 	/* Used to modify original rule */
4067 	struct ip_fw7 *rule7 = (struct ip_fw7 *)rule;
4068 	/* copy of original rule, version 8 */
4069 	struct ip_fw_rule0 *tmp;
4070 
4071 	/* Used to copy commands */
4072 	ipfw_insn *ccmd, *dst;
4073 	int ll = 0, ccmdlen = 0;
4074 
4075 	tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4076 	if (tmp == NULL) {
4077 		return 1; //XXX error
4078 	}
4079 	bcopy(rule, tmp, RULE_MAXSIZE);
4080 
4081 	/* Copy fields */
4082 	//rule7->_pad = tmp->_pad;
4083 	rule7->set = tmp->set;
4084 	rule7->rulenum = tmp->rulenum;
4085 	rule7->cmd_len = tmp->cmd_len;
4086 	rule7->act_ofs = tmp->act_ofs;
4087 	rule7->next_rule = (struct ip_fw7 *)tmp->next_rule;
4088 	rule7->cmd_len = tmp->cmd_len;
4089 	rule7->pcnt = tmp->pcnt;
4090 	rule7->bcnt = tmp->bcnt;
4091 	rule7->timestamp = tmp->timestamp;
4092 
4093 	/* Copy commands */
4094 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule7->cmd ;
4095 			ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4096 		ccmdlen = F_LEN(ccmd);
4097 
4098 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4099 
4100 		if (dst->opcode > O_NAT)
4101 			/* O_REASS doesn't exists in 7.2 version, so
4102 			 * decrement opcode if it is after O_REASS
4103 			 */
4104 			dst->opcode--;
4105 
4106 		if (ccmdlen > ll) {
4107 			printf("ipfw: opcode %d size truncated\n",
4108 				ccmd->opcode);
4109 			return EINVAL;
4110 		}
4111 	}
4112 	free(tmp, M_TEMP);
4113 
4114 	return 0;
4115 }
4116 
4117 static int
4118 convert_rule_to_8(struct ip_fw_rule0 *rule)
4119 {
4120 	/* Used to modify original rule */
4121 	struct ip_fw7 *rule7 = (struct ip_fw7 *) rule;
4122 
4123 	/* Used to copy commands */
4124 	ipfw_insn *ccmd, *dst;
4125 	int ll = 0, ccmdlen = 0;
4126 
4127 	/* Copy of original rule */
4128 	struct ip_fw7 *tmp = malloc(RULE_MAXSIZE, M_TEMP, M_NOWAIT | M_ZERO);
4129 	if (tmp == NULL) {
4130 		return 1; //XXX error
4131 	}
4132 
4133 	bcopy(rule7, tmp, RULE_MAXSIZE);
4134 
4135 	for (ll = tmp->cmd_len, ccmd = tmp->cmd, dst = rule->cmd ;
4136 			ll > 0 ; ll -= ccmdlen, ccmd += ccmdlen, dst += ccmdlen) {
4137 		ccmdlen = F_LEN(ccmd);
4138 
4139 		bcopy(ccmd, dst, F_LEN(ccmd)*sizeof(uint32_t));
4140 
4141 		if (dst->opcode > O_NAT)
4142 			/* O_REASS doesn't exists in 7.2 version, so
4143 			 * increment opcode if it is after O_REASS
4144 			 */
4145 			dst->opcode++;
4146 
4147 		if (ccmdlen > ll) {
4148 			printf("ipfw: opcode %d size truncated\n",
4149 			    ccmd->opcode);
4150 			return EINVAL;
4151 		}
4152 	}
4153 
4154 	rule->_pad = tmp->_pad;
4155 	rule->set = tmp->set;
4156 	rule->rulenum = tmp->rulenum;
4157 	rule->cmd_len = tmp->cmd_len;
4158 	rule->act_ofs = tmp->act_ofs;
4159 	rule->next_rule = (struct ip_fw *)tmp->next_rule;
4160 	rule->cmd_len = tmp->cmd_len;
4161 	rule->id = 0; /* XXX see if is ok = 0 */
4162 	rule->pcnt = tmp->pcnt;
4163 	rule->bcnt = tmp->bcnt;
4164 	rule->timestamp = tmp->timestamp;
4165 
4166 	free (tmp, M_TEMP);
4167 	return 0;
4168 }
4169 
4170 /*
4171  * Named object api
4172  *
4173  */
4174 
4175 void
4176 ipfw_init_srv(struct ip_fw_chain *ch)
4177 {
4178 
4179 	ch->srvmap = ipfw_objhash_create(IPFW_OBJECTS_DEFAULT);
4180 	ch->srvstate = malloc(sizeof(void *) * IPFW_OBJECTS_DEFAULT,
4181 	    M_IPFW, M_WAITOK | M_ZERO);
4182 }
4183 
4184 void
4185 ipfw_destroy_srv(struct ip_fw_chain *ch)
4186 {
4187 
4188 	free(ch->srvstate, M_IPFW);
4189 	ipfw_objhash_destroy(ch->srvmap);
4190 }
4191 
4192 /*
4193  * Allocate new bitmask which can be used to enlarge/shrink
4194  * named instance index.
4195  */
4196 void
4197 ipfw_objhash_bitmap_alloc(uint32_t items, void **idx, int *pblocks)
4198 {
4199 	size_t size;
4200 	int max_blocks;
4201 	u_long *idx_mask;
4202 
4203 	KASSERT((items % BLOCK_ITEMS) == 0,
4204 	   ("bitmask size needs to power of 2 and greater or equal to %zu",
4205 	    BLOCK_ITEMS));
4206 
4207 	max_blocks = items / BLOCK_ITEMS;
4208 	size = items / 8;
4209 	idx_mask = malloc(size * IPFW_MAX_SETS, M_IPFW, M_WAITOK);
4210 	/* Mark all as free */
4211 	memset(idx_mask, 0xFF, size * IPFW_MAX_SETS);
4212 	*idx_mask &= ~(u_long)1; /* Skip index 0 */
4213 
4214 	*idx = idx_mask;
4215 	*pblocks = max_blocks;
4216 }
4217 
4218 /*
4219  * Copy current bitmask index to new one.
4220  */
4221 void
4222 ipfw_objhash_bitmap_merge(struct namedobj_instance *ni, void **idx, int *blocks)
4223 {
4224 	int old_blocks, new_blocks;
4225 	u_long *old_idx, *new_idx;
4226 	int i;
4227 
4228 	old_idx = ni->idx_mask;
4229 	old_blocks = ni->max_blocks;
4230 	new_idx = *idx;
4231 	new_blocks = *blocks;
4232 
4233 	for (i = 0; i < IPFW_MAX_SETS; i++) {
4234 		memcpy(&new_idx[new_blocks * i], &old_idx[old_blocks * i],
4235 		    old_blocks * sizeof(u_long));
4236 	}
4237 }
4238 
4239 /*
4240  * Swaps current @ni index with new one.
4241  */
4242 void
4243 ipfw_objhash_bitmap_swap(struct namedobj_instance *ni, void **idx, int *blocks)
4244 {
4245 	int old_blocks;
4246 	u_long *old_idx;
4247 
4248 	old_idx = ni->idx_mask;
4249 	old_blocks = ni->max_blocks;
4250 
4251 	ni->idx_mask = *idx;
4252 	ni->max_blocks = *blocks;
4253 
4254 	/* Save old values */
4255 	*idx = old_idx;
4256 	*blocks = old_blocks;
4257 }
4258 
4259 void
4260 ipfw_objhash_bitmap_free(void *idx, int blocks)
4261 {
4262 
4263 	free(idx, M_IPFW);
4264 }
4265 
4266 /*
4267  * Creates named hash instance.
4268  * Must be called without holding any locks.
4269  * Return pointer to new instance.
4270  */
4271 struct namedobj_instance *
4272 ipfw_objhash_create(uint32_t items)
4273 {
4274 	struct namedobj_instance *ni;
4275 	int i;
4276 	size_t size;
4277 
4278 	size = sizeof(struct namedobj_instance) +
4279 	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE +
4280 	    sizeof(struct namedobjects_head) * NAMEDOBJ_HASH_SIZE;
4281 
4282 	ni = malloc(size, M_IPFW, M_WAITOK | M_ZERO);
4283 	ni->nn_size = NAMEDOBJ_HASH_SIZE;
4284 	ni->nv_size = NAMEDOBJ_HASH_SIZE;
4285 
4286 	ni->names = (struct namedobjects_head *)(ni +1);
4287 	ni->values = &ni->names[ni->nn_size];
4288 
4289 	for (i = 0; i < ni->nn_size; i++)
4290 		TAILQ_INIT(&ni->names[i]);
4291 
4292 	for (i = 0; i < ni->nv_size; i++)
4293 		TAILQ_INIT(&ni->values[i]);
4294 
4295 	/* Set default hashing/comparison functions */
4296 	ni->hash_f = objhash_hash_name;
4297 	ni->cmp_f = objhash_cmp_name;
4298 
4299 	/* Allocate bitmask separately due to possible resize */
4300 	ipfw_objhash_bitmap_alloc(items, (void*)&ni->idx_mask, &ni->max_blocks);
4301 
4302 	return (ni);
4303 }
4304 
4305 void
4306 ipfw_objhash_destroy(struct namedobj_instance *ni)
4307 {
4308 
4309 	free(ni->idx_mask, M_IPFW);
4310 	free(ni, M_IPFW);
4311 }
4312 
4313 void
4314 ipfw_objhash_set_funcs(struct namedobj_instance *ni, objhash_hash_f *hash_f,
4315     objhash_cmp_f *cmp_f)
4316 {
4317 
4318 	ni->hash_f = hash_f;
4319 	ni->cmp_f = cmp_f;
4320 }
4321 
4322 static uint32_t
4323 objhash_hash_name(struct namedobj_instance *ni, const void *name, uint32_t set)
4324 {
4325 
4326 	return (fnv_32_str((const char *)name, FNV1_32_INIT));
4327 }
4328 
4329 static int
4330 objhash_cmp_name(struct named_object *no, const void *name, uint32_t set)
4331 {
4332 
4333 	if ((strcmp(no->name, (const char *)name) == 0) && (no->set == set))
4334 		return (0);
4335 
4336 	return (1);
4337 }
4338 
4339 static uint32_t
4340 objhash_hash_idx(struct namedobj_instance *ni, uint32_t val)
4341 {
4342 	uint32_t v;
4343 
4344 	v = val % (ni->nv_size - 1);
4345 
4346 	return (v);
4347 }
4348 
4349 struct named_object *
4350 ipfw_objhash_lookup_name(struct namedobj_instance *ni, uint32_t set, char *name)
4351 {
4352 	struct named_object *no;
4353 	uint32_t hash;
4354 
4355 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
4356 
4357 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4358 		if (ni->cmp_f(no, name, set) == 0)
4359 			return (no);
4360 	}
4361 
4362 	return (NULL);
4363 }
4364 
4365 /*
4366  * Find named object by @uid.
4367  * Check @tlvs for valid data inside.
4368  *
4369  * Returns pointer to found TLV or NULL.
4370  */
4371 ipfw_obj_ntlv *
4372 ipfw_find_name_tlv_type(void *tlvs, int len, uint16_t uidx, uint32_t etlv)
4373 {
4374 	ipfw_obj_ntlv *ntlv;
4375 	uintptr_t pa, pe;
4376 	int l;
4377 
4378 	pa = (uintptr_t)tlvs;
4379 	pe = pa + len;
4380 	l = 0;
4381 	for (; pa < pe; pa += l) {
4382 		ntlv = (ipfw_obj_ntlv *)pa;
4383 		l = ntlv->head.length;
4384 
4385 		if (l != sizeof(*ntlv))
4386 			return (NULL);
4387 
4388 		if (ntlv->idx != uidx)
4389 			continue;
4390 		/*
4391 		 * When userland has specified zero TLV type, do
4392 		 * not compare it with eltv. In some cases userland
4393 		 * doesn't know what type should it have. Use only
4394 		 * uidx and name for search named_object.
4395 		 */
4396 		if (ntlv->head.type != 0 &&
4397 		    ntlv->head.type != (uint16_t)etlv)
4398 			continue;
4399 
4400 		if (ipfw_check_object_name_generic(ntlv->name) != 0)
4401 			return (NULL);
4402 
4403 		return (ntlv);
4404 	}
4405 
4406 	return (NULL);
4407 }
4408 
4409 /*
4410  * Finds object config based on either legacy index
4411  * or name in ntlv.
4412  * Note @ti structure contains unchecked data from userland.
4413  *
4414  * Returns 0 in success and fills in @pno with found config
4415  */
4416 int
4417 ipfw_objhash_find_type(struct namedobj_instance *ni, struct tid_info *ti,
4418     uint32_t etlv, struct named_object **pno)
4419 {
4420 	char *name;
4421 	ipfw_obj_ntlv *ntlv;
4422 	uint32_t set;
4423 
4424 	if (ti->tlvs == NULL)
4425 		return (EINVAL);
4426 
4427 	ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx, etlv);
4428 	if (ntlv == NULL)
4429 		return (EINVAL);
4430 	name = ntlv->name;
4431 
4432 	/*
4433 	 * Use set provided by @ti instead of @ntlv one.
4434 	 * This is needed due to different sets behavior
4435 	 * controlled by V_fw_tables_sets.
4436 	 */
4437 	set = ti->set;
4438 	*pno = ipfw_objhash_lookup_name(ni, set, name);
4439 	if (*pno == NULL)
4440 		return (ESRCH);
4441 	return (0);
4442 }
4443 
4444 /*
4445  * Find named object by name, considering also its TLV type.
4446  */
4447 struct named_object *
4448 ipfw_objhash_lookup_name_type(struct namedobj_instance *ni, uint32_t set,
4449     uint32_t type, const char *name)
4450 {
4451 	struct named_object *no;
4452 	uint32_t hash;
4453 
4454 	hash = ni->hash_f(ni, name, set) % ni->nn_size;
4455 
4456 	TAILQ_FOREACH(no, &ni->names[hash], nn_next) {
4457 		if (ni->cmp_f(no, name, set) == 0 &&
4458 		    no->etlv == (uint16_t)type)
4459 			return (no);
4460 	}
4461 
4462 	return (NULL);
4463 }
4464 
4465 struct named_object *
4466 ipfw_objhash_lookup_kidx(struct namedobj_instance *ni, uint16_t kidx)
4467 {
4468 	struct named_object *no;
4469 	uint32_t hash;
4470 
4471 	hash = objhash_hash_idx(ni, kidx);
4472 
4473 	TAILQ_FOREACH(no, &ni->values[hash], nv_next) {
4474 		if (no->kidx == kidx)
4475 			return (no);
4476 	}
4477 
4478 	return (NULL);
4479 }
4480 
4481 int
4482 ipfw_objhash_same_name(struct namedobj_instance *ni, struct named_object *a,
4483     struct named_object *b)
4484 {
4485 
4486 	if ((strcmp(a->name, b->name) == 0) && a->set == b->set)
4487 		return (1);
4488 
4489 	return (0);
4490 }
4491 
4492 void
4493 ipfw_objhash_add(struct namedobj_instance *ni, struct named_object *no)
4494 {
4495 	uint32_t hash;
4496 
4497 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4498 	TAILQ_INSERT_HEAD(&ni->names[hash], no, nn_next);
4499 
4500 	hash = objhash_hash_idx(ni, no->kidx);
4501 	TAILQ_INSERT_HEAD(&ni->values[hash], no, nv_next);
4502 
4503 	ni->count++;
4504 }
4505 
4506 void
4507 ipfw_objhash_del(struct namedobj_instance *ni, struct named_object *no)
4508 {
4509 	uint32_t hash;
4510 
4511 	hash = ni->hash_f(ni, no->name, no->set) % ni->nn_size;
4512 	TAILQ_REMOVE(&ni->names[hash], no, nn_next);
4513 
4514 	hash = objhash_hash_idx(ni, no->kidx);
4515 	TAILQ_REMOVE(&ni->values[hash], no, nv_next);
4516 
4517 	ni->count--;
4518 }
4519 
4520 uint32_t
4521 ipfw_objhash_count(struct namedobj_instance *ni)
4522 {
4523 
4524 	return (ni->count);
4525 }
4526 
4527 uint32_t
4528 ipfw_objhash_count_type(struct namedobj_instance *ni, uint16_t type)
4529 {
4530 	struct named_object *no;
4531 	uint32_t count;
4532 	int i;
4533 
4534 	count = 0;
4535 	for (i = 0; i < ni->nn_size; i++) {
4536 		TAILQ_FOREACH(no, &ni->names[i], nn_next) {
4537 			if (no->etlv == type)
4538 				count++;
4539 		}
4540 	}
4541 	return (count);
4542 }
4543 
4544 /*
4545  * Runs @func for each found named object.
4546  * It is safe to delete objects from callback
4547  */
4548 int
4549 ipfw_objhash_foreach(struct namedobj_instance *ni, objhash_cb_t *f, void *arg)
4550 {
4551 	struct named_object *no, *no_tmp;
4552 	int i, ret;
4553 
4554 	for (i = 0; i < ni->nn_size; i++) {
4555 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4556 			ret = f(ni, no, arg);
4557 			if (ret != 0)
4558 				return (ret);
4559 		}
4560 	}
4561 	return (0);
4562 }
4563 
4564 /*
4565  * Runs @f for each found named object with type @type.
4566  * It is safe to delete objects from callback
4567  */
4568 int
4569 ipfw_objhash_foreach_type(struct namedobj_instance *ni, objhash_cb_t *f,
4570     void *arg, uint16_t type)
4571 {
4572 	struct named_object *no, *no_tmp;
4573 	int i, ret;
4574 
4575 	for (i = 0; i < ni->nn_size; i++) {
4576 		TAILQ_FOREACH_SAFE(no, &ni->names[i], nn_next, no_tmp) {
4577 			if (no->etlv != type)
4578 				continue;
4579 			ret = f(ni, no, arg);
4580 			if (ret != 0)
4581 				return (ret);
4582 		}
4583 	}
4584 	return (0);
4585 }
4586 
4587 /*
4588  * Removes index from given set.
4589  * Returns 0 on success.
4590  */
4591 int
4592 ipfw_objhash_free_idx(struct namedobj_instance *ni, uint16_t idx)
4593 {
4594 	u_long *mask;
4595 	int i, v;
4596 
4597 	i = idx / BLOCK_ITEMS;
4598 	v = idx % BLOCK_ITEMS;
4599 
4600 	if (i >= ni->max_blocks)
4601 		return (1);
4602 
4603 	mask = &ni->idx_mask[i];
4604 
4605 	if ((*mask & ((u_long)1 << v)) != 0)
4606 		return (1);
4607 
4608 	/* Mark as free */
4609 	*mask |= (u_long)1 << v;
4610 
4611 	/* Update free offset */
4612 	if (ni->free_off[0] > i)
4613 		ni->free_off[0] = i;
4614 
4615 	return (0);
4616 }
4617 
4618 /*
4619  * Allocate new index in given instance and stores in in @pidx.
4620  * Returns 0 on success.
4621  */
4622 int
4623 ipfw_objhash_alloc_idx(void *n, uint16_t *pidx)
4624 {
4625 	struct namedobj_instance *ni;
4626 	u_long *mask;
4627 	int i, off, v;
4628 
4629 	ni = (struct namedobj_instance *)n;
4630 
4631 	off = ni->free_off[0];
4632 	mask = &ni->idx_mask[off];
4633 
4634 	for (i = off; i < ni->max_blocks; i++, mask++) {
4635 		if ((v = ffsl(*mask)) == 0)
4636 			continue;
4637 
4638 		/* Mark as busy */
4639 		*mask &= ~ ((u_long)1 << (v - 1));
4640 
4641 		ni->free_off[0] = i;
4642 
4643 		v = BLOCK_ITEMS * i + v - 1;
4644 
4645 		*pidx = v;
4646 		return (0);
4647 	}
4648 
4649 	return (1);
4650 }
4651 
4652 /* end of file */
4653