1 /* $NetBSD: npf_ruleset.c,v 1.42 2015/03/20 23:36:28 rmind Exp $ */
2
3 /*-
4 * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This material is based upon work partially supported by The
8 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NPF ruleset module.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: npf_ruleset.c,v 1.42 2015/03/20 23:36:28 rmind Exp $");
38
39 #include <sys/param.h>
40 #include <sys/types.h>
41
42 #include <sys/atomic.h>
43 #include <sys/kmem.h>
44 #include <sys/queue.h>
45 #include <sys/mbuf.h>
46 #include <sys/types.h>
47
48 #include <net/bpf.h>
49 #include <net/bpfjit.h>
50 #include <net/pfil.h>
51 #include <net/if.h>
52
53 #include "npf_impl.h"
54
struct npf_ruleset {
	/*
	 * - List of all rules.
	 * - Dynamic (i.e. named) rules.
	 * - G/C list for convenience (rules pending destruction,
	 *   freed by npf_ruleset_gc()).
	 */
	LIST_HEAD(, npf_rule)	rs_all;
	LIST_HEAD(, npf_rule)	rs_dynamic;
	LIST_HEAD(, npf_rule)	rs_gc;

	/* Unique ID counter (incremented for each dynamic rule added). */
	uint64_t		rs_idcnt;

	/* Number of array slots and active rules. */
	u_int			rs_slots;
	u_int			rs_nitems;

	/* Array of ordered rules (flexible array member; rs_slots entries). */
	npf_rule_t *		rs_rules[];
};
75
struct npf_rule {
	/* Attributes, interface and skip slot. */
	uint32_t		r_attr;
	u_int			r_ifid;		/* 0 = match any interface */
	u_int			r_skip_to;	/* may carry SKIPTO_ADJ_FLAG */

	/* Code to process, if any. */
	int			r_type;
	bpfjit_func_t		r_jcode;	/* JIT-compiled code (optional) */
	void *			r_code;		/* byte-code, r_clen bytes */
	u_int			r_clen;

	/* NAT policy (optional), rule procedure and subset. */
	npf_natpolicy_t *	r_natp;
	npf_rproc_t *		r_rproc;

	union {
		/*
		 * Dynamic group: rule subset and a group list entry.
		 * The subset is a singly-linked chain of dynamic rules,
		 * kept ordered by priority (see npf_ruleset_add()).
		 */
		struct {
			npf_rule_t *		r_subset;
			LIST_ENTRY(npf_rule)	r_dentry;
		};

		/*
		 * Dynamic rule: priority, parent group and next rule.
		 */
		struct {
			int			r_priority;
			npf_rule_t *		r_parent;
			npf_rule_t *		r_next;
		};
	};

	/* Rule ID, name and the optional key. */
	uint64_t		r_id;
	char			r_name[NPF_RULE_MAXNAMELEN];
	uint8_t			r_key[NPF_RULE_MAXKEYLEN];

	/* All-list entry and the auxiliary info. */
	LIST_ENTRY(npf_rule)	r_aentry;
	prop_data_t		r_info;
};
120
/*
 * A skip-to value tagged with SKIPTO_ADJ_FLAG was auto-adjusted by
 * npf_ruleset_insert() rather than set by the user; npf_rule_export()
 * exports such values as zero.  SKIPTO_MASK extracts the rule index.
 */
#define	SKIPTO_ADJ_FLAG		(1U << 31)
#define	SKIPTO_MASK		(SKIPTO_ADJ_FLAG - 1)

static int	npf_rule_export(const npf_ruleset_t *,
    const npf_rule_t *, prop_dictionary_t);

/*
 * Private attributes - must be in the NPF_RULE_PRIVMASK range.
 */
#define	NPF_RULE_KEEPNAT	(0x01000000 & NPF_RULE_PRIVMASK)

/* True for a dynamic (named) group which may hold dynamic rules. */
#define	NPF_DYNAMIC_GROUP_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_DYNAMIC_GROUP)

/* True for a dynamic rule added at run-time into a dynamic group. */
#define	NPF_DYNAMIC_RULE_P(attr) \
    (((attr) & NPF_DYNAMIC_GROUP) == NPF_RULE_DYNAMIC)
137
138 npf_ruleset_t *
npf_ruleset_create(size_t slots)139 npf_ruleset_create(size_t slots)
140 {
141 size_t len = offsetof(npf_ruleset_t, rs_rules[slots]);
142 npf_ruleset_t *rlset;
143
144 rlset = kmem_zalloc(len, KM_SLEEP);
145 LIST_INIT(&rlset->rs_dynamic);
146 LIST_INIT(&rlset->rs_all);
147 LIST_INIT(&rlset->rs_gc);
148 rlset->rs_slots = slots;
149
150 return rlset;
151 }
152
153 void
npf_ruleset_destroy(npf_ruleset_t * rlset)154 npf_ruleset_destroy(npf_ruleset_t *rlset)
155 {
156 size_t len = offsetof(npf_ruleset_t, rs_rules[rlset->rs_slots]);
157 npf_rule_t *rl;
158
159 while ((rl = LIST_FIRST(&rlset->rs_all)) != NULL) {
160 if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
161 /*
162 * Note: r_subset may point to the rules which
163 * were inherited by a new ruleset.
164 */
165 rl->r_subset = NULL;
166 LIST_REMOVE(rl, r_dentry);
167 }
168 if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
169 /* Not removing from r_subset, see above. */
170 KASSERT(rl->r_parent != NULL);
171 }
172 LIST_REMOVE(rl, r_aentry);
173 npf_rule_free(rl);
174 }
175 KASSERT(LIST_EMPTY(&rlset->rs_dynamic));
176 KASSERT(LIST_EMPTY(&rlset->rs_gc));
177 kmem_free(rlset, len);
178 }
179
180 /*
181 * npf_ruleset_insert: insert the rule into the specified ruleset.
182 */
183 void
npf_ruleset_insert(npf_ruleset_t * rlset,npf_rule_t * rl)184 npf_ruleset_insert(npf_ruleset_t *rlset, npf_rule_t *rl)
185 {
186 u_int n = rlset->rs_nitems;
187
188 KASSERT(n < rlset->rs_slots);
189
190 LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
191 if (NPF_DYNAMIC_GROUP_P(rl->r_attr)) {
192 LIST_INSERT_HEAD(&rlset->rs_dynamic, rl, r_dentry);
193 } else {
194 KASSERTMSG(rl->r_parent == NULL, "cannot be dynamic rule");
195 rl->r_attr &= ~NPF_RULE_DYNAMIC;
196 }
197
198 rlset->rs_rules[n] = rl;
199 rlset->rs_nitems++;
200
201 if (rl->r_skip_to < ++n) {
202 rl->r_skip_to = SKIPTO_ADJ_FLAG | n;
203 }
204 }
205
206 static npf_rule_t *
npf_ruleset_lookup(npf_ruleset_t * rlset,const char * name)207 npf_ruleset_lookup(npf_ruleset_t *rlset, const char *name)
208 {
209 npf_rule_t *rl;
210
211 KASSERT(npf_config_locked_p());
212
213 LIST_FOREACH(rl, &rlset->rs_dynamic, r_dentry) {
214 KASSERT(NPF_DYNAMIC_GROUP_P(rl->r_attr));
215 if (strncmp(rl->r_name, name, NPF_RULE_MAXNAMELEN) == 0)
216 break;
217 }
218 return rl;
219 }
220
/*
 * npf_ruleset_add: insert dynamic rule into the (active) ruleset.
 *
 * => Returns EINVAL if the rule is not dynamic and ESRCH if the named
 *    dynamic group does not exist in the ruleset.
 * => Writers hold the config lock; readers may be traversing the
 *    r_subset chain concurrently, hence the careful publication order.
 */
int
npf_ruleset_add(npf_ruleset_t *rlset, const char *rname, npf_rule_t *rl)
{
	npf_rule_t *rg, *it, *target;
	int priocmd;

	/* Only dynamic rules may be added by group name at run-time. */
	if (!NPF_DYNAMIC_RULE_P(rl->r_attr)) {
		return EINVAL;
	}
	rg = npf_ruleset_lookup(rlset, rname);
	if (rg == NULL) {
		return ESRCH;
	}

	/* Dynamic rule - assign a unique ID and save the parent. */
	rl->r_id = ++rlset->rs_idcnt;
	rl->r_parent = rg;

	/*
	 * Rule priority: (highest) 1, 2 ... n (lowest).
	 * Negative priority indicates an operation (e.g. NPF_PRI_FIRST /
	 * NPF_PRI_LAST) and is reset to zero.
	 */
	if ((priocmd = rl->r_priority) < 0) {
		rl->r_priority = 0;
	}

	/*
	 * WARNING: once rg->r_subset or target->r_next of an *active*
	 * rule is set, then our rule becomes globally visible and active.
	 * Must issue a store fence (membar_producer) so that rl->r_next
	 * is visible to concurrent readers before the rule is published.
	 */
	switch (priocmd) {
	case NPF_PRI_LAST:
	default:
		/* Find the last rule of equal or higher priority. */
		target = NULL;
		it = rg->r_subset;
		while (it && it->r_priority <= rl->r_priority) {
			target = it;
			it = it->r_next;
		}
		if (target) {
			/* Publish after the target rule. */
			rl->r_next = target->r_next;
			membar_producer();
			target->r_next = rl;
			break;
		}
		/* Empty subset or highest priority: insert at the head. */
		/* FALLTHROUGH */

	case NPF_PRI_FIRST:
		rl->r_next = rg->r_subset;
		membar_producer();
		rg->r_subset = rl;
		break;
	}

	/* Finally, add into the all-list. */
	LIST_INSERT_HEAD(&rlset->rs_all, rl, r_aentry);
	return 0;
}
283
284 static void
npf_ruleset_unlink(npf_rule_t * rl,npf_rule_t * prev)285 npf_ruleset_unlink(npf_rule_t *rl, npf_rule_t *prev)
286 {
287 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
288 if (prev) {
289 prev->r_next = rl->r_next;
290 } else {
291 npf_rule_t *rg = rl->r_parent;
292 rg->r_subset = rl->r_next;
293 }
294 LIST_REMOVE(rl, r_aentry);
295 }
296
297 /*
298 * npf_ruleset_remove: remove the dynamic rule given the rule ID.
299 */
300 int
npf_ruleset_remove(npf_ruleset_t * rlset,const char * rname,uint64_t id)301 npf_ruleset_remove(npf_ruleset_t *rlset, const char *rname, uint64_t id)
302 {
303 npf_rule_t *rg, *prev = NULL;
304
305 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
306 return ESRCH;
307 }
308 for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
309 KASSERT(rl->r_parent == rg);
310 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
311
312 /* Compare ID. On match, remove and return. */
313 if (rl->r_id == id) {
314 npf_ruleset_unlink(rl, prev);
315 LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
316 return 0;
317 }
318 prev = rl;
319 }
320 return ENOENT;
321 }
322
323 /*
324 * npf_ruleset_remkey: remove the dynamic rule given the rule key.
325 */
326 int
npf_ruleset_remkey(npf_ruleset_t * rlset,const char * rname,const void * key,size_t len)327 npf_ruleset_remkey(npf_ruleset_t *rlset, const char *rname,
328 const void *key, size_t len)
329 {
330 npf_rule_t *rg, *rlast = NULL, *prev = NULL, *lastprev = NULL;
331
332 KASSERT(len && len <= NPF_RULE_MAXKEYLEN);
333
334 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
335 return ESRCH;
336 }
337
338 /* Compare the key and find the last in the list. */
339 for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
340 KASSERT(rl->r_parent == rg);
341 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
342 if (memcmp(rl->r_key, key, len) == 0) {
343 lastprev = prev;
344 rlast = rl;
345 }
346 prev = rl;
347 }
348 if (!rlast) {
349 return ENOENT;
350 }
351 npf_ruleset_unlink(rlast, lastprev);
352 LIST_INSERT_HEAD(&rlset->rs_gc, rlast, r_aentry);
353 return 0;
354 }
355
356 /*
357 * npf_ruleset_list: serialise and return the dynamic rules.
358 */
359 prop_dictionary_t
npf_ruleset_list(npf_ruleset_t * rlset,const char * rname)360 npf_ruleset_list(npf_ruleset_t *rlset, const char *rname)
361 {
362 prop_dictionary_t rgdict;
363 prop_array_t rules;
364 npf_rule_t *rg;
365
366 KASSERT(npf_config_locked_p());
367
368 if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
369 return NULL;
370 }
371 if ((rgdict = prop_dictionary_create()) == NULL) {
372 return NULL;
373 }
374 if ((rules = prop_array_create()) == NULL) {
375 prop_object_release(rgdict);
376 return NULL;
377 }
378
379 for (npf_rule_t *rl = rg->r_subset; rl; rl = rl->r_next) {
380 prop_dictionary_t rldict;
381
382 KASSERT(rl->r_parent == rg);
383 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
384
385 rldict = prop_dictionary_create();
386 if (npf_rule_export(rlset, rl, rldict)) {
387 prop_object_release(rldict);
388 prop_object_release(rules);
389 return NULL;
390 }
391 prop_array_add(rules, rldict);
392 prop_object_release(rldict);
393 }
394
395 if (!prop_dictionary_set(rgdict, "rules", rules)) {
396 prop_object_release(rgdict);
397 rgdict = NULL;
398 }
399 prop_object_release(rules);
400 return rgdict;
401 }
402
/*
 * npf_ruleset_flush: flush the dynamic rules in the ruleset by inserting
 * them into the G/C list.
 *
 * => Returns ESRCH if the named dynamic group does not exist.
 * => The rules are not destroyed here; npf_ruleset_gc() frees them
 *    later, once it is safe to do so.
 */
int
npf_ruleset_flush(npf_ruleset_t *rlset, const char *rname)
{
	npf_rule_t *rg, *rl;

	if ((rg = npf_ruleset_lookup(rlset, rname)) == NULL) {
		return ESRCH;
	}

	/* Atomically detach the whole subset chain from the group. */
	rl = atomic_swap_ptr(&rg->r_subset, NULL);
	membar_producer();

	/*
	 * Move each detached rule onto the G/C list.  Unlinking from the
	 * all-list only touches r_aentry, so r_next stays intact and the
	 * detached chain can still be walked here (and by readers).
	 */
	while (rl) {
		KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
		KASSERT(rl->r_parent == rg);

		LIST_REMOVE(rl, r_aentry);
		LIST_INSERT_HEAD(&rlset->rs_gc, rl, r_aentry);
		rl = rl->r_next;
	}
	return 0;
}
429
430 /*
431 * npf_ruleset_gc: destroy the rules in G/C list.
432 */
433 void
npf_ruleset_gc(npf_ruleset_t * rlset)434 npf_ruleset_gc(npf_ruleset_t *rlset)
435 {
436 npf_rule_t *rl;
437
438 while ((rl = LIST_FIRST(&rlset->rs_gc)) != NULL) {
439 LIST_REMOVE(rl, r_aentry);
440 npf_rule_free(rl);
441 }
442 }
443
444 /*
445 * npf_ruleset_export: serialise and return the static rules.
446 */
447 int
npf_ruleset_export(const npf_ruleset_t * rlset,prop_array_t rules)448 npf_ruleset_export(const npf_ruleset_t *rlset, prop_array_t rules)
449 {
450 const u_int nitems = rlset->rs_nitems;
451 int error = 0;
452 u_int n = 0;
453
454 KASSERT(npf_config_locked_p());
455
456 while (n < nitems) {
457 const npf_rule_t *rl = rlset->rs_rules[n];
458 const npf_natpolicy_t *natp = rl->r_natp;
459 prop_dictionary_t rldict;
460
461 rldict = prop_dictionary_create();
462 if ((error = npf_rule_export(rlset, rl, rldict)) != 0) {
463 prop_object_release(rldict);
464 break;
465 }
466 if (natp && (error = npf_nat_policyexport(natp, rldict)) != 0) {
467 prop_object_release(rldict);
468 break;
469 }
470 prop_array_add(rules, rldict);
471 prop_object_release(rldict);
472 n++;
473 }
474 return error;
475 }
476
/*
 * npf_ruleset_reload: prepare the new ruleset by scanning the active
 * ruleset and: 1) sharing the dynamic rules 2) sharing NAT policies.
 *
 * => The active (old) ruleset should be exclusively locked.
 * => If "load" is true, connections are being loaded as well, and the
 *    NAT policy sharing/inheritance step is skipped (see below).
 */
void
npf_ruleset_reload(npf_ruleset_t *newset, npf_ruleset_t *oldset, bool load)
{
	npf_rule_t *rg, *rl;
	uint64_t nid = 0;

	KASSERT(npf_config_locked_p());

	/*
	 * Scan the dynamic rules and share (migrate) if needed.
	 */
	LIST_FOREACH(rg, &newset->rs_dynamic, r_dentry) {
		npf_rule_t *active_rgroup;

		/* Look for a dynamic ruleset group with such name. */
		active_rgroup = npf_ruleset_lookup(oldset, rg->r_name);
		if (active_rgroup == NULL) {
			continue;
		}

		/*
		 * ATOMICITY: Copy the head pointer of the linked-list,
		 * but do not remove the rules from the active r_subset.
		 * This is necessary because the rules are still active
		 * and therefore are accessible for inspection via the
		 * old ruleset.
		 */
		rg->r_subset = active_rgroup->r_subset;

		/*
		 * We can safely migrate to the new all-rule list and
		 * reset the parent rule, though.
		 */
		for (rl = rg->r_subset; rl; rl = rl->r_next) {
			KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
			LIST_REMOVE(rl, r_aentry);
			LIST_INSERT_HEAD(&newset->rs_all, rl, r_aentry);

			KASSERT(rl->r_parent == active_rgroup);
			rl->r_parent = rg;
		}
	}

	/*
	 * If performing the load of connections then NAT policies may
	 * already have translated connections associated with them and
	 * we should not share or inherit anything.
	 */
	if (load)
		return;

	/*
	 * Scan all rules in the new ruleset and share NAT policies.
	 * Also, assign a unique ID for each policy here.
	 */
	LIST_FOREACH(rl, &newset->rs_all, r_aentry) {
		npf_natpolicy_t *np;
		npf_rule_t *actrl;

		/* Does the rule have a NAT policy associated? */
		if ((np = rl->r_natp) == NULL) {
			continue;
		}

		/*
		 * First, try to share the active port map.  If this
		 * policy will be unused, npf_nat_freepolicy() will
		 * drop the reference.
		 */
		npf_ruleset_sharepm(oldset, np);

		/* Does it match with any policy in the active ruleset? */
		LIST_FOREACH(actrl, &oldset->rs_all, r_aentry) {
			if (!actrl->r_natp)
				continue;
			/* Skip policies already inherited by another rule. */
			if ((actrl->r_attr & NPF_RULE_KEEPNAT) != 0)
				continue;
			if (npf_nat_cmppolicy(actrl->r_natp, np))
				break;
		}
		if (!actrl) {
			/* No: just set the ID and continue. */
			npf_nat_setid(np, ++nid);
			continue;
		}

		/* Yes: inherit the matching NAT policy. */
		rl->r_natp = actrl->r_natp;
		npf_nat_setid(rl->r_natp, ++nid);

		/*
		 * Finally, mark the active rule to not destroy its NAT
		 * policy later as we inherited it (but the rule must be
		 * kept active for now).  Destroy the new/unused policy.
		 */
		actrl->r_attr |= NPF_RULE_KEEPNAT;
		npf_nat_freepolicy(np);
	}

	/* Inherit the ID counter. */
	newset->rs_idcnt = oldset->rs_idcnt;
}
585
586 /*
587 * npf_ruleset_sharepm: attempt to share the active NAT portmap.
588 */
589 npf_rule_t *
npf_ruleset_sharepm(npf_ruleset_t * rlset,npf_natpolicy_t * mnp)590 npf_ruleset_sharepm(npf_ruleset_t *rlset, npf_natpolicy_t *mnp)
591 {
592 npf_natpolicy_t *np;
593 npf_rule_t *rl;
594
595 /*
596 * Scan the NAT policies in the ruleset and match with the
597 * given policy based on the translation IP address. If they
598 * match - adjust the given NAT policy to use the active NAT
599 * portmap. In such case the reference on the old portmap is
600 * dropped and acquired on the active one.
601 */
602 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
603 np = rl->r_natp;
604 if (np == NULL || np == mnp)
605 continue;
606 if (npf_nat_sharepm(np, mnp))
607 break;
608 }
609 return rl;
610 }
611
612 npf_natpolicy_t *
npf_ruleset_findnat(npf_ruleset_t * rlset,uint64_t id)613 npf_ruleset_findnat(npf_ruleset_t *rlset, uint64_t id)
614 {
615 npf_rule_t *rl;
616
617 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
618 npf_natpolicy_t *np = rl->r_natp;
619 if (np && npf_nat_getid(np) == id) {
620 return np;
621 }
622 }
623 return NULL;
624 }
625
626 /*
627 * npf_ruleset_freealg: inspect the ruleset and disassociate specified
628 * ALG from all NAT entries using it.
629 */
630 void
npf_ruleset_freealg(npf_ruleset_t * rlset,npf_alg_t * alg)631 npf_ruleset_freealg(npf_ruleset_t *rlset, npf_alg_t *alg)
632 {
633 npf_rule_t *rl;
634 npf_natpolicy_t *np;
635
636 LIST_FOREACH(rl, &rlset->rs_all, r_aentry) {
637 if ((np = rl->r_natp) != NULL) {
638 npf_nat_freealg(np, alg);
639 }
640 }
641 }
642
643 /*
644 * npf_rule_alloc: allocate a rule and initialise it.
645 */
646 npf_rule_t *
npf_rule_alloc(prop_dictionary_t rldict)647 npf_rule_alloc(prop_dictionary_t rldict)
648 {
649 npf_rule_t *rl;
650 const char *rname;
651 prop_data_t d;
652
653 /* Allocate a rule structure. */
654 rl = kmem_zalloc(sizeof(npf_rule_t), KM_SLEEP);
655 rl->r_natp = NULL;
656
657 /* Name (optional) */
658 if (prop_dictionary_get_cstring_nocopy(rldict, "name", &rname)) {
659 strlcpy(rl->r_name, rname, NPF_RULE_MAXNAMELEN);
660 } else {
661 rl->r_name[0] = '\0';
662 }
663
664 /* Attributes, priority and interface ID (optional). */
665 prop_dictionary_get_uint32(rldict, "attr", &rl->r_attr);
666 rl->r_attr &= ~NPF_RULE_PRIVMASK;
667
668 if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
669 /* Priority of the dynamic rule. */
670 prop_dictionary_get_int32(rldict, "prio", &rl->r_priority);
671 } else {
672 /* The skip-to index. No need to validate it. */
673 prop_dictionary_get_uint32(rldict, "skip-to", &rl->r_skip_to);
674 }
675
676 /* Interface name; register and get the npf-if-id. */
677 if (prop_dictionary_get_cstring_nocopy(rldict, "ifname", &rname)) {
678 if ((rl->r_ifid = npf_ifmap_register(rname)) == 0) {
679 kmem_free(rl, sizeof(npf_rule_t));
680 return NULL;
681 }
682 } else {
683 rl->r_ifid = 0;
684 }
685
686 /* Key (optional). */
687 prop_object_t obj = prop_dictionary_get(rldict, "key");
688 const void *key = prop_data_data_nocopy(obj);
689
690 if (key) {
691 size_t len = prop_data_size(obj);
692 if (len > NPF_RULE_MAXKEYLEN) {
693 kmem_free(rl, sizeof(npf_rule_t));
694 return NULL;
695 }
696 memcpy(rl->r_key, key, len);
697 }
698
699 if ((d = prop_dictionary_get(rldict, "info")) != NULL) {
700 rl->r_info = prop_data_copy(d);
701 }
702 return rl;
703 }
704
705 static int
npf_rule_export(const npf_ruleset_t * rlset,const npf_rule_t * rl,prop_dictionary_t rldict)706 npf_rule_export(const npf_ruleset_t *rlset, const npf_rule_t *rl,
707 prop_dictionary_t rldict)
708 {
709 u_int skip_to = 0;
710 prop_data_t d;
711
712 prop_dictionary_set_uint32(rldict, "attr", rl->r_attr);
713 prop_dictionary_set_int32(rldict, "prio", rl->r_priority);
714 if ((rl->r_skip_to & SKIPTO_ADJ_FLAG) == 0) {
715 skip_to = rl->r_skip_to & SKIPTO_MASK;
716 }
717 prop_dictionary_set_uint32(rldict, "skip-to", skip_to);
718 prop_dictionary_set_int32(rldict, "code-type", rl->r_type);
719 if (rl->r_code) {
720 d = prop_data_create_data(rl->r_code, rl->r_clen);
721 prop_dictionary_set_and_rel(rldict, "code", d);
722 }
723
724 if (rl->r_ifid) {
725 const char *ifname = npf_ifmap_getname(rl->r_ifid);
726 prop_dictionary_set_cstring(rldict, "ifname", ifname);
727 }
728 prop_dictionary_set_uint64(rldict, "id", rl->r_id);
729
730 if (rl->r_name[0]) {
731 prop_dictionary_set_cstring(rldict, "name", rl->r_name);
732 }
733 if (NPF_DYNAMIC_RULE_P(rl->r_attr)) {
734 d = prop_data_create_data(rl->r_key, NPF_RULE_MAXKEYLEN);
735 prop_dictionary_set_and_rel(rldict, "key", d);
736 }
737 if (rl->r_info) {
738 prop_dictionary_set(rldict, "info", rl->r_info);
739 }
740 return 0;
741 }
742
743 /*
744 * npf_rule_setcode: assign filter code to the rule.
745 *
746 * => The code must be validated by the caller.
747 * => JIT compilation may be performed here.
748 */
749 void
npf_rule_setcode(npf_rule_t * rl,const int type,void * code,size_t size)750 npf_rule_setcode(npf_rule_t *rl, const int type, void *code, size_t size)
751 {
752 KASSERT(type == NPF_CODE_BPF);
753
754 rl->r_type = type;
755 rl->r_code = code;
756 rl->r_clen = size;
757 rl->r_jcode = npf_bpf_compile(code, size);
758 }
759
760 /*
761 * npf_rule_setrproc: assign a rule procedure and hold a reference on it.
762 */
763 void
npf_rule_setrproc(npf_rule_t * rl,npf_rproc_t * rp)764 npf_rule_setrproc(npf_rule_t *rl, npf_rproc_t *rp)
765 {
766 npf_rproc_acquire(rp);
767 rl->r_rproc = rp;
768 }
769
770 /*
771 * npf_rule_free: free the specified rule.
772 */
773 void
npf_rule_free(npf_rule_t * rl)774 npf_rule_free(npf_rule_t *rl)
775 {
776 npf_natpolicy_t *np = rl->r_natp;
777 npf_rproc_t *rp = rl->r_rproc;
778
779 if (np && (rl->r_attr & NPF_RULE_KEEPNAT) == 0) {
780 /* Free NAT policy. */
781 npf_nat_freepolicy(np);
782 }
783 if (rp) {
784 /* Release rule procedure. */
785 npf_rproc_release(rp);
786 }
787 if (rl->r_code) {
788 /* Free byte-code. */
789 kmem_free(rl->r_code, rl->r_clen);
790 }
791 if (rl->r_jcode) {
792 /* Free JIT code. */
793 bpf_jit_freecode(rl->r_jcode);
794 }
795 if (rl->r_info) {
796 prop_object_release(rl->r_info);
797 }
798 kmem_free(rl, sizeof(npf_rule_t));
799 }
800
801 /*
802 * npf_rule_getid: return the unique ID of a rule.
803 * npf_rule_getrproc: acquire a reference and return rule procedure, if any.
804 * npf_rule_getnat: get NAT policy assigned to the rule.
805 */
806
807 uint64_t
npf_rule_getid(const npf_rule_t * rl)808 npf_rule_getid(const npf_rule_t *rl)
809 {
810 KASSERT(NPF_DYNAMIC_RULE_P(rl->r_attr));
811 return rl->r_id;
812 }
813
814 npf_rproc_t *
npf_rule_getrproc(const npf_rule_t * rl)815 npf_rule_getrproc(const npf_rule_t *rl)
816 {
817 npf_rproc_t *rp = rl->r_rproc;
818
819 if (rp) {
820 npf_rproc_acquire(rp);
821 }
822 return rp;
823 }
824
825 npf_natpolicy_t *
npf_rule_getnat(const npf_rule_t * rl)826 npf_rule_getnat(const npf_rule_t *rl)
827 {
828 return rl->r_natp;
829 }
830
831 /*
832 * npf_rule_setnat: assign NAT policy to the rule and insert into the
833 * NAT policy list in the ruleset.
834 */
835 void
npf_rule_setnat(npf_rule_t * rl,npf_natpolicy_t * np)836 npf_rule_setnat(npf_rule_t *rl, npf_natpolicy_t *np)
837 {
838 KASSERT(rl->r_natp == NULL);
839 rl->r_natp = np;
840 }
841
842 /*
843 * npf_rule_inspect: match the interface, direction and run the filter code.
844 * Returns true if rule matches and false otherwise.
845 */
846 static inline bool
npf_rule_inspect(const npf_rule_t * rl,bpf_args_t * bc_args,const int di_mask,const u_int ifid)847 npf_rule_inspect(const npf_rule_t *rl, bpf_args_t *bc_args,
848 const int di_mask, const u_int ifid)
849 {
850 /* Match the interface. */
851 if (rl->r_ifid && rl->r_ifid != ifid) {
852 return false;
853 }
854
855 /* Match the direction. */
856 if ((rl->r_attr & NPF_RULE_DIMASK) != NPF_RULE_DIMASK) {
857 if ((rl->r_attr & di_mask) == 0)
858 return false;
859 }
860
861 /* Any code? */
862 if (!rl->r_code) {
863 KASSERT(rl->r_jcode == NULL);
864 return true;
865 }
866 KASSERT(rl->r_type == NPF_CODE_BPF);
867 return npf_bpf_filter(bc_args, rl->r_code, rl->r_jcode) != 0;
868 }
869
870 /*
871 * npf_rule_reinspect: re-inspect the dynamic rule by iterating its list.
872 * This is only for the dynamic rules. Subrules cannot have nested rules.
873 */
874 static inline npf_rule_t *
npf_rule_reinspect(const npf_rule_t * rg,bpf_args_t * bc_args,const int di_mask,const u_int ifid)875 npf_rule_reinspect(const npf_rule_t *rg, bpf_args_t *bc_args,
876 const int di_mask, const u_int ifid)
877 {
878 npf_rule_t *final_rl = NULL, *rl;
879
880 KASSERT(NPF_DYNAMIC_GROUP_P(rg->r_attr));
881
882 for (rl = rg->r_subset; rl; rl = rl->r_next) {
883 KASSERT(!final_rl || rl->r_priority >= final_rl->r_priority);
884 if (!npf_rule_inspect(rl, bc_args, di_mask, ifid)) {
885 continue;
886 }
887 if (rl->r_attr & NPF_RULE_FINAL) {
888 return rl;
889 }
890 final_rl = rl;
891 }
892 return final_rl;
893 }
894
/*
 * npf_ruleset_inspect: inspect the packet against the given ruleset.
 *
 * Loop through the rules in the set and run the byte-code of each rule
 * against the packet (nbuf chain).  If sub-ruleset is found, inspect it.
 *
 * => Returns the last matching rule, or NULL if nothing matched.
 * => "di" must indicate exactly one direction (PFIL_IN xor PFIL_OUT).
 * => "layer" is currently unused in this function.
 */
npf_rule_t *
npf_ruleset_inspect(npf_cache_t *npc, const npf_ruleset_t *rlset,
    const int di, const int layer)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int di_mask = (di & PFIL_IN) ? NPF_RULE_IN : NPF_RULE_OUT;
	const u_int nitems = rlset->rs_nitems;
	const u_int ifid = nbuf->nb_ifid;
	npf_rule_t *final_rl = NULL;
	bpf_args_t bc_args;
	u_int n = 0;

	KASSERT(((di & PFIL_IN) != 0) ^ ((di & PFIL_OUT) != 0));

	/*
	 * Prepare the external memory store and the arguments for
	 * the BPF programs to be executed.
	 */
	uint32_t bc_words[NPF_BPF_NWORDS];
	npf_bpf_prepare(npc, &bc_args, bc_words);

	while (n < nitems) {
		npf_rule_t *rl = rlset->rs_rules[n];
		const u_int skip_to = rl->r_skip_to & SKIPTO_MASK;
		const uint32_t attr = rl->r_attr;

		KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
		KASSERT(n < skip_to);

		/* Group is a barrier: return a matching if found any. */
		if ((attr & NPF_RULE_GROUP) != 0 && final_rl) {
			break;
		}

		/*
		 * Main inspection of the rule.  On mismatch, jump over
		 * the rules subordinate to this one (skip-to).
		 */
		if (!npf_rule_inspect(rl, &bc_args, di_mask, ifid)) {
			n = skip_to;
			continue;
		}

		if (NPF_DYNAMIC_GROUP_P(attr)) {
			/*
			 * If this is a dynamic rule, re-inspect the subrules.
			 * If it has any matching rule, then it is final.
			 */
			rl = npf_rule_reinspect(rl, &bc_args, di_mask, ifid);
			if (rl != NULL) {
				final_rl = rl;
				break;
			}
		} else if ((attr & NPF_RULE_GROUP) == 0) {
			/*
			 * Groups themselves are not matching.
			 */
			final_rl = rl;
		}

		/* Set the matching rule and check for "final". */
		if (attr & NPF_RULE_FINAL) {
			break;
		}
		n++;
	}

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	return final_rl;
}
968
969 /*
970 * npf_rule_conclude: return decision and the flags for conclusion.
971 *
972 * => Returns ENETUNREACH if "block" and 0 if "pass".
973 */
974 int
npf_rule_conclude(const npf_rule_t * rl,int * retfl)975 npf_rule_conclude(const npf_rule_t *rl, int *retfl)
976 {
977 /* If not passing - drop the packet. */
978 *retfl = rl->r_attr;
979 return (rl->r_attr & NPF_RULE_PASS) ? 0 : ENETUNREACH;
980 }
981
982
983 #if defined(DDB) || defined(_NPF_TESTING)
984
985 void
npf_ruleset_dump(const char * name)986 npf_ruleset_dump(const char *name)
987 {
988 npf_ruleset_t *rlset = npf_config_ruleset();
989 npf_rule_t *rg, *rl;
990
991 LIST_FOREACH(rg, &rlset->rs_dynamic, r_dentry) {
992 printf("ruleset '%s':\n", rg->r_name);
993 for (rl = rg->r_subset; rl; rl = rl->r_next) {
994 printf("\tid %"PRIu64", key: ", rl->r_id);
995 for (u_int i = 0; i < NPF_RULE_MAXKEYLEN; i++)
996 printf("%x", rl->r_key[i]);
997 printf("\n");
998 }
999 }
1000 }
1001
1002 #endif
1003