1 /* $NetBSD: npf_conn.c,v 1.16 2015/02/05 22:04:03 rmind Exp $ */
2
3 /*-
4 * Copyright (c) 2014-2015 Mindaugas Rasiukevicius <rmind at netbsd org>
5 * Copyright (c) 2010-2014 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This material is based upon work partially supported by The
9 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * NPF connection tracking for stateful filtering and translation.
35 *
36 * Overview
37 *
38 * Connection direction is identified by the direction of its first
39 * packet. Packets can be incoming or outgoing with respect to an
40 * interface. To describe the packet in the context of connection
41 * direction we will use the terms "forwards stream" and "backwards
42 * stream". All connections have two keys and thus two entries:
43 *
44 * npf_conn_t::c_forw_entry for the forwards stream and
45 * npf_conn_t::c_back_entry for the backwards stream.
46 *
47 * The keys are formed from the 5-tuple (source/destination address,
48 * source/destination port and the protocol). Additional matching
49 * is performed for the interface (a common behaviour is equivalent
50 * to the 6-tuple lookup including the interface ID). Note that the
51 * key may be formed using translated values in a case of NAT.
52 *
53 * Connections can serve two purposes: for the implicit passing or
54 * to accommodate the dynamic NAT. Connections for the former purpose
55 * are created by the rules with "stateful" attribute and are used for
56 * stateful filtering. Such connections indicate that the packet of
57 * the backwards stream should be passed without inspection of the
58 * ruleset. The other purpose is to associate a dynamic NAT mechanism
59 * with a connection. Such connections are created by the NAT policies
60 * and they have a relationship with NAT translation structure via
61 * npf_conn_t::c_nat. A single connection can serve both purposes,
62 * which is a common case.
63 *
64 * Connection life-cycle
65 *
66 * Connections are established when a packet matches said rule or
67 * NAT policy. Both keys of the established connection are inserted
68 * into the connection database. A garbage collection thread
69 * periodically scans all connections and depending on connection
70 * properties (e.g. last activity time, protocol) removes connection
71 * entries and expires the actual connections.
72 *
73 * Each connection has a reference count. The reference is acquired
74 * on lookup and should be released by the caller. It guarantees that
75 * the connection will not be destroyed, although it may be expired.
76 *
77 * Synchronisation
78 *
79 * Connection database is accessed in a lock-less manner by the main
80 * routines: npf_conn_inspect() and npf_conn_establish(). Since they
81 * are always called from a software interrupt, the database is
82 * protected using passive serialisation. The main place which can
83 * destroy a connection is npf_conn_worker(). The database itself
84 * can be replaced and destroyed in npf_conn_reload().
85 *
86 * ALG support
87 *
88 * Application-level gateways (ALGs) can override generic connection
89 * inspection (npf_alg_conn() call in npf_conn_inspect() function) by
90 * performing their own lookup using different key. Recursive call
91 * to npf_conn_inspect() is not allowed. The ALGs ought to use the
92 * npf_conn_lookup() function for this purpose.
93 *
94 * Lock order
95 *
96 * npf_config_lock ->
97 * conn_lock ->
98 * npf_conn_t::c_lock
99 */
100
101 #include <sys/cdefs.h>
102 __KERNEL_RCSID(0, "$NetBSD: npf_conn.c,v 1.16 2015/02/05 22:04:03 rmind Exp $");
103
104 #include <sys/param.h>
105 #include <sys/types.h>
106
107 #include <netinet/in.h>
108 #include <netinet/tcp.h>
109
110 #include <sys/atomic.h>
111 #include <sys/condvar.h>
112 #include <sys/kmem.h>
113 #include <sys/kthread.h>
114 #include <sys/mutex.h>
115 #include <net/pfil.h>
116 #include <sys/pool.h>
117 #include <sys/queue.h>
118 #include <sys/systm.h>
119
120 #define __NPF_CONN_PRIVATE
121 #include "npf_conn.h"
122 #include "npf_impl.h"
123
124 /*
125 * Connection flags: PFIL_IN and PFIL_OUT values are reserved for direction.
126 */
127 CTASSERT(PFIL_ALL == (0x001 | 0x002));
128 #define CONN_ACTIVE 0x004 /* visible on inspection */
129 #define CONN_PASS 0x008 /* perform implicit passing */
130 #define CONN_EXPIRE 0x010 /* explicitly expire */
131 #define CONN_REMOVED 0x020 /* "forw/back" entries removed */
132
133 /*
134 * Connection tracking state: disabled (off) or enabled (on).
135 */
136 enum { CONN_TRACKING_OFF, CONN_TRACKING_ON };
137 static volatile int conn_tracking __cacheline_aligned;
138
139 /* Connection tracking database, connection cache and the lock. */
140 static npf_conndb_t * conn_db __read_mostly;
141 static pool_cache_t conn_cache __read_mostly;
142 static kmutex_t conn_lock __cacheline_aligned;
143
144 static void npf_conn_worker(void);
145 static void npf_conn_destroy(npf_conn_t *);
146
147 /*
148 * npf_conn_sys{init,fini}: initialise/destroy connection tracking.
149 */
150
void
npf_conn_sysinit(void)
{
	/* Pool cache of npf_conn_t structures, allocated at IPL_NET. */
	conn_cache = pool_cache_init(sizeof(npf_conn_t), coherency_unit,
	    0, 0, "npfconpl", NULL, IPL_NET, NULL, NULL, NULL);
	mutex_init(&conn_lock, MUTEX_DEFAULT, IPL_NONE);
	/* Tracking starts disabled; enabled later via npf_conn_load(). */
	conn_tracking = CONN_TRACKING_OFF;
	conn_db = npf_conndb_create();

	/* Register the G/C handler to be run by the NPF worker thread. */
	npf_worker_register(npf_conn_worker);
}
162
void
npf_conn_sysfini(void)
{
	/* Note: the caller should have flushed the connections. */
	KASSERT(conn_tracking == CONN_TRACKING_OFF);
	npf_worker_unregister(npf_conn_worker);

	/* Tear down in reverse order of npf_conn_sysinit(). */
	npf_conndb_destroy(conn_db);
	pool_cache_destroy(conn_cache);
	mutex_destroy(&conn_lock);
}
174
175 /*
176 * npf_conn_load: perform the load by flushing the current connection
177 * database and replacing it with the new one or just destroying.
178 *
179 * => The caller must disable the connection tracking and ensure that
180 * there are no connection database lookups or references in-flight.
181 */
void
npf_conn_load(npf_conndb_t *ndb, bool track)
{
	npf_conndb_t *odb = NULL;

	KASSERT(npf_config_locked_p());

	/*
	 * The connection database is in the quiescent state.
	 * Prevent G/C thread from running and install a new database.
	 */
	mutex_enter(&conn_lock);
	if (ndb) {
		KASSERT(conn_tracking == CONN_TRACKING_OFF);
		odb = conn_db;
		conn_db = ndb;
		/*
		 * Full memory barrier: make the new database pointer
		 * globally visible before tracking can be re-enabled.
		 */
		membar_sync();
	}
	if (track) {
		/* After this point lookups start flying in. */
		conn_tracking = CONN_TRACKING_ON;
	}
	mutex_exit(&conn_lock);

	if (odb) {
		/*
		 * Flush all, no sync since the caller did it for us.
		 * Also, release the pool cache memory.
		 */
		npf_conn_gc(odb, true, false);
		npf_conndb_destroy(odb);
		pool_cache_invalidate(conn_cache);
	}
}
216
217 /*
218 * npf_conn_tracking: enable/disable connection tracking.
219 */
220 void
npf_conn_tracking(bool track)221 npf_conn_tracking(bool track)
222 {
223 KASSERT(npf_config_locked_p());
224 conn_tracking = track ? CONN_TRACKING_ON : CONN_TRACKING_OFF;
225 }
226
227 static inline bool
npf_conn_trackable_p(const npf_cache_t * npc)228 npf_conn_trackable_p(const npf_cache_t *npc)
229 {
230 /*
231 * Check if connection tracking is on. Also, if layer 3 and 4 are
232 * not cached - protocol is not supported or packet is invalid.
233 */
234 if (conn_tracking != CONN_TRACKING_ON) {
235 return false;
236 }
237 if (!npf_iscached(npc, NPC_IP46) || !npf_iscached(npc, NPC_LAYER4)) {
238 return false;
239 }
240 return true;
241 }
242
243 /*
244 * npf_conn_conkey: construct a key for the connection lookup.
245 *
246 * => Returns the key length in bytes or zero on failure.
247 */
/*
 * npf_conn_conkey: construct a key for the connection lookup.
 *
 * => The IDs (ports) are stored in network byte order as taken from
 *    the packet headers.
 * => Returns the key length in bytes or zero on failure.
 */
unsigned
npf_conn_conkey(const npf_cache_t *npc, npf_connkey_t *key, const bool forw)
{
	const u_int alen = npc->npc_alen;
	const struct tcphdr *th;
	const struct udphdr *uh;
	u_int keylen, isrc, idst;
	uint16_t id[2];

	switch (npc->npc_proto) {
	case IPPROTO_TCP:
		KASSERT(npf_iscached(npc, NPC_TCP));
		th = npc->npc_l4.tcp;
		id[NPF_SRC] = th->th_sport;
		id[NPF_DST] = th->th_dport;
		break;
	case IPPROTO_UDP:
		KASSERT(npf_iscached(npc, NPC_UDP));
		uh = npc->npc_l4.udp;
		id[NPF_SRC] = uh->uh_sport;
		id[NPF_DST] = uh->uh_dport;
		break;
	case IPPROTO_ICMP:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			/* ICMP query ID serves as both "ports". */
			const struct icmp *ic = npc->npc_l4.icmp;
			id[NPF_SRC] = ic->icmp_id;
			id[NPF_DST] = ic->icmp_id;
			break;
		}
		return 0;
	case IPPROTO_ICMPV6:
		if (npf_iscached(npc, NPC_ICMP_ID)) {
			/* Likewise, ICMPv6 query ID for both "ports". */
			const struct icmp6_hdr *ic6 = npc->npc_l4.icmp6;
			id[NPF_SRC] = ic6->icmp6_id;
			id[NPF_DST] = ic6->icmp6_id;
			break;
		}
		return 0;
	default:
		/* Unsupported protocol. */
		return 0;
	}

	if (__predict_true(forw)) {
		isrc = NPF_SRC, idst = NPF_DST;
	} else {
		isrc = NPF_DST, idst = NPF_SRC;
	}

	/*
	 * Construct a key formed out of 32-bit integers.  The key layout:
	 *
	 * Field: | proto  | alen   | src-id | dst-id | src-addr | dst-addr |
	 *        +--------+--------+--------+--------+----------+----------+
	 * Bits:  |   16   |   16   |   16   |   16   |  32-128  |  32-128  |
	 *
	 * The source and destination are inverted if the key is for the
	 * backwards stream (forw == false).  The address length depends
	 * on the 'alen' field; it is a length in bytes, either 4 or 16.
	 */

	key->ck_key[0] = ((uint32_t)npc->npc_proto << 16) | (alen & 0xffff);
	key->ck_key[1] = ((uint32_t)id[isrc] << 16) | id[idst];

	if (__predict_true(alen == sizeof(in_addr_t))) {
		/* IPv4: a single 32-bit word per address. */
		key->ck_key[2] = npc->npc_ips[isrc]->s6_addr32[0];
		key->ck_key[3] = npc->npc_ips[idst]->s6_addr32[0];
		keylen = 4 * sizeof(uint32_t);
	} else {
		/* IPv6: copy the full-length addresses. */
		const u_int nwords = alen >> 2;
		memcpy(&key->ck_key[2], npc->npc_ips[isrc], alen);
		memcpy(&key->ck_key[2 + nwords], npc->npc_ips[idst], alen);
		keylen = (2 + (nwords * 2)) * sizeof(uint32_t);
	}
	return keylen;
}
324
/*
 * connkey_set_addr: overwrite the address for the given direction
 * (di: NPF_SRC or NPF_DST) in an existing connection key.
 */
static __inline void
connkey_set_addr(npf_connkey_t *key, const npf_addr_t *naddr, const int di)
{
	/* Address length in bytes is kept in the low 16 bits of word 0. */
	const u_int alen = key->ck_key[0] & 0xffff;
	/* Addresses start at word 2: source first, then destination. */
	uint32_t *addr = &key->ck_key[2 + ((alen >> 2) * di)];

	KASSERT(alen > 0);
	memcpy(addr, naddr, alen);
}
334
335 static __inline void
connkey_set_id(npf_connkey_t * key,const uint16_t id,const int di)336 connkey_set_id(npf_connkey_t *key, const uint16_t id, const int di)
337 {
338 const uint32_t oid = key->ck_key[1];
339 const u_int shift = 16 * !di;
340 const uint32_t mask = 0xffff0000 >> shift;
341
342 key->ck_key[1] = ((uint32_t)id << shift) | (oid & mask);
343 }
344
345 /*
346 * npf_conn_lookup: lookup if there is an established connection.
347 *
348 * => If found, we will hold a reference for the caller.
349 */
/*
 * npf_conn_lookup: lookup if there is an established connection.
 *
 * => If found, we will hold a reference for the caller.
 */
npf_conn_t *
npf_conn_lookup(const npf_cache_t *npc, const int di, bool *forw)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	npf_connkey_t key;
	u_int flags, cifid;
	bool ok, pforw;

	/* Construct a key and lookup for a connection in the store. */
	if (!npf_conn_conkey(npc, &key, true)) {
		return NULL;
	}
	con = npf_conndb_lookup(conn_db, &key, forw);
	if (con == NULL) {
		return NULL;
	}
	KASSERT(npc->npc_proto == con->c_proto);

	/* Check if connection is active and not expired. */
	flags = con->c_flags;
	ok = (flags & (CONN_ACTIVE | CONN_EXPIRE)) == CONN_ACTIVE;
	if (__predict_false(!ok)) {
		/* Drop the reference taken by npf_conndb_lookup(). */
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/*
	 * Match the interface and the direction of the connection entry
	 * and the packet.  A zero interface ID means a global connection.
	 */
	cifid = con->c_ifid;
	if (__predict_false(cifid && cifid != nbuf->nb_ifid)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}
	pforw = (flags & PFIL_ALL) == di;
	if (__predict_false(*forw != pforw)) {
		atomic_dec_uint(&con->c_refcnt);
		return NULL;
	}

	/* Update the last activity time. */
	getnanouptime(&con->c_atime);
	return con;
}
396
397 /*
398 * npf_conn_inspect: lookup a connection and inspecting the protocol data.
399 *
400 * => If found, we will hold a reference for the caller.
401 */
/*
 * npf_conn_inspect: lookup a connection and inspect the protocol data.
 *
 * => If found, we will hold a reference for the caller.
 * => On failure to regain the mbuf, sets *error to ENOMEM.
 */
npf_conn_t *
npf_conn_inspect(npf_cache_t *npc, const int di, int *error)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	bool forw, ok;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));
	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Query ALG which may lookup connection for us. */
	if ((con = npf_alg_conn(npc, di)) != NULL) {
		/* Note: reference is held. */
		return con;
	}
	if (nbuf_head_mbuf(nbuf) == NULL) {
		*error = ENOMEM;
		return NULL;
	}
	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	/* Main lookup of the connection. */
	if ((con = npf_conn_lookup(npc, di, &forw)) == NULL) {
		return NULL;
	}

	/* Inspect the protocol data and handle state changes. */
	mutex_enter(&con->c_lock);
	ok = npf_state_inspect(npc, &con->c_state, forw);
	mutex_exit(&con->c_lock);

	if (__predict_false(!ok)) {
		/* Invalid: let the rules deal with it. */
		npf_conn_release(con);
		npf_stats_inc(NPF_STAT_INVALID_STATE);
		con = NULL;
	}
	return con;
}
443
444 /*
445 * npf_conn_establish: create a new connection, insert into the global list.
446 *
447 * => Connection is created with the reference held for the caller.
448 * => Connection will be activated on the first reference release.
449 */
/*
 * npf_conn_establish: create a new connection, insert into the global list.
 *
 * => Connection is created with the reference held for the caller.
 * => Connection will be activated on the first reference release.
 * => Returns NULL on allocation failure, untrackable packet or a
 *    duplicate-insertion race.
 */
npf_conn_t *
npf_conn_establish(npf_cache_t *npc, int di, bool per_if)
{
	const nbuf_t *nbuf = npc->npc_nbuf;
	npf_conn_t *con;
	int error = 0;

	KASSERT(!nbuf_flag_p(nbuf, NBUF_DATAREF_RESET));

	if (!npf_conn_trackable_p(npc)) {
		return NULL;
	}

	/* Allocate and initialise the new connection. */
	con = pool_cache_get(conn_cache, PR_NOWAIT);
	if (__predict_false(!con)) {
		return NULL;
	}
	NPF_PRINTF(("NPF: create conn %p\n", con));
	npf_stats_inc(NPF_STAT_CONN_CREATE);

	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	/* Direction bits only; CONN_ACTIVE is set on first release. */
	con->c_flags = (di & PFIL_ALL);
	con->c_refcnt = 0;
	con->c_rproc = NULL;
	con->c_nat = NULL;

	/* Initialize the protocol state. */
	if (!npf_state_init(npc, &con->c_state)) {
		npf_conn_destroy(con);
		return NULL;
	}

	KASSERT(npf_iscached(npc, NPC_IP46));
	npf_connkey_t *fw = &con->c_forw_entry;
	npf_connkey_t *bk = &con->c_back_entry;

	/*
	 * Construct "forwards" and "backwards" keys.  Also, set the
	 * interface ID for this connection (unless it is global).
	 */
	if (!npf_conn_conkey(npc, fw, true) ||
	    !npf_conn_conkey(npc, bk, false)) {
		npf_conn_destroy(con);
		return NULL;
	}
	fw->ck_backptr = bk->ck_backptr = con;
	con->c_ifid = per_if ? nbuf->nb_ifid : 0;
	con->c_proto = npc->npc_proto;

	/*
	 * Set last activity time for a new connection and acquire
	 * a reference for the caller before we make it visible.
	 */
	getnanouptime(&con->c_atime);
	con->c_refcnt = 1;

	/*
	 * Insert both keys (entries representing directions) of the
	 * connection.  At this point it becomes visible, but we activate
	 * the connection later.
	 */
	mutex_enter(&con->c_lock);
	if (!npf_conndb_insert(conn_db, fw, con)) {
		error = EISCONN;
		goto err;
	}
	if (!npf_conndb_insert(conn_db, bk, con)) {
		/* Roll back the "forwards" insertion. */
		npf_conn_t *ret __diagused;
		ret = npf_conndb_remove(conn_db, fw);
		KASSERT(ret == con);
		error = EISCONN;
		goto err;
	}
err:
	/*
	 * If we have hit the duplicate: mark the connection as expired
	 * and let the G/C thread to take care of it.  We cannot do it
	 * here since there might be references acquired already.
	 */
	if (error) {
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		atomic_dec_uint(&con->c_refcnt);
		npf_stats_inc(NPF_STAT_RACE_CONN);
	} else {
		NPF_PRINTF(("NPF: establish conn %p\n", con));
	}

	/* Finally, insert into the connection list. */
	npf_conndb_enqueue(conn_db, con);
	mutex_exit(&con->c_lock);

	return error ? NULL : con;
}
544
/*
 * npf_conn_destroy: release the resources associated with the
 * connection and return it to the pool cache.
 *
 * => The reference count must have dropped to zero.
 */
static void
npf_conn_destroy(npf_conn_t *con)
{
	KASSERT(con->c_refcnt == 0);

	if (con->c_nat) {
		/* Release any NAT structures. */
		npf_nat_destroy(con->c_nat);
	}
	if (con->c_rproc) {
		/* Release the rule procedure. */
		npf_rproc_release(con->c_rproc);
	}

	/* Destroy the state. */
	npf_state_destroy(&con->c_state);
	mutex_destroy(&con->c_lock);

	/* Free the structure, increase the counter. */
	pool_cache_put(conn_cache, con);
	npf_stats_inc(NPF_STAT_CONN_DESTROY);
	NPF_PRINTF(("NPF: conn %p destroyed\n", con));
}
568
569 /*
570 * npf_conn_setnat: associate NAT entry with the connection, update and
571 * re-insert connection entry using the translation values.
572 *
573 * => The caller must be holding a reference.
574 */
/*
 * npf_conn_setnat: associate NAT entry with the connection, update and
 * re-insert connection entry using the translation values.
 *
 * => The caller must be holding a reference.
 * => Returns 0 on success, EINVAL if the connection expired or the key
 *    could not be built, EISCONN on a race with a duplicate.
 */
int
npf_conn_setnat(const npf_cache_t *npc, npf_conn_t *con,
    npf_nat_t *nt, u_int ntype)
{
	/* Map the NAT type to the key direction being rewritten. */
	static const u_int nat_type_dimap[] = {
		[NPF_NATOUT] = NPF_DST,
		[NPF_NATIN] = NPF_SRC,
	};
	npf_connkey_t key, *bk;
	npf_conn_t *ret __diagused;
	npf_addr_t *taddr;
	in_port_t tport;
	u_int tidx;

	KASSERT(con->c_refcnt > 0);

	npf_nat_gettrans(nt, &taddr, &tport);
	KASSERT(ntype == NPF_NATOUT || ntype == NPF_NATIN);
	tidx = nat_type_dimap[ntype];

	/* Construct a "backwards" key. */
	if (!npf_conn_conkey(npc, &key, false)) {
		return EINVAL;
	}

	/* Acquire the lock and check for the races. */
	mutex_enter(&con->c_lock);
	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* The connection got expired. */
		mutex_exit(&con->c_lock);
		return EINVAL;
	}
	KASSERT((con->c_flags & CONN_REMOVED) == 0);

	if (__predict_false(con->c_nat != NULL)) {
		/* Race with a duplicate packet. */
		mutex_exit(&con->c_lock);
		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Remove the "backwards" entry. */
	ret = npf_conndb_remove(conn_db, &con->c_back_entry);
	KASSERT(ret == con);

	/* Set the source/destination IDs to the translation values. */
	bk = &con->c_back_entry;
	connkey_set_addr(bk, taddr, tidx);
	if (tport) {
		connkey_set_id(bk, tport, tidx);
	}

	/* Finally, re-insert the "backwards" entry. */
	if (!npf_conndb_insert(conn_db, bk, con)) {
		/*
		 * Race: we have hit the duplicate, remove the "forwards"
		 * entry and expire our connection; it is no longer valid.
		 */
		ret = npf_conndb_remove(conn_db, &con->c_forw_entry);
		KASSERT(ret == con);

		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		npf_stats_inc(NPF_STAT_RACE_NAT);
		return EISCONN;
	}

	/* Associate the NAT entry and release the lock. */
	con->c_nat = nt;
	mutex_exit(&con->c_lock);
	return 0;
}
648
649 /*
650 * npf_conn_expire: explicitly mark connection as expired.
651 */
/*
 * npf_conn_expire: explicitly mark connection as expired.
 *
 * => Lock-free: the flag is set atomically; the G/C thread performs
 *    the actual removal and destruction.
 */
void
npf_conn_expire(npf_conn_t *con)
{
	/* KASSERT(con->c_refcnt > 0); XXX: npf_nat_freepolicy() */
	atomic_or_uint(&con->c_flags, CONN_EXPIRE);
}
658
659 /*
660 * npf_conn_pass: return true if connection is "pass" one, otherwise false.
661 */
662 bool
npf_conn_pass(const npf_conn_t * con,npf_rproc_t ** rp)663 npf_conn_pass(const npf_conn_t *con, npf_rproc_t **rp)
664 {
665 KASSERT(con->c_refcnt > 0);
666 if (__predict_true(con->c_flags & CONN_PASS)) {
667 *rp = con->c_rproc;
668 return true;
669 }
670 return false;
671 }
672
673 /*
674 * npf_conn_setpass: mark connection as a "pass" one and associate the
675 * rule procedure with it.
676 */
/*
 * npf_conn_setpass: mark connection as a "pass" one and associate the
 * rule procedure with it.
 *
 * => The connection must not be active yet (see npf_conn_release()).
 */
void
npf_conn_setpass(npf_conn_t *con, npf_rproc_t *rp)
{
	KASSERT((con->c_flags & CONN_ACTIVE) == 0);
	KASSERT(con->c_refcnt > 0);
	KASSERT(con->c_rproc == NULL);

	/*
	 * No need for atomic since the connection is not yet active.
	 * If rproc is set, the caller transfers its reference to us,
	 * which will be released on npf_conn_destroy().
	 */
	atomic_or_uint(&con->c_flags, CONN_PASS);
	con->c_rproc = rp;
}
692
693 /*
694 * npf_conn_release: release a reference, which might allow G/C thread
695 * to destroy this connection.
696 */
/*
 * npf_conn_release: release a reference, which might allow G/C thread
 * to destroy this connection.
 *
 * => The first release of a new, non-expired connection activates it.
 */
void
npf_conn_release(npf_conn_t *con)
{
	if ((con->c_flags & (CONN_ACTIVE | CONN_EXPIRE)) == 0) {
		/* Activate: after this, connection is globally visible. */
		atomic_or_uint(&con->c_flags, CONN_ACTIVE);
	}
	KASSERT(con->c_refcnt > 0);
	atomic_dec_uint(&con->c_refcnt);
}
707
708 /*
709 * npf_conn_getnat: return associated NAT data entry and indicate
710 * whether it is a "forwards" or "backwards" stream.
711 */
/*
 * npf_conn_getnat: return associated NAT data entry and indicate
 * whether it is a "forwards" or "backwards" stream.
 *
 * => May return NULL if no NAT entry is associated.
 */
npf_nat_t *
npf_conn_getnat(npf_conn_t *con, const int di, bool *forw)
{
	KASSERT(con->c_refcnt > 0);
	/* "Forwards" if the packet direction matches the connection's. */
	*forw = (con->c_flags & PFIL_ALL) == di;
	return con->c_nat;
}
719
720 /*
721 * npf_conn_expired: criterion to check if connection is expired.
722 */
/*
 * npf_conn_expired: criterion to check if connection is expired.
 *
 * => Expired if explicitly flagged, or if the time since last activity
 *    exceeds the protocol/state-specific expiration time.
 */
static inline bool
npf_conn_expired(const npf_conn_t *con, const struct timespec *tsnow)
{
	const int etime = npf_state_etime(&con->c_state, con->c_proto);
	struct timespec tsdiff;

	if (__predict_false(con->c_flags & CONN_EXPIRE)) {
		/* Explicitly marked to be expired. */
		return true;
	}
	timespecsub(tsnow, &con->c_atime, &tsdiff);
	return tsdiff.tv_sec > etime;
}
736
737 /*
738 * npf_conn_gc: garbage collect the expired connections.
739 *
740 * => Must run in a single-threaded manner.
741 * => If it is a flush request, then destroy all connections.
742 * => If 'sync' is true, then perform passive serialisation.
743 */
/*
 * npf_conn_gc: garbage collect the expired connections.
 *
 * => Must run in a single-threaded manner.
 * => If it is a flush request, then destroy all connections.
 * => If 'sync' is true, then perform passive serialisation.
 *    Note: in that case the caller holds conn_lock and this routine
 *    releases it (see the lock order comment at the top of the file).
 */
void
npf_conn_gc(npf_conndb_t *cd, bool flush, bool sync)
{
	npf_conn_t *con, *prev, *gclist = NULL;
	struct timespec tsnow;

	getnanouptime(&tsnow);

	/*
	 * Scan all connections and check them for expiration.
	 */
	prev = NULL;
	con = npf_conndb_getlist(cd);
	while (con) {
		npf_conn_t *next = con->c_next;

		/* Expired?  Flushing all? */
		if (!npf_conn_expired(con, &tsnow) && !flush) {
			prev = con;
			con = next;
			continue;
		}

		/* Remove both entries of the connection. */
		mutex_enter(&con->c_lock);
		if ((con->c_flags & CONN_REMOVED) == 0) {
			npf_conn_t *ret __diagused;

			ret = npf_conndb_remove(cd, &con->c_forw_entry);
			KASSERT(ret == con);
			ret = npf_conndb_remove(cd, &con->c_back_entry);
			KASSERT(ret == con);
		}

		/* Flag the removal and expiration. */
		atomic_or_uint(&con->c_flags, CONN_REMOVED | CONN_EXPIRE);
		mutex_exit(&con->c_lock);

		/* Move to the G/C list. */
		npf_conndb_dequeue(cd, con, prev);
		con->c_next = gclist;
		gclist = con;

		/* Next.. */
		con = next;
	}
	npf_conndb_settail(cd, prev);

	/*
	 * Ensure it is safe to destroy the connections.
	 * Note: drop the conn_lock (see the lock order).
	 */
	if (sync) {
		mutex_exit(&conn_lock);
		if (gclist) {
			npf_config_enter();
			npf_config_sync();
			npf_config_exit();
		}
	}

	/*
	 * Garbage collect all expired connections.
	 * May need to wait for the references to drain.
	 */
	con = gclist;
	while (con) {
		npf_conn_t *next = con->c_next;

		/*
		 * Destroy only if removed and no references.
		 * Otherwise, wait for a tiny moment and retry the same
		 * connection (note: 'con' is deliberately not advanced).
		 */
		if (__predict_false(con->c_refcnt)) {
			kpause("npfcongc", false, 1, NULL);
			continue;
		}
		npf_conn_destroy(con);
		con = next;
	}
}
825
826 /*
827 * npf_conn_worker: G/C to run from a worker thread.
828 */
/*
 * npf_conn_worker: G/C to run from a worker thread.
 */
static void
npf_conn_worker(void)
{
	mutex_enter(&conn_lock);
	/* Note: the conn_lock will be released (sync == true). */
	npf_conn_gc(conn_db, false, true);
}
836
837 /*
838 * npf_conndb_export: construct a list of connections prepared for saving.
839 * Note: this is expected to be an expensive operation.
840 */
/*
 * npf_conndb_export: construct a list of connections prepared for saving.
 * Note: this is expected to be an expensive operation.
 *
 * => Returns 0; connections are appended to 'conlist'.
 */
int
npf_conndb_export(prop_array_t conlist)
{
	npf_conn_t *con, *prev;

	/*
	 * Note: acquire conn_lock to prevent from the database
	 * destruction and G/C thread.
	 */
	mutex_enter(&conn_lock);
	if (conn_tracking != CONN_TRACKING_ON) {
		/* Nothing to export when tracking is disabled. */
		mutex_exit(&conn_lock);
		return 0;
	}
	prev = NULL;
	con = npf_conndb_getlist(conn_db);
	while (con) {
		npf_conn_t *next = con->c_next;
		prop_dictionary_t cdict;

		/* Skips inactive/expired connections (returns NULL). */
		if ((cdict = npf_conn_export(con)) != NULL) {
			prop_array_add(conlist, cdict);
			prop_object_release(cdict);
		}
		prev = con;
		con = next;
	}
	npf_conndb_settail(conn_db, prev);
	mutex_exit(&conn_lock);
	return 0;
}
872
873 /*
874 * npf_conn_export: serialise a single connection.
875 */
/*
 * npf_conn_export: serialise a single connection.
 *
 * => Returns a new dictionary (caller releases), or NULL if the
 *    connection is not active or already expired.
 */
prop_dictionary_t
npf_conn_export(const npf_conn_t *con)
{
	prop_dictionary_t cdict;
	prop_data_t d;

	if ((con->c_flags & (CONN_ACTIVE|CONN_EXPIRE)) != CONN_ACTIVE) {
		return NULL;
	}
	cdict = prop_dictionary_create();
	prop_dictionary_set_uint32(cdict, "flags", con->c_flags);
	prop_dictionary_set_uint32(cdict, "proto", con->c_proto);
	if (con->c_ifid) {
		const char *ifname = npf_ifmap_getname(con->c_ifid);
		prop_dictionary_set_cstring(cdict, "ifname", ifname);
	}

	/* Raw protocol state blob. */
	d = prop_data_create_data(&con->c_state, sizeof(npf_state_t));
	prop_dictionary_set_and_rel(cdict, "state", d);

	/* Both connection keys, fixed maximum length each. */
	const uint32_t *fkey = con->c_forw_entry.ck_key;
	d = prop_data_create_data(fkey, NPF_CONN_MAXKEYLEN);
	prop_dictionary_set_and_rel(cdict, "forw-key", d);

	const uint32_t *bkey = con->c_back_entry.ck_key;
	d = prop_data_create_data(bkey, NPF_CONN_MAXKEYLEN);
	prop_dictionary_set_and_rel(cdict, "back-key", d);

	if (con->c_nat) {
		npf_nat_export(cdict, con->c_nat);
	}
	return cdict;
}
909
910 /*
911 * npf_conn_import: fully reconstruct a single connection from a
912 * directory and insert into the given database.
913 */
/*
 * npf_conn_import: fully reconstruct a single connection from a
 * dictionary and insert into the given database.
 *
 * => Returns 0 on success, EINVAL on any malformed/duplicate input
 *    (the partially-built connection is destroyed).
 */
int
npf_conn_import(npf_conndb_t *cd, prop_dictionary_t cdict,
    npf_ruleset_t *natlist)
{
	npf_conn_t *con;
	npf_connkey_t *fw, *bk;
	prop_object_t obj;
	const char *ifname;
	const void *d;

	/* Allocate a connection and initialise it (clear first). */
	con = pool_cache_get(conn_cache, PR_WAITOK);
	memset(con, 0, sizeof(npf_conn_t));
	mutex_init(&con->c_lock, MUTEX_DEFAULT, IPL_SOFTNET);
	npf_stats_inc(NPF_STAT_CONN_CREATE);

	prop_dictionary_get_uint32(cdict, "proto", &con->c_proto);
	prop_dictionary_get_uint32(cdict, "flags", &con->c_flags);
	/* Only direction, active and pass flags survive the import. */
	con->c_flags &= PFIL_ALL | CONN_ACTIVE | CONN_PASS;
	getnanouptime(&con->c_atime);

	/* Optional interface binding; fail if it cannot be registered. */
	if (prop_dictionary_get_cstring_nocopy(cdict, "ifname", &ifname) &&
	    (con->c_ifid = npf_ifmap_register(ifname)) == 0) {
		goto err;
	}

	/* Protocol state blob must be exactly sizeof(npf_state_t). */
	obj = prop_dictionary_get(cdict, "state");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != sizeof(npf_state_t)) {
		goto err;
	}
	memcpy(&con->c_state, d, sizeof(npf_state_t));

	/* Reconstruct NAT association, if any. */
	if ((obj = prop_dictionary_get(cdict, "nat")) != NULL &&
	    (con->c_nat = npf_nat_import(obj, natlist, con)) == NULL) {
		goto err;
	}

	/*
	 * Fetch and copy the keys for each direction.
	 */
	obj = prop_dictionary_get(cdict, "forw-key");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != NPF_CONN_MAXKEYLEN) {
		goto err;
	}
	fw = &con->c_forw_entry;
	memcpy(&fw->ck_key, d, NPF_CONN_MAXKEYLEN);

	obj = prop_dictionary_get(cdict, "back-key");
	if ((d = prop_data_data_nocopy(obj)) == NULL ||
	    prop_data_size(obj) != NPF_CONN_MAXKEYLEN) {
		goto err;
	}
	bk = &con->c_back_entry;
	memcpy(&bk->ck_key, d, NPF_CONN_MAXKEYLEN);

	fw->ck_backptr = bk->ck_backptr = con;

	/* Insert the entries and the connection itself. */
	if (!npf_conndb_insert(cd, fw, con)) {
		goto err;
	}
	if (!npf_conndb_insert(cd, bk, con)) {
		/* Roll back the "forwards" insertion on duplicate. */
		npf_conndb_remove(cd, fw);
		goto err;
	}

	NPF_PRINTF(("NPF: imported conn %p\n", con));
	npf_conndb_enqueue(cd, con);
	return 0;
err:
	npf_conn_destroy(con);
	return EINVAL;
}
990
991 #if defined(DDB) || defined(_NPF_TESTING)
992
/*
 * npf_conn_print: dump a single connection for DDB/testing: protocol,
 * flags, timing, both keys (addresses and ports), protocol state and
 * any NAT association.
 */
void
npf_conn_print(const npf_conn_t *con)
{
	const u_int alen = NPF_CONN_GETALEN(&con->c_forw_entry);
	const uint32_t *fkey = con->c_forw_entry.ck_key;
	const uint32_t *bkey = con->c_back_entry.ck_key;
	const u_int proto = con->c_proto;
	struct timespec tsnow, tsdiff;
	const void *src, *dst;
	int etime;

	getnanouptime(&tsnow);
	timespecsub(&tsnow, &con->c_atime, &tsdiff);
	etime = npf_state_etime(&con->c_state, proto);

	printf("%p:\n\tproto %d flags 0x%x tsdiff %d etime %d\n",
	    con, proto, con->c_flags, (int)tsdiff.tv_sec, etime);

	/* Key layout: word 1 holds src-id/dst-id, addresses from word 2. */
	src = &fkey[2], dst = &fkey[2 + (alen >> 2)];
	printf("\tforw %s:%d", npf_addr_dump(src, alen), ntohs(fkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(fkey[1] & 0xffff));

	src = &bkey[2], dst = &bkey[2 + (alen >> 2)];
	printf("\tback %s:%d", npf_addr_dump(src, alen), ntohs(bkey[1] >> 16));
	printf("-> %s:%d\n", npf_addr_dump(dst, alen), ntohs(bkey[1] & 0xffff));

	npf_state_dump(&con->c_state);
	if (con->c_nat) {
		npf_nat_dump(con->c_nat);
	}
}
1024
1025 #endif
1026