/*	$NetBSD: npf.c,v 1.4 2011/02/02 02:20:25 rmind Exp $	*/

/*-
 * Copyright (c) 2009-2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NPF main: dynamic load/initialisation and unload routines.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf.c,v 1.4 2011/02/02 02:20:25 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/module.h>
#include <sys/percpu.h>
#include <sys/rwlock.h>
#include <sys/socketvar.h>
#include <sys/uio.h>

#include "npf_impl.h"

/*
 * Module and device structures.
 */
MODULE(MODULE_CLASS_MISC, npf, NULL);

void		npfattach(int);

/* Forward declaration: npf_init() calls npf_fini() before it is defined. */
static int	npf_fini(void);

static int	npf_dev_open(dev_t, int, int, lwp_t *);
static int	npf_dev_close(dev_t, int, int, lwp_t *);
static int	npf_dev_ioctl(dev_t, u_long, void *, int, lwp_t *);
static int	npf_dev_poll(dev_t, int, lwp_t *);
static int	npf_dev_read(dev_t, struct uio *, int);

typedef struct {
	npf_ruleset_t *		n_rules;
	npf_tableset_t *	n_tables;
	npf_ruleset_t *		n_nat_rules;
} npf_core_t;

static void	npf_core_destroy(npf_core_t *);
static int	npfctl_stats(void *);

static krwlock_t		npf_lock		__cacheline_aligned;
static npf_core_t *		npf_core		__cacheline_aligned;
static percpu_t *		npf_stats_percpu	__read_mostly;

const struct cdevsw npf_cdevsw = {
	npf_dev_open, npf_dev_close, npf_dev_read, nowrite, npf_dev_ioctl,
	nostop, notty, npf_dev_poll, nommap, nokqfilter, D_OTHER | D_MPSAFE
};

static int
npf_init(void)
{
#ifdef _MODULE
	devmajor_t bmajor = NODEVMAJOR, cmajor = NODEVMAJOR;
#endif
	npf_ruleset_t *rset, *nset;
	npf_tableset_t *tset;
	int error = 0;

	rw_init(&npf_lock);
	npf_stats_percpu = percpu_alloc(NPF_STATS_SIZE);
	npf_tableset_sysinit();
	npf_session_sysinit();
	npf_nat_sysinit();
	npf_alg_sysinit();
	npflogattach(1);

	/* Load empty configuration. */
	rset = npf_ruleset_create();
	tset = npf_tableset_create();
	nset = npf_ruleset_create();
	npf_reload(rset, tset, nset);
	KASSERT(npf_core != NULL);

#ifdef _MODULE
	/* Attach /dev/npf device. */
	error = devsw_attach("npf", NULL, &bmajor, &npf_cdevsw, &cmajor);
	if (error) {
		/* It will call devsw_detach(), which is safe. */
		(void)npf_fini();
	}
#endif
	return error;
}

static int
npf_fini(void)
{

	/*
	 * First, detach the device, remove the pfil hooks, unload the
	 * existing configuration and destroy its structures.
	 */
#ifdef _MODULE
	devsw_detach(NULL, &npf_cdevsw);
#endif
	npf_unregister_pfil();
	npf_core_destroy(npf_core);
	npflogdetach();

	/* Note: the order of the following calls matters. */
	npf_nat_sysfini();
	npf_alg_sysfini();
	npf_session_sysfini();
	npf_tableset_sysfini();
	percpu_free(npf_stats_percpu, NPF_STATS_SIZE);
	rw_destroy(&npf_lock);

	return 0;
}

/*
 * Module interface.
 */
static int
npf_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return npf_init();
	case MODULE_CMD_FINI:
		return npf_fini();
	default:
		return ENOTTY;
	}
	return 0;
}

void
npfattach(int nunits)
{

	/* Void. */
}

static int
npf_dev_open(dev_t dev, int flag, int mode, lwp_t *l)
{

	/* Available only for super-user. */
	if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
		return EPERM;
	}
	return 0;
}

static int
npf_dev_close(dev_t dev, int flag, int mode, lwp_t *l)
{

	return 0;
}

static int
npf_dev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	int error;

	/* Available only for super-user. */
	if (kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, NULL)) {
		return EPERM;
	}

	switch (cmd) {
	case IOC_NPF_VERSION:
		*(int *)data = NPF_VERSION;
		error = 0;
		break;
	case IOC_NPF_SWITCH:
		error = npfctl_switch(data);
		break;
	case IOC_NPF_RELOAD:
		error = npfctl_reload(cmd, data);
		break;
	case IOC_NPF_TABLE:
		error = npfctl_table(data);
		break;
	case IOC_NPF_STATS:
		error = npfctl_stats(data);
		break;
	case IOC_NPF_SESSIONS_SAVE:
		error = npfctl_sessions_save(cmd, data);
		break;
	case IOC_NPF_SESSIONS_LOAD:
		error = npfctl_sessions_load(cmd, data);
		break;
	case IOC_NPF_UPDATE_RULE:
		error = npfctl_update_rule(cmd, data);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return error;
}

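/*
 * Example (illustrative sketch, not part of the driver): a userland control
 * program could exercise the ioctl interface above roughly as follows.  The
 * header path and the helper name are assumptions made for illustration; only
 * IOC_NPF_VERSION and its int argument are taken from the code above.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *	#include <err.h>
 *	#include <net/npf/npf.h>	// assumed location of IOC_NPF_* definitions
 *
 *	static int
 *	npf_query_version(void)
 *	{
 *		int fd, ver;
 *
 *		// Opening the control device requires super-user privileges,
 *		// as enforced by npf_dev_open() above.
 *		fd = open("/dev/npf", O_RDONLY);
 *		if (fd == -1)
 *			err(EXIT_FAILURE, "open(/dev/npf)");
 *		// IOC_NPF_VERSION fills in a single int with NPF_VERSION.
 *		if (ioctl(fd, IOC_NPF_VERSION, &ver) == -1)
 *			err(EXIT_FAILURE, "ioctl(IOC_NPF_VERSION)");
 *		close(fd);
 *		return ver;
 *	}
 */
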
static int
npf_dev_poll(dev_t dev, int events, lwp_t *l)
{

	return ENOTSUP;
}

static int
npf_dev_read(dev_t dev, struct uio *uio, int flag)
{

	return ENOTSUP;
}

/*
 * NPF core loading/reloading/unloading mechanism.
 */

static void
npf_core_destroy(npf_core_t *nc)
{

	npf_ruleset_destroy(nc->n_rules);
	npf_ruleset_destroy(nc->n_nat_rules);
	npf_tableset_destroy(nc->n_tables);
	kmem_free(nc, sizeof(npf_core_t));
}

/*
 * npf_reload: atomically load the new ruleset, tableset and NAT policies,
 * and then destroy the old (unloaded) structures.
 */
void
npf_reload(npf_ruleset_t *rset, npf_tableset_t *tset, npf_ruleset_t *nset)
{
	npf_core_t *nc, *onc;

	/* Setup a new core structure. */
	nc = kmem_alloc(sizeof(npf_core_t), KM_SLEEP);
	nc->n_rules = rset;
	nc->n_tables = tset;
	nc->n_nat_rules = nset;

	/* Lock and load the core structure. */
	rw_enter(&npf_lock, RW_WRITER);
	onc = atomic_swap_ptr(&npf_core, nc);
	if (onc) {
		/* Reload only necessary NAT policies. */
		npf_ruleset_natreload(nset, onc->n_nat_rules);
	}
	/* Unlock.  Everything goes "live" now. */
	rw_exit(&npf_lock);

	/* Turn on/off session tracking accordingly. */
	npf_session_tracking(true);

	if (onc) {
		/* Destroy unloaded structures. */
		npf_core_destroy(onc);
	}
}

void
npf_core_enter(void)
{
	rw_enter(&npf_lock, RW_READER);
}

npf_ruleset_t *
npf_core_ruleset(void)
{
	KASSERT(rw_lock_held(&npf_lock));
	return npf_core->n_rules;
}

npf_ruleset_t *
npf_core_natset(void)
{
	KASSERT(rw_lock_held(&npf_lock));
	return npf_core->n_nat_rules;
}

npf_tableset_t *
npf_core_tableset(void)
{
	KASSERT(rw_lock_held(&npf_lock));
	return npf_core->n_tables;
}

void
npf_core_exit(void)
{
	rw_exit(&npf_lock);
}

bool
npf_core_locked(void)
{
	return rw_lock_held(&npf_lock);
}

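/*
 * Example (illustrative sketch, not part of this file): the typical
 * reader-side pattern for the accessors above.  The active configuration is
 * only touched between npf_core_enter() and npf_core_exit(), which take the
 * reader side of the lock that npf_reload() takes as a writer.  The function
 * name below is hypothetical; only the npf_core_*() calls are from this file.
 *
 *	static void
 *	example_with_active_config(void)
 *	{
 *		npf_ruleset_t *rlset;
 *		npf_tableset_t *tblset;
 *
 *		npf_core_enter();		// take the reader lock
 *		rlset = npf_core_ruleset();	// active ruleset
 *		tblset = npf_core_tableset();	// active tableset
 *		// ... inspect the packet against rlset and tblset here ...
 *		npf_core_exit();		// drop the lock; the pointers
 *						// must not be used afterwards
 *	}
 */
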
/*
 * NPF statistics interface.
 */

void
npf_stats_inc(npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf_stats_percpu);
	stats[st]++;
	percpu_putref(npf_stats_percpu);
}

void
npf_stats_dec(npf_stats_t st)
{
	uint64_t *stats = percpu_getref(npf_stats_percpu);
	stats[st]--;
	percpu_putref(npf_stats_percpu);
}

static void
npf_stats_collect(void *mem, void *arg, struct cpu_info *ci)
{
	uint64_t *percpu_stats = mem, *full_stats = arg;
	int i;

	for (i = 0; i < NPF_STATS_COUNT; i++) {
		full_stats[i] += percpu_stats[i];
	}
}

/*
 * npfctl_stats: export collected statistics.
 */
static int
npfctl_stats(void *data)
{
	uint64_t *fullst, *uptr = *(uint64_t **)data;
	int error;

	fullst = kmem_zalloc(NPF_STATS_SIZE, KM_SLEEP);
	percpu_foreach(npf_stats_percpu, npf_stats_collect, fullst);
	error = copyout(fullst, uptr, NPF_STATS_SIZE);
	kmem_free(fullst, NPF_STATS_SIZE);
	return error;
}
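
/*
 * Example (illustrative sketch, not part of the driver): how a userland
 * program could fetch the statistics exported by npfctl_stats() above.
 * Note that IOC_NPF_STATS takes the address of a pointer to the user buffer,
 * matching the *(uint64_t **)data dereference above; the kernel then copies
 * NPF_STATS_SIZE bytes into that buffer.  The header path and the helper
 * name are assumptions made for illustration.
 *
 *	#include <sys/ioctl.h>
 *	#include <stdint.h>
 *	#include <net/npf/npf.h>	// assumed: NPF_STATS_COUNT, IOC_NPF_STATS
 *
 *	static int
 *	npf_fetch_stats(int fd, uint64_t stats[NPF_STATS_COUNT])
 *	{
 *		uint64_t *ptr = stats;
 *
 *		// Pass a pointer to the buffer pointer, as expected by
 *		// npfctl_stats().
 *		return ioctl(fd, IOC_NPF_STATS, &ptr);
 *	}
 */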