xref: /freebsd/sys/net/pfil.c (revision 4f52dfbb)
/*	$FreeBSD$ */
/*	$NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>

static struct mtx pfil_global_lock;

MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
  MTX_DEF);

static struct packet_filter_hook *pfil_chain_get(int, struct pfil_head *);
static int pfil_chain_add(pfil_chain_t *, struct packet_filter_hook *, int);
static int pfil_chain_remove(pfil_chain_t *, void *, void *);
static int pfil_add_hook_priv(void *, void *, int, struct pfil_head *, bool);

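/*
 * The list of registered pfil heads and the rmlock shared by heads that
 * do not use a private lock are virtualized: each vnet gets its own
 * instance of both.
 */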
LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define	V_pfil_head_list	VNET(pfil_head_list)
VNET_DEFINE(struct rmlock, pfil_lock);
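
/*
 * Locking: the global pfil_global_lock mutex protects the list of pfil
 * heads.  The hook chains hanging off each head are protected by an
 * rmlock; by default this is the vnet-wide V_pfil_lock, but a head
 * created with PFIL_FLAG_PRIVATE_LOCK gets its own embedded rmlock.
 * The rmlocks are created with RM_RECURSE, so read-side holders may
 * recursively re-acquire them.
 */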

#define	PFIL_LOCK_INIT_REAL(l, t)	\
	rm_init_flags(l, "PFil " t " rmlock", RM_RECURSE)
#define	PFIL_LOCK_DESTROY_REAL(l)	\
	rm_destroy(l)
#define	PFIL_LOCK_INIT(p)	do {			\
	if ((p)->flags & PFIL_FLAG_PRIVATE_LOCK) {	\
		PFIL_LOCK_INIT_REAL(&(p)->ph_lock, "private");	\
		(p)->ph_plock = &(p)->ph_lock;		\
	} else						\
		(p)->ph_plock = &V_pfil_lock;		\
} while (0)
#define	PFIL_LOCK_DESTROY(p)	do {			\
	if ((p)->flags & PFIL_FLAG_PRIVATE_LOCK)	\
		PFIL_LOCK_DESTROY_REAL((p)->ph_plock);	\
} while (0)

#define	PFIL_TRY_RLOCK(p, t)	rm_try_rlock((p)->ph_plock, (t))
#define	PFIL_RLOCK(p, t)	rm_rlock((p)->ph_plock, (t))
#define	PFIL_WLOCK(p)		rm_wlock((p)->ph_plock)
#define	PFIL_RUNLOCK(p, t)	rm_runlock((p)->ph_plock, (t))
#define	PFIL_WUNLOCK(p)		rm_wunlock((p)->ph_plock)
#define	PFIL_WOWNED(p)		rm_wowned((p)->ph_plock)

#define	PFIL_HEADLIST_LOCK()	mtx_lock(&pfil_global_lock)
#define	PFIL_HEADLIST_UNLOCK()	mtx_unlock(&pfil_global_lock)

/*
 * pfil_run_hooks() runs the specified packet filter hook chain on *mp for
 * the given direction.  A non-zero return value means a hook rejected the
 * packet; *mp is set to NULL if a hook consumed it.
 */
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
    int dir, int flags, struct inpcb *inp)
{
	struct rm_priotracker rmpt;
	struct packet_filter_hook *pfh;
	struct mbuf *m = *mp;
	int rv = 0;

	PFIL_RLOCK(ph, &rmpt);
	KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
	for (pfh = pfil_chain_get(dir, ph); pfh != NULL;
	     pfh = TAILQ_NEXT(pfh, pfil_chain)) {
		if (pfh->pfil_func_flags != NULL) {
			rv = (*pfh->pfil_func_flags)(pfh->pfil_arg, &m, ifp,
			    dir, flags, inp);
			if (rv != 0 || m == NULL)
				break;
		}
		if (pfh->pfil_func != NULL) {
			rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
			    inp);
			if (rv != 0 || m == NULL)
				break;
		}
	}
	PFIL_RUNLOCK(ph, &rmpt);
	*mp = m;
	return (rv);
}
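
/*
 * Illustrative sketch (not part of this file): a protocol input path
 * typically guards the call with PFIL_HOOKED() and treats a non-zero
 * return value or a NULL mbuf as "packet filtered or consumed", e.g.
 * using the IPv4 head V_inet_pfil_hook as an example:
 *
 *	if (PFIL_HOOKED(&V_inet_pfil_hook)) {
 *		if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp,
 *		    PFIL_IN, 0, NULL) != 0 || m == NULL)
 *			return;
 *	}
 */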

static struct packet_filter_hook *
pfil_chain_get(int dir, struct pfil_head *ph)
{

	if (dir == PFIL_IN)
		return (TAILQ_FIRST(&ph->ph_in));
	else if (dir == PFIL_OUT)
		return (TAILQ_FIRST(&ph->ph_out));
	else
		return (NULL);
}

/*
 * pfil_try_rlock() acquires the rm reader lock for the specified head
 * if it can be taken without blocking.
 */
int
pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	return (PFIL_TRY_RLOCK(ph, tracker));
}

/*
 * pfil_rlock() acquires the rm reader lock for the specified head.
 */
void
pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RLOCK(ph, tracker);
}

/*
 * pfil_runlock() releases the reader lock for the specified head.
 */
void
pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RUNLOCK(ph, tracker);
}

/*
 * pfil_wlock() acquires the writer lock for the specified head.
 */
void
pfil_wlock(struct pfil_head *ph)
{

	PFIL_WLOCK(ph);
}

/*
 * pfil_wunlock() releases the writer lock for the specified head.
 */
void
pfil_wunlock(struct pfil_head *ph)
{

	PFIL_WUNLOCK(ph);
}

/*
 * pfil_wowned() returns non-zero if the current thread holds the writer
 * lock on the specified head.
 */
int
pfil_wowned(struct pfil_head *ph)
{

	return (PFIL_WOWNED(ph));
}

/*
 * pfil_head_register() registers a pfil_head with the packet filter hook
 * mechanism.
 */
int
pfil_head_register(struct pfil_head *ph)
{
	struct pfil_head *lph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
		if (ph->ph_type == lph->ph_type &&
		    ph->ph_un.phu_val == lph->ph_un.phu_val) {
			PFIL_HEADLIST_UNLOCK();
			return (EEXIST);
		}
	}
	PFIL_LOCK_INIT(ph);
	ph->ph_nhooks = 0;
	TAILQ_INIT(&ph->ph_in);
	TAILQ_INIT(&ph->ph_out);
	LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	return (0);
}
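
/*
 * Illustrative sketch (not part of this file): a protocol usually embeds
 * a pfil_head in its per-vnet state and registers it at initialization
 * time, roughly as follows (using the IPv4 head as an example):
 *
 *	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
 *	V_inet_pfil_hook.ph_af = AF_INET;
 *	if ((error = pfil_head_register(&V_inet_pfil_hook)) != 0)
 *		printf("%s: WARNING: unable to register pfil hook, "
 *		    "error %d\n", __func__, error);
 */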

/*
 * pfil_head_unregister() removes a pfil_head from the packet filter hook
 * mechanism.  The producer of the hook promises that all outstanding
 * invocations of the hook have completed before it unregisters the hook.
 */
int
pfil_head_unregister(struct pfil_head *ph)
{
	struct packet_filter_hook *pfh, *pfnext;

	PFIL_HEADLIST_LOCK();
	LIST_REMOVE(ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	PFIL_LOCK_DESTROY(ph);
	return (0);
}

/*
 * pfil_head_get() returns the pfil_head for a given key/dlt, i.e. the
 * (type, value) pair the head was registered with.
 */
struct pfil_head *
pfil_head_get(int type, u_long val)
{
	struct pfil_head *ph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
		if (ph->ph_type == type && ph->ph_un.phu_val == val)
			break;
	PFIL_HEADLIST_UNLOCK();
	return (ph);
}
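
/*
 * Illustrative sketch (not part of this file): a packet filter looks up
 * the head it wants to attach to by (type, value), e.g.
 *
 *	struct pfil_head *pfh_inet;
 *
 *	pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
 *	if (pfh_inet == NULL)
 *		return (ENOENT);
 */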

/*
 * pfil_add_hook_flags() adds a function to the packet filter hook.  The
 * flags are:
 *	PFIL_IN		call me on incoming packets
 *	PFIL_OUT	call me on outgoing packets
 *	PFIL_ALL	call me on all of the above
 *	PFIL_WAITOK	OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook_flags(pfil_func_flags_t func, void *arg, int flags,
    struct pfil_head *ph)
{
	return (pfil_add_hook_priv(func, arg, flags, ph, true));
}

/*
 * pfil_add_hook() adds a function to the packet filter hook.  The
 * flags are:
 *	PFIL_IN		call me on incoming packets
 *	PFIL_OUT	call me on outgoing packets
 *	PFIL_ALL	call me on all of the above
 *	PFIL_WAITOK	OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	return (pfil_add_hook_priv(func, arg, flags, ph, false));
}
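
/*
 * Illustrative sketch (not part of this file): a filter attaches its
 * check function (a hypothetical my_check() of type pfil_func_t here)
 * for both directions in one call, using a head obtained as above:
 *
 *	error = pfil_add_hook(my_check, NULL,
 *	    PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh_inet);
 */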

static int
pfil_add_hook_priv(void *func, void *arg, int flags,
    struct pfil_head *ph, bool hasflags)
{
	struct packet_filter_hook *pfh1 = NULL;
	struct packet_filter_hook *pfh2 = NULL;
	int err;

	if (flags & PFIL_IN) {
		pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh1 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	if (flags & PFIL_OUT) {
		pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh2 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		pfh1->pfil_func_flags = hasflags ? func : NULL;
		pfh1->pfil_func = hasflags ? NULL : func;
		pfh1->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
		if (err)
			goto locked_error;
		ph->ph_nhooks++;
	}
	if (flags & PFIL_OUT) {
		pfh2->pfil_func_flags = hasflags ? func : NULL;
		pfh2->pfil_func = hasflags ? NULL : func;
		pfh2->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
		if (err) {
			if (flags & PFIL_IN) {
				pfil_chain_remove(&ph->ph_in, func, arg);
				/*
				 * pfil_chain_remove() already freed pfh1;
				 * don't free it again below.
				 */
				pfh1 = NULL;
			}
			goto locked_error;
		}
		ph->ph_nhooks++;
	}
	PFIL_WUNLOCK(ph);
	return (0);
locked_error:
	PFIL_WUNLOCK(ph);
error:
	if (pfh1 != NULL)
		free(pfh1, M_IFADDR);
	if (pfh2 != NULL)
		free(pfh2, M_IFADDR);
	return (err);
}

/*
 * pfil_remove_hook_flags() removes a specific function (added with
 * pfil_add_hook_flags()) from the packet filter hook chain.
 */
int
pfil_remove_hook_flags(pfil_func_flags_t func, void *arg, int flags,
    struct pfil_head *ph)
{
	return (pfil_remove_hook((pfil_func_t)func, arg, flags, ph));
}

/*
 * pfil_remove_hook() removes a specific function from the packet filter hook
 * chain.
 */
int
pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	int err = 0;

	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		err = pfil_chain_remove(&ph->ph_in, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	if ((err == 0) && (flags & PFIL_OUT)) {
		err = pfil_chain_remove(&ph->ph_out, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	PFIL_WUNLOCK(ph);
	return (err);
}
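
/*
 * Illustrative sketch (not part of this file): detaching is symmetric
 * with attaching; the same function/argument pair and direction flags
 * must be passed back, e.g.
 *
 *	(void)pfil_remove_hook(my_check, NULL, PFIL_IN | PFIL_OUT, pfh_inet);
 */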

/*
 * Internal: Add a new pfil hook into a hook chain.
 */
static int
pfil_chain_add(pfil_chain_t *chain, struct packet_filter_hook *pfh1, int flags)
{
	struct packet_filter_hook *pfh;

	/*
	 * First make sure the hook is not already there.
	 */
	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (((pfh->pfil_func != NULL && pfh->pfil_func == pfh1->pfil_func) ||
		    (pfh->pfil_func_flags != NULL &&
		     pfh->pfil_func_flags == pfh1->pfil_func_flags)) &&
		    pfh->pfil_arg == pfh1->pfil_arg)
			return (EEXIST);

	/*
	 * Input hooks are inserted at the head of the chain and output
	 * hooks at the tail, so that incoming and outgoing packets
	 * traverse the same set of filters in opposite order, i.e. the
	 * same path is followed into and out of the kernel.
	 */
	if (flags & PFIL_IN)
		TAILQ_INSERT_HEAD(chain, pfh1, pfil_chain);
	else
		TAILQ_INSERT_TAIL(chain, pfh1, pfil_chain);
	return (0);
}

/*
 * Internal: Remove a pfil hook from a hook chain.
 */
static int
pfil_chain_remove(pfil_chain_t *chain, void *func, void *arg)
{
	struct packet_filter_hook *pfh;

	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if ((pfh->pfil_func == func || pfh->pfil_func_flags == func) &&
		    pfh->pfil_arg == arg) {
			TAILQ_REMOVE(chain, pfh, pfil_chain);
			free(pfh, M_IFADDR);
			return (0);
		}
	return (ENOENT);
}

/*
 * Stuff that must be initialized for every instance (including the first of
 * course).
 */
static void
vnet_pfil_init(const void *unused __unused)
{

	LIST_INIT(&V_pfil_head_list);
	PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
}

/*
 * Called for the removal of each instance.
 */
static void
vnet_pfil_uninit(const void *unused __unused)
{

	KASSERT(LIST_EMPTY(&V_pfil_head_list),
	    ("%s: pfil_head_list %p not empty", __func__, &V_pfil_head_list));
	PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
}

/*
 * Starting up.
 *
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 * Make sure the pfil bits are initialized first, before any possible
 * subsystem which might piggyback on SI_SUB_PROTO_PFIL.
 */
VNET_SYSINIT(vnet_pfil_init, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_init, NULL);

/*
 * Closing up shop.  These are done in REVERSE ORDER.  Not called on reboot.
 *
 * VNET_SYSUNINIT is called for each vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_uninit, NULL);