/*	$NetBSD: bpf_filter.c,v 1.71 2016/06/07 01:06:28 pgoyette Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.71 2016/06/07 01:06:28 pgoyette Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#ifdef _KERNEL
#include <sys/module.h>
#endif

#define	__BPF_PRIVATE
#include <net/bpf.h>

#ifdef _KERNEL

bpf_ctx_t *
bpf_create(void)
{
	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
	kmem_free(bc, sizeof(bpf_ctx_t));
}

int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
	bc->copfuncs = funcs;
	bc->nfuncs = n;
	return 0;
}

int
bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
{
	if (nwords > BPF_MAX_MEMWORDS || (preinited >> nwords) != 0) {
		return EINVAL;
	}
	bc->extwords = nwords;
	bc->preinited = preinited;
	return 0;
}
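
/*
 * Illustrative sketch (an assumption, not part of the original source):
 * how a kernel subsystem might configure a BPF context with one
 * coprocessor function and two external memory words, the first of
 * which is pre-initialised by the caller.  The names "my_cop" and
 * "my_attach" are hypothetical.
 *
 *	static uint32_t
 *	my_cop(const bpf_ctx_t *bc, bpf_args_t *args, uint32_t A)
 *	{
 *		return A;	// coprocessor index 0: identity
 *	}
 *
 *	static const bpf_copfunc_t my_copfuncs[] = { my_cop };
 *
 *	static bpf_ctx_t *
 *	my_attach(void)
 *	{
 *		bpf_ctx_t *bc = bpf_create();
 *		bpf_set_cop(bc, my_copfuncs, __arraycount(my_copfuncs));
 *		bpf_set_extmem(bc, 2, BPF_MEMWORD_INIT(0));
 *		return bc;
 *	}
 */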

#endif

#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
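/*
 * MINDEX(len, m, k): walk the mbuf chain until 'm' is the mbuf that
 * contains byte offset 'k' (adjusting 'k' to be relative to that mbuf
 * and setting 'len' to its length).  If the chain is exhausted first,
 * return 0 from the enclosing function, i.e. reject the packet.
 */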
#define MINDEX(len, m, k) 		\
{					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	}				\
}

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))

uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || (len - k) + m0->m_len < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	*err = 1;
	MINDEX(len, m, k);
	*err = 0;
	return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet and buflen is the
 * amount of packet data present in the buffer.
 */
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};

	return bpf_filter_ext(NULL, pc, &args);
}
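
/*
 * Example (an illustrative sketch, not drawn from this file): a minimal
 * filter that accepts Ethernet frames whose type field is IPv4 (0x0800)
 * and rejects everything else.  "pkt" and "pktlen" are placeholders; for
 * a contiguous buffer, buflen equals the number of bytes present, while
 * a kernel caller may instead pass an mbuf chain with buflen == 0.
 *
 *	static const struct bpf_insn ip_prog[] = {
 *		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),		// A <- ethertype
 *		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),		// accept whole packet
 *		BPF_STMT(BPF_RET|BPF_K, 0),			// reject
 *	};
 *
 *	u_int keep = bpf_filter(ip_prog, pkt, pktlen, pktlen);
 */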

u_int
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;
#ifndef _KERNEL
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args_store = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bpf_args_t * const args = &args_store;
#else
	const uint8_t * const p = args->pkt;
#endif
	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: it is safe to leave the memory words uninitialised, as the
	 * validation step ensures that no word is read before it is written.
	 */
	A = 0;
	X = 0;
	--pc;

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = args->wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = args->wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
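			/*
			 * X <- 4 * (low nibble of the byte at offset k),
			 * i.e. the IPv4 header length in bytes when k
			 * points at the start of the IP header.
			 */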
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = args->mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = args->mem[pc->k];
			continue;

		case BPF_ST:
			args->mem[pc->k] = A;
			continue;

		case BPF_STX:
			args->mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_MOD|BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_XOR|BPF_X:
			A ^= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_MOD|BPF_K:
			A %= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_XOR|BPF_K:
			A ^= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;
		}
	}
}

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */

#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(NULL, f, signed_len);
}

int
bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	bpf_memword_init_t *mem, invalid;
	size_t size;
	const size_t extwords = bc ? bc->extwords : 0;
	const size_t memwords = extwords ? extwords : BPF_MEMWORDS;
	const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;
#else
	const size_t memwords = BPF_MEMWORDS;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (f[len - 1].code != (BPF_RET|BPF_K) &&
	    f[len - 1].code != (BPF_RET|BPF_A)) {
		return 0;
	}

#if defined(KERNEL) || defined(_KERNEL)
	/* Note: only the pre-initialised memory words are valid on startup. */
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~preinited;
#endif

	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * A stricter check against the actual
				 * packet length is done at run time.
				 */
				if (p->k >= memwords)
					goto out;
				/* reject reads of memory words that may be uninitialised */
				if (invalid & BPF_MEMWORD_INIT(p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= memwords)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~BPF_MEMWORD_INIT(p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_XOR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			switch (BPF_MISCOP(p->code)) {
			case BPF_COP:
			case BPF_COPX:
				/* In-kernel COP use only. */
#if defined(KERNEL) || defined(_KERNEL)
				if (bc == NULL || bc->copfuncs == NULL)
					goto out;
				if (BPF_MISCOP(p->code) == BPF_COP &&
				    p->k >= bc->nfuncs) {
					goto out;
				}
				break;
#else
				goto out;
#endif
			default:
				break;
			}
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}
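
/*
 * Example (an illustrative sketch, not drawn from this file): a program
 * that the validator rejects because it loads scratch memory word 0
 * before anything has been stored there, so that word's "invalid" bit is
 * still set when the BPF_LD|BPF_MEM instruction is checked.
 *
 *	static const struct bpf_insn bad_prog[] = {
 *		BPF_STMT(BPF_LD|BPF_MEM, 0),	// read of uninitialised M[0]
 *		BPF_STMT(BPF_RET|BPF_K, 0),
 *	};
 *
 *	// bpf_validate_ext(NULL, bad_prog, 2) returns 0 (invalid).
 */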

/* Kernel module interface */

#ifdef _KERNEL
MODULE(MODULE_CLASS_MISC, bpf_filter, NULL);

static int
bpf_filter_modcmd(modcmd_t cmd, void *opaque)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
	case MODULE_CMD_FINI:
		return 0;
	default:
		return ENOTTY;
	}
}
#endif