xref: /dragonfly/sys/net/bpf_filter.c (revision bb8c85ff)
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 *
 * $FreeBSD: src/sys/net/bpf_filter.c,v 1.17 1999/12/29 04:38:31 peter Exp $
 */

#include <sys/systm.h>
#include <sys/param.h>

#if defined(sparc) || defined(mips) || defined(ibm032)
#define BPF_ALIGN
#endif

#ifndef BPF_ALIGN
#define EXTRACT_SHORT(p)	((u_int16_t)ntohs(*(u_int16_t *)p))
#define EXTRACT_LONG(p)		(ntohl(*(u_int32_t *)p))
#else
#define EXTRACT_SHORT(p)\
	((u_int16_t)\
		((u_int16_t)*((u_char *)p+0)<<8|\
		 (u_int16_t)*((u_char *)p+1)<<0))
#define EXTRACT_LONG(p)\
		((u_int32_t)*((u_char *)p+0)<<24|\
		 (u_int32_t)*((u_char *)p+1)<<16|\
		 (u_int32_t)*((u_char *)p+2)<<8|\
		 (u_int32_t)*((u_char *)p+3)<<0)
#endif
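/*
 * Either variant yields the value in host order from big-endian (network
 * order) bytes; e.g. EXTRACT_SHORT() applied to the bytes {0x08, 0x00}
 * evaluates to 0x0800 on any host.
 */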

#ifdef _KERNEL
#include <sys/mbuf.h>
#endif
#include <net/bpf.h>
#ifdef _KERNEL
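/*
 * Advance the mbuf pointer m and the offset k until k falls within the
 * current mbuf; if the chain is exhausted first, return 0 from the
 * enclosing function (i.e. reject the packet).
 */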
#define MINDEX(m, k)							\
{									\
	int len = m->m_len;						\
									\
	while (k >= len) {						\
		k -= len;						\
		m = m->m_next;						\
		if (m == NULL)						\
			return 0;					\
		len = m->m_len;						\
	}								\
}

extern int	bpf_maxbufsize;

static u_int16_t	m_xhalf (struct mbuf *m, bpf_u_int32 k, int *err);
static u_int32_t	m_xword (struct mbuf *m, bpf_u_int32 k, int *err);

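/*
 * Fetch a 32-bit word in network byte order starting at offset k into the
 * mbuf chain m; the word may be split across mbufs.  On success *err is
 * cleared; if the chain is too short, *err is set and 0 is returned.
 */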
static u_int32_t
m_xword(struct mbuf *m, bpf_u_int32 k, int *err)
{
	size_t len;
	u_char *cp, *np;
	struct mbuf *m0;

	len = m->m_len;
	while (k >= len) {
		k -= len;
		m = m->m_next;
		if (m == NULL)
			goto bad;
		len = m->m_len;
	}
	cp = mtod(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == NULL || m0->m_len + len - k < 4)
		goto bad;
	*err = 0;
	np = mtod(m0, u_char *);
	switch (len - k) {

	case 1:
		return
		    ((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)np[0] << 16) |
		    ((u_int32_t)np[1] << 8)  |
		    (u_int32_t)np[2];

	case 2:
		return
		    ((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)cp[1] << 16) |
		    ((u_int32_t)np[0] << 8) |
		    (u_int32_t)np[1];

	default:
		return
		    ((u_int32_t)cp[0] << 24) |
		    ((u_int32_t)cp[1] << 16) |
		    ((u_int32_t)cp[2] << 8) |
		    (u_int32_t)np[0];
	}
    bad:
	*err = 1;
	return 0;
}

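/*
 * Fetch a 16-bit halfword in network byte order starting at offset k into
 * the mbuf chain m, handling the case where the two bytes straddle an mbuf
 * boundary.  On success *err is cleared; on failure *err is set and 0 is
 * returned.
 */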
static u_int16_t
m_xhalf(struct mbuf *m, bpf_u_int32 k, int *err)
{
	size_t len;
	u_char *cp;
	struct mbuf *m0;

	len = m->m_len;
	while (k >= len) {
		k -= len;
		m = m->m_next;
		if (m == NULL)
			goto bad;
		len = m->m_len;
	}
	cp = mtod(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == NULL)
		goto bad;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
 bad:
	*err = 1;
	return 0;
}
#endif

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount of
 * packet data present in the buffer (0 when p is really an mbuf chain).
 */
u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	u_int32_t A = 0, X = 0;
	bpf_u_int32 k;
	u_int32_t mem[BPF_MEMWORDS];

	bzero(mem, sizeof(mem));

	if (pc == NULL) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	--pc;
	while (1) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
#ifdef BPF_ALIGN
			if (((intptr_t)(p + k) & 3) != 0)
				A = EXTRACT_LONG(&p[k]);
			else
#endif
				A = ntohl(*(int32_t *)(p + k));
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return 0;
				m = (struct mbuf *)p;
				MINDEX(m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
#ifdef BPF_ALIGN
			if (((intptr_t)(p + k) & 3) != 0)
				A = EXTRACT_LONG(&p[k]);
			else
#endif
				A = ntohl(*(int32_t *)(p + k));
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (X > buflen || pc->k > buflen - X ||
			    sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef _KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return 0;
				m = (struct mbuf *)p;
				MINDEX(m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				struct mbuf *m;

				if (buflen != 0)
					return 0;
				m = (struct mbuf *)p;
				MINDEX(m, k);
				X = (mtod(m, char *)[k] & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}

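/*
 * Illustrative usage sketch (not compiled): a minimal, hypothetical example
 * of running a hand-built classic-BPF program over a contiguous buffer with
 * bpf_filter().  The packet bytes and the two-branch program below are made
 * up for illustration; BPF_STMT() and BPF_JUMP() come from <net/bpf.h>.
 */
#if 0
	/* A 14-byte Ethernet header whose ethertype is 0x0800 (IPv4). */
	static u_char pkt[14] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* destination */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x01,	/* source */
		0x08, 0x00				/* ethertype */
	};
	/* Accept IPv4 frames in full, reject everything else. */
	static struct bpf_insn prog[] = {
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),
		BPF_STMT(BPF_RET|BPF_K, 0),
	};
	u_int snap;

	/* buflen == wirelen here because the whole frame is in the buffer. */
	snap = bpf_filter(prog, pkt, sizeof(pkt), sizeof(pkt));
	/* snap is (u_int)-1: the ethertype matched, so the packet is accepted. */
#endif
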
#ifdef _KERNEL
/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and land on a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
int
bpf_validate(const struct bpf_insn *f, int len)
{
	u_int i, from;
	const struct bpf_insn *p;

	if (len < 1 || len > BPF_MAXINSNS)
		return 0;

	for (i = 0; i < len; ++i) {
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * The stricter check against the actual
				 * packet length is done at run time.
				 */
				if (p->k >= bpf_maxbufsize)
					return 0;
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 */
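			/*
			 * For example (hypothetical numbers): with
			 * from == 10 and a BPF_JA offset k == 0xfffffff8,
			 * the 32-bit sum from + k wraps around to 2,
			 * which is less than from, so the check below
			 * rejects the program rather than treating it
			 * as a forward jump to instruction 2.
			 */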
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k < from || from + p->k >= len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}
	return BPF_CLASS(f[len - 1].code) == BPF_RET;
}
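
/*
 * Illustrative sketch (not compiled): a hypothetical program that
 * bpf_validate() rejects.  Both branches of the conditional jump land past
 * the end of the two-instruction program, so the jump-target check above
 * returns 0; the program also lacks a terminating BPF_RET.
 */
#if 0
	static struct bpf_insn bad[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_LEN, 0),
		BPF_JUMP(BPF_JMP|BPF_JGT|BPF_K, 64, 0, 1),	/* targets are indices 2 and 3 */
	};

	if (bpf_validate(bad, 2) == 0)
		;	/* rejected: jump out of range and no trailing BPF_RET */
#endif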
#endif