1 2 /*- 3 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 4 * The Regents of the University of California. All rights reserved. 5 * 6 * This code is derived from the Stanford/CMU enet packet filter, 7 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 8 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 9 * Berkeley Laboratory. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * @(#)bpf.c 7.5 (Berkeley) 7/15/91 40 */ 41 42 #if !(defined(lint) || defined(KERNEL) || defined(_KERNEL)) 43 static const char rcsid[] = 44 "@(#) $Header: /devel/CVS/IP-Filter/bpf_filter.c,v 2.2.2.3 2006/10/03 11:25:56 darrenr Exp $ (LBL)"; 45 #endif 46 47 #include <sys/param.h> 48 #include <sys/types.h> 49 #include <sys/time.h> 50 #include <sys/socket.h> 51 52 #include <netinet/in.h> 53 #include <net/if.h> 54 55 #include "netinet/ip_compat.h" 56 #include "bpf-ipf.h" 57 58 59 #if (defined(__hpux) || SOLARIS) && (defined(_KERNEL) || defined(KERNEL)) 60 # include <sys/sysmacros.h> 61 # include <sys/stream.h> 62 #endif 63 64 #include "pcap-ipf.h" 65 66 #if !defined(KERNEL) && !defined(_KERNEL) 67 #include <stdlib.h> 68 #endif 69 70 #define int32 bpf_int32 71 #define u_int32 bpf_u_int32 72 73 static int m_xword(mb_t *, int, int *); 74 static int m_xhalf(mb_t *, int, int *); 75 76 #ifndef LBL_ALIGN 77 /* 78 * XXX - IA-64? If not, this probably won't work on Win64 IA-64 79 * systems, unless LBL_ALIGN is defined elsewhere for them. 80 * XXX - SuperH? If not, this probably won't work on WinCE SuperH 81 * systems, unless LBL_ALIGN is defined elsewhere for them. 
82 */ 83 #if defined(sparc) || defined(__sparc__) || defined(mips) || \ 84 defined(ibm032) || defined(__alpha) || defined(__hpux) || \ 85 defined(__arm__) 86 #define LBL_ALIGN 87 #endif 88 #endif 89 90 #ifndef LBL_ALIGN 91 92 #define EXTRACT_SHORT(p) ((u_short)ntohs(*(u_short *)p)) 93 #define EXTRACT_LONG(p) (ntohl(*(u_int32 *)p)) 94 #else 95 #define EXTRACT_SHORT(p)\ 96 ((u_short)\ 97 ((u_short)*((u_char *)p+0)<<8|\ 98 (u_short)*((u_char *)p+1)<<0)) 99 #define EXTRACT_LONG(p)\ 100 ((u_int32)*((u_char *)p+0)<<24|\ 101 (u_int32)*((u_char *)p+1)<<16|\ 102 (u_int32)*((u_char *)p+2)<<8|\ 103 (u_int32)*((u_char *)p+3)<<0) 104 #endif 105 106 #define MINDEX(len, _m, _k) \ 107 { \ 108 len = M_LEN(m); \ 109 while ((_k) >= len) { \ 110 (_k) -= len; \ 111 (_m) = (_m)->m_next; \ 112 if ((_m) == 0) \ 113 return (0); \ 114 len = M_LEN(m); \ 115 } \ 116 } 117 118 static int 119 m_xword(mb_t *m, int k, int *err) 120 { 121 register int len; 122 register u_char *cp, *np; 123 register mb_t *m0; 124 125 MINDEX(len, m, k); 126 cp = MTOD(m, u_char *) + k; 127 if (len - k >= 4) { 128 *err = 0; 129 return (EXTRACT_LONG(cp)); 130 } 131 m0 = m->m_next; 132 if (m0 == NULL || M_LEN(m0) + len - k < 4) 133 goto bad; 134 *err = 0; 135 np = MTOD(m0, u_char *); 136 switch (len - k) { 137 138 case 1: 139 return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2]; 140 141 case 2: 142 return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1]; 143 144 default: 145 return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0]; 146 } 147 bad: 148 *err = 1; 149 return (0); 150 } 151 152 static int 153 m_xhalf(mb_t *m, int k, int *err) 154 { 155 register int len; 156 register u_char *cp; 157 register mb_t *m0; 158 159 MINDEX(len, m, k); 160 cp = MTOD(m, u_char *) + k; 161 if (len - k >= 2) { 162 *err = 0; 163 return (EXTRACT_SHORT(cp)); 164 } 165 m0 = m->m_next; 166 if (m0 == NULL) 167 goto bad; 168 *err = 0; 169 return (cp[0] << 8) | MTOD(m0, u_char *)[0]; 170 bad: 171 *err = 1; 172 return (0); 173 } 

/*
 * Execute the filter program starting at pc on the packet p
 * wirelen is the length of the original packet
 * buflen is the amount of data present
 * For the kernel, p is assumed to be a pointer to an mbuf if buflen is 0,
 * in all other cases, p is a pointer to a buffer and buflen is its size.
 *
 * Returns the filter's verdict: 0 rejects the packet; any non-zero
 * value is the number of bytes to accept ((u_int)-1 = whole packet).
 * A NULL program accepts everything.  Any malformed instruction or
 * out-of-range packet load rejects the packet.
 */
u_int
bpf_filter(struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	register u_int32 A, X;		/* accumulator and index register */
	register int k;			/* packet byte offset for loads */
	int32 mem[BPF_MEMWORDS];	/* scratch memory store (BPF_ST/BPF_LD|MEM) */
	mb_t *m, *n;
	int merr = 0;	/* XXX: GCC */
	int len;

	/*
	 * buflen == 0 flags the kernel case: p is actually an mbuf chain.
	 * Point p at the first mbuf's data and keep the chain head in m so
	 * loads that run past this mbuf can fall back to the chain walkers.
	 * Otherwise p is a flat buffer and m stays NULL.
	 */
	if (buflen == 0) {
		m = (mb_t *)p;
		p = MTOD(m, u_char *);
		buflen = M_LEN(m);
	} else
		m = NULL;

	if (pc == NULL)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	/* Pre-decrement so the loop's ++pc starts at the first insn. */
	--pc;
	while (1) {
		++pc;
		switch (pc->code) {

		default:
			/* Unrecognized opcode: reject the packet. */
			return (0);
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		/*
		 * Absolute loads.  When the requested bytes are not all in
		 * the contiguous buffer, fall back to the mbuf helpers in
		 * the kernel case, or reject in the flat-buffer case.
		 */
		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k + sizeof(int32) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xword(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k + sizeof(short) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xhalf(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
				if (m == NULL)
					return (0);
				n = m;
				MINDEX(len, n, k);
				A = MTOD(n, u_char *)[k];
				continue;
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		/*
		 * Indexed loads: offset is X + k.
		 * NOTE(review): k is a signed int, so X + pc->k can wrap
		 * negative on hostile input.  The unsigned comparison
		 * against buflen routes such values away from the flat
		 * buffer, but the mbuf helper path should be verified to
		 * reject negative offsets as well.
		 */
		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (k + sizeof(int32) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xword(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (k + sizeof(short) > buflen) {
				if (m == NULL)
					return (0);
				A = m_xhalf(m, k, &merr);
				if (merr != 0)
					return (0);
				continue;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (k >= buflen) {
				if (m == NULL)
					return (0);
				n = m;
				MINDEX(len, n, k);
				A = MTOD(n, u_char *)[k];
				continue;
			}
			A = p[k];
			continue;

		/*
		 * X = 4 * (low nibble of byte at k): the classic idiom for
		 * loading an IP header length into the index register.
		 */
		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
				if (m == NULL)
					return (0);
				n = m;
				MINDEX(len, n, k);
				X = (MTOD(n, char *)[k] & 0xf) << 2;
				continue;
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		/* Immediate and scratch-memory loads/stores. */
		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		/*
		 * Jumps are relative to the *next* instruction (the loop's
		 * ++pc supplies the +1).  bpf_validate() guarantees they
		 * stay within the program.
		 */
		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		/* Arithmetic/logical ops on the accumulator. */
		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			/* Runtime divide-by-zero check (X is not known
			 * statically); reject the packet rather than trap. */
			if (X == 0)
				return (0);
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			/* k == 0 is rejected statically by bpf_validate(). */
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		/* Register transfers. */
		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}


/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
470 */ 471 int 472 bpf_validate(struct bpf_insn *f, int len) 473 { 474 u_int i, from; 475 const struct bpf_insn *p; 476 477 if (len == 0) 478 return (1); 479 480 if (len < 1 || len > BPF_MAXINSNS) 481 return (0); 482 483 for (i = 0; i < len; ++i) { 484 p = &f[i]; 485 switch (BPF_CLASS(p->code)) { 486 /* 487 * Check that memory operations use valid addresses. 488 */ 489 case BPF_LD: 490 case BPF_LDX: 491 switch (BPF_MODE(p->code)) { 492 case BPF_IMM: 493 break; 494 case BPF_ABS: 495 case BPF_IND: 496 case BPF_MSH: 497 /* 498 * More strict check with actual packet length 499 * is done runtime. 500 */ 501 #if 0 502 if (p->k >= bpf_maxbufsize) 503 return (0); 504 #endif 505 break; 506 case BPF_MEM: 507 if (p->k >= BPF_MEMWORDS) 508 return (0); 509 break; 510 case BPF_LEN: 511 break; 512 default: 513 return (0); 514 } 515 break; 516 case BPF_ST: 517 case BPF_STX: 518 if (p->k >= BPF_MEMWORDS) 519 return (0); 520 break; 521 case BPF_ALU: 522 switch (BPF_OP(p->code)) { 523 case BPF_ADD: 524 case BPF_SUB: 525 case BPF_OR: 526 case BPF_AND: 527 case BPF_LSH: 528 case BPF_RSH: 529 case BPF_NEG: 530 break; 531 case BPF_DIV: 532 /* 533 * Check for constant division by 0. 534 */ 535 if (BPF_RVAL(p->code) == BPF_K && p->k == 0) 536 return (0); 537 default: 538 return (0); 539 } 540 break; 541 case BPF_JMP: 542 /* 543 * Check that jumps are within the code block, 544 * and that unconditional branches don't go 545 * backwards as a result of an overflow. 546 * Unconditional branches have a 32-bit offset, 547 * so they could overflow; we check to make 548 * sure they don't. Conditional branches have 549 * an 8-bit offset, and the from address is <= 550 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS 551 * is sufficiently small that adding 255 to it 552 * won't overflow. 553 * 554 * We know that len is <= BPF_MAXINSNS, and we 555 * assume that BPF_MAXINSNS is < the maximum size 556 * of a u_int, so that i + 1 doesn't overflow. 
557 */ 558 from = i + 1; 559 switch (BPF_OP(p->code)) { 560 case BPF_JA: 561 if (from + p->k < from || from + p->k >= len) 562 return (0); 563 break; 564 case BPF_JEQ: 565 case BPF_JGT: 566 case BPF_JGE: 567 case BPF_JSET: 568 if (from + p->jt >= len || from + p->jf >= len) 569 return (0); 570 break; 571 default: 572 return (0); 573 } 574 break; 575 case BPF_RET: 576 break; 577 case BPF_MISC: 578 break; 579 default: 580 return (0); 581 } 582 } 583 return (BPF_CLASS(f[len - 1].code) == BPF_RET); 584 } 585