1 /* SPDX-License-Identifier: BSD-2-Clause */ 2 /* 3 * dhcpcd: BPF arp and bootp filtering 4 * Copyright (c) 2006-2019 Roy Marples <roy@marples.name> 5 * All rights reserved 6 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/ioctl.h> 30 #include <sys/socket.h> 31 32 #include <arpa/inet.h> 33 34 #include <net/if.h> 35 #include <netinet/in.h> 36 #include <netinet/if_ether.h> 37 38 #ifdef __linux__ 39 /* Special BPF snowflake. 
*/ 40 #include <linux/filter.h> 41 #define bpf_insn sock_filter 42 #else 43 #include <net/bpf.h> 44 #endif 45 46 #include <errno.h> 47 #include <fcntl.h> 48 #include <paths.h> 49 #include <stddef.h> 50 #include <stdlib.h> 51 #include <string.h> 52 53 #include "common.h" 54 #include "arp.h" 55 #include "bpf.h" 56 #include "dhcp.h" 57 #include "if.h" 58 #include "logerr.h" 59 60 #define ARP_ADDRS_MAX 3 61 62 /* BPF helper macros */ 63 #ifdef __linux__ 64 #define BPF_WHOLEPACKET 0x7fffffff /* work around buggy LPF filters */ 65 #else 66 #define BPF_WHOLEPACKET ~0U 67 #endif 68 69 /* Macros to update the BPF structure */ 70 #define BPF_SET_STMT(insn, c, v) { \ 71 (insn)->code = (c); \ 72 (insn)->jt = 0; \ 73 (insn)->jf = 0; \ 74 (insn)->k = (uint32_t)(v); \ 75 }; 76 77 #define BPF_SET_JUMP(insn, c, v, t, f) { \ 78 (insn)->code = (c); \ 79 (insn)->jt = (t); \ 80 (insn)->jf = (f); \ 81 (insn)->k = (uint32_t)(v); \ 82 }; 83 84 size_t 85 bpf_frame_header_len(const struct interface *ifp) 86 { 87 88 switch (ifp->family) { 89 case ARPHRD_ETHER: 90 return sizeof(struct ether_header); 91 default: 92 return 0; 93 } 94 } 95 96 static const uint8_t etherbcastaddr[] = 97 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 98 99 int 100 bpf_frame_bcast(const struct interface *ifp, const char *frame) 101 { 102 103 switch (ifp->family) { 104 case ARPHRD_ETHER: 105 return memcmp(frame + 106 offsetof(struct ether_header, ether_dhost), 107 etherbcastaddr, sizeof(etherbcastaddr)); 108 default: 109 return -1; 110 } 111 } 112 113 #ifndef __linux__ 114 /* Linux is a special snowflake for opening, attaching and reading BPF. 115 * See if-linux.c for the Linux specific BPF functions. 
 */

const char *bpf_name = "Berkley Packet Filter";

/* Open a BPF device, bind it to the interface and install the supplied
 * filter program.  On success the (re)allocated per-interface read
 * buffer in the ipv4 state is sized to the kernel's buffer length and
 * the open descriptor is returned; on any failure -1 is returned and
 * the descriptor is closed. */
int
bpf_open(struct interface *ifp, int (*filter)(struct interface *, int))
{
	struct ipv4_state *state;
	int fd = -1;
	struct ifreq ifr;
	int ibuf_len = 0;
	size_t buf_len;
	struct bpf_version pv;
#ifdef BIOCIMMEDIATE
	unsigned int flags;
#endif
#ifndef O_CLOEXEC
	int fd_opts;
#endif

#ifdef _PATH_BPF
	/* Modern systems expose a cloning /dev/bpf device. */
	fd = open(_PATH_BPF, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
		| O_CLOEXEC
#endif
	);
#else
	/* Otherwise probe /dev/bpf0, /dev/bpf1, ... until one is free. */
	char device[32];
	int n = 0;

	do {
		snprintf(device, sizeof(device), "/dev/bpf%d", n++);
		fd = open(device, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
			| O_CLOEXEC
#endif
		);
	} while (fd == -1 && errno == EBUSY);
#endif

	if (fd == -1)
		return -1;

#ifndef O_CLOEXEC
	/* No O_CLOEXEC at open time, so set FD_CLOEXEC after the fact. */
	if ((fd_opts = fcntl(fd, F_GETFD)) == -1 ||
	    fcntl(fd, F_SETFD, fd_opts | FD_CLOEXEC) == -1)
	{
		close(fd);
		return -1;
	}
#endif

	memset(&pv, 0, sizeof(pv));
	if (ioctl(fd, BIOCVERSION, &pv) == -1)
		goto eexit;
	if (pv.bv_major != BPF_MAJOR_VERSION ||
	    pv.bv_minor < BPF_MINOR_VERSION)
	{
		logerrx("BPF version mismatch - recompile");
		goto eexit;
	}

	/* Install the caller's filter before binding so we never see
	 * unfiltered traffic. */
	if (filter(ifp, fd) != 0)
		goto eexit;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifp->name, sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
		goto eexit;

	/* Get the required BPF buffer length from the kernel. */
	if (ioctl(fd, BIOCGBLEN, &ibuf_len) == -1)
		goto eexit;
	buf_len = (size_t)ibuf_len;
	state = ipv4_getstate(ifp);
	if (state == NULL)
		goto eexit;
	if (state->buffer_size != buf_len) {
		void *nb;

		if ((nb = realloc(state->buffer, buf_len)) == NULL)
			goto eexit;
		state->buffer = nb;
		state->buffer_size = buf_len;
	}

#ifdef BIOCIMMEDIATE
	/* Deliver packets as they arrive rather than when the buffer
	 * fills or the read timeout expires. */
	flags = 1;
	if (ioctl(fd, BIOCIMMEDIATE, &flags) == -1)
		goto eexit;
#endif

	return fd;

eexit:
	close(fd);
	return -1;
}

/* BPF requires that we read the entire buffer.
 * So we pass the buffer in the API so we can loop on >1 packet.
 * Returns the payload length copied into data (frame header stripped),
 * 0/-1 straight from read(2), and updates *flags with BPF_BCAST for
 * broadcast frames and BPF_EOF once the buffered packets are consumed. */
ssize_t
bpf_read(struct interface *ifp, int fd, void *data, size_t len,
    unsigned int *flags)
{
	ssize_t fl = (ssize_t)bpf_frame_header_len(ifp);
	ssize_t bytes;
	struct ipv4_state *state = IPV4_STATE(ifp);

	struct bpf_hdr packet;
	const char *payload;

	*flags &= ~BPF_EOF;
	for (;;) {
		/* Refill the buffer only once the previous read has been
		 * fully walked. */
		if (state->buffer_len == 0) {
			bytes = read(fd, state->buffer, state->buffer_size);
#if defined(__sun)
			/* After 2^31 bytes, the kernel offset overflows.
			 * To work around this bug, lseek 0. */
			if (bytes == -1 && errno == EINVAL) {
				lseek(fd, 0, SEEK_SET);
				continue;
			}
#endif
			if (bytes == -1 || bytes == 0)
				return bytes;
			state->buffer_len = (size_t)bytes;
			state->buffer_pos = 0;
		}
		bytes = -1;
		/* Copy the header out as the buffer offset may not be
		 * suitably aligned for direct access. */
		memcpy(&packet, state->buffer + state->buffer_pos,
		    sizeof(packet));
		if (state->buffer_pos + packet.bh_caplen + packet.bh_hdrlen >
		    state->buffer_len)
			goto next; /* Packet beyond buffer, drop. */
		payload = state->buffer + state->buffer_pos +
		    packet.bh_hdrlen;
		if (bpf_frame_bcast(ifp, payload) == 0)
			*flags |= BPF_BCAST;
		else
			*flags &= ~BPF_BCAST;
		/* Strip the link-layer frame header before handing the
		 * payload to the caller. */
		payload += fl;
		bytes = (ssize_t)packet.bh_caplen - fl;
		if ((size_t)bytes > len)
			bytes = (ssize_t)len;
		memcpy(data, payload, (size_t)bytes);
next:
		/* Packets in the buffer are word aligned. */
		state->buffer_pos += BPF_WORDALIGN(packet.bh_hdrlen +
		    packet.bh_caplen);
		if (state->buffer_pos >= state->buffer_len) {
			state->buffer_len = state->buffer_pos = 0;
			*flags |= BPF_EOF;
		}
		if (bytes != -1)
			return bytes;
	}

	/* NOTREACHED */
}

/* Install the BPF program (filter_len instructions at filter) on the
 * descriptor.  Returns the BIOCSETF ioctl result (0 or -1). */
int
bpf_attach(int fd, void *filter, unsigned int filter_len)
{
	struct bpf_program pf;

	/* Install the filter. */
	memset(&pf, 0, sizeof(pf));
	pf.bf_insns = filter;
	pf.bf_len = filter_len;
	return ioctl(fd, BIOCSETF, &pf);
}
#endif

#ifndef __sun
/* SunOS is special too - sending via BPF goes nowhere. */
/* Write a packet to the BPF descriptor, prepending a link-layer
 * broadcast frame header for hardware families that need one. */
ssize_t
bpf_send(const struct interface *ifp, int fd, uint16_t protocol,
    const void *data, size_t len)
{
	struct iovec iov[2];
	struct ether_header eh;

	switch(ifp->family) {
	case ARPHRD_ETHER:
		/* Broadcast destination, our hardware address as source. */
		memset(&eh.ether_dhost, 0xff, sizeof(eh.ether_dhost));
		memcpy(&eh.ether_shost, ifp->hwaddr, sizeof(eh.ether_shost));
		eh.ether_type = htons(protocol);
		iov[0].iov_base = &eh;
		iov[0].iov_len = sizeof(eh);
		break;
	default:
		/* No frame header for this family. */
		iov[0].iov_base = NULL;
		iov[0].iov_len = 0;
		break;
	}
	iov[1].iov_base = UNCONST(data);
	iov[1].iov_len = len;
	return writev(fd, iov, 2);
}
#endif

/* Close the BPF descriptor and discard any buffered packets. */
int
bpf_close(struct interface *ifp, int fd)
{
	struct ipv4_state *state = IPV4_STATE(ifp);

	/* Rewind the buffer on closing. */
	state->buffer_len = state->buffer_pos = 0;
	return close(fd);
}

/* Normally this is needed by bootp.
 * Once that uses this again, the ARP guard here can be removed.
 */
#ifdef ARP
#define	BPF_CMP_HWADDR_LEN	((((HWADDR_LEN / 4) + 2) * 2) + 1)

/* Emit BPF instructions into bpf (capacity bpf_len instructions) that
 * compare hwaddr_len bytes at packet offset off (indexed via X) against
 * hwaddr.  With equal true a match falls through to the following
 * instructions; with equal false a mismatch falls through instead.
 * The emitted code always ends with a RET 0 for the failing path.
 * Returns the number of instructions written, or 0 with errno set
 * (EINVAL for oversized addresses, ENOBUFS when bpf is too small). */
static unsigned int
bpf_cmp_hwaddr(struct bpf_insn *bpf, size_t bpf_len, size_t off,
    bool equal, uint8_t *hwaddr, size_t hwaddr_len)
{
	struct bpf_insn *bp;
	size_t maclen, nlft, njmps;
	uint32_t mac32;
	uint16_t mac16;
	uint8_t jt, jf;

	/* Calc the number of jumps.
	 * Jump offsets are 8 bit, so reject addresses needing more. */
	if ((hwaddr_len / 4) >= 128) {
		errno = EINVAL;
		return 0;
	}
	njmps = (hwaddr_len / 4) * 2; /* 2 instructions per check */
	/* We jump after the 1st check. */
	if (njmps)
		njmps -= 2;
	nlft = hwaddr_len % 4;
	if (nlft) {
		njmps += (nlft / 2) * 2;
		nlft = nlft % 2;
		if (nlft)
			njmps += 2;

	}

	/* Skip to positive finish. */
	njmps++;
	if (equal) {
		jt = (uint8_t)njmps;
		jf = 0;
	} else {
		jt = 0;
		jf = (uint8_t)njmps;
	}

	/* Compare in 4, then 2, then 1 byte chunks, shrinking the jump
	 * offsets by 2 after each emitted load+jump pair. */
	bp = bpf;
	for (; hwaddr_len > 0;
	    hwaddr += maclen, hwaddr_len -= maclen, off += maclen)
	{
		if (bpf_len < 3) {
			errno = ENOBUFS;
			return 0;
		}
		bpf_len -= 3;

		if (hwaddr_len >= 4) {
			maclen = sizeof(mac32);
			memcpy(&mac32, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htonl(mac32), jt, jf);
		} else if (hwaddr_len >= 2) {
			maclen = sizeof(mac16);
			memcpy(&mac16, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_H + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htons(mac16), jt, jf);
		} else {
			maclen = sizeof(*hwaddr);
			BPF_SET_STMT(bp, BPF_LD + BPF_B + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    *hwaddr, jt, jf);
		}
		if (jt)
			jt = (uint8_t)(jt - 2);
		if (jf)
			jf = (uint8_t)(jf - 2);
		bp++;
	}

	/* Last step is always return failure.
	 * Next step is a positive finish. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	return (unsigned int)(bp - bpf);
}
#endif

#ifdef ARP

static const struct bpf_insn bpf_arp_ether [] = {
	/* Ensure packet is at least correct size. */
	BPF_STMT(BPF_LD + BPF_W + BPF_LEN, 0),
	BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, sizeof(struct ether_arp), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Check this is an ARP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	    offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_ARP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),

	/* Make sure the hardware family matches. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_hrd)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPHRD_ETHER, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure the hardware length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_hln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K,
	    sizeof(((struct ether_arp *)0)->arp_sha), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define	BPF_ARP_ETHER_LEN	__arraycount(bpf_arp_ether)

static const struct bpf_insn bpf_arp_filter [] = {
	/* Make sure this is for IP. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_pro)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure this is an ARP REQUEST. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REQUEST, 2, 0),
	/* or ARP REPLY. */
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REPLY, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure the protocol length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_pln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, sizeof(in_addr_t), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define	BPF_ARP_FILTER_LEN	__arraycount(bpf_arp_filter)

/* Worst-case instruction counts for the dynamically built parts;
 * these must stay in sync with the code in bpf_arp() below. */
#define	BPF_ARP_ADDRS_LEN	1 + (ARP_ADDRS_MAX * 2) + 3 + \
				(ARP_ADDRS_MAX * 2) + 1

#define	BPF_ARP_LEN		BPF_ARP_ETHER_LEN + BPF_ARP_FILTER_LEN + \
				BPF_CMP_HWADDR_LEN + BPF_ARP_ADDRS_LEN

/* Build and attach an ARP filter for the interface: valid ARP
 * request/reply for IPv4, not sent by us, whose sender or target
 * protocol address matches one of our ARP states (or a null sender
 * for probes).  Matching packets return the full ARP frame length. */
int
bpf_arp(struct interface *ifp, int fd)
{
	struct bpf_insn bpf[BPF_ARP_LEN];
	struct bpf_insn *bp;
	struct iarp_state *state;
	uint16_t arp_len;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
	case ARPHRD_ETHER:
		memcpy(bp, bpf_arp_ether, sizeof(bpf_arp_ether));
		bp += BPF_ARP_ETHER_LEN;
		arp_len = sizeof(struct ether_header)+sizeof(struct ether_arp);
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_arp_filter, sizeof(bpf_arp_filter));
	bp += BPF_ARP_FILTER_LEN;

	/* Ensure it's not from us. */
	bp += bpf_cmp_hwaddr(bp, BPF_CMP_HWADDR_LEN, sizeof(struct arphdr),
	    false, ifp->hwaddr, ifp->hwlen);

	state = ARP_STATE(ifp);
	if (TAILQ_FIRST(&state->arp_states)) {
		struct arp_state *astate;
		size_t naddrs;

		/* Match sender protocol address */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    sizeof(struct arphdr) + ifp->hwlen);
		bp++;
		naddrs = 0;
		TAILQ_FOREACH(astate, &state->arp_states, next) {
			/* Filter capacity allows ARP_ADDRS_MAX at most. */
			if (++naddrs > ARP_ADDRS_MAX) {
				errno = ENOBUFS;
				logerr(__func__);
				break;
			}
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htonl(astate->addr.s_addr), 0, 1);
			bp++;
			BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
			bp++;
		}

		/* If we didn't match sender, then we're only interested in
		 * ARP probes to us, so check the null host sender. */
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, INADDR_ANY, 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;

		/* Match target protocol address */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    (sizeof(struct arphdr)
		    + (size_t)(ifp->hwlen * 2) + sizeof(in_addr_t)));
		bp++;
		naddrs = 0;
		TAILQ_FOREACH(astate, &state->arp_states, next) {
			if (++naddrs > ARP_ADDRS_MAX) {
				/* Already logged error above. */
				break;
			}
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			    htonl(astate->addr.s_addr), 0, 1);
			bp++;
			BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
			bp++;
		}

		/* Return nothing, no protocol address match. */
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	}

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}
#endif

/* BPF scratch memory slots used by the bootp filter. */
#define	BPF_M_FHLEN	0	/* frame header length */
#define	BPF_M_IPHLEN	1	/* IP header length */
#define	BPF_M_IPLEN	2	/* total IP length */
#define	BPF_M_UDP	3	/* offset of the UDP header */
#define	BPF_M_UDPLEN	4	/* UDP length */

#ifdef ARPHRD_NONE
static const struct bpf_insn bpf_bootp_none[] = {
	/* Set the frame header length to zero. */
	BPF_STMT(BPF_LD + BPF_IMM, 0),
	BPF_STMT(BPF_ST, BPF_M_FHLEN),
};
#define	BPF_BOOTP_NONE_LEN	__arraycount(bpf_bootp_none)
#endif

static const struct bpf_insn bpf_bootp_ether[] = {
	/* Make sure this is an IP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	    offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X. */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),
	/* Copy frame header length to memory */
	BPF_STMT(BPF_STX, BPF_M_FHLEN),
};
#define	BPF_BOOTP_ETHER_LEN	__arraycount(bpf_bootp_ether)

static const struct bpf_insn bpf_bootp_filter[] = {
	/* Make sure it's an IPv4 packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0xf0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x40, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Ensure IP header length is big enough and
	 * store the IP header length in memory. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0x0f),
	BPF_STMT(BPF_ALU + BPF_MUL + BPF_K, 4),
	BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, sizeof(struct ip), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	BPF_STMT(BPF_ST, BPF_M_IPHLEN),

	/* Make sure it's a UDP packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct ip, ip_p)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure this isn't a fragment. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_off)),
	BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, 0x1fff, 0, 1),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Store IP length. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_len)),
	BPF_STMT(BPF_ST, BPF_M_IPLEN),

	/* Advance to the UDP header. */
	BPF_STMT(BPF_LD + BPF_MEM, BPF_M_IPHLEN),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_X, 0),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),

	/* Store UDP location */
	BPF_STMT(BPF_STX, BPF_M_UDP),

	/* Make sure it's from and to the right port. */
	BPF_STMT(BPF_LD + BPF_W + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (BOOTPS << 16) + BOOTPC, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Store UDP length. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct udphdr, uh_ulen)),
	BPF_STMT(BPF_ST, BPF_M_UDPLEN),

	/* Ensure that UDP length + IP header length == IP length */
	/* Copy IP header length to X. */
	BPF_STMT(BPF_LDX + BPF_MEM, BPF_M_IPHLEN),
	/* Add UDP length (A) to IP header length (X). */
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_X, 0),
	/* Store result in X. */
	BPF_STMT(BPF_MISC + BPF_TAX, 0),
	/* Copy IP length to A. */
	BPF_STMT(BPF_LD + BPF_MEM, BPF_M_IPLEN),
	/* Ensure X == A. */
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_X, 0, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Advance to the BOOTP packet. */
	BPF_STMT(BPF_LD + BPF_MEM, BPF_M_UDP),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_K, sizeof(struct udphdr)),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),

	/* Make sure it's BOOTREPLY. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct bootp, op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, BOOTREPLY, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};

#define	BPF_BOOTP_FILTER_LEN	__arraycount(bpf_bootp_filter)
#define	BPF_BOOTP_CHADDR_LEN	((BOOTP_CHADDR_LEN / 4) * 3)
#define	BPF_BOOTP_XID_LEN	4 /* BOUND check is 4 instructions */

#define	BPF_BOOTP_LEN		BPF_BOOTP_ETHER_LEN + BPF_BOOTP_FILTER_LEN \
				+ BPF_BOOTP_XID_LEN + BPF_BOOTP_CHADDR_LEN + 4

/* Build and attach a BOOTP filter for the interface: IPv4, UDP,
 * unfragmented, bootps->bootpc, consistent lengths and BOOTREPLY.
 * Matching packets return frame header length + IP length. */
int
bpf_bootp(struct interface *ifp, int fd)
{
#if 0
	const struct dhcp_state *state = D_CSTATE(ifp);
#endif
	struct bpf_insn bpf[BPF_BOOTP_LEN];
	struct bpf_insn *bp;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
#ifdef ARPHRD_NONE
	case ARPHRD_NONE:
		memcpy(bp, bpf_bootp_none, sizeof(bpf_bootp_none));
		bp += BPF_BOOTP_NONE_LEN;
		break;
#endif
	case ARPHRD_ETHER:
		memcpy(bp, bpf_bootp_ether, sizeof(bpf_bootp_ether));
		bp += BPF_BOOTP_ETHER_LEN;
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_bootp_filter, sizeof(bpf_bootp_filter));
	bp += BPF_BOOTP_FILTER_LEN;

	/* These checks won't work when same IP exists on other interfaces. */
#if 0
	if (ifp->hwlen <= sizeof(((struct bootp *)0)->chaddr))
		bp += bpf_cmp_hwaddr(bp, BPF_BOOTP_CHADDR_LEN,
				     offsetof(struct bootp, chaddr),
				     true, ifp->hwaddr, ifp->hwlen);

	/* Make sure the BOOTP packet is for us. */
	if (state->state == DHS_BOUND) {
		/* If bound, we only expect FORCERENEW messages
		 * and they need to be unicast to us.
		 * Move back to the IP header in M0 and check dst. */
		BPF_SET_STMT(bp, BPF_LDX + BPF_W + BPF_MEM, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    offsetof(struct ip, ip_dst));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		    htonl(state->lease.addr.s_addr), 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	} else {
		/* As we're not bound, we need to check xid to ensure
		 * it's a reply to our transaction. */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		    offsetof(struct bootp, xid));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		    state->xid, 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	}
#endif

	/* All passed, return the packet - frame length + ip length */
	BPF_SET_STMT(bp, BPF_LD + BPF_MEM, BPF_M_FHLEN);
	bp++;
	BPF_SET_STMT(bp, BPF_LDX + BPF_MEM, BPF_M_IPLEN);
	bp++;
	BPF_SET_STMT(bp, BPF_ALU + BPF_ADD + BPF_X, 0);
	bp++;
	BPF_SET_STMT(bp, BPF_RET + BPF_A, 0);
	bp++;

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}