/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * dhcpcd: BPF arp and bootp filtering
 * Copyright (c) 2006-2020 Roy Marples <roy@marples.name>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ioctl.h>
#include <sys/socket.h>

#include <arpa/inet.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef __linux__
/* Special BPF snowflake. */
#include <linux/filter.h>
#define	bpf_insn		sock_filter
#else
#include <net/bpf.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "common.h"
#include "arp.h"
#include "bpf.h"
#include "dhcp.h"
#include "if.h"
#include "logerr.h"

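/* Maximum number of IPv4 addresses compiled into the ARP filter below. */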
#define	ARP_ADDRS_MAX	3

/* BPF helper macros */
#ifdef __linux__
#define	BPF_WHOLEPACKET		0x7fffffff /* work around buggy LPF filters */
#else
#define	BPF_WHOLEPACKET		~0U
#endif

/* Macros to update the BPF structure */
#define	BPF_SET_STMT(insn, c, v) {				\
	(insn)->code = (c);					\
	(insn)->jt = 0;						\
	(insn)->jf = 0;						\
	(insn)->k = (uint32_t)(v);				\
};

#define	BPF_SET_JUMP(insn, c, v, t, f) {			\
	(insn)->code = (c);					\
	(insn)->jt = (t);					\
	(insn)->jf = (f);					\
	(insn)->k = (uint32_t)(v);				\
};
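
/*
 * Illustrative only (not a filter dhcpcd installs): the macros above fill
 * in one instruction at a time while bp walks along an array, e.g. to
 * accept ARP frames and drop everything else:
 *
 *	struct bpf_insn prog[4], *bp = prog;
 *
 *	BPF_SET_STMT(bp, BPF_LD + BPF_H + BPF_ABS,
 *	    offsetof(struct ether_header, ether_type));
 *	bp++;
 *	BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_ARP, 0, 1);
 *	bp++;
 *	BPF_SET_STMT(bp, BPF_RET + BPF_K, BPF_WHOLEPACKET);
 *	bp++;
 *	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
 *	bp++;
 */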

size_t
bpf_frame_header_len(const struct interface *ifp)
{

	switch (ifp->family) {
	case ARPHRD_ETHER:
		return sizeof(struct ether_header);
	default:
		return 0;
	}
}

void *
bpf_frame_header_src(const struct interface *ifp, void *fh, size_t *len)
{
	uint8_t *f = fh;

	switch (ifp->family) {
	case ARPHRD_ETHER:
		*len = sizeof(((struct ether_header *)0)->ether_shost);
		return f + offsetof(struct ether_header, ether_shost);
	default:
		*len = 0;
		errno = ENOTSUP;
		return NULL;
	}
}

void *
bpf_frame_header_dst(const struct interface *ifp, void *fh, size_t *len)
{
	uint8_t *f = fh;

	switch (ifp->family) {
	case ARPHRD_ETHER:
		*len = sizeof(((struct ether_header *)0)->ether_dhost);
		return f + offsetof(struct ether_header, ether_dhost);
	default:
		*len = 0;
		errno = ENOTSUP;
		return NULL;
	}
}

static const uint8_t etherbcastaddr[] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

int
bpf_frame_bcast(const struct interface *ifp, const char *frame)
{

	switch (ifp->family) {
	case ARPHRD_ETHER:
		return memcmp(frame +
		    offsetof(struct ether_header, ether_dhost),
		    etherbcastaddr, sizeof(etherbcastaddr));
	default:
		return -1;
	}
}

#ifndef __linux__
/* Linux is a special snowflake for opening, attaching and reading BPF.
 * See if-linux.c for the Linux specific BPF functions. */

const char *bpf_name = "Berkeley Packet Filter";

int
bpf_open(struct interface *ifp, int (*filter)(struct interface *, int))
{
	struct ipv4_state *state;
	int fd = -1;
	struct ifreq ifr;
	int ibuf_len = 0;
	size_t buf_len;
	struct bpf_version pv;
#ifdef BIOCIMMEDIATE
	unsigned int flags;
#endif
#ifndef O_CLOEXEC
	int fd_opts;
#endif

#ifdef _PATH_BPF
	fd = open(_PATH_BPF, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
		| O_CLOEXEC
#endif
	);
#else
	char device[32];
	int n = 0;

	do {
		snprintf(device, sizeof(device), "/dev/bpf%d", n++);
		fd = open(device, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
				| O_CLOEXEC
#endif
		);
	} while (fd == -1 && errno == EBUSY);
#endif

	if (fd == -1)
		return -1;

#ifndef O_CLOEXEC
	if ((fd_opts = fcntl(fd, F_GETFD)) == -1 ||
	    fcntl(fd, F_SETFD, fd_opts | FD_CLOEXEC) == -1) {
		close(fd);
		return -1;
	}
#endif

	memset(&pv, 0, sizeof(pv));
	if (ioctl(fd, BIOCVERSION, &pv) == -1)
		goto eexit;
	if (pv.bv_major != BPF_MAJOR_VERSION ||
	    pv.bv_minor < BPF_MINOR_VERSION) {
		logerrx("BPF version mismatch - recompile");
		goto eexit;
	}

	if (filter(ifp, fd) != 0)
		goto eexit;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifp->name, sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
		goto eexit;

	/* Get the required BPF buffer length from the kernel. */
	if (ioctl(fd, BIOCGBLEN, &ibuf_len) == -1)
		goto eexit;
	buf_len = (size_t)ibuf_len;
	state = ipv4_getstate(ifp);
	if (state == NULL)
		goto eexit;
	if (state->buffer_size != buf_len) {
		void *nb;

		if ((nb = realloc(state->buffer, buf_len)) == NULL)
			goto eexit;
		state->buffer = nb;
		state->buffer_size = buf_len;
	}

#ifdef BIOCIMMEDIATE
	flags = 1;
	if (ioctl(fd, BIOCIMMEDIATE, &flags) == -1)
		goto eexit;
#endif

	return fd;

eexit:
	close(fd);
	return -1;
}
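
/*
 * Typical use (a sketch; the real callers elsewhere in dhcpcd pass
 * bpf_bootp or bpf_arp as the filter callback):
 *
 *	int fd = bpf_open(ifp, bpf_bootp);
 *
 *	if (fd != -1)
 *		... poll fd and drain it with bpf_read() ...
 */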

/* BPF requires that we read the entire buffer.
 * So we pass the buffer in the API so we can loop on >1 packet. */
ssize_t
bpf_read(struct interface *ifp, int fd, void *data, size_t len,
    unsigned int *flags)
{
	ssize_t bytes;
	struct ipv4_state *state = IPV4_STATE(ifp);

	struct bpf_hdr packet;
	const char *payload;

	*flags &= ~BPF_EOF;
	for (;;) {
		if (state->buffer_len == 0) {
			bytes = read(fd, state->buffer, state->buffer_size);
#if defined(__sun)
			/* After 2^31 bytes, the kernel offset overflows.
			 * To work around this bug, lseek 0. */
			if (bytes == -1 && errno == EINVAL) {
				lseek(fd, 0, SEEK_SET);
				continue;
			}
#endif
			if (bytes == -1 || bytes == 0)
				return bytes;
			state->buffer_len = (size_t)bytes;
			state->buffer_pos = 0;
		}
		bytes = -1;
		memcpy(&packet, state->buffer + state->buffer_pos,
		    sizeof(packet));
		if (state->buffer_pos + packet.bh_caplen + packet.bh_hdrlen >
		    state->buffer_len)
			goto next; /* Packet beyond buffer, drop. */
		payload = state->buffer + state->buffer_pos + packet.bh_hdrlen;
		if (bpf_frame_bcast(ifp, payload) == 0)
			*flags |= BPF_BCAST;
		else
			*flags &= ~BPF_BCAST;
		if (packet.bh_caplen > len)
			bytes = (ssize_t)len;
		else
			bytes = (ssize_t)packet.bh_caplen;
		memcpy(data, payload, (size_t)bytes);
next:
		state->buffer_pos += BPF_WORDALIGN(packet.bh_hdrlen +
		    packet.bh_caplen);
		if (state->buffer_pos >= state->buffer_len) {
			state->buffer_len = state->buffer_pos = 0;
			*flags |= BPF_EOF;
		}
		if (bytes != -1)
			return bytes;
	}

	/* NOTREACHED */
}
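
/*
 * Intended reading pattern (a sketch): one read(2) from the kernel may
 * return several packets, so keep calling bpf_read() until it reports
 * BPF_EOF in *flags.
 *
 *	unsigned int flags = 0;
 *	ssize_t bytes;
 *
 *	do {
 *		bytes = bpf_read(ifp, fd, buf, sizeof(buf), &flags);
 *		if (bytes == -1 || bytes == 0)
 *			break;
 *		... process one packet of length bytes ...
 *	} while (!(flags & BPF_EOF));
 */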

int
bpf_attach(int fd, void *filter, unsigned int filter_len)
{
	struct bpf_program pf;

	/* Install the filter. */
	memset(&pf, 0, sizeof(pf));
	pf.bf_insns = filter;
	pf.bf_len = filter_len;
	return ioctl(fd, BIOCSETF, &pf);
}
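
/* bpf_arp() and bpf_bootp() below hand their compiled programs to
 * bpf_attach(); on Linux the equivalent attach lives in if-linux.c. */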
#endif

#ifndef __sun
/* SunOS is special too - sending via BPF goes nowhere. */
ssize_t
bpf_send(const struct interface *ifp, int fd, uint16_t protocol,
    const void *data, size_t len)
{
	struct iovec iov[2];
	struct ether_header eh;

	switch(ifp->family) {
	case ARPHRD_ETHER:
		memset(&eh.ether_dhost, 0xff, sizeof(eh.ether_dhost));
		memcpy(&eh.ether_shost, ifp->hwaddr, sizeof(eh.ether_shost));
		eh.ether_type = htons(protocol);
		iov[0].iov_base = &eh;
		iov[0].iov_len = sizeof(eh);
		break;
	default:
		iov[0].iov_base = NULL;
		iov[0].iov_len = 0;
		break;
	}
	iov[1].iov_base = UNCONST(data);
	iov[1].iov_len = len;
	return writev(fd, iov, 2);
}
#endif

int
bpf_close(struct interface *ifp, int fd)
{
	struct ipv4_state *state = IPV4_STATE(ifp);

	/* Rewind the buffer on closing. */
	state->buffer_len = state->buffer_pos = 0;
	return close(fd);
}

/* Normally this is also needed by bootp.
 * Once bootp uses it again, the ARP guard here can be removed. */
#ifdef ARP
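/* Worst case instruction count for bpf_cmp_hwaddr(): two instructions
 * (a load and a jump) per 32 bit word of the hardware address, the +2
 * allows for a trailing 16 bit and 8 bit remainder, and the final +1
 * is the closing "return failure" statement. */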
#define BPF_CMP_HWADDR_LEN	((((HWADDR_LEN / 4) + 2) * 2) + 1)
static unsigned int
bpf_cmp_hwaddr(struct bpf_insn *bpf, size_t bpf_len, size_t off,
    bool equal, uint8_t *hwaddr, size_t hwaddr_len)
{
	struct bpf_insn *bp;
	size_t maclen, nlft, njmps;
	uint32_t mac32;
	uint16_t mac16;
	uint8_t jt, jf;

	/* Calc the number of jumps */
	if ((hwaddr_len / 4) >= 128) {
		errno = EINVAL;
		return 0;
	}
	njmps = (hwaddr_len / 4) * 2; /* 2 instructions per check */
	/* We jump after the 1st check. */
	if (njmps)
		njmps -= 2;
	nlft = hwaddr_len % 4;
	if (nlft) {
		njmps += (nlft / 2) * 2;
		nlft = nlft % 2;
		if (nlft)
			njmps += 2;
	}

	/* Skip to positive finish. */
	njmps++;
	if (equal) {
		jt = (uint8_t)njmps;
		jf = 0;
	} else {
		jt = 0;
		jf = (uint8_t)njmps;
	}

	bp = bpf;
	for (; hwaddr_len > 0;
	     hwaddr += maclen, hwaddr_len -= maclen, off += maclen)
	{
		if (bpf_len < 3) {
			errno = ENOBUFS;
			return 0;
		}
		bpf_len -= 3;

		if (hwaddr_len >= 4) {
			maclen = sizeof(mac32);
			memcpy(&mac32, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htonl(mac32), jt, jf);
		} else if (hwaddr_len >= 2) {
			maclen = sizeof(mac16);
			memcpy(&mac16, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_H + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htons(mac16), jt, jf);
		} else {
			maclen = sizeof(*hwaddr);
			BPF_SET_STMT(bp, BPF_LD + BPF_B + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             *hwaddr, jt, jf);
		}
		if (jt)
			jt = (uint8_t)(jt - 2);
		if (jf)
			jf = (uint8_t)(jf - 2);
		bp++;
	}

	/* Last step is always return failure.
	 * Next step is a positive finish. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	return (unsigned int)(bp - bpf);
}
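
/*
 * For the "not from us" case (equal == false) used by bpf_arp() below,
 * a 6 byte Ethernet address expands to roughly:
 *
 *	LD  W IND off		first 4 bytes of the sender hardware address
 *	JEQ mac32  jt=0 jf=3	mismatch: cannot be us, skip past this block
 *	LD  H IND off+4		last 2 bytes
 *	JEQ mac16  jt=0 jf=1	mismatch: skip the drop below
 *	RET 0			every byte matched, so it is from us: drop
 */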
#endif

#ifdef ARP
static const struct bpf_insn bpf_arp_ether [] = {
	/* Check this is an ARP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	         offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_ARP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),

	/* Make sure the hardware family matches. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_hrd)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPHRD_ETHER, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure the hardware length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_hln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K,
	         sizeof(((struct ether_arp *)0)->arp_sha), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_ARP_ETHER_LEN	__arraycount(bpf_arp_ether)

static const struct bpf_insn bpf_arp_filter [] = {
	/* Make sure this is for IP. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_pro)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure this is an ARP REQUEST. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REQUEST, 2, 0),
	/* or ARP REPLY. */
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REPLY, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure the protocol length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_pln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, sizeof(in_addr_t), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_ARP_FILTER_LEN	__arraycount(bpf_arp_filter)

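/* Instruction budget for the protocol address checks in bpf_arp():
 * one load of the sender address, a (match, return) pair per address,
 * the null sender check plus the load of the target address (3),
 * another (match, return) pair per address, and the final return. */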
#define BPF_ARP_ADDRS_LEN	1 + (ARP_ADDRS_MAX * 2) + 3 + \
				(ARP_ADDRS_MAX * 2) + 1

#define BPF_ARP_LEN		BPF_ARP_ETHER_LEN + BPF_ARP_FILTER_LEN + \
				BPF_CMP_HWADDR_LEN + BPF_ARP_ADDRS_LEN

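/*
 * Compile and attach the ARP filter.  The watched IPv4 addresses are
 * baked into the program, so it has to be rebuilt (by calling this again
 * on the same descriptor) whenever the set of ARP states changes.
 */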
int
bpf_arp(struct interface *ifp, int fd)
{
	struct bpf_insn bpf[BPF_ARP_LEN];
	struct bpf_insn *bp;
	struct iarp_state *state;
	uint16_t arp_len;
	struct arp_state *astate;
	size_t naddrs;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
	case ARPHRD_ETHER:
		memcpy(bp, bpf_arp_ether, sizeof(bpf_arp_ether));
		bp += BPF_ARP_ETHER_LEN;
		arp_len = sizeof(struct ether_header)+sizeof(struct ether_arp);
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_arp_filter, sizeof(bpf_arp_filter));
	bp += BPF_ARP_FILTER_LEN;

	/* Ensure it's not from us. */
	bp += bpf_cmp_hwaddr(bp, BPF_CMP_HWADDR_LEN, sizeof(struct arphdr),
	                     false, ifp->hwaddr, ifp->hwlen);

	state = ARP_STATE(ifp);
	/* privsep may not have an initial state yet. */
	if (state == NULL || TAILQ_FIRST(&state->arp_states) == NULL)
		goto noaddrs;

	/* Match sender protocol address */
	BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
	             sizeof(struct arphdr) + ifp->hwlen);
	bp++;
	naddrs = 0;
	TAILQ_FOREACH(astate, &state->arp_states, next) {
		if (IN_IS_ADDR_UNSPECIFIED(&astate->addr))
			continue;
		if (++naddrs > ARP_ADDRS_MAX) {
			errno = ENOBUFS;
			logerr(__func__);
			break;
		}
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		             htonl(astate->addr.s_addr), 0, 1);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
		bp++;
	}

	/* If we didn't match sender, then we're only interested in
	 * ARP probes to us, so check the null host sender. */
	BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, INADDR_ANY, 1, 0);
	bp++;
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	/* Match target protocol address */
	BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
	             (sizeof(struct arphdr)
		     + (size_t)(ifp->hwlen * 2) + sizeof(in_addr_t)));
	bp++;
	naddrs = 0;
	TAILQ_FOREACH(astate, &state->arp_states, next) {
		if (++naddrs > ARP_ADDRS_MAX) {
			/* Already logged error above. */
			break;
		}
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		             htonl(astate->addr.s_addr), 0, 1);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
		bp++;
	}

noaddrs:
	/* Return nothing, no protocol address match. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}
#endif

#ifdef ARPHRD_NONE
static const struct bpf_insn bpf_bootp_none[] = {
};
#define BPF_BOOTP_NONE_LEN	__arraycount(bpf_bootp_none)
#endif

static const struct bpf_insn bpf_bootp_ether[] = {
	/* Make sure this is an IP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	         offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Advance to the IP header. */
	BPF_STMT(BPF_LDX + BPF_K, sizeof(struct ether_header)),
};
#define BPF_BOOTP_ETHER_LEN	__arraycount(bpf_bootp_ether)

#define BOOTP_MIN_SIZE		sizeof(struct ip) + sizeof(struct udphdr) + \
				sizeof(struct bootp)

static const struct bpf_insn bpf_bootp_filter[] = {
	/* Make sure it's an IPv4 packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0xf0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x40, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure it's a UDP packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct ip, ip_p)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure this isn't a fragment. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_off)),
	BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, 0x1fff, 0, 1),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Advance to the UDP header. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0x0f),
	BPF_STMT(BPF_ALU + BPF_MUL + BPF_K, 4),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_X, 0),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),

	/* Make sure it's from and to the right port. */
	BPF_STMT(BPF_LD + BPF_W + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (BOOTPS << 16) + BOOTPC, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
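
/* The final load above reads the UDP source and destination ports as a
 * single 32-bit word, so the one comparison against (BOOTPS << 16) + BOOTPC
 * checks "from the server port" and "to the client port" at the same time. */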

#define BPF_BOOTP_FILTER_LEN	__arraycount(bpf_bootp_filter)
#define BPF_BOOTP_CHADDR_LEN	((BOOTP_CHADDR_LEN / 4) * 3)
#define	BPF_BOOTP_XID_LEN	4 /* BOUND check is 4 instructions */

#define BPF_BOOTP_LEN		BPF_BOOTP_ETHER_LEN + BPF_BOOTP_FILTER_LEN \
				+ BPF_BOOTP_XID_LEN + BPF_BOOTP_CHADDR_LEN + 4

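/*
 * Compile and attach the BOOTP filter: any well formed, unfragmented
 * IPv4 UDP packet between the BOOTP server and client ports is accepted.
 * The per-interface chaddr and xid checks are compiled out below because
 * they won't work when the same IP exists on other interfaces.
 */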
int
bpf_bootp(struct interface *ifp, int fd)
{
#if 0
	const struct dhcp_state *state = D_CSTATE(ifp);
#endif
	struct bpf_insn bpf[BPF_BOOTP_LEN];
	struct bpf_insn *bp;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
#ifdef ARPHRD_NONE
	case ARPHRD_NONE:
		memcpy(bp, bpf_bootp_none, sizeof(bpf_bootp_none));
		bp += BPF_BOOTP_NONE_LEN;
		break;
#endif
	case ARPHRD_ETHER:
		memcpy(bp, bpf_bootp_ether, sizeof(bpf_bootp_ether));
		bp += BPF_BOOTP_ETHER_LEN;
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_bootp_filter, sizeof(bpf_bootp_filter));
	bp += BPF_BOOTP_FILTER_LEN;

	/* These checks won't work when same IP exists on other interfaces. */
#if 0
	if (ifp->hwlen <= sizeof(((struct bootp *)0)->chaddr))
		bp += bpf_cmp_hwaddr(bp, BPF_BOOTP_CHADDR_LEN,
		                     offsetof(struct bootp, chaddr),
				     true, ifp->hwaddr, ifp->hwlen);

	/* Make sure the BOOTP packet is for us. */
	if (state->state == DHS_BOUND) {
		/* If bound, we only expect FORCERENEW messages
		 * and they need to be unicast to us.
		 * Move back to the IP header in M0 and check dst. */
		BPF_SET_STMT(bp, BPF_LDX + BPF_W + BPF_MEM, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		             offsetof(struct ip, ip_dst));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		             htonl(state->lease.addr.s_addr), 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	} else {
		/* As we're not bound, we need to check xid to ensure
		 * it's a reply to our transaction. */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		             offsetof(struct bootp, xid));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		             state->xid, 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	}
#endif

	/* All passed, return the packet. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, BPF_WHOLEPACKET);
	bp++;

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}