/* xref: /dragonfly/contrib/dhcpcd/src/bpf.c (revision 7b1120e5) */
/*
 * dhcpcd: BPF arp and bootp filtering
 * Copyright (c) 2006-2018 Roy Marples <roy@marples.name>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ioctl.h>
#include <sys/socket.h>

#include <arpa/inet.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef __linux__
/* Special BPF snowflake. */
#include <linux/filter.h>
#define	bpf_insn		sock_filter
#else
#include <net/bpf.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "common.h"
#include "arp.h"
#include "bpf.h"
#include "dhcp.h"
#include "if.h"
#include "logerr.h"

#define	ARP_ADDRS_MAX	3

/* BPF helper macros */
#ifdef __linux__
#define	BPF_WHOLEPACKET		0x7fffffff /* work around buggy LPF filters */
#else
#define	BPF_WHOLEPACKET		~0U
#endif

/* Macros to update the BPF structure */
#define	BPF_SET_STMT(insn, c, v) {				\
	(insn)->code = (c);					\
	(insn)->jt = 0;						\
	(insn)->jf = 0;						\
	(insn)->k = (uint32_t)(v);				\
};

#define	BPF_SET_JUMP(insn, c, v, t, f) {			\
	(insn)->code = (c);					\
	(insn)->jt = (t);					\
	(insn)->jf = (f);					\
	(insn)->k = (uint32_t)(v);				\
};
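/*
 * Unlike the static BPF_STMT/BPF_JUMP initializers used for the constant
 * filter fragments below, these macros patch an instruction in place,
 * which lets the dynamic parts of a filter (hardware and protocol address
 * matches) be emitted at run time.
 */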

size_t
bpf_frame_header_len(const struct interface *ifp)
{

	switch(ifp->family) {
	case ARPHRD_ETHER:
		return sizeof(struct ether_header);
	default:
		return 0;
	}
}

#ifndef __linux__
/* Linux is a special snowflake for opening, attaching and reading BPF.
 * See if-linux.c for the Linux specific BPF functions. */

const char *bpf_name = "Berkeley Packet Filter";

int
bpf_open(struct interface *ifp, int (*filter)(struct interface *, int))
{
	struct ipv4_state *state;
	int fd = -1;
	struct ifreq ifr;
	int ibuf_len = 0;
	size_t buf_len;
	struct bpf_version pv;
#ifdef BIOCIMMEDIATE
	unsigned int flags;
#endif
#ifndef O_CLOEXEC
	int fd_opts;
#endif

#ifdef _PATH_BPF
	fd = open(_PATH_BPF, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
		| O_CLOEXEC
#endif
	);
#else
	char device[32];
	int n = 0;

	do {
		snprintf(device, sizeof(device), "/dev/bpf%d", n++);
		fd = open(device, O_RDWR | O_NONBLOCK
#ifdef O_CLOEXEC
				| O_CLOEXEC
#endif
		);
	} while (fd == -1 && errno == EBUSY);
#endif

	if (fd == -1)
		return -1;

#ifndef O_CLOEXEC
	if ((fd_opts = fcntl(fd, F_GETFD)) == -1 ||
	    fcntl(fd, F_SETFD, fd_opts | FD_CLOEXEC) == -1) {
		close(fd);
		return -1;
	}
#endif

	memset(&pv, 0, sizeof(pv));
	if (ioctl(fd, BIOCVERSION, &pv) == -1)
		goto eexit;
	if (pv.bv_major != BPF_MAJOR_VERSION ||
	    pv.bv_minor < BPF_MINOR_VERSION) {
		logerrx("BPF version mismatch - recompile");
		goto eexit;
	}

	if (filter(ifp, fd) != 0)
		goto eexit;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifp->name, sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
		goto eexit;

	/* Get the required BPF buffer length from the kernel. */
	if (ioctl(fd, BIOCGBLEN, &ibuf_len) == -1)
		goto eexit;
	buf_len = (size_t)ibuf_len;
	state = ipv4_getstate(ifp);
	if (state == NULL)
		goto eexit;
	if (state->buffer_size != buf_len) {
		void *nb;

		if ((nb = realloc(state->buffer, buf_len)) == NULL)
			goto eexit;
		state->buffer = nb;
		state->buffer_size = buf_len;
	}

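	/* Where supported, ask for packets to be delivered as they arrive
	 * rather than being held until the store buffer fills or a read
	 * timeout expires. */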
#ifdef BIOCIMMEDIATE
	flags = 1;
	if (ioctl(fd, BIOCIMMEDIATE, &flags) == -1)
		goto eexit;
#endif

	return fd;

eexit:
	close(fd);
	return -1;
}

/* BPF requires that we read the entire buffer.
 * So we pass the buffer in the API so we can loop on >1 packet. */
ssize_t
bpf_read(struct interface *ifp, int fd, void *data, size_t len,
    unsigned int *flags)
{
	ssize_t fl = (ssize_t)bpf_frame_header_len(ifp);
	ssize_t bytes;
	struct ipv4_state *state = IPV4_STATE(ifp);

	struct bpf_hdr packet;
	const char *payload;

	*flags &= ~BPF_EOF;
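	/* The kernel prefixes each captured packet in the buffer with a
	 * struct bpf_hdr and pads each record to BPF_WORDALIGN, so walk
	 * the buffer record by record, stripping the link-layer header
	 * before handing the payload back to the caller. */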
	for (;;) {
		if (state->buffer_len == 0) {
			bytes = read(fd, state->buffer, state->buffer_size);
#if defined(__sun)
			/* After 2^31 bytes, the kernel offset overflows.
			 * To work around this bug, lseek 0. */
			if (bytes == -1 && errno == EINVAL) {
				lseek(fd, 0, SEEK_SET);
				continue;
			}
#endif
			if (bytes == -1 || bytes == 0)
				return bytes;
			state->buffer_len = (size_t)bytes;
			state->buffer_pos = 0;
		}
		bytes = -1;
		memcpy(&packet, state->buffer + state->buffer_pos,
		    sizeof(packet));
		if (state->buffer_pos + packet.bh_caplen + packet.bh_hdrlen >
		    state->buffer_len)
			goto next; /* Packet beyond buffer, drop. */
		payload = state->buffer + state->buffer_pos +
		    packet.bh_hdrlen + fl;
		bytes = (ssize_t)packet.bh_caplen - fl;
		if ((size_t)bytes > len)
			bytes = (ssize_t)len;
		memcpy(data, payload, (size_t)bytes);
next:
		state->buffer_pos += BPF_WORDALIGN(packet.bh_hdrlen +
		    packet.bh_caplen);
		if (state->buffer_pos >= state->buffer_len) {
			state->buffer_len = state->buffer_pos = 0;
			*flags |= BPF_EOF;
		}
		if (bytes != -1)
			return bytes;
	}

	/* NOTREACHED */
}

int
bpf_attach(int fd, void *filter, unsigned int filter_len)
{
	struct bpf_program pf;

	/* Install the filter. */
	memset(&pf, 0, sizeof(pf));
	pf.bf_insns = filter;
	pf.bf_len = filter_len;
	return ioctl(fd, BIOCSETF, &pf);
}
#endif

#ifndef __sun
/* SunOS is special too - sending via BPF goes nowhere. */
ssize_t
bpf_send(const struct interface *ifp, int fd, uint16_t protocol,
    const void *data, size_t len)
{
	struct iovec iov[2];
	struct ether_header eh;

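	/* BPF expects a fully formed link-layer frame, so build the
	 * Ethernet header ourselves: broadcast destination, our own
	 * hardware address as source. */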
	switch(ifp->family) {
	case ARPHRD_ETHER:
		memset(&eh.ether_dhost, 0xff, sizeof(eh.ether_dhost));
		memcpy(&eh.ether_shost, ifp->hwaddr, sizeof(eh.ether_shost));
		eh.ether_type = htons(protocol);
		iov[0].iov_base = &eh;
		iov[0].iov_len = sizeof(eh);
		break;
	default:
		iov[0].iov_base = NULL;
		iov[0].iov_len = 0;
		break;
	}
	iov[1].iov_base = UNCONST(data);
	iov[1].iov_len = len;
	return writev(fd, iov, 2);
}
#endif

int
bpf_close(struct interface *ifp, int fd)
{
	struct ipv4_state *state = IPV4_STATE(ifp);

	/* Rewind the buffer on closing. */
	state->buffer_len = state->buffer_pos = 0;
	return close(fd);
}

/* Normally this is needed by bootp.
 * Once that uses this again, the ARP guard here can be removed. */
#ifdef ARP
static unsigned int
bpf_cmp_hwaddr(struct bpf_insn *bpf, size_t bpf_len, size_t off,
    bool equal, uint8_t *hwaddr, size_t hwaddr_len)
{
	struct bpf_insn *bp;
	size_t maclen, nlft, njmps;
	uint32_t mac32;
	uint16_t mac16;
	uint8_t jt, jf;

	/* Calc the number of jumps */
	if ((hwaddr_len / 4) >= 128) {
		errno = EINVAL;
		return 0;
	}
	njmps = (hwaddr_len / 4) * 2; /* 2 instructions per check */
	/* We jump after the 1st check. */
	if (njmps)
		njmps -= 2;
	nlft = hwaddr_len % 4;
	if (nlft) {
		njmps += (nlft / 2) * 2;
		nlft = nlft % 2;
		if (nlft)
			njmps += 2;
	}

	/* Skip to positive finish. */
	njmps++;
	if (equal) {
		jt = (uint8_t)njmps;
		jf = 0;
	} else {
		jt = 0;
		jf = (uint8_t)njmps;
	}

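	/* jt/jf hold the number of instructions remaining between the
	 * current compare and the positive finish past the final failure
	 * return; each load+compare pair emitted below brings that point
	 * two instructions closer, so the offsets shrink by 2 per loop. */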
	bp = bpf;
	for (; hwaddr_len > 0;
	     hwaddr += maclen, hwaddr_len -= maclen, off += maclen)
	{
		if (bpf_len < 3) {
			errno = ENOBUFS;
			return 0;
		}
		bpf_len -= 3;

		if (hwaddr_len >= 4) {
			maclen = sizeof(mac32);
			memcpy(&mac32, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htonl(mac32), jt, jf);
		} else if (hwaddr_len >= 2) {
			maclen = sizeof(mac16);
			memcpy(&mac16, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_H + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htons(mac16), jt, jf);
		} else {
			maclen = sizeof(*hwaddr);
			BPF_SET_STMT(bp, BPF_LD + BPF_B + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             *hwaddr, jt, jf);
		}
		if (jt)
			jt = (uint8_t)(jt - 2);
		if (jf)
			jf = (uint8_t)(jf - 2);
		bp++;
	}

	/* Last step is always return failure.
	 * Next step is a positive finish. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	return (unsigned int)(bp - bpf);
}
#endif

#ifdef ARP

static const struct bpf_insn bpf_arp_ether [] = {
	/* Ensure packet is at least correct size. */
	BPF_STMT(BPF_LD + BPF_W + BPF_LEN, 0),
	BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, sizeof(struct ether_arp), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Check this is an ARP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	         offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_ARP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),

	/* Make sure the hardware family matches. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_hrd)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPHRD_ETHER, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure the hardware length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_hln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K,
	         sizeof(((struct ether_arp *)0)->arp_sha), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define bpf_arp_ether_len	__arraycount(bpf_arp_ether)

static const struct bpf_insn bpf_arp_filter [] = {
	/* Make sure this is for IP. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_pro)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure this is an ARP REQUEST. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REQUEST, 2, 0),
	/* or ARP REPLY. */
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REPLY, 1, 1),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure the protocol length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_pln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, sizeof(in_addr_t), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define bpf_arp_filter_len	__arraycount(bpf_arp_filter)
#define bpf_arp_extra		((((ARP_ADDRS_MAX + 1) * 2) * 2) + 2)
#define bpf_arp_hw		((((HWADDR_LEN / 4) + 2) * 2) + 1)
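/* bpf_arp_extra and bpf_arp_hw are worst-case instruction counts for the
 * protocol and hardware address matches emitted at run time; they size
 * the on-stack program built in bpf_arp() below. */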

int
bpf_arp(struct interface *ifp, int fd)
{
	struct bpf_insn bpf[3 + bpf_arp_filter_len + bpf_arp_hw + bpf_arp_extra];
	struct bpf_insn *bp;
	struct iarp_state *state;
	uint16_t arp_len;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
	case ARPHRD_ETHER:
		memcpy(bp, bpf_arp_ether, sizeof(bpf_arp_ether));
		bp += bpf_arp_ether_len;
		arp_len = sizeof(struct ether_header) + sizeof(struct ether_arp);
		break;
	default:
		errno = EINVAL;
		return -1;
	}
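	/* On a match the filter returns arp_len, so the whole frame header
	 * plus a full ARP payload is captured. */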

	/* Copy in the main filter. */
	memcpy(bp, bpf_arp_filter, sizeof(bpf_arp_filter));
	bp += bpf_arp_filter_len;

	/* Ensure it's not from us. */
	bp += bpf_cmp_hwaddr(bp, bpf_arp_hw, sizeof(struct arphdr),
	                     false, ifp->hwaddr, ifp->hwlen);

	state = ARP_STATE(ifp);
	if (TAILQ_FIRST(&state->arp_states)) {
		struct arp_state *astate;
		size_t naddrs;

		/* Match sender protocol address */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		             sizeof(struct arphdr) + ifp->hwlen);
		bp++;
		naddrs = 0;
		TAILQ_FOREACH(astate, &state->arp_states, next) {
			if (++naddrs > ARP_ADDRS_MAX) {
				errno = ENOBUFS;
				logerr(__func__);
				break;
			}
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htonl(astate->addr.s_addr), 0, 1);
			bp++;
			BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
			bp++;
		}

		/* If we didn't match sender, then we're only interested in
		 * ARP probes to us, so check the null host sender. */
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, INADDR_ANY, 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;

		/* Match target protocol address */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		             (sizeof(struct arphdr)
			     + (size_t)(ifp->hwlen * 2) + sizeof(in_addr_t)));
		bp++;
		naddrs = 0;
		TAILQ_FOREACH(astate, &state->arp_states, next) {
			if (++naddrs > ARP_ADDRS_MAX) {
				/* Already logged error above. */
				break;
			}
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htonl(astate->addr.s_addr), 0, 1);
			bp++;
			BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
			bp++;
		}

		/* Return nothing, no protocol address match. */
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	}

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}
#endif

static const struct bpf_insn bpf_bootp_ether[] = {
	/* Make sure this is an IP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	         offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X. */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),
	/* Copy to M0. */
	BPF_STMT(BPF_STX, 0),
};
#define BPF_BOOTP_ETHER_LEN	__arraycount(bpf_bootp_ether)

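/* Scratch memory convention for the bootp program: X holds the offset of
 * the header currently being inspected, M0 holds the link-layer header
 * length, M2 the IP total length and M3 the offset of the UDP header. */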
static const struct bpf_insn bpf_bootp_filter[] = {
	/* Make sure it's an optionless IPv4 packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x45, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure it's a UDP packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct ip, ip_p)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure this isn't a fragment. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_off)),
	BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, 0x1fff, 0, 1),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Store IP location in M1. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_len)),
	BPF_STMT(BPF_ST, 1),

	/* Store IP length in M2. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_len)),
	BPF_STMT(BPF_ST, 2),

	/* Advance to the UDP header. */
	BPF_STMT(BPF_MISC + BPF_TXA, 0),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_K, sizeof(struct ip)),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),

	/* Store X in M3. */
	BPF_STMT(BPF_STX, 3),

	/* Make sure it's from and to the right port. */
	BPF_STMT(BPF_LD + BPF_W + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (BOOTPS << 16) + BOOTPC, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Store UDP length in X. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct udphdr, uh_ulen)),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),
	/* Copy IP length in M2 to A. */
	BPF_STMT(BPF_LD + BPF_MEM, 2),
	/* Ensure IP length - IP header size == UDP length. */
	BPF_STMT(BPF_ALU + BPF_SUB + BPF_K, sizeof(struct ip)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_X, 0, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Advance to the BOOTP packet (UDP X is in M3). */
	BPF_STMT(BPF_LD + BPF_MEM, 3),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_K, sizeof(struct udphdr)),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),

	/* Make sure it's BOOTREPLY. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct bootp, op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, BOOTREPLY, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};

#define BPF_BOOTP_FILTER_LEN	__arraycount(bpf_bootp_filter)
#define BPF_BOOTP_CHADDR_LEN	((BOOTP_CHADDR_LEN / 4) * 3)
#define	BPF_BOOTP_XID_LEN	4 /* BOUND check is 4 instructions */

#define BPF_BOOTP_LEN		BPF_BOOTP_ETHER_LEN + BPF_BOOTP_FILTER_LEN \
				+ BPF_BOOTP_XID_LEN + BPF_BOOTP_CHADDR_LEN + 4
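/* BPF_BOOTP_LEN is the worst-case instruction count for the bootp program,
 * including the chaddr and xid checks that are currently compiled out. */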

int
bpf_bootp(struct interface *ifp, int fd)
{
#if 0
	const struct dhcp_state *state = D_CSTATE(ifp);
#endif
	struct bpf_insn bpf[BPF_BOOTP_LEN];
	struct bpf_insn *bp;

	if (fd == -1)
		return 0;

	bp = bpf;
	/* Check frame header. */
	switch(ifp->family) {
	case ARPHRD_ETHER:
		memcpy(bp, bpf_bootp_ether, sizeof(bpf_bootp_ether));
		bp += BPF_BOOTP_ETHER_LEN;
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_bootp_filter, sizeof(bpf_bootp_filter));
	bp += BPF_BOOTP_FILTER_LEN;

	/* These checks won't work when same IP exists on other interfaces. */
#if 0
	if (ifp->hwlen <= sizeof(((struct bootp *)0)->chaddr))
		bp += bpf_cmp_hwaddr(bp, BPF_BOOTP_CHADDR_LEN,
		                     offsetof(struct bootp, chaddr),
				     true, ifp->hwaddr, ifp->hwlen);

	/* Make sure the BOOTP packet is for us. */
	if (state->state == DHS_BOUND) {
		/* If bound, we only expect FORCERENEW messages
		 * and they need to be unicast to us.
		 * Move back to the IP header in M0 and check dst. */
		BPF_SET_STMT(bp, BPF_LDX + BPF_W + BPF_MEM, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		             offsetof(struct ip, ip_dst));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		             htonl(state->lease.addr.s_addr), 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	} else {
		/* As we're not bound, we need to check xid to ensure
		 * it's a reply to our transaction. */
		BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
		             offsetof(struct bootp, xid));
		bp++;
		BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
		             state->xid, 1, 0);
		bp++;
		BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
		bp++;
	}
#endif

	/* All passed, return the packet
	 * (Frame length in M0, IP length in M2). */
	BPF_SET_STMT(bp, BPF_LD + BPF_MEM, 0);
	bp++;
	BPF_SET_STMT(bp, BPF_LDX + BPF_MEM, 2);
	bp++;
	BPF_SET_STMT(bp, BPF_ALU + BPF_ADD + BPF_X, 0);
	bp++;
	BPF_SET_STMT(bp, BPF_RET + BPF_A, 0);
	bp++;

	return bpf_attach(fd, bpf, (unsigned int)(bp - bpf));
}
684