/* xref: /dragonfly/contrib/dhcpcd/src/bpf.c (revision f984587a) */
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * dhcpcd: BPF arp and bootp filtering
 * Copyright (c) 2006-2023 Roy Marples <roy@marples.name>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <arpa/inet.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#ifdef __linux__
/* Special BPF snowflake. */
#include <linux/filter.h>
#define	bpf_insn		sock_filter
#else
#include <net/bpf.h>
#endif

#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "arp.h"
#include "bpf.h"
#include "dhcp.h"
#include "if.h"
#include "logerr.h"

/* BPF helper macros */
#ifdef __linux__
#define	BPF_WHOLEPACKET		0x7fffffff /* work around buggy LPF filters */
#else
#define	BPF_WHOLEPACKET		~0U
#endif
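
/*
 * A BPF filter's return value is the number of bytes of the matching
 * packet to pass to the reader; returning 0 drops the packet.  The accept
 * cases below therefore return BPF_WHOLEPACKET, or the exact frame length
 * where it is known (as in the ARP filter).
 */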

/* Macros to update the BPF structure */
#define	BPF_SET_STMT(insn, c, v) {				\
	(insn)->code = (c);					\
	(insn)->jt = 0;						\
	(insn)->jf = 0;						\
	(insn)->k = (uint32_t)(v);				\
}

#define	BPF_SET_JUMP(insn, c, v, t, f) {			\
	(insn)->code = (c);					\
	(insn)->jt = (t);					\
	(insn)->jf = (f);					\
	(insn)->k = (uint32_t)(v);				\
}

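/*
 * These are run-time counterparts to the BPF_STMT()/BPF_JUMP() initialiser
 * macros: they fill in an already allocated instruction so that filters can
 * be patched together on the fly.  Illustrative use only:
 *
 *	struct bpf_insn insn;
 *
 *	BPF_SET_STMT(&insn, BPF_RET + BPF_K, BPF_WHOLEPACKET);
 *	// insn.code == BPF_RET + BPF_K, insn.jt == insn.jf == 0,
 *	// insn.k == (uint32_t)BPF_WHOLEPACKET
 */
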
size_t
bpf_frame_header_len(const struct interface *ifp)
{

	switch (ifp->hwtype) {
	case ARPHRD_ETHER:
		return sizeof(struct ether_header);
	default:
		return 0;
	}
}

void *
bpf_frame_header_src(const struct interface *ifp, void *fh, size_t *len)
{
	uint8_t *f = fh;

	switch (ifp->hwtype) {
	case ARPHRD_ETHER:
		*len = sizeof(((struct ether_header *)0)->ether_shost);
		return f + offsetof(struct ether_header, ether_shost);
	default:
		*len = 0;
		errno = ENOTSUP;
		return NULL;
	}
}

void *
bpf_frame_header_dst(const struct interface *ifp, void *fh, size_t *len)
{
	uint8_t *f = fh;

	switch (ifp->hwtype) {
	case ARPHRD_ETHER:
		*len = sizeof(((struct ether_header *)0)->ether_dhost);
		return f + offsetof(struct ether_header, ether_dhost);
	default:
		*len = 0;
		errno = ENOTSUP;
		return NULL;
	}
}

static const uint8_t etherbcastaddr[] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

int
bpf_frame_bcast(const struct interface *ifp, const void *frame)
{

	switch (ifp->hwtype) {
	case ARPHRD_ETHER:
		return memcmp((const char *)frame +
		    offsetof(struct ether_header, ether_dhost),
		    etherbcastaddr, sizeof(etherbcastaddr));
	default:
		return -1;
	}
}
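
/*
 * Rough usage sketch (hypothetical caller, not taken from dhcpcd): pulling
 * the sender hardware address out of a frame returned by bpf_read() on an
 * Ethernet interface.
 *
 *	uint8_t frame[1500];	// illustrative buffer, filled by bpf_read()
 *	size_t hwlen;
 *	void *src = bpf_frame_header_src(ifp, frame, &hwlen);
 *
 *	if (src != NULL)
 *		;	// src points at hwlen bytes of sender hardware address
 */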

#ifndef __linux__
/* Linux is a special snowflake for opening, attaching and reading BPF.
 * See if-linux.c for the Linux-specific BPF functions. */

const char *bpf_name = "Berkeley Packet Filter";

struct bpf *
bpf_open(const struct interface *ifp,
    int (*filter)(const struct bpf *, const struct in_addr *),
    const struct in_addr *ia)
{
	struct bpf *bpf;
	struct bpf_version pv = { .bv_major = 0, .bv_minor = 0 };
	struct ifreq ifr = { .ifr_flags = 0 };
	int ibuf_len = 0;
#ifdef O_CLOEXEC
#define BPF_OPEN_FLAGS O_RDWR | O_NONBLOCK | O_CLOEXEC
#else
#define BPF_OPEN_FLAGS O_RDWR | O_NONBLOCK
#endif
#ifdef BIOCIMMEDIATE
	unsigned int flags;
#endif
#ifndef O_CLOEXEC
	int fd_opts;
#endif

	bpf = calloc(1, sizeof(*bpf));
	if (bpf == NULL)
		return NULL;
	bpf->bpf_ifp = ifp;

	/* /dev/bpf is a cloner on modern kernels */
	bpf->bpf_fd = open("/dev/bpf", BPF_OPEN_FLAGS);

	/* Support older kernels where /dev/bpf is not a cloner */
	if (bpf->bpf_fd == -1) {
		char device[32];
		int n = 0;

		do {
			snprintf(device, sizeof(device), "/dev/bpf%d", n++);
			bpf->bpf_fd = open(device, BPF_OPEN_FLAGS);
		} while (bpf->bpf_fd == -1 && errno == EBUSY);
	}

	if (bpf->bpf_fd == -1)
		goto eexit;

#ifndef O_CLOEXEC
	if ((fd_opts = fcntl(bpf->bpf_fd, F_GETFD)) == -1 ||
	    fcntl(bpf->bpf_fd, F_SETFD, fd_opts | FD_CLOEXEC) == -1)
		goto eexit;
#endif

	if (ioctl(bpf->bpf_fd, BIOCVERSION, &pv) == -1)
		goto eexit;
	if (pv.bv_major != BPF_MAJOR_VERSION ||
	    pv.bv_minor < BPF_MINOR_VERSION) {
		logerrx("BPF version mismatch - recompile");
		goto eexit;
	}

	strlcpy(ifr.ifr_name, ifp->name, sizeof(ifr.ifr_name));
	if (ioctl(bpf->bpf_fd, BIOCSETIF, &ifr) == -1)
		goto eexit;

#ifdef BIOCIMMEDIATE
	flags = 1;
	if (ioctl(bpf->bpf_fd, BIOCIMMEDIATE, &flags) == -1)
		goto eexit;
#endif

	if (filter(bpf, ia) != 0)
		goto eexit;

	/* Get the required BPF buffer length from the kernel. */
	if (ioctl(bpf->bpf_fd, BIOCGBLEN, &ibuf_len) == -1)
		goto eexit;
	bpf->bpf_size = (size_t)ibuf_len;
	bpf->bpf_buffer = malloc(bpf->bpf_size);
	if (bpf->bpf_buffer == NULL)
		goto eexit;
	return bpf;

eexit:
	if (bpf->bpf_fd != -1)
		close(bpf->bpf_fd);
	free(bpf);
	return NULL;
}
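
/*
 * Hypothetical call site (error handling elided) showing how a caller is
 * expected to obtain a handle; bpf_bootp() matches the filter callback
 * signature and ignores its address argument:
 *
 *	struct bpf *bpf = bpf_open(ifp, bpf_bootp, NULL);
 *
 *	if (bpf == NULL)
 *		logerr(__func__);
 *	// bpf->bpf_fd can now be polled and drained with bpf_read().
 */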

/* BPF requires that we read the entire buffer.
 * So we pass the buffer in the API so we can loop on >1 packet. */
ssize_t
bpf_read(struct bpf *bpf, void *data, size_t len)
{
	ssize_t bytes;
	struct bpf_hdr packet;
	const char *payload;

	bpf->bpf_flags &= ~BPF_EOF;
	for (;;) {
		if (bpf->bpf_len == 0) {
			bytes = read(bpf->bpf_fd, bpf->bpf_buffer,
			    bpf->bpf_size);
#if defined(__sun)
			/* After 2^31 bytes, the kernel offset overflows.
			 * To work around this bug, lseek 0. */
			if (bytes == -1 && errno == EINVAL) {
				lseek(bpf->bpf_fd, 0, SEEK_SET);
				continue;
			}
#endif
			if (bytes == -1 || bytes == 0)
				return bytes;
			bpf->bpf_len = (size_t)bytes;
			bpf->bpf_pos = 0;
		}
		bytes = -1;
		payload = (const char *)bpf->bpf_buffer + bpf->bpf_pos;
		memcpy(&packet, payload, sizeof(packet));
		if (bpf->bpf_pos + packet.bh_caplen + packet.bh_hdrlen >
		    bpf->bpf_len)
			goto next; /* Packet beyond buffer, drop. */
		payload += packet.bh_hdrlen;
		if (packet.bh_caplen > len)
			bytes = (ssize_t)len;
		else
			bytes = (ssize_t)packet.bh_caplen;
		if (bpf_frame_bcast(bpf->bpf_ifp, payload) == 0)
			bpf->bpf_flags |= BPF_BCAST;
		else
			bpf->bpf_flags &= ~BPF_BCAST;
		memcpy(data, payload, (size_t)bytes);
next:
		bpf->bpf_pos += BPF_WORDALIGN(packet.bh_hdrlen +
		    packet.bh_caplen);
		if (bpf->bpf_pos >= bpf->bpf_len) {
			bpf->bpf_len = bpf->bpf_pos = 0;
			bpf->bpf_flags |= BPF_EOF;
		}
		if (bytes != -1)
			return bytes;
	}

	/* NOTREACHED */
}
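
/*
 * Minimal read loop sketch (illustrative, not a real call site), assuming
 * poll(2) or similar has reported the descriptor readable:
 *
 *	uint8_t buf[1500];	// illustrative; callers size this per protocol
 *	ssize_t n;
 *
 *	do {
 *		n = bpf_read(bpf, buf, sizeof(buf));
 *		if (n > 0)
 *			;	// process one captured frame of n bytes
 *	} while (n > 0 && !(bpf->bpf_flags & BPF_EOF));
 */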

int
bpf_attach(int fd, void *filter, unsigned int filter_len)
{
	struct bpf_program pf = { .bf_insns = filter, .bf_len = filter_len };

	/* Install the filter. */
	return ioctl(fd, BIOCSETF, &pf);
}
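
/*
 * BIOCSETF installs the read filter.  Where the system also has BIOCSETWF
 * (OpenBSD), a separate write filter is installed below so that only
 * packets matching it can be sent through the descriptor.
 */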

#ifdef BIOCSETWF
static int
bpf_wattach(int fd, void *filter, unsigned int filter_len)
{
	struct bpf_program pf = { .bf_insns = filter, .bf_len = filter_len };

	/* Install the filter. */
	return ioctl(fd, BIOCSETWF, &pf);
}
#endif
#endif

#ifndef __sun
/* SunOS is special too - sending via BPF goes nowhere. */
ssize_t
bpf_send(const struct bpf *bpf, uint16_t protocol,
    const void *data, size_t len)
{
	struct iovec iov[2];
	struct ether_header eh;

	switch (bpf->bpf_ifp->hwtype) {
	case ARPHRD_ETHER:
		memset(&eh.ether_dhost, 0xff, sizeof(eh.ether_dhost));
		memcpy(&eh.ether_shost, bpf->bpf_ifp->hwaddr,
		    sizeof(eh.ether_shost));
		eh.ether_type = htons(protocol);
		iov[0].iov_base = &eh;
		iov[0].iov_len = sizeof(eh);
		break;
	default:
		iov[0].iov_base = NULL;
		iov[0].iov_len = 0;
		break;
	}
	iov[1].iov_base = UNCONST(data);
	iov[1].iov_len = len;
	return writev(bpf->bpf_fd, iov, 2);
}
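
/*
 * Illustrative call (hypothetical, not a dhcpcd call site): broadcasting a
 * raw ARP payload already built in arp_buf/arp_len.
 *
 *	if (bpf_send(bpf, ETHERTYPE_ARP, arp_buf, arp_len) == -1)
 *		logerr(__func__);
 */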
#endif

void
bpf_close(struct bpf *bpf)
{

	close(bpf->bpf_fd);
	free(bpf->bpf_buffer);
	free(bpf);
}

#ifdef ARP
#define BPF_CMP_HWADDR_LEN	((((HWADDR_LEN / 4) + 2) * 2) + 1)
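/*
 * Each hardware address is compared in 4-, 2- and 1-byte chunks; every chunk
 * costs two instructions (a load and a conditional jump) and the comparison
 * ends with one failure return, so ((HWADDR_LEN / 4) + 2) * 2 + 1 is a safe
 * upper bound on the instructions bpf_cmp_hwaddr() can emit.
 */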
static unsigned int
bpf_cmp_hwaddr(struct bpf_insn *bpf, size_t bpf_len, size_t off,
    bool equal, const uint8_t *hwaddr, size_t hwaddr_len)
{
	struct bpf_insn *bp;
	size_t maclen, nlft, njmps;
	uint32_t mac32;
	uint16_t mac16;
	uint8_t jt, jf;

	/* Calc the number of jumps */
	if ((hwaddr_len / 4) >= 128) {
		errno = EINVAL;
		return 0;
	}
	njmps = (hwaddr_len / 4) * 2; /* 2 instructions per check */
	/* We jump after the 1st check. */
	if (njmps)
		njmps -= 2;
	nlft = hwaddr_len % 4;
	if (nlft) {
		njmps += (nlft / 2) * 2;
		nlft = nlft % 2;
		if (nlft)
			njmps += 2;
	}

	/* Skip to positive finish. */
	njmps++;
	if (equal) {
		jt = (uint8_t)njmps;
		jf = 0;
	} else {
		jt = 0;
		jf = (uint8_t)njmps;
	}

	bp = bpf;
	for (; hwaddr_len > 0;
	     hwaddr += maclen, hwaddr_len -= maclen, off += maclen)
	{
		if (bpf_len < 3) {
			errno = ENOBUFS;
			return 0;
		}
		bpf_len -= 3;

		if (hwaddr_len >= 4) {
			maclen = sizeof(mac32);
			memcpy(&mac32, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htonl(mac32), jt, jf);
		} else if (hwaddr_len >= 2) {
			maclen = sizeof(mac16);
			memcpy(&mac16, hwaddr, maclen);
			BPF_SET_STMT(bp, BPF_LD + BPF_H + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             htons(mac16), jt, jf);
		} else {
			maclen = sizeof(*hwaddr);
			BPF_SET_STMT(bp, BPF_LD + BPF_B + BPF_IND, off);
			bp++;
			BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K,
			             *hwaddr, jt, jf);
		}
		if (jt)
			jt = (uint8_t)(jt - 2);
		if (jf)
			jf = (uint8_t)(jf - 2);
		bp++;
	}

	/* The last step is always a failure return; the positive finish is
	 * the next instruction appended by the caller. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	return (unsigned int)(bp - bpf);
}
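
/*
 * For a 6-byte Ethernet address with equal == false (the read-filter case:
 * "pass only if the sender hardware address is not ours"), the generated
 * program looks roughly like this:
 *
 *	0: ld  [x + off]		; first 4 bytes of the sender address
 *	1: jeq #mac[0..3], jt 0, jf 3	; differs -> jump to 5
 *	2: ldh [x + off + 4]		; last 2 bytes
 *	3: jeq #mac[4..5], jt 0, jf 1	; differs -> jump to 5
 *	4: ret #0			; every chunk matched ours -> drop
 *	5: (next instruction appended by the caller)
 */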
#endif

#ifdef ARP
static const struct bpf_insn bpf_arp_ether [] = {
	/* Check this is an ARP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	         offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_ARP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Load frame header length into X */
	BPF_STMT(BPF_LDX + BPF_W + BPF_IMM, sizeof(struct ether_header)),

	/* Make sure the hardware type matches. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_hrd)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPHRD_ETHER, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure the hardware length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_hln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K,
	         sizeof(((struct ether_arp *)0)->arp_sha), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_ARP_ETHER_LEN	__arraycount(bpf_arp_ether)

static const struct bpf_insn bpf_arp_filter [] = {
	/* Make sure this is for IP. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_pro)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure this is an ARP REQUEST. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct arphdr, ar_op)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REQUEST, 2, 0),
	/* or ARP REPLY. */
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ARPOP_REPLY, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
	/* Make sure the protocol length matches. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct arphdr, ar_pln)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, sizeof(in_addr_t), 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_ARP_FILTER_LEN	__arraycount(bpf_arp_filter)

/* One address is two checks of two statements. */
#define BPF_NADDRS		1
#define BPF_ARP_ADDRS_LEN	5 + ((BPF_NADDRS * 2) * 2)

#define BPF_ARP_LEN		BPF_ARP_ETHER_LEN + BPF_ARP_FILTER_LEN + \
				BPF_CMP_HWADDR_LEN + BPF_ARP_ADDRS_LEN
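
/*
 * The offsets used in bpf_arp_rw() below follow the ARP payload layout that
 * comes after struct arphdr on the wire:
 *
 *	sender hardware address at  sizeof(struct arphdr)
 *	sender protocol address at  sizeof(struct arphdr) + hwlen
 *	target hardware address at  sizeof(struct arphdr) + hwlen + 4
 *	target protocol address at  sizeof(struct arphdr) + (hwlen * 2) + 4
 *
 * where 4 == sizeof(in_addr_t), the protocol address length checked above.
 */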

static int
bpf_arp_rw(const struct bpf *bpf, const struct in_addr *ia, bool recv)
{
	const struct interface *ifp = bpf->bpf_ifp;
	struct bpf_insn buf[BPF_ARP_LEN + 1];
	struct bpf_insn *bp;
	uint16_t arp_len;

	bp = buf;
	/* Check frame header. */
	switch (ifp->hwtype) {
	case ARPHRD_ETHER:
		memcpy(bp, bpf_arp_ether, sizeof(bpf_arp_ether));
		bp += BPF_ARP_ETHER_LEN;
		arp_len = sizeof(struct ether_header)+sizeof(struct ether_arp);
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_arp_filter, sizeof(bpf_arp_filter));
	bp += BPF_ARP_FILTER_LEN;

	/* Ensure it's not from us. */
	bp += bpf_cmp_hwaddr(bp, BPF_CMP_HWADDR_LEN, sizeof(struct arphdr),
	                     !recv, ifp->hwaddr, ifp->hwlen);

	/* Match sender protocol address */
	BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND,
	    sizeof(struct arphdr) + ifp->hwlen);
	bp++;
	BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, htonl(ia->s_addr), 0, 1);
	bp++;
	BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
	bp++;

	/* If we didn't match sender, then we're only interested in
	 * ARP probes to us, so check the null host sender. */
	BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, INADDR_ANY, 1, 0);
	bp++;
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

	/* Match target protocol address */
	BPF_SET_STMT(bp, BPF_LD + BPF_W + BPF_IND, (sizeof(struct arphdr) +
	    (size_t)(ifp->hwlen * 2) + sizeof(in_addr_t)));
	bp++;
	BPF_SET_JUMP(bp, BPF_JMP + BPF_JEQ + BPF_K, htonl(ia->s_addr), 0, 1);
	bp++;
	BPF_SET_STMT(bp, BPF_RET + BPF_K, arp_len);
	bp++;

	/* No match, drop it */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, 0);
	bp++;

#ifdef BIOCSETWF
	if (!recv)
		return bpf_wattach(bpf->bpf_fd, buf, (unsigned int)(bp - buf));
#endif

	return bpf_attach(bpf->bpf_fd, buf, (unsigned int)(bp - buf));
}

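/*
 * Where BIOCSETWF is available (OpenBSD), install both the read and the
 * write filter and then BIOCLOCK the descriptor so that neither can be
 * replaced later; otherwise only the read filter is installed.
 */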
int
bpf_arp(const struct bpf *bpf, const struct in_addr *ia)
{

#ifdef BIOCSETWF
	if (bpf_arp_rw(bpf, ia, true) == -1 ||
	    bpf_arp_rw(bpf, ia, false) == -1 ||
	    ioctl(bpf->bpf_fd, BIOCLOCK) == -1)
		return -1;
	return 0;
#else
	return bpf_arp_rw(bpf, ia, true);
#endif
}
#endif

#ifdef ARPHRD_NONE
static const struct bpf_insn bpf_bootp_none[] = {
};
#define BPF_BOOTP_NONE_LEN	__arraycount(bpf_bootp_none)
#endif

static const struct bpf_insn bpf_bootp_ether[] = {
	/* Make sure this is an IP packet. */
	BPF_STMT(BPF_LD + BPF_H + BPF_ABS,
	         offsetof(struct ether_header, ether_type)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ETHERTYPE_IP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Advance to the IP header. */
	BPF_STMT(BPF_LDX + BPF_K, sizeof(struct ether_header)),
};
#define BPF_BOOTP_ETHER_LEN	__arraycount(bpf_bootp_ether)

static const struct bpf_insn bpf_bootp_base[] = {
	/* Make sure it's an IPv4 packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0xf0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x40, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure it's a UDP packet. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, offsetof(struct ip, ip_p)),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, IPPROTO_UDP, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Make sure this isn't a fragment. */
	BPF_STMT(BPF_LD + BPF_H + BPF_IND, offsetof(struct ip, ip_off)),
	BPF_JUMP(BPF_JMP + BPF_JSET + BPF_K, 0x1fff, 0, 1),
	BPF_STMT(BPF_RET + BPF_K, 0),

	/* Advance to the UDP header. */
	BPF_STMT(BPF_LD + BPF_B + BPF_IND, 0),
	BPF_STMT(BPF_ALU + BPF_AND + BPF_K, 0x0f),
	BPF_STMT(BPF_ALU + BPF_MUL + BPF_K, 4),
	BPF_STMT(BPF_ALU + BPF_ADD + BPF_X, 0),
	BPF_STMT(BPF_MISC + BPF_TAX, 0),
};
#define BPF_BOOTP_BASE_LEN	__arraycount(bpf_bootp_base)
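
/*
 * The last five instructions above compute the variable IP header length:
 * A = (first IP byte & 0x0f) * 4 is the header length in bytes, the frame
 * header length already held in X is added, and TAX stores the sum back in
 * X so that the BPF_IND loads which follow are relative to the UDP header.
 */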

static const struct bpf_insn bpf_bootp_read[] = {
	/* Make sure it's from and to the right port. */
	BPF_STMT(BPF_LD + BPF_W + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (BOOTPS << 16) + BOOTPC, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_BOOTP_READ_LEN	__arraycount(bpf_bootp_read)
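
/*
 * The 32-bit word at the start of the UDP header holds the source port in
 * its upper 16 bits and the destination port in its lower 16 bits, so
 * (BOOTPS << 16) + BOOTPC matches server-to-client traffic and, in the
 * write filter below, (BOOTPC << 16) + BOOTPS matches client-to-server.
 */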

#ifdef BIOCSETWF
static const struct bpf_insn bpf_bootp_write[] = {
	/* Make sure it's from and to the right port. */
	BPF_STMT(BPF_LD + BPF_W + BPF_IND, 0),
	BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, (BOOTPC << 16) + BOOTPS, 1, 0),
	BPF_STMT(BPF_RET + BPF_K, 0),
};
#define BPF_BOOTP_WRITE_LEN	__arraycount(bpf_bootp_write)
#endif

#define BPF_BOOTP_CHADDR_LEN	((BOOTP_CHADDR_LEN / 4) * 3)
#define	BPF_BOOTP_XID_LEN	4 /* BOUND check is 4 instructions */

#define BPF_BOOTP_LEN		BPF_BOOTP_ETHER_LEN + \
				BPF_BOOTP_BASE_LEN + BPF_BOOTP_READ_LEN + \
				BPF_BOOTP_XID_LEN + BPF_BOOTP_CHADDR_LEN + 4

static int
bpf_bootp_rw(const struct bpf *bpf, bool read)
{
	struct bpf_insn buf[BPF_BOOTP_LEN + 1];
	struct bpf_insn *bp;

	bp = buf;
	/* Check frame header. */
	switch (bpf->bpf_ifp->hwtype) {
#ifdef ARPHRD_NONE
	case ARPHRD_NONE:
		memcpy(bp, bpf_bootp_none, sizeof(bpf_bootp_none));
		bp += BPF_BOOTP_NONE_LEN;
		break;
#endif
	case ARPHRD_ETHER:
		memcpy(bp, bpf_bootp_ether, sizeof(bpf_bootp_ether));
		bp += BPF_BOOTP_ETHER_LEN;
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	/* Copy in the main filter. */
	memcpy(bp, bpf_bootp_base, sizeof(bpf_bootp_base));
	bp += BPF_BOOTP_BASE_LEN;

#ifdef BIOCSETWF
	if (!read) {
		memcpy(bp, bpf_bootp_write, sizeof(bpf_bootp_write));
		bp += BPF_BOOTP_WRITE_LEN;

		/* All passed, return the packet. */
		BPF_SET_STMT(bp, BPF_RET + BPF_K, BPF_WHOLEPACKET);
		bp++;

		return bpf_wattach(bpf->bpf_fd, buf, (unsigned int)(bp - buf));
	}
#else
	UNUSED(read);
#endif

	memcpy(bp, bpf_bootp_read, sizeof(bpf_bootp_read));
	bp += BPF_BOOTP_READ_LEN;

	/* All passed, return the packet. */
	BPF_SET_STMT(bp, BPF_RET + BPF_K, BPF_WHOLEPACKET);
	bp++;

	return bpf_attach(bpf->bpf_fd, buf, (unsigned int)(bp - buf));
}

int
bpf_bootp(const struct bpf *bpf, __unused const struct in_addr *ia)
{

#ifdef BIOCSETWF
	if (bpf_bootp_rw(bpf, true) == -1 ||
	    bpf_bootp_rw(bpf, false) == -1 ||
	    ioctl(bpf->bpf_fd, BIOCLOCK) == -1)
		return -1;
	return 0;
#else
#ifdef PRIVSEP
#if defined(__sun) /* Solaris cannot send via BPF. */
#elif defined(BIOCSETF)
#warning No BIOCSETWF support - a compromised BPF can be used as a raw socket
#else
#warning A compromised PF_PACKET socket can be used as a raw socket
#endif
#endif
	return bpf_bootp_rw(bpf, true);
#endif
}
712