1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 Facebook */
3 
4 #include <errno.h>
5 #include <string.h>
6 #include <stdbool.h>
7 #include <linux/bpf.h>
8 #include <bpf/bpf_helpers.h>
9 #include <linux/if_ether.h>
10 #include "bpf_misc.h"
11 #include "bpf_kfuncs.h"
12 
13 char _license[] SEC("license") = "GPL";
14 
/* Value type embedding a dynptr; used to show that a struct containing a
 * dynptr cannot be copied into a map (see add_dynptr_to_map2).
 */
struct test_info {
	int x;
	struct bpf_dynptr ptr;
};

/* Map whose value is a bare dynptr; target of add_dynptr_to_map1. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct bpf_dynptr);
} array_map1 SEC(".maps");

/* Map whose value embeds a dynptr; target of add_dynptr_to_map2. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct test_info);
} array_map2 SEC(".maps");

/* Plain __u32 map; get_map_val_dynptr() builds a mem dynptr over its value. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} array_map3 SEC(".maps");

/* __u64 map used by the inline-asm pruning/invalidated-slot tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} array_map4 SEC(".maps");

struct sample {
	int pid;
	long value;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

/* Globals used as sinks/sources so the verifier cannot optimize away
 * the loads and stores the tests rely on.
 */
int err, val;
60 
/* Helper: initialize *ptr as an unreferenced mem dynptr over array_map3's
 * value. Returns 0 on success, -ENOENT if the map lookup fails.
 */
static int get_map_val_dynptr(struct bpf_dynptr *ptr)
{
	__u32 key = 0, *map_val;

	bpf_map_update_elem(&array_map3, &key, &val, 0);

	map_val = bpf_map_lookup_elem(&array_map3, &key);
	if (!map_val)
		return -ENOENT;

	bpf_dynptr_from_mem(map_val, sizeof(*map_val), 0, ptr);

	return 0;
}
75 
/* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
 * bpf_ringbuf_submit/discard_dynptr call
 */
SEC("?raw_tp")
__failure __msg("Unreleased reference id=2")
int ringbuf_missing_release1(void *ctx)
{
	struct bpf_dynptr ptr = {};

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* missing a call to bpf_ringbuf_discard/submit_dynptr */

	return 0;
}
91 
/* Like ringbuf_missing_release1, but with two reservations: only ptr1 is
 * released on the success path, so ptr2's reference must be flagged.
 */
SEC("?raw_tp")
__failure __msg("Unreleased reference id=4")
int ringbuf_missing_release2(void *ctx)
{
	struct bpf_dynptr ptr1, ptr2;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr1);
	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);

	sample = bpf_dynptr_data(&ptr1, 0, sizeof(*sample));
	if (!sample) {
		bpf_ringbuf_discard_dynptr(&ptr1, 0);
		bpf_ringbuf_discard_dynptr(&ptr2, 0);
		return 0;
	}

	bpf_ringbuf_submit_dynptr(&ptr1, 0);

	/* missing a call to bpf_ringbuf_discard/submit_dynptr on ptr2 */

	return 0;
}
115 
/* bpf_loop() callback that reserves a ringbuf dynptr and deliberately
 * never releases it (used by ringbuf_missing_release_callback).
 */
static int missing_release_callback_fn(__u32 index, void *data)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* missing a call to bpf_ringbuf_discard/submit_dynptr */

	return 0;
}
126 
/* Any dynptr initialized within a callback must have bpf_dynptr_put called */
SEC("?raw_tp")
__failure __msg("Unreleased reference id")
int ringbuf_missing_release_callback(void *ctx)
{
	bpf_loop(10, missing_release_callback_fn, NULL, 0);
	return 0;
}
135 
/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int ringbuf_release_uninit_dynptr(void *ctx)
{
	struct bpf_dynptr ptr;

	/* this should fail */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
148 
/* A dynptr can't be used after it has been invalidated */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int use_after_invalid(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);

	/* first read while ptr is still valid is fine */
	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	/* submitting releases the reference and invalidates ptr */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	return 0;
}
168 
/* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
SEC("?raw_tp")
__failure __msg("type=mem expected=ringbuf_mem")
int ringbuf_invalid_api(void *ctx)
{
	struct bpf_dynptr ptr;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
	sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample)
		goto done;

	sample->pid = 123;

	/* invalid API use. need to use dynptr API to submit/discard */
	bpf_ringbuf_submit(sample, 0);

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
191 
/* Can't add a dynptr to a map */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int add_dynptr_to_map1(void *ctx)
{
	struct bpf_dynptr ptr;
	int key = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* this should fail */
	bpf_map_update_elem(&array_map1, &key, &ptr, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
209 
/* Can't add a struct with an embedded dynptr to a map */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int add_dynptr_to_map2(void *ctx)
{
	struct test_info x;
	int key = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &x.ptr);

	/* this should fail */
	bpf_map_update_elem(&array_map2, &key, &x, 0);

	bpf_ringbuf_submit_dynptr(&x.ptr, 0);

	return 0;
}
227 
/* A data slice can't be accessed out of bounds */
SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_ringbuf(void *ctx)
{
	struct bpf_dynptr ptr;
	void *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	data  = bpf_dynptr_data(&ptr, 0, 8);
	if (!data)
		goto done;

	/* can't index out of bounds of the data slice */
	val = *((char *)data + 8);

done:
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}
249 
/* A data slice can't be accessed out of bounds */
SEC("?tc")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_skb(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	/* this should fail: hdr + 1 is one past the end of the slice */
	*(__u8*)(hdr + 1) = 1;

	return SK_PASS;
}
270 
/* A slice over a map value can't be accessed out of bounds either */
SEC("?raw_tp")
__failure __msg("value is outside of the allowed memory range")
int data_slice_out_of_bounds_map_value(void *ctx)
{
	__u32 map_val;
	struct bpf_dynptr ptr;
	void *data;

	get_map_val_dynptr(&ptr);

	data  = bpf_dynptr_data(&ptr, 0, sizeof(map_val));
	if (!data)
		return 0;

	/* can't index out of bounds of the data slice */
	val = *((char *)data + (sizeof(map_val) + 1));

	return 0;
}
290 
/* A data slice can't be used after it has been released */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int data_slice_use_after_release1(void *ctx)
{
	struct bpf_dynptr ptr;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
	sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
	if (!sample)
		goto done;

	sample->pid = 123;

	/* submitting releases the sample; its slice must be invalidated */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	/* this should fail */
	val = sample->pid;

	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
317 
/* A data slice can't be used after it has been released.
 *
 * This tests the case where the data slice tracks a dynptr (ptr2)
 * that is at a non-zero offset from the frame pointer (ptr1 is at fp,
 * ptr2 is at fp - 16).
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int data_slice_use_after_release2(void *ctx)
{
	struct bpf_dynptr ptr1, ptr2;
	struct sample *sample;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr1);
	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);

	sample = bpf_dynptr_data(&ptr2, 0, sizeof(*sample));
	if (!sample)
		goto done;

	sample->pid = 23;

	bpf_ringbuf_submit_dynptr(&ptr2, 0);

	/* this should fail */
	sample->pid = 23;

	bpf_ringbuf_submit_dynptr(&ptr1, 0);

	return 0;

done:
	bpf_ringbuf_discard_dynptr(&ptr2, 0);
	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	return 0;
}
354 
/* A data slice must be first checked for NULL */
SEC("?raw_tp")
__failure __msg("invalid mem access 'mem_or_null'")
int data_slice_missing_null_check1(void *ctx)
{
	struct bpf_dynptr ptr;
	void *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	data  = bpf_dynptr_data(&ptr, 0, 8);

	/* missing if (!data) check */

	/* this should fail */
	*(__u8 *)data = 3;

	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}
375 
/* A data slice can't be dereferenced if it wasn't checked for null.
 * Checking data1 must not mark the separate slice data2 as non-NULL.
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'mem_or_null'")
int data_slice_missing_null_check2(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 *data1, *data2;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);

	data1 = bpf_dynptr_data(&ptr, 0, 8);
	data2 = bpf_dynptr_data(&ptr, 0, 8);
	if (data1)
		/* this should fail */
		*data2 = 3;

	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
395 
/* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
 * dynptr argument
 */
SEC("?raw_tp")
__failure __msg("invalid indirect read from stack")
int invalid_helper1(void *ctx)
{
	struct bpf_dynptr ptr;

	get_map_val_dynptr(&ptr);

	/* this should fail */
	bpf_strncmp((const char *)&ptr, sizeof(ptr), "hello!");

	return 0;
}
412 
/* A dynptr can't be passed into a helper function at a non-zero offset */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset=-8")
int invalid_helper2(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];

	get_map_val_dynptr(&ptr);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
	return 0;
}
427 
/* A bpf_dynptr is invalidated if it's been written into */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int invalid_write1(void *ctx)
{
	struct bpf_dynptr ptr;
	void *data;
	__u8 x = 0;

	get_map_val_dynptr(&ptr);

	/* direct write into the dynptr's stack slots invalidates it */
	memcpy(&ptr, &x, sizeof(x));

	/* this should fail */
	data = bpf_dynptr_data(&ptr, 0, 1);
	__sink(data);

	return 0;
}
447 
/*
 * A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
 * offset
 */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write2(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];
	__u8 x = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	/* overwrite one byte in the middle of the referenced dynptr */
	memcpy((void *)&ptr + 8, &x, sizeof(x));

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
471 
/*
 * A bpf_dynptr can't be used as a dynptr if it has been written into at a
 * non-const offset
 */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write3(void *ctx)
{
	struct bpf_dynptr ptr;
	char stack_buf[16];
	unsigned long len;
	__u8 x = 0;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);

	/* len is a bounded but non-constant value (0..15) */
	memcpy(stack_buf, &val, sizeof(val));
	len = stack_buf[0] & 0xf;

	memcpy((void *)&ptr + len, &x, sizeof(x));

	/* this should fail */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
497 
/* bpf_loop() callback that writes through its data pointer; when data
 * points at a dynptr on the caller's stack, that write must invalidate it.
 */
static int invalid_write4_callback(__u32 index, void *data)
{
	*(__u32 *)data = 123;

	return 0;
}
504 
/* If the dynptr is written into in a callback function, it should
 * be invalidated as a dynptr
 */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int invalid_write4(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	bpf_loop(10, invalid_write4_callback, &ptr, 0);

	/* this should fail */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
523 
/* A globally-defined bpf_dynptr can't be used (it must reside as a stack frame) */
struct bpf_dynptr global_dynptr;

SEC("?raw_tp")
__failure __msg("type=map_value expected=fp")
int global(void *ctx)
{
	/* this should fail: global_dynptr lives in a map-backed data section */
	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &global_dynptr);

	bpf_ringbuf_discard_dynptr(&global_dynptr, 0);

	return 0;
}
538 
/* A direct read should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read1(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	/* this should fail: dynptr internals are opaque to the program */
	val = *(int *)&ptr;

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}
555 
/* A direct read at an offset should fail */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset")
int invalid_read2(void *ctx)
{
	struct bpf_dynptr ptr;
	char read_data[64];

	get_map_val_dynptr(&ptr);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);

	return 0;
}
571 
/* A direct read at an offset into the lower stack slot should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read3(void *ctx)
{
	struct bpf_dynptr ptr1, ptr2;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr1);
	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr2);

	/* this should fail */
	memcpy(&val, (void *)&ptr1 + 8, sizeof(val));

	bpf_ringbuf_discard_dynptr(&ptr1, 0);
	bpf_ringbuf_discard_dynptr(&ptr2, 0);

	return 0;
}
590 
/* bpf_loop() callback that reads through its data pointer; when data
 * points at a dynptr on the caller's stack, the read must be rejected.
 */
static int invalid_read4_callback(__u32 index, void *data)
{
	/* this should fail */
	val = *(__u32 *)data;

	return 0;
}
598 
/* A direct read within a callback function should fail */
SEC("?raw_tp")
__failure __msg("invalid read from stack")
int invalid_read4(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);

	bpf_loop(10, invalid_read4_callback, &ptr, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}
614 
/* Initializing a dynptr on an offset should fail */
SEC("?raw_tp")
__failure __msg("cannot pass in dynptr at an offset=0")
int invalid_offset(void *ctx)
{
	struct bpf_dynptr ptr;

	/* this should fail: &ptr + 1 is past the dynptr's stack slots */
	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr + 1);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}
629 
/* Can't release a dynptr twice */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int release_twice(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	/* this second release should fail */
	bpf_ringbuf_discard_dynptr(&ptr, 0);

	return 0;
}
646 
/* bpf_loop() callback that discards the dynptr passed in via data; the
 * caller has already discarded it, so this is a double release.
 */
static int release_twice_callback_fn(__u32 index, void *data)
{
	/* this should fail */
	bpf_ringbuf_discard_dynptr(data, 0);

	return 0;
}
654 
/* Test that releasing a dynptr twice, where one of the releases happens
 * within a callback function, fails
 */
SEC("?raw_tp")
__failure __msg("arg 1 is an unacquired reference")
int release_twice_callback(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 32, 0, &ptr);

	bpf_ringbuf_discard_dynptr(&ptr, 0);

	bpf_loop(10, release_twice_callback_fn, &ptr, 0);

	return 0;
}
672 
/* Reject unsupported local mem types for dynptr_from_mem API */
SEC("?raw_tp")
__failure __msg("Unsupported reg type fp for bpf_dynptr_from_mem data")
int dynptr_from_mem_invalid_api(void *ctx)
{
	struct bpf_dynptr ptr;
	int x = 0;

	/* this should fail: x lives on the stack, not in a map */
	bpf_dynptr_from_mem(&x, sizeof(x), 0, &ptr);

	return 0;
}
686 
/* State pruning must not merge a path that scribbles over a referenced
 * dynptr's stack slot (fp-16) with a path that leaves it intact; the
 * overwrite on the pjmp1 branch has to be reported.
 */
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_overwrite(struct __sk_buff *ctx)
{
	asm volatile (
		"r9 = 0xeB9F;				\
		 r6 = %[ringbuf] ll;			\
		 r1 = r6;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 if r0 == 0 goto pjmp1;			\
		 goto pjmp2;				\
	pjmp1:						\
		 *(u64 *)(r10 - 16) = r9;		\
	pjmp2:						\
		 r1 = r10;				\
		 r1 += -16;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
717 
/* Companion to dynptr_pruning_overwrite: here the two branches differ only
 * by a no-op scalar move (r9 = r9), so the states should still be treated
 * as equivalent and pruned ("12: safe" expected in the log).
 */
SEC("?tc")
__success __msg("12: safe") __log_level(2)
int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
{
	asm volatile (
		"r9 = 0xeB9F;				\
		 r6 = %[ringbuf] ll;			\
		 r1 = r6;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 if r0 == 0 goto stjmp1;		\
		 goto stjmp2;				\
	stjmp1:						\
		 r9 = r9;				\
	stjmp2:						\
		 r1 = r10;				\
		 r1 += -16;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
748 
/* One branch holds a referenced ringbuf dynptr at fp-16 while the other
 * rebuilds the same slots as a mem dynptr via bpf_dynptr_from_mem; pruning
 * must not confuse the two dynptr types — the overwrite of the referenced
 * dynptr's slots on the tjmp2 path has to be rejected.
 */
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
{
	asm volatile (
		"r6 = %[array_map4] ll;			\
		 r7 = %[ringbuf] ll;			\
		 r1 = r6;				\
		 r2 = r10;				\
		 r2 += -8;				\
		 r9 = 0;				\
		 *(u64 *)(r2 + 0) = r9;			\
		 r3 = r10;				\
		 r3 += -24;				\
		 r9 = 0xeB9FeB9F;			\
		 *(u64 *)(r10 - 16) = r9;		\
		 *(u64 *)(r10 - 24) = r9;		\
		 r9 = 0;				\
		 r4 = 0;				\
		 r8 = r2;				\
		 call %[bpf_map_update_elem];		\
		 r1 = r6;				\
		 r2 = r8;				\
		 call %[bpf_map_lookup_elem];		\
		 if r0 != 0 goto tjmp1;			\
		 exit;					\
	tjmp1:						\
		 r8 = r0;				\
		 r1 = r7;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 r0 = *(u64 *)(r0 + 0);			\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 if r0 == 0 goto tjmp2;			\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 r8 = r8;				\
		 goto tjmp3;				\
	tjmp2:						\
		 *(u64 *)(r10 - 8) = r9;		\
		 *(u64 *)(r10 - 16) = r9;		\
		 r1 = r8;				\
		 r1 += 8;				\
		 r2 = 0;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_dynptr_from_mem];		\
	tjmp3:						\
		 r1 = r10;				\
		 r1 += -16;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_map_update_elem),
		  __imm(bpf_map_lookup_elem),
		  __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_dynptr_from_mem),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(array_map4),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
820 
/* A dynptr placed at a variable stack offset (fp-32 + r8, where r8 is a
 * bounded but unknown scalar) must be rejected: dynptr slots have to sit
 * at a constant offset so the verifier can track them.
 */
SEC("?tc")
__failure __msg("dynptr has to be at a constant offset") __log_level(2)
int dynptr_var_off_overwrite(struct __sk_buff *ctx)
{
	asm volatile (
		"r9 = 16;				\
		 *(u32 *)(r10 - 4) = r9;		\
		 r8 = *(u32 *)(r10 - 4);		\
		 if r8 >= 0 goto vjmp1;			\
		 r0 = 1;				\
		 exit;					\
	vjmp1:						\
		 if r8 <= 16 goto vjmp2;		\
		 r0 = 1;				\
		 exit;					\
	vjmp2:						\
		 r8 &= 16;				\
		 r1 = %[ringbuf] ll;			\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -32;				\
		 r4 += r8;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 r9 = 0xeB9F;				\
		 *(u64 *)(r10 - 16) = r9;		\
		 r1 = r10;				\
		 r1 += -32;				\
		 r1 += r8;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm_addr(ringbuf)
		: __clobber_all
	);
	return 0;
}
860 
/* A referenced ringbuf dynptr occupies fp-24..fp-9; storing to fp-16 lands
 * inside those slots, and reusing them for a mem dynptr must be caught as
 * overwriting the still-referenced dynptr rather than silently partially
 * invalidating it.
 */
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
{
	asm volatile (
		"r6 = %[ringbuf] ll;			\
		 r7 = %[array_map4] ll;			\
		 r1 = r7;				\
		 r2 = r10;				\
		 r2 += -8;				\
		 r9 = 0;				\
		 *(u64 *)(r2 + 0) = r9;			\
		 r3 = r2;				\
		 r4 = 0;				\
		 r8 = r2;				\
		 call %[bpf_map_update_elem];		\
		 r1 = r7;				\
		 r2 = r8;				\
		 call %[bpf_map_lookup_elem];		\
		 if r0 != 0 goto sjmp1;			\
		 exit;					\
	sjmp1:						\
		 r7 = r0;				\
		 r1 = r6;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -24;				\
		 call %[bpf_ringbuf_reserve_dynptr];	\
		 *(u64 *)(r10 - 16) = r9;		\
		 r1 = r7;				\
		 r2 = 8;				\
		 r3 = 0;				\
		 r4 = r10;				\
		 r4 += -16;				\
		 call %[bpf_dynptr_from_mem];		\
		 r1 = r10;				\
		 r1 += -512;				\
		 r2 = 488;				\
		 r3 = r10;				\
		 r3 += -24;				\
		 r4 = 0;				\
		 r5 = 0;				\
		 call %[bpf_dynptr_read];		\
		 r8 = 1;				\
		 if r0 != 0 goto sjmp2;			\
		 r8 = 0;				\
	sjmp2:						\
		 r1 = r10;				\
		 r1 += -24;				\
		 r2 = 0;				\
		 call %[bpf_ringbuf_discard_dynptr];	"
		:
		: __imm(bpf_map_update_elem),
		  __imm(bpf_map_lookup_elem),
		  __imm(bpf_ringbuf_reserve_dynptr),
		  __imm(bpf_ringbuf_discard_dynptr),
		  __imm(bpf_dynptr_from_mem),
		  __imm(bpf_dynptr_read),
		  __imm_addr(ringbuf),
		  __imm_addr(array_map4)
		: __clobber_all
	);
	return 0;
}
926 
/* Test that it is allowed to overwrite unreferenced dynptr. */
SEC("?raw_tp")
__success
int dynptr_overwrite_unref(void *ctx)
{
	struct bpf_dynptr ptr;

	/* each call re-initializes ptr in place; no reference is held,
	 * so reuse of the slots must be accepted
	 */
	if (get_map_val_dynptr(&ptr))
		return 0;
	if (get_map_val_dynptr(&ptr))
		return 0;
	if (get_map_val_dynptr(&ptr))
		return 0;

	return 0;
}
943 
/* Test that slices are invalidated on reinitializing a dynptr. */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int dynptr_invalidate_slice_reinit(void *ctx)
{
	struct bpf_dynptr ptr;
	__u8 *p;

	if (get_map_val_dynptr(&ptr))
		return 0;
	p = bpf_dynptr_data(&ptr, 0, 1);
	if (!p)
		return 0;
	/* re-initializing ptr must invalidate the slice p */
	if (get_map_val_dynptr(&ptr))
		return 0;
	/* this should fail */
	return *p;
}
962 
/* Invalidation of dynptr slices on destruction of dynptr should not miss
 * mem_or_null pointers.
 */
SEC("?raw_tp")
__failure __msg("R1 type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx)
{
	struct bpf_dynptr ptr;
	__u8 *p;

	if (get_map_val_dynptr(&ptr))
		return 0;

	/* p has not been NULL-checked yet (mem_or_null) when ptr is clobbered */
	p = bpf_dynptr_data(&ptr, 0, 1);
	*(__u8 *)&ptr = 0;
	/* this should fail */
	bpf_this_cpu_ptr(p);
	return 0;
}
982 
/* Destruction of dynptr should also invalidate any slices obtained from it */
SEC("?raw_tp")
__failure __msg("R7 invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u8 *p1, *p2;

	if (get_map_val_dynptr(&ptr1))
		return 0;
	if (get_map_val_dynptr(&ptr2))
		return 0;

	p1 = bpf_dynptr_data(&ptr1, 0, 1);
	if (!p1)
		return 0;
	p2 = bpf_dynptr_data(&ptr2, 0, 1);
	if (!p2)
		return 0;

	/* clobbering ptr1 must invalidate its slice p1 */
	*(__u8 *)&ptr1 = 0;
	/* this should fail */
	return *p1;
}
1008 
/* Invalidation of slices should be scoped and should not prevent dereferencing
 * slices of another dynptr after destroying unrelated dynptr
 */
SEC("?raw_tp")
__success
int dynptr_invalidate_slice_success(void *ctx)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr ptr2;
	__u8 *p1, *p2;

	if (get_map_val_dynptr(&ptr1))
		return 1;
	if (get_map_val_dynptr(&ptr2))
		return 1;

	p1 = bpf_dynptr_data(&ptr1, 0, 1);
	if (!p1)
		return 1;
	p2 = bpf_dynptr_data(&ptr2, 0, 1);
	if (!p2)
		return 1;

	/* only ptr1's slice (p1) is invalidated; p2 stays usable */
	*(__u8 *)&ptr1 = 0;
	return *p2;
}
1035 
/* Overwriting referenced dynptr should be rejected */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int dynptr_overwrite_ref(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
	/* this should fail: ptr still holds the ringbuf reference */
	if (get_map_val_dynptr(&ptr))
		bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}
1049 
/* Reject writes to dynptr slot from bpf_dynptr_read */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
int dynptr_read_into_slot(void *ctx)
{
	/* buf overlays the dynptr so the read's destination covers its slots */
	union {
		struct {
			char _pad[48];
			struct bpf_dynptr ptr;
		};
		char buf[64];
	} data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
	/* this should fail */
	bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);

	return 0;
}
1069 
/* bpf_dynptr_slice()s are read-only and cannot be written to */
SEC("?tc")
__failure __msg("R0 cannot write into rdonly_mem")
int skb_invalid_slice_write(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	/* this should fail */
	hdr->h_proto = 1;

	return SK_PASS;
}
1090 
/* The read-only data slice is invalidated whenever a helper changes packet data */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice1(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	val = hdr->h_proto;

	/* bpf_skb_pull_data may relocate packet data, invalidating hdr */
	if (bpf_skb_pull_data(skb, skb->len))
		return SK_DROP;

	/* this should fail */
	val = hdr->h_proto;

	return SK_PASS;
}
1116 
/* The read-write data slice is invalidated whenever a helper changes packet data */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice2(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	hdr->h_proto = 123;

	/* bpf_skb_pull_data may relocate packet data, invalidating hdr */
	if (bpf_skb_pull_data(skb, skb->len))
		return SK_DROP;

	/* this should fail */
	hdr->h_proto = 1;

	return SK_PASS;
}
1142 
/* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice3(struct __sk_buff *skb)
{
	char write_data[64] = "hello there, world!!";
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	val = hdr->h_proto;

	bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);

	/* this should fail */
	val = hdr->h_proto;

	return SK_PASS;
}
1168 
/* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int skb_invalid_data_slice4(struct __sk_buff *skb)
{
	char write_data[64] = "hello there, world!!";
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);
	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	hdr->h_proto = 123;

	bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);

	/* this should fail */
	hdr->h_proto = 1;

	return SK_PASS;
}
1193 
/* The read-only data slice is invalidated whenever a helper changes packet data */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
int xdp_invalid_data_slice1(struct xdp_md *xdp)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_xdp(xdp, 0, &ptr);
	hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	/* Valid read: the slice is live at this point */
	val = hdr->h_proto;

	/* Growing the headroom moves packet boundaries, so the verifier
	 * must invalidate the previously-obtained slice pointer
	 */
	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
		return XDP_DROP;

	/* this should fail */
	val = hdr->h_proto;

	return XDP_PASS;
}
1218 
/* The read-write data slice is invalidated whenever a helper changes packet data */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
int xdp_invalid_data_slice2(struct xdp_md *xdp)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_xdp(xdp, 0, &ptr);
	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	if (!hdr)
		return SK_DROP;

	/* Valid write: the rdwr slice is live at this point */
	hdr->h_proto = 9;

	/* Adjusting the head changes packet data layout; all slices
	 * derived from the xdp dynptr must become unusable
	 */
	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
		return XDP_DROP;

	/* this should fail */
	hdr->h_proto = 1;

	return XDP_PASS;
}
1243 
/* Only supported prog type can create skb-type dynptrs */
SEC("?raw_tp")
__failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed")
int skb_invalid_ctx(void *ctx)
{
	struct bpf_dynptr ptr;

	/* this should fail: raw_tp programs have no skb context, so the
	 * kfunc is not in this prog type's allowed set
	 */
	bpf_dynptr_from_skb(ctx, 0, &ptr);

	return 0;
}
1256 
/* Reject writes to dynptr slot for uninit arg */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
int uninit_write_into_slot(void *ctx)
{
	struct {
		char buf[64];
		struct bpf_dynptr ptr;
	} data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
	/* this should fail: the helper is asked to write 80 bytes into a
	 * 64-byte buffer, which would overwrite the adjacent stack slot
	 * holding the live dynptr
	 */
	bpf_get_current_comm(data.buf, 80);

	return 0;
}
1273 
/* Only supported prog type can create xdp-type dynptrs */
SEC("?raw_tp")
__failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed")
int xdp_invalid_ctx(void *ctx)
{
	struct bpf_dynptr ptr;

	/* this should fail: raw_tp programs have no xdp context, so the
	 * kfunc is not in this prog type's allowed set
	 */
	bpf_dynptr_from_xdp(ctx, 0, &ptr);

	return 0;
}
1286 
/* Global (thus verifier-unknown at load time) length used to exercise the
 * requirement that slice lengths be compile-time constants
 */
__u32 hdr_size = sizeof(struct ethhdr);
/* Can't pass in variable-sized len to bpf_dynptr_slice */
SEC("?tc")
__failure __msg("unbounded memory access")
int dynptr_slice_var_len1(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;
	char buffer[sizeof(*hdr)] = {};

	bpf_dynptr_from_skb(skb, 0, &ptr);

	/* this should fail: hdr_size is a variable with no known bound */
	hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size);
	if (!hdr)
		return SK_DROP;

	return SK_PASS;
}
1306 
/* Can't pass in variable-sized len to bpf_dynptr_slice */
SEC("?tc")
__failure __msg("must be a known constant")
int dynptr_slice_var_len2(struct __sk_buff *skb)
{
	char buffer[sizeof(struct ethhdr)] = {};
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;

	bpf_dynptr_from_skb(skb, 0, &ptr);

	/* Even bounded (hdr_size <= sizeof(buffer)) is not enough: the
	 * slice length must be an exact known constant, not just bounded
	 */
	if (hdr_size <= sizeof(buffer)) {
		/* this should fail */
		hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size);
		if (!hdr)
			return SK_DROP;
		hdr->h_proto = 12;
	}

	return SK_PASS;
}
1328 
/* bpf_loop() callback: writes through the loop ctx pointer. Used by
 * invalid_data_slices below so the verifier sees the dynptr passed as
 * ctx being written to, which must invalidate its data slices.
 * Always returns 0 to continue iterating.
 */
static int callback(__u32 index, void *data)
{
	*(__u32 *)data = 123;

	return 0;
}
1335 
/* If the dynptr is written into in a callback function, its data
 * slices should be invalidated as well.
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int invalid_data_slices(void *ctx)
{
	struct bpf_dynptr ptr;
	__u32 *slice;

	if (get_map_val_dynptr(&ptr))
		return 0;

	slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
	if (!slice)
		return 0;

	/* The callback receives &ptr as its ctx arg and writes through
	 * it, so the verifier must treat the dynptr as mutated here
	 */
	bpf_loop(10, callback, &ptr, 0);

	/* this should fail */
	*slice = 1;

	return 0;
}
1360 
/* Program types that don't allow writes to packet data should fail if
 * bpf_dynptr_slice_rdwr is called
 */
SEC("cgroup_skb/ingress")
__failure __msg("the prog does not allow writes to packet data")
int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
{
	char buffer[sizeof(struct ethhdr)] = {};
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;

	bpf_dynptr_from_skb(skb, 0, &ptr);

	/* this should fail since cgroup_skb doesn't allow
	 * changing packet data
	 */
	hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer));
	/* __sink() keeps hdr "used" so the compiler can't elide the call */
	__sink(hdr);

	return 0;
}
1382 
/* bpf_dynptr_adjust can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_adjust_invalid(void *ctx)
{
	/* Zero-initialized on the stack but never set up by a
	 * dynptr-creating helper, so it is "uninitialized" to the verifier
	 */
	struct bpf_dynptr ptr = {};

	/* this should fail */
	bpf_dynptr_adjust(&ptr, 1, 2);

	return 0;
}
1395 
/* bpf_dynptr_is_null can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_is_null_invalid(void *ctx)
{
	/* Never set up by a dynptr-creating helper */
	struct bpf_dynptr ptr = {};

	/* this should fail */
	bpf_dynptr_is_null(&ptr);

	return 0;
}
1408 
/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_is_rdonly_invalid(void *ctx)
{
	/* Never set up by a dynptr-creating helper */
	struct bpf_dynptr ptr = {};

	/* this should fail */
	bpf_dynptr_is_rdonly(&ptr);

	return 0;
}
1421 
/* bpf_dynptr_size can only be called on initialized dynptrs */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int dynptr_size_invalid(void *ctx)
{
	/* Never set up by a dynptr-creating helper */
	struct bpf_dynptr ptr = {};

	/* this should fail */
	bpf_dynptr_size(&ptr);

	return 0;
}
1434 
/* Only initialized dynptrs can be cloned */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #1")
int clone_invalid1(void *ctx)
{
	/* ptr1 is never set up by a dynptr-creating helper */
	struct bpf_dynptr ptr1 = {};
	struct bpf_dynptr ptr2;

	/* this should fail */
	bpf_dynptr_clone(&ptr1, &ptr2);

	return 0;
}
1448 
/* Can't overwrite an existing dynptr when cloning */
SEC("?xdp")
__failure __msg("cannot overwrite referenced dynptr")
int clone_invalid2(struct xdp_md *xdp)
{
	struct bpf_dynptr ptr1;
	struct bpf_dynptr clone;

	bpf_dynptr_from_xdp(xdp, 0, &ptr1);

	/* clone now holds a live ringbuf reservation (a referenced dynptr) */
	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);

	/* this should fail: cloning into the slot would leak the
	 * ringbuf reference held by clone
	 */
	bpf_dynptr_clone(&ptr1, &clone);

	bpf_ringbuf_submit_dynptr(&clone, 0);

	return 0;
}
1468 
/* Invalidating a dynptr should invalidate its clones */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int clone_invalidate1(void *ctx)
{
	struct bpf_dynptr clone;
	struct bpf_dynptr ptr;
	char read_data[64];

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);

	/* Submitting the parent releases the ringbuf record backing
	 * both dynptrs
	 */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);

	return 0;
}
1489 
/* Invalidating a dynptr should invalidate its parent */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int clone_invalidate2(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone;
	char read_data[64];

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);

	/* Submitting the clone releases the shared ringbuf record */
	bpf_ringbuf_submit_dynptr(&clone, 0);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);

	return 0;
}
1510 
/* Invalidating a dynptr should invalidate its siblings */
SEC("?raw_tp")
__failure __msg("Expected an initialized dynptr as arg #3")
int clone_invalidate3(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone1;
	struct bpf_dynptr clone2;
	char read_data[64];

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* clone1 and clone2 are siblings sharing ptr's ringbuf record */
	bpf_dynptr_clone(&ptr, &clone1);

	bpf_dynptr_clone(&ptr, &clone2);

	bpf_ringbuf_submit_dynptr(&clone2, 0);

	/* this should fail */
	bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);

	return 0;
}
1534 
/* Invalidating a dynptr should invalidate any data slices
 * of its clones
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate4(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone;
	int *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);
	/* data points into the shared ringbuf record */
	data = bpf_dynptr_data(&clone, 0, sizeof(val));
	if (!data)
		return 0;

	/* Submitting the parent releases the record the slice points into */
	bpf_ringbuf_submit_dynptr(&ptr, 0);

	/* this should fail */
	*data = 123;

	return 0;
}
1560 
/* Invalidating a dynptr should invalidate any data slices
 * of its parent
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate5(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone;
	int *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
	/* data is a slice of the parent, taken before cloning */
	data = bpf_dynptr_data(&ptr, 0, sizeof(val));
	if (!data)
		return 0;

	bpf_dynptr_clone(&ptr, &clone);

	/* Submitting the clone releases the record the slice points into */
	bpf_ringbuf_submit_dynptr(&clone, 0);

	/* this should fail */
	*data = 123;

	return 0;
}
1586 
/* Invalidating a dynptr should invalidate any data slices
 * of its sibling
 */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int clone_invalidate6(void *ctx)
{
	struct bpf_dynptr ptr;
	struct bpf_dynptr clone1;
	struct bpf_dynptr clone2;
	int *data;

	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);

	/* clone1 and clone2 are siblings sharing ptr's ringbuf record */
	bpf_dynptr_clone(&ptr, &clone1);

	bpf_dynptr_clone(&ptr, &clone2);

	data = bpf_dynptr_data(&clone1, 0, sizeof(val));
	if (!data)
		return 0;

	/* Submitting one sibling releases the record backing the other's slice */
	bpf_ringbuf_submit_dynptr(&clone2, 0);

	/* this should fail */
	*data = 123;

	return 0;
}
1616 
/* A skb clone's data slices should be invalid anytime packet data changes */
SEC("?tc")
__failure __msg("invalid mem access 'scalar'")
int clone_skb_packet_data(struct __sk_buff *skb)
{
	char buffer[sizeof(__u32)] = {};
	struct bpf_dynptr clone;
	struct bpf_dynptr ptr;
	__u32 *data;

	bpf_dynptr_from_skb(skb, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);
	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
	if (!data)
		/* NOTE(review): XDP_DROP in a tc program looks like a
		 * copy-paste from the xdp variant below; harmless here
		 * since this prog is expected to fail verification
		 */
		return XDP_DROP;

	/* Pulling data may reallocate packet memory, invalidating the
	 * clone's slice as well
	 */
	if (bpf_skb_pull_data(skb, skb->len))
		return SK_DROP;

	/* this should fail */
	*data = 123;

	return 0;
}
1642 
/* A xdp clone's data slices should be invalid anytime packet data changes */
SEC("?xdp")
__failure __msg("invalid mem access 'scalar'")
int clone_xdp_packet_data(struct xdp_md *xdp)
{
	char buffer[sizeof(__u32)] = {};
	struct bpf_dynptr clone;
	struct bpf_dynptr ptr;
	struct ethhdr *hdr;	/* only used for sizeof(*hdr) below */
	__u32 *data;

	bpf_dynptr_from_xdp(xdp, 0, &ptr);

	bpf_dynptr_clone(&ptr, &clone);
	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
	if (!data)
		return XDP_DROP;

	/* Adjusting the head changes packet layout, invalidating the
	 * clone's slice as well
	 */
	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
		return XDP_DROP;

	/* this should fail */
	*data = 123;

	return 0;
}
1669 
/* Buffers that are provided must be sufficiently long */
SEC("?cgroup_skb/egress")
__failure __msg("memory, len pair leads to invalid memory access")
int test_dynptr_skb_small_buff(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	char buffer[8] = {};
	__u64 *data;

	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
		err = 1;
		return 1;
	}

	/* This may return NULL. SKB may require a buffer */
	/* this should fail: requesting 9 bytes into an 8-byte buffer */
	data = bpf_dynptr_slice(&ptr, 0, buffer, 9);

	return !!data;
}
1689