1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <test_progs.h>
4 #include "bpf_iter_ipv6_route.skel.h"
5 #include "bpf_iter_netlink.skel.h"
6 #include "bpf_iter_bpf_map.skel.h"
7 #include "bpf_iter_task.skel.h"
8 #include "bpf_iter_task_stack.skel.h"
9 #include "bpf_iter_task_file.skel.h"
10 #include "bpf_iter_task_vma.skel.h"
11 #include "bpf_iter_task_btf.skel.h"
12 #include "bpf_iter_tcp4.skel.h"
13 #include "bpf_iter_tcp6.skel.h"
14 #include "bpf_iter_udp4.skel.h"
15 #include "bpf_iter_udp6.skel.h"
16 #include "bpf_iter_test_kern1.skel.h"
17 #include "bpf_iter_test_kern2.skel.h"
18 #include "bpf_iter_test_kern3.skel.h"
19 #include "bpf_iter_test_kern4.skel.h"
20 #include "bpf_iter_bpf_hash_map.skel.h"
21 #include "bpf_iter_bpf_percpu_hash_map.skel.h"
22 #include "bpf_iter_bpf_array_map.skel.h"
23 #include "bpf_iter_bpf_percpu_array_map.skel.h"
24 #include "bpf_iter_bpf_sk_storage_helpers.skel.h"
25 #include "bpf_iter_bpf_sk_storage_map.skel.h"
26 #include "bpf_iter_test_kern5.skel.h"
27 #include "bpf_iter_test_kern6.skel.h"
28
29 static int duration;
30
/* Negative test: the bpf_iter_test_kern3 skeleton is expected to be
 * rejected at open_and_load time, so a successful load is a failure.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel = bpf_iter_test_kern3__open_and_load();

	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n"))
		bpf_iter_test_kern3__destroy(skel);
}
42
/* Attach @prog as an iterator, create an iterator fd from its link and
 * drain the output.  The content is not validated; only that read()
 * terminates without an error.
 */
static void do_dummy_read(struct bpf_program *prog)
{
	char buf[16] = {};
	struct bpf_link *link;
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) {
		/* not check contents, but ensure read() ends without error */
		do {
			len = read(iter_fd, buf, sizeof(buf));
		} while (len > 0);
		CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

		close(iter_fd);
	}

	bpf_link__destroy(link);
}
67
/* Read from @fd until EOF, an error, or @size bytes have accumulated
 * in @buf.  Returns the total number of bytes read on success, or the
 * negative value returned by the failing read() call.
 */
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int total = 0;
	int len;

	for (;;) {
		len = read(fd, buf + total, size - total);
		if (len <= 0)
			break;
		total += len;
	}

	return len < 0 ? len : total;
}
83
test_ipv6_route(void)84 static void test_ipv6_route(void)
85 {
86 struct bpf_iter_ipv6_route *skel;
87
88 skel = bpf_iter_ipv6_route__open_and_load();
89 if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
90 "skeleton open_and_load failed\n"))
91 return;
92
93 do_dummy_read(skel->progs.dump_ipv6_route);
94
95 bpf_iter_ipv6_route__destroy(skel);
96 }
97
test_netlink(void)98 static void test_netlink(void)
99 {
100 struct bpf_iter_netlink *skel;
101
102 skel = bpf_iter_netlink__open_and_load();
103 if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
104 "skeleton open_and_load failed\n"))
105 return;
106
107 do_dummy_read(skel->progs.dump_netlink);
108
109 bpf_iter_netlink__destroy(skel);
110 }
111
test_bpf_map(void)112 static void test_bpf_map(void)
113 {
114 struct bpf_iter_bpf_map *skel;
115
116 skel = bpf_iter_bpf_map__open_and_load();
117 if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
118 "skeleton open_and_load failed\n"))
119 return;
120
121 do_dummy_read(skel->progs.dump_bpf_map);
122
123 bpf_iter_bpf_map__destroy(skel);
124 }
125
test_task(void)126 static void test_task(void)
127 {
128 struct bpf_iter_task *skel;
129
130 skel = bpf_iter_task__open_and_load();
131 if (CHECK(!skel, "bpf_iter_task__open_and_load",
132 "skeleton open_and_load failed\n"))
133 return;
134
135 do_dummy_read(skel->progs.dump_task);
136
137 bpf_iter_task__destroy(skel);
138 }
139
test_task_stack(void)140 static void test_task_stack(void)
141 {
142 struct bpf_iter_task_stack *skel;
143
144 skel = bpf_iter_task_stack__open_and_load();
145 if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
146 "skeleton open_and_load failed\n"))
147 return;
148
149 do_dummy_read(skel->progs.dump_task_stack);
150 do_dummy_read(skel->progs.get_task_user_stacks);
151
152 bpf_iter_task_stack__destroy(skel);
153 }
154
/* Trivial thread body: terminate immediately, forwarding @arg as the
 * thread's exit status (observed by pthread_join()).
 */
static void *do_nothing(void *arg)
{
	pthread_exit(arg);
}
159
/* Run the task_file iterator while a second thread (created with
 * pthread_create) shares this process's files struct, and verify the
 * BPF side's "count" stays zero.
 *
 * NOTE(review): count is maintained by the BPF program; a non-zero
 * value presumably means files were visited through a non-group-leader
 * task — confirm against progs/bpf_iter_task_file.c.
 */
static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	/* restrict the BPF program's view to this process */
	skel->bss->tgid = getpid();

	if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
		  "pthread_create", "pthread_create failed\n"))
		goto done;

	do_dummy_read(skel->progs.dump_task_file);

	/* do_nothing() forwards its NULL arg, so ret must be NULL */
	if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
		  "pthread_join", "pthread_join failed\n"))
		goto done;

	CHECK(skel->bss->count != 0, "check_count",
	      "invalid non pthread file visit count %d\n", skel->bss->count);

done:
	bpf_iter_task_file__destroy(skel);
}
189
190 #define TASKBUFSZ 32768
191
192 static char taskbuf[TASKBUFSZ];
193
do_btf_read(struct bpf_iter_task_btf * skel)194 static int do_btf_read(struct bpf_iter_task_btf *skel)
195 {
196 struct bpf_program *prog = skel->progs.dump_task_struct;
197 struct bpf_iter_task_btf__bss *bss = skel->bss;
198 int iter_fd = -1, err;
199 struct bpf_link *link;
200 char *buf = taskbuf;
201 int ret = 0;
202
203 link = bpf_program__attach_iter(prog, NULL);
204 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
205 return ret;
206
207 iter_fd = bpf_iter_create(bpf_link__fd(link));
208 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
209 goto free_link;
210
211 err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
212 if (bss->skip) {
213 printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
214 ret = 1;
215 test__skip();
216 goto free_link;
217 }
218
219 if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
220 goto free_link;
221
222 CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
223 "check for btf representation of task_struct in iter data",
224 "struct task_struct not found");
225 free_link:
226 if (iter_fd > 0)
227 close(iter_fd);
228 bpf_link__destroy(link);
229 return ret;
230 }
231
/* Load the task-BTF skeleton, run do_btf_read(), then verify that the
 * BPF program actually iterated tasks and reported no
 * bpf_seq_printf_btf error.
 */
static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	bss = skel->bss;

	/* nonzero return means the sub-test was skipped or failed early */
	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (CHECK(bss->tasks == 0, "check if iterated over tasks",
		  "no task iteration, did BPF program run?\n"))
		goto cleanup;

	CHECK(bss->seq_err != 0, "check for unexpected err",
	      "bpf_seq_printf_btf returned %ld", bss->seq_err);

cleanup:
	bpf_iter_task_btf__destroy(skel);
}
259
test_tcp4(void)260 static void test_tcp4(void)
261 {
262 struct bpf_iter_tcp4 *skel;
263
264 skel = bpf_iter_tcp4__open_and_load();
265 if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
266 "skeleton open_and_load failed\n"))
267 return;
268
269 do_dummy_read(skel->progs.dump_tcp4);
270
271 bpf_iter_tcp4__destroy(skel);
272 }
273
test_tcp6(void)274 static void test_tcp6(void)
275 {
276 struct bpf_iter_tcp6 *skel;
277
278 skel = bpf_iter_tcp6__open_and_load();
279 if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
280 "skeleton open_and_load failed\n"))
281 return;
282
283 do_dummy_read(skel->progs.dump_tcp6);
284
285 bpf_iter_tcp6__destroy(skel);
286 }
287
test_udp4(void)288 static void test_udp4(void)
289 {
290 struct bpf_iter_udp4 *skel;
291
292 skel = bpf_iter_udp4__open_and_load();
293 if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
294 "skeleton open_and_load failed\n"))
295 return;
296
297 do_dummy_read(skel->progs.dump_udp4);
298
299 bpf_iter_udp4__destroy(skel);
300 }
301
test_udp6(void)302 static void test_udp6(void)
303 {
304 struct bpf_iter_udp6 *skel;
305
306 skel = bpf_iter_udp6__open_and_load();
307 if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
308 "skeleton open_and_load failed\n"))
309 return;
310
311 do_dummy_read(skel->progs.dump_udp6);
312
313 bpf_iter_udp6__destroy(skel);
314 }
315
316 /* The expected string is less than 16 bytes */
/* Drain @iter_fd into a small local buffer — one byte at a time when
 * @read_one_char — and compare the result against @expected.
 * The expected string is less than 16 bytes so it always fits.
 * Returns 0 on an exact match, -1 on any failure.
 */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	char buf[16] = {};
	int off = 0, nread, len;

	nread = read_one_char ? 1 : 16;
	while ((len = read(iter_fd, buf + off, nread)) > 0) {
		off += len;
		if (CHECK(off >= 16, "read", "read len %d\n", len))
			return -1;
		nread = read_one_char ? 1 : 16 - off;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (CHECK(strcmp(buf, expected), "read",
		  "incorrect read result: buf %s, expected %s\n",
		  buf, expected))
		return -1;

	return 0;
}
341
/* Attach the test_kern1 task iterator via skeleton auto-attach and
 * verify an anonymous iterator fd yields "abcd"; @read_one_char
 * exercises the partial-read path of do_read_with_fd().
 */
static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (CHECK(err, "bpf_iter_test_kern1__attach",
		  "skeleton attach failed\n")) {
		goto out;
	}

	/* the link is owned by the skeleton; only iter_fd needs closing */
	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}
370
/* Open the pinned iterator at @path read-only and verify its whole
 * output equals @expected.  Returns 0 on success, -1 on failure.
 */
static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}
384
/* Pin an iterator link into bpffs, read it through its file path, then
 * swap the underlying program with bpf_link__update_program() and
 * verify the pinned iterator's output changes ("abcd" -> "ABCD").
 */
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	/* unlink this path if it exists (leftover from a prior run). */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* file based iterator seems working fine. Let us do a link update
	 * of the underlying link and `cat` the iterator again, its content
	 * should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
		  "skeleton open_and_load failed\n"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (CHECK(err, "update_prog", "update_prog failed\n"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}
437
/* Exercise seq_file buffer overflow/restart handling: two single-entry
 * array maps are dumped by one iterator program whose per-element
 * output size (rodata print_len, in 8-byte units) is tuned so that
 *  - test_e2big_overflow: a single element exceeds the kernel buffer
 *    and read() must fail with E2BIG;
 *  - !ret1: the first map nearly fills the buffer and the second
 *    triggers an overflow followed by a bpf_seq_read restart;
 *  - ret1: minimal output with the BPF program's ret1 mode set (see
 *    progs/bpf_iter_test_kern4.c for the exact semantics).
 */
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (CHECK(!skel, "bpf_iter_test_kern4__open",
		  "skeleton open failed\n"))
		return;

	/* create two maps: bpf program will only do bpf_seq_write
	 * for these two maps. The goal is one map output almost
	 * fills seq_file buffer and then the other will trigger
	 * overflow and needs restart.
	 */
	map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map1_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map2_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* bpf_seq_printf kernel buffer is 8 pages, so one map
	 * bpf_seq_write will mostly fill it, and the other map
	 * will partially fill and then trigger overflow and need
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (CHECK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
			  len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		/* NOTE(review): userspace read() reports EAGAIN via errno
		 * with a -1 return, so "len == -EAGAIN" can never be true
		 * here — confirm whether a retry on EAGAIN was intended.
		 */
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || len == -EAGAIN);

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (CHECK(total_read_len != expected_read_len, "read",
		  "total len %u, expected len %u\n", total_read_len,
		  expected_read_len))
		goto free_buf;

	/* map1 should be visited once, map2 twice (overflow + restart) */
	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
		  "expected 1 actual %d\n", skel->bss->map1_accessed))
		goto free_buf;

	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
		  "expected 2 actual %d\n", skel->bss->map2_accessed))
		goto free_buf;

	/* both visits of map2 must observe the same seq number */
	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
	      "map2_seqnum", "two different seqnum %lld %lld\n",
	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}
575
test_bpf_hash_map(void)576 static void test_bpf_hash_map(void)
577 {
578 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
579 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
580 struct bpf_iter_bpf_hash_map *skel;
581 int err, i, len, map_fd, iter_fd;
582 union bpf_iter_link_info linfo;
583 __u64 val, expected_val = 0;
584 struct bpf_link *link;
585 struct key_t {
586 int a;
587 int b;
588 int c;
589 } key;
590 char buf[64];
591
592 skel = bpf_iter_bpf_hash_map__open();
593 if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
594 "skeleton open failed\n"))
595 return;
596
597 skel->bss->in_test_mode = true;
598
599 err = bpf_iter_bpf_hash_map__load(skel);
600 if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
601 "skeleton load failed\n"))
602 goto out;
603
604 /* iterator with hashmap2 and hashmap3 should fail */
605 memset(&linfo, 0, sizeof(linfo));
606 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
607 opts.link_info = &linfo;
608 opts.link_info_len = sizeof(linfo);
609 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
610 if (CHECK(!IS_ERR(link), "attach_iter",
611 "attach_iter for hashmap2 unexpected succeeded\n"))
612 goto out;
613
614 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
615 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
616 if (CHECK(!IS_ERR(link), "attach_iter",
617 "attach_iter for hashmap3 unexpected succeeded\n"))
618 goto out;
619
620 /* hashmap1 should be good, update map values here */
621 map_fd = bpf_map__fd(skel->maps.hashmap1);
622 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
623 key.a = i + 1;
624 key.b = i + 2;
625 key.c = i + 3;
626 val = i + 4;
627 expected_key_a += key.a;
628 expected_key_b += key.b;
629 expected_key_c += key.c;
630 expected_val += val;
631
632 err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
633 if (CHECK(err, "map_update", "map_update failed\n"))
634 goto out;
635 }
636
637 linfo.map.map_fd = map_fd;
638 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
639 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
640 goto out;
641
642 iter_fd = bpf_iter_create(bpf_link__fd(link));
643 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
644 goto free_link;
645
646 /* do some tests */
647 while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
648 ;
649 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
650 goto close_iter;
651
652 /* test results */
653 if (CHECK(skel->bss->key_sum_a != expected_key_a,
654 "key_sum_a", "got %u expected %u\n",
655 skel->bss->key_sum_a, expected_key_a))
656 goto close_iter;
657 if (CHECK(skel->bss->key_sum_b != expected_key_b,
658 "key_sum_b", "got %u expected %u\n",
659 skel->bss->key_sum_b, expected_key_b))
660 goto close_iter;
661 if (CHECK(skel->bss->val_sum != expected_val,
662 "val_sum", "got %llu expected %llu\n",
663 skel->bss->val_sum, expected_val))
664 goto close_iter;
665
666 close_iter:
667 close(iter_fd);
668 free_link:
669 bpf_link__destroy(link);
670 out:
671 bpf_iter_bpf_hash_map__destroy(skel);
672 }
673
test_bpf_percpu_hash_map(void)674 static void test_bpf_percpu_hash_map(void)
675 {
676 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
677 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
678 struct bpf_iter_bpf_percpu_hash_map *skel;
679 int err, i, j, len, map_fd, iter_fd;
680 union bpf_iter_link_info linfo;
681 __u32 expected_val = 0;
682 struct bpf_link *link;
683 struct key_t {
684 int a;
685 int b;
686 int c;
687 } key;
688 char buf[64];
689 void *val;
690
691 val = malloc(8 * bpf_num_possible_cpus());
692
693 skel = bpf_iter_bpf_percpu_hash_map__open();
694 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
695 "skeleton open failed\n"))
696 return;
697
698 skel->rodata->num_cpus = bpf_num_possible_cpus();
699
700 err = bpf_iter_bpf_percpu_hash_map__load(skel);
701 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
702 "skeleton load failed\n"))
703 goto out;
704
705 /* update map values here */
706 map_fd = bpf_map__fd(skel->maps.hashmap1);
707 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
708 key.a = i + 1;
709 key.b = i + 2;
710 key.c = i + 3;
711 expected_key_a += key.a;
712 expected_key_b += key.b;
713 expected_key_c += key.c;
714
715 for (j = 0; j < bpf_num_possible_cpus(); j++) {
716 *(__u32 *)(val + j * 8) = i + j;
717 expected_val += i + j;
718 }
719
720 err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
721 if (CHECK(err, "map_update", "map_update failed\n"))
722 goto out;
723 }
724
725 memset(&linfo, 0, sizeof(linfo));
726 linfo.map.map_fd = map_fd;
727 opts.link_info = &linfo;
728 opts.link_info_len = sizeof(linfo);
729 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
730 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
731 goto out;
732
733 iter_fd = bpf_iter_create(bpf_link__fd(link));
734 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
735 goto free_link;
736
737 /* do some tests */
738 while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
739 ;
740 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
741 goto close_iter;
742
743 /* test results */
744 if (CHECK(skel->bss->key_sum_a != expected_key_a,
745 "key_sum_a", "got %u expected %u\n",
746 skel->bss->key_sum_a, expected_key_a))
747 goto close_iter;
748 if (CHECK(skel->bss->key_sum_b != expected_key_b,
749 "key_sum_b", "got %u expected %u\n",
750 skel->bss->key_sum_b, expected_key_b))
751 goto close_iter;
752 if (CHECK(skel->bss->val_sum != expected_val,
753 "val_sum", "got %u expected %u\n",
754 skel->bss->val_sum, expected_val))
755 goto close_iter;
756
757 close_iter:
758 close(iter_fd);
759 free_link:
760 bpf_link__destroy(link);
761 out:
762 bpf_iter_bpf_percpu_hash_map__destroy(skel);
763 }
764
test_bpf_array_map(void)765 static void test_bpf_array_map(void)
766 {
767 __u64 val, expected_val = 0, res_first_val, first_val = 0;
768 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
769 __u32 expected_key = 0, res_first_key;
770 struct bpf_iter_bpf_array_map *skel;
771 union bpf_iter_link_info linfo;
772 int err, i, map_fd, iter_fd;
773 struct bpf_link *link;
774 char buf[64] = {};
775 int len, start;
776
777 skel = bpf_iter_bpf_array_map__open_and_load();
778 if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
779 "skeleton open_and_load failed\n"))
780 return;
781
782 map_fd = bpf_map__fd(skel->maps.arraymap1);
783 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
784 val = i + 4;
785 expected_key += i;
786 expected_val += val;
787
788 if (i == 0)
789 first_val = val;
790
791 err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
792 if (CHECK(err, "map_update", "map_update failed\n"))
793 goto out;
794 }
795
796 memset(&linfo, 0, sizeof(linfo));
797 linfo.map.map_fd = map_fd;
798 opts.link_info = &linfo;
799 opts.link_info_len = sizeof(linfo);
800 link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
801 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
802 goto out;
803
804 iter_fd = bpf_iter_create(bpf_link__fd(link));
805 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
806 goto free_link;
807
808 /* do some tests */
809 start = 0;
810 while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
811 start += len;
812 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
813 goto close_iter;
814
815 /* test results */
816 res_first_key = *(__u32 *)buf;
817 res_first_val = *(__u64 *)(buf + sizeof(__u32));
818 if (CHECK(res_first_key != 0 || res_first_val != first_val,
819 "bpf_seq_write",
820 "seq_write failure: first key %u vs expected 0, "
821 " first value %llu vs expected %llu\n",
822 res_first_key, res_first_val, first_val))
823 goto close_iter;
824
825 if (CHECK(skel->bss->key_sum != expected_key,
826 "key_sum", "got %u expected %u\n",
827 skel->bss->key_sum, expected_key))
828 goto close_iter;
829 if (CHECK(skel->bss->val_sum != expected_val,
830 "val_sum", "got %llu expected %llu\n",
831 skel->bss->val_sum, expected_val))
832 goto close_iter;
833
834 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
835 err = bpf_map_lookup_elem(map_fd, &i, &val);
836 if (CHECK(err, "map_lookup", "map_lookup failed\n"))
837 goto out;
838 if (CHECK(i != val, "invalid_val",
839 "got value %llu expected %u\n", val, i))
840 goto out;
841 }
842
843 close_iter:
844 close(iter_fd);
845 free_link:
846 bpf_link__destroy(link);
847 out:
848 bpf_iter_bpf_array_map__destroy(skel);
849 }
850
test_bpf_percpu_array_map(void)851 static void test_bpf_percpu_array_map(void)
852 {
853 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
854 struct bpf_iter_bpf_percpu_array_map *skel;
855 __u32 expected_key = 0, expected_val = 0;
856 union bpf_iter_link_info linfo;
857 int err, i, j, map_fd, iter_fd;
858 struct bpf_link *link;
859 char buf[64];
860 void *val;
861 int len;
862
863 val = malloc(8 * bpf_num_possible_cpus());
864
865 skel = bpf_iter_bpf_percpu_array_map__open();
866 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
867 "skeleton open failed\n"))
868 return;
869
870 skel->rodata->num_cpus = bpf_num_possible_cpus();
871
872 err = bpf_iter_bpf_percpu_array_map__load(skel);
873 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
874 "skeleton load failed\n"))
875 goto out;
876
877 /* update map values here */
878 map_fd = bpf_map__fd(skel->maps.arraymap1);
879 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
880 expected_key += i;
881
882 for (j = 0; j < bpf_num_possible_cpus(); j++) {
883 *(__u32 *)(val + j * 8) = i + j;
884 expected_val += i + j;
885 }
886
887 err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
888 if (CHECK(err, "map_update", "map_update failed\n"))
889 goto out;
890 }
891
892 memset(&linfo, 0, sizeof(linfo));
893 linfo.map.map_fd = map_fd;
894 opts.link_info = &linfo;
895 opts.link_info_len = sizeof(linfo);
896 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
897 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
898 goto out;
899
900 iter_fd = bpf_iter_create(bpf_link__fd(link));
901 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
902 goto free_link;
903
904 /* do some tests */
905 while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
906 ;
907 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
908 goto close_iter;
909
910 /* test results */
911 if (CHECK(skel->bss->key_sum != expected_key,
912 "key_sum", "got %u expected %u\n",
913 skel->bss->key_sum, expected_key))
914 goto close_iter;
915 if (CHECK(skel->bss->val_sum != expected_val,
916 "val_sum", "got %u expected %u\n",
917 skel->bss->val_sum, expected_val))
918 goto close_iter;
919
920 close_iter:
921 close(iter_fd);
922 free_link:
923 bpf_link__destroy(link);
924 out:
925 bpf_iter_bpf_percpu_array_map__destroy(skel);
926 }
927
928 /* An iterator program deletes all local storage in a map. */
/* Store a value in a socket's local storage, run the iterator program
 * that deletes all entries of the sk storage map, then verify the
 * entry is gone (lookup fails with ENOENT).
 */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "map_update", "map_update failed\n"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* drain the iterator; this runs the deleting BPF program */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* the entry must have been deleted by the iterator program */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
988
989 /* This creates a socket and its local storage. It then runs a task_iter BPF
990 * program that replaces the existing socket local storage with the tgid of the
991 * only task owning a file descriptor to this socket, this process, prog_tests.
992 * It then runs a tcp socket iterator that negates the value in the existing
993 * socket local storage, the test verifies that the resulting value is -pid.
994 */
/* See the block comment above: a task iterator fills the socket's
 * local storage with this process's pid, then a tcp socket iterator
 * negates it; verify both intermediate and final values.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;

	/* listening state so the tcp iterator visits this socket */
	err = listen(sock_fd, 1);
	if (CHECK(err != 0, "listen", "errno: %d\n", errno))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
		goto close_socket;

	/* task iterator: replace storage with the owner's tgid */
	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
		  "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
		  getpid(), val, err))
		goto close_socket;

	/* tcp iterator: negate the stored value */
	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
1040
/* Create several IPv6 sockets, give each a distinct local-storage
 * value, run the sk storage map iterator, and verify the BPF program
 * counted all sockets and summed all stored values.
 */
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
			goto out;

		/* per-socket value i+1; the iterator should sum these */
		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* drain the iterator so the BPF program visits every socket */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
		  "ipv6_sk_count", "got %u expected %u\n",
		  skel->bss->ipv6_sk_count, num_sockets))
		goto close_iter;

	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}
1113
test_rdonly_buf_out_of_bound(void)1114 static void test_rdonly_buf_out_of_bound(void)
1115 {
1116 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1117 struct bpf_iter_test_kern5 *skel;
1118 union bpf_iter_link_info linfo;
1119 struct bpf_link *link;
1120
1121 skel = bpf_iter_test_kern5__open_and_load();
1122 if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
1123 "skeleton open_and_load failed\n"))
1124 return;
1125
1126 memset(&linfo, 0, sizeof(linfo));
1127 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
1128 opts.link_info = &linfo;
1129 opts.link_info_len = sizeof(linfo);
1130 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
1131 if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
1132 bpf_link__destroy(link);
1133
1134 bpf_iter_test_kern5__destroy(skel);
1135 }
1136
/* A program accessing the iterator buffer at a negative offset must not
 * load; open_and_load succeeding is a failure.
 */
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel = bpf_iter_test_kern6__open_and_load();

	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpected success\n"))
		bpf_iter_test_kern6__destroy(skel);
}
1146
/* 1kB buffers holding the bpf_iter task_vma output and the corresponding
 * /proc/<pid>/maps text, compared against each other in test_task_vma()
 */
#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];
1150
/* In place: remove spaces and tabs from str and truncate it at the first
 * newline, so only the (whitespace-stripped) first line remains.
 *
 * Checking the terminator/newline before copying avoids the old do/while
 * form, which read one byte past the '\0' of an empty string and let a
 * leading '\n' (and the text after it) slip through.
 */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	while (*src != '\0' && *src != '\n') {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);
	}

	*dst = '\0';
}
1166
1167 #define min(a, b) ((a) < (b) ? (a) : (b))
1168
test_task_vma(void)1169 static void test_task_vma(void)
1170 {
1171 int err, iter_fd = -1, proc_maps_fd = -1;
1172 struct bpf_iter_task_vma *skel;
1173 int len, read_size = 4;
1174 char maps_path[64];
1175
1176 skel = bpf_iter_task_vma__open();
1177 if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n"))
1178 return;
1179
1180 skel->bss->pid = getpid();
1181
1182 err = bpf_iter_task_vma__load(skel);
1183 if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n"))
1184 goto out;
1185
1186 skel->links.proc_maps = bpf_program__attach_iter(
1187 skel->progs.proc_maps, NULL);
1188
1189 if (CHECK(IS_ERR(skel->links.proc_maps), "bpf_program__attach_iter",
1190 "attach iterator failed\n")) {
1191 skel->links.proc_maps = NULL;
1192 goto out;
1193 }
1194
1195 iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
1196 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
1197 goto out;
1198
1199 /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
1200 * to trigger seq_file corner cases. The expected output is much
1201 * longer than 1kB, so the while loop will terminate.
1202 */
1203 len = 0;
1204 while (len < CMP_BUFFER_SIZE) {
1205 err = read_fd_into_buffer(iter_fd, task_vma_output + len,
1206 min(read_size, CMP_BUFFER_SIZE - len));
1207 if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n"))
1208 goto out;
1209 len += err;
1210 }
1211
1212 /* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
1213 snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
1214 proc_maps_fd = open(maps_path, O_RDONLY);
1215 if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n"))
1216 goto out;
1217 err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
1218 if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n"))
1219 goto out;
1220
1221 /* strip and compare the first line of the two files */
1222 str_strip_first_line(task_vma_output);
1223 str_strip_first_line(proc_maps_output);
1224
1225 CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output",
1226 "found mismatch\n");
1227 out:
1228 close(proc_maps_fd);
1229 close(iter_fd);
1230 bpf_iter_task_vma__destroy(skel);
1231 }
1232
/* Top-level entry: run each bpf_iter scenario as its own subtest so
 * failures are reported — and selectable — individually.
 */
void test_bpf_iter(void)
{
	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	/* iterators over kernel objects (routes, sockets, tasks, maps) */
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task"))
		test_task();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	/* anonymous vs file iterators and seq_file overflow handling */
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	/* iterators over the various BPF map types */
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	/* negative tests: the verifier must reject bad buffer accesses */
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
}
1292