// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */

#include <test_progs.h>

#include "test_btf_map_in_map.skel.h"

static int duration;

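/* Fetch the kernel-assigned ID of a BPF map through its FD; returns 0 on
 * error so callers can treat a zero ID as "lookup failed".
 */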
static __u32 bpf_map_id(struct bpf_map *map)
{
	struct bpf_map_info info;
	__u32 info_len = sizeof(info);
	int err;

	memset(&info, 0, info_len);
	err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
	if (err)
		return 0;
	return info.id;
}

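/* Swap which inner maps are installed in the array-of-maps and hash-of-maps
 * outer maps, let the attached programs write into them, and verify each
 * value lands in the expected inner map. Finally, make sure the inner maps
 * are actually freed once the skeleton is destroyed.
 */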
static void test_lookup_update(void)
{
	int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
	int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
	struct test_btf_map_in_map *skel;
	int err, key = 0, val, i, fd;

	skel = test_btf_map_in_map__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
		return;

	err = test_btf_map_in_map__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
		goto cleanup;

	map1_fd = bpf_map__fd(skel->maps.inner_map1);
	map2_fd = bpf_map__fd(skel->maps.inner_map2);
	map3_fd = bpf_map__fd(skel->maps.inner_map3);
	map4_fd = bpf_map__fd(skel->maps.inner_map4);
	map5_fd = bpf_map__fd(skel->maps.inner_map5);
	outer_arr_dyn_fd = bpf_map__fd(skel->maps.outer_arr_dyn);
	outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
	outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);

	/* inner1 = input, inner2 = input + 1, inner3 = input + 2 */
	bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
	bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
	bpf_map_update_elem(outer_arr_dyn_fd, &key, &map3_fd, 0);
	skel->bss->input = 1;
	usleep(1);
	bpf_map_lookup_elem(map1_fd, &key, &val);
	CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
	bpf_map_lookup_elem(map2_fd, &key, &val);
	CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
	bpf_map_lookup_elem(map3_fd, &key, &val);
	CHECK(val != 3, "inner3", "got %d != exp %d\n", val, 3);

	/* inner2 = input, inner1 = input + 1, inner4 = input + 2 */
	bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
	bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
	bpf_map_update_elem(outer_arr_dyn_fd, &key, &map4_fd, 0);
	skel->bss->input = 3;
	usleep(1);
	bpf_map_lookup_elem(map1_fd, &key, &val);
	CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
	bpf_map_lookup_elem(map2_fd, &key, &val);
	CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
	bpf_map_lookup_elem(map4_fd, &key, &val);
	CHECK(val != 5, "inner4", "got %d != exp %d\n", val, 5);

	/* inner5 = input + 2 */
	bpf_map_update_elem(outer_arr_dyn_fd, &key, &map5_fd, 0);
	skel->bss->input = 5;
	usleep(1);
	bpf_map_lookup_elem(map5_fd, &key, &val);
	CHECK(val != 7, "inner5", "got %d != exp %d\n", val, 7);

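	/* Repeatedly swap the inner maps installed in all three outer maps;
	 * every such update is expected to succeed.
	 */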
	for (i = 0; i < 5; i++) {
		val = i % 2 ? map1_fd : map2_fd;
		err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
		if (CHECK_FAIL(err)) {
			printf("failed to update hash_of_maps on iter #%d\n", i);
			goto cleanup;
		}
		err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
		if (CHECK_FAIL(err)) {
			printf("failed to update array_of_maps on iter #%d\n", i);
			goto cleanup;
		}
		val = i % 2 ? map4_fd : map5_fd;
		err = bpf_map_update_elem(outer_arr_dyn_fd, &key, &val, 0);
		if (CHECK_FAIL(err)) {
			printf("failed to update array_of_maps (dyn) on iter #%d\n", i);
			goto cleanup;
		}
	}

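	/* Remember the inner maps' IDs so that, after the skeleton is
	 * destroyed, we can check that the kernel actually released them.
	 */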
	map1_id = bpf_map_id(skel->maps.inner_map1);
	map2_id = bpf_map_id(skel->maps.inner_map2);
	CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
	CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");

	test_btf_map_in_map__destroy(skel);
	skel = NULL;

	/* We need to either wait for or force a synchronize_rcu() before
	 * checking the "still exists" condition, otherwise the map could
	 * still be resolvable by ID, causing false positives.
	 *
	 * Older kernels (5.8 and earlier) freed the map only after two
	 * synchronize_rcu()s, so trigger two, to be entirely sure.
	 */
	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");

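	/* Both inner maps should be gone by now, so resolving them by ID must
	 * fail; getting a valid FD back means the map was leaked.
	 */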
	fd = bpf_map_get_fd_by_id(map1_id);
	if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
		close(fd);
		goto cleanup;
	}
	fd = bpf_map_get_fd_by_id(map2_id);
	if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
		close(fd);
		goto cleanup;
	}

cleanup:
	test_btf_map_in_map__destroy(skel);
}

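/* Check inner map size enforcement: the sockmap-in-map outer map is expected
 * to accept an inner map of a different size, while the regular array-of-maps
 * is expected to reject an inner map whose size differs from the declared one.
 */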
static void test_diff_size(void)
{
	struct test_btf_map_in_map *skel;
	int err, inner_map_fd, zero = 0;

	skel = test_btf_map_in_map__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
		return;

	inner_map_fd = bpf_map__fd(skel->maps.sockarr_sz2);
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_sockarr), &zero,
				  &inner_map_fd, 0);
	CHECK(err, "outer_sockarr inner map size check",
	      "cannot use a different size inner_map\n");

	inner_map_fd = bpf_map__fd(skel->maps.inner_map_sz2);
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &zero,
				  &inner_map_fd, 0);
	CHECK(!err, "outer_arr inner map size check",
	      "incorrectly updated with a different size inner_map\n");

	test_btf_map_in_map__destroy(skel);
}

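/* Test entry point: run each scenario as its own subtest. */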
void test_btf_map_in_map(void)
{
	if (test__start_subtest("lookup_update"))
		test_lookup_update();

	if (test__start_subtest("diff_size"))
		test_diff_size();
}