xref: /linux/kernel/bpf/map_iter.c (revision 6f3189f3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/bpf.h>
4 #include <linux/fs.h>
5 #include <linux/filter.h>
6 #include <linux/kernel.h>
7 #include <linux/btf_ids.h>
8 
/* Iterator walk state: id of the map the seq_file cursor is currently at. */
struct bpf_iter_seq_map_info {
	u32 map_id;
};
12 
/*
 * seq_file ->start(): look up the map at (or after) the saved map_id,
 * taking a reference on it. Bumps *pos past 0 on the first call so a
 * successful start is distinguishable from SEQ_START_TOKEN handling.
 * Returns NULL when no further map exists.
 */
static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;
	struct bpf_map *map = bpf_map_get_curr_or_next(&info->map_id);

	if (map && *pos == 0)
		++*pos;

	return map;
}
26 
/*
 * seq_file ->next(): drop the reference on the map just shown, advance
 * the id cursor, and fetch (with a new reference) the next map, or NULL
 * at the end of the walk.
 */
static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;
	struct bpf_map *prev = v;

	++info->map_id;
	++*pos;
	bpf_map_put(prev);

	return bpf_map_get_curr_or_next(&info->map_id);
}
36 
/* Context handed to a bpf_iter program for the "bpf_map" target:
 * iteration metadata plus the map currently being visited (NULL on the
 * final call made from ->stop()).
 */
struct bpf_iter__bpf_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
};
41 
/* Declare the iterator entry point for the "bpf_map" target. */
DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map)
43 
/*
 * Run the attached bpf_iter program, if any, against the current map.
 * in_stop marks the final invocation from ->stop() (where v is NULL),
 * letting the program observe the end of the walk. Returns the
 * program's result, or 0 when no program is attached.
 *
 * NOTE: meta is intentionally only partially initialized; only meta.seq
 * is set here before bpf_iter_get_info() is consulted.
 */
static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
{
	struct bpf_iter__bpf_map ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	ctx.meta = &meta;
	ctx.map = v;
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}
60 
bpf_map_seq_show(struct seq_file * seq,void * v)61 static int bpf_map_seq_show(struct seq_file *seq, void *v)
62 {
63 	return __bpf_map_seq_show(seq, v, false);
64 }
65 
bpf_map_seq_stop(struct seq_file * seq,void * v)66 static void bpf_map_seq_stop(struct seq_file *seq, void *v)
67 {
68 	if (!v)
69 		(void)__bpf_map_seq_show(seq, v, true);
70 	else
71 		bpf_map_put((struct bpf_map *)v);
72 }
73 
/* seq_file operations backing the "bpf_map" iterator. */
static const struct seq_operations bpf_map_seq_ops = {
	.start	= bpf_map_seq_start,
	.next	= bpf_map_seq_next,
	.stop	= bpf_map_seq_stop,
	.show	= bpf_map_seq_show,
};
80 
/* BTF ID of struct bpf_map; used below to type the iterator ctx argument. */
BTF_ID_LIST_GLOBAL_SINGLE(btf_bpf_map_id, struct, bpf_map)
82 
/* No per-walk private init/fini needed beyond the map_id cursor storage. */
static const struct bpf_iter_seq_info bpf_map_seq_info = {
	.seq_ops		= &bpf_map_seq_ops,
	.init_seq_private	= NULL,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_map_info),
};
89 
/*
 * Registration info for the "bpf_map" target. Not const:
 * ctx_arg_info[0].btf_id is patched in at init time from btf_bpf_map_id.
 * The map argument may be NULL (final ->stop() call), hence
 * PTR_TO_BTF_ID_OR_NULL.
 */
static struct bpf_iter_reg bpf_map_reg_info = {
	.target			= "bpf_map",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_map, map),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info		= &bpf_map_seq_info,
};
99 
/*
 * Attach a bpf_iter program to one specific map ("bpf_map_elem" target).
 *
 * Takes a with-uref reference on the map named by linfo->map.map_fd,
 * accepts only the listed hash/array map types, and rejects programs
 * whose recorded maximal key/value accesses (prog->aux->max_rdonly_access
 * / max_rdwr_access) exceed the map's key/value sizes. For per-cpu maps
 * the accessible value region spans all possible CPUs, each slot rounded
 * up to 8 bytes.
 *
 * On success ownership of the map reference moves to aux->map and 0 is
 * returned; on any failure the reference is dropped and a negative errno
 * (-EBADF, -EINVAL or -EACCES) is returned.
 */
static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	u32 key_acc_size, value_acc_size, key_size, value_size;
	struct bpf_map *map;
	bool is_percpu = false;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		is_percpu = true;
	else if (map->map_type != BPF_MAP_TYPE_HASH &&
		 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
		 map->map_type != BPF_MAP_TYPE_ARRAY)
		goto put_map;

	key_acc_size = prog->aux->max_rdonly_access;
	value_acc_size = prog->aux->max_rdwr_access;
	key_size = map->key_size;
	if (!is_percpu)
		value_size = map->value_size;
	else
		value_size = round_up(map->value_size, 8) * num_possible_cpus();

	if (key_acc_size > key_size || value_acc_size > value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}
145 
bpf_iter_detach_map(struct bpf_iter_aux_info * aux)146 static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
147 {
148 	bpf_map_put_with_uref(aux->map);
149 }
150 
bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info * aux,struct seq_file * seq)151 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
152 			      struct seq_file *seq)
153 {
154 	seq_printf(seq, "map_id:\t%u\n", aux->map->id);
155 }
156 
bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info * aux,struct bpf_link_info * info)157 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
158 				struct bpf_link_info *info)
159 {
160 	info->iter.map.map_id = aux->map->id;
161 	return 0;
162 }
163 
/* Declare the iterator entry point for the "bpf_map_elem" target. */
DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key, void *value)
166 
/*
 * Registration info for the "bpf_map_elem" target. key/value are exposed
 * as possibly-NULL buffers; the key is read-only to the program. Both may
 * be NULL on the final ->stop() invocation.
 */
static const struct bpf_iter_reg bpf_map_elem_reg_info = {
	.target			= "bpf_map_elem",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_map_elem, key),
		  PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
		{ offsetof(struct bpf_iter__bpf_map_elem, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
};
181 
bpf_map_iter_init(void)182 static int __init bpf_map_iter_init(void)
183 {
184 	int ret;
185 
186 	bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id;
187 	ret = bpf_iter_reg_target(&bpf_map_reg_info);
188 	if (ret)
189 		return ret;
190 
191 	return bpf_iter_reg_target(&bpf_map_elem_reg_info);
192 }
193 
194 late_initcall(bpf_map_iter_init);
195 
196 __bpf_kfunc_start_defs();
197 
bpf_map_sum_elem_count(const struct bpf_map * map)198 __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
199 {
200 	s64 *pcount;
201 	s64 ret = 0;
202 	int cpu;
203 
204 	if (!map || !map->elem_count)
205 		return 0;
206 
207 	for_each_possible_cpu(cpu) {
208 		pcount = per_cpu_ptr(map->elem_count, cpu);
209 		ret += READ_ONCE(*pcount);
210 	}
211 	return ret;
212 }
213 
214 __bpf_kfunc_end_defs();
215 
/* kfuncs exposed by this file; KF_TRUSTED_ARGS requires a trusted map ptr. */
BTF_KFUNCS_START(bpf_map_iter_kfunc_ids)
BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_map_iter_kfunc_ids)
219 
/* kfunc id set registered for all program types (BPF_PROG_TYPE_UNSPEC). */
static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_map_iter_kfunc_ids,
};
224 
init_subsystem(void)225 static int init_subsystem(void)
226 {
227 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_map_iter_kfunc_set);
228 }
229 late_initcall(init_subsystem);
230