// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
};

#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* progs has all the bpf_progs that are populated
	 * into the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_prog **progs;
	/* image is a page that holds all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all the
	 * trampolines for "progs[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};
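
/* Illustrative sketch (values made up) of the uvalue/kvalue split for
 * tcp_congestion_ops:
 *
 *	kvalue.data, registered to the subsystem:
 *		.ssthresh = <kernel address of a trampoline in image>
 *		.name     = "bpf_cubic"
 *
 *	uvalue->data, returned to userspace by lookup_elem:
 *		.ssthresh = <id of the bpf_prog>, e.g. 42
 *		.name     = "bpf_cubic"
 */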

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace and its btf-type-id is
 * stored in map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
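
/* For example, with a line like
 *
 *	BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
 *
 * in bpf_struct_ops_types.h, the macro above expands to:
 *
 *	extern struct bpf_struct_ops bpf_tcp_congestion_ops;
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		refcount_t refcnt;
 *		enum bpf_struct_ops_state state;
 *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *	};
 */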

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
};

static const struct btf_type *module_type;

void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}
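
/* A minimal sketch of the subsystem side that bpf_struct_ops_init()
 * expects: each entry of bpf_struct_ops[] is a named, statically
 * defined struct bpf_struct_ops.  This mirrors (in trimmed form) what
 * net/ipv4/bpf_tcp_ca.c does for tcp_congestion_ops; the init body
 * here is an assumption for illustration:
 *
 *	static int bpf_tcp_ca_init(struct btf *btf)
 *	{
 *		// resolve any BTF ids the callbacks need; 0 on success
 *		return 0;
 *	}
 *
 *	struct bpf_struct_ops bpf_tcp_congestion_ops = {
 *		.verifier_ops	= &bpf_tcp_ca_verifier_ops,
 *		.init		= bpf_tcp_ca_init,
 *		.init_member	= bpf_tcp_ca_init_member,
 *		.check_member	= bpf_tcp_ca_check_member,
 *		.reg		= bpf_tcp_ca_reg,
 *		.unreg		= bpf_tcp_ca_unreg,
 *		.name		= "tcp_congestion_ops",
 *	};
 */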

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be read together atomically.
	 */
	uvalue = (struct bpf_struct_ops_value *)value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;
	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->progs[i]) {
			bpf_prog_put(st_map->progs[i]);
			st_map->progs[i] = NULL;
		}
	}
}

static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}
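
/* Why check_zero_holes() exists, with a hypothetical struct for
 * illustration:
 *
 *	struct example_ops {
 *		u8  flag;	// moff = 0, msize = 1
 *				// bytes 1-3 are a padding hole
 *		u32 val;	// moff = 4
 *	};
 *
 * Nothing else validates the padding bytes, so non-zero bytes hidden
 * in a hole of the user-supplied value would be memcpy'ed into kvalue
 * unchecked.  Rejecting any non-zero hole (and non-zero tail padding)
 * keeps the whole value verifiable member by member.
 */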

static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_progs *tprogs = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = (struct bpf_struct_ops_value *)value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			/* The owner slot must be left NULL by userspace */
			if (*(void **)(udata + moff)) {
				err = -EINVAL;
				goto reset_unlock;
			}
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zeroed members
		 * here.  Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check to attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}
		st_map->progs[i] = prog;

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			err = -EINVAL;
			goto reset_unlock;
		}

		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
		err = arch_prepare_bpf_trampoline(NULL, image,
						  st_map->image + PAGE_SIZE,
						  &st_ops->func_models[i], 0,
						  tprogs, NULL);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* store the bpf_prog's id in udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	refcount_set(&kvalue->refcnt, 1);
	bpf_map_inc(map);

	set_memory_ro((long)st_map->image, 1);
	set_memory_x((long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  It is very unlikely since
	 * the above init_member() should have caught it earlier
	 * before reg().  The only possibility is if there was a race
	 * in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);
	bpf_map_put(map);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tprogs);
	mutex_unlock(&st_map->lock);
	return err;
}
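
/* A minimal userspace sketch of driving this update path with libbpf's
 * syscall wrappers.  "map_fd" and "prog_fd" are assumed to be an
 * already-created BPF_MAP_TYPE_STRUCT_OPS map and a loaded
 * BPF_PROG_TYPE_STRUCT_OPS prog; normally libbpf's SEC(".struct_ops")
 * handling does all of this automatically:
 *
 *	struct bpf_struct_ops_tcp_congestion_ops val = {};
 *	__u32 key = 0;
 *
 *	// a func ptr slot carries the prog fd on the way in; the kernel
 *	// replaces it with a trampoline ptr (kvalue) and prog id (uvalue)
 *	*(unsigned long *)&val.data.ssthresh = prog_fd;
 *	memcpy(val.data.name, "bpf_cubic", sizeof("bpf_cubic"));
 *	err = bpf_map_update_elem(map_fd, &key, &val, 0);
 */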

static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
			bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}
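
/* The cmpxchg() above implements this small state machine for
 * kvalue.state (derived from the code; INIT is the zero value):
 *
 *	INIT --map_update_elem()--> INUSE --map_delete_elem()--> TOBEFREE
 *
 * TOBEFREE is terminal for the map: the struct_ops has been unreg()'ed
 * and the map is only kept alive until the last kvalue.refcnt is put.
 */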

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->progs)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->progs);
	bpf_jit_free_exec(st_map->image);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map *map;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map)
		return ERR_PTR(-ENOMEM);

	st_map->st_ops = st_ops;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->progs =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
		bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);

	return map;
}

static int bpf_struct_ops_map_btf_id;
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_btf_name = "bpf_struct_ops_map",
	.map_btf_id = &bpf_struct_ops_map_btf_id,
};
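
/* How this map type is typically populated from the BPF side (sketch;
 * the member subset and names are illustrative).  libbpf turns the
 * ".struct_ops" section below into a BPF_MAP_TYPE_STRUCT_OPS map and
 * issues the map_update_elem above with prog fds in the func ptr slots:
 *
 *	SEC("struct_ops/bpf_cubic_ssthresh")
 *	__u32 BPF_PROG(bpf_cubic_ssthresh, struct sock *sk)
 *	{
 *		return 2;	// placeholder body
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops cubic = {
 *		.ssthresh	= (void *)bpf_cubic_ssthresh,
 *		.name		= "bpf_cubic",
 *	};
 */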

/* "const void *" because some subsystems pass a const pointer
 * (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

	return refcount_inc_not_zero(&kvalue->refcnt);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		struct bpf_struct_ops_map *st_map;

		st_map = container_of(kvalue, struct bpf_struct_ops_map,
				      kvalue);
		bpf_map_put(&st_map->map);
	}
}
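
/* Sketch of how a subsystem pairs these around a kdata it keeps.  This
 * mirrors the bpf_try_module_get()/bpf_module_put() helpers, which
 * treat BPF_MODULE_OWNER specially; the exact call sites are subsystem
 * specific and "ca" here is an assumed tcp_congestion_ops pointer:
 *
 *	if (ca->owner == BPF_MODULE_OWNER ?
 *	    !bpf_struct_ops_get(ca) : !try_module_get(ca->owner))
 *		return -EBUSY;
 *	...
 *	if (ca->owner == BPF_MODULE_OWNER)
 *		bpf_struct_ops_put(ca);
 *	else
 *		module_put(ca->owner);
 */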