// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}
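
/* Note that select_bucket() hashes the selem pointer itself rather than a
 * user key: elems of the same map scatter across buckets, and the bucket
 * chosen for a given selem is stable for its whole lifetime, so
 * bpf_selem_link_map() and bpf_selem_unlink_map() always find the same
 * bucket lock.
 */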

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}
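
/* The charge/uncharge pair is indirected through map_ops so each storage
 * flavor can account memory against its own owner; e.g. sk_storage charges
 * sk->sk_omem_alloc.  A minimal sketch of such a callback pair, assuming a
 * hypothetical global "example_mem" budget and EXAMPLE_LIMIT cap:
 *
 *	static atomic_t example_mem = ATOMIC_INIT(0);
 *
 *	static int example_charge(struct bpf_local_storage_map *smap,
 *				  void *owner, u32 size)
 *	{
 *		if (atomic_add_return(size, &example_mem) > EXAMPLE_LIMIT) {
 *			atomic_sub(size, &example_mem);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 *
 *	static void example_uncharge(struct bpf_local_storage_map *smap,
 *				     void *owner, u32 size)
 *	{
 *		atomic_sub(size, &example_mem);
 *	}
 */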

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}
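
/* Each owner type embeds a "struct bpf_local_storage __rcu *" pointer and
 * supplies a map_owner_storage_ptr op returning its address.  For example,
 * the sk_storage flavor in net/core/bpf_sk_storage.c does roughly:
 *
 *	static struct bpf_local_storage __rcu **sk_storage_ptr(void *owner)
 *	{
 *		struct sock *sk = owner;
 *
 *		return &sk->sk_bpf_storage;
 *	}
 */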

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				gfp_flags | __GFP_NOWARN);
	if (selem) {
		if (value)
			memcpy(SDATA(selem)->data, value, smap->map.value_size);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	kfree_rcu(local_storage, rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
				     struct bpf_local_storage_elem *selem,
				     bool uncharge_mem)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after raw_spin_unlock_irqrestore(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
	return free_local_storage;
}

static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage(selem)))
		/* selem has already been unlinked from its local_storage */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_rcu);
}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
{
	/* Always unlink from the map before unlinking from local_storage
	 * because selem will be freed after it is successfully unlinked
	 * from the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem);
}

struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				  rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* The spinlock is needed to avoid racing with a
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache would become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}
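
/* A sketch of how a lookup gets here: the bpf_sk_storage_get() helper path
 * in net/core/bpf_sk_storage.c boils down to roughly
 *
 *	sk_storage = rcu_dereference(sk->sk_bpf_storage);
 *	sdata = bpf_local_storage_lookup(sk_storage, smap, true);
 *
 * The first lookup of a map walks local_storage->list and publishes the
 * found selem into cache[smap->cache_idx]; later lookups of the same map
 * then hit the fast path above.
 */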

static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
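
/* These are the generic bpf map update flags.  A hypothetical userspace
 * session against a sk_storage map (map_fd and a socket fd sk_fd are
 * assumed; bpf_map_update_elem() is the libbpf syscall wrapper):
 *
 *	bpf_map_update_elem(map_fd, &sk_fd, &val, BPF_NOEXIST); // ok, created
 *	bpf_map_update_elem(map_fd, &sk_fd, &val, BPF_NOEXIST); // fails, EEXIST
 *	bpf_map_update_elem(map_fd, &sk_fd, &val, BPF_EXIST);   // ok, updated
 *	bpf_map_update_elem(map_fd, &sk_fd, &val, BPF_ANY);     // ok either way
 */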

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  gfp_flags | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg is used so that it works with any kernel object
	 * regardless of the running context: bh, irq, etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock.  Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing the selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking a new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it would become a leak (and cause other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot both be set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(&smap->map)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Try to find an old_sdata to do an inline update, which
		 * avoids taking the local_storage->lock and changing the
		 * lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	if (gfp_flags == GFP_KERNEL) {
		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel delete is happening and local_storage is going
		 * away.  It was just checked before taking the lock, so this
		 * is very unlikely.  Return instead of retrying to keep
		 * things simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	if (gfp_flags != GFP_KERNEL) {
		/* local_storage->lock is held.  Hence, we are sure
		 * we can unlink and uncharge the old_sdata successfully
		 * later.  Hence, instead of charging the new selem now
		 * and then uncharging the old selem later (which may cause
		 * a potential but unnecessary charge failure), avoid taking
		 * a charge at all here (the "!old_sdata" check) and the
		 * old_sdata will not be uncharged later during
		 * bpf_selem_unlink_storage_nolock().
		 */
		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
		if (!selem) {
			err = -ENOMEM;
			goto unlock_err;
		}
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		kfree(selem);
	}
	return ERR_PTR(err);
}
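
/* A sketch of a typical caller (adapted from net/core/bpf_sk_storage.c):
 * the syscall-side update of a sk_storage map resolves the socket from the
 * fd stored in the key and then does roughly
 *
 *	sdata = bpf_local_storage_update(sock->sk,
 *					 (struct bpf_local_storage_map *)map,
 *					 value, map_flags, GFP_ATOMIC);
 *	return PTR_ERR_OR_ZERO(sdata);
 *
 * Sleepable callers may pass GFP_KERNEL instead, but only together with
 * BPF_NOEXIST (see the gfp_flags check at the top of this function).
 */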

u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}
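
/* Worked example: with idx_usage_counts = {2, 0, 1, ...}, the scan stops
 * at index 1 (a zero count means a free slot), bumps it to 1 and returns 1.
 * If every count were non-zero, the smallest one would win, so two maps can
 * share a cache_idx; bpf_local_storage_lookup() copes with that by checking
 * sdata->smap on a cache hit.
 */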

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
				      u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map_bucket *b;
	unsigned int i;

	/* Note that this map might be concurrently cloned by
	 * bpf_sk_storage_clone().  Wait for any existing bpf_sk_storage_clone()
	 * RCU read section to finish before proceeding.  New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero().
	 */
	synchronize_rcu();

	/* Neither bpf progs nor userspace can access this map any
	 * longer.  No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elems of this map can be cleaned up here
	 * or when the storage is freed, e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				__this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem);
			if (busy_counter) {
				__this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked a selem from the map,
	 * which then makes the above while ((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait for another RCU grace period before the storage is freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(smap);
}

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}
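
/* A map definition that passes these checks, as it could appear in a bpf
 * program using libbpf conventions (a sketch; the value type is arbitrary):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_value);
 *	} sk_stg_map SEC(".maps");
 *
 * max_entries stays 0 and the key/value BTF ids are filled in from the
 * __type() declarations when the map is loaded.
 */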

struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = kvcalloc(nbuckets, sizeof(*smap->buckets),
				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap->buckets) {
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size =
		sizeof(struct bpf_local_storage_elem) + attr->value_size;

	return smap;
}

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
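
/* e.g. "__type(key, int)" passes, while a non-int key such as a 4-byte
 * struct (which gets past the key_size check in
 * bpf_local_storage_map_alloc_check()) is rejected here by the
 * BTF_KIND_INT check.
 */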