/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2019 Facebook
 * Copyright 2020 Google LLC.
 */

#ifndef _BPF_LOCAL_STORAGE_H
#define _BPF_LOCAL_STORAGE_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/bpf_mem_alloc.h>
#include <uapi/linux/btf.h>

#define BPF_LOCAL_STORAGE_CACHE_SIZE	16

#define bpf_rcu_lock_held()						\
	(rcu_read_lock_held() || rcu_read_lock_trace_held() ||		\
	 rcu_read_lock_bh_held())
struct bpf_local_storage_map_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

/* The map is not the primary owner of a bpf_local_storage_elem.
 * Instead, the container object (e.g. the sk holding sk->sk_bpf_storage) is.
 *
 * The map (bpf_local_storage_map) serves two purposes:
 * 1. Define the size of the "local storage". It is
 *    the map's value_size.
 *
 * 2. Maintain a list of all elems so that they can be
 *    cleaned up when the map is destroyed.
 *
 * When a bpf local storage is looked up for a particular
 * object, the "bpf_map" pointer is actually used as the
 * "key" to search the list of elems in the respective
 * bpf_local_storage owned by that object.
 *
 * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
 * as the searching key.
 */
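/* Illustrative example (a sketch, not part of this header): from a BPF
 * program's point of view, each local storage map behaves like a per-object
 * "mini-map". For a hypothetical BPF_MAP_TYPE_SK_STORAGE map named my_sk_map,
 * the program-side lookup is keyed by the map itself plus the socket:
 *
 *	data = bpf_sk_storage_get(&my_sk_map, sk, NULL,
 *				  BPF_SK_STORAGE_GET_F_CREATE);
 *
 * Internally this resolves to a search of sk->sk_bpf_storage using the
 * &my_sk_map pointer as the key (see bpf_local_storage_lookup() below).
 */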
struct bpf_local_storage_map {
	struct bpf_map map;
	/* Looking up an elem does not require accessing the map.
	 *
	 * Updating/deleting an elem requires a bucket lock to
	 * link/unlink it from the map. Multiple buckets are used
	 * to reduce lock contention (see the bucket-selection
	 * sketch after this struct).
	 */
	struct bpf_local_storage_map_bucket *buckets;
	u32 bucket_log;
	u16 elem_size;
	u16 cache_idx;
	struct bpf_mem_alloc selem_ma;
	struct bpf_mem_alloc storage_ma;
	bool bpf_ma;
};
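
/* Bucket-selection sketch (illustrative; the actual helper lives in
 * kernel/bpf/bpf_local_storage.c): an elem is hashed into a bucket by its
 * pointer, so unrelated elems spread across buckets and their locks:
 *
 *	static struct bpf_local_storage_map_bucket *
 *	select_bucket(struct bpf_local_storage_map *smap,
 *		      struct bpf_local_storage_elem *selem)
 *	{
 *		return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
 *	}
 */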

struct bpf_local_storage_data {
	/* smap is used as the search key when looking up an elem
	 * in the object's bpf_local_storage.
	 *
	 * Put it in the same cacheline as the data to minimize
	 * the number of cachelines accessed during the cache hit case.
	 */
	struct bpf_local_storage_map __rcu *smap;
	u8 data[] __aligned(8);
};

/* Linked to bpf_local_storage and bpf_local_storage_map */
struct bpf_local_storage_elem {
	struct hlist_node map_node;	/* Linked to bpf_local_storage_map */
	struct hlist_node snode;	/* Linked to bpf_local_storage */
	struct bpf_local_storage __rcu *local_storage;
	struct rcu_head rcu;
	/* 8 bytes hole */
	/* The data is stored in another cacheline to minimize
	 * the number of cachelines accessed during a cache hit.
	 */
	struct bpf_local_storage_data sdata ____cacheline_aligned;
};

struct bpf_local_storage {
	struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
	struct bpf_local_storage_map __rcu *smap;
	struct hlist_head list;	/* List of bpf_local_storage_elem */
	void *owner;		/* The object that owns the above "list" of
				 * bpf_local_storage_elem.
				 */
	struct rcu_head rcu;
	raw_spinlock_t lock;	/* Protect adding/removing from the "list" */
};

/* U16_MAX is much more than enough for sk local storage
 * considering a tcp_sock is ~2k.
 */
#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE				\
	min_t(u32,							\
	      (KMALLOC_MAX_SIZE - MAX_BPF_STACK -			\
	       sizeof(struct bpf_local_storage_elem)),			\
	      (U16_MAX - sizeof(struct bpf_local_storage_elem)))

#define SELEM(_SDATA)							\
	container_of((_SDATA), struct bpf_local_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)
struct bpf_local_storage_cache {
	spinlock_t idx_lock;
	u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
};

#define DEFINE_BPF_STORAGE_CACHE(name)				\
static struct bpf_local_storage_cache name = {			\
	.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock),	\
}
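
/* Typical usage (e.g. in the socket-storage flavor built on this header;
 * the cache name is per-flavor):
 *
 *	DEFINE_BPF_STORAGE_CACHE(sk_cache);
 */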

/* Helper functions for bpf_local_storage */
int bpf_local_storage_map_alloc_check(union bpf_attr *attr);

struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma);

void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
				      struct bpf_local_storage_map *smap,
				      struct bpf_local_storage_elem *selem);
/* If cacheit_lockit is false, this lookup function is lockless */
static inline struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;
	if (cacheit_lockit)
		__bpf_local_storage_insert_cache(local_storage, smap, selem);
	return SDATA(selem);
}
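
/* Usage sketch (illustrative; names are hypothetical): an owner-specific
 * flavor typically resolves the owner's bpf_local_storage under RCU and
 * then delegates to bpf_local_storage_lookup(), e.g. for sockets:
 *
 *	static struct bpf_local_storage_data *
 *	my_sk_storage_lookup(struct sock *sk, struct bpf_map *map,
 *			     bool cacheit_lockit)
 *	{
 *		struct bpf_local_storage *sk_storage;
 *
 *		sk_storage = rcu_dereference_check(sk->sk_bpf_storage,
 *						   bpf_rcu_lock_held());
 *		if (!sk_storage)
 *			return NULL;
 *
 *		return bpf_local_storage_lookup(sk_storage,
 *				(struct bpf_local_storage_map *)map,
 *				cacheit_lockit);
 *	}
 */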

void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);

void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter);

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type);

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem);

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem);

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
		bool charge_mem, gfp_t gfp_flags);

void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now);

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags);

struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags);
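
/* Usage sketch (illustrative; the wrapper name is hypothetical): flavors
 * typically pass the owner object straight through, picking gfp_flags to
 * match the calling context, e.g.:
 *
 *	static struct bpf_local_storage_data *
 *	my_sk_storage_update(struct sock *sk, struct bpf_map *map,
 *			     void *value, u64 map_flags)
 *	{
 *		return bpf_local_storage_update(sk,
 *				(struct bpf_local_storage_map *)map,
 *				value, map_flags, GFP_ATOMIC);
 *	}
 */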

u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);

#endif /* _BPF_LOCAL_STORAGE_H */