/* xref: /linux/kernel/bpf/bloom_filter.c (revision 908fc4c2) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/btf_ids.h>

#define BLOOM_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)

struct bpf_bloom_filter {
	struct bpf_map map;
	u32 bitset_mask;
	u32 hash_seed;
	/* If the value size is u32-aligned, it is more performant to use
	 * jhash2 as the underlying hash function; otherwise we use jhash.
	 * This field tracks the number of u32s in a u32-aligned value size.
	 * If the value size is not u32-aligned, this will be 0.
	 */
	u32 aligned_u32_count;
	u32 nr_hash_funcs;
	unsigned long bitset[];
};
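
/* For illustration: a value_size of 8 yields aligned_u32_count == 2, so the
 * value is hashed with jhash2() over two u32 words; a value_size of 6 is not
 * u32-aligned, so aligned_u32_count stays 0 and jhash() hashes the raw 6 bytes.
 */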

static u32 hash(struct bpf_bloom_filter *bloom, void *value,
		u32 value_size, u32 index)
{
	u32 h;

	if (bloom->aligned_u32_count)
		h = jhash2(value, bloom->aligned_u32_count,
			   bloom->hash_seed + index);
	else
		h = jhash(value, value_size, bloom->hash_seed + index);

	return h & bloom->bitset_mask;
}
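
/* Note: the k "hash functions" are a single jhash seeded with
 * hash_seed + index for index 0 .. nr_hash_funcs - 1. The result is
 * reduced with a bitmask instead of a modulo, which is exact because
 * the bit array size chosen in bloom_map_alloc() is always a power of
 * two (bitset_mask == size - 1).
 */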

static int bloom_map_peek_elem(struct bpf_map *map, void *value)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		if (!test_bit(h, bloom->bitset))
			return -ENOENT;
	}

	return 0;
}
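
/* Peek returning 0 means the value was "possibly added" (bloom filters can
 * report false positives), while -ENOENT means the value was definitely
 * never pushed.
 */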

static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);
	u32 i, h;

	if (flags != BPF_ANY)
		return -EINVAL;

	for (i = 0; i < bloom->nr_hash_funcs; i++) {
		h = hash(bloom, value, map->value_size, i);
		set_bit(h, bloom->bitset);
	}

	return 0;
}
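
/* A minimal sketch of how a BPF program might exercise these callbacks via
 * the bpf_map_push_elem()/bpf_map_peek_elem() helpers. The map definition
 * assumes a libbpf version that accepts map_extra in BTF-defined maps; the
 * names and sizes below are illustrative only.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_BLOOM_FILTER);
 *		__uint(value_size, sizeof(__u32));
 *		__uint(max_entries, 10000);
 *		__uint(map_extra, 5);	// number of hash functions
 *	} bloom SEC(".maps");
 *
 *	__u32 val = 42;
 *
 *	bpf_map_push_elem(&bloom, &val, BPF_ANY);	// add to the filter
 *	if (bpf_map_peek_elem(&bloom, &val) == 0) {
 *		// val is possibly in the filter (false positives possible)
 *	} else {
 *		// val was definitely never added
 *	}
 */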

static int bloom_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static int bloom_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -EOPNOTSUPP;
}

static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
{
	u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_bloom_filter *bloom;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	if (attr->key_size != 0 || attr->value_size == 0 ||
	    attr->max_entries == 0 ||
	    attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    /* The lower 4 bits of map_extra (0xF) specify the number
	     * of hash functions
	     */
	    (attr->map_extra & ~0xF))
		return ERR_PTR(-EINVAL);

	nr_hash_funcs = attr->map_extra;
	if (nr_hash_funcs == 0)
		/* Default to using 5 hash functions if unspecified */
		nr_hash_funcs = 5;

	/* For the bloom filter, the optimal bit array size that minimizes the
	 * false positive probability is n * k / ln(2) where n is the number of
	 * expected entries in the bloom filter and k is the number of hash
	 * functions. We use 7 / 5 to approximate 1 / ln(2) (~1.44).
	 *
	 * We round this up to the nearest power of two to enable more efficient
	 * hashing using bitmasks. The bitmask will be the bit array size - 1.
	 *
	 * If this overflows a u32, the bit array will have 2^32 bits
	 * (512 MB of bitmap).
	 */
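	/* Worked example (for illustration): with max_entries = 10000 and
	 * nr_hash_funcs = 5, nr_bits = 10000 * 5 = 50000, then
	 * 50000 / 5 * 7 = 70000, which rounds up to 2^17 = 131072 bits
	 * (a 16 KiB bitmap) with bitset_mask = 0x1ffff.
	 */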
	if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||
	    check_mul_overflow(nr_bits / 5, (u32)7, &nr_bits) ||
	    nr_bits > (1UL << 31)) {
		/* The bit array size is 2^32 bits but to avoid overflowing the
		 * u32, we use U32_MAX, which will round up to the equivalent
		 * number of bytes
		 */
		bitset_bytes = BITS_TO_BYTES(U32_MAX);
		bitset_mask = U32_MAX;
	} else {
		if (nr_bits <= BITS_PER_LONG)
			nr_bits = BITS_PER_LONG;
		else
			nr_bits = roundup_pow_of_two(nr_bits);
		bitset_bytes = BITS_TO_BYTES(nr_bits);
		bitset_mask = nr_bits - 1;
	}
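	/* In the fallback branch above, BITS_TO_BYTES(U32_MAX) rounds up to
	 * 2^29 bytes (a 512 MB bitmap), and the all-ones bitset_mask lets the
	 * full 32-bit hash value index into it.
	 */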

	bitset_bytes = roundup(bitset_bytes, sizeof(unsigned long));
	bloom = bpf_map_area_alloc(sizeof(*bloom) + bitset_bytes, numa_node);

	if (!bloom)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&bloom->map, attr);

	bloom->nr_hash_funcs = nr_hash_funcs;
	bloom->bitset_mask = bitset_mask;

	/* Check whether the value size is u32-aligned */
	if ((attr->value_size & (sizeof(u32) - 1)) == 0)
		bloom->aligned_u32_count =
			attr->value_size / sizeof(u32);

	if (!(attr->map_flags & BPF_F_ZERO_SEED))
		bloom->hash_seed = get_random_int();

	return &bloom->map;
}
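
/* A minimal sketch of creating this map from user space, assuming libbpf's
 * bpf_map_create() with a bpf_map_create_opts carrying map_extra; the
 * numbers are illustrative only.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_extra = 5);	// number of hash functions (lower 4 bits)
 *
 *	// key_size must be 0 for bloom filter maps
 *	int fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, "bloom",
 *				0, sizeof(__u32), 10000, &opts);
 */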

static void bloom_map_free(struct bpf_map *map)
{
	struct bpf_bloom_filter *bloom =
		container_of(map, struct bpf_bloom_filter, map);

	bpf_map_area_free(bloom);
}

static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
{
	/* The eBPF program should use map_peek_elem instead */
	return ERR_PTR(-EINVAL);
}

static int bloom_map_update_elem(struct bpf_map *map, void *key,
				 void *value, u64 flags)
{
	/* The eBPF program should use map_push_elem instead */
	return -EINVAL;
}

static int bloom_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	/* Bloom filter maps are keyless */
	return btf_type_is_void(key_type) ? 0 : -EINVAL;
}

BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)
const struct bpf_map_ops bloom_filter_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = bloom_map_alloc,
	.map_free = bloom_map_free,
	.map_get_next_key = bloom_map_get_next_key,
	.map_push_elem = bloom_map_push_elem,
	.map_peek_elem = bloom_map_peek_elem,
	.map_pop_elem = bloom_map_pop_elem,
	.map_lookup_elem = bloom_map_lookup_elem,
	.map_update_elem = bloom_map_update_elem,
	.map_delete_elem = bloom_map_delete_elem,
	.map_check_btf = bloom_map_check_btf,
	.map_btf_id = &bpf_bloom_map_btf_ids[0],
};