xref: /linux/lib/generic-radix-tree.c (revision d642ef71)
1 
2 #include <linux/atomic.h>
3 #include <linux/export.h>
4 #include <linux/generic-radix-tree.h>
5 #include <linux/gfp.h>
6 #include <linux/kmemleak.h>
7 
/* Number of child pointers that fit in one page-sized interior node: */
#define GENRADIX_ARY		(PAGE_SIZE / sizeof(struct genradix_node *))
/* log2 of the above — bits of offset consumed per tree level: */
#define GENRADIX_ARY_SHIFT	ilog2(GENRADIX_ARY)
10 
/*
 * Every node is exactly one page: an interior node is an array of
 * GENRADIX_ARY child pointers, a leaf is raw data. Which one a given
 * node is depends on its distance from the root (the stored depth).
 */
struct genradix_node {
	union {
		/* Interior node: */
		struct genradix_node	*children[GENRADIX_ARY];

		/* Leaf: */
		u8			data[PAGE_SIZE];
	};
};
20 
21 static inline int genradix_depth_shift(unsigned depth)
22 {
23 	return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
24 }
25 
26 /*
27  * Returns size (of data, in bytes) that a tree of a given depth holds:
28  */
29 static inline size_t genradix_depth_size(unsigned depth)
30 {
31 	return 1UL << genradix_depth_shift(depth);
32 }
33 
/* depth that's needed for a genradix that can address up to ULONG_MAX: */
#define GENRADIX_MAX_DEPTH	\
	DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)

/*
 * The tree depth is stashed in the low bits of the root pointer (see
 * genradix_root_to_depth()/genradix_root_to_node()); nodes come from
 * __get_free_page() and are page aligned, so those bits are free.
 */
#define GENRADIX_DEPTH_MASK				\
	((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
40 
41 static inline unsigned genradix_root_to_depth(struct genradix_root *r)
42 {
43 	return (unsigned long) r & GENRADIX_DEPTH_MASK;
44 }
45 
46 static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
47 {
48 	return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
49 }
50 
/*
 * Returns pointer to the specified byte @offset within @radix, or NULL if not
 * allocated
 */
void *__genradix_ptr(struct __genradix *radix, size_t offset)
{
	/* Lockless read path: snapshot root (node + depth) once. */
	struct genradix_root *r = READ_ONCE(radix->root);
	struct genradix_node *n = genradix_root_to_node(r);
	unsigned level		= genradix_root_to_depth(r);

	/*
	 * @offset beyond what a tree of this depth can address?
	 * NOTE(review): relies on ilog2(offset) comparing below the shift
	 * when offset == 0 — confirm ilog2(0) behavior on all configs.
	 */
	if (ilog2(offset) >= genradix_depth_shift(level))
		return NULL;

	while (1) {
		if (!n)
			return NULL;
		if (!level)
			break;

		level--;

		/*
		 * Descend: select the child slot covering @offset, then
		 * reduce @offset to the remainder within that subtree.
		 */
		n = n->children[offset >> genradix_depth_shift(level)];
		offset &= genradix_depth_size(level) - 1;
	}

	/* @n is now a leaf; return the byte within it. */
	return &n->data[offset];
}
EXPORT_SYMBOL(__genradix_ptr);
79 
80 static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
81 {
82 	struct genradix_node *node;
83 
84 	node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
85 
86 	/*
87 	 * We're using pages (not slab allocations) directly for kernel data
88 	 * structures, so we need to explicitly inform kmemleak of them in order
89 	 * to avoid false positive memory leak reports.
90 	 */
91 	kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
92 	return node;
93 }
94 
/* Release a node: unregister from kmemleak, then return the page. */
static inline void genradix_free_node(struct genradix_node *node)
{
	kmemleak_free(node);
	free_page((unsigned long) node);
}
100 
/*
 * Returns pointer to the specified byte @offset within @radix, allocating it if
 * necessary - newly allocated slots are always zeroed out:
 */
void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
			   gfp_t gfp_mask)
{
	struct genradix_root *v = READ_ONCE(radix->root);
	/* @new_node is carried across retries so a lost race doesn't leak. */
	struct genradix_node *n, *new_node = NULL;
	unsigned level;

	/* Increase tree depth if necessary: */
	while (1) {
		struct genradix_root *r = v, *new_root;

		n	= genradix_root_to_node(r);
		level	= genradix_root_to_depth(r);

		/* Tree already deep enough to address @offset? */
		if (n && ilog2(offset) < genradix_depth_shift(level))
			break;

		if (!new_node) {
			new_node = genradix_alloc_node(gfp_mask);
			if (!new_node)
				return NULL;
		}

		/*
		 * Push a new root above the old one; the new depth goes in
		 * the pointer's low bits (0 when the tree was empty).
		 */
		new_node->children[0] = n;
		new_root = ((struct genradix_root *)
			    ((unsigned long) new_node | (n ? level + 1 : 0)));

		/*
		 * cmpxchg_release() publishes the zeroed node to lockless
		 * readers; on failure @v is the newer root and we retry,
		 * reusing @new_node.
		 */
		if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
			v = new_root;
			new_node = NULL;
		}
	}

	/* Walk down, filling in missing interior/leaf nodes as we go: */
	while (level--) {
		struct genradix_node **p =
			&n->children[offset >> genradix_depth_shift(level)];
		offset &= genradix_depth_size(level) - 1;

		n = READ_ONCE(*p);
		if (!n) {
			if (!new_node) {
				new_node = genradix_alloc_node(gfp_mask);
				if (!new_node)
					return NULL;
			}

			/* Lost the race: descend into the winner's node. */
			if (!(n = cmpxchg_release(p, NULL, new_node)))
				swap(n, new_node);
		}
	}

	/* Free any spare node left over from a lost cmpxchg: */
	if (new_node)
		genradix_free_node(new_node);

	return &n->data[offset];
}
EXPORT_SYMBOL(__genradix_ptr_alloc);
162 
/*
 * Advance @iter to the next allocated object at or after its current
 * position and return a pointer to it, or NULL if iteration is done.
 * @objs_per_page is used to keep iter->pos (object index) in sync with
 * iter->offset (byte offset).
 */
void *__genradix_iter_peek(struct genradix_iter *iter,
			   struct __genradix *radix,
			   size_t objs_per_page)
{
	struct genradix_root *r;
	struct genradix_node *n;
	unsigned level, i;

	/* SIZE_MAX offset is the "iteration finished" sentinel: */
	if (iter->offset == SIZE_MAX)
		return NULL;

restart:
	r = READ_ONCE(radix->root);
	if (!r)
		return NULL;

	n	= genradix_root_to_node(r);
	level	= genradix_root_to_depth(r);

	/* Iterator already past everything the tree can address? */
	if (ilog2(iter->offset) >= genradix_depth_shift(level))
		return NULL;

	while (level) {
		level--;

		i = (iter->offset >> genradix_depth_shift(level)) &
			(GENRADIX_ARY - 1);

		/* Skip over unallocated subtrees, advancing the iterator: */
		while (!n->children[i]) {
			size_t objs_per_ptr = genradix_depth_size(level);

			/* Offset would wrap around: mark iteration done. */
			if (iter->offset + objs_per_ptr < iter->offset) {
				iter->offset	= SIZE_MAX;
				iter->pos	= SIZE_MAX;
				return NULL;
			}

			i++;
			/* Round up to the start of the next subtree: */
			iter->offset = round_down(iter->offset + objs_per_ptr,
						  objs_per_ptr);
			iter->pos = (iter->offset >> PAGE_SHIFT) *
				objs_per_page;
			/* Ran off the end of this node: redo the descent. */
			if (i == GENRADIX_ARY)
				goto restart;
		}

		n = n->children[i];
	}

	/* @n is a leaf; return the object's byte within the page. */
	return &n->data[iter->offset & (PAGE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek);
215 
/*
 * Like __genradix_iter_peek(), but walks backwards: return the previous
 * allocated object at or before @iter's position, or NULL if none.
 * @obj_size_plus_page_remainder is the amount to step back to land on
 * the last object of the preceding page/subtree.
 */
void *__genradix_iter_peek_prev(struct genradix_iter *iter,
				struct __genradix *radix,
				size_t objs_per_page,
				size_t obj_size_plus_page_remainder)
{
	struct genradix_root *r;
	struct genradix_node *n;
	unsigned level, i;

	/* SIZE_MAX offset is the "iteration finished" sentinel: */
	if (iter->offset == SIZE_MAX)
		return NULL;

restart:
	r = READ_ONCE(radix->root);
	if (!r)
		return NULL;

	n	= genradix_root_to_node(r);
	level	= genradix_root_to_depth(r);

	/*
	 * Iterator beyond what the tree addresses: clamp it to the last
	 * object the current depth can hold.
	 */
	if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
		iter->offset = genradix_depth_size(level);
		iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;

		iter->offset -= obj_size_plus_page_remainder;
		iter->pos--;
	}

	while (level) {
		level--;

		i = (iter->offset >> genradix_depth_shift(level)) &
			(GENRADIX_ARY - 1);

		/* Skip over unallocated subtrees, walking backwards: */
		while (!n->children[i]) {
			size_t objs_per_ptr = genradix_depth_size(level);

			iter->offset = round_down(iter->offset, objs_per_ptr);
			iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;

			/* Reached the very start of the tree: nothing left. */
			if (!iter->offset)
				return NULL;

			/* Step onto the last object of the previous subtree: */
			iter->offset -= obj_size_plus_page_remainder;
			iter->pos--;

			/* Ran off the front of this node: redo the descent. */
			if (!i)
				goto restart;
			--i;
		}

		n = n->children[i];
	}

	/* @n is a leaf; return the object's byte within the page. */
	return &n->data[iter->offset & (PAGE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek_prev);
273 
274 static void genradix_free_recurse(struct genradix_node *n, unsigned level)
275 {
276 	if (level) {
277 		unsigned i;
278 
279 		for (i = 0; i < GENRADIX_ARY; i++)
280 			if (n->children[i])
281 				genradix_free_recurse(n->children[i], level - 1);
282 	}
283 
284 	genradix_free_node(n);
285 }
286 
287 int __genradix_prealloc(struct __genradix *radix, size_t size,
288 			gfp_t gfp_mask)
289 {
290 	size_t offset;
291 
292 	for (offset = 0; offset < size; offset += PAGE_SIZE)
293 		if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
294 			return -ENOMEM;
295 
296 	return 0;
297 }
298 EXPORT_SYMBOL(__genradix_prealloc);
299 
300 void __genradix_free(struct __genradix *radix)
301 {
302 	struct genradix_root *r = xchg(&radix->root, NULL);
303 
304 	genradix_free_recurse(genradix_root_to_node(r),
305 			      genradix_root_to_depth(r));
306 }
307 EXPORT_SYMBOL(__genradix_free);
308