/*
 * Copyright (c) 2005-2012 The DragonFly Project.
 * Copyright (c) 2013 François Tigeot
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/idr.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/spinlock2.h>
#include <sys/limits.h>

/* Must be 2^n - 1 so the implicit binary tree stays complete */
#define IDR_DEFAULT_SIZE    255

MALLOC_DEFINE(M_IDR, "idr", "Integer ID management");

static void	idr_grow(struct idr *idp, int want);
static void	idr_reserve(struct idr *idp, int id, int incr);
static void	idr_set(struct idr *idp, int id, void *ptr);
static int	idr_find_free(struct idr *idp, int want, int lim);
static int	idr_pre_get1(struct idr *idp, int want, int lim);

/*
 * Number of nodes in the right subtree, including its root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Nearest ancestor with a larger index.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Nearest ancestor with a smaller index.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
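
/*
 * Worked example (illustrative commentary, not from the original source):
 * for n = 5 (binary 101), right_subtree_size(5) = 5 ^ (5 | 6) =
 * 0b101 ^ 0b111 = 0b010 = 2, so the implicit subtree rooted at id 5
 * covers ids {5, 6}.  right_ancestor(5) = 5 | 6 = 7, and
 * left_ancestor(5) = (5 & 6) - 1 = 3, so a reservation at id 5
 * propagates its count up through node 3 (see idr_reserve() below).
 */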

static __inline void
idrfixup(struct idr *idp, int id)
{
	if (id < idp->idr_freeindex) {
		idp->idr_freeindex = id;
	}
	while (idp->idr_lastindex >= 0 &&
		idp->idr_nodes[idp->idr_lastindex].data == NULL &&
		idp->idr_nodes[idp->idr_lastindex].reserved == 0
	) {
		--idp->idr_lastindex;
	}
}

static __inline struct idr_node *
idr_get_node(struct idr *idp, int id)
{
	struct idr_node *idrnp;

	if (id >= idp->idr_count)
		return (NULL);
	idrnp = &idp->idr_nodes[id];
	if (idrnp->allocated == 0)
		return (NULL);
	return (idrnp);
}

static void
idr_reserve(struct idr *idp, int id, int incr)
{
	while (id >= 0) {
		idp->idr_nodes[id].allocated += incr;
		KKASSERT(idp->idr_nodes[id].allocated >= 0);
		id = left_ancestor(id);
	}
}

static int
idr_find_free(struct idr *idp, int want, int lim)
{
	int id, rsum, rsize, node;

	/*
	 * Search for a free id starting at the higher of want or
	 * idr_freeindex.  If that fails, the caller may expand the
	 * node array and retry.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going down
	 * branches we know have free id(s) until we hit a leaf node.
	 * The leaf node will be free but will not necessarily have an
	 * allocated field of 0.
	 */
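
	/*
	 * Illustrative examples (added commentary, not from the original
	 * source), using a 7-node array:
	 *
	 * - With ids 0, 1 and 2 in use: allocated[0] = 1,
	 *   allocated[1] = 2 (id 1 plus the count propagated up from
	 *   id 2), allocated[2] = 1, all others 0.  For want = 0 the
	 *   outer loop sees allocated[0] == right_subtree_size(0) == 1
	 *   (subtree full), hops to right_ancestor(0) == 1, finds the
	 *   subtree {1, 2} full as well, hops to right_ancestor(1) == 3
	 *   and returns 3 since allocated[3] == 0.
	 *
	 * - With ids 3 and 4 in use and want = 3: allocated[3] == 2 but
	 *   right_subtree_size(3) == 4, so the subtree {3, 4, 5, 6} has
	 *   room; the inner loop descends to node 3 + 2 = 5, which has
	 *   allocated[5] == 0, and returns 5.
	 */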

	/* move up the tree looking for a subtree with a free node */
	for (id = max(want, idp->idr_freeindex); id < min(idp->idr_count, lim);
			id = right_ancestor(id)) {
		if (idp->idr_nodes[id].allocated == 0)
			return (id);

		rsize = right_subtree_size(id);
		if (idp->idr_nodes[id].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * A free id is in the right subtree of the tree rooted
		 * at id.  Call that subtree R.  Look for the smallest
		 * (leftmost) subtree of R with an unallocated id:
		 * continue moving down the left branch until encountering
		 * a full left subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = id + rsize;
			rsum += idp->idr_nodes[node].allocated;
			if (idp->idr_nodes[id].allocated == rsum + rsize) {
				id = node;	/* move to the right */
				if (idp->idr_nodes[node].allocated == 0)
					return (id);
				rsum = 0;
			}
		}
		return (id);
	}
	return (-1);
}

static int
idr_pre_get1(struct idr *idp, int want, int lim)
{
	int id;

	if (want >= idp->idr_count)
		idr_grow(idp, want);

retry:
	id = idr_find_free(idp, want, lim);
	if (id > -1)
		goto found;

	/*
	 * No space in current array.  Expand?
	 */
	if (idp->idr_count >= lim) {
		return (ENOSPC);
	}
	idr_grow(idp, want);
	goto retry;

found:
	return (0);
}

int
idr_pre_get(struct idr *idp, __unused unsigned gfp_mask)
{
	int error;

	lwkt_gettoken(&idp->idr_token);
	error = idr_pre_get1(idp, idp->idr_maxwant, INT_MAX);
	lwkt_reltoken(&idp->idr_token);
	return (error == 0);
}

int
idr_get_new_above(struct idr *idp, void *ptr, int sid, int *id)
{
	int resid;

	KKASSERT(ptr != NULL);

	lwkt_gettoken(&idp->idr_token);
	if (sid >= idp->idr_count) {
		idp->idr_maxwant = max(idp->idr_maxwant, sid);
		lwkt_reltoken(&idp->idr_token);
		return (-EAGAIN);
	}

	resid = idr_find_free(idp, sid, INT_MAX);
	if (resid == -1) {
		lwkt_reltoken(&idp->idr_token);
		return (-EAGAIN);
	}

	if (resid >= idp->idr_count)
		idr_grow(idp, resid);
	if (resid > idp->idr_lastindex)
		idp->idr_lastindex = resid;
	if (sid <= idp->idr_freeindex)
		idp->idr_freeindex = resid;
	*id = resid;
	KKASSERT(idp->idr_nodes[resid].reserved == 0);
	idp->idr_nodes[resid].reserved = 1;
	idr_reserve(idp, resid, 1);
	idr_set(idp, resid, ptr);

	lwkt_reltoken(&idp->idr_token);
	return (0);
}

int
idr_get_new(struct idr *idp, void *ptr, int *id)
{
	return (idr_get_new_above(idp, ptr, 0, id));
}
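
/*
 * Minimal usage sketch (illustrative only, not part of this file):
 * the Linux-style convention pairs idr_pre_get() with idr_get_new()
 * in a retry loop, because idr_get_new() returns -EAGAIN when no
 * preallocated space is available.  example_alloc_id() is a
 * hypothetical helper.
 */
#if 0
static int
example_alloc_id(struct idr *idp, void *obj, int *new_id)
{
	int ret;

	do {
		if (idr_pre_get(idp, 0) == 0)
			return (-ENOMEM);	/* could not grow the tree */
		ret = idr_get_new(idp, obj, new_id);
	} while (ret == -EAGAIN);

	return (ret);
}
#endif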

/*
 * Grow the node array so it can hold ids up through (want).
 */
static void
idr_grow(struct idr *idp, int want)
{
	struct idr_node *oldnodes, *newnodes;
	int nf;

	/*
	 * We want 2^n - 1 entries; doubling preserves the invariant,
	 * since 2 * (2^n - 1) + 1 = 2^(n+1) - 1 (e.g. 255 -> 511 -> 1023).
	 */
	nf = idp->idr_count;
	do {
		nf = 2 * nf + 1;
	} while (nf <= want);

	/* Allocate a new zeroed node array */
	newnodes = kmalloc(nf * sizeof(struct idr_node), M_IDR, M_ZERO|M_WAITOK);

	/* We might race another grow */
	if (nf <= idp->idr_count) {
		kfree(newnodes, M_IDR);
		return;
	}

	/* Copy the existing nodes to the beginning of the new array */
	if (idp->idr_nodes != NULL)
		bcopy(idp->idr_nodes, newnodes,
		      idp->idr_count * sizeof(struct idr_node));

	oldnodes = idp->idr_nodes;
	idp->idr_nodes = newnodes;
	idp->idr_count = nf;

	if (oldnodes != NULL) {
		kfree(oldnodes, M_IDR);
	}

	idp->idr_nexpands++;
}

void
idr_remove(struct idr *idp, int id)
{
	lwkt_gettoken(&idp->idr_token);

	if (id >= idp->idr_count)
		goto out;
	if (idp->idr_nodes[id].data == NULL)
		goto out;
	idp->idr_nodes[id].data = NULL;

	idr_reserve(idp, id, -1);
	idrfixup(idp, id);

out:
	lwkt_reltoken(&idp->idr_token);
}

void
idr_remove_all(struct idr *idp)
{
	struct idr_node *oldnodes;
	struct idr_node *newnodes;

	lwkt_gettoken(&idp->idr_token);

retry:
	oldnodes = idp->idr_nodes;
	newnodes = kmalloc(idp->idr_count * sizeof(struct idr_node),
			   M_IDR, M_WAITOK | M_ZERO);

	/* The M_WAITOK allocation may have blocked; check for a race */
	if (idp->idr_nodes != oldnodes) {
		kfree(newnodes, M_IDR);
		goto retry;
	}

	idp->idr_nodes = newnodes;
	idp->idr_lastindex = -1;
	idp->idr_freeindex = 0;
	idp->idr_nexpands = 0;
	idp->idr_maxwant = 0;
	lwkt_reltoken(&idp->idr_token);

	kfree(oldnodes, M_IDR);
}

void
idr_destroy(struct idr *idp)
{
	lwkt_token_uninit(&idp->idr_token);
	kfree(idp->idr_nodes, M_IDR);
	memset(idp, 0, sizeof(struct idr));
}

void *
idr_find(struct idr *idp, int id)
{
	void *ret = NULL;

	lwkt_gettoken(&idp->idr_token);

	if (id >= idp->idr_count) {
		goto out;
	} else if (idp->idr_nodes[id].allocated == 0) {
		goto out;
	}
	ret = idp->idr_nodes[id].data;

out:
	lwkt_reltoken(&idp->idr_token);
	return (ret);
}

static void
idr_set(struct idr *idp, int id, void *ptr)
{
	KKASSERT(id < idp->idr_count);
	KKASSERT(idp->idr_nodes[id].reserved != 0);
	if (ptr) {
		idp->idr_nodes[id].data = ptr;
		idp->idr_nodes[id].reserved = 0;
	} else {
		idp->idr_nodes[id].reserved = 0;
		idr_reserve(idp, id, -1);
		idrfixup(idp, id);
	}
}

int
idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data),
	     void *data)
{
	int i, error = 0;
	struct idr_node *nodes;

	lwkt_gettoken(&idp->idr_token);
	nodes = idp->idr_nodes;
	for (i = 0; i < idp->idr_count; i++) {
		if (nodes[i].data != NULL && nodes[i].allocated > 0) {
			error = fn(i, nodes[i].data, data);
			if (error != 0)
				goto out;
		}
	}
out:
	lwkt_reltoken(&idp->idr_token);
	return (error);
}
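
/*
 * Illustrative callback sketch (hypothetical, not part of this file):
 * idr_for_each() stops the walk as soon as the callback returns a
 * non-zero value and propagates that value back to the caller.
 */
#if 0
static int
example_print_cb(int id, void *p, void *data __unused)
{
	kprintf("id %d -> %p\n", id, p);
	return (0);	/* returning non-zero would abort the walk */
}

/* ... idr_for_each(idp, example_print_cb, NULL); ... */
#endif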

void *
idr_replace(struct idr *idp, void *ptr, int id)
{
	struct idr_node *idrnp;
	void *ret = NULL;

	lwkt_gettoken(&idp->idr_token);

	idrnp = idr_get_node(idp, id);
	if (idrnp == NULL || ptr == NULL)
		goto out;

	ret = idrnp->data;
	idrnp->data = ptr;

out:
	lwkt_reltoken(&idp->idr_token);
	return (ret);
}

void
idr_init(struct idr *idp)
{
	bzero(idp, sizeof(struct idr));
	idp->idr_nodes = kmalloc(IDR_DEFAULT_SIZE * sizeof(struct idr_node),
				 M_IDR, M_WAITOK | M_ZERO);
	idp->idr_count = IDR_DEFAULT_SIZE;
	idp->idr_lastindex = -1;
	idp->idr_maxwant = 0;
	lwkt_token_init(&idp->idr_token, "idr token");
}
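
/*
 * End-to-end usage sketch (illustrative only; example_lifecycle and
 * the object pointer are hypothetical).  A typical lifecycle is
 * idr_init(), allocation via the pre-get/get-new retry loop shown
 * earlier, lookups with idr_find(), then idr_remove() and
 * idr_destroy().
 */
#if 0
static void
example_lifecycle(void)
{
	struct idr idr;
	void *obj = &idr;		/* any non-NULL pointer will do */
	int id;

	idr_init(&idr);
	while (idr_get_new(&idr, obj, &id) == -EAGAIN) {
		if (idr_pre_get(&idr, 0) == 0)
			return;		/* could not grow the tree */
	}
	KKASSERT(idr_find(&idr, id) == obj);
	idr_replace(&idr, obj, id);	/* swap in a new pointer */
	idr_remove(&idr, id);
	idr_destroy(&idr);
}
#endif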