/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

struct linux_idr_cache {
	spinlock_t lock;
	struct idr_layer *head;
	unsigned count;
};

static DPCPU_DEFINE(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a built-in bitmap in each layer tracking which slots are free.
 */
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* check if wrong thread is trying to dequeue */
	if (mtx_owned(&lic->lock.m) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
	/*
	 * NOTE: The per-CPU cache lock is intentionally left held and the
	 * thread stays pinned until idr_preload_end() is called.
	 */
}

void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}
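
/*
 * Usage sketch (illustrative only, not compiled): idr_preload() pins the
 * current thread and pre-fills the per-CPU layer cache, leaving the cache
 * lock held so that idr_get() can fall back to it when a non-sleeping
 * allocation fails; idr_preload_end() drops the lock and unpins.  A
 * consumer that must allocate an ID without sleeping could bracket the
 * allocation like this ("softc" and its "ids" member are hypothetical):
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&softc->ids, ptr, 0, 0, GFP_NOWAIT);
 *	idr_preload_end();
 *	if (id < 0)
 *		return (id);
 */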

static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}
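
/*
 * Illustration: an ID is consumed IDR_BITS bits at a time, most significant
 * radix digit first, and idr_pos(id, layer) extracts the slot index used at
 * the given layer.  For example, if IDR_BITS were 5 (IDR_SIZE == 32), the
 * ID 100 = 3 * 32 + 4 would be reached through slot 3 of the root layer and
 * slot 4 of the leaf, since idr_pos(100, 1) == 3 and idr_pos(100, 0) == 4.
 * idr_max() is simply the largest ID representable by the current number
 * of layers.
 */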

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Frees the tree and any cached layers; pointers stored by the consumer are not freed. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
	/* Free this interior layer once all of its children are gone. */
	free(il, M_IDR);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return;
	/*
	 * Walk down the tree to this item, setting the free bitmaps along
	 * the way, since we know at least one item will become free along
	 * this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * and a warning, so it doesn't seem necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;
}

void
idr_remove(struct idr *idr, int id)
{
	mtx_lock(&idr->lock);
	idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);
}

static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace still returns an error if the item was not allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
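
/*
 * Usage sketch (illustrative only): unlike most functions in this file,
 * idr_replace() reports failure through the ERR_PTR() convention from
 * <linux/err.h>, so the result must be checked with IS_ERR() before use
 * ("softc", "new_ptr" and "old" are hypothetical):
 *
 *	old = idr_replace(&softc->ids, new_ptr, id);
 *	if (IS_ERR(old))
 *		return (PTR_ERR(old));
 */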

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res == NULL)
			continue;
		*nextidp = id;
		break;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
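
/*
 * Usage sketch (illustrative only): idr_get_next() finds the first
 * allocated ID at or above *nextidp and writes it back, which allows
 * walking every entry without knowing the IDs in advance ("softc" is
 * hypothetical).  Each call locks and unlocks the IDR, so the set of
 * entries may change between calls:
 *
 *	for (id = 0; (p = idr_get_next(&softc->ids, &id)) != NULL; id++)
 *		printf("id %d -> %p\n", id, p);
 */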

int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}

static struct idr_layer *
idr_free_list_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idp->free) != NULL) {
		idp->free = il->ary[0];
		il->ary[0] = NULL;
	}
	return (il);
}

static inline struct idr_layer *
idr_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idr_free_list_get(idp)) != NULL) {
		MPASS(il->bitmap != 0);
	} else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else {
		return (NULL);
	}
	return (il);
}

/*
 * Could be implemented as idr_get_new_above_locked(idr, ptr, 0, idp), but
 * was written first, for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}
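
/*
 * Usage sketch (illustrative only): idr_get_new() implements the older
 * two-step Linux interface.  It returns -EAGAIN when it cannot obtain a
 * layer without sleeping, so it is normally paired with idr_pre_get() in
 * a retry loop ("softc" is hypothetical):
 *
 *	do {
 *		if (idr_pre_get(&softc->ids, GFP_KERNEL) == 0)
 *			return (-ENOMEM);
 *		error = idr_get_new(&softc->ids, ptr, &id);
 *	} while (error == -EAGAIN);
 */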

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns index numbered from 0 or size if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}
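
/*
 * Usage sketch (illustrative only): idr_alloc() is the modern interface;
 * it stores "ptr" and returns the new ID from the range [start, end), or
 * a negative errno on failure (end <= 0 means there is no upper limit).
 * A driver keeping per-object state might use it like this ("softc" is
 * hypothetical):
 *
 *	id = idr_alloc(&softc->ids, ptr, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return (id);
 *	...
 *	idr_remove(&softc->ids, id);
 */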

int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}

/* "offset" accumulates the ID bits contributed by the layers above. */
static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: It is not allowed to modify the IDR tree while it is being iterated */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}
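
/*
 * Usage sketch (illustrative only): the callback handed to idr_for_each()
 * is invoked with each allocated ID and its pointer and stops the walk by
 * returning a non-zero value, which idr_for_each() then returns.  A
 * hypothetical dump routine could look like this ("softc" is hypothetical):
 *
 *	static int
 *	dump_one(int id, void *p, void *data __unused)
 *	{
 *		printf("id %d -> %p\n", id, p);
 *		return (0);
 *	}
 *
 *	error = idr_for_each(&softc->ids, dump_one, NULL);
 */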

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);
	MPASS((int)end >= 0);

	if (end == 0)
		max = 0x80000000;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}
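
/*
 * Usage sketch (illustrative only): an ida is an IDR used purely as an ID
 * allocator, with no pointer stored.  ida_simple_get() returns an ID from
 * [start, end) (end == 0 means no practical upper limit) or a negative
 * errno, and ida_simple_remove() releases it ("softc" and "unit_ida" are
 * hypothetical):
 *
 *	unit = ida_simple_get(&softc->unit_ida, 0, 0, GFP_KERNEL);
 *	if (unit < 0)
 *		return (unit);
 *	...
 *	ida_simple_remove(&softc->unit_ida, unit);
 */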

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
}