/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)
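
/*
 * Worked out with the usual values from <linux/idr.h> (IDR_BITS == 5 and
 * MAX_IDR_SHIFT == 31), MAX_IDR_LEVEL is (31 + 5 - 1) / 5 == 7 radix-tree
 * levels and MAX_IDR_FREE is 14 cached layers per CPU.
 */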

struct linux_idr_cache {
	spinlock_t lock;
	struct idr_layer *head;
	unsigned count;
};

DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This implementation is quick and dirty and not as re-entrant as the
 * Linux version, but it should be fairly fast.  It is basically a radix
 * tree with a built-in bitmap in each layer for tracking free slots.
 */
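/*
 * A minimal usage sketch of the consumer-facing API implemented below,
 * where "map" and "ptr" are illustrative names and error handling is
 * omitted (GFP_KERNEL comes from <linux/gfp.h>):
 *
 *	struct idr map;
 *	int id;
 *
 *	idr_init(&map);
 *	id = idr_alloc(&map, ptr, 0, 0, GFP_KERNEL);
 *	if (id >= 0)
 *		MPASS(idr_find(&map, id) == ptr);
 *	idr_remove(&map, id);
 *	idr_destroy(&map);
 */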
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* Only dequeue if the current thread holds the per-CPU cache lock. */
	if (mtx_owned(&lic->lock.m) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

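/*
 * idr_preload() pins the current thread and leaves the per-CPU cache lock
 * held so that idr_get() can consume the cached layers without sleeping;
 * idr_preload_end() drops the lock and unpins.  The two calls must be
 * paired on the same thread, roughly as in this sketch following the
 * Linux convention (map/ptr as in the sketch near the top of the file;
 * GFP_NOWAIT is assumed to come from <linux/gfp.h>):
 *
 *	idr_preload(GFP_KERNEL);
 *	id = idr_alloc(&map, ptr, 0, 0, GFP_NOWAIT);
 *	idr_preload_end();
 */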
void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
}

void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}

static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}
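
/*
 * For example, assuming IDR_BITS is 5 (its value in <linux/idr.h>), a
 * two-layer tree covers ids 0..1023, and id 67 (0x43) lives at slot
 * idr_pos(67, 1) == 2 of the root layer and slot idr_pos(67, 0) == 3 of
 * the leaf layer.
 */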

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Empties the tree and frees the cached free-list layers. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void *
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	/*
	 * Walk down the tree to this item, setting the free bit in each
	 * bitmap along the way, since at least one slot on this path is
	 * about to become free.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point the free-space bitmaps have been updated along the
	 * whole path.  We could make this non-fatal and unwind, but Linux
	 * dumps a stack trace and a warning here, so a panic is not much
	 * harsher.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	res = il->ary[idx];
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;

	return (res);
}

void *
idr_remove(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);

	return (res);
}

static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace returns an error if the item was never allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res == NULL)
			continue;
		*nextidp = id;
		break;
	}
	mtx_unlock(&idr->lock);
	return (res);
}

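/*
 * Legacy Linux-style preallocation: top up the idr's free-layer list so
 * that a subsequent idr_get_new*() call cannot fail for lack of memory.
 * Returns 1 on success and 0 if an allocation failed.
 */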
int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}

static struct idr_layer *
idr_free_list_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idp->free) != NULL) {
		idp->free = il->ary[0];
		il->ary[0] = NULL;
	}
	return (il);
}

static inline struct idr_layer *
idr_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idr_free_list_get(idp)) != NULL) {
		MPASS(il->bitmap != 0);
	} else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else {
		return (NULL);
	}
	return (il);
}

/*
 * Could be implemented as idr_get_new_above(idr, ptr, 0, idp), but is
 * kept separate for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* find_next_bit() returns a 0-based index, or IDR_SIZE if no bit is set. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have followed a path where the only free bit was
		 * below the position we wanted.  Restart the search with a
		 * larger starting id; "id" holds the progress made so far,
		 * so resume just above this level's subtree.  This may
		 * restart as many as MAX_LEVEL times, but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

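/*
 * Allocate an unused id for "ptr" in the range [start, end); an "end" of
 * zero (or any non-positive value) means no upper bound below INT_MAX.
 * Returns the new id, or a negative errno value on failure.
 */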
int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}

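/*
 * Like idr_alloc(), but remember the last allocated id and start the next
 * search just above it, wrapping back to "start" once the upper end of
 * the range is exhausted.
 */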
int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}

static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: It is not allowed to modify the IDR tree while it is being iterated. */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}

static int
idr_has_entry(int id, void *p, void *data)
{

	return (1);
}

bool
idr_is_empty(struct idr *idp)
{

	return (idr_for_each(idp, idr_has_entry, NULL) == 0);
}

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

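/*
 * Allocate an unused id in the range [start, end); an "end" of zero means
 * an upper bound of 0x80000000.  Returns the id, or a negative errno value
 * (-ENOMEM or -ENOSPC) on failure.
 */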
int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);
	MPASS((int)end >= 0);

	if (end == 0)
		max = 0x80000000;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
}