/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces in a very memory efficient manner.
 *
 * Allocation policy is always lowest free number first.
 *
 * A return value of -1 signals that no more unit numbers are available.
 *
 * There is no cost associated with the range of unit numbers, so unless
 * the resource really is finite, specify INT_MAX to new_unrhdr() and
 * forget about checking the return value.
 *
 * If a mutex is not provided when the unit number space is created, a
 * default global mutex is used.  The advantage of passing a mutex in is
 * that the alloc_unrl() function can be called with the mutex already
 * held (it will not be released by alloc_unrl()).
 *
 * The allocation function alloc_unr{l}() never sleeps (but it may block
 * on the mutex, of course).
 *
 * Freeing a unit number may require allocating memory, and can therefore
 * sleep, so the free_unr() function does not come in a pre-locked variant.
 *
 * A userland test program is included.
 *
 * Memory usage is a very complex function of the exact allocation
 * pattern, but always very compact:
 *    * For the very typical case where a single unbroken run of unit
 *      numbers is allocated, 44 bytes are used on i386.
 *    * For a unit number space of 1000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *	was 252 bytes on i386 for 500 allocated and 500 free units.
 *    * For a unit number space of 10000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *	was 798 bytes on i386 for 5000 allocated and 5000 free units.
 *    * The worst case is where every other unit number is allocated and
 *	the rest are free.  In that case 44 + N/4 bytes are used, where
 *	N is the number of the highest unit allocated.
 */
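
/*
 * A minimal usage sketch of the API described above (the "foo" names are
 * hypothetical, not part of this API):
 *
 *	struct unrhdr *foo_units;
 *	int u;
 *
 *	foo_units = new_unrhdr(0, INT_MAX, NULL);	// default global mutex
 *	u = alloc_unr(foo_units);			// lowest free number
 *	if (u == -1)
 *		panic("out of unit numbers");
 *	...
 *	free_unr(foo_units, u);				// may sleep
 *	delete_unrhdr(foo_units);			// all units must be free
 */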

#include <sys/param.h>
#include <sys/types.h>
#include <sys/_unrhdr.h>

#ifdef _KERNEL

#include <sys/bitstring.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define Free(foo) free(foo, M_UNIT)

static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);

#else /* ...USERLAND */

#include <bitstring.h>
#include <err.h>
#include <errno.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

static int no_alloc;
#define Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place (line %d)", line));
	return (calloc(foo, 1));
}
#define Free(foo) free(foo)

struct unrhdr;

#define	UNR_NO_MTX	((void *)(uintptr_t)-1)

struct mtx {
	int	state;
} unitmtx;

static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}

static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}

#define MA_OWNED	9

static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}

#define CTASSERT(foo)
#define WITNESS_WARN(flags, lock, fmt, ...)	(void)0

#endif /* USERLAND */

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *     If ptr is NULL, it represents a run of free items.
 *     If ptr points to the unrhdr it represents a run of allocated items.
 *     Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field is the number of items covered by the bitmap.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 *
 * Two special ranges are not covered by unrs:
 * - at the start of the allocator space, all elements in [low, low + first)
 *   are allocated;
 * - at the end of the allocator space, all elements in (high - last, high]
 *   are free.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

struct unrb {
	bitstr_t		map[sizeof(struct unr) / sizeof(bitstr_t)];
};

CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);

/* Number of bits we can store in the bitmap */
#define NBITS (NBBY * sizeof(((struct unrb *)NULL)->map))
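
/*
 * For illustration (sizes are platform-dependent assumptions, not
 * guarantees): on a typical 64-bit platform struct unr is two list
 * pointers, a u_int (plus padding) and a pointer, i.e. 32 bytes, giving
 * NBITS = 256; on a typical 32-bit platform it is 16 bytes, giving
 * NBITS = 128.
 */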

static inline bool
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}

/* Are the first len bits of the unrb all clear? */
static inline bool
ub_empty(struct unrb *ub, int len)
{
	int first_set;

	bit_ffs(ub->map, len, &first_set);
	return (first_set == -1);
}

/* Is the unrb full?  That is, are the first len bits all set? */
static inline bool
ub_full(struct unrb *ub, int len)
{
	int first_clear;

	bit_ffc(ub->map, len, &first_clear);
	return (first_clear == -1);
}

/*
 * start: ipos = -1, upos = NULL;
 * end:   ipos = -1, upos = uh
 */
struct unrhdr_iter {
	struct unrhdr *uh;
	int ipos;
	int upos_first_item;
	void *upos;
};

void *
create_iter_unr(struct unrhdr *uh)
{
	struct unrhdr_iter *iter;

	iter = Malloc(sizeof(*iter));
	iter->ipos = -1;
	iter->uh = uh;
	iter->upos = NULL;
	iter->upos_first_item = -1;
	return (iter);
}

static void
next_iter_unrl(struct unrhdr *uh, struct unrhdr_iter *iter)
{
	struct unr *up;
	struct unrb *ub;
	u_int y;
	int c;

	if (iter->ipos == -1) {
		if (iter->upos == uh)
			return;
		y = uh->low - 1;
		if (uh->first == 0) {
			up = TAILQ_FIRST(&uh->head);
			if (up == NULL) {
				iter->upos = uh;
				return;
			}
			iter->upos = up;
			if (up->ptr == NULL)
				iter->upos = NULL;
			else
				iter->upos_first_item = uh->low;
		}
	} else {
		y = iter->ipos;
	}

	up = iter->upos;

	/* Special case for the compacted [low, first) run. */
	if (up == NULL) {
		if (y + 1 < uh->low + uh->first) {
			iter->ipos = y + 1;
			return;
		}
		up = iter->upos = TAILQ_FIRST(&uh->head);
		iter->upos_first_item = uh->low + uh->first;
	}

	for (;;) {
		if (y + 1 < iter->upos_first_item + up->len) {
			if (up->ptr == uh) {
				iter->ipos = y + 1;
				return;
			} else if (is_bitmap(uh, up)) {
				ub = up->ptr;
				bit_ffs_at(&ub->map[0],
				    y + 1 - iter->upos_first_item,
				    up->len, &c);
				if (c != -1) {
					iter->ipos = iter->upos_first_item + c;
					return;
				}
			}
		}
		iter->upos_first_item += up->len;
		y = iter->upos_first_item - 1;
		up = iter->upos = TAILQ_NEXT((struct unr *)iter->upos, list);
		if (iter->upos == NULL) {
			iter->ipos = -1;
			iter->upos = uh;
			return;
		}
	}
}

/*
 * Returns -1 on end, otherwise the next element.
 */
int
next_iter_unr(void *handle)
{
	struct unrhdr *uh;
	struct unrhdr_iter *iter;

	iter = handle;
	uh = iter->uh;
	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	next_iter_unrl(uh, iter);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);
	return (iter->ipos);
}

void
free_iter_unr(void *handle)
{
	Free(handle);
}
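
/*
 * Iteration sketch: walk every allocated unit in ascending order.  The
 * iterator functions take the unrhdr mutex themselves (when one exists),
 * so no external locking is assumed here.
 *
 *	void *ih;
 *	int u;
 *
 *	ih = create_iter_unr(uh);
 *	while ((u = next_iter_unr(ih)) != -1)
 *		printf("unit %d is allocated\n", u);
 *	free_iter_unr(ih);
 */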

#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	struct unrb *ub;
	int w;
	u_int y, z;

	y = uh->first;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (is_bitmap(uh, up)) {
			ub = up->ptr;
			KASSERT(up->len <= NBITS,
			    ("UNR inconsistency: len %u max %zd (line %d)\n",
			    up->len, NBITS, line));
			z++;
			w = 0;
			bit_count(ub->map, 0, up->len, &w);
			y += w;
		} else if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT(y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT(z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh __unused, int line __unused)
{

}

#endif

/*
 * Userland memory management.  Just use calloc and keep track of how
 * many elements we have allocated for check_unrhdr().
 */

static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
{
	void *p;

	uh->alloc++;
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
	if (*p1 != NULL) {
		p = *p1;
		*p1 = NULL;
		return (p);
	} else {
		p = *p2;
		*p2 = NULL;
		return (p);
	}
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	struct unr *up;

	uh->alloc--;
	up = ptr;
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
}

void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	if (uh->mtx != NULL)
		mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);
		if (uh->mtx != NULL)
			mtx_unlock(uh->mtx);
		Free(up);
		if (uh->mtx != NULL)
			mtx_lock(uh->mtx);
	}
}

void
clean_unrhdr(struct unrhdr *uh)
{

	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	clean_unrhdrl(uh);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);
}
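
/*
 * Cleanup sketch for a space created with UNR_NO_MTX: the caller does its
 * own serialization, so the postponed chunk frees can be reaped while the
 * caller's lock is still held ("foo_lock" is a hypothetical consumer lock):
 *
 *	mtx_lock(&foo_lock);
 *	u = alloc_unrl(uh);
 *	clean_unrhdrl(uh);	// with mtx == NULL this just frees the chunks
 *	mtx_unlock(&foo_lock);
 */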

void
init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
{

	KASSERT(low >= 0 && low <= high,
	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
	if (mutex == UNR_NO_MTX)
		uh->mtx = NULL;
	else if (mutex != NULL)
		uh->mtx = mutex;
	else
		uh->mtx = &unitmtx;
	TAILQ_INIT(&uh->head);
	TAILQ_INIT(&uh->ppfree);
	uh->low = low;
	uh->high = high;
	uh->first = 0;
	uh->last = 1 + (high - low);
	uh->busy = 0;
	uh->alloc = 0;
	check_unrhdr(uh, __LINE__);
}

/*
 * Allocate a new unrhdr set.
 *
 * The highest and lowest valid values are given as parameters.
 */
struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mutex)
{
	struct unrhdr *uh;

	uh = Malloc(sizeof *uh);
	init_unrhdr(uh, low, high, mutex);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}

void
clear_unrhdr(struct unrhdr *uh)
{
	struct unr *up, *uq;

	KASSERT(TAILQ_EMPTY(&uh->ppfree),
	    ("unrhdr has postponed item for free"));
	TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
		if (up->ptr != uh) {
			Free(up->ptr);
		}
		Free(up);
	}
	uh->busy = 0;
	uh->alloc = 0;
	init_unrhdr(uh, uh->low, uh->high, uh->mtx);

	check_unrhdr(uh, __LINE__);
}

/*
 * Look for a sequence of items which can be combined into a bitmap; if
 * multiple candidates are present, take the one which saves most memory.
 *
 * Return 1 if a sequence was found, to indicate that another call
 * might be able to do more.  Return 0 if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	us = NULL;
	ba = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit.
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;
		ub = (void *)us;
		bit_nclear(ub->map, 0, NBITS - 1);
		if (l)
			bit_nset(ub->map, 0, a);
		if (!is_bitmap(uh, uf)) {
			if (uf->ptr == NULL)
				bit_nclear(ub->map, a, a + uf->len - 1);
			else
				bit_nset(ub->map, a, a + uf->len - 1);
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, a);
				else
					bit_clear(ub->map, a);
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, us->len);
				else
					bit_clear(ub->map, us->len);
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}

/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub_full(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub_empty(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we still have a "hot spot", merge with a neighbor if possible */
	if (up != NULL) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}

/*
 * Allocate a free unr.
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	if (uh->mtx != NULL)
		mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust first and last.
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}

int
alloc_unr(struct unrhdr *uh)
{
	int i;

	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	i = alloc_unrl(uh);
	clean_unrhdrl(uh);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);
	return (i);
}
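
/*
 * Locked-allocation sketch: when new_unrhdr() was given a caller-supplied
 * mutex, alloc_unrl() may be called with that mutex already held, e.g. to
 * make a table lookup and the allocation atomic ("foo_mtx" and "foo_table"
 * are hypothetical):
 *
 *	mtx_lock(&foo_mtx);
 *	u = alloc_unrl(uh);		// uses, but never drops, foo_mtx
 *	if (u != -1)
 *		foo_table[u] = sc;
 *	mtx_unlock(&foo_mtx);
 *	clean_unrhdr(uh);		// reap postponed frees afterwards
 */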

static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	if (uh->mtx != NULL)
		mtx_assert(uh->mtx, MA_OWNED);

	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	i = item - uh->low - uh->first;

	if (up == NULL) {
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the item which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			goto done;
		} else
			return (-1);
	} else if (up->ptr == uh)
		return (-1);

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off the head end, if any. */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}

int
alloc_unr_specific(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;
	int i;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");

	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));

	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	i = alloc_unr_specificl(uh, item, &p1, &p2);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);

	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);

	return (i);
}
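
/*
 * Reservation sketch: claim a well-known unit number if it is still free,
 * otherwise fall back to the lowest free one.
 *
 *	u = alloc_unr_specific(uh, 42);
 *	if (u == -1)
 *		u = alloc_unr(uh);
 */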

/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	     item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR: Freeing free item %d (run)\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off the head end, if any. */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}

void
free_unr(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	if (uh->mtx != NULL)
		mtx_lock(uh->mtx);
	free_unrl(uh, item, &p1, &p2);
	clean_unrhdrl(uh);
	if (uh->mtx != NULL)
		mtx_unlock(uh->mtx);
	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);
}

#ifdef _KERNEL
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#endif
#endif

#if (defined(_KERNEL) && defined(DDB)) || !defined(_KERNEL)

#if !defined(_KERNEL)
#define db_printf printf
#endif

static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	db_printf("  %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		db_printf("free\n");
	else if (up->ptr == uh)
		db_printf("alloc\n");
	else {
		ub = up->ptr;
		db_printf("bitmap [");
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				db_printf("#");
			else
				db_printf(" ");
		}
		db_printf("]\n");
	}
}

static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	db_printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		db_printf("  from = %5u", x);
		print_unr(uh, up);
		/* Runs and bitmaps alike cover up->len unit numbers. */
		x += up->len;
	}
}

#endif

#if defined(_KERNEL) && defined(DDB)
DB_SHOW_COMMAND(unrhdr, unrhdr_print_unrhdr)
{
	if (!have_addr) {
		db_printf("show unrhdr addr\n");
		return;
	}

	print_unrhdr((struct unrhdr *)addr);
}

static void
print_unrhdr_iter(struct unrhdr_iter *iter)
{
	db_printf("iter %p unrhdr %p ipos %d upos %p ufi %d\n",
	    iter, iter->uh, iter->ipos, iter->upos, iter->upos_first_item);
}

DB_SHOW_COMMAND(unrhdr_iter, unrhdr_print_iter)
{
	if (!have_addr) {
		db_printf("show unrhdr_iter addr\n");
		return;
	}

	print_unrhdr_iter((struct unrhdr_iter *)addr);
}
#endif

#ifndef _KERNEL	/* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions.  The code resides
 * here so that it can access static functions and structures.
 */

static bool verbose;
#define VPRINTF(...)	do { if (verbose) printf(__VA_ARGS__); } while (0)

static void
test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	if (a[i]) {
		VPRINTF("F %u\n", i);
		free_unr(uh, i);
		a[i] = 0;
	} else {
		no_alloc = 1;
		j = alloc_unr(uh);
		if (j != -1) {
			a[j] = 1;
			VPRINTF("A %d\n", j);
		}
		no_alloc = 0;
	}
}

static void
test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	j = alloc_unr_specific(uh, i);
	if (j == -1) {
		VPRINTF("F %u\n", i);
		a[i] = 0;
		free_unr(uh, i);
	} else {
		a[i] = 1;
		VPRINTF("A %d\n", j);
	}
}

#define	TBASE	7
#define	XSIZE	10
#define	ISIZE	1000

static int
test_iter_compar(const void *a, const void *b)
{
	int va, vb;

	/* Compare without the overflow risk of subtracting large ints. */
	va = *(const int *)a;
	vb = *(const int *)b;
	return (va < vb ? -1 : va > vb);
}

static void
test_iter_fill(int *vals, struct unrhdr *uh, int i, int v, int *res)
{
	int x;

	vals[i] = v;
	x = alloc_unr_specific(uh, v);
	if (x != v) {
		VPRINTF("alloc_unr_specific failed %d %d\n", x, v);
		*res = 1;
	}
}

static void
test_iter(void)
{
	struct unrhdr *uh;
	void *ihandle;
	int vals[ISIZE];
	int i, j, v, x, res;

	res = 0;
	uh = new_unrhdr(TBASE, INT_MAX, NULL);
	for (i = 0; i < XSIZE; i++) {
		vals[i] = i + TBASE;
		x = alloc_unr_specific(uh, i + TBASE);
		if (x != i + TBASE) {
			VPRINTF("alloc_unr_specific failed %d %d\n", x,
			    i + TBASE);
			res = 1;
		}
	}
	for (; i < ISIZE; i++) {
		for (;;) {
again:
			v = arc4random_uniform(INT_MAX);
			if (v < TBASE)
				goto again;
			for (j = 0; j < i; j++) {
				if (v == vals[j] || v + 1 == vals[j])
					goto again;
			}
			break;
		}
		test_iter_fill(vals, uh, i, v, &res);
		i++, v++;
		if (i < ISIZE)
			test_iter_fill(vals, uh, i, v, &res);
	}
	qsort(vals, ISIZE, sizeof(vals[0]), test_iter_compar);

	ihandle = create_iter_unr(uh);
	i = 0;
	while ((v = next_iter_unr(ihandle)) != -1) {
		if (vals[i] != v) {
			VPRINTF("iter %d: iter %d != val %d\n", i, v, vals[i]);
			if (res == 0) {
				if (verbose)
					print_unrhdr(uh);
				res = 1;
			}
		} else {
			VPRINTF("iter %d: val %d\n", i, v);
		}
		i++;
	}
	free_iter_unr(ihandle);
	clean_unrhdr(uh);
	clear_unrhdr(uh);
	delete_unrhdr(uh);
	exit(res);
}

static void
usage(char **argv)
{
	printf("%s [-h] [-i] [-r REPETITIONS] [-v]\n", argv[0]);
}

int
main(int argc, char **argv)
{
	struct unrhdr *uh;
	char *a;
	long count = 10000;	/* Number of unrs to test */
	long reps = 1, m;
	int ch;
	u_int i;
	bool testing_iter;

	verbose = false;
	testing_iter = false;

	while ((ch = getopt(argc, argv, "hir:v")) != -1) {
		switch (ch) {
		case 'i':
			testing_iter = true;
			break;
		case 'r':
			errno = 0;
			reps = strtol(optarg, NULL, 0);
			if (errno == ERANGE || errno == EINVAL) {
				usage(argv);
				exit(2);
			}
			break;
		case 'v':
			verbose = true;
			break;
		case 'h':
		default:
			usage(argv);
			exit(2);
		}
	}

	setbuf(stdout, NULL);

	if (testing_iter)
		test_iter();

	uh = new_unrhdr(0, count - 1, NULL);
	print_unrhdr(uh);

	a = calloc(count, sizeof(char));
	if (a == NULL)
		err(1, "calloc failed");

	printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
	printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	printf("NBITS %lu\n", (unsigned long)NBITS);
	for (m = 0; m < count * reps; m++) {
		i = arc4random_uniform(count);
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		if ((arc4random() & 1) != 0)
			test_alloc_unr(uh, i, a);
		else
			test_alloc_unr_specific(uh, i, a);

		if (verbose)
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	for (i = 0; i < (u_int)count; i++) {
		if (a[i]) {
			if (verbose) {
				printf("C %u\n", i);
				print_unrhdr(uh);
			}
			free_unr(uh, i);
		}
	}
	print_unrhdr(uh);
	delete_unrhdr(uh);
	free(a);
	return (0);
}
#endif