xref: /dragonfly/sys/kern/subr_unit.c (revision e6d22e9b)
1 /*-
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/kern/subr_unit.c,v 1.7 2005/03/14 06:51:29 phk Exp $
27  *
28  *
29  * Unit number allocation functions.
30  *
31  * These functions implement a mixed run-length/bitmap management of unit
32  * number spaces in a very memory efficient manner.
33  *
34  * Allocation policy is always lowest free number first.
35  *
36  * A return value of -1 signals that no more unit numbers are available.
37  *
38  * There is no cost associated with the range of unitnumbers, so unless
39  * the resource really is finite, specify INT_MAX to new_unrhdr() and
40  * forget about checking the return value.
41  *
42  * If a mutex is not provided when the unit number space is created, a
43  * default global mutex is used.  The advantage to passing a mutex in, is
44  * that the alloc_unrl() function can be called with the mutex already
45  * held (it will not be released by alloc_unrl()).
46  *
47  * The allocation function alloc_unr{l}() never sleeps (but it may block on
48  * the mutex of course).
49  *
50  * Freeing a unit number may require allocating memory, and can therefore
51  * sleep so the free_unr() function does not come in a pre-locked variant.
52  *
53  * A userland test program is included.
54  *
55  * Memory usage is a very complex function of the exact allocation
56  * pattern, but always very compact:
57  *    * For the very typical case where a single unbroken run of unit
58  *      numbers are allocated 44 bytes are used on x86.
59  *    * For a unit number space of 1000 units and the random pattern
60  *      in the usermode test program included, the worst case usage
61  *	was 252 bytes on x86 for 500 allocated and 500 free units.
62  *    * For a unit number space of 10000 units and the random pattern
63  *      in the usermode test program included, the worst case usage
64  *	was 798 bytes on x86 for 5000 allocated and 5000 free units.
65  *    * The worst case is where every other unit number is allocated and
66  *	the rest are free.  In that case 44 + N/4 bytes are used where
67  *	N is the number of the highest unit allocated.
68  */
69 
70 #include <sys/types.h>
71 #include <sys/queue.h>
72 #include <sys/bitstring.h>
73 
74 #ifdef _KERNEL
75 
76 #include <sys/param.h>
77 #include <sys/malloc.h>
78 #include <sys/kernel.h>
79 #include <sys/systm.h>
80 #include <sys/limits.h>
81 #include <sys/lock.h>
82 #include <sys/proc.h>
83 
84 /*
85  * In theory it would be smarter to allocate the individual blocks
86  * with the zone allocator, but at this time the expectation is that
87  * there will typically not even be enough allocations to fill a single
88  * page, so we stick with malloc for now.
89  */
90 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
91 
92 #define Malloc(foo) kmalloc(foo, M_UNIT, M_WAITOK | M_ZERO)
93 #define Free(foo) kfree(foo, M_UNIT)
94 
95 static struct lock unit_lock;
96 
97 LOCK_SYSINIT(unit, &unit_lock, "unit# allocation", LK_CANRECURSE);
98 
99 #else /* ...USERLAND */
100 
101 /* No unit allocation on DragonFly's userland */
102 
103 #endif /* USERLAND */
104 
105 /*
106  * This is our basic building block.
107  *
108  * It can be used in three different ways depending on the value of the ptr
109  * element:
110  *     If ptr is NULL, it represents a run of free items.
111  *     If ptr points to the unrhdr it represents a run of allocated items.
112  *     Otherwise it points to an bitstring of allocated items.
113  *
114  * For runs the len field is the length of the run.
115  * For bitmaps the len field represents the number of allocated items.
116  *
117  * The bitmap is the same size as struct unr to optimize memory management.
118  */
119 struct unr {
120 	TAILQ_ENTRY(unr)	list;	/* chunks sorted by unit number */
121 	u_int			len;	/* run length, or items covered by bitmap */
122 	void			*ptr;	/* NULL = free run, uh = busy run, else unrb */
123 };
124 
125 struct unrb {
126 	u_char			busy;	/* number of set (allocated) bits in map[] */
127 	bitstr_t		map[sizeof(struct unr) - 1];
128 };
129 
/* optimize_unr() recycles a struct unr in place as a struct unrb. */
130 CTASSERT(sizeof(struct unr) == sizeof(struct unrb));
131 
132 /* Number of bits in the bitmap */
133 #define NBITS	((int)sizeof(((struct unrb *)NULL)->map) * 8)
134 
135 /* Header element for a unr number space. */
136 
137 struct unrhdr {
138 	TAILQ_HEAD(unrhd,unr)	head;	/* chunk list, ascending unit order */
139 	u_int			low;	/* Lowest item */
140 	u_int			high;	/* Highest item */
141 	u_int			busy;	/* Count of allocated items */
142 	u_int			alloc;	/* Count of memory allocations */
143 	u_int			first;	/* items allocated from start (not on list) */
144 	u_int			last;	/* items free at end (not on list) */
145 	struct lock		*lock;	/* caller's lock, or the global unit_lock */
146 };
147 
148 
149 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
150 /*
151  * Consistency check function.
152  *
153  * Checks the internal consistency as well as we can.
154  *
155  * Called at all boundaries of this API.
156  */
157 static void
158 check_unrhdr(struct unrhdr *uh, int line)
159 {
160 	struct unr *up;
161 	struct unrb *ub;
162 	u_int x, y, z, w;
163 
	/*
	 * y accumulates allocated items, seeded with the implicit ->first
	 * run; z counts memory chunks.  A bitmap chunk counts twice in z:
	 * once for the struct unr and once for its struct unrb.
	 */
164 	y = uh->first;
165 	z = 0;
166 	TAILQ_FOREACH(up, &uh->head, list) {
167 		z++;
168 		if (up->ptr != uh && up->ptr != NULL) {
169 			ub = up->ptr;
170 			KASSERT (up->len <= NBITS,
171 			    ("UNR inconsistency: len %u max %d (line %d)\n",
172 			    up->len, NBITS, line));
173 			z++;
174 			w = 0;
			/* Recount set bits and compare with the cached busy. */
175 			for (x = 0; x < up->len; x++)
176 				if (bit_test(ub->map, x))
177 					w++;
178 			KASSERT (w == ub->busy,
179 			    ("UNR inconsistency: busy %u found %u (line %d)\n",
180 			    ub->busy, w, line));
181 			y += w;
182 		} else if (up->ptr != NULL)
			/* ptr == uh: a run of allocated items. */
183 			y += up->len;
184 	}
185 	KASSERT (y == uh->busy,
186 	    ("UNR inconsistency: items %u found %u (line %d)\n",
187 	    uh->busy, y, line));
188 	KASSERT (z == uh->alloc,
189 	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
190 	    uh->alloc, z, line));
191 }
192 
193 #else
194 
195 static __inline void
196 check_unrhdr(struct unrhdr *uh, int line)
197 {
	/* Consistency checks compiled out unless DIAGNOSTIC or userland. */
198 
199 }
200 
201 #endif
202 
203 
204 /*
205  * Memory management.  Hand out a chunk the caller preallocated and keep
206  * track of how many chunks are outstanding for check_unrhdr().
207  */
208 
209 static __inline void *
210 new_unr(struct unrhdr *uh, void **p1, void **p2)
211 {
212 	void *p;
213 
214 	uh->alloc++;
215 	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
216 	if (*p1 != NULL) {
217 		p = *p1;
218 		*p1 = NULL;
219 		return (p);
220 	} else {
221 		p = *p2;
222 		*p2 = NULL;
223 		return (p);
224 	}
225 }
226 
227 static __inline void
228 delete_unr(struct unrhdr *uh, void *ptr)
229 {
230 
231 	uh->alloc--;
232 	Free(ptr);
233 }
234 
235 /*
236  * Allocate a new unrheader set.
237  *
238  * Highest and lowest valid values given as parameters.
239  */
240 
241 struct unrhdr *
242 new_unrhdr(int low, int high, struct lock *lock)
243 {
244 	struct unrhdr *uh;
245 
246 	KASSERT(low <= high,
247 	    ("UNR: use error: new_unrhdr(%u, %u)", low, high));
248 	uh = Malloc(sizeof *uh);
249 	if (lock != NULL)
250 		uh->lock = lock;
251 	else
252 		uh->lock = &unit_lock;
253 	TAILQ_INIT(&uh->head);
254 	uh->low = low;
255 	uh->high = high;
256 	uh->first = 0;
257 	uh->last = 1 + (high - low);
258 	check_unrhdr(uh, __LINE__);
259 	return (uh);
260 }
261 
262 void
263 delete_unrhdr(struct unrhdr *uh)
264 {
265 
266 	check_unrhdr(uh, __LINE__);
	/* Every unit and every chunk must be released before teardown. */
267 	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
268 	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
269 	Free(uh);
270 }
271 
272 static __inline int
273 is_bitmap(struct unrhdr *uh, struct unr *up)
274 {
275 	return (up->ptr != uh && up->ptr != NULL);
276 }
277 
278 /*
279  * Look for sequence of items which can be combined into a bitmap, if
280  * multiple are present, take the one which saves most memory.
281  *
282  * Return (1) if a sequence was found to indicate that another call
283  * might be able to do more.  Return (0) if we found no suitable sequence.
284  *
285  * NB: called from alloc_unr(), no new memory allocation allowed.
286  */
287 static int
288 optimize_unr(struct unrhdr *uh)
289 {
290 	struct unr *up, *uf, *us;
291 	struct unrb *ub, *ubf;
292 	u_int a, l, ba;
293 
294 	/*
295 	 * Look for the run of items (if any) which when collapsed into
296 	 * a bitmap would save most memory.
297 	 */
298 	us = NULL;
299 	ba = 0;
	/*
	 * For each candidate start chunk: 'a' counts chunks that would be
	 * reclaimed (bitmap chunks count double, for the unrb), 'l' the
	 * total items that would fit in one NBITS-wide bitmap.
	 */
300 	TAILQ_FOREACH(uf, &uh->head, list) {
301 		if (uf->len >= NBITS)
302 			continue;
303 		a = 1;
304 		if (is_bitmap(uh, uf))
305 			a++;
306 		l = uf->len;
307 		up = uf;
308 		while (1) {
309 			up = TAILQ_NEXT(up, list);
310 			if (up == NULL)
311 				break;
312 			if ((up->len + l) > NBITS)
313 				break;
314 			a++;
315 			if (is_bitmap(uh, up))
316 				a++;
317 			l += up->len;
318 		}
319 		if (a > ba) {
320 			ba = a;
321 			us = uf;
322 		}
323 	}
	/* Collapsing fewer than 3 chunks saves no memory; give up. */
324 	if (ba < 3)
325 		return (0);
326 
327 	/*
328 	 * If the first element is not a bitmap, make it one.
329 	 * Trying to do so without allocating more memory complicates things
330 	 * a bit
331 	 */
332 	if (!is_bitmap(uh, us)) {
333 		uf = TAILQ_NEXT(us, list);
334 		TAILQ_REMOVE(&uh->head, us, list);
335 		a = us->len;
336 		l = us->ptr == uh ? 1 : 0;
		/*
		 * Recycle the struct unr itself as the struct unrb
		 * (CTASSERT above guarantees equal sizes); the successor
		 * chunk 'uf' becomes the list element carrying it.
		 */
337 		ub = (void *)us;
338 		ub->busy = 0;
339 		if (l) {
340 			bit_nset(ub->map, 0, a);
341 			ub->busy += a;
342 		} else {
343 			bit_nclear(ub->map, 0, a);
		/*
		 * NOTE(review): bit_nset/bit_nclear stop-bounds are
		 * inclusive, so bit 'a' is touched here too; it appears
		 * to be rewritten by the merge code below before it is
		 * ever read — confirm against bitstring(3) semantics.
		 */
344 		}
345 		if (!is_bitmap(uh, uf)) {
346 			if (uf->ptr == NULL) {
347 				bit_nclear(ub->map, a, a + uf->len - 1);
348 			} else {
349 				bit_nset(ub->map, a, a + uf->len - 1);
350 				ub->busy += uf->len;
351 			}
352 			uf->ptr = ub;
353 			uf->len += a;
354 			us = uf;
355 		} else {
			/* Successor is itself a bitmap: copy it bit by bit. */
356 			ubf = uf->ptr;
357 			for (l = 0; l < uf->len; l++, a++) {
358 				if (bit_test(ubf->map, l)) {
359 					bit_set(ub->map, a);
360 					ub->busy++;
361 				} else {
362 					bit_clear(ub->map, a);
363 				}
364 			}
365 			uf->len = a;
366 			delete_unr(uh, uf->ptr);
367 			uf->ptr = ub;
368 			us = uf;
369 		}
370 	}
	/*
	 * 'us' is now a bitmap chunk; fold successors into it until the
	 * bitmap is full or the list ends.
	 */
371 	ub = us->ptr;
372 	while (1) {
373 		uf = TAILQ_NEXT(us, list);
374 		if (uf == NULL)
375 			return (1);
376 		if (uf->len + us->len > NBITS)
377 			return (1);
378 		if (uf->ptr == NULL) {
379 			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
380 			us->len += uf->len;
381 			TAILQ_REMOVE(&uh->head, uf, list);
382 			delete_unr(uh, uf);
383 		} else if (uf->ptr == uh) {
384 			bit_nset(ub->map, us->len, us->len + uf->len - 1);
385 			ub->busy += uf->len;
386 			us->len += uf->len;
387 			TAILQ_REMOVE(&uh->head, uf, list);
388 			delete_unr(uh, uf);
389 		} else {
390 			ubf = uf->ptr;
391 			for (l = 0; l < uf->len; l++, us->len++) {
392 				if (bit_test(ubf->map, l)) {
393 					bit_set(ub->map, us->len);
394 					ub->busy++;
395 				} else {
396 					bit_clear(ub->map, us->len);
397 				}
398 			}
399 			TAILQ_REMOVE(&uh->head, uf, list);
400 			delete_unr(uh, ubf);
401 			delete_unr(uh, uf);
402 		}
403 	}
404 }
405 
406 /*
407  * See if a given unr should be collapsed with a neighbor.
408  *
409  * NB: called from alloc_unr(), no new memory allocation allowed.
410  */
411 static void
412 collapse_unr(struct unrhdr *uh, struct unr *up)
413 {
414 	struct unr *upp;
415 	struct unrb *ub;
416 
417 	/* If bitmap is all set or clear, change it to runlength */
418 	if (is_bitmap(uh, up)) {
419 		ub = up->ptr;
420 		if (ub->busy == up->len) {
421 			delete_unr(uh, up->ptr);
422 			up->ptr = uh;
423 		} else if (ub->busy == 0) {
424 			delete_unr(uh, up->ptr);
425 			up->ptr = NULL;
426 		}
427 	}
428 
429 	/* If nothing left in runlength, delete it */
430 	if (up->len == 0) {
		/* Continue collapsing at a neighbor, if either exists. */
431 		upp = TAILQ_PREV(up, unrhd, list);
432 		if (upp == NULL)
433 			upp = TAILQ_NEXT(up, list);
434 		TAILQ_REMOVE(&uh->head, up, list);
435 		delete_unr(uh, up);
436 		up = upp;
437 	}
438 
439 	/* If we have "hot-spot" still, merge with neighbor if possible */
	/*
	 * Only runs can merge here: two distinct bitmap chunks never
	 * share the same ptr value, so equal ptr means both are free
	 * (NULL) or both allocated (uh).
	 */
440 	if (up != NULL) {
441 		upp = TAILQ_PREV(up, unrhd, list);
442 		if (upp != NULL && up->ptr == upp->ptr) {
443 			up->len += upp->len;
444 			TAILQ_REMOVE(&uh->head, upp, list);
445 			delete_unr(uh, upp);
446 			}
447 		upp = TAILQ_NEXT(up, list);
448 		if (upp != NULL && up->ptr == upp->ptr) {
449 			up->len += upp->len;
450 			TAILQ_REMOVE(&uh->head, upp, list);
451 			delete_unr(uh, upp);
452 		}
453 	}
454 
455 	/* Merge into ->first if possible */
456 	upp = TAILQ_FIRST(&uh->head);
457 	if (upp != NULL && upp->ptr == uh) {
458 		uh->first += upp->len;
459 		TAILQ_REMOVE(&uh->head, upp, list);
460 		delete_unr(uh, upp);
		/* 'up' may have been the merged chunk; don't touch it again. */
461 		if (up == upp)
462 			up = NULL;
463 	}
464 
465 	/* Merge into ->last if possible */
466 	upp = TAILQ_LAST(&uh->head, unrhd);
467 	if (upp != NULL && upp->ptr == NULL) {
468 		uh->last += upp->len;
469 		TAILQ_REMOVE(&uh->head, upp, list);
470 		delete_unr(uh, upp);
471 		if (up == upp)
472 			up = NULL;
473 	}
474 
475 	/* Try to make bitmaps */
476 	while (optimize_unr(uh))
477 		continue;
478 }
479 
480 /*
481  * Allocate a free unr.
482  */
483 int
484 alloc_unrl(struct unrhdr *uh)
485 {
486 	struct unr *up;
487 	struct unrb *ub;
488 	u_int x;
489 	int y;
490 	struct lock *ml __debugvar = uh->lock;
491 	struct thread *td __debugvar = curthread;
492 
	/* Caller must already hold the space's lock (see file header). */
493 	KKASSERT(lockstatus(ml, td) != 0);
494 	check_unrhdr(uh, __LINE__);
	/* Lowest candidate: first unit past the implicit ->first run. */
495 	x = uh->low + uh->first;
496 
497 	up = TAILQ_FIRST(&uh->head);
498 
499 	/*
500 	 * If we have an ideal split, just adjust the first+last
501 	 */
502 	if (up == NULL && uh->last > 0) {
503 		uh->first++;
504 		uh->last--;
505 		uh->busy++;
506 		return (x);
507 	}
508 
509 	/*
510 	 * We can always allocate from the first list element, so if we have
511 	 * nothing on the list, we must have run out of unit numbers.
512 	 */
513 	if (up == NULL)
514 		return (-1);
515 
	/* collapse_unr() folds a leading allocated run into ->first. */
516 	KASSERT(up->ptr != uh, ("UNR first element is allocated"));
517 
518 	if (up->ptr == NULL) {	/* free run */
519 		uh->first++;
520 		up->len--;
521 	} else {		/* bitmap */
522 		ub = up->ptr;
523 		KASSERT(ub->busy < up->len, ("UNR bitmap confusion"));
		/* Lowest-free policy: take the first clear bit. */
524 		bit_ffc(ub->map, up->len, &y);
525 		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
526 		bit_set(ub->map, y);
527 		ub->busy++;
528 		x += y;
529 	}
530 	uh->busy++;
	/* NB: collapse_unr() may free 'up'; no allocation happens here. */
531 	collapse_unr(uh, up);
532 	return (x);
533 }
534 
535 int
536 alloc_unr(struct unrhdr *uh)
537 {
538 	int i;
539 
540 	lockmgr(uh->lock, LK_EXCLUSIVE);
541 	i = alloc_unrl(uh);
542 	lockmgr(uh->lock, LK_RELEASE);
543 	return (i);
544 }
545 
546 /*
547  * Free a unr.
548  *
549  * If we can save unrs by using a bitmap, do so.
550  */
551 static void
552 free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
553 {
554 	struct unr *up, *upp, *upn;
555 	struct unrb *ub;
556 	u_int pl;
557 
558 	KASSERT(item >= uh->low && item <= uh->high,
559 	    ("UNR: free_unr(%u) out of range [%u...%u]",
560 	     item, uh->low, uh->high));
561 	check_unrhdr(uh, __LINE__);
	/* Work in offsets from the bottom of the range. */
562 	item -= uh->low;
563 	upp = TAILQ_FIRST(&uh->head);
564 	/*
565 	 * Freeing in the ideal split case
566 	 */
567 	if (item + 1 == uh->first && upp == NULL) {
568 		uh->last++;
569 		uh->first--;
570 		uh->busy--;
571 		check_unrhdr(uh, __LINE__);
572 		return;
573 	}
574 	/*
575  	 * Freeing in the ->first section.  Create a run starting at the
576 	 * freed item.  The code below will subdivide it.
577 	 */
578 	if (item < uh->first) {
579 		up = new_unr(uh, p1, p2);
580 		up->ptr = uh;
581 		up->len = uh->first - item;
582 		TAILQ_INSERT_HEAD(&uh->head, up, list);
583 		uh->first -= up->len;
584 	}
585 
	/* 'item' becomes an offset into the chunk list. */
586 	item -= uh->first;
587 
588 	/* Find the item which contains the unit we want to free */
589 	TAILQ_FOREACH(up, &uh->head, list) {
590 		if (up->len > item)
591 			break;
592 		item -= up->len;
593 	}
594 
595 	/* Handle bitmap items */
596 	if (is_bitmap(uh, up)) {
597 		ub = up->ptr;
598 
599 		KASSERT(bit_test(ub->map, item) != 0,
600 		    ("UNR: Freeing free item %d (bitmap)\n", item));
601 		bit_clear(ub->map, item);
602 		uh->busy--;
603 		ub->busy--;
604 		collapse_unr(uh, up);
605 		return;
606 	}
607 
608 	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run))\n", item));
609 
610 	/* Just this one left, reap it */
611 	if (up->len == 1) {
612 		up->ptr = NULL;
613 		uh->busy--;
614 		collapse_unr(uh, up);
615 		return;
616 	}
617 
618 	/* Check if we can shift the item into the previous 'free' run */
619 	upp = TAILQ_PREV(up, unrhd, list);
620 	if (item == 0 && upp != NULL && upp->ptr == NULL) {
621 		upp->len++;
622 		up->len--;
623 		uh->busy--;
624 		collapse_unr(uh, up);
625 		return;
626 	}
627 
628 	/* Check if we can shift the item to the next 'free' run */
629 	upn = TAILQ_NEXT(up, list);
630 	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
631 		upn->len++;
632 		up->len--;
633 		uh->busy--;
634 		collapse_unr(uh, up);
635 		return;
636 	}
637 
	/*
	 * Worst case: the freed item is interior to an allocated run.
	 * Split into up to three chunks, consuming the preallocated
	 * memory handed in via p1/p2 (never more than two chunks needed).
	 */
638 	/* Split off the tail end, if any. */
639 	pl = up->len - (1 + item);
640 	if (pl > 0) {
641 		upp = new_unr(uh, p1, p2);
642 		upp->ptr = uh;
643 		upp->len = pl;
644 		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
645 	}
646 
647 	/* Split off head end, if any */
648 	if (item > 0) {
649 		upp = new_unr(uh, p1, p2);
650 		upp->len = item;
651 		upp->ptr = uh;
652 		TAILQ_INSERT_BEFORE(up, upp, list);
653 	}
654 	up->len = 1;
655 	up->ptr = NULL;
656 	uh->busy--;
657 	collapse_unr(uh, up);
658 }
659 
660 void
661 free_unr(struct unrhdr *uh, u_int item)
662 {
663 	void *p1, *p2;
664 
665 	p1 = Malloc(sizeof(struct unr));
666 	p2 = Malloc(sizeof(struct unr));
667 	lockmgr(uh->lock, LK_EXCLUSIVE);
668 	free_unrl(uh, item, &p1, &p2);
669 	lockmgr(uh->lock, LK_RELEASE);
670 	if (p1 != NULL)
671 		Free(p1);
672 	if (p2 != NULL)
673 		Free(p2);
674 }
675 
676 #ifndef _KERNEL	/* USERLAND test driver */
677 
678 /*
679  * Simple stochastic test driver for the above functions
680  */
681 
682 static void
683 print_unr(struct unrhdr *uh, struct unr *up)
684 {
685 	u_int x;
686 	struct unrb *ub;
687 
688 	printf("  %p len = %5u ", up, up->len);
689 	if (up->ptr == NULL)
690 		printf("free\n");
691 	else if (up->ptr == uh)
692 		printf("alloc\n");
693 	else {
694 		ub = up->ptr;
695 		printf("bitmap(%d) [", ub->busy);
696 		for (x = 0; x < up->len; x++) {
697 			if (bit_test(ub->map, x))
698 				printf("#");
699 			else
700 				printf(" ");
701 		}
702 		printf("]\n");
703 	}
704 }
705 
706 static void
707 print_unrhdr(struct unrhdr *uh)
708 {
709 	struct unr *up;
710 	u_int x;
711 
712 	printf(
713 	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
714 	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
715 	x = uh->low + uh->first;
716 	TAILQ_FOREACH(up, &uh->head, list) {
717 		printf("  from = %5u", x);
718 		print_unr(uh, up);
719 		if (up->ptr == NULL || up->ptr == uh)
720 			x += up->len;
721 		else
722 			x += NBITS;
723 	}
724 }
725 
726 /* Number of unrs to test */
727 #define NN	10000
728 
729 int
730 main(int argc __unused, const char **argv __unused)
731 {
732 	struct unrhdr *uh;
733 	u_int i, x, m, j;
734 	char a[NN];
735 
736 	setbuf(stdout, NULL);
737 	uh = new_unrhdr(0, NN - 1, NULL);
738 	print_unrhdr(uh);
739 
740 	memset(a, 0, sizeof a);
741 
742 	fprintf(stderr, "sizeof(struct unr) %d\n", sizeof (struct unr));
743 	fprintf(stderr, "sizeof(struct unrb) %d\n", sizeof (struct unrb));
744 	fprintf(stderr, "sizeof(struct unrhdr) %d\n", sizeof (struct unrhdr));
745 	fprintf(stderr, "NBITS %d\n", NBITS);
746 	x = 1;
747 	for (m = 0; m < NN * 100; m++) {
748 		j = random();
749 		i = (j >> 1) % NN;
750 #if 0
751 		if (a[i] && (j & 1))
752 			continue;
753 #endif
754 		if (a[i]) {
755 			printf("F %u\n", i);
756 			free_unr(uh, i);
757 			a[i] = 0;
758 		} else {
759 			no_alloc = 1;
760 			i = alloc_unr(uh);
761 			if (i != -1) {
762 				a[i] = 1;
763 				printf("A %u\n", i);
764 			}
765 			no_alloc = 0;
766 		}
767 		if (1)	/* XXX: change this for detailed debug printout */
768 			print_unrhdr(uh);
769 		check_unrhdr(uh, __LINE__);
770 	}
771 	for (i = 0; i < NN; i++) {
772 		if (a[i]) {
773 			printf("C %u\n", i);
774 			free_unr(uh, i);
775 			print_unrhdr(uh);
776 		}
777 	}
778 	print_unrhdr(uh);
779 	delete_unrhdr(uh);
780 	return (0);
781 }
782 #endif
783