xref: /dragonfly/sys/kern/subr_unit.c (revision caaec4e3)
1 /*-
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/kern/subr_unit.c,v 1.7 2005/03/14 06:51:29 phk Exp $
27  *
28  *
29  * Unit number allocation functions.
30  *
31  * These functions implement a mixed run-length/bitmap management of unit
32  * number spaces in a very memory efficient manner.
33  *
34  * Allocation policy is always lowest free number first.
35  *
36  * A return value of -1 signals that no more unit numbers are available.
37  *
38  * There is no cost associated with the range of unit numbers, so unless
39  * the resource really is finite, specify INT_MAX to new_unrhdr() and
40  * forget about checking the return value.
41  *
42  * If a lock is not provided when the unit number space is created, a
43  * default global lock is used.  The advantage of passing a lock in is
44  * that the alloc_unrl() function can be called with the lock already
45  * held (it will not be released by alloc_unrl()).
46  *
47  * The allocation function alloc_unr{l}() never sleeps (but it may block on
48  * the lock, of course).
49  *
50  * Freeing a unit number may require allocating memory, and can therefore
51  * sleep, so the free_unr() function does not come in a pre-locked variant.
52  *
53  * A userland test program is included.
54  *
55  * Memory usage is a very complex function of the exact allocation
56  * pattern, but always very compact:
57  *    * For the very typical case where a single unbroken run of unit
58  *      numbers is allocated, 44 bytes are used on x86.
59  *    * For a unit number space of 1000 units and the random pattern
60  *      in the usermode test program included, the worst case usage
61  *	was 252 bytes on x86 for 500 allocated and 500 free units.
62  *    * For a unit number space of 10000 units and the random pattern
63  *      in the usermode test program included, the worst case usage
64  *	was 798 bytes on x86 for 5000 allocated and 5000 free units.
65  *    * The worst case is where every other unit number is allocated and
66  *	the rest are free.  In that case 44 + N/4 bytes are used where
67  *	N is the number of the highest unit allocated.
68  */
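/*
 * Usage example (a minimal sketch only, not taken from any real driver;
 * the names foo_unrhdr, foo_init, foo_attach and foo_detach are made up
 * purely for illustration):
 *
 *	static struct unrhdr *foo_unrhdr;
 *
 *	void
 *	foo_init(void)
 *	{
 *		// The range costs nothing, so use INT_MAX unless the
 *		// resource really is finite.
 *		foo_unrhdr = new_unrhdr(0, INT_MAX, NULL);
 *	}
 *
 *	int
 *	foo_attach(void)
 *	{
 *		int unit;
 *
 *		unit = alloc_unr(foo_unrhdr);
 *		if (unit == -1)			// space exhausted
 *			return (ENOSPC);
 *		// ... use unit ...
 *		return (0);
 *	}
 *
 *	void
 *	foo_detach(int unit)
 *	{
 *		free_unr(foo_unrhdr, unit);	// may allocate memory, can sleep
 *	}
 */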
69 
70 #include <sys/types.h>
71 #include <sys/queue.h>
72 #include <sys/bitstring.h>
73 
74 #ifdef _KERNEL
75 
76 #include <sys/param.h>
77 #include <sys/malloc.h>
78 #include <sys/kernel.h>
79 #include <sys/systm.h>
80 #include <sys/limits.h>
81 #include <sys/lock.h>
82 #include <sys/mutex.h>
83 #include <sys/proc.h>
84 
85 /*
86  * In theory it would be smarter to allocate the individual blocks
87  * with the zone allocator, but at this time the expectation is that
88  * there will typically not even be enough allocations to fill a single
89  * page, so we stick with malloc for now.
90  */
91 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");
92 
93 #define Malloc(foo) kmalloc(foo, M_UNIT, M_WAITOK | M_ZERO)
94 #define Free(foo) kfree(foo, M_UNIT)
95 
96 static struct lock unit_lock;
97 
98 LOCK_SYSINIT(unit, &unit_lock, "unit# allocation", LK_CANRECURSE);
99 
100 #else /* ...USERLAND */
101 
102 /* No unit number allocation in DragonFly's userland */
103 
104 #endif /* USERLAND */
105 
106 /*
107  * This is our basic building block.
108  *
109  * It can be used in three different ways depending on the value of the ptr
110  * element:
111  *     If ptr is NULL, it represents a run of free items.
112  *     If ptr points to the unrhdr, it represents a run of allocated items.
113  *     Otherwise it points to a bitstring of allocated items.
114  *
115  * For runs the len field is the length of the run.
116  * For bitmaps the len field represents the number of allocated items.
117  *
118  * The bitmap is the same size as struct unr to optimize memory management.
119  */
120 struct unr {
121 	TAILQ_ENTRY(unr)	list;
122 	u_int			len;
123 	void			*ptr;
124 };
125 
126 struct unrb {
127 	u_char			busy;
128 	bitstr_t		map[sizeof(struct unr) - 1];
129 };
130 
131 CTASSERT(sizeof(struct unr) == sizeof(struct unrb));
132 
133 /* Number of bits in the bitmap */
134 #define NBITS	((int)sizeof(((struct unrb *)NULL)->map) * 8)
135 
136 /* Header element for a unr number space. */
137 
138 struct unrhdr {
139 	TAILQ_HEAD(unrhd,unr)	head;
140 	u_int			low;	/* Lowest item */
141 	u_int			high;	/* Highest item */
142 	u_int			busy;	/* Count of allocated items */
143 	u_int			alloc;	/* Count of memory allocations */
144 	u_int			first;	/* items allocated from start */
145 	u_int			last;	/* items free at end */
146 	struct lock		*lock;
147 };
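/*
 * Illustrative state (an assumed example, not a real trace): after
 * new_unrhdr(0, 999, NULL) and ten alloc_unr() calls returning 0..9, no
 * list elements are needed at all; the header simply records
 * first = 10, last = 990, busy = 10, alloc = 0.  Runs and bitmaps on the
 * list only describe the region between the implicit allocated prefix
 * (->first) and the implicit free suffix (->last).
 */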
148 
149 
150 #if defined(DIAGNOSTIC) || !defined(_KERNEL)
151 /*
152  * Consistency check function.
153  *
154  * Checks the internal consistency as well as we can.
155  *
156  * Called at all boundaries of this API.
157  */
158 static void
159 check_unrhdr(struct unrhdr *uh, int line)
160 {
161 	struct unr *up;
162 	struct unrb *ub;
163 	u_int x, y, z, w;
164 
165 	y = uh->first;
166 	z = 0;
167 	TAILQ_FOREACH(up, &uh->head, list) {
168 		z++;
169 		if (up->ptr != uh && up->ptr != NULL) {
170 			ub = up->ptr;
171 			KASSERT (up->len <= NBITS,
172 			    ("UNR inconsistency: len %u max %d (line %d)\n",
173 			    up->len, NBITS, line));
174 			z++;
175 			w = 0;
176 			for (x = 0; x < up->len; x++)
177 				if (bit_test(ub->map, x))
178 					w++;
179 			KASSERT (w == ub->busy,
180 			    ("UNR inconsistency: busy %u found %u (line %d)\n",
181 			    ub->busy, w, line));
182 			y += w;
183 		} else if (up->ptr != NULL)
184 			y += up->len;
185 	}
186 	KASSERT (y == uh->busy,
187 	    ("UNR inconsistency: items %u found %u (line %d)\n",
188 	    uh->busy, y, line));
189 	KASSERT (z == uh->alloc,
190 	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
191 	    uh->alloc, z, line));
192 }
193 
194 #else
195 
196 static __inline void
197 check_unrhdr(struct unrhdr *uh, int line)
198 {
199 
200 }
201 
202 #endif
203 
204 
205 /*
206  * Chunk memory management: hand out one of the caller's two pre-allocated
207  * chunks and keep track of how many chunks are in use for check_unrhdr().
208  */
209 
210 static __inline void *
211 new_unr(struct unrhdr *uh, void **p1, void **p2)
212 {
213 	void *p;
214 
215 	uh->alloc++;
216 	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
217 	if (*p1 != NULL) {
218 		p = *p1;
219 		*p1 = NULL;
220 		return (p);
221 	} else {
222 		p = *p2;
223 		*p2 = NULL;
224 		return (p);
225 	}
226 }
227 
228 static __inline void
229 delete_unr(struct unrhdr *uh, void *ptr)
230 {
231 
232 	uh->alloc--;
233 	Free(ptr);
234 }
235 
236 /*
237  * Allocate a new unrheader set.
238  *
239  * The lowest and highest valid values are given as parameters.
240  */
241 
242 struct unrhdr *
243 new_unrhdr(int low, int high, struct lock *lock)
244 {
245 	struct unrhdr *uh;
246 
247 	KASSERT(low <= high,
248 	    ("UNR: use error: new_unrhdr(%u, %u)", low, high));
249 	uh = Malloc(sizeof *uh);
250 	if (lock != NULL)
251 		uh->lock = lock;
252 	else
253 		uh->lock = &unit_lock;
254 	TAILQ_INIT(&uh->head);
255 	uh->low = low;
256 	uh->high = high;
257 	uh->first = 0;
258 	uh->last = 1 + (high - low);
259 	check_unrhdr(uh, __LINE__);
260 	return (uh);
261 }
262 
263 void
264 delete_unrhdr(struct unrhdr *uh)
265 {
266 
267 	check_unrhdr(uh, __LINE__);
268 	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
269 	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
270 	Free(uh);
271 }
272 
273 static __inline int
274 is_bitmap(struct unrhdr *uh, struct unr *up)
275 {
276 	return (up->ptr != uh && up->ptr != NULL);
277 }
278 
279 /*
280  * Look for a sequence of items which can be combined into a bitmap; if
281  * multiple are present, take the one which saves the most memory.
282  *
283  * Return (1) if a sequence was found, to indicate that another call
284  * might be able to do more.  Return (0) if no suitable sequence was found.
285  *
286  * NB: called from alloc_unr(), no new memory allocation allowed.
287  */
288 static int
289 optimize_unr(struct unrhdr *uh)
290 {
291 	struct unr *up, *uf, *us;
292 	struct unrb *ub, *ubf;
293 	u_int a, l, ba;
294 
295 	/*
296 	 * Look for the run of items (if any) which, when collapsed into
297 	 * a bitmap, would save the most memory.
298 	 */
299 	us = NULL;
300 	ba = 0;
301 	TAILQ_FOREACH(uf, &uh->head, list) {
302 		if (uf->len >= NBITS)
303 			continue;
304 		a = 1;
305 		if (is_bitmap(uh, uf))
306 			a++;
307 		l = uf->len;
308 		up = uf;
309 		while (1) {
310 			up = TAILQ_NEXT(up, list);
311 			if (up == NULL)
312 				break;
313 			if ((up->len + l) > NBITS)
314 				break;
315 			a++;
316 			if (is_bitmap(uh, up))
317 				a++;
318 			l += up->len;
319 		}
320 		if (a > ba) {
321 			ba = a;
322 			us = uf;
323 		}
324 	}
325 	if (ba < 3)
326 		return (0);
327 
328 	/*
329 	 * If the first element is not a bitmap, make it one.
330 	 * Trying to do so without allocating more memory complicates things
331 	 * a bit.
332 	 */
333 	if (!is_bitmap(uh, us)) {
334 		uf = TAILQ_NEXT(us, list);
335 		TAILQ_REMOVE(&uh->head, us, list);
336 		a = us->len;
337 		l = us->ptr == uh ? 1 : 0;
338 		ub = (void *)us;
339 		ub->busy = 0;
340 		if (l) {
341 			bit_nset(ub->map, 0, a);
342 			ub->busy += a;
343 		} else {
344 			bit_nclear(ub->map, 0, a);
345 		}
346 		if (!is_bitmap(uh, uf)) {
347 			if (uf->ptr == NULL) {
348 				bit_nclear(ub->map, a, a + uf->len - 1);
349 			} else {
350 				bit_nset(ub->map, a, a + uf->len - 1);
351 				ub->busy += uf->len;
352 			}
353 			uf->ptr = ub;
354 			uf->len += a;
355 			us = uf;
356 		} else {
357 			ubf = uf->ptr;
358 			for (l = 0; l < uf->len; l++, a++) {
359 				if (bit_test(ubf->map, l)) {
360 					bit_set(ub->map, a);
361 					ub->busy++;
362 				} else {
363 					bit_clear(ub->map, a);
364 				}
365 			}
366 			uf->len = a;
367 			delete_unr(uh, uf->ptr);
368 			uf->ptr = ub;
369 			us = uf;
370 		}
371 	}
372 	ub = us->ptr;
373 	while (1) {
374 		uf = TAILQ_NEXT(us, list);
375 		if (uf == NULL)
376 			return (1);
377 		if (uf->len + us->len > NBITS)
378 			return (1);
379 		if (uf->ptr == NULL) {
380 			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
381 			us->len += uf->len;
382 			TAILQ_REMOVE(&uh->head, uf, list);
383 			delete_unr(uh, uf);
384 		} else if (uf->ptr == uh) {
385 			bit_nset(ub->map, us->len, us->len + uf->len - 1);
386 			ub->busy += uf->len;
387 			us->len += uf->len;
388 			TAILQ_REMOVE(&uh->head, uf, list);
389 			delete_unr(uh, uf);
390 		} else {
391 			ubf = uf->ptr;
392 			for (l = 0; l < uf->len; l++, us->len++) {
393 				if (bit_test(ubf->map, l)) {
394 					bit_set(ub->map, us->len);
395 					ub->busy++;
396 				} else {
397 					bit_clear(ub->map, us->len);
398 				}
399 			}
400 			TAILQ_REMOVE(&uh->head, uf, list);
401 			delete_unr(uh, ubf);
402 			delete_unr(uh, uf);
403 		}
404 	}
405 }
406 
407 /*
408  * See if a given unr should be collapsed with a neighbor.
409  *
410  * NB: called from alloc_unr(), no new memory allocation allowed.
411  */
412 static void
413 collapse_unr(struct unrhdr *uh, struct unr *up)
414 {
415 	struct unr *upp;
416 	struct unrb *ub;
417 
418 	/* If bitmap is all set or clear, change it to runlength */
419 	if (is_bitmap(uh, up)) {
420 		ub = up->ptr;
421 		if (ub->busy == up->len) {
422 			delete_unr(uh, up->ptr);
423 			up->ptr = uh;
424 		} else if (ub->busy == 0) {
425 			delete_unr(uh, up->ptr);
426 			up->ptr = NULL;
427 		}
428 	}
429 
430 	/* If nothing left in runlength, delete it */
431 	if (up->len == 0) {
432 		upp = TAILQ_PREV(up, unrhd, list);
433 		if (upp == NULL)
434 			upp = TAILQ_NEXT(up, list);
435 		TAILQ_REMOVE(&uh->head, up, list);
436 		delete_unr(uh, up);
437 		up = upp;
438 	}
439 
440 	/* If we still have a "hot-spot", merge with a neighbor if possible */
441 	if (up != NULL) {
442 		upp = TAILQ_PREV(up, unrhd, list);
443 		if (upp != NULL && up->ptr == upp->ptr) {
444 			up->len += upp->len;
445 			TAILQ_REMOVE(&uh->head, upp, list);
446 			delete_unr(uh, upp);
447 		}
448 		upp = TAILQ_NEXT(up, list);
449 		if (upp != NULL && up->ptr == upp->ptr) {
450 			up->len += upp->len;
451 			TAILQ_REMOVE(&uh->head, upp, list);
452 			delete_unr(uh, upp);
453 		}
454 	}
455 
456 	/* Merge into ->first if possible */
457 	upp = TAILQ_FIRST(&uh->head);
458 	if (upp != NULL && upp->ptr == uh) {
459 		uh->first += upp->len;
460 		TAILQ_REMOVE(&uh->head, upp, list);
461 		delete_unr(uh, upp);
462 		if (up == upp)
463 			up = NULL;
464 	}
465 
466 	/* Merge into ->last if possible */
467 	upp = TAILQ_LAST(&uh->head, unrhd);
468 	if (upp != NULL && upp->ptr == NULL) {
469 		uh->last += upp->len;
470 		TAILQ_REMOVE(&uh->head, upp, list);
471 		delete_unr(uh, upp);
472 		if (up == upp)
473 			up = NULL;
474 	}
475 
476 	/* Try to make bitmaps */
477 	while (optimize_unr(uh))
478 		continue;
479 }
480 
481 /*
482  * Allocate a free unr.
483  */
484 int
485 alloc_unrl(struct unrhdr *uh)
486 {
487 	struct unr *up;
488 	struct unrb *ub;
489 	u_int x;
490 	int y;
491 	struct lock *ml __debugvar = uh->lock;
492 	struct thread *td __debugvar = curthread;
493 
494 	KKASSERT(lockstatus(ml, td) != 0);
495 	check_unrhdr(uh, __LINE__);
496 	x = uh->low + uh->first;
497 
498 	up = TAILQ_FIRST(&uh->head);
499 
500 	/*
501 	 * If we have an ideal split, just adjust the first+last
502 	 */
503 	if (up == NULL && uh->last > 0) {
504 		uh->first++;
505 		uh->last--;
506 		uh->busy++;
507 		return (x);
508 	}
509 
510 	/*
511 	 * We can always allocate from the first list element, so if we have
512 	 * nothing on the list, we must have run out of unit numbers.
513 	 */
514 	if (up == NULL)
515 		return (-1);
516 
517 	KASSERT(up->ptr != uh, ("UNR first element is allocated"));
518 
519 	if (up->ptr == NULL) {	/* free run */
520 		uh->first++;
521 		up->len--;
522 	} else {		/* bitmap */
523 		ub = up->ptr;
524 		KASSERT(ub->busy < up->len, ("UNR bitmap confusion"));
525 		bit_ffc(ub->map, up->len, &y);
526 		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
527 		bit_set(ub->map, y);
528 		ub->busy++;
529 		x += y;
530 	}
531 	uh->busy++;
532 	collapse_unr(uh, up);
533 	return (x);
534 }
535 
536 int
537 alloc_unr(struct unrhdr *uh)
538 {
539 	int i;
540 
541 	lockmgr(uh->lock, LK_EXCLUSIVE);
542 	i = alloc_unrl(uh);
543 	lockmgr(uh->lock, LK_RELEASE);
544 	return (i);
545 }
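/*
 * Example of the pre-locked variant (a hedged sketch, not from any real
 * consumer; foo_lock, foo_unrhdr and foo_nunits are illustrative names
 * only): when the space was created with a caller-supplied lock,
 * alloc_unrl() may be called with that lock already held, e.g. to
 * allocate a unit and update related state under the same lock.
 *
 *	static struct lock foo_lock;		// passed to new_unrhdr()
 *	static struct unrhdr *foo_unrhdr;	// new_unrhdr(0, 255, &foo_lock)
 *	static int foo_nunits;
 *
 *	int
 *	foo_new_unit(void)
 *	{
 *		int unit;
 *
 *		lockmgr(&foo_lock, LK_EXCLUSIVE);
 *		unit = alloc_unrl(foo_unrhdr);	// lock remains held
 *		if (unit != -1)
 *			foo_nunits++;
 *		lockmgr(&foo_lock, LK_RELEASE);
 *		return (unit);
 *	}
 */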
546 
547 /*
548  * Free a unr.
549  *
550  * If we can save unrs by using a bitmap, do so.
551  */
552 static void
553 free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
554 {
555 	struct unr *up, *upp, *upn;
556 	struct unrb *ub;
557 	u_int pl;
558 
559 	KASSERT(item >= uh->low && item <= uh->high,
560 	    ("UNR: free_unr(%u) out of range [%u...%u]",
561 	     item, uh->low, uh->high));
562 	check_unrhdr(uh, __LINE__);
563 	item -= uh->low;
564 	upp = TAILQ_FIRST(&uh->head);
565 	/*
566 	 * Freeing in the ideal split case
567 	 */
568 	if (item + 1 == uh->first && upp == NULL) {
569 		uh->last++;
570 		uh->first--;
571 		uh->busy--;
572 		check_unrhdr(uh, __LINE__);
573 		return;
574 	}
575 	/*
576  	 * Freeing in the ->first section.  Create a run starting at the
577 	 * freed item.  The code below will subdivide it.
578 	 */
579 	if (item < uh->first) {
580 		up = new_unr(uh, p1, p2);
581 		up->ptr = uh;
582 		up->len = uh->first - item;
583 		TAILQ_INSERT_HEAD(&uh->head, up, list);
584 		uh->first -= up->len;
585 	}
586 
587 	item -= uh->first;
588 
589 	/* Find the item which contains the unit we want to free */
590 	TAILQ_FOREACH(up, &uh->head, list) {
591 		if (up->len > item)
592 			break;
593 		item -= up->len;
594 	}
595 
596 	/* Handle bitmap items */
597 	if (is_bitmap(uh, up)) {
598 		ub = up->ptr;
599 
600 		KASSERT(bit_test(ub->map, item) != 0,
601 		    ("UNR: Freeing free item %d (bitmap)\n", item));
602 		bit_clear(ub->map, item);
603 		uh->busy--;
604 		ub->busy--;
605 		collapse_unr(uh, up);
606 		return;
607 	}
608 
609 	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run)\n", item));
610 
611 	/* Just this one left, reap it */
612 	if (up->len == 1) {
613 		up->ptr = NULL;
614 		uh->busy--;
615 		collapse_unr(uh, up);
616 		return;
617 	}
618 
619 	/* Check if we can shift the item into the previous 'free' run */
620 	upp = TAILQ_PREV(up, unrhd, list);
621 	if (item == 0 && upp != NULL && upp->ptr == NULL) {
622 		upp->len++;
623 		up->len--;
624 		uh->busy--;
625 		collapse_unr(uh, up);
626 		return;
627 	}
628 
629 	/* Check if we can shift the item to the next 'free' run */
630 	upn = TAILQ_NEXT(up, list);
631 	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
632 		upn->len++;
633 		up->len--;
634 		uh->busy--;
635 		collapse_unr(uh, up);
636 		return;
637 	}
638 
639 	/* Split off the tail end, if any. */
640 	pl = up->len - (1 + item);
641 	if (pl > 0) {
642 		upp = new_unr(uh, p1, p2);
643 		upp->ptr = uh;
644 		upp->len = pl;
645 		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
646 	}
647 
648 	/* Split off head end, if any */
649 	if (item > 0) {
650 		upp = new_unr(uh, p1, p2);
651 		upp->len = item;
652 		upp->ptr = uh;
653 		TAILQ_INSERT_BEFORE(up, upp, list);
654 	}
655 	up->len = 1;
656 	up->ptr = NULL;
657 	uh->busy--;
658 	collapse_unr(uh, up);
659 }
660 
661 void
662 free_unr(struct unrhdr *uh, u_int item)
663 {
664 	void *p1, *p2;
665 
666 	p1 = Malloc(sizeof(struct unr));
667 	p2 = Malloc(sizeof(struct unr));
668 	lockmgr(uh->lock, LK_EXCLUSIVE);
669 	free_unrl(uh, item, &p1, &p2);
670 	lockmgr(uh->lock, LK_RELEASE);
671 	if (p1 != NULL)
672 		Free(p1);
673 	if (p2 != NULL)
674 		Free(p2);
675 }
676 
677 #ifndef _KERNEL	/* USERLAND test driver */
678 
679 /*
680  * Simple stochastic test driver for the above functions
681  */
682 
683 static void
684 print_unr(struct unrhdr *uh, struct unr *up)
685 {
686 	u_int x;
687 	struct unrb *ub;
688 
689 	printf("  %p len = %5u ", up, up->len);
690 	if (up->ptr == NULL)
691 		printf("free\n");
692 	else if (up->ptr == uh)
693 		printf("alloc\n");
694 	else {
695 		ub = up->ptr;
696 		printf("bitmap(%d) [", ub->busy);
697 		for (x = 0; x < up->len; x++) {
698 			if (bit_test(ub->map, x))
699 				printf("#");
700 			else
701 				printf(" ");
702 		}
703 		printf("]\n");
704 	}
705 }
706 
707 static void
708 print_unrhdr(struct unrhdr *uh)
709 {
710 	struct unr *up;
711 	u_int x;
712 
713 	printf(
714 	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
715 	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
716 	x = uh->low + uh->first;
717 	TAILQ_FOREACH(up, &uh->head, list) {
718 		printf("  from = %5u", x);
719 		print_unr(uh, up);
720 		if (up->ptr == NULL || up->ptr == uh)
721 			x += up->len;
722 		else
723 			x += NBITS;
724 	}
725 }
726 
727 /* Number of unrs to test */
728 #define NN	10000
729 
730 int
731 main(int argc __unused, const char **argv __unused)
732 {
733 	struct unrhdr *uh;
734 	u_int i, x, m, j;
735 	char a[NN];
736 
737 	setbuf(stdout, NULL);
738 	uh = new_unrhdr(0, NN - 1, NULL);
739 	print_unrhdr(uh);
740 
741 	memset(a, 0, sizeof a);
742 
743 	fprintf(stderr, "sizeof(struct unr) %zu\n", sizeof (struct unr));
744 	fprintf(stderr, "sizeof(struct unrb) %zu\n", sizeof (struct unrb));
745 	fprintf(stderr, "sizeof(struct unrhdr) %zu\n", sizeof (struct unrhdr));
746 	fprintf(stderr, "NBITS %d\n", NBITS);
747 	x = 1;
748 	for (m = 0; m < NN * 100; m++) {
749 		j = random();
750 		i = (j >> 1) % NN;
751 #if 0
752 		if (a[i] && (j & 1))
753 			continue;
754 #endif
755 		if (a[i]) {
756 			printf("F %u\n", i);
757 			free_unr(uh, i);
758 			a[i] = 0;
759 		} else {
760 			no_alloc = 1;
761 			i = alloc_unr(uh);
762 			if (i != -1) {
763 				a[i] = 1;
764 				printf("A %u\n", i);
765 			}
766 			no_alloc = 0;
767 		}
768 		if (1)	/* XXX: change this for detailed debug printout */
769 			print_unrhdr(uh);
770 		check_unrhdr(uh, __LINE__);
771 	}
772 	for (i = 0; i < NN; i++) {
773 		if (a[i]) {
774 			printf("C %u\n", i);
775 			free_unr(uh, i);
776 			print_unrhdr(uh);
777 		}
778 	}
779 	print_unrhdr(uh);
780 	delete_unrhdr(uh);
781 	return (0);
782 }
783 #endif
784