/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 *	This module implements a general bitmap allocator/deallocator.  The
 *	allocator eats around 2 bits per 'block'.  The module does not
 *	try to interpret the meaning of a 'block' other than to return
 *	SWAPBLK_NONE on an allocation failure.
 *
 *	A radix tree is used to maintain the bitmap.  Two radix constants
 *	are involved: one for the bitmaps contained in the leaf nodes
 *	(typically 32), and one for the meta nodes (typically 16).  Both
 *	meta and leaf nodes have a hint field.  This field gives us a hint
 *	as to the largest free contiguous range of blocks under the node.
 *	It may contain a value that is too high, but will never contain a
 *	value that is too low.  When the radix tree is searched, allocation
 *	failures in subtrees update the hint.
 *
 *	The radix tree also implements two collapsed states for meta nodes:
 *	the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 *	in either of these two states, all information contained underneath
 *	the node is considered stale.  These states are used to optimize
 *	allocation and freeing operations.
 *
 *	The hinting greatly increases code efficiency for allocations while
 *	the general radix structure optimizes both allocations and frees.  The
 *	radix tree should be able to operate well no matter how much
 *	fragmentation there is and no matter how large a bitmap is used.
 *
 *	Unlike the rlist code, the blist code wires all necessary memory at
 *	creation time.  Neither allocations nor frees require interaction with
 *	the memory subsystem.  In contrast, the rlist code may allocate memory
 *	on an rlist_free() call.  The non-blocking features of the blist code
 *	are used to great advantage in the swap code (vm/nswap_pager.c).  The
 *	rlist code uses a little less overall memory than the blist code (but
 *	due to swap interleaving not all that much less), but the blist code
 *	scales much, much better.
 *
 *	LAYOUT: The radix tree is laid out recursively using a
 *	linear array.  Each meta node is immediately followed (laid out
 *	sequentially in memory) by BLIST_META_RADIX lower-level nodes.  This
 *	is a recursive structure but one that can be easily scanned through
 *	a very simple 'skip' calculation.  In order to support large radixes,
 *	portions of the tree may reside outside our memory allocation.  We
 *	handle this with an early-termination optimization (when bighint is
 *	set to -1) on the scan.  The memory allocation is only large enough
 *	to cover the number of blocks requested at creation time even if it
 *	must be encompassed in a larger root-node radix.
 *
 *	NOTE: The allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 *	large' if you try.  This is an area that could use improvement.  The
 *	radix is large enough that this restriction does not affect the swap
 *	system, though.  Currently only the allocation code is affected by
 *	this algorithmic unfeature.  The freeing code can handle arbitrary
 *	ranges.
 *
 *	NOTE: The radix may exceed BLIST_BMAP_RADIX bits in order to support
 *	      up to 2^(BLIST_BMAP_RADIX-1) blocks.  The first division will
 *	      drop the radix down and fit it within a signed BLIST_BMAP_RADIX
 *	      bit integer.
 *
 *	This code can be compiled stand-alone for debugging.
 */

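/*
 * Illustration (not compiled): how the radix and skip constants grow in
 * blist_create() for a requested size, assuming the typical constants
 * cited above (BLIST_BMAP_RADIX = 32, BLIST_META_RADIX = 16).  For
 * blocks = 1000:
 *
 *	radix = 32	skip = 0	(a single leaf; too small)
 *	radix = 512	skip = 16	(one meta level over 16 leaves)
 *	radix = 8192	skip = 272	(two meta levels; 272 = (16+1)*16)
 *
 * 8192 >= 1000 terminates the loop: the tree is two meta levels deep,
 * each top-level subtree spans 512 blocks, and consecutive top-level
 * subtrees sit skip/BLIST_META_RADIX = 17 array slots apart (the 'skip'
 * stride described in the LAYOUT note above).
 */
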
#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#define SWAPBLK_NONE ((swblk_t)-1)

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <limits.h>

#define kmalloc(a,b,c)	malloc(a)
#define kfree(a,b)	free(a)
#define kprintf		printf
#define KKASSERT(exp)

#include <sys/blist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */

static swblk_t blst_leaf_alloc(blmeta_t *scan, swblk_t blkat,
				swblk_t blk, swblk_t count);
static swblk_t blst_meta_alloc(blmeta_t *scan, swblk_t blkat,
				swblk_t blk, swblk_t count,
				int64_t radix, swblk_t skip);
static void blst_leaf_free(blmeta_t *scan, swblk_t relblk, swblk_t count);
static void blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count,
					int64_t radix, swblk_t skip,
					swblk_t blk);
static swblk_t blst_leaf_fill(blmeta_t *scan, swblk_t blk, swblk_t count);
static swblk_t blst_meta_fill(blmeta_t *scan, swblk_t fillBlk, swblk_t count,
					int64_t radix, swblk_t skip,
					swblk_t blk);
static void blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix,
				swblk_t skip, blist_t dest, swblk_t count);
static swblk_t	blst_radix_init(blmeta_t *scan, int64_t radix,
						swblk_t skip, swblk_t count);
#ifndef _KERNEL
static void	blst_radix_print(blmeta_t *scan, swblk_t blk,
					int64_t radix, swblk_t skip, int tab);
#endif

#ifdef _KERNEL
static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */

blist_t
blist_create(swblk_t blocks)
{
	blist_t bl;
	int64_t radix;
	swblk_t skip = 0;

	/*
	 * Calculate the radix and skip field used for scanning.
	 *
	 * Radix can exceed BLIST_BMAP_RADIX bits even if swblk_t is limited
	 * to BLIST_BMAP_RADIX bits.
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix *= BLIST_META_RADIX;
		skip = (skip + 1) * BLIST_META_RADIX;
		KKASSERT(skip > 0);
	}

	bl = kmalloc(sizeof(struct blist), M_SWAP, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = kmalloc(sizeof(blmeta_t) * bl->bl_rootblks,
			      M_SWAP, M_WAITOK);

#if defined(BLIST_DEBUG)
	kprintf(
		"BLIST representing %lu blocks (%lu MB of swap)"
		", requiring %6.2fM of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(blmeta_t) + 1023) / (1024.0 * 1024.0)
	);
	kprintf("BLIST raw radix tree: %lu records, top-radix %lu\n",
		bl->bl_rootblks, bl->bl_radix);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}

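/*
 * Typical usage (a minimal sketch; the sizes are illustrative): a
 * freshly created blist has every block marked allocated, so callers
 * normally free the managed range before allocating from it, exactly
 * as the debug main() at the bottom of this file does.
 *
 *	blist_t bl = blist_create(1024);
 *	blist_free(bl, 0, 1024);		release the whole range
 *	swblk_t blk = blist_alloc(bl, 16);	SWAPBLK_NONE on failure
 *	blist_free(bl, blk, 16);
 *	blist_destroy(bl);
 */
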
void
blist_destroy(blist_t bl)
{
	kfree(bl->bl_root, M_SWAP);
	kfree(bl, M_SWAP);
}

/*
 * blist_alloc() -	reserve space in the block bitmap.  Return the base
 *			of a contiguous region or SWAPBLK_NONE if space could
 *			not be allocated.
 */

swblk_t
blist_alloc(blist_t bl, swblk_t count)
{
	swblk_t blk = SWAPBLK_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, 0, count,
					      bl->bl_radix, bl->bl_skip);
		if (blk != SWAPBLK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

swblk_t
blist_allocat(blist_t bl, swblk_t count, swblk_t blkat)
{
	swblk_t blk = SWAPBLK_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, blkat, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, blkat, 0, count,
					      bl->bl_radix, bl->bl_skip);
		if (blk != SWAPBLK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  The range must
 *			have been previously allocated.  Panic if an
 *			inconsistency is found.
 */

void
blist_free(blist_t bl, swblk_t blkno, swblk_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count,
				       bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_fill() -	mark a region in the block bitmap as off-limits
 *			to the allocator (i.e. allocate it), ignoring any
 *			existing allocations.  Return the number of blocks
 *			actually filled that were free before the call.
 */

swblk_t
blist_fill(blist_t bl, swblk_t blkno, swblk_t count)
{
	swblk_t filled;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX) {
			filled = blst_leaf_fill(bl->bl_root, blkno, count);
		} else {
			filled = blst_meta_fill(bl->bl_root, blkno, count,
			    bl->bl_radix, bl->bl_skip, 0);
		}
		bl->bl_free -= filled;
		return (filled);
	} else {
		return 0;
	}
}

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */

void
blist_resize(blist_t *pbl, swblk_t count, int freenew)
{
    blist_t newbl = blist_create(count);
    blist_t save = *pbl;

    *pbl = newbl;
    if (count > save->bl_blocks)
	    count = save->bl_blocks;
    blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

    /*
     * If resizing upwards, should we free the new space or not?
     */
    if (freenew && count < newbl->bl_blocks) {
	    blist_free(newbl, count, newbl->bl_blocks - count);
    }
    blist_destroy(save);
}

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */

void
blist_print(blist_t bl)
{
	kprintf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	kprintf("}\n");
}

#endif

/************************************************************************
 *			  ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * blst_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */

static swblk_t
blst_leaf_alloc(blmeta_t *scan, swblk_t blkat __unused, swblk_t blk,
		swblk_t count)
{
	u_swblk_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap:
		 * a binary search, checking the low half of the window
		 * first and skipping it when it is empty.
		 */
		u_swblk_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (u_swblk_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~((u_swblk_t)1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * Non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = (int)(BLIST_BMAP_RADIX - count);
		u_swblk_t mask;

		mask = (u_swblk_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;

	return(SWAPBLK_NONE);
}

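/*
 * Worked example of the count == 1 binary search above, scaled down to
 * an imaginary 8-bit radix for brevity (the real code operates on
 * BLIST_BMAP_RADIX bits):
 *
 *	orig = 00110000 (bits 4,5 free)	 j = 4	r = 0	mask = 00001111
 *	low half empty:  r = 4, orig = 00000011; then j = 2, mask = 00000011
 *	low half in use: r stays 4;		 then j = 1, mask = 00000001
 *	bit 0 of orig set: loop ends with j = 0, r = 4
 *
 * Bit r = 4 is then cleared and blk + 4 returned.  Each pass halves the
 * search window, so a free bit is found in log2(radix) steps, which is
 * why the header comment calls the single-block case 'log2'.
 */
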
/*
 * blst_meta_alloc() -	allocate at a meta node in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */
static swblk_t
blst_meta_alloc(blmeta_t *scan, swblk_t blkat,
		swblk_t blk, swblk_t count,
		int64_t radix, swblk_t skip)
{
	int hintok = (blk >= blkat);
	swblk_t next_skip = ((swblk_t)skip / BLIST_META_RADIX);
	swblk_t i;

#ifndef _KERNEL
	kprintf("blist_meta_alloc blkat %ld blk %ld count %ld radix %ld\n",
		blkat, blk, count, radix);
#endif

	/*
	 * ALL-ALLOCATED special case
	 */
	if (scan->u.bmu_avail == 0) {
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}

	/*
	 * ALL-FREE special case, initialize uninitialized
	 * sublevel.
	 *
	 * NOTE: radix may exceed 32 bits until first division.
	 */
	if (scan->u.bmu_avail == radix) {
		scan->bm_bighint = radix;

		radix /= BLIST_META_RADIX;
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (swblk_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_swblk_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = (swblk_t)radix;
				scan[i].u.bmu_avail = (swblk_t)radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (count <= scan[i].bm_bighint &&
		    blk + (swblk_t)radix > blkat) {
			/*
			 * count fits in object
			 */
			swblk_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blkat,
						    blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blkat,
						    blk, count,
						    radix, next_skip - 1);
			}
			if (r != SWAPBLK_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
			/* bighint was updated by recursion */
		} else if (scan[i].bm_bighint == (swblk_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count > (swblk_t)radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("%s: allocation too large %lu/%lu",
			      __func__, count, radix);
		}
		blk += (swblk_t)radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (hintok && scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}

/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 */
static void
blst_leaf_free(blmeta_t *scan, swblk_t blk, swblk_t count)
{
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *          \_________/\__/
	 *		v        n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	u_swblk_t mask;

	mask = ((u_swblk_t)-1 << n) &
	    ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("%s: freeing free block", __func__);
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}

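/*
 * Worked example of the mask construction above, assuming
 * BLIST_BMAP_RADIX = 64: freeing blk = 68, count = 3 yields
 * n = 68 & 63 = 4 and
 *
 *	((u_swblk_t)-1 << 4)              bits 4..63 set
 *	((u_swblk_t)-1 >> (64 - 3 - 4))   bits 0..6 set
 *	mask (the AND of the two)         bits 4..6 set
 *
 * i.e. exactly the three bits being freed, which the panic check above
 * requires to be zero (allocated) before they are set free.
 */
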
/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */

static void
blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count,
	       int64_t radix, swblk_t skip, swblk_t blk)
{
	swblk_t i;
	swblk_t next_skip = ((swblk_t)skip / BLIST_META_RADIX);

#if 0
	kprintf("FREE (%lx,%lu) FROM (%lx,%lu)\n",
	    freeBlk, count,
	    blk, radix
	);
#endif

	/*
	 * ALL-ALLOCATED special case, initialize for recursion.
	 *
	 * We will short-cut the ALL-ALLOCATED -> ALL-FREE case.
	 */
	if (scan->u.bmu_avail == 0) {
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix) {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (swblk_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 *
	 * Set bighint for higher levels to snoop.
	 */
	if (scan->u.bmu_avail == radix) {
		scan->bm_bighint = radix;
		return;
	}

	/*
	 * Break the free down into its components
	 */
	if (scan->u.bmu_avail > radix) {
		panic("%s: freeing already "
		      "free blocks (%lu) %lu/%lu",
		      __func__, count, (long)scan->u.bmu_avail, radix);
	}

	radix /= BLIST_META_RADIX;

	i = (freeBlk - blk) / (swblk_t)radix;
	blk += i * (swblk_t)radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		swblk_t v;

		v = blk + (swblk_t)radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (swblk_t)-1)
			panic("%s: freeing unexpected range", __func__);

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v,
				       radix, next_skip - 1, blk);
		}

		/*
		 * After having dealt with the becomes-all-free case any
		 * partial free will not be able to bring us to the
		 * becomes-all-free state.
		 *
		 * We can raise bighint to at least the sub-segment's
		 * bighint.
		 */
		if (scan->bm_bighint < scan[i].bm_bighint) {
			scan->bm_bighint = scan[i].bm_bighint;
		}
		count -= v;
		freeBlk += v;
		blk += (swblk_t)radix;
		i += next_skip;
	}
}

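/*
 * Illustration of the index arithmetic used in the descent above
 * (values assume the typical 32/16 constants from the file header):
 * freeing block 1000 under a radix-8192 root with skip = 272 gives a
 * child radix of 512 and next_skip = 272/16 = 17, so
 *
 *	i   = (1000 - 0) / 512	= 1	second child covers the block
 *	blk = 1 * 512		= 512	that child's base block number
 *	i   = 1 * 17 + 1	= 18	that child's slot in the array
 *
 * matching the LAYOUT note: node 0's children live at array indices
 * 1, 18, 35, ... with a stride of next_skip.
 */
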
/*
 * BLST_LEAF_FILL() -	allocate specific blocks in leaf bitmap
 *
 *	Allocates all blocks in the specified range regardless of
 *	any existing allocations in that range.  Returns the number
 *	of blocks allocated by the call.
 */
static swblk_t
blst_leaf_fill(blmeta_t *scan, swblk_t blk, swblk_t count)
{
	int n = blk & (BLIST_BMAP_RADIX - 1);
	swblk_t nblks;
	u_swblk_t mask, bitmap;

	mask = ((u_swblk_t)-1 << n) &
	    ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	/*
	 * Count the number of blocks we're about to allocate.  Each
	 * 'bitmap &= bitmap - 1' clears the lowest set bit, so the loop
	 * iterates once per free block in the range.
	 */
	bitmap = scan->u.bmu_bitmap & mask;
	for (nblks = 0; bitmap != 0; nblks++)
		bitmap &= bitmap - 1;

	scan->u.bmu_bitmap &= ~mask;
	return (nblks);
}

/*
 * BLST_META_FILL() -	allocate specific blocks at a meta node
 *
 *	Allocates the specified range of blocks, regardless of
 *	any existing allocations in the range.  The range must
 *	be within the extent of this node.  Returns the number
 *	of blocks allocated by the call.
 */
static swblk_t
blst_meta_fill(blmeta_t *scan, swblk_t fillBlk, swblk_t count,
	       int64_t radix, swblk_t skip, swblk_t blk)
{
	swblk_t i;
	swblk_t next_skip = ((swblk_t)skip / BLIST_META_RADIX);
	swblk_t nblks = 0;

	if (count == radix || scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case
		 */
		nblks = scan->u.bmu_avail;
		scan->u.bmu_avail = 0;
		scan->bm_bighint = count;
		return (nblks);
	}

	if (scan->u.bmu_avail == radix) {
		radix /= BLIST_META_RADIX;

		/*
		 * ALL-FREE special case, initialize sublevel
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (swblk_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_swblk_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = (swblk_t)radix;
				scan[i].u.bmu_avail = (swblk_t)radix;
			}
		}
	} else {
		radix /= BLIST_META_RADIX;
	}

	if (count > (swblk_t)radix)
		panic("%s: allocation too large", __func__);

	i = (fillBlk - blk) / (swblk_t)radix;
	blk += i * (swblk_t)radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < fillBlk + count) {
		swblk_t v;

		v = blk + (swblk_t)radix - fillBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (swblk_t)-1)
			panic("%s: filling unexpected range", __func__);

		if (next_skip == 1) {
			nblks += blst_leaf_fill(&scan[i], fillBlk, v);
		} else {
			nblks += blst_meta_fill(&scan[i], fillBlk, v,
			    radix, next_skip - 1, blk);
		}
		count -= v;
		fillBlk += v;
		blk += (swblk_t)radix;
		i += next_skip;
	}
	scan->u.bmu_avail -= nblks;
	return (nblks);
}

/*
 * BLST_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space must not already be free in the destination.
 */

static void
blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix,
	  swblk_t skip, blist_t dest, swblk_t count)
{
	swblk_t next_skip;
	swblk_t i;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		u_swblk_t v = scan->u.bmu_bitmap;

		if (v == (u_swblk_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			swblk_t i;

			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				if (v & ((u_swblk_t)1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */

	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, (swblk_t)radix);
		return;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_swblk_t)skip / BLIST_META_RADIX);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (swblk_t)-1)
			break;

		if (count >= (swblk_t)radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    (swblk_t)radix
			);
			count -= (swblk_t)radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += (swblk_t)radix;
	}
}

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */

static swblk_t
blst_radix_init(blmeta_t *scan, int64_t radix, swblk_t skip, swblk_t count)
{
	swblk_t i;
	swblk_t next_skip;
	swblk_t memindex = 0;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */

	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix /= BLIST_META_RADIX;
	next_skip = ((u_swblk_t)skip / BLIST_META_RADIX);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= (swblk_t)radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    (swblk_t)radix
			);
			count -= (swblk_t)radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (swblk_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}

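/*
 * Worked sizing example (again assuming the typical BLIST_BMAP_RADIX = 32
 * and BLIST_META_RADIX = 16): for blist_create(1000) the radix grows to
 * 8192 with skip = 272, and blst_radix_init(NULL, 8192, 272, 1000)
 * returns 35:
 *
 *	1st subtree, 512 blocks: 1 meta + 16 leaves	= 17 nodes
 *	2nd subtree, 488 blocks: 1 meta + 16 leaves	= 17 nodes
 *	terminator slot					=  1 node
 *
 * so bl_rootblks = 1 + 35 = 36 blmeta_t cover 1000 blocks, far fewer
 * than the 1 + 272 nodes a fully populated radix-8192 tree would take.
 */
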
#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, swblk_t blk, int64_t radix, swblk_t skip, int tab)
{
	swblk_t i;
	swblk_t next_skip;

	if (radix == BLIST_BMAP_RADIX) {
		kprintf(
		    "%*.*s(%04lx,%lu): bitmap %016lx big=%lu\n",
		    tab, tab, "",
		    blk, radix,
		    scan->u.bmu_bitmap,
		    scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		kprintf(
		    "%*.*s(%04lx,%ld) ALL ALLOCATED\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		kprintf(
		    "%*.*s(%04lx,%ld) ALL FREE\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}

	kprintf(
	    "%*.*s(%04lx,%lld): subtree (%lu/%lld) big=%lu {\n",
	    tab, tab, "",
	    blk, (long long)radix,
	    scan->u.bmu_avail,
	    (long long)radix,
	    scan->bm_bighint
	);

	radix /= BLIST_META_RADIX;
	next_skip = ((u_swblk_t)skip / BLIST_META_RADIX);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (swblk_t)-1) {
			kprintf(
			    "%*.*s(%04lx,%ld): Terminator\n",
			    tab, tab, "",
			    blk, radix
			);
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += (swblk_t)radix;
	}
	tab -= 4;

	kprintf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	swblk_t size = 1024;
	swblk_t i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		swblk_t da = 0;
		swblk_t count = 0;
		swblk_t blkat;

		kprintf("%lu/%lu/%lld> ",
			bl->bl_free, size, (long long)bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%li", &count) == 1) {
				blist_resize(&bl, count, 1);
				size = count;
			} else {
				kprintf("?\n");
			}
			/* fall through to print the resized tree */
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%li %li", &count, &blkat) == 1) {
				kprintf("count %ld\n", count);
				swblk_t blk = blist_alloc(bl, count);
				kprintf("    R=%04lx\n", blk);
			} else if (sscanf(buf + 1, "%li %li", &count, &blkat) == 2) {
				swblk_t blk = blist_allocat(bl, count, blkat);
				kprintf("    R=%04lx\n", blk);
			} else {
				kprintf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%li %li", &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				kprintf("?\n");
			}
			break;
		case 'l':
			if (sscanf(buf + 1, "%li %li", &da, &count) == 2) {
				kprintf("    n=%lu\n",
				    blist_fill(bl, da, count));
			} else {
				kprintf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %li      -allocate\n"
			    "f %li %li  -free\n"
			    "l %li %li  -fill\n"
			    "r %li      -resize\n"
			    "h/?        -help\n"
			    "    hex may be specified with 0x prefix\n"
			);
			break;
		default:
			kprintf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	__va_list va;

	__va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	__va_end(va);
	exit(1);
}

#endif