1 /* 2 * BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting 3 * 4 * Copyright (c) 1998,2004 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * 37 * This module implements a general bitmap allocator/deallocator. The 38 * allocator eats around 2 bits per 'block'. 
The module does not 39 * try to interpret the meaning of a 'block' other then to return 40 * SWAPBLK_NONE on an allocation failure. 41 * 42 * A radix tree is used to maintain the bitmap. Two radix constants are 43 * involved: One for the bitmaps contained in the leaf nodes (typically 44 * 32), and one for the meta nodes (typically 16). Both meta and leaf 45 * nodes have a hint field. This field gives us a hint as to the largest 46 * free contiguous range of blocks under the node. It may contain a 47 * value that is too high, but will never contain a value that is too 48 * low. When the radix tree is searched, allocation failures in subtrees 49 * update the hint. 50 * 51 * The radix tree also implements two collapsed states for meta nodes: 52 * the ALL-ALLOCATED state and the ALL-FREE state. If a meta node is 53 * in either of these two states, all information contained underneath 54 * the node is considered stale. These states are used to optimize 55 * allocation and freeing operations. 56 * 57 * The hinting greatly increases code efficiency for allocations while 58 * the general radix structure optimizes both allocations and frees. The 59 * radix tree should be able to operate well no matter how much 60 * fragmentation there is and no matter how large a bitmap is used. 61 * 62 * Unlike the rlist code, the blist code wires all necessary memory at 63 * creation time. Neither allocations nor frees require interaction with 64 * the memory subsystem. In contrast, the rlist code may allocate memory 65 * on an rlist_free() call. The non-blocking features of the blist code 66 * are used to great advantage in the swap code (vm/nswap_pager.c). The 67 * rlist code uses a little less overall memory then the blist code (but 68 * due to swap interleaving not all that much less), but the blist code 69 * scales much, much better. 70 * 71 * LAYOUT: The radix tree is layed out recursively using a 72 * linear array. 
Each meta node is immediately followed (laid out
 * sequentially in memory) by BLIST_META_RADIX lower level nodes.  This
 * is a recursive structure but one that can be easily scanned through
 * a very simple 'skip' calculation.  In order to support large radixes,
 * portions of the tree may reside outside our memory allocation.  We
 * handle this with an early-termination optimization (when bighint is
 * set to -1) on the scan.  The memory allocation is only large enough
 * to cover the number of blocks requested at creation time even if it
 * must be encompassed in larger root-node radix.
 *
 * NOTE: The allocator cannot currently allocate more than
 * BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 * large' if you try.  This is an area that could use improvement.  The
 * radix is large enough that this restriction does not affect the swap
 * system, though.  Currently only the allocation code is affected by
 * this algorithmic unfeature.  The freeing code can handle arbitrary
 * ranges.
 *
 * NOTE: The radix may exceed 32 bits in order to support up to 2^31
 * blocks.  The first division will drop the radix down and fit
 * it within a signed 32 bit integer.
 *
 * This code can be compiled stand-alone for debugging.
95 * 96 * $FreeBSD: src/sys/kern/subr_blist.c,v 1.5.2.2 2003/01/12 09:23:12 dillon Exp $ 97 * $DragonFly: src/sys/kern/subr_blist.c,v 1.8 2008/08/10 22:09:50 dillon Exp $ 98 */ 99 100 #ifdef _KERNEL 101 102 #include <sys/param.h> 103 #include <sys/systm.h> 104 #include <sys/lock.h> 105 #include <sys/kernel.h> 106 #include <sys/blist.h> 107 #include <sys/malloc.h> 108 #include <vm/vm.h> 109 #include <vm/vm_object.h> 110 #include <vm/vm_kern.h> 111 #include <vm/vm_extern.h> 112 #include <vm/vm_page.h> 113 114 #else 115 116 #ifndef BLIST_NO_DEBUG 117 #define BLIST_DEBUG 118 #endif 119 120 #define SWAPBLK_NONE ((swblk_t)-1) 121 122 #include <sys/types.h> 123 #include <stdio.h> 124 #include <string.h> 125 #include <stdlib.h> 126 #include <stdarg.h> 127 128 #define kmalloc(a,b,c) malloc(a) 129 #define kfree(a,b) free(a) 130 #define kprintf printf 131 #define KKASSERT(exp) 132 133 #include <sys/blist.h> 134 135 void panic(const char *ctl, ...); 136 137 #endif 138 139 /* 140 * static support functions 141 */ 142 143 static swblk_t blst_leaf_alloc(blmeta_t *scan, swblk_t blk, int count); 144 static swblk_t blst_meta_alloc(blmeta_t *scan, swblk_t blk, 145 swblk_t count, int64_t radix, int skip); 146 static void blst_leaf_free(blmeta_t *scan, swblk_t relblk, int count); 147 static void blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count, 148 int64_t radix, int skip, swblk_t blk); 149 static void blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix, 150 swblk_t skip, blist_t dest, swblk_t count); 151 static swblk_t blst_radix_init(blmeta_t *scan, int64_t radix, 152 int skip, swblk_t count); 153 #ifndef _KERNEL 154 static void blst_radix_print(blmeta_t *scan, swblk_t blk, 155 int64_t radix, int skip, int tab); 156 #endif 157 158 #ifdef _KERNEL 159 static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space"); 160 #endif 161 162 /* 163 * blist_create() - create a blist capable of handling up to the specified 164 * number of blocks 165 * 166 * blocks must be greater then 0 167 * 
168 * The smallest blist consists of a single leaf node capable of 169 * managing BLIST_BMAP_RADIX blocks. 170 */ 171 172 blist_t 173 blist_create(swblk_t blocks) 174 { 175 blist_t bl; 176 int64_t radix; 177 int skip = 0; 178 179 /* 180 * Calculate radix and skip field used for scanning. 181 * 182 * Radix can exceed 32 bits even if swblk_t is limited to 32 bits. 183 */ 184 radix = BLIST_BMAP_RADIX; 185 186 while (radix < blocks) { 187 radix *= BLIST_META_RADIX; 188 skip = (skip + 1) * BLIST_META_RADIX; 189 KKASSERT(skip > 0); 190 } 191 192 bl = kmalloc(sizeof(struct blist), M_SWAP, M_WAITOK); 193 194 bzero(bl, sizeof(*bl)); 195 196 bl->bl_blocks = blocks; 197 bl->bl_radix = radix; 198 bl->bl_skip = skip; 199 bl->bl_rootblks = 1 + 200 blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks); 201 bl->bl_root = kmalloc(sizeof(blmeta_t) * bl->bl_rootblks, M_SWAP, M_WAITOK); 202 203 #if defined(BLIST_DEBUG) 204 kprintf( 205 "BLIST representing %d blocks (%d MB of swap)" 206 ", requiring %dK of ram\n", 207 bl->bl_blocks, 208 bl->bl_blocks * 4 / 1024, 209 (bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024 210 ); 211 kprintf("BLIST raw radix tree contains %d records\n", bl->bl_rootblks); 212 #endif 213 blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks); 214 215 return(bl); 216 } 217 218 void 219 blist_destroy(blist_t bl) 220 { 221 kfree(bl->bl_root, M_SWAP); 222 kfree(bl, M_SWAP); 223 } 224 225 /* 226 * blist_alloc() - reserve space in the block bitmap. Return the base 227 * of a contiguous region or SWAPBLK_NONE if space could 228 * not be allocated. 
229 */ 230 231 swblk_t 232 blist_alloc(blist_t bl, swblk_t count) 233 { 234 swblk_t blk = SWAPBLK_NONE; 235 236 if (bl) { 237 if (bl->bl_radix == BLIST_BMAP_RADIX) 238 blk = blst_leaf_alloc(bl->bl_root, 0, count); 239 else 240 blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip); 241 if (blk != SWAPBLK_NONE) 242 bl->bl_free -= count; 243 } 244 return(blk); 245 } 246 247 /* 248 * blist_free() - free up space in the block bitmap. Return the base 249 * of a contiguous region. Panic if an inconsistancy is 250 * found. 251 */ 252 253 void 254 blist_free(blist_t bl, swblk_t blkno, swblk_t count) 255 { 256 if (bl) { 257 if (bl->bl_radix == BLIST_BMAP_RADIX) 258 blst_leaf_free(bl->bl_root, blkno, count); 259 else 260 blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0); 261 bl->bl_free += count; 262 } 263 } 264 265 /* 266 * blist_resize() - resize an existing radix tree to handle the 267 * specified number of blocks. This will reallocate 268 * the tree and transfer the previous bitmap to the new 269 * one. When extending the tree you can specify whether 270 * the new blocks are to left allocated or freed. 271 */ 272 273 void 274 blist_resize(blist_t *pbl, swblk_t count, int freenew) 275 { 276 blist_t newbl = blist_create(count); 277 blist_t save = *pbl; 278 279 *pbl = newbl; 280 if (count > save->bl_blocks) 281 count = save->bl_blocks; 282 blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count); 283 284 /* 285 * If resizing upwards, should we free the new space or not? 
286 */ 287 if (freenew && count < newbl->bl_blocks) { 288 blist_free(newbl, count, newbl->bl_blocks - count); 289 } 290 blist_destroy(save); 291 } 292 293 #ifdef BLIST_DEBUG 294 295 /* 296 * blist_print() - dump radix tree 297 */ 298 299 void 300 blist_print(blist_t bl) 301 { 302 kprintf("BLIST {\n"); 303 blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4); 304 kprintf("}\n"); 305 } 306 307 #endif 308 309 /************************************************************************ 310 * ALLOCATION SUPPORT FUNCTIONS * 311 ************************************************************************ 312 * 313 * These support functions do all the actual work. They may seem 314 * rather longish, but that's because I've commented them up. The 315 * actual code is straight forward. 316 * 317 */ 318 319 /* 320 * blist_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap). 321 * 322 * This is the core of the allocator and is optimized for the 1 block 323 * and the BLIST_BMAP_RADIX block allocation cases. Other cases are 324 * somewhat slower. The 1 block allocation case is log2 and extremely 325 * quick. 326 */ 327 328 static swblk_t 329 blst_leaf_alloc(blmeta_t *scan, swblk_t blk, int count) 330 { 331 u_swblk_t orig = scan->u.bmu_bitmap; 332 333 if (orig == 0) { 334 /* 335 * Optimize bitmap all-allocated case. Also, count = 1 336 * case assumes at least 1 bit is free in the bitmap, so 337 * we have to take care of this case here. 
338 */ 339 scan->bm_bighint = 0; 340 return(SWAPBLK_NONE); 341 } 342 if (count == 1) { 343 /* 344 * Optimized code to allocate one bit out of the bitmap 345 */ 346 u_swblk_t mask; 347 int j = BLIST_BMAP_RADIX/2; 348 int r = 0; 349 350 mask = (u_swblk_t)-1 >> (BLIST_BMAP_RADIX/2); 351 352 while (j) { 353 if ((orig & mask) == 0) { 354 r += j; 355 orig >>= j; 356 } 357 j >>= 1; 358 mask >>= j; 359 } 360 scan->u.bmu_bitmap &= ~(1 << r); 361 return(blk + r); 362 } 363 if (count <= BLIST_BMAP_RADIX) { 364 /* 365 * non-optimized code to allocate N bits out of the bitmap. 366 * The more bits, the faster the code runs. It will run 367 * the slowest allocating 2 bits, but since there aren't any 368 * memory ops in the core loop (or shouldn't be, anyway), 369 * you probably won't notice the difference. 370 */ 371 int j; 372 int n = BLIST_BMAP_RADIX - count; 373 u_swblk_t mask; 374 375 mask = (u_swblk_t)-1 >> n; 376 377 for (j = 0; j <= n; ++j) { 378 if ((orig & mask) == mask) { 379 scan->u.bmu_bitmap &= ~mask; 380 return(blk + j); 381 } 382 mask = (mask << 1); 383 } 384 } 385 /* 386 * We couldn't allocate count in this subtree, update bighint. 387 */ 388 scan->bm_bighint = count - 1; 389 return(SWAPBLK_NONE); 390 } 391 392 /* 393 * blist_meta_alloc() - allocate at a meta in the radix tree. 394 * 395 * Attempt to allocate at a meta node. If we can't, we update 396 * bighint and return a failure. Updating bighint optimize future 397 * calls that hit this node. We have to check for our collapse cases 398 * and we have a few optimizations strewn in as well. 399 */ 400 401 static swblk_t 402 blst_meta_alloc(blmeta_t *scan, swblk_t blk, swblk_t count, 403 int64_t radix, int skip) 404 { 405 int i; 406 int next_skip = ((u_int)skip / BLIST_META_RADIX); 407 408 if (scan->u.bmu_avail == 0) { 409 /* 410 * ALL-ALLOCATED special case 411 */ 412 scan->bm_bighint = count; 413 return(SWAPBLK_NONE); 414 } 415 416 /* 417 * note: radix may exceed 32 bits until first division. 
418 */ 419 if (scan->u.bmu_avail == radix) { 420 radix /= BLIST_META_RADIX; 421 422 /* 423 * ALL-FREE special case, initialize uninitialize 424 * sublevel. 425 */ 426 for (i = 1; i <= skip; i += next_skip) { 427 if (scan[i].bm_bighint == (swblk_t)-1) 428 break; 429 if (next_skip == 1) { 430 scan[i].u.bmu_bitmap = (u_swblk_t)-1; 431 scan[i].bm_bighint = BLIST_BMAP_RADIX; 432 } else { 433 scan[i].bm_bighint = (swblk_t)radix; 434 scan[i].u.bmu_avail = (swblk_t)radix; 435 } 436 } 437 } else { 438 radix /= BLIST_META_RADIX; 439 } 440 441 for (i = 1; i <= skip; i += next_skip) { 442 if (count <= scan[i].bm_bighint) { 443 /* 444 * count fits in object 445 */ 446 swblk_t r; 447 if (next_skip == 1) { 448 r = blst_leaf_alloc(&scan[i], blk, count); 449 } else { 450 r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1); 451 } 452 if (r != SWAPBLK_NONE) { 453 scan->u.bmu_avail -= count; 454 if (scan->bm_bighint > scan->u.bmu_avail) 455 scan->bm_bighint = scan->u.bmu_avail; 456 return(r); 457 } 458 } else if (scan[i].bm_bighint == (swblk_t)-1) { 459 /* 460 * Terminator 461 */ 462 break; 463 } else if (count > (swblk_t)radix) { 464 /* 465 * count does not fit in object even if it were 466 * complete free. 467 */ 468 panic("blist_meta_alloc: allocation too large"); 469 } 470 blk += (swblk_t)radix; 471 } 472 473 /* 474 * We couldn't allocate count in this subtree, update bighint. 475 */ 476 if (scan->bm_bighint >= count) 477 scan->bm_bighint = count - 1; 478 return(SWAPBLK_NONE); 479 } 480 481 /* 482 * BLST_LEAF_FREE() - free allocated block from leaf bitmap 483 * 484 */ 485 486 static void 487 blst_leaf_free(blmeta_t *scan, swblk_t blk, int count) 488 { 489 /* 490 * free some data in this bitmap 491 * 492 * e.g. 
493 * 0000111111111110000 494 * \_________/\__/ 495 * v n 496 */ 497 int n = blk & (BLIST_BMAP_RADIX - 1); 498 u_swblk_t mask; 499 500 mask = ((u_swblk_t)-1 << n) & 501 ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n)); 502 503 if (scan->u.bmu_bitmap & mask) 504 panic("blst_radix_free: freeing free block"); 505 scan->u.bmu_bitmap |= mask; 506 507 /* 508 * We could probably do a better job here. We are required to make 509 * bighint at least as large as the biggest contiguous block of 510 * data. If we just shoehorn it, a little extra overhead will 511 * be incured on the next allocation (but only that one typically). 512 */ 513 scan->bm_bighint = BLIST_BMAP_RADIX; 514 } 515 516 /* 517 * BLST_META_FREE() - free allocated blocks from radix tree meta info 518 * 519 * This support routine frees a range of blocks from the bitmap. 520 * The range must be entirely enclosed by this radix node. If a 521 * meta node, we break the range down recursively to free blocks 522 * in subnodes (which means that this code can free an arbitrary 523 * range whereas the allocation code cannot allocate an arbitrary 524 * range). 525 */ 526 527 static void 528 blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count, 529 int64_t radix, int skip, swblk_t blk) 530 { 531 int i; 532 int next_skip = ((u_int)skip / BLIST_META_RADIX); 533 534 #if 0 535 kprintf("FREE (%x,%d) FROM (%x,%lld)\n", 536 freeBlk, count, 537 blk, (long long)radix 538 ); 539 #endif 540 541 /* 542 * NOTE: radix may exceed 32 bits until first division. 543 */ 544 if (scan->u.bmu_avail == 0) { 545 /* 546 * ALL-ALLOCATED special case, with possible 547 * shortcut to ALL-FREE special case. 
548 */ 549 scan->u.bmu_avail = count; 550 scan->bm_bighint = count; 551 552 if (count != radix) { 553 for (i = 1; i <= skip; i += next_skip) { 554 if (scan[i].bm_bighint == (swblk_t)-1) 555 break; 556 scan[i].bm_bighint = 0; 557 if (next_skip == 1) { 558 scan[i].u.bmu_bitmap = 0; 559 } else { 560 scan[i].u.bmu_avail = 0; 561 } 562 } 563 /* fall through */ 564 } 565 } else { 566 scan->u.bmu_avail += count; 567 /* scan->bm_bighint = radix; */ 568 } 569 570 /* 571 * ALL-FREE special case. 572 */ 573 574 if (scan->u.bmu_avail == radix) 575 return; 576 if (scan->u.bmu_avail > radix) 577 panic("blst_meta_free: freeing already free blocks (%d) %d/%lld", count, scan->u.bmu_avail, (long long)radix); 578 579 /* 580 * Break the free down into its components 581 */ 582 583 radix /= BLIST_META_RADIX; 584 585 i = (freeBlk - blk) / (swblk_t)radix; 586 blk += i * (swblk_t)radix; 587 i = i * next_skip + 1; 588 589 while (i <= skip && blk < freeBlk + count) { 590 swblk_t v; 591 592 v = blk + (swblk_t)radix - freeBlk; 593 if (v > count) 594 v = count; 595 596 if (scan->bm_bighint == (swblk_t)-1) 597 panic("blst_meta_free: freeing unexpected range"); 598 599 if (next_skip == 1) { 600 blst_leaf_free(&scan[i], freeBlk, v); 601 } else { 602 blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk); 603 } 604 if (scan->bm_bighint < scan[i].bm_bighint) 605 scan->bm_bighint = scan[i].bm_bighint; 606 count -= v; 607 freeBlk += v; 608 blk += (swblk_t)radix; 609 i += next_skip; 610 } 611 } 612 613 /* 614 * BLIST_RADIX_COPY() - copy one radix tree to another 615 * 616 * Locates free space in the source tree and frees it in the destination 617 * tree. The space may not already be free in the destination. 
618 */ 619 620 static void 621 blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix, 622 swblk_t skip, blist_t dest, swblk_t count) 623 { 624 int next_skip; 625 int i; 626 627 /* 628 * Leaf node 629 */ 630 631 if (radix == BLIST_BMAP_RADIX) { 632 u_swblk_t v = scan->u.bmu_bitmap; 633 634 if (v == (u_swblk_t)-1) { 635 blist_free(dest, blk, count); 636 } else if (v != 0) { 637 int i; 638 639 for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) { 640 if (v & (1 << i)) 641 blist_free(dest, blk + i, 1); 642 } 643 } 644 return; 645 } 646 647 /* 648 * Meta node 649 */ 650 651 if (scan->u.bmu_avail == 0) { 652 /* 653 * Source all allocated, leave dest allocated 654 */ 655 return; 656 } 657 if (scan->u.bmu_avail == radix) { 658 /* 659 * Source all free, free entire dest 660 */ 661 if (count < radix) 662 blist_free(dest, blk, count); 663 else 664 blist_free(dest, blk, (swblk_t)radix); 665 return; 666 } 667 668 669 radix /= BLIST_META_RADIX; 670 next_skip = ((u_int)skip / BLIST_META_RADIX); 671 672 for (i = 1; count && i <= skip; i += next_skip) { 673 if (scan[i].bm_bighint == (swblk_t)-1) 674 break; 675 676 if (count >= (swblk_t)radix) { 677 blst_copy( 678 &scan[i], 679 blk, 680 radix, 681 next_skip - 1, 682 dest, 683 (swblk_t)radix 684 ); 685 count -= (swblk_t)radix; 686 } else { 687 if (count) { 688 blst_copy( 689 &scan[i], 690 blk, 691 radix, 692 next_skip - 1, 693 dest, 694 count 695 ); 696 } 697 count = 0; 698 } 699 blk += (swblk_t)radix; 700 } 701 } 702 703 /* 704 * BLST_RADIX_INIT() - initialize radix tree 705 * 706 * Initialize our meta structures and bitmaps and calculate the exact 707 * amount of space required to manage 'count' blocks - this space may 708 * be considerably less then the calculated radix due to the large 709 * RADIX values we use. 
710 */ 711 712 static swblk_t 713 blst_radix_init(blmeta_t *scan, int64_t radix, int skip, swblk_t count) 714 { 715 int i; 716 int next_skip; 717 swblk_t memindex = 0; 718 719 /* 720 * Leaf node 721 */ 722 723 if (radix == BLIST_BMAP_RADIX) { 724 if (scan) { 725 scan->bm_bighint = 0; 726 scan->u.bmu_bitmap = 0; 727 } 728 return(memindex); 729 } 730 731 /* 732 * Meta node. If allocating the entire object we can special 733 * case it. However, we need to figure out how much memory 734 * is required to manage 'count' blocks, so we continue on anyway. 735 */ 736 737 if (scan) { 738 scan->bm_bighint = 0; 739 scan->u.bmu_avail = 0; 740 } 741 742 radix /= BLIST_META_RADIX; 743 next_skip = ((u_int)skip / BLIST_META_RADIX); 744 745 for (i = 1; i <= skip; i += next_skip) { 746 if (count >= (swblk_t)radix) { 747 /* 748 * Allocate the entire object 749 */ 750 memindex = i + blst_radix_init( 751 ((scan) ? &scan[i] : NULL), 752 radix, 753 next_skip - 1, 754 (swblk_t)radix 755 ); 756 count -= (swblk_t)radix; 757 } else if (count > 0) { 758 /* 759 * Allocate a partial object 760 */ 761 memindex = i + blst_radix_init( 762 ((scan) ? 
&scan[i] : NULL), 763 radix, 764 next_skip - 1, 765 count 766 ); 767 count = 0; 768 } else { 769 /* 770 * Add terminator and break out 771 */ 772 if (scan) 773 scan[i].bm_bighint = (swblk_t)-1; 774 break; 775 } 776 } 777 if (memindex < i) 778 memindex = i; 779 return(memindex); 780 } 781 782 #ifdef BLIST_DEBUG 783 784 static void 785 blst_radix_print(blmeta_t *scan, swblk_t blk, int64_t radix, int skip, int tab) 786 { 787 int i; 788 int next_skip; 789 int lastState = 0; 790 791 if (radix == BLIST_BMAP_RADIX) { 792 kprintf( 793 "%*.*s(%04x,%lld): bitmap %08x big=%d\n", 794 tab, tab, "", 795 blk, (long long)radix, 796 scan->u.bmu_bitmap, 797 scan->bm_bighint 798 ); 799 return; 800 } 801 802 if (scan->u.bmu_avail == 0) { 803 kprintf( 804 "%*.*s(%04x,%lld) ALL ALLOCATED\n", 805 tab, tab, "", 806 blk, 807 (long long)radix 808 ); 809 return; 810 } 811 if (scan->u.bmu_avail == radix) { 812 kprintf( 813 "%*.*s(%04x,%lld) ALL FREE\n", 814 tab, tab, "", 815 blk, 816 (long long)radix 817 ); 818 return; 819 } 820 821 kprintf( 822 "%*.*s(%04x,%lld): subtree (%d/%lld) big=%d {\n", 823 tab, tab, "", 824 blk, (long long)radix, 825 scan->u.bmu_avail, 826 (long long)radix, 827 scan->bm_bighint 828 ); 829 830 radix /= BLIST_META_RADIX; 831 next_skip = ((u_int)skip / BLIST_META_RADIX); 832 tab += 4; 833 834 for (i = 1; i <= skip; i += next_skip) { 835 if (scan[i].bm_bighint == (swblk_t)-1) { 836 kprintf( 837 "%*.*s(%04x,%lld): Terminator\n", 838 tab, tab, "", 839 blk, (long long)radix 840 ); 841 lastState = 0; 842 break; 843 } 844 blst_radix_print( 845 &scan[i], 846 blk, 847 radix, 848 next_skip - 1, 849 tab 850 ); 851 blk += (swblk_t)radix; 852 } 853 tab -= 4; 854 855 kprintf( 856 "%*.*s}\n", 857 tab, tab, "" 858 ); 859 } 860 861 #endif 862 863 #ifdef BLIST_DEBUG 864 865 int 866 main(int ac, char **av) 867 { 868 int size = 1024; 869 int i; 870 blist_t bl; 871 872 for (i = 1; i < ac; ++i) { 873 const char *ptr = av[i]; 874 if (*ptr != '-') { 875 size = strtol(ptr, NULL, 0); 876 
continue; 877 } 878 ptr += 2; 879 fprintf(stderr, "Bad option: %s\n", ptr - 2); 880 exit(1); 881 } 882 bl = blist_create(size); 883 blist_free(bl, 0, size); 884 885 for (;;) { 886 char buf[1024]; 887 swblk_t da = 0; 888 swblk_t count = 0; 889 890 891 kprintf("%d/%d/%lld> ", 892 bl->bl_free, size, (long long)bl->bl_radix); 893 fflush(stdout); 894 if (fgets(buf, sizeof(buf), stdin) == NULL) 895 break; 896 switch(buf[0]) { 897 case 'r': 898 if (sscanf(buf + 1, "%d", &count) == 1) { 899 blist_resize(&bl, count, 1); 900 } else { 901 kprintf("?\n"); 902 } 903 case 'p': 904 blist_print(bl); 905 break; 906 case 'a': 907 if (sscanf(buf + 1, "%d", &count) == 1) { 908 swblk_t blk = blist_alloc(bl, count); 909 kprintf(" R=%04x\n", blk); 910 } else { 911 kprintf("?\n"); 912 } 913 break; 914 case 'f': 915 if (sscanf(buf + 1, "%x %d", &da, &count) == 2) { 916 blist_free(bl, da, count); 917 } else { 918 kprintf("?\n"); 919 } 920 break; 921 case '?': 922 case 'h': 923 puts( 924 "p -print\n" 925 "a %d -allocate\n" 926 "f %x %d -free\n" 927 "r %d -resize\n" 928 "h/? -help" 929 ); 930 break; 931 default: 932 kprintf("?\n"); 933 break; 934 } 935 } 936 return(0); 937 } 938 939 void 940 panic(const char *ctl, ...) 941 { 942 __va_list va; 943 944 __va_start(va, ctl); 945 vfprintf(stderr, ctl, va); 946 fprintf(stderr, "\n"); 947 __va_end(va); 948 exit(1); 949 } 950 951 #endif 952 953