1 /* 2 * BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting 3 * 4 * Copyright (c) 1998,2004 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * 37 * This module implements a general bitmap allocator/deallocator. The 38 * allocator eats around 2 bits per 'block'. 
 * The module does not try to interpret the meaning of a 'block' other
 * than to return SWAPBLK_NONE on an allocation failure.
 *
 * A radix tree is used to maintain the bitmap.  Two radix constants are
 * involved: One for the bitmaps contained in the leaf nodes (typically
 * 32), and one for the meta nodes (typically 16).  Both meta and leaf
 * nodes have a hint field.  This field gives us a hint as to the largest
 * free contiguous range of blocks under the node.  It may contain a
 * value that is too high, but will never contain a value that is too
 * low.  When the radix tree is searched, allocation failures in subtrees
 * update the hint.
 *
 * The radix tree also implements two collapsed states for meta nodes:
 * the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 * in either of these two states, all information contained underneath
 * the node is considered stale.  These states are used to optimize
 * allocation and freeing operations.
 *
 * The hinting greatly increases code efficiency for allocations while
 * the general radix structure optimizes both allocations and frees.  The
 * radix tree should be able to operate well no matter how much
 * fragmentation there is and no matter how large a bitmap is used.
 *
 * Unlike the rlist code, the blist code wires all necessary memory at
 * creation time.  Neither allocations nor frees require interaction with
 * the memory subsystem.  In contrast, the rlist code may allocate memory
 * on an rlist_free() call.  The non-blocking features of the blist code
 * are used to great advantage in the swap code (vm/nswap_pager.c).  The
 * rlist code uses a little less overall memory than the blist code (but
 * due to swap interleaving not all that much less), but the blist code
 * scales much, much better.
 *
 * LAYOUT: The radix tree is laid out recursively using a
 * linear array.
 * Each meta node is immediately followed (laid out
 * sequentially in memory) by BLIST_META_RADIX lower-level nodes.  This
 * is a recursive structure but one that can be easily scanned through
 * a very simple 'skip' calculation.  In order to support large radixes,
 * portions of the tree may reside outside our memory allocation.  We
 * handle this with an early-termination optimization (when bighint is
 * set to -1) on the scan.  The memory allocation is only large enough
 * to cover the number of blocks requested at creation time even if it
 * must be encompassed in a larger root-node radix.
 *
 * NOTE: The allocator cannot currently allocate more than
 * BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 * large' if you try.  This is an area that could use improvement.  The
 * radix is large enough that this restriction does not affect the swap
 * system, though.  Currently only the allocation code is affected by
 * this algorithmic unfeature.  The freeing code can handle arbitrary
 * ranges.
 *
 * NOTE: The radix may exceed 32 bits in order to support up to 2^31
 * blocks.  The first division will drop the radix down and fit
 * it within a signed 32 bit integer.
 *
 * This code can be compiled stand-alone for debugging.
95 * 96 * $FreeBSD: src/sys/kern/subr_blist.c,v 1.5.2.2 2003/01/12 09:23:12 dillon Exp $ 97 * $DragonFly: src/sys/kern/subr_blist.c,v 1.8 2008/08/10 22:09:50 dillon Exp $ 98 */ 99 100 #ifdef _KERNEL 101 102 #include <sys/param.h> 103 #include <sys/systm.h> 104 #include <sys/lock.h> 105 #include <sys/kernel.h> 106 #include <sys/blist.h> 107 #include <sys/malloc.h> 108 109 #else 110 111 #ifndef BLIST_NO_DEBUG 112 #define BLIST_DEBUG 113 #endif 114 115 #define SWAPBLK_NONE ((swblk_t)-1) 116 117 #include <sys/types.h> 118 #include <stdio.h> 119 #include <string.h> 120 #include <stdlib.h> 121 #include <stdarg.h> 122 123 #define kmalloc(a,b,c) malloc(a) 124 #define kfree(a,b) free(a) 125 #define kprintf printf 126 #define KKASSERT(exp) 127 128 #include <sys/blist.h> 129 130 void panic(const char *ctl, ...); 131 132 #endif 133 134 /* 135 * static support functions 136 */ 137 138 static swblk_t blst_leaf_alloc(blmeta_t *scan, swblk_t blk, int count); 139 static swblk_t blst_meta_alloc(blmeta_t *scan, swblk_t blk, 140 swblk_t count, int64_t radix, int skip); 141 static void blst_leaf_free(blmeta_t *scan, swblk_t relblk, int count); 142 static void blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count, 143 int64_t radix, int skip, swblk_t blk); 144 static void blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix, 145 swblk_t skip, blist_t dest, swblk_t count); 146 static swblk_t blst_radix_init(blmeta_t *scan, int64_t radix, 147 int skip, swblk_t count); 148 #ifndef _KERNEL 149 static void blst_radix_print(blmeta_t *scan, swblk_t blk, 150 int64_t radix, int skip, int tab); 151 #endif 152 153 #ifdef _KERNEL 154 static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space"); 155 #endif 156 157 /* 158 * blist_create() - create a blist capable of handling up to the specified 159 * number of blocks 160 * 161 * blocks must be greater then 0 162 * 163 * The smallest blist consists of a single leaf node capable of 164 * managing BLIST_BMAP_RADIX blocks. 
165 */ 166 167 blist_t 168 blist_create(swblk_t blocks) 169 { 170 blist_t bl; 171 int64_t radix; 172 int skip = 0; 173 174 /* 175 * Calculate radix and skip field used for scanning. 176 * 177 * Radix can exceed 32 bits even if swblk_t is limited to 32 bits. 178 */ 179 radix = BLIST_BMAP_RADIX; 180 181 while (radix < blocks) { 182 radix *= BLIST_META_RADIX; 183 skip = (skip + 1) * BLIST_META_RADIX; 184 KKASSERT(skip > 0); 185 } 186 187 bl = kmalloc(sizeof(struct blist), M_SWAP, M_WAITOK); 188 189 bzero(bl, sizeof(*bl)); 190 191 bl->bl_blocks = blocks; 192 bl->bl_radix = radix; 193 bl->bl_skip = skip; 194 bl->bl_rootblks = 1 + 195 blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks); 196 bl->bl_root = kmalloc(sizeof(blmeta_t) * bl->bl_rootblks, M_SWAP, M_WAITOK); 197 198 #if defined(BLIST_DEBUG) 199 kprintf( 200 "BLIST representing %d blocks (%d MB of swap)" 201 ", requiring %dK of ram\n", 202 bl->bl_blocks, 203 bl->bl_blocks * 4 / 1024, 204 (bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024 205 ); 206 kprintf("BLIST raw radix tree contains %d records\n", bl->bl_rootblks); 207 #endif 208 blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks); 209 210 return(bl); 211 } 212 213 void 214 blist_destroy(blist_t bl) 215 { 216 kfree(bl->bl_root, M_SWAP); 217 kfree(bl, M_SWAP); 218 } 219 220 /* 221 * blist_alloc() - reserve space in the block bitmap. Return the base 222 * of a contiguous region or SWAPBLK_NONE if space could 223 * not be allocated. 224 */ 225 226 swblk_t 227 blist_alloc(blist_t bl, swblk_t count) 228 { 229 swblk_t blk = SWAPBLK_NONE; 230 231 if (bl) { 232 if (bl->bl_radix == BLIST_BMAP_RADIX) 233 blk = blst_leaf_alloc(bl->bl_root, 0, count); 234 else 235 blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip); 236 if (blk != SWAPBLK_NONE) 237 bl->bl_free -= count; 238 } 239 return(blk); 240 } 241 242 /* 243 * blist_free() - free up space in the block bitmap. Return the base 244 * of a contiguous region. 
Panic if an inconsistancy is 245 * found. 246 */ 247 248 void 249 blist_free(blist_t bl, swblk_t blkno, swblk_t count) 250 { 251 if (bl) { 252 if (bl->bl_radix == BLIST_BMAP_RADIX) 253 blst_leaf_free(bl->bl_root, blkno, count); 254 else 255 blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0); 256 bl->bl_free += count; 257 } 258 } 259 260 /* 261 * blist_resize() - resize an existing radix tree to handle the 262 * specified number of blocks. This will reallocate 263 * the tree and transfer the previous bitmap to the new 264 * one. When extending the tree you can specify whether 265 * the new blocks are to left allocated or freed. 266 */ 267 268 void 269 blist_resize(blist_t *pbl, swblk_t count, int freenew) 270 { 271 blist_t newbl = blist_create(count); 272 blist_t save = *pbl; 273 274 *pbl = newbl; 275 if (count > save->bl_blocks) 276 count = save->bl_blocks; 277 blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count); 278 279 /* 280 * If resizing upwards, should we free the new space or not? 281 */ 282 if (freenew && count < newbl->bl_blocks) { 283 blist_free(newbl, count, newbl->bl_blocks - count); 284 } 285 blist_destroy(save); 286 } 287 288 #ifdef BLIST_DEBUG 289 290 /* 291 * blist_print() - dump radix tree 292 */ 293 294 void 295 blist_print(blist_t bl) 296 { 297 kprintf("BLIST {\n"); 298 blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4); 299 kprintf("}\n"); 300 } 301 302 #endif 303 304 /************************************************************************ 305 * ALLOCATION SUPPORT FUNCTIONS * 306 ************************************************************************ 307 * 308 * These support functions do all the actual work. They may seem 309 * rather longish, but that's because I've commented them up. The 310 * actual code is straight forward. 311 * 312 */ 313 314 /* 315 * blist_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap). 
316 * 317 * This is the core of the allocator and is optimized for the 1 block 318 * and the BLIST_BMAP_RADIX block allocation cases. Other cases are 319 * somewhat slower. The 1 block allocation case is log2 and extremely 320 * quick. 321 */ 322 323 static swblk_t 324 blst_leaf_alloc(blmeta_t *scan, swblk_t blk, int count) 325 { 326 u_swblk_t orig = scan->u.bmu_bitmap; 327 328 if (orig == 0) { 329 /* 330 * Optimize bitmap all-allocated case. Also, count = 1 331 * case assumes at least 1 bit is free in the bitmap, so 332 * we have to take care of this case here. 333 */ 334 scan->bm_bighint = 0; 335 return(SWAPBLK_NONE); 336 } 337 if (count == 1) { 338 /* 339 * Optimized code to allocate one bit out of the bitmap 340 */ 341 u_swblk_t mask; 342 int j = BLIST_BMAP_RADIX/2; 343 int r = 0; 344 345 mask = (u_swblk_t)-1 >> (BLIST_BMAP_RADIX/2); 346 347 while (j) { 348 if ((orig & mask) == 0) { 349 r += j; 350 orig >>= j; 351 } 352 j >>= 1; 353 mask >>= j; 354 } 355 scan->u.bmu_bitmap &= ~(1 << r); 356 return(blk + r); 357 } 358 if (count <= BLIST_BMAP_RADIX) { 359 /* 360 * non-optimized code to allocate N bits out of the bitmap. 361 * The more bits, the faster the code runs. It will run 362 * the slowest allocating 2 bits, but since there aren't any 363 * memory ops in the core loop (or shouldn't be, anyway), 364 * you probably won't notice the difference. 365 */ 366 int j; 367 int n = BLIST_BMAP_RADIX - count; 368 u_swblk_t mask; 369 370 mask = (u_swblk_t)-1 >> n; 371 372 for (j = 0; j <= n; ++j) { 373 if ((orig & mask) == mask) { 374 scan->u.bmu_bitmap &= ~mask; 375 return(blk + j); 376 } 377 mask = (mask << 1); 378 } 379 } 380 /* 381 * We couldn't allocate count in this subtree, update bighint. 382 */ 383 scan->bm_bighint = count - 1; 384 return(SWAPBLK_NONE); 385 } 386 387 /* 388 * blist_meta_alloc() - allocate at a meta in the radix tree. 389 * 390 * Attempt to allocate at a meta node. If we can't, we update 391 * bighint and return a failure. 
Updating bighint optimize future 392 * calls that hit this node. We have to check for our collapse cases 393 * and we have a few optimizations strewn in as well. 394 */ 395 396 static swblk_t 397 blst_meta_alloc(blmeta_t *scan, swblk_t blk, swblk_t count, 398 int64_t radix, int skip) 399 { 400 int i; 401 int next_skip = ((u_int)skip / BLIST_META_RADIX); 402 403 if (scan->u.bmu_avail == 0) { 404 /* 405 * ALL-ALLOCATED special case 406 */ 407 scan->bm_bighint = count; 408 return(SWAPBLK_NONE); 409 } 410 411 /* 412 * note: radix may exceed 32 bits until first division. 413 */ 414 if (scan->u.bmu_avail == radix) { 415 radix /= BLIST_META_RADIX; 416 417 /* 418 * ALL-FREE special case, initialize uninitialize 419 * sublevel. 420 */ 421 for (i = 1; i <= skip; i += next_skip) { 422 if (scan[i].bm_bighint == (swblk_t)-1) 423 break; 424 if (next_skip == 1) { 425 scan[i].u.bmu_bitmap = (u_swblk_t)-1; 426 scan[i].bm_bighint = BLIST_BMAP_RADIX; 427 } else { 428 scan[i].bm_bighint = (swblk_t)radix; 429 scan[i].u.bmu_avail = (swblk_t)radix; 430 } 431 } 432 } else { 433 radix /= BLIST_META_RADIX; 434 } 435 436 for (i = 1; i <= skip; i += next_skip) { 437 if (count <= scan[i].bm_bighint) { 438 /* 439 * count fits in object 440 */ 441 swblk_t r; 442 if (next_skip == 1) { 443 r = blst_leaf_alloc(&scan[i], blk, count); 444 } else { 445 r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1); 446 } 447 if (r != SWAPBLK_NONE) { 448 scan->u.bmu_avail -= count; 449 if (scan->bm_bighint > scan->u.bmu_avail) 450 scan->bm_bighint = scan->u.bmu_avail; 451 return(r); 452 } 453 } else if (scan[i].bm_bighint == (swblk_t)-1) { 454 /* 455 * Terminator 456 */ 457 break; 458 } else if (count > (swblk_t)radix) { 459 /* 460 * count does not fit in object even if it were 461 * complete free. 462 */ 463 panic("blist_meta_alloc: allocation too large"); 464 } 465 blk += (swblk_t)radix; 466 } 467 468 /* 469 * We couldn't allocate count in this subtree, update bighint. 
470 */ 471 if (scan->bm_bighint >= count) 472 scan->bm_bighint = count - 1; 473 return(SWAPBLK_NONE); 474 } 475 476 /* 477 * BLST_LEAF_FREE() - free allocated block from leaf bitmap 478 * 479 */ 480 481 static void 482 blst_leaf_free(blmeta_t *scan, swblk_t blk, int count) 483 { 484 /* 485 * free some data in this bitmap 486 * 487 * e.g. 488 * 0000111111111110000 489 * \_________/\__/ 490 * v n 491 */ 492 int n = blk & (BLIST_BMAP_RADIX - 1); 493 u_swblk_t mask; 494 495 mask = ((u_swblk_t)-1 << n) & 496 ((u_swblk_t)-1 >> (BLIST_BMAP_RADIX - count - n)); 497 498 if (scan->u.bmu_bitmap & mask) 499 panic("blst_radix_free: freeing free block"); 500 scan->u.bmu_bitmap |= mask; 501 502 /* 503 * We could probably do a better job here. We are required to make 504 * bighint at least as large as the biggest contiguous block of 505 * data. If we just shoehorn it, a little extra overhead will 506 * be incured on the next allocation (but only that one typically). 507 */ 508 scan->bm_bighint = BLIST_BMAP_RADIX; 509 } 510 511 /* 512 * BLST_META_FREE() - free allocated blocks from radix tree meta info 513 * 514 * This support routine frees a range of blocks from the bitmap. 515 * The range must be entirely enclosed by this radix node. If a 516 * meta node, we break the range down recursively to free blocks 517 * in subnodes (which means that this code can free an arbitrary 518 * range whereas the allocation code cannot allocate an arbitrary 519 * range). 520 */ 521 522 static void 523 blst_meta_free(blmeta_t *scan, swblk_t freeBlk, swblk_t count, 524 int64_t radix, int skip, swblk_t blk) 525 { 526 int i; 527 int next_skip = ((u_int)skip / BLIST_META_RADIX); 528 529 #if 0 530 kprintf("FREE (%x,%d) FROM (%x,%lld)\n", 531 freeBlk, count, 532 blk, (long long)radix 533 ); 534 #endif 535 536 /* 537 * NOTE: radix may exceed 32 bits until first division. 538 */ 539 if (scan->u.bmu_avail == 0) { 540 /* 541 * ALL-ALLOCATED special case, with possible 542 * shortcut to ALL-FREE special case. 
543 */ 544 scan->u.bmu_avail = count; 545 scan->bm_bighint = count; 546 547 if (count != radix) { 548 for (i = 1; i <= skip; i += next_skip) { 549 if (scan[i].bm_bighint == (swblk_t)-1) 550 break; 551 scan[i].bm_bighint = 0; 552 if (next_skip == 1) { 553 scan[i].u.bmu_bitmap = 0; 554 } else { 555 scan[i].u.bmu_avail = 0; 556 } 557 } 558 /* fall through */ 559 } 560 } else { 561 scan->u.bmu_avail += count; 562 /* scan->bm_bighint = radix; */ 563 } 564 565 /* 566 * ALL-FREE special case. 567 */ 568 569 if (scan->u.bmu_avail == radix) 570 return; 571 if (scan->u.bmu_avail > radix) 572 panic("blst_meta_free: freeing already free blocks (%d) %d/%lld", count, scan->u.bmu_avail, (long long)radix); 573 574 /* 575 * Break the free down into its components 576 */ 577 578 radix /= BLIST_META_RADIX; 579 580 i = (freeBlk - blk) / (swblk_t)radix; 581 blk += i * (swblk_t)radix; 582 i = i * next_skip + 1; 583 584 while (i <= skip && blk < freeBlk + count) { 585 swblk_t v; 586 587 v = blk + (swblk_t)radix - freeBlk; 588 if (v > count) 589 v = count; 590 591 if (scan->bm_bighint == (swblk_t)-1) 592 panic("blst_meta_free: freeing unexpected range"); 593 594 if (next_skip == 1) { 595 blst_leaf_free(&scan[i], freeBlk, v); 596 } else { 597 blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk); 598 } 599 if (scan->bm_bighint < scan[i].bm_bighint) 600 scan->bm_bighint = scan[i].bm_bighint; 601 count -= v; 602 freeBlk += v; 603 blk += (swblk_t)radix; 604 i += next_skip; 605 } 606 } 607 608 /* 609 * BLIST_RADIX_COPY() - copy one radix tree to another 610 * 611 * Locates free space in the source tree and frees it in the destination 612 * tree. The space may not already be free in the destination. 
613 */ 614 615 static void 616 blst_copy(blmeta_t *scan, swblk_t blk, int64_t radix, 617 swblk_t skip, blist_t dest, swblk_t count) 618 { 619 int next_skip; 620 int i; 621 622 /* 623 * Leaf node 624 */ 625 626 if (radix == BLIST_BMAP_RADIX) { 627 u_swblk_t v = scan->u.bmu_bitmap; 628 629 if (v == (u_swblk_t)-1) { 630 blist_free(dest, blk, count); 631 } else if (v != 0) { 632 int i; 633 634 for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) { 635 if (v & (1 << i)) 636 blist_free(dest, blk + i, 1); 637 } 638 } 639 return; 640 } 641 642 /* 643 * Meta node 644 */ 645 646 if (scan->u.bmu_avail == 0) { 647 /* 648 * Source all allocated, leave dest allocated 649 */ 650 return; 651 } 652 if (scan->u.bmu_avail == radix) { 653 /* 654 * Source all free, free entire dest 655 */ 656 if (count < radix) 657 blist_free(dest, blk, count); 658 else 659 blist_free(dest, blk, (swblk_t)radix); 660 return; 661 } 662 663 664 radix /= BLIST_META_RADIX; 665 next_skip = ((u_int)skip / BLIST_META_RADIX); 666 667 for (i = 1; count && i <= skip; i += next_skip) { 668 if (scan[i].bm_bighint == (swblk_t)-1) 669 break; 670 671 if (count >= (swblk_t)radix) { 672 blst_copy( 673 &scan[i], 674 blk, 675 radix, 676 next_skip - 1, 677 dest, 678 (swblk_t)radix 679 ); 680 count -= (swblk_t)radix; 681 } else { 682 if (count) { 683 blst_copy( 684 &scan[i], 685 blk, 686 radix, 687 next_skip - 1, 688 dest, 689 count 690 ); 691 } 692 count = 0; 693 } 694 blk += (swblk_t)radix; 695 } 696 } 697 698 /* 699 * BLST_RADIX_INIT() - initialize radix tree 700 * 701 * Initialize our meta structures and bitmaps and calculate the exact 702 * amount of space required to manage 'count' blocks - this space may 703 * be considerably less then the calculated radix due to the large 704 * RADIX values we use. 
705 */ 706 707 static swblk_t 708 blst_radix_init(blmeta_t *scan, int64_t radix, int skip, swblk_t count) 709 { 710 int i; 711 int next_skip; 712 swblk_t memindex = 0; 713 714 /* 715 * Leaf node 716 */ 717 718 if (radix == BLIST_BMAP_RADIX) { 719 if (scan) { 720 scan->bm_bighint = 0; 721 scan->u.bmu_bitmap = 0; 722 } 723 return(memindex); 724 } 725 726 /* 727 * Meta node. If allocating the entire object we can special 728 * case it. However, we need to figure out how much memory 729 * is required to manage 'count' blocks, so we continue on anyway. 730 */ 731 732 if (scan) { 733 scan->bm_bighint = 0; 734 scan->u.bmu_avail = 0; 735 } 736 737 radix /= BLIST_META_RADIX; 738 next_skip = ((u_int)skip / BLIST_META_RADIX); 739 740 for (i = 1; i <= skip; i += next_skip) { 741 if (count >= (swblk_t)radix) { 742 /* 743 * Allocate the entire object 744 */ 745 memindex = i + blst_radix_init( 746 ((scan) ? &scan[i] : NULL), 747 radix, 748 next_skip - 1, 749 (swblk_t)radix 750 ); 751 count -= (swblk_t)radix; 752 } else if (count > 0) { 753 /* 754 * Allocate a partial object 755 */ 756 memindex = i + blst_radix_init( 757 ((scan) ? 
&scan[i] : NULL), 758 radix, 759 next_skip - 1, 760 count 761 ); 762 count = 0; 763 } else { 764 /* 765 * Add terminator and break out 766 */ 767 if (scan) 768 scan[i].bm_bighint = (swblk_t)-1; 769 break; 770 } 771 } 772 if (memindex < i) 773 memindex = i; 774 return(memindex); 775 } 776 777 #ifdef BLIST_DEBUG 778 779 static void 780 blst_radix_print(blmeta_t *scan, swblk_t blk, int64_t radix, int skip, int tab) 781 { 782 int i; 783 int next_skip; 784 int lastState = 0; 785 786 if (radix == BLIST_BMAP_RADIX) { 787 kprintf( 788 "%*.*s(%04x,%lld): bitmap %08x big=%d\n", 789 tab, tab, "", 790 blk, (long long)radix, 791 scan->u.bmu_bitmap, 792 scan->bm_bighint 793 ); 794 return; 795 } 796 797 if (scan->u.bmu_avail == 0) { 798 kprintf( 799 "%*.*s(%04x,%lld) ALL ALLOCATED\n", 800 tab, tab, "", 801 blk, 802 (long long)radix 803 ); 804 return; 805 } 806 if (scan->u.bmu_avail == radix) { 807 kprintf( 808 "%*.*s(%04x,%lld) ALL FREE\n", 809 tab, tab, "", 810 blk, 811 (long long)radix 812 ); 813 return; 814 } 815 816 kprintf( 817 "%*.*s(%04x,%lld): subtree (%d/%lld) big=%d {\n", 818 tab, tab, "", 819 blk, (long long)radix, 820 scan->u.bmu_avail, 821 (long long)radix, 822 scan->bm_bighint 823 ); 824 825 radix /= BLIST_META_RADIX; 826 next_skip = ((u_int)skip / BLIST_META_RADIX); 827 tab += 4; 828 829 for (i = 1; i <= skip; i += next_skip) { 830 if (scan[i].bm_bighint == (swblk_t)-1) { 831 kprintf( 832 "%*.*s(%04x,%lld): Terminator\n", 833 tab, tab, "", 834 blk, (long long)radix 835 ); 836 lastState = 0; 837 break; 838 } 839 blst_radix_print( 840 &scan[i], 841 blk, 842 radix, 843 next_skip - 1, 844 tab 845 ); 846 blk += (swblk_t)radix; 847 } 848 tab -= 4; 849 850 kprintf( 851 "%*.*s}\n", 852 tab, tab, "" 853 ); 854 } 855 856 #endif 857 858 #ifdef BLIST_DEBUG 859 860 int 861 main(int ac, char **av) 862 { 863 int size = 1024; 864 int i; 865 blist_t bl; 866 867 for (i = 1; i < ac; ++i) { 868 const char *ptr = av[i]; 869 if (*ptr != '-') { 870 size = strtol(ptr, NULL, 0); 871 
continue; 872 } 873 ptr += 2; 874 fprintf(stderr, "Bad option: %s\n", ptr - 2); 875 exit(1); 876 } 877 bl = blist_create(size); 878 blist_free(bl, 0, size); 879 880 for (;;) { 881 char buf[1024]; 882 swblk_t da = 0; 883 swblk_t count = 0; 884 885 886 kprintf("%d/%d/%lld> ", 887 bl->bl_free, size, (long long)bl->bl_radix); 888 fflush(stdout); 889 if (fgets(buf, sizeof(buf), stdin) == NULL) 890 break; 891 switch(buf[0]) { 892 case 'r': 893 if (sscanf(buf + 1, "%d", &count) == 1) { 894 blist_resize(&bl, count, 1); 895 size = count; 896 } else { 897 kprintf("?\n"); 898 } 899 case 'p': 900 blist_print(bl); 901 break; 902 case 'a': 903 if (sscanf(buf + 1, "%d", &count) == 1) { 904 swblk_t blk = blist_alloc(bl, count); 905 kprintf(" R=%04x\n", blk); 906 } else { 907 kprintf("?\n"); 908 } 909 break; 910 case 'f': 911 if (sscanf(buf + 1, "%x %d", &da, &count) == 2) { 912 blist_free(bl, da, count); 913 } else { 914 kprintf("?\n"); 915 } 916 break; 917 case '?': 918 case 'h': 919 puts( 920 "p -print\n" 921 "a %d -allocate\n" 922 "f %x %d -free\n" 923 "r %d -resize\n" 924 "h/? -help" 925 ); 926 break; 927 default: 928 kprintf("?\n"); 929 break; 930 } 931 } 932 return(0); 933 } 934 935 void 936 panic(const char *ctl, ...) 937 { 938 __va_list va; 939 940 __va_start(va, ctl); 941 vfprintf(stderr, ctl, va); 942 fprintf(stderr, "\n"); 943 __va_end(va); 944 exit(1); 945 } 946 947 #endif 948 949