1 /* $OpenBSD: subr_extent.c,v 1.18 2001/08/06 11:19:26 art Exp $ */ 2 /* $NetBSD: subr_extent.c,v 1.7 1996/11/21 18:46:34 cgd Exp $ */ 3 4 /*- 5 * Copyright (c) 1996, 1998 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Jason R. Thorpe and Matthias Drochner. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * General purpose extent manager. 42 */ 43 44 #ifdef _KERNEL 45 #include <sys/param.h> 46 #include <sys/extent.h> 47 #include <sys/malloc.h> 48 #include <sys/time.h> 49 #include <sys/systm.h> 50 #include <sys/proc.h> 51 #include <sys/queue.h> 52 #include <sys/pool.h> 53 #include <ddb/db_output.h> 54 #else 55 /* 56 * user-land definitions, so it can fit into a testing harness. 57 */ 58 #include <sys/param.h> 59 #include <sys/extent.h> 60 #include <sys/queue.h> 61 #include <errno.h> 62 #include <stdlib.h> 63 #include <stdio.h> 64 65 #define malloc(s, t, flags) malloc(s) 66 #define free(p, t) free(p) 67 #define tsleep(chan, pri, str, timo) (EWOULDBLOCK) 68 #define wakeup(chan) ((void)0) 69 #define db_printf printf 70 #endif 71 72 static void extent_insert_and_optimize __P((struct extent *, u_long, u_long, 73 int, struct extent_region *, struct extent_region *)); 74 static struct extent_region *extent_alloc_region_descriptor 75 __P((struct extent *, int)); 76 static void extent_free_region_descriptor __P((struct extent *, 77 struct extent_region *)); 78 static void extent_register __P((struct extent *)); 79 80 /* 81 * Macro to align to an arbitrary power-of-two boundary. 82 */ 83 #define EXTENT_ALIGN(_start, _align, _skew) \ 84 (((((_start) - (_skew)) + ((_align) - 1)) & (-(_align))) + (_skew)) 85 86 87 /* 88 * Register the extent on a doubly linked list. 89 * Should work, no? 
90 */ 91 static LIST_HEAD(listhead, extent) ext_list; 92 static struct listhead *ext_listp; 93 94 static void 95 extent_register(ex) 96 struct extent *ex; 97 { 98 /* Is this redundant? */ 99 if (ext_listp == NULL){ 100 LIST_INIT(&ext_list); 101 ext_listp = &ext_list; 102 } 103 104 /* Insert into list */ 105 LIST_INSERT_HEAD(ext_listp, ex, ex_link); 106 } 107 108 struct pool ex_region_pl; 109 110 static void 111 extent_pool_init(void) 112 { 113 static int inited; 114 115 if (!inited) { 116 pool_init(&ex_region_pl, sizeof(struct extent_region), 0, 0, 0, 117 "extentpl", 0, 0, 0, 0); 118 inited = 1; 119 } 120 } 121 122 /* 123 * Find a given extent, and return a pointer to 124 * it so that other extent functions can be used 125 * on it. 126 * 127 * Returns NULL on failure. 128 */ 129 struct extent * 130 extent_find(name) 131 char *name; 132 { 133 struct extent *ep; 134 135 for(ep = ext_listp->lh_first; ep != NULL; ep = ep->ex_link.le_next){ 136 if (!strcmp(ep->ex_name, name)) 137 return(ep); 138 } 139 140 return(NULL); 141 } 142 143 #ifdef DDB 144 /* 145 * Print out all extents registered. This is used in 146 * DDB show extents 147 */ 148 void 149 extent_print_all(void) 150 { 151 struct extent *ep; 152 153 for(ep = ext_listp->lh_first; ep != NULL; ep = ep->ex_link.le_next){ 154 extent_print(ep); 155 } 156 } 157 #endif 158 159 /* 160 * Allocate and initialize an extent map. 161 */ 162 struct extent * 163 extent_create(name, start, end, mtype, storage, storagesize, flags) 164 char *name; 165 u_long start, end; 166 int mtype; 167 caddr_t storage; 168 size_t storagesize; 169 int flags; 170 { 171 struct extent *ex; 172 caddr_t cp = storage; 173 size_t sz = storagesize; 174 struct extent_region *rp; 175 int fixed_extent = (storage != NULL); 176 177 #ifdef DIAGNOSTIC 178 /* Check arguments. 
*/ 179 if (name == NULL) 180 panic("extent_create: name == NULL"); 181 if (end < start) { 182 printf("extent_create: extent `%s', start 0x%lx, end 0x%lx\n", 183 name, start, end); 184 panic("extent_create: end < start"); 185 } 186 if (fixed_extent && (storagesize < sizeof(struct extent_fixed))) 187 panic("extent_create: fixed extent, bad storagesize 0x%x", 188 storagesize); 189 if (fixed_extent == 0 && (storagesize != 0 || storage != NULL)) 190 panic("extent_create: storage provided for non-fixed"); 191 #endif 192 193 extent_pool_init(); 194 195 /* Allocate extent descriptor. */ 196 if (fixed_extent) { 197 struct extent_fixed *fex; 198 199 bzero(storage, storagesize); 200 201 /* 202 * Align all descriptors on "long" boundaries. 203 */ 204 fex = (struct extent_fixed *)cp; 205 ex = (struct extent *)fex; 206 cp += ALIGN(sizeof(struct extent_fixed)); 207 sz -= ALIGN(sizeof(struct extent_fixed)); 208 fex->fex_storage = storage; 209 fex->fex_storagesize = storagesize; 210 211 /* 212 * In a fixed extent, we have to pre-allocate region 213 * descriptors and place them in the extent's freelist. 214 */ 215 LIST_INIT(&fex->fex_freelist); 216 while (sz >= ALIGN(sizeof(struct extent_region))) { 217 rp = (struct extent_region *)cp; 218 cp += ALIGN(sizeof(struct extent_region)); 219 sz -= ALIGN(sizeof(struct extent_region)); 220 LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link); 221 } 222 } else { 223 ex = (struct extent *)malloc(sizeof(struct extent), 224 mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT); 225 if (ex == NULL) 226 return (NULL); 227 } 228 229 /* Fill in the extent descriptor and return it to the caller. */ 230 LIST_INIT(&ex->ex_regions); 231 ex->ex_name = name; 232 ex->ex_start = start; 233 ex->ex_end = end; 234 ex->ex_mtype = mtype; 235 ex->ex_flags = 0; 236 if (fixed_extent) 237 ex->ex_flags |= EXF_FIXED; 238 if (flags & EX_NOCOALESCE) 239 ex->ex_flags |= EXF_NOCOALESCE; 240 241 extent_register(ex); 242 return (ex); 243 } 244 245 /* 246 * Destroy an extent map. 
 */
void
extent_destroy(ex)
	struct extent *ex;
{
	struct extent_region *rp, *orp;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_destroy: NULL extent");
#endif

	/* Free all region descriptors in extent. */
	for (rp = ex->ex_regions.lh_first; rp != NULL; ) {
		/* Advance the cursor before the descriptor is freed. */
		orp = rp;
		rp = rp->er_link.le_next;
		LIST_REMOVE(orp, er_link);
		extent_free_region_descriptor(ex, orp);
	}

	/* Remove from the list of all extents. */
	LIST_REMOVE(ex, ex_link);

	/* If we're not a fixed extent, free the extent descriptor itself. */
	if ((ex->ex_flags & EXF_FIXED) == 0)
		free(ex, ex->ex_mtype);
}

/*
 * Insert a region descriptor into the sorted region list after the
 * entry "after" or at the head of the list (if "after" is NULL).
 * The region descriptor we insert is passed in "rp".  We must
 * allocate the region descriptor before calling this function!
 * If we don't need the region descriptor, it will be freed here.
 */
static void
extent_insert_and_optimize(ex, start, size, flags, after, rp)
	struct extent *ex;
	u_long start, size;
	int flags;
	struct extent_region *after, *rp;
{
	struct extent_region *nextr;
	int appended = 0;

	if (after == NULL) {
		/*
		 * We're the first in the region list.  If there's
		 * a region after us, attempt to coalesce to save
		 * descriptor overhead.
		 */
		if (((ex->ex_flags & EXF_NOCOALESCE) == 0) &&
		    (ex->ex_regions.lh_first != NULL) &&
		    ((start + size) == ex->ex_regions.lh_first->er_start)) {
			/*
			 * We can coalesce.  Prepend us to the first region.
			 */
			ex->ex_regions.lh_first->er_start = start;
			extent_free_region_descriptor(ex, rp);
			return;
		}

		/*
		 * Can't coalesce.  Fill in the region descriptor
		 * in, and insert us at the head of the region list.
		 */
		rp->er_start = start;
		rp->er_end = start + (size - 1);
		LIST_INSERT_HEAD(&ex->ex_regions, rp, er_link);
		return;
	}

	/*
	 * If EXF_NOCOALESCE is set, coalescing is disallowed.
	 */
	if (ex->ex_flags & EXF_NOCOALESCE)
		goto cant_coalesce;

	/*
	 * Attempt to coalesce with the region before us.
	 */
	if ((after->er_end + 1) == start) {
		/*
		 * We can coalesce.  Append ourselves and make
		 * note of it.
		 */
		after->er_end = start + (size - 1);
		appended = 1;
	}

	/*
	 * Attempt to coalesce with the region after us.
	 */
	if ((after->er_link.le_next != NULL) &&
	    ((start + size) == after->er_link.le_next->er_start)) {
		/*
		 * We can coalesce.  Note that if we appended ourselves
		 * to the previous region, we exactly fit the gap, and
		 * can free the "next" region descriptor.
		 */
		if (appended) {
			/*
			 * Yup, we can free it up.  Merge the next region
			 * into "after" and drop its descriptor.
			 */
			after->er_end = after->er_link.le_next->er_end;
			nextr = after->er_link.le_next;
			LIST_REMOVE(nextr, er_link);
			extent_free_region_descriptor(ex, nextr);
		} else {
			/*
			 * Nope, just prepend us to the next region.
			 */
			after->er_link.le_next->er_start = start;
		}

		extent_free_region_descriptor(ex, rp);
		return;
	}

	/*
	 * We weren't able to coalesce with the next region, but
	 * we don't need to allocate a region descriptor if we
	 * appended ourselves to the previous region.
	 */
	if (appended) {
		extent_free_region_descriptor(ex, rp);
		return;
	}

 cant_coalesce:

	/*
	 * Fill in the region descriptor and insert ourselves
	 * into the region list.
	 */
	rp->er_start = start;
	rp->er_end = start + (size - 1);
	LIST_INSERT_AFTER(after, rp, er_link);
}

/*
 * Allocate a specific region in an extent map.
390 */ 391 int 392 extent_alloc_region(ex, start, size, flags) 393 struct extent *ex; 394 u_long start, size; 395 int flags; 396 { 397 struct extent_region *rp, *last, *myrp; 398 u_long end = start + (size - 1); 399 int error; 400 401 #ifdef DIAGNOSTIC 402 /* Check arguments. */ 403 if (ex == NULL) 404 panic("extent_alloc_region: NULL extent"); 405 if (size < 1) { 406 printf("extent_alloc_region: extent `%s', size 0x%lx\n", 407 ex->ex_name, size); 408 panic("extent_alloc_region: bad size"); 409 } 410 if (end < start) { 411 printf( 412 "extent_alloc_region: extent `%s', start 0x%lx, size 0x%lx\n", 413 ex->ex_name, start, size); 414 panic("extent_alloc_region: overflow"); 415 } 416 #endif 417 418 /* 419 * Make sure the requested region lies within the 420 * extent. 421 */ 422 if ((start < ex->ex_start) || (end > ex->ex_end)) { 423 #ifdef DIAGNOSTIC 424 printf("extent_alloc_region: extent `%s' (0x%lx - 0x%lx)\n", 425 ex->ex_name, ex->ex_start, ex->ex_end); 426 printf("extent_alloc_region: start 0x%lx, end 0x%lx\n", 427 start, end); 428 panic("extent_alloc_region: region lies outside extent"); 429 #else 430 return (EINVAL); 431 #endif 432 } 433 434 /* 435 * Allocate the region descriptor. It will be freed later 436 * if we can coalesce with another region. 437 */ 438 myrp = extent_alloc_region_descriptor(ex, flags); 439 if (myrp == NULL) { 440 #ifdef DIAGNOSTIC 441 printf( 442 "extent_alloc_region: can't allocate region descriptor\n"); 443 #endif 444 return (ENOMEM); 445 } 446 447 alloc_start: 448 /* 449 * Attempt to place ourselves in the desired area of the 450 * extent. We save ourselves some work by keeping the list sorted. 451 * In other words, if the start of the current region is greater 452 * than the end of our region, we don't have to search any further. 453 */ 454 455 /* 456 * Keep a pointer to the last region we looked at so 457 * that we don't have to traverse the list again when 458 * we insert ourselves. 
If "last" is NULL when we 459 * finally insert ourselves, we go at the head of the 460 * list. See extent_insert_and_optimize() for details. 461 */ 462 last = NULL; 463 464 for (rp = ex->ex_regions.lh_first; rp != NULL; 465 rp = rp->er_link.le_next) { 466 if (rp->er_start > end) { 467 /* 468 * We lie before this region and don't 469 * conflict. 470 */ 471 break; 472 } 473 474 /* 475 * The current region begins before we end. 476 * Check for a conflict. 477 */ 478 if (rp->er_end >= start) { 479 /* 480 * We conflict. If we can (and want to) wait, 481 * do so. 482 */ 483 if (flags & EX_WAITSPACE) { 484 ex->ex_flags |= EXF_WANTED; 485 error = tsleep(ex, 486 PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), 487 "extnt", 0); 488 if (error) 489 return (error); 490 goto alloc_start; 491 } 492 extent_free_region_descriptor(ex, myrp); 493 return (EAGAIN); 494 } 495 /* 496 * We don't conflict, but this region lies before 497 * us. Keep a pointer to this region, and keep 498 * trying. 499 */ 500 last = rp; 501 } 502 503 /* 504 * We don't conflict with any regions. "last" points 505 * to the region we fall after, or is NULL if we belong 506 * at the beginning of the region list. Insert ourselves. 507 */ 508 extent_insert_and_optimize(ex, start, size, flags, last, myrp); 509 return (0); 510 } 511 512 /* 513 * Macro to check (x + y) <= z. This check is designed to fail 514 * if an overflow occurs. 515 */ 516 #define LE_OV(x, y, z) ((((x) + (y)) >= (x)) && (((x) + (y)) <= (z))) 517 518 /* 519 * Allocate a region in an extent map subregion. 520 * 521 * If EX_FAST is specified, we return the first fit in the map. 522 * Otherwise, we try to minimize fragmentation by finding the 523 * smallest gap that will hold the request. 524 * 525 * The allocated region is aligned to "alignment", which must be 526 * a power of 2. 
 */
int
extent_alloc_subregion(ex, substart, subend, size, alignment, skew, boundary,
    flags, result)
	struct extent *ex;
	u_long substart, subend, size, alignment, skew, boundary;
	int flags;
	u_long *result;
{
	struct extent_region *rp, *myrp, *last, *bestlast;
	u_long newstart, newend, beststart, bestovh, ovh;
	u_long dontcross;
	int error;

#ifdef DIAGNOSTIC
	/* Check arguments. */
	if (ex == NULL)
		panic("extent_alloc_subregion: NULL extent");
	if (result == NULL)
		panic("extent_alloc_subregion: NULL result pointer");
	if ((substart < ex->ex_start) || (substart > ex->ex_end) ||
	    (subend > ex->ex_end) || (subend < ex->ex_start)) {
		printf("extent_alloc_subregion: extent `%s', ex_start 0x%lx, ex_end 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end);
		printf("extent_alloc_subregion: substart 0x%lx, subend 0x%lx\n",
		    substart, subend);
		panic("extent_alloc_subregion: bad subregion");
	}
	if ((size < 1) || ((size - 1) > (subend - substart))) {
		printf("extent_alloc_subregion: extent `%s', size 0x%lx\n",
		    ex->ex_name, size);
		panic("extent_alloc_subregion: bad size");
	}
	if (alignment == 0)
		panic("extent_alloc_subregion: bad alignment");
	if (boundary && (boundary < size)) {
		printf(
		    "extent_alloc_subregion: extent `%s', size 0x%lx, "
		    "boundary 0x%lx\n", ex->ex_name, size, boundary);
		panic("extent_alloc_subregion: bad boundary");
	}
#endif

	/*
	 * Allocate the region descriptor.  It will be freed later
	 * if we can coalesce with another region.
	 */
	myrp = extent_alloc_region_descriptor(ex, flags);
	if (myrp == NULL) {
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: can't allocate region descriptor\n");
#endif
		return (ENOMEM);
	}

 alloc_start:
	/*
	 * Keep a pointer to the last region we looked at so
	 * that we don't have to traverse the list again when
	 * we insert ourselves.  If "last" is NULL when we
	 * finally insert ourselves, we go at the head of the
	 * list.  See extent_insert_and_optimize() for details.
	 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
		/* Alignment pushed the candidate start below the extent. */
#ifdef DIAGNOSTIC
		printf(
		    "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1, boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;

#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size, rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		/*
		 * Check that the current region don't run past the
		 * end of the subregion.
		 */
		if (!LE_OV(newstart, (size - 1), subend))
			goto fail;

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1, boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;

#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions have
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.
Wait for it to free up, 849 * if possible. 850 */ 851 if (flags & EX_WAITSPACE) { 852 ex->ex_flags |= EXF_WANTED; 853 error = tsleep(ex, 854 PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0), "extnt", 0); 855 if (error) 856 return (error); 857 goto alloc_start; 858 } 859 860 extent_free_region_descriptor(ex, myrp); 861 return (EAGAIN); 862 863 found: 864 /* 865 * Insert ourselves into the region list. 866 */ 867 extent_insert_and_optimize(ex, newstart, size, flags, last, myrp); 868 *result = newstart; 869 return (0); 870 } 871 872 int 873 extent_free(ex, start, size, flags) 874 struct extent *ex; 875 u_long start, size; 876 int flags; 877 { 878 struct extent_region *rp, *nrp = NULL; 879 u_long end = start + (size - 1); 880 int exflags; 881 882 #ifdef DIAGNOSTIC 883 /* Check arguments. */ 884 if (ex == NULL) 885 panic("extent_free: NULL extent"); 886 if ((start < ex->ex_start) || (start > ex->ex_end)) { 887 extent_print(ex); 888 printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n", 889 ex->ex_name, start, size); 890 panic("extent_free: extent `%s', region not within extent", 891 ex->ex_name); 892 } 893 /* Check for an overflow. */ 894 if (end < start) { 895 extent_print(ex); 896 printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n", 897 ex->ex_name, start, size); 898 panic("extent_free: overflow"); 899 } 900 #endif 901 902 /* 903 * If we're allowing coalescing, we must allocate a region 904 * descriptor now, since it might block. 905 * 906 * XXX Make a static, create-time flags word, so we don't 907 * XXX have to lock to read it! 908 */ 909 exflags = ex->ex_flags; 910 911 if ((exflags & EXF_NOCOALESCE) == 0) { 912 /* Allocate a region descriptor. */ 913 nrp = extent_alloc_region_descriptor(ex, flags); 914 if (nrp == NULL) 915 return (ENOMEM); 916 } 917 918 /* 919 * Find region and deallocate. Several possibilities: 920 * 921 * 1. (start == er_start) && (end == er_end): 922 * Free descriptor. 923 * 924 * 2. 
(start == er_start) && (end < er_end): 925 * Adjust er_start. 926 * 927 * 3. (start > er_start) && (end == er_end): 928 * Adjust er_end. 929 * 930 * 4. (start > er_start) && (end < er_end): 931 * Fragment region. Requires descriptor alloc. 932 * 933 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag 934 * is not set. 935 */ 936 for (rp = ex->ex_regions.lh_first; rp != NULL; 937 rp = rp->er_link.le_next) { 938 /* 939 * Save ourselves some comparisons; does the current 940 * region end before chunk to be freed begins? If so, 941 * then we haven't found the appropriate region descriptor. 942 */ 943 if (rp->er_end < start) 944 continue; 945 946 /* 947 * Save ourselves some traversal; does the current 948 * region begin after the chunk to be freed ends? If so, 949 * then we've already passed any possible region descriptors 950 * that might have contained the chunk to be freed. 951 */ 952 if (rp->er_start > end) 953 break; 954 955 /* Case 1. */ 956 if ((start == rp->er_start) && (end == rp->er_end)) { 957 LIST_REMOVE(rp, er_link); 958 extent_free_region_descriptor(ex, rp); 959 goto done; 960 } 961 962 /* 963 * The following cases all require that EXF_NOCOALESCE 964 * is not set. 965 */ 966 if (ex->ex_flags & EXF_NOCOALESCE) 967 continue; 968 969 /* Case 2. */ 970 if ((start == rp->er_start) && (end < rp->er_end)) { 971 rp->er_start = (end + 1); 972 goto done; 973 } 974 975 /* Case 3. */ 976 if ((start > rp->er_start) && (end == rp->er_end)) { 977 rp->er_end = (start - 1); 978 goto done; 979 } 980 981 /* Case 4. */ 982 if ((start > rp->er_start) && (end < rp->er_end)) { 983 /* Fill in new descriptor. */ 984 nrp->er_start = end + 1; 985 nrp->er_end = rp->er_end; 986 987 /* Adjust current descriptor. */ 988 rp->er_end = start - 1; 989 990 /* Insert new descriptor after current. 
 */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
#if defined(DIAGNOSTIC) || defined(DDB)
	extent_print(ex);
#endif
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	/* Free the pre-allocated descriptor if it went unused. */
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	/* Wake anyone sleeping in extent_alloc_*() for space. */
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	return (0);
}

/*
 * Allocate a region descriptor for "ex".  Fixed extents draw from
 * their private freelist (optionally sleeping for one, or falling
 * back to the pool if EX_MALLOCOK); other extents draw from the
 * region pool.  Returns NULL on failure.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		while (fex->fex_freelist.lh_first == NULL) {
			if (flags & EX_MALLOCOK)
				goto alloc;

			if ((flags & EX_WAITOK) == 0)
				return (NULL);
			ex->ex_flags |= EXF_FLWANTED;
			if (tsleep(&fex->fex_freelist,
			    PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0))
				return (NULL);
		}
		rp = fex->fex_freelist.lh_first;
		LIST_REMOVE(rp, er_link);

		/*
		 * Don't muck with flags after pulling it off the
		 * freelist; it may be a dynamically allocated
		 * region pointer that was kindly given to us,
		 * and we need to preserve that information.
		 */

		return (rp);
	}

 alloc:
	s = splimp();
	rp = pool_get(&ex_region_pl, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);
	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Return a region descriptor to its source: the extent's freelist
 * for fixed extents (waking any waiter), or the region pool for
 * dynamically allocated descriptors.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splimp();
				pool_put(&ex_region_pl, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splimp();
	pool_put(&ex_region_pl, rp);
	splx(s);
}

#ifndef DDB
#define db_printf printf
#endif

#if defined(DIAGNOSTIC) || defined(DDB)
/*
 * Print an extent's bounds, flags, and all allocated regions.
 */
void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	db_printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		db_printf("     0x%lx - 0x%lx\n", rp->er_start, rp->er_end);
}
#endif