/*
 * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
 *
 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and by
 * Venkatesh Srinivas <me@endeavour.zapto.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
 */
/*
 * This module implements a slab allocator drop-in replacement for the
 * libc malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and overhead losses are limited to a fixed
 * worst-case amount.
 *
 * The slab allocator does not have to pre-initialize the list of
 * free chunks for each zone, and the underlying VM will not be
 * touched at all beyond the zone header until an actual allocation
 * needs it.
 *
 * Slab management and locking is done on a per-zone basis.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
 * is used to locate them for free().  One- and two-page allocations use
 * the zone mechanic to avoid excessive mmap()/munmap() calls.
 *
 * API FEATURES AND SIDE EFFECTS
 *
 *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
 *	Above that, power-of-2 sized allocations are page-aligned.  Non
 *	power-of-2 sized allocations are aligned the same as the chunk
 *	size for their zone.
 *    + malloc(0) returns a special non-NULL value
 *    + ability to allocate arbitrarily large chunks of memory
 *    + realloc will reuse the passed pointer if possible, within the
 *	limitations of the zone chunking.
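 *
 * As a worked example of the table and the alignment rules above
 * (derived from zoneindex() below, not additional behavior): a
 * malloc(100) is rounded up to 104, the next multiple of the 8-byte
 * chunking for the 0-127 band, and is served 8-byte aligned from the
 * 104-byte zone; malloc(1000) is rounded up to 1024 and, being a
 * power of 2 no larger than a page, is returned 1024-byte aligned.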
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'. See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
 *	USENIX Technical Conference. USENIX Association.
 *
 * TUNING
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 * 'U' / ['u']	Generate / do not generate utrace entries for ktrace(1).
 *		This will generate utrace events for all malloc,
 *		realloc, and free calls.  There are tools (mtrplay) to
 *		replay an allocation pattern or to graph heap structure
 *		(mtrgraph) which can interpret these logs.
 * 'Z' / ['z']	Zero out / do not zero all allocations.
 *		Each new byte of memory allocated by malloc, realloc, or
 *		reallocf will be initialized to 0.  This is intended for
 *		debugging and will affect performance negatively.
 * 'H' / ['h']	Pass a hint to the kernel about pages unused by the
 *		allocation functions.
 */

/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */

#include "libc_private.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>

#include "spinlock.h"
#include "un-namespace.h"

/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc *next;	/* hash link */
	void	*base;		/* base pointer */
	u_long	bytes;		/* bytes allocated */
} *bigalloc_t;

/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_ZONE_SIZE	(64 * 1024)
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */

#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif

/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk *c_Next;
} *slchunk_t;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
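 * Zones are ZoneSize-aligned, so the owning zone of any small chunk can
 * be recovered by masking the chunk's address, as _slabfree() does below:
 *
 *	slzone_t z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);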
 */
struct slglobaldata;

typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;
	int		z_Flags;
	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
#if defined(INVARIANTS)
	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
#endif
} *slzone_t;

typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
	int		JunkIndex;
} *slglobaldata_t;

#define SLZF_UNOTZEROD		0x0001

#define FASTSLABREALLOC		0x02

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 *
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 *	    not be larger than 64.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)

#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)

#define SAFLAG_ZERO	0x0001
#define SAFLAG_PASSIVE	0x0002

/*
 * Thread control
 */

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
				_mpanic("assertion: %s in %s",	\
					#exp, __func__);	\
			} while (0)

/*
 * Magazines
 */

#define M_MAX_ROUNDS	64
#define M_ZONE_ROUNDS	64
#define M_LOW_ROUNDS	32
#define M_INIT_ROUNDS	8
#define M_BURST_FACTOR	8
#define M_BURST_NSCALE	2

#define M_BURST		0x0001
#define M_BURST_EARLY	0x0002

struct magazine {
	SLIST_ENTRY(magazine) nextmagazine;

	int		flags;
	int		capacity;	/* Max rounds in this magazine */
	int		rounds;		/* Current number of free rounds */
	int		burst_factor;	/* Number of blocks to prefill with */
	int		low_factor;	/* Free till low_factor from full mag */
	void		*objects[M_MAX_ROUNDS];
};

SLIST_HEAD(magazinelist, magazine);

static spinlock_t zone_mag_lock;
static struct magazine zone_magazine = {
	.flags = M_BURST | M_BURST_EARLY,
	.capacity = M_ZONE_ROUNDS,
	.rounds = 0,
	.burst_factor = M_BURST_FACTOR,
	.low_factor = M_LOW_ROUNDS
};

#define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
#define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
#define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
#define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)

/*
 * Each thread will have a pair of magazines per size-class (NZONES).
 * The loaded magazine will support immediate allocations; the previous
 * magazine will either be full or empty and can be swapped at need.
 */
typedef struct magazine_pair {
	struct magazine	*loaded;
	struct magazine	*prev;
} magazine_pair;

/* A depot is a collection of magazines for a single zone. */
typedef struct magazine_depot {
	struct magazinelist full;
	struct magazinelist empty;
	spinlock_t	lock;
} magazine_depot;

typedef struct thr_mags {
	magazine_pair	mags[NZONES];
	struct magazine	*newmag;
	int		init;
} thr_mags;

/*
 * With this attribute set, do not require a function call for accessing
 * this variable when the code is compiled -fPIC.
 */
#define TLS_ATTRIBUTE	__attribute__((tls_model("initial-exec")))

static int mtmagazine_free_live;
static __thread thr_mags thread_mags TLS_ATTRIBUTE;
static pthread_key_t thread_mags_key;
static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
static magazine_depot depots[NZONES];

/*
 * Fixed globals (not per-cpu)
 */
static const int ZoneSize = ZALLOC_ZONE_SIZE;
static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;

static int opt_madvise = 0;
static int opt_utrace = 0;
static int g_malloc_flags = 0;
static struct slglobaldata SLGlobalData;
static bigalloc_t bigalloc_array[BIGHSIZE];
static spinlock_t bigspin_array[BIGXSIZE];
static int malloc_panic;
static int malloc_dummy_pointer;

static const int32_t weirdary[16] = {
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
};

static void *_slaballoc(size_t size, int flags);
static void *_slabrealloc(void *ptr, size_t size);
static void _slabfree(void *ptr, int, bigalloc_t *);
static void *_vmem_alloc(size_t bytes, size_t align, int flags);
static void _vmem_free(void *ptr, size_t bytes);
static void *magazine_alloc(struct magazine *, int *);
static int magazine_free(struct magazine *, void *);
static void *mtmagazine_alloc(int zi);
static int mtmagazine_free(int zi, void *);
static void mtmagazine_init(void);
static void mtmagazine_destructor(void *);
static slzone_t zone_alloc(int flags);
static void zone_free(void *z);
static void _mpanic(const char *ctl, ...) __printflike(1, 2);
static void malloc_init(void) __constructor(0);
#if defined(INVARIANTS)
static void chunk_mark_allocated(slzone_t z, void *chunk);
static void chunk_mark_free(slzone_t z, void *chunk);
#endif

struct nmalloc_utrace {
	void *p;
	size_t s;
	void *r;
};

#define UTRACE(a, b, c)						\
	if (opt_utrace) {					\
		struct nmalloc_utrace ut = {			\
			.p = (a),				\
			.s = (b),				\
			.r = (c)				\
		};						\
		utrace(&ut, sizeof(ut));			\
	}

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without SAFLAG_ZERO is initialized to -1.
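 * (A chunk still full of 0xffffffff words in a core dump is therefore a
 * strong hint that it was allocated but never written to.)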
 */
static int use_malloc_pattern;
#endif

static void
malloc_init(void)
{
	const char *p = NULL;

	if (issetugid() == 0)
		p = getenv("MALLOC_OPTIONS");

	for (; p != NULL && *p != '\0'; p++) {
		switch(*p) {
		case 'u':	opt_utrace = 0;			break;
		case 'U':	opt_utrace = 1;			break;
		case 'h':	opt_madvise = 0;		break;
		case 'H':	opt_madvise = 1;		break;
		case 'z':	g_malloc_flags = 0;		break;
		case 'Z':	g_malloc_flags = SAFLAG_ZERO;	break;
		default:
			break;
		}
	}

	UTRACE((void *) -1, 0, NULL);
}

/*
 * We have to install a handler for nmalloc thread teardowns when
 * the thread is created.  We cannot delay this because destructors in
 * sophisticated userland programs can call malloc() for the first time
 * during their thread exit.
 *
 * This routine is called directly from pthreads.
 */
void
_nmalloc_thr_init(void)
{
	thr_mags *tp;

	/*
	 * Disallow mtmagazine operations until the mtmagazine is
	 * initialized.
	 */
	tp = &thread_mags;
	tp->init = -1;

	if (mtmagazine_free_live == 0) {
		mtmagazine_free_live = 1;
		pthread_once(&thread_mags_once, mtmagazine_init);
	}
	pthread_setspecific(thread_mags_key, tp);
	tp->init = 1;
}

/*
 * Thread locks.
 */
static __inline void
slgd_lock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINLOCK(&slgd->Spinlock);
}

static __inline void
slgd_unlock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINUNLOCK(&slgd->Spinlock);
}

static __inline void
depot_lock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINLOCK(&dp->lock);
}

static __inline void
depot_unlock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINUNLOCK(&dp->lock);
}

static __inline void
zone_magazine_lock(void)
{
	if (__isthreaded)
		_SPINLOCK(&zone_mag_lock);
}

static __inline void
zone_magazine_unlock(void)
{
	if (__isthreaded)
		_SPINUNLOCK(&zone_mag_lock);
}

static __inline void
swap_mags(magazine_pair *mp)
{
	struct magazine *tmp;

	tmp = mp->loaded;
	mp->loaded = mp->prev;
	mp->prev = tmp;
}

/*
 * bigalloc hashing and locking support.
 *
 * Return an unmasked hash code for the passed pointer.
 */
static __inline int
_bigalloc_hash(void *ptr)
{
	int hv;

	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
	      ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));

	return(hv);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 */
static __inline bigalloc_t *
bigalloc_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (__isthreaded)
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 *
 * BUT, if the hash chain is empty, just return NULL and do not bother
 * to lock anything.
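 *
 * The unlocked test is safe: if ptr really is a tracked big allocation,
 * its chain must already contain at least one entry (ptr's own), so an
 * empty chain head proves ptr is not in the table and the caller can
 * fall through to the slab-zone path without taking the lock.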
 */
static __inline bigalloc_t *
bigalloc_check_and_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (*bigp == NULL)
		return(NULL);
	if (__isthreaded) {
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	}
	return(bigp);
}

static __inline void
bigalloc_unlock(void *ptr)
{
	int hv;

	if (__isthreaded) {
		hv = _bigalloc_hash(ptr);
		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */

	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %zu", n);
	return(0);
}

/*
 * malloc() - call internal slab allocator
 */
void *
malloc(size_t size)
{
	void *ptr;

	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, size, ptr);
	return(ptr);
}

/*
 * calloc() - call internal slab allocator
 */
void *
calloc(size_t number, size_t size)
{
	void *ptr;

	/*
	 * Guard against size_t overflow in number * size; an overflowed
	 * product would quietly allocate a too-small block.
	 */
	if (size != 0 && number > SIZE_MAX / size) {
		errno = ENOMEM;
		return(NULL);
	}

	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, number * size, ptr);
	return(ptr);
}

/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
realloc(void *ptr, size_t size)
{
	void *ret;

	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	else
		UTRACE(ptr, size, ret);
	return(ret);
}

/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 *
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
 * zone matching the requirements, and _vmem_alloc() otherwise.
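 *
 * For example (illustrative, tracing the code below): posix_memalign(&p,
 * 32, 1000) rounds the size up to 1024; since 1024 is a power of 2 below
 * PAGE_SIZE, the plain _slaballoc() path already returns a 1024-byte
 * (and therefore 32-byte) aligned chunk.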
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t chunking;
	int zi;

	/*
	 * OpenGroup spec issue 6 checks
	 */
	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
		*memptr = NULL;
		return(EINVAL);
	}
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	/*
	 * Our zone mechanism guarantees same-sized alignment for any
	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
	 * we can just call _slaballoc() and be done.  We round size up
	 * to the nearest alignment boundary to improve our odds of
	 * it becoming a power-of-2 if it wasn't before.
	 */
	if (size <= alignment)
		size = alignment;
	else
		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
		*memptr = _slaballoc(size, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * Otherwise locate a zone with a chunking that matches
	 * the requested alignment, within reason.  Consider two cases:
	 *
	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
	 *     we find will be the best fit because the chunking will be
	 *     greater or equal to the alignment.
	 *
	 * (2) A 513 allocation on a 256-byte alignment.  In this case
	 *     the first zoneindex we find will be for 576 byte allocations
	 *     with a chunking of 64, which is not sufficient.  To fix this
	 *     we simply find the nearest power-of-2 >= size and use the
	 *     same side-effect of _slaballoc() which guarantees
	 *     same-alignment on a power-of-2 allocation.
	 */
	if (size < PAGE_SIZE) {
		zi = zoneindex(&size, &chunking);
		if (chunking >= alignment) {
			*memptr = _slaballoc(size, 0);
			return(*memptr ? 0 : ENOMEM);
		}
		if (size >= 1024)
			alignment = 1024;
		if (size >= 16384)
			alignment = 16384;
		while (alignment < size)
			alignment <<= 1;
		*memptr = _slaballoc(alignment, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * If the slab allocator cannot handle it, use _vmem_alloc().
	 *
	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->bytes = size;
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}

/*
 * free() (SLAB ALLOCATOR) - do the obvious
 */
void
free(void *ptr)
{
	UTRACE(ptr, 0, 0);
	_slabfree(ptr, 0, NULL);
}

/*
 * _slaballoc() (SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	size_t chunking;
	int zi;
#ifdef INVARIANTS
	int i;
#endif
	int off;
	void *obj;

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0)
		return(ZERO_LENGTH_PTR);

	/* Capture global flags */
	flags |= g_malloc_flags;

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on a SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		bigalloc_t big;
		bigalloc_t *bigp;

		/*
		 * Page-align and cache-color in case of virtually indexed
		 * physically tagged L1 caches (aka SandyBridge).  No sweat
		 * otherwise, so just do it.
		 */
		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
		if ((size & 8191) == 0)
			size += 4096;

		chunk = _vmem_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL)
			return(NULL);

		big = _slaballoc(sizeof(struct bigalloc), 0);
		if (big == NULL) {
			_vmem_free(chunk, size);
			return(NULL);
		}
		bigp = bigalloc_lock(chunk);
		big->base = chunk;
		big->bytes = size;
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);

		return(chunk);
	}

	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return (obj);
	}

	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Attempt to allocate out of an existing zone.  If all zones are
	 * exhausted pull one off the free list or allocate a new one.
	 */
	if ((z = slgd->ZoneAry[zi]) == NULL) {
		z = zone_alloc(flags);
		if (z == NULL)
			goto fail;

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is
		 * somewhat more complicated so don't make an exact
		 * calculation.
		 */
		off = offsetof(struct slzone,
				z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(struct slzone);
#endif

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
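		 *
		 * (Concretely: for the 1024-byte zone this rounds 'off' up
		 * to the next 1024-byte boundary past the zone header, so
		 * every chunk carved out below is 1024-byte aligned, which
		 * is the basis of the power-of-2 alignment guarantee.)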
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = (off + size - 1) & ~(size - 1);
		else
			off = (off + chunking - 1) & ~(chunking - 1);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
					& (ZALLOC_MAX_ZONE_SIZE - 1);
	}

	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 *
	 * Remove us from the ZoneAry[] when we become empty
	 */
	MASSERT(z->z_NFree > 0);

	if (--z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
			/*
			 * Diagnostic: c_Next is not total garbage.
			 */
			MASSERT(chunk->c_Next == NULL ||
				((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
				((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
			chunk_mark_allocated(z, chunk);
#endif
			MASSERT((uintptr_t)chunk & ZoneMask);
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			goto done;
		}
		++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
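	 *
	 * (z_UIndex wraps modulo z_NMax; it may legitimately return to its
	 * starting point z_UEndIndex only once every never-used slot has
	 * been handed out, i.e. when z_NFree has reached 0, which is
	 * exactly what the check below enforces.)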
	 */
	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);

	if (++z->z_UIndex == z->z_NMax)
		z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex) {
		if (z->z_NFree != 0)
			_mpanic("slaballoc: corrupted zone");
	}

	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif

done:
	slgd_unlock(slgd);
	if (flags & SAFLAG_ZERO) {
		bzero(chunk, size);
#ifdef INVARIANTS
	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		/* avoid accidental double-free check */
		chunk->c_Next = (void *)-1;
#endif
	}
	return(chunk);
fail:
	slgd_unlock(slgd);
	return(NULL);
}

/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	void *nptr;
	slzone_t z;
	size_t chunking;

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR) {
		return(_slaballoc(size, 0));
	}

	if (size == 0) {
		free(ptr);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		bigalloc_t big;
		size_t bigbytes;

		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;
				if (bigbytes == size) {
					bigalloc_unlock(ptr);
					return(ptr);
				}
				*bigp = big->next;
				bigalloc_unlock(ptr);
				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink block */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);
					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				_slabfree(ptr, FASTSLABREALLOC, &big);
				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
	}

	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size) {
			return(ptr);
		}
	}

	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * flags:
 *	FASTSLABREALLOC	Fast call from realloc, *rbigp already
 *			unlinked.
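 *
 *			(_slabrealloc() above is the only caller that
 *			passes it; the bigalloc record has already been
 *			removed from its hash chain, so no second lookup
 *			is needed.)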
 *
 * MPSAFE
 */
static void
_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
{
	slzone_t z;
	slchunk_t chunk;
	bigalloc_t big;
	bigalloc_t *bigp;
	slglobaldata_t slgd;
	size_t size;
	int zi;
	int pgno;

	/* Fast realloc path for big allocations */
	if (flags & FASTSLABREALLOC) {
		big = *rbigp;
		goto fastslabrealloc;
	}

	/*
	 * Handle NULL frees and special 0-byte allocations
	 */
	if (ptr == NULL)
		return;
	if (ptr == ZERO_LENGTH_PTR)
		return;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				*bigp = big->next;
				bigalloc_unlock(ptr);
fastslabrealloc:
				size = big->bytes;
				_slabfree(big, 0, NULL);
#ifdef INVARIANTS
				MASSERT(sizeof(weirdary) <= size);
				bcopy(weirdary, ptr, sizeof(weirdary));
#endif
				_vmem_free(ptr, size);
				return;
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	size = z->z_ChunkSize;
	zi = z->z_ZoneIndex;

	if (g_malloc_flags & SAFLAG_ZERO)
		bzero(ptr, size);

	if (mtmagazine_free(zi, ptr) == 0)
		return;

	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
	chunk = ptr;
	slgd = &SLGlobalData;
	slgd_lock(slgd);

#ifdef INVARIANTS
	/*
	 * Attempt to detect a double-free.  To reduce overhead we only check
	 * if there appears to be a link pointer at the base of the data.
	 */
	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
		slchunk_t scan;

		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
			if (scan == chunk)
				_mpanic("Double free at %p", chunk);
		}
	}
	chunk_mark_free(z, chunk);
#endif

	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on
	 * the odd address), and so forth.
	 */
#ifdef INVARIANTS
	if (z->z_ChunkSize < sizeof(weirdary))
		bcopy(weirdary, chunk, z->z_ChunkSize);
	else
		bcopy(weirdary, chunk, sizeof(weirdary));
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
	 * z_FirstFreePg.
	 */
	chunk->c_Next = z->z_PageAry[pgno];
	z->z_PageAry[pgno] = chunk;
	if (z->z_FirstFreePg > pgno)
		z->z_FirstFreePg = pgno;

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free then release it.
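	 *
	 * (The zone may sit anywhere in its ZoneAry chain, so the release
	 * path below walks the chain to unlink it rather than assuming it
	 * is at the head.)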
	 */
	if (z->z_NFree == z->z_NMax) {
		slzone_t *pz;

		pz = &slgd->ZoneAry[z->z_ZoneIndex];
		while (z != *pz)
			pz = &(*pz)->z_Next;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = NULL;
		zone_free(z);
		/* slgd lock released */
		return;
	}
	slgd_unlock(slgd);
}

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) == 0);
	*bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) != 0);
	*bitptr &= ~(1 << bitdex);
}

#endif

/*
 * Allocate and return a magazine.  NULL is returned and *burst is adjusted
 * if the magazine is empty.
 */
static __inline void *
magazine_alloc(struct magazine *mp, int *burst)
{
	void *obj;

	if (mp == NULL)
		return(NULL);
	if (MAGAZINE_NOTEMPTY(mp)) {
		obj = mp->objects[--mp->rounds];
		return(obj);
	}

	/*
	 * Return burst factor to caller along with NULL
	 */
	if ((mp->flags & M_BURST) && (burst != NULL)) {
		*burst = mp->burst_factor;
	}
	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
	    (burst != NULL)) {
		mp->burst_factor -= M_BURST_NSCALE;
		if (mp->burst_factor <= 1) {
			mp->burst_factor = 1;
			mp->flags &= ~(M_BURST);
			mp->flags &= ~(M_BURST_EARLY);
		}
	}
	return (NULL);
}

static __inline int
magazine_free(struct magazine *mp, void *p)
{
	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
		mp->objects[mp->rounds++] = p;
		return 0;
	}

	return -1;
}

static void *
mtmagazine_alloc(int zi)
{
	thr_mags *tp;
	struct magazine *mp, *emptymag;
	magazine_depot *d;
	void *obj;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(NULL);

	/*
	 * Primary per-thread allocation loop
	 */
	for (;;) {
		/*
		 * If the loaded magazine has rounds, allocate and return
		 */
		mp = tp->mags[zi].loaded;
		obj = magazine_alloc(mp, NULL);
		if (obj)
			break;

		/*
		 * If the prev magazine is full, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_FULL(mp)) {
			MASSERT(mp->rounds != 0);
			swap_mags(&tp->mags[zi]);	/* prev now empty */
			continue;
		}

		/*
		 * Try to get a full magazine from the depot.  Cycle
		 * through depot(full)->loaded->prev->depot(empty).
		 * Retry if a full magazine was available from the depot.
		 *
		 * Return NULL (caller will fall through) if no magazines
		 * can be found anywhere.
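		 *
		 * (On a thread's first pass through a zone index both
		 * magazines are still NULL: the NULL prev is not pushed
		 * onto the empty list, the depot has no full magazine yet,
		 * and the caller falls through to the locked slab path.)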
		 */
		d = &depots[zi];
		depot_lock(d);
		emptymag = tp->mags[zi].prev;
		if (emptymag)
			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
		tp->mags[zi].loaded = mp;
		if (mp) {
			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
			MASSERT(MAGAZINE_NOTEMPTY(mp));
			depot_unlock(d);
			continue;
		}
		depot_unlock(d);
		break;
	}

	return (obj);
}

static int
mtmagazine_free(int zi, void *ptr)
{
	thr_mags *tp;
	struct magazine *mp, *loadedmag;
	magazine_depot *d;
	int rc = -1;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(-1);

	/*
	 * Primary per-thread freeing loop
	 */
	for (;;) {
		/*
		 * Make sure a new magazine is available in case we have
		 * to use it.  Staging the newmag allows us to avoid
		 * some locking/reentrancy complexity.
		 *
		 * Temporarily disable the per-thread caches for this
		 * allocation to avoid reentrancy and/or to avoid a
		 * stack overflow if the [zi] happens to be the same that
		 * would be used to allocate the new magazine.
		 */
		if (tp->newmag == NULL) {
			tp->init = -1;
			tp->newmag = _slaballoc(sizeof(struct magazine),
						SAFLAG_ZERO);
			tp->init = 1;
			if (tp->newmag == NULL) {
				rc = -1;
				break;
			}
		}

		/*
		 * If the loaded magazine has space, free directly to it
		 */
		rc = magazine_free(tp->mags[zi].loaded, ptr);
		if (rc == 0)
			break;

		/*
		 * If the prev magazine is empty, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_EMPTY(mp)) {
			MASSERT(mp->rounds == 0);
			swap_mags(&tp->mags[zi]);	/* prev now full */
			continue;
		}

		/*
		 * Try to get an empty magazine from the depot.  Cycle
		 * through depot(empty)->loaded->prev->depot(full).
		 * Retry if an empty magazine was available from the depot.
		 */
		d = &depots[zi];
		depot_lock(d);

		if ((loadedmag = tp->mags[zi].prev) != NULL)
			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->empty);
		if (mp) {
			tp->mags[zi].loaded = mp;
			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
			MASSERT(MAGAZINE_NOTFULL(mp));
		} else {
			mp = tp->newmag;
			tp->newmag = NULL;
			mp->capacity = M_MAX_ROUNDS;
			mp->rounds = 0;
			mp->flags = 0;
			tp->mags[zi].loaded = mp;
		}
		depot_unlock(d);
	}

	return rc;
}

static void
mtmagazine_init(void)
{
	int error;

	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
	if (error)
		abort();
}

/*
 * This function is only used by the thread exit destructor
 */
static void
mtmagazine_drain(struct magazine *mp)
{
	void *obj;

	while (MAGAZINE_NOTEMPTY(mp)) {
		obj = magazine_alloc(mp, NULL);
		_slabfree(obj, 0, NULL);
	}
}

/*
 * mtmagazine_destructor()
 *
 * When a thread exits, we reclaim all its resources; all its magazines are
 * drained and the structures are freed.
 *
 * WARNING!
 *	    The destructor can be called multiple times if the larger user
 *	    program has its own destructors which run after ours which
 *	    allocate or free memory.
 */
static void
mtmagazine_destructor(void *thrp)
{
	thr_mags *tp = thrp;
	struct magazine *mp;
	int i;

	/*
	 * Prevent further use of mtmagazines while we are destructing
	 * them, as well as for any destructors which are run after us
	 * prior to the thread actually being destroyed.
	 */
	tp->init = -1;

	for (i = 0; i < NZONES; i++) {
		mp = tp->mags[i].loaded;
		tp->mags[i].loaded = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}

		mp = tp->mags[i].prev;
		tp->mags[i].prev = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}
	}

	if (tp->newmag) {
		mp = tp->newmag;
		tp->newmag = NULL;
		_slabfree(mp, 0, NULL);
	}
}

/*
 * zone_alloc()
 *
 * Attempt to allocate a zone from the zone magazine; the zone magazine has
 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
 */
static slzone_t
zone_alloc(int flags)
{
	slglobaldata_t slgd = &SLGlobalData;
	int burst = 1;
	int i, j;
	slzone_t z;

	zone_magazine_lock();
	slgd_unlock(slgd);

	z = magazine_alloc(&zone_magazine, &burst);
	if (z == NULL && burst == 1) {
		zone_magazine_unlock();
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
	} else if (z == NULL) {
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
		if (z) {
			for (i = 1; i < burst; i++) {
				j = magazine_free(&zone_magazine,
						  (char *) z + (ZoneSize * i));
				MASSERT(j == 0);
			}
		}
		zone_magazine_unlock();
	} else {
		z->z_Flags |= SLZF_UNOTZEROD;
		zone_magazine_unlock();
	}
	slgd_lock(slgd);
	return z;
}

/*
 * zone_free()
 *
 * Release a zone and unlock the slgd lock.
 */
static void
zone_free(void *z)
{
	slglobaldata_t slgd = &SLGlobalData;
	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
	int i, j;

	zone_magazine_lock();
	slgd_unlock(slgd);

	bzero(z, sizeof(struct slzone));

	if (opt_madvise)
		madvise(z, ZoneSize, MADV_FREE);

	i = magazine_free(&zone_magazine, z);

	/*
	 * If we failed to free, collect excess magazines; release the zone
	 * magazine lock, and then free to the system via _vmem_free.
	 * Re-enable BURST mode for the magazine.
	 */
	if (i == -1) {
		j = zone_magazine.rounds - zone_magazine.low_factor;
		for (i = 0; i < j; i++) {
			excess[i] = magazine_alloc(&zone_magazine, NULL);
			MASSERT(excess[i] != NULL);
		}

		zone_magazine_unlock();

		for (i = 0; i < j; i++)
			_vmem_free(excess[i], ZoneSize);

		_vmem_free(z, ZoneSize);
	} else {
		zone_magazine_unlock();
	}
}

/*
 * _vmem_alloc()
 *
 * Directly map memory in PAGE_SIZE'd chunks with the specified
 * alignment.
 *
 * Alignment must be a multiple of PAGE_SIZE.
 *
 * Size must be >= alignment.
 */
static void *
_vmem_alloc(size_t size, size_t align, int flags)
{
	char *addr;
	char *save;
	size_t excess;

	/*
	 * Map anonymous private memory.
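	 *
	 * (MAP_ANON memory is demand-zero; this is why a freshly mapped
	 * zone needs no SLZF_UNOTZEROD flag in zone_alloc() while a zone
	 * recycled from the zone magazine does.)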
	 */
	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return(NULL);

	/*
	 * Check alignment.  The misaligned offset is also the excess
	 * amount.  If misaligned unmap the excess so we have a chance of
	 * mapping at the next alignment point and recursively try again.
	 *
	 *   BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
	 *     aaaaaaaaa aaaaaaaaaaa aa			mis-aligned allocation
	 *     xxxxxxxxx				final excess calculation
	 *     ^ returned address
	 */
	excess = (uintptr_t)addr & (align - 1);

	if (excess) {
		excess = align - excess;
		save = addr;

		munmap(save + excess, size - excess);
		addr = _vmem_alloc(size, align, flags);
		munmap(save, excess);
	}
	return((void *)addr);
}

/*
 * _vmem_free()
 *
 * Free a chunk of memory allocated with _vmem_alloc()
 */
static void
_vmem_free(void *ptr, size_t size)
{
	munmap(ptr, size);
}

/*
 * Panic on fatal conditions
 */
static void
_mpanic(const char *ctl, ...)
{
	va_list va;

	if (malloc_panic == 0) {
		malloc_panic = 1;
		va_start(va, ctl);
		vfprintf(stderr, ctl, va);
		fprintf(stderr, "\n");
		fflush(stderr);
		va_end(va);
	}
	abort();
}
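
/*
 * Illustrative smoke test, kept under #if 0 so it is never compiled into
 * the library.  It assumes this file was built standalone as nmalloc.so
 * per the cc line near the top and preloaded into a test program with
 * LD_PRELOAD; the expected behaviors are exactly those documented in the
 * API FEATURES comment block above.
 */
#if 0
#include <assert.h>

int
main(void)
{
	void *p;
	char *q;
	void *r;

	p = malloc(0);			/* special non-NULL zero-length ptr */
	assert(p != NULL);
	free(p);

	q = malloc(100);		/* rounded up to the 104-byte zone */
	memset(q, 'x', 100);
	r = realloc(q, 104);		/* same chunking: pointer is reused */
	assert(r == q);
	free(r);

	assert(posix_memalign(&p, 4096, 65536) == 0);	/* bigalloc path */
	free(p);
	return 0;
}
#endif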