/*
 * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
 *
 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and by
 * Venkatesh Srinivas <me@endeavour.zapto.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
 */
/*
 * This module implements a slab allocator drop-in replacement for the
 * libc malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and overhead losses are limited to a fixed
 * worst-case amount.
 *
 * The slab allocator does not have to pre-initialize the list of
 * free chunks for each zone, and the underlying VM will not be
 * touched at all beyond the zone header until an actual allocation
 * needs it.
 *
 * Slab management and locking is done on a per-zone basis.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
 * is used to locate them for free().  One- and two-page allocations use
 * the zone mechanic to avoid excessive mmap()/munmap() calls.
 *
 * API FEATURES AND SIDE EFFECTS
 *
 *	+ power-of-2 sized allocations up to a page will be power-of-2 aligned.
 *	  Above that power-of-2 sized allocations are page-aligned.  Non
 *	  power-of-2 sized allocations are aligned the same as the chunk
 *	  size for their zone.
 *	+ malloc(0) returns a special non-NULL value
 *	+ ability to allocate arbitrarily large chunks of memory
 *	+ realloc will reuse the passed pointer if possible, within the
 *	  limitations of the zone chunking.
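 *
 *	  (Illustrative examples of the alignment rules above, added for
 *	  clarity: malloc(256) is a power-of-2 request below a page and is
 *	  returned 256-byte aligned, while malloc(192) falls in the 128-255
 *	  zone with 16-byte chunking and is only guaranteed 16-byte
 *	  alignment.)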
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'.  See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
 *	USENIX Technical Conference. USENIX Association.
 *
 * Oversized allocations employ the BIGCACHE mechanic whereby large
 * allocations may be handed significantly larger buffers, allowing them
 * to avoid mmap/munmap operations even through significant realloc()s.
 * The excess space is only trimmed if too many large allocations have been
 * given this treatment.
 *
 * TUNING
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 * 'U'   / ['u']	Generate / do not generate utrace entries for ktrace(1)
 *			This will generate utrace events for all malloc,
 *			realloc, and free calls.  There are tools (mtrplay) to
 *			replay an allocation pattern or to graph heap structure
 *			(mtrgraph) which can interpret these logs.
 * 'Z'   / ['z']	Zero out / do not zero all allocations.
 *			Each new byte of memory allocated by malloc, realloc, or
 *			reallocf will be initialized to 0.  This is intended for
 *			debugging and will affect performance negatively.
 * 'H'   / ['h']	Pass a hint to the kernel about pages unused by the
 *			allocation functions.
 */

/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */

#include "libc_private.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <machine/atomic.h>

#include "spinlock.h"
#include "un-namespace.h"


/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc *next;	/* hash link */
	void	*base;		/* base pointer */
	u_long	active;		/* bytes active */
	u_long	bytes;		/* bytes allocated */
} *bigalloc_t;

/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_ZONE_SIZE	(64 * 1024)
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */

#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif

/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk *c_Next;
} *slchunk_t;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
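 *
 * (Note added for clarity, derived from the masking used in _slabrealloc()
 * and _slabfree() below: because zones are ZALLOC_ZONE_SIZE aligned, the
 * owning header of any slab-managed chunk can be recovered with
 * (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask).)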
 */
struct slglobaldata;

typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;
	int		z_Flags;
	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
#if defined(INVARIANTS)
	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
#endif
} *slzone_t;

typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
	int		JunkIndex;
} *slglobaldata_t;

#define SLZF_UNOTZEROD		0x0001

#define FASTSLABREALLOC		0x02

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 *
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 *	    not be larger than 64.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)

#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)

/*
 * BIGCACHE caches oversized allocations.  Note that a linear search is
 * performed, so do not make the cache too large.
 *
 * BIGCACHE will garbage-collect excess space when the excess exceeds the
 * specified value.  A relatively large number should be used here because
 * garbage collection is expensive.
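 *
 * (Illustrative example, not part of the original comment: a freed 768KB
 * mapping is at or below BIGCACHE_LIMIT and may be stashed in
 * bigcache_array[], then handed back later to a 512KB request without a
 * new mmap(); handle_excess_big() trims the accumulated slop once it
 * exceeds BIGCACHE_EXCESS.)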
 */
#define BIGCACHE	16
#define BIGCACHE_MASK	(BIGCACHE - 1)
#define BIGCACHE_LIMIT	(1024 * 1024)		/* size limit */
#define BIGCACHE_EXCESS	(16 * 1024 * 1024)	/* garbage collect */

#define SAFLAG_ZERO	0x0001
#define SAFLAG_PASSIVE	0x0002

/*
 * Thread control
 */

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
				_mpanic("assertion: %s in %s",	\
					#exp, __func__);	\
			} while (0)

/*
 * Magazines
 */

#define M_MAX_ROUNDS	64
#define M_ZONE_ROUNDS	64
#define M_LOW_ROUNDS	32
#define M_INIT_ROUNDS	8
#define M_BURST_FACTOR	8
#define M_BURST_NSCALE	2

#define M_BURST		0x0001
#define M_BURST_EARLY	0x0002

struct magazine {
	SLIST_ENTRY(magazine) nextmagazine;

	int		flags;
	int		capacity;	/* Max rounds in this magazine */
	int		rounds;		/* Current number of free rounds */
	int		burst_factor;	/* Number of blocks to prefill with */
	int		low_factor;	/* Free till low_factor from full mag */
	void		*objects[M_MAX_ROUNDS];
};

SLIST_HEAD(magazinelist, magazine);

static spinlock_t zone_mag_lock;
static struct magazine zone_magazine = {
	.flags = M_BURST | M_BURST_EARLY,
	.capacity = M_ZONE_ROUNDS,
	.rounds = 0,
	.burst_factor = M_BURST_FACTOR,
	.low_factor = M_LOW_ROUNDS
};

#define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
#define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
#define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
#define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)

/*
 * Each thread will have a pair of magazines per size-class (NZONES).
 * The loaded magazine will support immediate allocations, the previous
 * magazine will either be full or empty and can be swapped at need.
 */
typedef struct magazine_pair {
	struct magazine	*loaded;
	struct magazine	*prev;
} magazine_pair;

/* A depot is a collection of magazines for a single zone. */
typedef struct magazine_depot {
	struct magazinelist full;
	struct magazinelist empty;
	spinlock_t	lock;
} magazine_depot;

typedef struct thr_mags {
	magazine_pair	mags[NZONES];
	struct magazine	*newmag;
	int		init;
} thr_mags;

/*
 * With this attribute set, do not require a function call for accessing
 * this variable when the code is compiled -fPIC.  Empty for libc_rtld
 * (like __thread).
 */
#ifdef __LIBC_RTLD
#define TLS_ATTRIBUTE
#else
#define TLS_ATTRIBUTE	__attribute__ ((tls_model ("initial-exec")))
#endif

static int mtmagazine_free_live;
static __thread thr_mags thread_mags TLS_ATTRIBUTE;
static pthread_key_t thread_mags_key;
static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
static magazine_depot depots[NZONES];

/*
 * Fixed globals (not per-cpu)
 */
static const int ZoneSize = ZALLOC_ZONE_SIZE;
static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;

static int opt_madvise = 0;
static int opt_utrace = 0;
static int g_malloc_flags = 0;
static struct slglobaldata SLGlobalData;
static bigalloc_t bigalloc_array[BIGHSIZE];
static spinlock_t bigspin_array[BIGXSIZE];
static volatile void *bigcache_array[BIGCACHE];		/* atomic swap */
static volatile size_t bigcache_size_array[BIGCACHE];	/* SMP races ok */
static volatile int bigcache_index;			/* SMP races ok */
static int malloc_panic;
static int malloc_dummy_pointer;
static size_t excess_alloc;				/* excess big allocs */

static const int32_t weirdary[16] = {
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
};

static void *_slaballoc(size_t size, int flags);
static void *_slabrealloc(void *ptr, size_t size);
static void _slabfree(void *ptr, int, bigalloc_t *);
static void *_vmem_alloc(size_t bytes, size_t align, int flags);
static void _vmem_free(void *ptr, size_t bytes);
static void *magazine_alloc(struct magazine *, int *);
static int magazine_free(struct magazine *, void *);
static void *mtmagazine_alloc(int zi);
static int mtmagazine_free(int zi, void *);
static void mtmagazine_init(void);
static void mtmagazine_destructor(void *);
static slzone_t zone_alloc(int flags);
static void zone_free(void *z);
static void _mpanic(const char *ctl, ...) __printflike(1, 2);
static void malloc_init(void) __constructor(101);
#if defined(INVARIANTS)
static void chunk_mark_allocated(slzone_t z, void *chunk);
static void chunk_mark_free(slzone_t z, void *chunk);
#endif

struct nmalloc_utrace {
	void *p;
	size_t s;
	void *r;
};

#define UTRACE(a, b, c)						\
	if (opt_utrace) {					\
		struct nmalloc_utrace ut = {			\
			.p = (a),				\
			.s = (b),				\
			.r = (c)				\
		};						\
		utrace(&ut, sizeof(ut));			\
	}

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
#endif

static void
malloc_init(void)
{
	const char *p = NULL;

	if (issetugid() == 0)
		p = getenv("MALLOC_OPTIONS");

	for (; p != NULL && *p != '\0'; p++) {
		switch(*p) {
		case 'u':	opt_utrace = 0; break;
		case 'U':	opt_utrace = 1; break;
		case 'h':	opt_madvise = 0; break;
		case 'H':	opt_madvise = 1; break;
		case 'z':	g_malloc_flags = 0; break;
		case 'Z':	g_malloc_flags = SAFLAG_ZERO; break;
		default:
			break;
		}
	}

	UTRACE((void *) -1, 0, NULL);
}

/*
 * We have to install a handler for nmalloc thread teardowns when
 * the thread is created.  We cannot delay this because destructors in
 * sophisticated userland programs can call malloc() for the first time
 * during their thread exit.
 *
 * This routine is called directly from pthreads.
 */
void
_nmalloc_thr_init(void)
{
	thr_mags *tp;

	/*
	 * Disallow mtmagazine operations until the mtmagazine is
	 * initialized.
	 */
	tp = &thread_mags;
	tp->init = -1;

	if (mtmagazine_free_live == 0) {
		mtmagazine_free_live = 1;
		pthread_once(&thread_mags_once, mtmagazine_init);
	}
	pthread_setspecific(thread_mags_key, tp);
	tp->init = 1;
}

/*
 * Thread locks.
 */
static __inline void
slgd_lock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINLOCK(&slgd->Spinlock);
}

static __inline void
slgd_unlock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINUNLOCK(&slgd->Spinlock);
}

static __inline void
depot_lock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINLOCK(&dp->lock);
}

static __inline void
depot_unlock(magazine_depot *dp)
{
	if (__isthreaded)
		_SPINUNLOCK(&dp->lock);
}

static __inline void
zone_magazine_lock(void)
{
	if (__isthreaded)
		_SPINLOCK(&zone_mag_lock);
}

static __inline void
zone_magazine_unlock(void)
{
	if (__isthreaded)
		_SPINUNLOCK(&zone_mag_lock);
}

static __inline void
swap_mags(magazine_pair *mp)
{
	struct magazine *tmp;
	tmp = mp->loaded;
	mp->loaded = mp->prev;
	mp->prev = tmp;
}

/*
 * bigalloc hashing and locking support.
 *
 * Return an unmasked hash code for the passed pointer.
 */
static __inline int
_bigalloc_hash(void *ptr)
{
	int hv;

	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
	     ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));

	return(hv);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 */
static __inline bigalloc_t *
bigalloc_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (__isthreaded)
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}

/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 *
 * BUT, if the hash chain is empty, just return NULL and do not bother
 * to lock anything.
 */
static __inline bigalloc_t *
bigalloc_check_and_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (*bigp == NULL)
		return(NULL);
	if (__isthreaded) {
		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	}
	return(bigp);
}

static __inline void
bigalloc_unlock(void *ptr)
{
	int hv;

	if (__isthreaded) {
		hv = _bigalloc_hash(ptr);
		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
	}
}

/*
 * Find a bigcache entry that might work for the allocation.  SMP races are
 * ok here except for the swap (that is, it is ok if bigcache_size_array[i]
 * is wrong or if a NULL or too-small big is returned).
 *
 * Generally speaking it is ok to find a large entry even if the bytes
 * requested are relatively small (but still oversized), because we really
 * don't know *what* the application is going to do with the buffer.
 */
static __inline
bigalloc_t
bigcache_find_alloc(size_t bytes)
{
	bigalloc_t big = NULL;
	size_t test;
	int i;

	for (i = 0; i < BIGCACHE; ++i) {
		test = bigcache_size_array[i];
		if (bytes <= test) {
			bigcache_size_array[i] = 0;
			big = atomic_swap_ptr(&bigcache_array[i], NULL);
			break;
		}
	}
	return big;
}

/*
 * Free a bigcache entry, possibly returning one that the caller really must
 * free.  This is used to cache recent oversized memory blocks.  Only
 * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
 * to collect the biggest ones we can that are under the limit.
 */
static __inline
bigalloc_t
bigcache_find_free(bigalloc_t big)
{
	int i;
	int j;
	int b;

	b = ++bigcache_index;
	for (i = 0; i < BIGCACHE; ++i) {
		j = (b + i) & BIGCACHE_MASK;
		if (bigcache_size_array[j] < big->bytes) {
			bigcache_size_array[j] = big->bytes;
			big = atomic_swap_ptr(&bigcache_array[j], big);
			break;
		}
	}
	return big;
}

static __inline
void
handle_excess_big(void)
{
	int i;
	bigalloc_t big;
	bigalloc_t *bigp;

	if (excess_alloc <= BIGCACHE_EXCESS)
		return;

	for (i = 0; i < BIGHSIZE; ++i) {
		bigp = &bigalloc_array[i];
		if (*bigp == NULL)
			continue;
		if (__isthreaded)
			_SPINLOCK(&bigspin_array[i & BIGXMASK]);
		for (big = *bigp; big; big = big->next) {
			if (big->active < big->bytes) {
				MASSERT((big->active & PAGE_MASK) == 0);
				MASSERT((big->bytes & PAGE_MASK) == 0);
				munmap((char *)big->base + big->active,
				       big->bytes - big->active);
				atomic_add_long(&excess_alloc,
						big->active - big->bytes);
				big->bytes = big->active;
			}
		}
		if (__isthreaded)
			_SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
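 *
 * (Worked examples, added for illustration and easy to verify against the
 * code below: a 100-byte request is rounded to 104 with 8-byte chunking
 * and lands in zone 12; a 700-byte request is rounded to 704 with 64-byte
 * chunking and lands in zone 34.)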
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */
	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %zu", n);
	return(0);
}

/*
 * malloc() - call internal slab allocator
 */
void *
malloc(size_t size)
{
	void *ptr;

	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, size, ptr);
	return(ptr);
}

#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

/*
 * calloc() - call internal slab allocator
 */
void *
calloc(size_t number, size_t size)
{
	void *ptr;

	if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    number > 0 && SIZE_MAX / number < size) {
		errno = ENOMEM;
		return(NULL);
	}

	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, number * size, ptr);
	return(ptr);
}

/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
realloc(void *ptr, size_t size)
{
	void *ret;
	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	else
		UTRACE(ptr, size, ret);
	return(ret);
}

/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 *
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
 * zone matching the requirements, and _vmem_alloc() otherwise.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t chunking;
	int zi __unused;

	/*
	 * OpenGroup spec issue 6 checks
	 */
	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
		*memptr = NULL;
		return(EINVAL);
	}
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	/*
	 * Our zone mechanism guarantees same-sized alignment for any
	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
	 * we can just call _slaballoc() and be done.  We round size up
	 * to the nearest alignment boundary to improve our odds of
	 * it becoming a power-of-2 if it wasn't before.
	 */
	if (size <= alignment)
		size = alignment;
	else
		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
		*memptr = _slaballoc(size, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * Otherwise locate a zone with a chunking that matches
	 * the requested alignment, within reason.  Consider two cases:
	 *
	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
	 *     we find will be the best fit because the chunking will be
	 *     greater or equal to the alignment.
	 *
	 * (2) A 513 allocation on a 256-byte alignment.  In this case
	 *     the first zoneindex we find will be for 576 byte allocations
	 *     with a chunking of 64, which is not sufficient.  To fix this
	 *     we simply find the nearest power-of-2 >= size and use the
	 *     same side-effect of _slaballoc() which guarantees
	 *     same-alignment on a power-of-2 allocation.
	 */
	if (size < PAGE_SIZE) {
		zi = zoneindex(&size, &chunking);
		if (chunking >= alignment) {
			*memptr = _slaballoc(size, 0);
			return(*memptr ? 0 : ENOMEM);
		}
		if (size >= 1024)
			alignment = 1024;
		if (size >= 16384)
			alignment = 16384;
		while (alignment < size)
			alignment <<= 1;
		*memptr = _slaballoc(alignment, 0);
		return(*memptr ? 0 : ENOMEM);
	}

	/*
	 * If the slab allocator cannot handle it use vmem_alloc().
	 *
	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->active = size;
	big->bytes = size;		/* no excess */
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}

/*
 * free() (SLAB ALLOCATOR) - do the obvious
 */
void
free(void *ptr)
{
	UTRACE(ptr, 0, 0);
	_slabfree(ptr, 0, NULL);
}

/*
 * _slaballoc() (SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	size_t chunking;
	int zi;
#ifdef INVARIANTS
	int i;
#endif
	int off;
	void *obj;

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0)
		return(ZERO_LENGTH_PTR);

	/* Capture global flags */
	flags |= g_malloc_flags;

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on an SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		bigalloc_t big;
		bigalloc_t *bigp;

		/*
		 * Page-align and cache-color in case of virtually indexed
		 * physically tagged L1 caches (aka SandyBridge).  No sweat
		 * otherwise, so just do it.
		 *
		 * (don't count as excess).
		 */
		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
		if ((size & (PAGE_SIZE * 2 - 1)) == 0)
			size += PAGE_SIZE;

		/*
		 * Try to reuse a cached big block to avoid mmap'ing.  If it
		 * turns out not to fit our requirements we throw it away
		 * and allocate normally.
		 */
		big = NULL;
		if (size <= BIGCACHE_LIMIT) {
			big = bigcache_find_alloc(size);
			if (big && big->bytes < size) {
				_slabfree(big->base, FASTSLABREALLOC, &big);
				big = NULL;
			}
		}
		if (big) {
			chunk = big->base;
			if (flags & SAFLAG_ZERO)
				bzero(chunk, size);
		} else {
			chunk = _vmem_alloc(size, PAGE_SIZE, flags);
			if (chunk == NULL)
				return(NULL);

			big = _slaballoc(sizeof(struct bigalloc), 0);
			if (big == NULL) {
				_vmem_free(chunk, size);
				return(NULL);
			}
			big->base = chunk;
			big->bytes = size;
		}
		big->active = size;

		bigp = bigalloc_lock(chunk);
		if (big->active < big->bytes) {
			atomic_add_long(&excess_alloc,
					big->bytes - big->active);
		}
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);
		handle_excess_big();

		return(chunk);
	}

	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return (obj);
	}

	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Attempt to allocate out of an existing zone.  If all zones are
	 * exhausted pull one off the free list or allocate a new one.
	 */
	if ((z = slgd->ZoneAry[zi]) == NULL) {
		z = zone_alloc(flags);
		if (z == NULL)
			goto fail;

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is
		 * somewhat more complicated so don't make an exact
		 * calculation.
		 */
		off = offsetof(struct slzone,
				z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(struct slzone);
#endif

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
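		 *
		 * (Example of the rounding below, added for illustration: a
		 * zone holding 1024-byte chunks rounds the header offset up
		 * to the next 1024-byte boundary, while a zone holding
		 * 576-byte chunks only rounds it up to its 64-byte chunking.)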
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = roundup2(off, size);
		else
			off = roundup2(off, chunking);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
				  & (ZALLOC_MAX_ZONE_SIZE - 1);
	}

	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 *
	 * Remove us from the ZoneAry[] when we become empty
	 */
	MASSERT(z->z_NFree > 0);

	if (--z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
			/*
			 * Diagnostic: c_Next is not total garbage.
			 */
			MASSERT(chunk->c_Next == NULL ||
				((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
				((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
			chunk_mark_allocated(z, chunk);
#endif
			MASSERT((uintptr_t)chunk & ZoneMask);
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			goto done;
		}
		++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
	 */
	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);

	if (++z->z_UIndex == z->z_NMax)
		z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex) {
		if (z->z_NFree != 0)
			_mpanic("slaballoc: corrupted zone");
	}

	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif

done:
	slgd_unlock(slgd);
	if (flags & SAFLAG_ZERO) {
		bzero(chunk, size);
#ifdef INVARIANTS
	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		/* avoid accidental double-free check */
		chunk->c_Next = (void *)-1;
#endif
	}
	return(chunk);
fail:
	slgd_unlock(slgd);
	return(NULL);
}

/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	void *nptr;
	slzone_t z;
	size_t chunking;

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR) {
		return(_slaballoc(size, 0));
	}

	if (size == 0) {
		free(ptr);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		bigalloc_t big;
		size_t bigbytes;

		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;

				/*
				 * If it already fits determine if it makes
				 * sense to shrink/reallocate.  Try to optimize
				 * programs which stupidly make incremental
				 * reallocations larger or smaller by scaling
				 * the allocation.  Also deal with potential
				 * coloring.
				 */
				if (size >= (bigbytes >> 1) &&
				    size <= bigbytes) {
					if (big->active != size) {
						atomic_add_long(&excess_alloc,
								big->active -
								size);
					}
					big->active = size;
					bigalloc_unlock(ptr);
					return(ptr);
				}

				/*
				 * For large reallocations, allocate more space
				 * than we need to try to avoid excessive
				 * reallocations later on.
				 */
				chunking = size + (size >> 3);
				chunking = (chunking + PAGE_MASK) &
					   ~(size_t)PAGE_MASK;

				/*
				 * Try to allocate adjacently in case the
				 * program is idiotically realloc()ing a
				 * huge memory block just slightly bigger.
				 * (llvm's llc tends to do this a lot).
				 *
				 * (MAP_TRYFIXED forces mmap to fail if there
				 * is already something at the address).
				 */
				if (chunking > bigbytes) {
					char *addr;
					int errno_save = errno;

					addr = mmap((char *)ptr + bigbytes,
						    chunking - bigbytes,
						    PROT_READ|PROT_WRITE,
						    MAP_PRIVATE|MAP_ANON|
						    MAP_TRYFIXED,
						    -1, 0);
					errno = errno_save;
					if (addr == (char *)ptr + bigbytes) {
						atomic_add_long(&excess_alloc,
								big->active -
								big->bytes +
								chunking -
								size);
						big->bytes = chunking;
						big->active = size;
						bigalloc_unlock(ptr);

						return(ptr);
					}
					MASSERT((void *)addr == MAP_FAILED);
				}

				/*
				 * Failed, unlink big and allocate fresh.
				 * (note that we have to leave (big) intact
				 * in case the slaballoc fails).
				 */
				*bigp = big->next;
				bigalloc_unlock(ptr);
				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink block */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);
					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				_slabfree(ptr, FASTSLABREALLOC, &big);

				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size) {
			return(ptr);
		}
	}

	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * flags:
 *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
 *				unlinked.
 *
 * MPSAFE
 */
static void
_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
{
	slzone_t z;
	slchunk_t chunk;
	bigalloc_t big;
	bigalloc_t *bigp;
	slglobaldata_t slgd;
	size_t size;
	int zi;
	int pgno;

	/* Fast realloc path for big allocations */
	if (flags & FASTSLABREALLOC) {
		big = *rbigp;
		goto fastslabrealloc;
	}

	/*
	 * Handle NULL frees and special 0-byte allocations
	 */
	if (ptr == NULL)
		return;
	if (ptr == ZERO_LENGTH_PTR)
		return;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				*bigp = big->next;
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				bigalloc_unlock(ptr);

				/*
				 * Try to stash the block we are freeing,
				 * potentially receiving another block in
				 * return which must be freed.
				 */
fastslabrealloc:
				if (big->bytes <= BIGCACHE_LIMIT) {
					big = bigcache_find_free(big);
					if (big == NULL)
						return;
				}
				ptr = big->base;	/* reload */
				size = big->bytes;
				_slabfree(big, 0, NULL);
#ifdef INVARIANTS
				MASSERT(sizeof(weirdary) <= size);
				bcopy(weirdary, ptr, sizeof(weirdary));
#endif
				_vmem_free(ptr, size);
				return;
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	size = z->z_ChunkSize;
	zi = z->z_ZoneIndex;

	if (g_malloc_flags & SAFLAG_ZERO)
		bzero(ptr, size);

	if (mtmagazine_free(zi, ptr) == 0)
		return;

	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
	chunk = ptr;
	slgd = &SLGlobalData;
	slgd_lock(slgd);

#ifdef INVARIANTS
	/*
	 * Attempt to detect a double-free.  To reduce overhead we only check
	 * if there appears to be a link pointer at the base of the data.
	 */
	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
		slchunk_t scan;

		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
			if (scan == chunk)
				_mpanic("Double free at %p", chunk);
		}
	}
	chunk_mark_free(z, chunk);
#endif

	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on
	 * the odd address), and so forth.
	 */
#ifdef INVARIANTS
	if (z->z_ChunkSize < sizeof(weirdary))
		bcopy(weirdary, chunk, z->z_ChunkSize);
	else
		bcopy(weirdary, chunk, sizeof(weirdary));
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
	 * z_FirstFreePg.
	 */
	chunk->c_Next = z->z_PageAry[pgno];
	z->z_PageAry[pgno] = chunk;
	if (z->z_FirstFreePg > pgno)
		z->z_FirstFreePg = pgno;

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free then release it.
	 */
	if (z->z_NFree == z->z_NMax) {
		slzone_t *pz;

		pz = &slgd->ZoneAry[z->z_ZoneIndex];
		while (z != *pz)
			pz = &(*pz)->z_Next;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = NULL;
		zone_free(z);
		/* slgd lock released */
		return;
	}
	slgd_unlock(slgd);
}

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) == 0);
	*bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) != 0);
	*bitptr &= ~(1 << bitdex);
}

#endif

/*
 * Allocate and return a magazine.  NULL is returned and *burst is adjusted
 * if the magazine is empty.
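 *
 * (Behavioral note added for clarity, matching the code below: when
 * M_BURST_EARLY is set, each allocation attempt that finds the magazine
 * empty and requests a burst lowers burst_factor by M_BURST_NSCALE; once
 * it drops to 1 both burst flags are cleared and no further burst
 * prefilling is requested.)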
 */
static __inline void *
magazine_alloc(struct magazine *mp, int *burst)
{
	void *obj;

	if (mp == NULL)
		return(NULL);
	if (MAGAZINE_NOTEMPTY(mp)) {
		obj = mp->objects[--mp->rounds];
		return(obj);
	}

	/*
	 * Return burst factor to caller along with NULL
	 */
	if ((mp->flags & M_BURST) && (burst != NULL)) {
		*burst = mp->burst_factor;
	}
	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
	    (burst != NULL)) {
		mp->burst_factor -= M_BURST_NSCALE;
		if (mp->burst_factor <= 1) {
			mp->burst_factor = 1;
			mp->flags &= ~(M_BURST);
			mp->flags &= ~(M_BURST_EARLY);
		}
	}
	return (NULL);
}

static __inline int
magazine_free(struct magazine *mp, void *p)
{
	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
		mp->objects[mp->rounds++] = p;
		return 0;
	}

	return -1;
}

static void *
mtmagazine_alloc(int zi)
{
	thr_mags *tp;
	struct magazine *mp, *emptymag;
	magazine_depot *d;
	void *obj;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(NULL);

	/*
	 * Primary per-thread allocation loop
	 */
	for (;;) {
		/*
		 * If the loaded magazine has rounds, allocate and return
		 */
		mp = tp->mags[zi].loaded;
		obj = magazine_alloc(mp, NULL);
		if (obj)
			break;

		/*
		 * If the prev magazine is full, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_FULL(mp)) {
			MASSERT(mp->rounds != 0);
			swap_mags(&tp->mags[zi]);	/* prev now empty */
			continue;
		}

		/*
		 * Try to get a full magazine from the depot.  Cycle
		 * through depot(full)->loaded->prev->depot(empty).
		 * Retry if a full magazine was available from the depot.
		 *
		 * Return NULL (caller will fall through) if no magazines
		 * can be found anywhere.
		 */
		d = &depots[zi];
		depot_lock(d);
		emptymag = tp->mags[zi].prev;
		if (emptymag)
			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
		tp->mags[zi].loaded = mp;
		if (mp) {
			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
			MASSERT(MAGAZINE_NOTEMPTY(mp));
			depot_unlock(d);
			continue;
		}
		depot_unlock(d);
		break;
	}

	return (obj);
}

static int
mtmagazine_free(int zi, void *ptr)
{
	thr_mags *tp;
	struct magazine *mp, *loadedmag;
	magazine_depot *d;
	int rc = -1;

	/*
	 * Do not try to access per-thread magazines while the mtmagazine
	 * is being initialized or destroyed.
	 */
	tp = &thread_mags;
	if (tp->init < 0)
		return(-1);

	/*
	 * Primary per-thread freeing loop
	 */
	for (;;) {
		/*
		 * Make sure a new magazine is available in case we have
		 * to use it.  Staging the newmag allows us to avoid
		 * some locking/reentrancy complexity.
		 *
		 * Temporarily disable the per-thread caches for this
		 * allocation to avoid reentrancy and/or to avoid a
		 * stack overflow if the [zi] happens to be the same that
		 * would be used to allocate the new magazine.
		 */
		if (tp->newmag == NULL) {
			tp->init = -1;
			tp->newmag = _slaballoc(sizeof(struct magazine),
						SAFLAG_ZERO);
			tp->init = 1;
			if (tp->newmag == NULL) {
				rc = -1;
				break;
			}
		}

		/*
		 * If the loaded magazine has space, free directly to it
		 */
		rc = magazine_free(tp->mags[zi].loaded, ptr);
		if (rc == 0)
			break;

		/*
		 * If the prev magazine is empty, swap with the loaded
		 * magazine and retry.
		 */
		mp = tp->mags[zi].prev;
		if (mp && MAGAZINE_EMPTY(mp)) {
			MASSERT(mp->rounds == 0);
			swap_mags(&tp->mags[zi]);	/* prev now full */
			continue;
		}

		/*
		 * Try to get an empty magazine from the depot.  Cycle
		 * through depot(empty)->loaded->prev->depot(full).
		 * Retry if an empty magazine was available from the depot.
		 */
		d = &depots[zi];
		depot_lock(d);

		if ((loadedmag = tp->mags[zi].prev) != NULL)
			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
		tp->mags[zi].prev = tp->mags[zi].loaded;
		mp = SLIST_FIRST(&d->empty);
		if (mp) {
			tp->mags[zi].loaded = mp;
			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
			MASSERT(MAGAZINE_NOTFULL(mp));
		} else {
			mp = tp->newmag;
			tp->newmag = NULL;
			mp->capacity = M_MAX_ROUNDS;
			mp->rounds = 0;
			mp->flags = 0;
			tp->mags[zi].loaded = mp;
		}
		depot_unlock(d);
	}

	return rc;
}

static void
mtmagazine_init(void)
{
	int error;

	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
	if (error)
		abort();
}

/*
 * This function is only used by the thread exit destructor
 */
static void
mtmagazine_drain(struct magazine *mp)
{
	void *obj;

	while (MAGAZINE_NOTEMPTY(mp)) {
		obj = magazine_alloc(mp, NULL);
		_slabfree(obj, 0, NULL);
	}
}

/*
 * mtmagazine_destructor()
 *
 * When a thread exits, we reclaim all its resources; all its magazines are
 * drained and the structures are freed.
 *
 * WARNING!  The destructor can be called multiple times if the larger user
 *	     program has its own destructors which run after ours which
 *	     allocate or free memory.
 */
static void
mtmagazine_destructor(void *thrp)
{
	thr_mags *tp = thrp;
	struct magazine *mp;
	int i;

	/*
	 * Prevent further use of mtmagazines while we are destructing
	 * them, as well as for any destructors which are run after us
	 * prior to the thread actually being destroyed.
	 */
	tp->init = -1;

	for (i = 0; i < NZONES; i++) {
		mp = tp->mags[i].loaded;
		tp->mags[i].loaded = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}

		mp = tp->mags[i].prev;
		tp->mags[i].prev = NULL;
		if (mp) {
			if (MAGAZINE_NOTEMPTY(mp))
				mtmagazine_drain(mp);
			_slabfree(mp, 0, NULL);
		}
	}

	if (tp->newmag) {
		mp = tp->newmag;
		tp->newmag = NULL;
		_slabfree(mp, 0, NULL);
	}
}

/*
 * zone_alloc()
 *
 * Attempt to allocate a zone from the zone magazine; the zone magazine has
 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
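 *
 * (Note added for clarity, matching the code below: on a miss with the
 * default burst_factor of 8, a single _vmem_alloc() maps ZoneSize * 8
 * bytes and the seven extra zones are pushed into the zone magazine for
 * later use.)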
 */
static slzone_t
zone_alloc(int flags)
{
	slglobaldata_t slgd = &SLGlobalData;
	int burst = 1;
	int i, j;
	slzone_t z;

	zone_magazine_lock();
	slgd_unlock(slgd);

	z = magazine_alloc(&zone_magazine, &burst);
	if (z == NULL && burst == 1) {
		zone_magazine_unlock();
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
	} else if (z == NULL) {
		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
		if (z) {
			for (i = 1; i < burst; i++) {
				j = magazine_free(&zone_magazine,
						  (char *) z + (ZoneSize * i));
				MASSERT(j == 0);
			}
		}
		zone_magazine_unlock();
	} else {
		z->z_Flags |= SLZF_UNOTZEROD;
		zone_magazine_unlock();
	}
	slgd_lock(slgd);
	return z;
}

/*
 * zone_free()
 *
 * Release a zone and unlock the slgd lock.
 */
static void
zone_free(void *z)
{
	slglobaldata_t slgd = &SLGlobalData;
	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
	int i, j;

	zone_magazine_lock();
	slgd_unlock(slgd);

	bzero(z, sizeof(struct slzone));

	if (opt_madvise)
		madvise(z, ZoneSize, MADV_FREE);

	i = magazine_free(&zone_magazine, z);

	/*
	 * If we failed to free, collect excess magazines; release the zone
	 * magazine lock, and then free to the system via _vmem_free.
	 * Re-enable BURST mode for the magazine.
	 */
	if (i == -1) {
		j = zone_magazine.rounds - zone_magazine.low_factor;
		for (i = 0; i < j; i++) {
			excess[i] = magazine_alloc(&zone_magazine, NULL);
			MASSERT(excess[i] != NULL);
		}

		zone_magazine_unlock();

		for (i = 0; i < j; i++)
			_vmem_free(excess[i], ZoneSize);

		_vmem_free(z, ZoneSize);
	} else {
		zone_magazine_unlock();
	}
}

/*
 * _vmem_alloc()
 *
 *	Directly map memory in PAGE_SIZE'd chunks with the specified
 *	alignment.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	Size must be >= alignment.
 */
static void *
_vmem_alloc(size_t size, size_t align, int flags)
{
	char *addr;
	char *save;
	size_t excess;

	/*
	 * Map anonymous private memory.
	 */
	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return(NULL);

	/*
	 * Check alignment.  The misaligned offset is also the excess
	 * amount.  If misaligned unmap the excess so we have a chance of
	 * mapping at the next alignment point and recursively try again.
	 *
	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
	 *   xxxxxxxxx				final excess calculation
	 *   ^ returned address
	 */
	excess = (uintptr_t)addr & (align - 1);

	if (excess) {
		excess = align - excess;
		save = addr;

		munmap(save + excess, size - excess);
		addr = _vmem_alloc(size, align, flags);
		munmap(save, excess);
	}
	return((void *)addr);
}

/*
 * _vmem_free()
 *
 *	Free a chunk of memory allocated with _vmem_alloc()
 */
static void
_vmem_free(void *ptr, size_t size)
{
	munmap(ptr, size);
}

/*
 * Panic on fatal conditions
 */
static void
_mpanic(const char *ctl, ...)
{
	va_list va;

	if (malloc_panic == 0) {
		malloc_panic = 1;
		va_start(va, ctl);
		vfprintf(stderr, ctl, va);
		fprintf(stderr, "\n");
		fflush(stderr);
		va_end(va);
	}
	abort();
}
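
/*
 * Example usage of the MALLOC_OPTIONS tuning described at the top of this
 * file (shell invocations added for illustration; they are not part of the
 * original source):
 *
 *	MALLOC_OPTIONS=Z ./prog		# zero every new allocation
 *	MALLOC_OPTIONS=H ./prog		# hint unused pages to the kernel
 *	MALLOC_OPTIONS=U ktrace ./prog	# emit utrace records for mtrplay /
 *					# mtrgraph
 */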