1 /* 2 * NMALLOC.C - New Malloc (ported from kernel slab allocator) 3 * 4 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> and by 8 * Venkatesh Srinivas <me@endeavour.zapto.org>. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in 18 * the documentation and/or other materials provided with the 19 * distribution. 20 * 3. Neither the name of The DragonFly Project nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific, prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $ 38 */ 39 /* 40 * This module implements a slab allocator drop-in replacement for the 41 * libc malloc(). 42 * 43 * A slab allocator reserves a ZONE for each chunk size, then lays the 44 * chunks out in an array within the zone. Allocation and deallocation 45 * is nearly instantaneous, and overhead losses are limited to a fixed 46 * worst-case amount. 47 * 48 * The slab allocator does not have to pre-initialize the list of 49 * free chunks for each zone, and the underlying VM will not be 50 * touched at all beyond the zone header until an actual allocation 51 * needs it. 52 * 53 * Slab management and locking is done on a per-zone basis. 54 * 55 * Alloc Size Chunking Number of zones 56 * 0-127 8 16 57 * 128-255 16 8 58 * 256-511 32 8 59 * 512-1023 64 8 60 * 1024-2047 128 8 61 * 2048-4095 256 8 62 * 4096-8191 512 8 63 * 8192-16383 1024 8 64 * 16384-32767 2048 8 65 * 66 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table 67 * is used to locate for free. One and Two-page allocations use the 68 * zone mechanic to avoid excessive mmap()/munmap() calls. 69 * 70 * API FEATURES AND SIDE EFFECTS 71 * 72 * + power-of-2 sized allocations up to a page will be power-of-2 aligned. 73 * Above that power-of-2 sized allocations are page-aligned. Non 74 * power-of-2 sized allocations are aligned the same as the chunk 75 * size for their zone. 76 * + malloc(0) returns a special non-NULL value 77 * + ability to allocate arbitrarily large chunks of memory 78 * + realloc will reuse the passed pointer if possible, within the 79 * limitations of the zone chunking. 
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'. See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
 *	USENIX Technical Conference. USENIX Association.
 *
 * Oversized allocations employ the BIGCACHE mechanic whereby large
 * allocations may be handed significantly larger buffers, allowing them
 * to avoid mmap/munmap operations even through significant realloc()s.
 * The excess space is only trimmed if too many large allocations have been
 * given this treatment.
 *
 * TUNING
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 * 'U' / ['u']	Generate / do not generate utrace entries for ktrace(1)
 *		This will generate utrace events for all malloc,
 *		realloc, and free calls. There are tools (mtrplay) to
 *		replay an allocation pattern or to graph heap structure
 *		(mtrgraph) which can interpret these logs.
 * 'Z' / ['z']	Zero out / do not zero all allocations.
 *		Each new byte of memory allocated by malloc, realloc, or
 *		reallocf will be initialized to 0. This is intended for
 *		debugging and will affect performance negatively.
 * 'H' / ['h']	Pass a hint to the kernel about pages unused by the
 *		allocation functions.
 */

/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */

#include "libc_private.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <machine/atomic.h>

#include "spinlock.h"
#include "un-namespace.h"


void __free(void *);
void *__malloc(size_t);
void *__calloc(size_t, size_t);
void *__realloc(void *, size_t);
void *__aligned_alloc(size_t, size_t);
int __posix_memalign(void **, size_t, size_t);

/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc *next;	/* hash link */
	void	*base;		/* base pointer */
	u_long	active;		/* bytes active */
	u_long	bytes;		/* bytes allocated */
} *bigalloc_t;

/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_ZONE_SIZE	(64 * 1024)
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */

#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif

/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk *c_Next;
} *slchunk_t;

/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
struct slglobaldata;

typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;
	int		z_Flags;
	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
} *slzone_t;

typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];	/* linked list of zones NFree > 0 */
	int		JunkIndex;
} *slglobaldata_t;

#define SLZF_UNOTZEROD		0x0001

#define FASTSLABREALLOC		0x02

/*
 * Misc constants. Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 * not be larger than 64.
 */
#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)

/*
 * BIGCACHE caches oversized allocations. Note that a linear search is
 * performed, so do not make the cache too large.
 *
 * BIGCACHE will garbage-collect excess space when the excess exceeds the
 * specified value. A relatively large number should be used here because
 * garbage collection is expensive.
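 *
 * Illustrative example (added for clarity, not part of the original
 * comment): with the defaults below, freeing a 512KB bigalloc parks it in
 * a BIGCACHE slot instead of munmap()ing it; a later 300KB request can be
 * satisfied from that cached block, with the unused tail accounted for in
 * excess_alloc and trimmed by handle_excess_big() once the total excess
 * exceeds BIGCACHE_EXCESS.  Blocks larger than BIGCACHE_LIMIT are never
 * cached this way.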
236 */ 237 #define BIGCACHE 16 238 #define BIGCACHE_MASK (BIGCACHE - 1) 239 #define BIGCACHE_LIMIT (1024 * 1024) /* size limit */ 240 #define BIGCACHE_EXCESS (16 * 1024 * 1024) /* garbage collect */ 241 242 #define SAFLAG_ZERO 0x0001 243 #define SAFLAG_PASSIVE 0x0002 244 245 /* 246 * Thread control 247 */ 248 249 #define arysize(ary) (sizeof(ary)/sizeof((ary)[0])) 250 251 #define MASSERT(exp) do { if (__predict_false(!(exp))) \ 252 _mpanic("assertion: %s in %s", \ 253 #exp, __func__); \ 254 } while (0) 255 256 /* 257 * Magazines 258 */ 259 260 #define M_MAX_ROUNDS 64 261 #define M_ZONE_ROUNDS 64 262 #define M_LOW_ROUNDS 32 263 #define M_INIT_ROUNDS 8 264 #define M_BURST_FACTOR 8 265 #define M_BURST_NSCALE 2 266 267 #define M_BURST 0x0001 268 #define M_BURST_EARLY 0x0002 269 270 struct magazine { 271 SLIST_ENTRY(magazine) nextmagazine; 272 273 int flags; 274 int capacity; /* Max rounds in this magazine */ 275 int rounds; /* Current number of free rounds */ 276 int burst_factor; /* Number of blocks to prefill with */ 277 int low_factor; /* Free till low_factor from full mag */ 278 void *objects[M_MAX_ROUNDS]; 279 }; 280 281 SLIST_HEAD(magazinelist, magazine); 282 283 static spinlock_t zone_mag_lock; 284 static spinlock_t depot_spinlock; 285 static struct magazine zone_magazine = { 286 .flags = M_BURST | M_BURST_EARLY, 287 .capacity = M_ZONE_ROUNDS, 288 .rounds = 0, 289 .burst_factor = M_BURST_FACTOR, 290 .low_factor = M_LOW_ROUNDS 291 }; 292 293 #define MAGAZINE_FULL(mp) (mp->rounds == mp->capacity) 294 #define MAGAZINE_NOTFULL(mp) (mp->rounds < mp->capacity) 295 #define MAGAZINE_EMPTY(mp) (mp->rounds == 0) 296 #define MAGAZINE_NOTEMPTY(mp) (mp->rounds != 0) 297 298 /* 299 * Each thread will have a pair of magazines per size-class (NZONES) 300 * The loaded magazine will support immediate allocations, the previous 301 * magazine will either be full or empty and can be swapped at need 302 */ 303 typedef struct magazine_pair { 304 struct magazine *loaded; 305 struct magazine *prev; 306 } magazine_pair; 307 308 /* A depot is a collection of magazines for a single zone. */ 309 typedef struct magazine_depot { 310 struct magazinelist full; 311 struct magazinelist empty; 312 spinlock_t lock; 313 } magazine_depot; 314 315 typedef struct thr_mags { 316 magazine_pair mags[NZONES]; 317 struct magazine *newmag; 318 int init; 319 } thr_mags; 320 321 /* 322 * With this attribute set, do not require a function call for accessing 323 * this variable when the code is compiled -fPIC. 324 * 325 * Must be empty for libc_rtld (similar to __thread). 
326 */ 327 #ifdef __LIBC_RTLD 328 #define TLS_ATTRIBUTE 329 #else 330 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec"))) 331 #endif 332 333 static __thread thr_mags thread_mags TLS_ATTRIBUTE; 334 static pthread_key_t thread_mags_key; 335 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT; 336 static magazine_depot depots[NZONES]; 337 338 /* 339 * Fixed globals (not per-cpu) 340 */ 341 static const int ZoneSize = ZALLOC_ZONE_SIZE; 342 static const int ZoneLimit = ZALLOC_ZONE_LIMIT; 343 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE; 344 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1; 345 346 static int opt_madvise = 0; 347 static int opt_utrace = 0; 348 static int g_malloc_flags = 0; 349 static struct slglobaldata SLGlobalData; 350 static bigalloc_t bigalloc_array[BIGHSIZE]; 351 static spinlock_t bigspin_array[BIGXSIZE]; 352 static volatile void *bigcache_array[BIGCACHE]; /* atomic swap */ 353 static volatile size_t bigcache_size_array[BIGCACHE]; /* SMP races ok */ 354 static volatile int bigcache_index; /* SMP races ok */ 355 static int malloc_panic; 356 static size_t excess_alloc; /* excess big allocs */ 357 358 static void *_slaballoc(size_t size, int flags); 359 static void *_slabrealloc(void *ptr, size_t size); 360 static void _slabfree(void *ptr, int, bigalloc_t *); 361 static int _slabmemalign(void **memptr, size_t alignment, size_t size); 362 static void *_vmem_alloc(size_t bytes, size_t align, int flags); 363 static void _vmem_free(void *ptr, size_t bytes); 364 static void *magazine_alloc(struct magazine *, int *); 365 static int magazine_free(struct magazine *, void *); 366 static void *mtmagazine_alloc(int zi); 367 static int mtmagazine_free(int zi, void *); 368 static void mtmagazine_init(void); 369 static void mtmagazine_destructor(void *); 370 static slzone_t zone_alloc(int flags); 371 static void zone_free(void *z); 372 static void _mpanic(const char *ctl, ...) __printflike(1, 2); 373 static void malloc_init(void) __constructor(101); 374 375 struct nmalloc_utrace { 376 void *p; 377 size_t s; 378 void *r; 379 }; 380 381 #define UTRACE(a, b, c) \ 382 if (opt_utrace) { \ 383 struct nmalloc_utrace ut = { \ 384 .p = (a), \ 385 .s = (b), \ 386 .r = (c) \ 387 }; \ 388 utrace(&ut, sizeof(ut)); \ 389 } 390 391 static void 392 malloc_init(void) 393 { 394 const char *p = NULL; 395 396 if (issetugid() == 0) 397 p = getenv("MALLOC_OPTIONS"); 398 399 for (; p != NULL && *p != '\0'; p++) { 400 switch(*p) { 401 case 'u': opt_utrace = 0; break; 402 case 'U': opt_utrace = 1; break; 403 case 'h': opt_madvise = 0; break; 404 case 'H': opt_madvise = 1; break; 405 case 'z': g_malloc_flags = 0; break; 406 case 'Z': g_malloc_flags = SAFLAG_ZERO; break; 407 default: 408 break; 409 } 410 } 411 412 UTRACE((void *) -1, 0, NULL); 413 } 414 415 /* 416 * We have to install a handler for nmalloc thread teardowns when 417 * the thread is created. We cannot delay this because destructors in 418 * sophisticated userland programs can call malloc() for the first time 419 * during their thread exit. 420 * 421 * This routine is called directly from pthreads. 422 */ 423 void 424 _nmalloc_thr_init(void) 425 { 426 static int init_once; 427 thr_mags *tp; 428 429 /* 430 * Disallow mtmagazine operations until the mtmagazine is 431 * initialized. 
432 */ 433 tp = &thread_mags; 434 tp->init = -1; 435 436 if (init_once == 0) { 437 init_once = 1; 438 pthread_once(&thread_mags_once, mtmagazine_init); 439 } 440 pthread_setspecific(thread_mags_key, tp); 441 tp->init = 1; 442 } 443 444 void 445 _nmalloc_thr_prepfork(void) 446 { 447 if (__isthreaded) { 448 _SPINLOCK(&zone_mag_lock); 449 _SPINLOCK(&depot_spinlock); 450 } 451 } 452 453 void 454 _nmalloc_thr_parentfork(void) 455 { 456 if (__isthreaded) { 457 _SPINUNLOCK(&depot_spinlock); 458 _SPINUNLOCK(&zone_mag_lock); 459 } 460 } 461 462 void 463 _nmalloc_thr_childfork(void) 464 { 465 if (__isthreaded) { 466 _SPINUNLOCK(&depot_spinlock); 467 _SPINUNLOCK(&zone_mag_lock); 468 } 469 } 470 471 /* 472 * Thread locks. 473 */ 474 static __inline void 475 slgd_lock(slglobaldata_t slgd) 476 { 477 if (__isthreaded) 478 _SPINLOCK(&slgd->Spinlock); 479 } 480 481 static __inline void 482 slgd_unlock(slglobaldata_t slgd) 483 { 484 if (__isthreaded) 485 _SPINUNLOCK(&slgd->Spinlock); 486 } 487 488 static __inline void 489 depot_lock(magazine_depot *dp __unused) 490 { 491 if (__isthreaded) 492 _SPINLOCK(&depot_spinlock); 493 #if 0 494 if (__isthreaded) 495 _SPINLOCK(&dp->lock); 496 #endif 497 } 498 499 static __inline void 500 depot_unlock(magazine_depot *dp __unused) 501 { 502 if (__isthreaded) 503 _SPINUNLOCK(&depot_spinlock); 504 #if 0 505 if (__isthreaded) 506 _SPINUNLOCK(&dp->lock); 507 #endif 508 } 509 510 static __inline void 511 zone_magazine_lock(void) 512 { 513 if (__isthreaded) 514 _SPINLOCK(&zone_mag_lock); 515 } 516 517 static __inline void 518 zone_magazine_unlock(void) 519 { 520 if (__isthreaded) 521 _SPINUNLOCK(&zone_mag_lock); 522 } 523 524 static __inline void 525 swap_mags(magazine_pair *mp) 526 { 527 struct magazine *tmp; 528 tmp = mp->loaded; 529 mp->loaded = mp->prev; 530 mp->prev = tmp; 531 } 532 533 /* 534 * bigalloc hashing and locking support. 535 * 536 * Return an unmasked hash code for the passed pointer. 537 */ 538 static __inline int 539 _bigalloc_hash(void *ptr) 540 { 541 int hv; 542 543 hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^ 544 ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT)); 545 546 return(hv); 547 } 548 549 /* 550 * Lock the hash chain and return a pointer to its base for the specified 551 * address. 552 */ 553 static __inline bigalloc_t * 554 bigalloc_lock(void *ptr) 555 { 556 int hv = _bigalloc_hash(ptr); 557 bigalloc_t *bigp; 558 559 bigp = &bigalloc_array[hv & BIGHMASK]; 560 if (__isthreaded) 561 _SPINLOCK(&bigspin_array[hv & BIGXMASK]); 562 return(bigp); 563 } 564 565 /* 566 * Lock the hash chain and return a pointer to its base for the specified 567 * address. 568 * 569 * BUT, if the hash chain is empty, just return NULL and do not bother 570 * to lock anything. 571 */ 572 static __inline bigalloc_t * 573 bigalloc_check_and_lock(void *ptr) 574 { 575 int hv = _bigalloc_hash(ptr); 576 bigalloc_t *bigp; 577 578 bigp = &bigalloc_array[hv & BIGHMASK]; 579 if (*bigp == NULL) 580 return(NULL); 581 if (__isthreaded) { 582 _SPINLOCK(&bigspin_array[hv & BIGXMASK]); 583 } 584 return(bigp); 585 } 586 587 static __inline void 588 bigalloc_unlock(void *ptr) 589 { 590 int hv; 591 592 if (__isthreaded) { 593 hv = _bigalloc_hash(ptr); 594 _SPINUNLOCK(&bigspin_array[hv & BIGXMASK]); 595 } 596 } 597 598 /* 599 * Find a bigcache entry that might work for the allocation. SMP races are 600 * ok here except for the swap (that is, it is ok if bigcache_size_array[i] 601 * is wrong or if a NULL or too-small big is returned). 
 *
 * Generally speaking it is ok to find a large entry even if the bytes
 * requested are relatively small (but still oversized), because we really
 * don't know *what* the application is going to do with the buffer.
 */
static __inline
bigalloc_t
bigcache_find_alloc(size_t bytes)
{
	bigalloc_t big = NULL;
	size_t test;
	int i;

	for (i = 0; i < BIGCACHE; ++i) {
		test = bigcache_size_array[i];
		if (bytes <= test) {
			bigcache_size_array[i] = 0;
			big = atomic_swap_ptr(&bigcache_array[i], NULL);
			break;
		}
	}
	return big;
}

/*
 * Free a bigcache entry, possibly returning one that the caller really must
 * free.  This is used to cache recent oversized memory blocks.  Only
 * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
 * to collect the biggest ones we can that are under the limit.
 */
static __inline
bigalloc_t
bigcache_find_free(bigalloc_t big)
{
	int i;
	int j;
	int b;

	b = ++bigcache_index;
	for (i = 0; i < BIGCACHE; ++i) {
		j = (b + i) & BIGCACHE_MASK;
		if (bigcache_size_array[j] < big->bytes) {
			bigcache_size_array[j] = big->bytes;
			big = atomic_swap_ptr(&bigcache_array[j], big);
			break;
		}
	}
	return big;
}

static __inline
void
handle_excess_big(void)
{
	int i;
	bigalloc_t big;
	bigalloc_t *bigp;

	if (excess_alloc <= BIGCACHE_EXCESS)
		return;

	for (i = 0; i < BIGHSIZE; ++i) {
		bigp = &bigalloc_array[i];
		if (*bigp == NULL)
			continue;
		if (__isthreaded)
			_SPINLOCK(&bigspin_array[i & BIGXMASK]);
		for (big = *bigp; big; big = big->next) {
			if (big->active < big->bytes) {
				MASSERT((big->active & PAGE_MASK) == 0);
				MASSERT((big->bytes & PAGE_MASK) == 0);
				munmap((char *)big->base + big->active,
				       big->bytes - big->active);
				atomic_add_long(&excess_alloc,
						big->active - big->bytes);
				big->bytes = big->active;
			}
		}
		if (__isthreaded)
			_SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */

	/*
	 * This used to be 8-byte chunks and 16 zones for n < 128.
	 * However some instructions may require 16-byte alignment
	 * (aka SIMD) and programs might not request an aligned size
	 * (aka GCC-7), so change this as follows:
	 *
	 * 0-15 bytes	8-byte alignment in two zones	(0-1)
	 * 16-127 bytes	16-byte alignment in eight zones (3-10)
	 * zone index 2 and 11-15 are currently unused.
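 *
 * Worked example (illustrative, not part of the original comment):
 *
 *	size_t bytes = 100, chunking;
 *	int zi = zoneindex(&bytes, &chunking);
 *	// bytes is rounded up to 112, chunking == 16, zi == 112/16 + 2 == 9
 *
 *	bytes = 3000;
 *	zi = zoneindex(&bytes, &chunking);
 *	// bytes -> 3072, chunking == 256, zi == 3072/256 + 39 == 51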
	 */
	if (n < 16) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 2 zones */
		/* zones 0,1, zone 2 is unused */
	}
	if (n < 128) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 2);		/* 16 byte chunks, 8 zones */
		/* zones 3-10, zones 11-15 unused */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %zu", n);
	return(0);
}

/*
 * malloc() - call internal slab allocator
 */
void *
__malloc(size_t size)
{
	void *ptr;

	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, size, ptr);
	return(ptr);
}

#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))

/*
 * calloc() - call internal slab allocator
 */
void *
__calloc(size_t number, size_t size)
{
	void *ptr;

	if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    number > 0 && SIZE_MAX / number < size) {
		errno = ENOMEM;
		return(NULL);
	}

	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	else
		UTRACE(0, number * size, ptr);
	return(ptr);
}

/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
__realloc(void *ptr, size_t size)
{
	void *ret;
	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	else
		UTRACE(ptr, size, ret);
	return(ret);
}

/*
 * aligned_alloc()
 *
 * Allocate (size) bytes with an alignment of (alignment).
 */
void *
__aligned_alloc(size_t alignment, size_t size)
{
	void *ptr;
	int rc;

	ptr = NULL;
	rc = _slabmemalign(&ptr, alignment, size);
	if (rc)
		errno = rc;

	return (ptr);
}

/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 */
int
__posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int rc;

	/*
	 * OpenGroup spec issue 6 check
	 */
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	rc = _slabmemalign(memptr, alignment, size);

	return (rc);
}

/*
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.
We use the zoneindex mechanic to find a 869 * zone matching the requirements, and _vmem_alloc() otherwise. 870 */ 871 static int 872 _slabmemalign(void **memptr, size_t alignment, size_t size) 873 { 874 bigalloc_t *bigp; 875 bigalloc_t big; 876 size_t chunking; 877 int zi __unused; 878 879 if (alignment < 1) { 880 *memptr = NULL; 881 return(EINVAL); 882 } 883 884 /* 885 * OpenGroup spec issue 6 checks 886 */ 887 if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) { 888 *memptr = NULL; 889 return(EINVAL); 890 } 891 892 /* 893 * Our zone mechanism guarantees same-sized alignment for any 894 * power-of-2 allocation. If size is a power-of-2 and reasonable 895 * we can just call _slaballoc() and be done. We round size up 896 * to the nearest alignment boundary to improve our odds of 897 * it becoming a power-of-2 if it wasn't before. 898 */ 899 if (size <= alignment) 900 size = alignment; 901 else 902 size = (size + alignment - 1) & ~(size_t)(alignment - 1); 903 904 /* 905 * If we have overflowed above when rounding to the nearest alignment 906 * boundary, just return ENOMEM, size should be == N * sizeof(void *). 907 * 908 * Power-of-2 allocations up to 8KB will be aligned to the allocation 909 * size and _slaballoc() can simply be used. Please see line 1082 910 * for this special case: 'Align the storage in the zone based on 911 * the chunking' has a special case for powers of 2. 912 */ 913 if (size == 0) 914 return(ENOMEM); 915 916 if (size <= PAGE_SIZE*2 && (size | (size - 1)) + 1 == (size << 1)) { 917 *memptr = _slaballoc(size, 0); 918 return(*memptr ? 0 : ENOMEM); 919 } 920 921 /* 922 * Otherwise locate a zone with a chunking that matches 923 * the requested alignment, within reason. Consider two cases: 924 * 925 * (1) A 1K allocation on a 32-byte alignment. The first zoneindex 926 * we find will be the best fit because the chunking will be 927 * greater or equal to the alignment. 928 * 929 * (2) A 513 allocation on a 256-byte alignment. In this case 930 * the first zoneindex we find will be for 576 byte allocations 931 * with a chunking of 64, which is not sufficient. To fix this 932 * we simply find the nearest power-of-2 >= size and use the 933 * same side-effect of _slaballoc() which guarantees 934 * same-alignment on a power-of-2 allocation. 935 */ 936 if (size < PAGE_SIZE) { 937 zi = zoneindex(&size, &chunking); 938 if (chunking >= alignment) { 939 *memptr = _slaballoc(size, 0); 940 return(*memptr ? 0 : ENOMEM); 941 } 942 if (size >= 1024) 943 alignment = 1024; 944 if (size >= 16384) 945 alignment = 16384; 946 while (alignment < size) 947 alignment <<= 1; 948 *memptr = _slaballoc(alignment, 0); 949 return(*memptr ? 0 : ENOMEM); 950 } 951 952 /* 953 * If the slab allocator cannot handle it use vmem_alloc(). 954 * 955 * Alignment must be adjusted up to at least PAGE_SIZE in this case. 
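	 *
	 * Example of this path (illustrative, not from the original
	 * comment): posix_memalign(&p, 4096, 10000) first rounds size up
	 * to 12288; that is neither a power of 2 nor < PAGE_SIZE, so it
	 * falls through to here with alignment == PAGE_SIZE and is served
	 * by the bigalloc/_vmem_alloc() code below (possibly reusing a
	 * BIGCACHE entry).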
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	if (alignment == PAGE_SIZE && size <= BIGCACHE_LIMIT) {
		big = bigcache_find_alloc(size);
		if (big && big->bytes < size) {
			_slabfree(big->base, FASTSLABREALLOC, &big);
			big = NULL;
		}
		if (big) {
			*memptr = big->base;
			big->active = size;
			if (big->active < big->bytes) {
				atomic_add_long(&excess_alloc,
						big->bytes - big->active);
			}
			bigp = bigalloc_lock(*memptr);
			big->next = *bigp;
			*bigp = big;
			bigalloc_unlock(*memptr);
			handle_excess_big();
			return(0);
		}
	}
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->active = size;
	big->bytes = size;		/* no excess */
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}

/*
 * free() (SLAB ALLOCATOR) - do the obvious
 */
void
__free(void *ptr)
{
	UTRACE(ptr, 0, 0);
	_slabfree(ptr, 0, NULL);
}

/*
 * _slaballoc() (SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	size_t chunking;
	int zi;
	int off;
	void *obj;

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0)
		size = 1;

	/* Capture global flags */
	flags |= g_malloc_flags;

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on an SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 *
	 * NOTE: Please see posix_memalign around line 864, which assumes
	 *	 that power-of-2 allocations of PAGE_SIZE and PAGE_SIZE*2
	 *	 can use _slaballoc() and be aligned to the same.  The
	 *	 zone cache can be used for this case, bigalloc does not
	 *	 have to be used.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		bigalloc_t big;
		bigalloc_t *bigp;

		/*
		 * Page-align and cache-color in case of virtually indexed
		 * physically tagged L1 caches (aka SandyBridge).  No sweat
		 * otherwise, so just do it.
		 *
		 * (don't count as excess).
		 */
		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;

		/*
		 * If we have overflowed above when rounding to the page
		 * boundary, something has passed us (size_t)[-PAGE_MASK..-1]
		 * so just return NULL, size at this point should be >= 0.
		 */
		if (size == 0)
			return (NULL);

		if ((size & (PAGE_SIZE * 2 - 1)) == 0)
			size += PAGE_SIZE;

		/*
		 * Try to reuse a cached big block to avoid mmap'ing.  If it
		 * turns out not to fit our requirements we throw it away
		 * and allocate normally.
		 */
		big = NULL;
		if (size <= BIGCACHE_LIMIT) {
			big = bigcache_find_alloc(size);
			if (big && big->bytes < size) {
				_slabfree(big->base, FASTSLABREALLOC, &big);
				big = NULL;
			}
		}
		if (big) {
			chunk = big->base;
			if (flags & SAFLAG_ZERO)
				bzero(chunk, size);
		} else {
			chunk = _vmem_alloc(size, PAGE_SIZE, flags);
			if (chunk == NULL)
				return(NULL);

			big = _slaballoc(sizeof(struct bigalloc), 0);
			if (big == NULL) {
				_vmem_free(chunk, size);
				return(NULL);
			}
			big->base = chunk;
			big->bytes = size;
		}
		big->active = size;

		bigp = bigalloc_lock(chunk);
		if (big->active < big->bytes) {
			atomic_add_long(&excess_alloc,
					big->bytes - big->active);
		}
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);
		handle_excess_big();

		return(chunk);
	}

	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return (obj);
	}

	slgd = &SLGlobalData;
	slgd_lock(slgd);

	/*
	 * Attempt to allocate out of an existing zone.  If all zones are
	 * exhausted pull one off the free list or allocate a new one.
	 */
	if ((z = slgd->ZoneAry[zi]) == NULL) {
		z = zone_alloc(flags);
		if (z == NULL)
			goto fail;

		/*
		 * How big is the base structure?
		 */
		off = sizeof(struct slzone);

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
		 *
		 * NOTE: Please see posix_memalign around line 864-ish, which
		 *	 assumes that power-of-2 allocations of PAGE_SIZE
		 *	 and PAGE_SIZE*2 can use _slaballoc() and be aligned
		 *	 to the same.  The zone cache can be used for this
		 *	 case, bigalloc does not have to be used.
		 *
		 *	 ALL power-of-2 requests that fall through to this
		 *	 code use this rule (conditionals above limit this
		 *	 to <= PAGE_SIZE*2).
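		 *
		 *	 Example (illustrative, not part of the original
		 *	 comment): a fresh zone for 4096-byte chunks rounds
		 *	 off up to 4096, so every chunk in it is page
		 *	 aligned; a zone for 48-byte chunks only rounds off
		 *	 up to the 16-byte chunking, so 16-byte alignment is
		 *	 all that is guaranteed there.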
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = roundup2(off, size);
		else
			off = roundup2(off, chunking);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
				& (ZALLOC_MAX_ZONE_SIZE - 1);
	}

	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 *
	 * Remove us from the ZoneAry[] when we become empty
	 */
	MASSERT(z->z_NFree > 0);

	if (--z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
			MASSERT((uintptr_t)chunk & ZoneMask);
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			goto done;
		}
		++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
	 */
	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);

	if (++z->z_UIndex == z->z_NMax)
		z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex) {
		if (z->z_NFree != 0)
			_mpanic("slaballoc: corrupted zone");
	}

	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}

done:
	slgd_unlock(slgd);
	if (flags & SAFLAG_ZERO)
		bzero(chunk, size);
	return(chunk);
fail:
	slgd_unlock(slgd);
	return(NULL);
}

/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	void *nptr;
	slzone_t z;
	size_t chunking;

	if (ptr == NULL) {
		return(_slaballoc(size, 0));
	}

	if (size == 0)
		size = 1;

	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		bigalloc_t big;
		size_t bigbytes;

		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;

				/*
				 * If it already fits determine if it makes
				 * sense to shrink/reallocate.  Try to optimize
				 * programs which stupidly make incremental
				 * reallocations larger or smaller by scaling
				 * the allocation.  Also deal with potential
				 * coloring.
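				 *
				 * Illustrative example (not part of the
				 * original comment): shrinking a 65536-byte
				 * bigalloc to 40960 bytes keeps the same
				 * pointer, since 40960 lies between
				 * bigbytes/2 and bigbytes; only big->active
				 * and excess_alloc are adjusted.  Shrinking
				 * it to 16384 bytes instead falls through
				 * and reallocates.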
				 */
				if (size >= (bigbytes >> 1) &&
				    size <= bigbytes) {
					if (big->active != size) {
						atomic_add_long(&excess_alloc,
								big->active -
								size);
					}
					big->active = size;
					bigalloc_unlock(ptr);
					return(ptr);
				}

				/*
				 * For large reallocations, allocate more space
				 * than we need to try to avoid excessive
				 * reallocations later on.
				 */
				chunking = size + (size >> 3);
				chunking = (chunking + PAGE_MASK) &
					   ~(size_t)PAGE_MASK;

				/*
				 * Try to allocate adjacently in case the
				 * program is idiotically realloc()ing a
				 * huge memory block just slightly bigger.
				 * (llvm's llc tends to do this a lot).
				 *
				 * (MAP_TRYFIXED forces mmap to fail if there
				 * is already something at the address).
				 */
				if (chunking > bigbytes) {
					char *addr;
					int errno_save = errno;

					addr = mmap((char *)ptr + bigbytes,
						    chunking - bigbytes,
						    PROT_READ|PROT_WRITE,
						    MAP_PRIVATE|MAP_ANON|
						    MAP_TRYFIXED,
						    -1, 0);
					errno = errno_save;
					if (addr == (char *)ptr + bigbytes) {
						atomic_add_long(&excess_alloc,
								big->active -
								big->bytes +
								chunking -
								size);
						big->bytes = chunking;
						big->active = size;
						bigalloc_unlock(ptr);

						return(ptr);
					}
					MASSERT((void *)addr == MAP_FAILED);
				}

				/*
				 * Failed, unlink big and allocate fresh.
				 * (note that we have to leave (big) intact
				 * in case the slaballoc fails).
				 */
				*bigp = big->next;
				bigalloc_unlock(ptr);
				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink block */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);
					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				atomic_add_long(&excess_alloc, big->active -
							       big->bytes);
				_slabfree(ptr, FASTSLABREALLOC, &big);

				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
		handle_excess_big();
	}

	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size) {
			return(ptr);
		}
	}

	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * flags:
 *	FASTSLABREALLOC	Fast call from realloc, *rbigp already
 *			unlinked.
1435 * 1436 * MPSAFE 1437 */ 1438 static void 1439 _slabfree(void *ptr, int flags, bigalloc_t *rbigp) 1440 { 1441 slzone_t z; 1442 slchunk_t chunk; 1443 bigalloc_t big; 1444 bigalloc_t *bigp; 1445 slglobaldata_t slgd; 1446 size_t size; 1447 int zi; 1448 int pgno; 1449 1450 /* Fast realloc path for big allocations */ 1451 if (flags & FASTSLABREALLOC) { 1452 big = *rbigp; 1453 goto fastslabrealloc; 1454 } 1455 1456 /* 1457 * Handle NULL frees and special 0-byte allocations 1458 */ 1459 if (ptr == NULL) 1460 return; 1461 1462 /* 1463 * Handle oversized allocations. 1464 */ 1465 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) { 1466 while ((big = *bigp) != NULL) { 1467 if (big->base == ptr) { 1468 *bigp = big->next; 1469 atomic_add_long(&excess_alloc, big->active - 1470 big->bytes); 1471 bigalloc_unlock(ptr); 1472 1473 /* 1474 * Try to stash the block we are freeing, 1475 * potentially receiving another block in 1476 * return which must be freed. 1477 */ 1478 fastslabrealloc: 1479 if (big->bytes <= BIGCACHE_LIMIT) { 1480 big = bigcache_find_free(big); 1481 if (big == NULL) 1482 return; 1483 } 1484 ptr = big->base; /* reload */ 1485 size = big->bytes; 1486 _slabfree(big, 0, NULL); 1487 _vmem_free(ptr, size); 1488 return; 1489 } 1490 bigp = &big->next; 1491 } 1492 bigalloc_unlock(ptr); 1493 handle_excess_big(); 1494 } 1495 1496 /* 1497 * Zone case. Figure out the zone based on the fact that it is 1498 * ZoneSize aligned. 1499 */ 1500 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask); 1501 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1502 1503 size = z->z_ChunkSize; 1504 zi = z->z_ZoneIndex; 1505 1506 if (g_malloc_flags & SAFLAG_ZERO) 1507 bzero(ptr, size); 1508 1509 if (mtmagazine_free(zi, ptr) == 0) 1510 return; 1511 1512 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT; 1513 chunk = ptr; 1514 slgd = &SLGlobalData; 1515 slgd_lock(slgd); 1516 1517 /* 1518 * Add this free non-zero'd chunk to a linked list for reuse, adjust 1519 * z_FirstFreePg. 1520 */ 1521 chunk->c_Next = z->z_PageAry[pgno]; 1522 z->z_PageAry[pgno] = chunk; 1523 if (z->z_FirstFreePg > pgno) 1524 z->z_FirstFreePg = pgno; 1525 1526 /* 1527 * Bump the number of free chunks. If it becomes non-zero the zone 1528 * must be added back onto the appropriate list. 1529 */ 1530 if (z->z_NFree++ == 0) { 1531 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex]; 1532 slgd->ZoneAry[z->z_ZoneIndex] = z; 1533 } 1534 1535 /* 1536 * If the zone becomes totally free then release it. 1537 */ 1538 if (z->z_NFree == z->z_NMax) { 1539 slzone_t *pz; 1540 1541 pz = &slgd->ZoneAry[z->z_ZoneIndex]; 1542 while (z != *pz) 1543 pz = &(*pz)->z_Next; 1544 *pz = z->z_Next; 1545 z->z_Magic = -1; 1546 z->z_Next = NULL; 1547 zone_free(z); 1548 /* slgd lock released */ 1549 return; 1550 } 1551 slgd_unlock(slgd); 1552 } 1553 1554 /* 1555 * Allocate and return a magazine. NULL is returned and *burst is adjusted 1556 * if the magazine is empty. 
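 *
 * Usage sketch (illustrative, not part of the original comment): a magazine
 * is a small LIFO stack of object pointers,
 *
 *	obj = magazine_alloc(mp, &burst);   // pops objects[--mp->rounds]
 *	rc  = magazine_free(mp, obj);	    // pushes objects[mp->rounds++],
 *					    // or returns -1 if mp is full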
1557 */ 1558 static __inline void * 1559 magazine_alloc(struct magazine *mp, int *burst) 1560 { 1561 void *obj; 1562 1563 if (mp == NULL) 1564 return(NULL); 1565 if (MAGAZINE_NOTEMPTY(mp)) { 1566 obj = mp->objects[--mp->rounds]; 1567 return(obj); 1568 } 1569 1570 /* 1571 * Return burst factor to caller along with NULL 1572 */ 1573 if ((mp->flags & M_BURST) && (burst != NULL)) { 1574 *burst = mp->burst_factor; 1575 } 1576 /* Reduce burst factor by NSCALE; if it hits 1, disable BURST */ 1577 if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) && 1578 (burst != NULL)) { 1579 mp->burst_factor -= M_BURST_NSCALE; 1580 if (mp->burst_factor <= 1) { 1581 mp->burst_factor = 1; 1582 mp->flags &= ~(M_BURST); 1583 mp->flags &= ~(M_BURST_EARLY); 1584 } 1585 } 1586 return (NULL); 1587 } 1588 1589 static __inline int 1590 magazine_free(struct magazine *mp, void *p) 1591 { 1592 if (mp != NULL && MAGAZINE_NOTFULL(mp)) { 1593 mp->objects[mp->rounds++] = p; 1594 return 0; 1595 } 1596 1597 return -1; 1598 } 1599 1600 static void * 1601 mtmagazine_alloc(int zi) 1602 { 1603 thr_mags *tp; 1604 struct magazine *mp, *emptymag; 1605 magazine_depot *d; 1606 void *obj; 1607 1608 /* 1609 * Do not try to access per-thread magazines while the mtmagazine 1610 * is being initialized or destroyed. 1611 */ 1612 tp = &thread_mags; 1613 if (tp->init < 0) 1614 return(NULL); 1615 1616 /* 1617 * Primary per-thread allocation loop 1618 */ 1619 for (;;) { 1620 /* 1621 * If the loaded magazine has rounds, allocate and return 1622 */ 1623 mp = tp->mags[zi].loaded; 1624 obj = magazine_alloc(mp, NULL); 1625 if (obj) 1626 break; 1627 1628 /* 1629 * If the prev magazine is full, swap with the loaded 1630 * magazine and retry. 1631 */ 1632 mp = tp->mags[zi].prev; 1633 if (mp && MAGAZINE_FULL(mp)) { 1634 MASSERT(mp->rounds != 0); 1635 swap_mags(&tp->mags[zi]); /* prev now empty */ 1636 continue; 1637 } 1638 1639 /* 1640 * Try to get a full magazine from the depot. Cycle 1641 * through depot(full)->loaded->prev->depot(empty). 1642 * Retry if a full magazine was available from the depot. 1643 * 1644 * Return NULL (caller will fall through) if no magazines 1645 * can be found anywhere. 1646 */ 1647 d = &depots[zi]; 1648 depot_lock(d); 1649 emptymag = tp->mags[zi].prev; 1650 if (emptymag) 1651 SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine); 1652 tp->mags[zi].prev = tp->mags[zi].loaded; 1653 mp = SLIST_FIRST(&d->full); /* loaded magazine */ 1654 tp->mags[zi].loaded = mp; 1655 if (mp) { 1656 SLIST_REMOVE_HEAD(&d->full, nextmagazine); 1657 MASSERT(MAGAZINE_NOTEMPTY(mp)); 1658 depot_unlock(d); 1659 continue; 1660 } 1661 depot_unlock(d); 1662 break; 1663 } 1664 1665 return (obj); 1666 } 1667 1668 static int 1669 mtmagazine_free(int zi, void *ptr) 1670 { 1671 thr_mags *tp; 1672 struct magazine *mp, *loadedmag; 1673 magazine_depot *d; 1674 int rc = -1; 1675 1676 /* 1677 * Do not try to access per-thread magazines while the mtmagazine 1678 * is being initialized or destroyed. 1679 */ 1680 tp = &thread_mags; 1681 if (tp->init < 0) 1682 return(-1); 1683 1684 /* 1685 * Primary per-thread freeing loop 1686 */ 1687 for (;;) { 1688 /* 1689 * Make sure a new magazine is available in case we have 1690 * to use it. Staging the newmag allows us to avoid 1691 * some locking/reentrancy complexity. 1692 * 1693 * Temporarily disable the per-thread caches for this 1694 * allocation to avoid reentrancy and/or to avoid a 1695 * stack overflow if the [zi] happens to be the same that 1696 * would be used to allocate the new magazine. 
1697 */ 1698 if (tp->newmag == NULL) { 1699 tp->init = -1; 1700 tp->newmag = _slaballoc(sizeof(struct magazine), 1701 SAFLAG_ZERO); 1702 tp->init = 1; 1703 if (tp->newmag == NULL) { 1704 rc = -1; 1705 break; 1706 } 1707 } 1708 1709 /* 1710 * If the loaded magazine has space, free directly to it 1711 */ 1712 rc = magazine_free(tp->mags[zi].loaded, ptr); 1713 if (rc == 0) 1714 break; 1715 1716 /* 1717 * If the prev magazine is empty, swap with the loaded 1718 * magazine and retry. 1719 */ 1720 mp = tp->mags[zi].prev; 1721 if (mp && MAGAZINE_EMPTY(mp)) { 1722 MASSERT(mp->rounds == 0); 1723 swap_mags(&tp->mags[zi]); /* prev now full */ 1724 continue; 1725 } 1726 1727 /* 1728 * Try to get an empty magazine from the depot. Cycle 1729 * through depot(empty)->loaded->prev->depot(full). 1730 * Retry if an empty magazine was available from the depot. 1731 */ 1732 d = &depots[zi]; 1733 depot_lock(d); 1734 1735 if ((loadedmag = tp->mags[zi].prev) != NULL) 1736 SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine); 1737 tp->mags[zi].prev = tp->mags[zi].loaded; 1738 mp = SLIST_FIRST(&d->empty); 1739 if (mp) { 1740 tp->mags[zi].loaded = mp; 1741 SLIST_REMOVE_HEAD(&d->empty, nextmagazine); 1742 MASSERT(MAGAZINE_NOTFULL(mp)); 1743 } else { 1744 mp = tp->newmag; 1745 tp->newmag = NULL; 1746 mp->capacity = M_MAX_ROUNDS; 1747 mp->rounds = 0; 1748 mp->flags = 0; 1749 tp->mags[zi].loaded = mp; 1750 } 1751 depot_unlock(d); 1752 } 1753 1754 return rc; 1755 } 1756 1757 static void 1758 mtmagazine_init(void) 1759 { 1760 int error; 1761 1762 error = pthread_key_create(&thread_mags_key, mtmagazine_destructor); 1763 if (error) 1764 abort(); 1765 } 1766 1767 /* 1768 * This function is only used by the thread exit destructor 1769 */ 1770 static void 1771 mtmagazine_drain(struct magazine *mp) 1772 { 1773 void *obj; 1774 1775 while (MAGAZINE_NOTEMPTY(mp)) { 1776 obj = magazine_alloc(mp, NULL); 1777 _slabfree(obj, 0, NULL); 1778 } 1779 } 1780 1781 /* 1782 * mtmagazine_destructor() 1783 * 1784 * When a thread exits, we reclaim all its resources; all its magazines are 1785 * drained and the structures are freed. 1786 * 1787 * WARNING! The destructor can be called multiple times if the larger user 1788 * program has its own destructors which run after ours which 1789 * allocate or free memory. 1790 */ 1791 static void 1792 mtmagazine_destructor(void *thrp) 1793 { 1794 thr_mags *tp = thrp; 1795 struct magazine *mp; 1796 int i; 1797 1798 /* 1799 * Prevent further use of mtmagazines while we are destructing 1800 * them, as well as for any destructors which are run after us 1801 * prior to the thread actually being destroyed. 1802 */ 1803 tp->init = -1; 1804 1805 for (i = 0; i < NZONES; i++) { 1806 mp = tp->mags[i].loaded; 1807 tp->mags[i].loaded = NULL; 1808 if (mp) { 1809 if (MAGAZINE_NOTEMPTY(mp)) 1810 mtmagazine_drain(mp); 1811 _slabfree(mp, 0, NULL); 1812 } 1813 1814 mp = tp->mags[i].prev; 1815 tp->mags[i].prev = NULL; 1816 if (mp) { 1817 if (MAGAZINE_NOTEMPTY(mp)) 1818 mtmagazine_drain(mp); 1819 _slabfree(mp, 0, NULL); 1820 } 1821 } 1822 1823 if (tp->newmag) { 1824 mp = tp->newmag; 1825 tp->newmag = NULL; 1826 _slabfree(mp, 0, NULL); 1827 } 1828 } 1829 1830 /* 1831 * zone_alloc() 1832 * 1833 * Attempt to allocate a zone from the zone magazine; the zone magazine has 1834 * M_BURST_EARLY enabled, so honor the burst request from the magazine. 
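 *
 * Illustrative behaviour (inferred from the constants above, not from the
 * original comment): with M_BURST_FACTOR == 8, the first time the zone
 * magazine runs dry zone_alloc() maps 8 zones in one _vmem_alloc() call,
 * keeps one and parks the remaining 7 in the magazine; because
 * M_BURST_EARLY is set, the burst factor then decays by M_BURST_NSCALE on
 * each subsequent miss until it reaches 1 and bursting is disabled.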
1835 */ 1836 static slzone_t 1837 zone_alloc(int flags) 1838 { 1839 slglobaldata_t slgd = &SLGlobalData; 1840 int burst = 1; 1841 int i, j; 1842 slzone_t z; 1843 1844 zone_magazine_lock(); 1845 slgd_unlock(slgd); 1846 1847 z = magazine_alloc(&zone_magazine, &burst); 1848 if (z == NULL && burst == 1) { 1849 zone_magazine_unlock(); 1850 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags); 1851 } else if (z == NULL) { 1852 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags); 1853 if (z) { 1854 for (i = 1; i < burst; i++) { 1855 j = magazine_free(&zone_magazine, 1856 (char *) z + (ZoneSize * i)); 1857 MASSERT(j == 0); 1858 } 1859 } 1860 zone_magazine_unlock(); 1861 } else { 1862 z->z_Flags |= SLZF_UNOTZEROD; 1863 zone_magazine_unlock(); 1864 } 1865 slgd_lock(slgd); 1866 return z; 1867 } 1868 1869 /* 1870 * zone_free() 1871 * 1872 * Release a zone and unlock the slgd lock. 1873 */ 1874 static void 1875 zone_free(void *z) 1876 { 1877 slglobaldata_t slgd = &SLGlobalData; 1878 void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {}; 1879 int i, j; 1880 1881 zone_magazine_lock(); 1882 slgd_unlock(slgd); 1883 1884 bzero(z, sizeof(struct slzone)); 1885 1886 if (opt_madvise) 1887 madvise(z, ZoneSize, MADV_FREE); 1888 1889 i = magazine_free(&zone_magazine, z); 1890 1891 /* 1892 * If we failed to free, collect excess magazines; release the zone 1893 * magazine lock, and then free to the system via _vmem_free. Re-enable 1894 * BURST mode for the magazine. 1895 */ 1896 if (i == -1) { 1897 j = zone_magazine.rounds - zone_magazine.low_factor; 1898 for (i = 0; i < j; i++) { 1899 excess[i] = magazine_alloc(&zone_magazine, NULL); 1900 MASSERT(excess[i] != NULL); 1901 } 1902 1903 zone_magazine_unlock(); 1904 1905 for (i = 0; i < j; i++) 1906 _vmem_free(excess[i], ZoneSize); 1907 1908 _vmem_free(z, ZoneSize); 1909 } else { 1910 zone_magazine_unlock(); 1911 } 1912 } 1913 1914 /* 1915 * _vmem_alloc() 1916 * 1917 * Directly map memory in PAGE_SIZE'd chunks with the specified 1918 * alignment. 1919 * 1920 * Alignment must be a multiple of PAGE_SIZE. 1921 * 1922 * Size must be >= alignment. 1923 */ 1924 static void * 1925 _vmem_alloc(size_t size, size_t align, int flags) 1926 { 1927 char *addr; 1928 char *save; 1929 size_t excess; 1930 1931 /* 1932 * Map anonymous private memory. 1933 */ 1934 addr = mmap(NULL, size, PROT_READ|PROT_WRITE, 1935 MAP_PRIVATE|MAP_ANON, -1, 0); 1936 if (addr == MAP_FAILED) 1937 return(NULL); 1938 1939 /* 1940 * Check alignment. The misaligned offset is also the excess 1941 * amount. If misaligned unmap the excess so we have a chance of 1942 * mapping at the next alignment point and recursively try again. 1943 * 1944 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB block alignment 1945 * aaaaaaaaa aaaaaaaaaaa aa mis-aligned allocation 1946 * xxxxxxxxx final excess calculation 1947 * ^ returned address 1948 */ 1949 excess = (uintptr_t)addr & (align - 1); 1950 1951 if (excess) { 1952 excess = align - excess; 1953 save = addr; 1954 1955 munmap(save + excess, size - excess); 1956 addr = _vmem_alloc(size, align, flags); 1957 munmap(save, excess); 1958 } 1959 return((void *)addr); 1960 } 1961 1962 /* 1963 * _vmem_free() 1964 * 1965 * Free a chunk of memory allocated with _vmem_alloc() 1966 */ 1967 static void 1968 _vmem_free(void *ptr, size_t size) 1969 { 1970 munmap(ptr, size); 1971 } 1972 1973 /* 1974 * Panic on fatal conditions 1975 */ 1976 static void 1977 _mpanic(const char *ctl, ...) 
1978 { 1979 va_list va; 1980 1981 if (malloc_panic == 0) { 1982 malloc_panic = 1; 1983 va_start(va, ctl); 1984 vfprintf(stderr, ctl, va); 1985 fprintf(stderr, "\n"); 1986 fflush(stderr); 1987 va_end(va); 1988 } 1989 abort(); 1990 } 1991 1992 __weak_reference(__aligned_alloc, aligned_alloc); 1993 __weak_reference(__malloc, malloc); 1994 __weak_reference(__calloc, calloc); 1995 __weak_reference(__posix_memalign, posix_memalign); 1996 __weak_reference(__realloc, realloc); 1997 __weak_reference(__free, free); 1998
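
/*
 * Minimal usage sketch (illustrative only; not compiled into the library).
 * It shows how an application would exercise the entry points above.  The
 * MALLOC_OPTIONS string and the sizes/alignment are arbitrary examples.
 */
#if 0
#include <stdlib.h>

int
main(void)
{
	/* e.g. run as: env MALLOC_OPTIONS=ZH ./a.out */
	void *p = malloc(100);			/* small zone allocation */
	void *q = NULL;

	p = realloc(p, 200);			/* may move to a new chunk */
	if (posix_memalign(&q, 64, 1000) == 0)	/* 64-byte aligned */
		free(q);
	free(p);
	return 0;
}
#endif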