1 /* 2 * KERN_SLABALLOC.C - Kernel SLAB memory allocator 3 * 4 * Copyright (c) 2003,2004,2010-2019 The DragonFly Project. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Matthew Dillon <dillon@backplane.com> 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in 18 * the documentation and/or other materials provided with the 19 * distribution. 20 * 3. Neither the name of The DragonFly Project nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific, prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * This module implements a slab allocator drop-in replacement for the 38 * kernel malloc(). 39 * 40 * A slab allocator reserves a ZONE for each chunk size, then lays the 41 * chunks out in an array within the zone. Allocation and deallocation 42 * are nearly instantaneous, and fragmentation/overhead losses are limited 43 * to a fixed worst-case amount. 44 * 45 * The downside of this slab implementation is in the chunk size 46 * multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu. 47 * In a kernel implementation all this memory will be physical so 48 * the zone size is adjusted downward on machines with less physical 49 * memory. The upside is that overhead is bounded... this is the *worst* 50 * case overhead. 51 * 52 * Slab management is done on a per-cpu basis and no locking or mutexes 53 * are required, only a critical section. When one cpu frees memory 54 * belonging to another cpu's slab manager an asynchronous IPI message 55 * will be queued to execute the operation. In addition, both the 56 * high level slab allocator and the low level zone allocator optimize 57 * M_ZERO requests, and the slab allocator does not have to pre-initialize 58 * the linked list of chunks. 59 * 60 * XXX Balancing is needed between cpus. Balance will be handled through 61 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks. 62 * 63 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of 64 * the new zone should be restricted to M_USE_RESERVE requests only.
65 * 66 * Alloc Size Chunking Number of zones 67 * 0-127 8 16 68 * 128-255 16 8 69 * 256-511 32 8 70 * 512-1023 64 8 71 * 1024-2047 128 8 72 * 2048-4095 256 8 73 * 4096-8191 512 8 74 * 8192-16383 1024 8 75 * 16384-32767 2048 8 76 * (if PAGE_SIZE is 4K the maximum zone allocation is 16383) 77 * 78 * Allocations >= ZoneLimit go directly to kmem. 79 * (n * PAGE_SIZE, n > 2) allocations go directly to kmem. 80 * 81 * Alignment properties: 82 * - All power-of-2 sized allocations are power-of-2 aligned. 83 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest 84 * power-of-2 round up of 'size'. 85 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the 86 * above table 'Chunking' column). 87 * 88 * API REQUIREMENTS AND SIDE EFFECTS 89 * 90 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we 91 * have remained compatible with the following API requirements: 92 * 93 * + malloc(0) is allowed and returns non-NULL (ahc driver) 94 * + ability to allocate arbitrarily large chunks of memory 95 */ 96 97 #include "opt_vm.h" 98 99 #include <sys/param.h> 100 #include <sys/systm.h> 101 #include <sys/kernel.h> 102 #include <sys/slaballoc.h> 103 #include <sys/mbuf.h> 104 #include <sys/vmmeter.h> 105 #include <sys/lock.h> 106 #include <sys/thread.h> 107 #include <sys/globaldata.h> 108 #include <sys/sysctl.h> 109 #include <sys/ktr.h> 110 111 #include <vm/vm.h> 112 #include <vm/vm_param.h> 113 #include <vm/vm_kern.h> 114 #include <vm/vm_extern.h> 115 #include <vm/vm_object.h> 116 #include <vm/pmap.h> 117 #include <vm/vm_map.h> 118 #include <vm/vm_page.h> 119 #include <vm/vm_pageout.h> 120 121 #include <machine/cpu.h> 122 123 #include <sys/thread2.h> 124 #include <vm/vm_page2.h> 125 126 #if (__VM_CACHELINE_SIZE == 32) 127 #define CAN_CACHEALIGN(sz) ((sz) >= 256) 128 #elif (__VM_CACHELINE_SIZE == 64) 129 #define CAN_CACHEALIGN(sz) ((sz) >= 512) 130 #elif (__VM_CACHELINE_SIZE == 128) 131 #define CAN_CACHEALIGN(sz) ((sz) >= 1024) 132 #else 133 #error "unsupported cacheline size" 134 #endif 135 136 #define btokup(z) (&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt) 137 138 #define MEMORY_STRING "ptr=%p type=%p size=%lu flags=%04x" 139 #define MEMORY_ARGS void *ptr, void *type, unsigned long size, int flags 140 141 #if !defined(KTR_MEMORY) 142 #define KTR_MEMORY KTR_ALL 143 #endif 144 KTR_INFO_MASTER(memory); 145 KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin"); 146 KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS); 147 KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS); 148 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS); 149 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS); 150 KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS); 151 KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS); 152 KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS); 153 KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS); 154 KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin"); 155 KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end"); 156 157 #define logmemory(name, ptr, type, size, flags) \ 158 KTR_LOG(memory_ ## name, ptr, type, size, flags) 159 #define logmemory_quick(name) \ 160 KTR_LOG(memory_ ## name) 161 162 /* 163 * Fixed globals (not per-cpu) 164 */ 165 static int ZoneSize; 166 static int ZoneLimit; 167 static int ZonePageCount; 168 static uintptr_t ZoneMask; 169 static int ZoneBigAlloc; /* in 
KB */ 170 static int ZoneGenAlloc; /* in KB */ 171 struct malloc_type *kmemstatistics; /* exported to vmstat */ 172 #ifdef INVARIANTS 173 static int32_t weirdary[16]; 174 #endif 175 176 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags); 177 static void kmem_slab_free(void *ptr, vm_size_t bytes); 178 179 #if defined(INVARIANTS) 180 static void chunk_mark_allocated(SLZone *z, void *chunk); 181 static void chunk_mark_free(SLZone *z, void *chunk); 182 #else 183 #define chunk_mark_allocated(z, chunk) 184 #define chunk_mark_free(z, chunk) 185 #endif 186 187 /* 188 * Misc constants. Note that allocations that are exact multiples of 189 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module. 190 */ 191 #define ZONE_RELS_THRESH 32 /* threshold number of zones */ 192 193 #ifdef INVARIANTS 194 /* 195 * The WEIRD_ADDR is used as known text to copy into free objects to 196 * try to create deterministic failure cases if the data is accessed after 197 * free. 198 */ 199 #define WEIRD_ADDR 0xdeadc0de 200 #endif 201 #define ZERO_LENGTH_PTR ((void *)-8) 202 203 /* 204 * Misc global malloc buckets 205 */ 206 207 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); 208 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); 209 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); 210 MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations"); 211 212 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); 213 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); 214 215 /* 216 * Initialize the slab memory allocator. We have to choose a zone size based 217 * on available physical memory. We choose a zone side which is approximately 218 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of 219 * 128K. The zone size is limited to the bounds set in slaballoc.h 220 * (typically 32K min, 128K max). 221 */ 222 static void kmeminit(void *dummy); 223 224 char *ZeroPage; 225 226 SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL); 227 228 #ifdef INVARIANTS 229 /* 230 * If enabled any memory allocated without M_ZERO is initialized to -1. 231 */ 232 static int use_malloc_pattern; 233 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW, 234 &use_malloc_pattern, 0, 235 "Initialize memory to -1 if M_ZERO not specified"); 236 #endif 237 238 static int ZoneRelsThresh = ZONE_RELS_THRESH; 239 SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, ""); 240 SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, ""); 241 SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, ""); 242 static long SlabsAllocated; 243 static long SlabsFreed; 244 SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD, 245 &SlabsAllocated, 0, ""); 246 SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD, 247 &SlabsFreed, 0, ""); 248 static int SlabFreeToTail; 249 SYSCTL_INT(_kern, OID_AUTO, slab_freetotail, CTLFLAG_RW, 250 &SlabFreeToTail, 0, ""); 251 252 static struct spinlock kmemstat_spin = 253 SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit"); 254 255 /* 256 * Returns the kernel memory size limit for the purposes of initializing 257 * various subsystem caches. The smaller of available memory and the KVM 258 * memory space is returned. 259 * 260 * The size in megabytes is returned. 
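 *
 * For illustration (a sketch only, assuming the typical 32K..128K zone
 * bounds and 4K pages mentioned elsewhere in this file): on any machine
 * with more than a few hundred MB of ram, kmeminit() below walks
 * ZoneSize up to ZALLOC_MAX_ZONE_SIZE, giving roughly
 *
 *	ZoneSize      = 131072		(128K)
 *	ZoneLimit     = 16384		(128K / 4, capped at ZALLOC_ZONE_LIMIT)
 *	ZoneMask      = ~(uintptr_t)0x1FFFF
 *	ZonePageCount = 32		(131072 / 4096)
 *
 * so a chunk pointer can be converted back to its zone header with a
 * simple (ptr & ZoneMask), which kfree() and krealloc() rely on.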
261 */ 262 size_t 263 kmem_lim_size(void) 264 { 265 size_t limsize; 266 267 limsize = (size_t)vmstats.v_page_count * PAGE_SIZE; 268 if (limsize > KvaSize) 269 limsize = KvaSize; 270 return (limsize / (1024 * 1024)); 271 } 272 273 static void 274 kmeminit(void *dummy) 275 { 276 size_t limsize; 277 int usesize; 278 #ifdef INVARIANTS 279 int i; 280 #endif 281 282 limsize = kmem_lim_size(); 283 usesize = (int)(limsize * 1024); /* convert to KB */ 284 285 /* 286 * If the machine has a large KVM space and more than 8G of ram, 287 * double the zone release threshold to reduce SMP invalidations. 288 * If more than 16G of ram, do it again. 289 * 290 * The BIOS eats a little ram so add some slop. We want 8G worth of 291 * memory sticks to trigger the first adjustment. 292 */ 293 if (ZoneRelsThresh == ZONE_RELS_THRESH) { 294 if (limsize >= 7 * 1024) 295 ZoneRelsThresh *= 2; 296 if (limsize >= 15 * 1024) 297 ZoneRelsThresh *= 2; 298 } 299 300 /* 301 * Calculate the zone size. This typically calculates to 302 * ZALLOC_MAX_ZONE_SIZE 303 */ 304 ZoneSize = ZALLOC_MIN_ZONE_SIZE; 305 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize) 306 ZoneSize <<= 1; 307 ZoneLimit = ZoneSize / 4; 308 if (ZoneLimit > ZALLOC_ZONE_LIMIT) 309 ZoneLimit = ZALLOC_ZONE_LIMIT; 310 ZoneMask = ~(uintptr_t)(ZoneSize - 1); 311 ZonePageCount = ZoneSize / PAGE_SIZE; 312 313 #ifdef INVARIANTS 314 for (i = 0; i < NELEM(weirdary); ++i) 315 weirdary[i] = WEIRD_ADDR; 316 #endif 317 318 ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO); 319 320 if (bootverbose) 321 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024); 322 } 323 324 /* 325 * (low level) Initialize slab-related elements in the globaldata structure. 326 * 327 * Occurs after kmeminit(). 328 */ 329 void 330 slab_gdinit(globaldata_t gd) 331 { 332 SLGlobalData *slgd; 333 int i; 334 335 slgd = &gd->gd_slab; 336 for (i = 0; i < NZONES; ++i) 337 TAILQ_INIT(&slgd->ZoneAry[i]); 338 TAILQ_INIT(&slgd->FreeZones); 339 TAILQ_INIT(&slgd->FreeOvZones); 340 } 341 342 /* 343 * Initialize a malloc type tracking structure. 344 */ 345 void 346 malloc_init(void *data) 347 { 348 struct malloc_type *type = data; 349 size_t limsize; 350 351 if (type->ks_magic != M_MAGIC) 352 panic("malloc type lacks magic"); 353 354 if (type->ks_limit != 0) 355 return; 356 357 if (vmstats.v_page_count == 0) 358 panic("malloc_init not allowed before vm init"); 359 360 limsize = kmem_lim_size() * (1024 * 1024); 361 type->ks_limit = limsize / 10; 362 363 spin_lock(&kmemstat_spin); 364 type->ks_next = kmemstatistics; 365 kmemstatistics = type; 366 spin_unlock(&kmemstat_spin); 367 } 368 369 void 370 malloc_uninit(void *data) 371 { 372 struct malloc_type *type = data; 373 struct malloc_type *t; 374 #ifdef INVARIANTS 375 int i; 376 long ttl; 377 #endif 378 379 if (type->ks_magic != M_MAGIC) 380 panic("malloc type lacks magic"); 381 382 if (vmstats.v_page_count == 0) 383 panic("malloc_uninit not allowed before vm init"); 384 385 if (type->ks_limit == 0) 386 panic("malloc_uninit on uninitialized type"); 387 388 /* Make sure that all pending kfree()s are finished. */ 389 lwkt_synchronize_ipiqs("muninit"); 390 391 #ifdef INVARIANTS 392 /* 393 * memuse is only correct in aggregation. Due to memory being allocated 394 * on one cpu and freed on another individual array entries may be 395 * negative or positive (canceling each other out). 
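 *
 * For example (numbers are illustrative only): memory kmalloc()'d on
 * cpu 0 but kfree()'d on cpu 1 shows up as +N in ks_use[0].memuse and
 * -N in ks_use[1].memuse.  Only the sum across all cpus is meaningful,
 * which is what the loop below computes before complaining.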
396 */ 397 for (i = ttl = 0; i < ncpus; ++i) 398 ttl += type->ks_use[i].memuse; 399 if (ttl) { 400 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n", 401 ttl, type->ks_shortdesc, i); 402 } 403 #endif 404 spin_lock(&kmemstat_spin); 405 if (type == kmemstatistics) { 406 kmemstatistics = type->ks_next; 407 } else { 408 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { 409 if (t->ks_next == type) { 410 t->ks_next = type->ks_next; 411 break; 412 } 413 } 414 } 415 type->ks_next = NULL; 416 type->ks_limit = 0; 417 spin_unlock(&kmemstat_spin); 418 } 419 420 /* 421 * Increase the kmalloc pool limit for the specified pool. No changes 422 * are the made if the pool would shrink. 423 */ 424 void 425 kmalloc_raise_limit(struct malloc_type *type, size_t bytes) 426 { 427 if (type->ks_limit == 0) 428 malloc_init(type); 429 if (bytes == 0) 430 bytes = KvaSize; 431 if (type->ks_limit < bytes) 432 type->ks_limit = bytes; 433 } 434 435 void 436 kmalloc_set_unlimited(struct malloc_type *type) 437 { 438 type->ks_limit = kmem_lim_size() * (1024 * 1024); 439 } 440 441 /* 442 * Dynamically create a malloc pool. This function is a NOP if *typep is 443 * already non-NULL. 444 */ 445 void 446 kmalloc_create(struct malloc_type **typep, const char *descr) 447 { 448 struct malloc_type *type; 449 450 if (*typep == NULL) { 451 type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO); 452 type->ks_magic = M_MAGIC; 453 type->ks_shortdesc = descr; 454 malloc_init(type); 455 *typep = type; 456 } 457 } 458 459 /* 460 * Destroy a dynamically created malloc pool. This function is a NOP if 461 * the pool has already been destroyed. 462 */ 463 void 464 kmalloc_destroy(struct malloc_type **typep) 465 { 466 if (*typep != NULL) { 467 malloc_uninit(*typep); 468 kfree(*typep, M_TEMP); 469 *typep = NULL; 470 } 471 } 472 473 /* 474 * Calculate the zone index for the allocation request size and set the 475 * allocation request size to that particular zone's chunk size. 
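 *
 * Worked examples (a sketch only; they simply apply the rounding rules
 * from the chunking table at the top of this file):
 *
 *	zoneindex(&size, &align) with size 100  -> size 104,  align 8,   zi 12
 *	zoneindex(&size, &align) with size 300  -> size 320,  align 32,  zi 25
 *	zoneindex(&size, &align) with size 3000 -> size 3072, align 256, zi 51
 *
 * The caller gets back both the rounded-up chunk size and the index
 * into the per-cpu ZoneAry[] for that chunk size.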
476 */ 477 static __inline int 478 zoneindex(unsigned long *bytes, unsigned long *align) 479 { 480 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */ 481 482 if (n < 128) { 483 *bytes = n = (n + 7) & ~7; 484 *align = 8; 485 return(n / 8 - 1); /* 8 byte chunks, 16 zones */ 486 } 487 if (n < 256) { 488 *bytes = n = (n + 15) & ~15; 489 *align = 16; 490 return(n / 16 + 7); 491 } 492 if (n < 8192) { 493 if (n < 512) { 494 *bytes = n = (n + 31) & ~31; 495 *align = 32; 496 return(n / 32 + 15); 497 } 498 if (n < 1024) { 499 *bytes = n = (n + 63) & ~63; 500 *align = 64; 501 return(n / 64 + 23); 502 } 503 if (n < 2048) { 504 *bytes = n = (n + 127) & ~127; 505 *align = 128; 506 return(n / 128 + 31); 507 } 508 if (n < 4096) { 509 *bytes = n = (n + 255) & ~255; 510 *align = 256; 511 return(n / 256 + 39); 512 } 513 *bytes = n = (n + 511) & ~511; 514 *align = 512; 515 return(n / 512 + 47); 516 } 517 #if ZALLOC_ZONE_LIMIT > 8192 518 if (n < 16384) { 519 *bytes = n = (n + 1023) & ~1023; 520 *align = 1024; 521 return(n / 1024 + 55); 522 } 523 #endif 524 #if ZALLOC_ZONE_LIMIT > 16384 525 if (n < 32768) { 526 *bytes = n = (n + 2047) & ~2047; 527 *align = 2048; 528 return(n / 2048 + 63); 529 } 530 #endif 531 panic("Unexpected byte count %d", n); 532 return(0); 533 } 534 535 static __inline void 536 clean_zone_rchunks(SLZone *z) 537 { 538 SLChunk *bchunk; 539 540 while ((bchunk = z->z_RChunks) != NULL) { 541 cpu_ccfence(); 542 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) { 543 *z->z_LChunksp = bchunk; 544 while (bchunk) { 545 chunk_mark_free(z, bchunk); 546 z->z_LChunksp = &bchunk->c_Next; 547 bchunk = bchunk->c_Next; 548 ++z->z_NFree; 549 } 550 break; 551 } 552 /* retry */ 553 } 554 } 555 556 /* 557 * If the zone becomes totally free and is not the only zone listed for a 558 * chunk size we move it to the FreeZones list. We always leave at least 559 * one zone per chunk size listed, even if it is freeable. 560 * 561 * Do not move the zone if there is an IPI in_flight (z_RCount != 0), 562 * otherwise MP races can result in our free_remote code accessing a 563 * destroyed zone. The remote end interlocks z_RCount with z_RChunks 564 * so one has to test both z_NFree and z_RCount. 565 * 566 * Since this code can be called from an IPI callback, do *NOT* try to mess 567 * with kernel_map here. Hysteresis will be performed at kmalloc() time. 568 */ 569 static __inline SLZone * 570 check_zone_free(SLGlobalData *slgd, SLZone *z) 571 { 572 SLZone *znext; 573 574 znext = TAILQ_NEXT(z, z_Entry); 575 if (z->z_NFree == z->z_NMax && z->z_RCount == 0 && 576 (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)) { 577 int *kup; 578 579 TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 580 581 z->z_Magic = -1; 582 TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry); 583 ++slgd->NFreeZones; 584 kup = btokup(z); 585 *kup = 0; 586 } 587 return znext; 588 } 589 590 #ifdef SLAB_DEBUG 591 /* 592 * Used to debug memory corruption issues. Record up to (typically 32) 593 * allocation sources for this zone (for a particular chunk size). 
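 *
 * The table is a tiny open-addressed cache: the probe starts at
 * (line & (SLAB_DEBUG_ENTRIES - 1)) and walks forward with wrap-around,
 * returning early if the (file, line) pair is already present and
 * claiming the first empty slot otherwise.  If the table is full the
 * starting slot is simply overwritten; this is debug instrumentation,
 * not an exact record.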
594 */ 595 596 static void 597 slab_record_source(SLZone *z, const char *file, int line) 598 { 599 int i; 600 int b = line & (SLAB_DEBUG_ENTRIES - 1); 601 602 i = b; 603 do { 604 if (z->z_Sources[i].file == file && z->z_Sources[i].line == line) 605 return; 606 if (z->z_Sources[i].file == NULL) 607 break; 608 i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1); 609 } while (i != b); 610 z->z_Sources[i].file = file; 611 z->z_Sources[i].line = line; 612 } 613 614 #endif 615 616 static __inline unsigned long 617 powerof2_size(unsigned long size) 618 { 619 int i; 620 621 if (size == 0 || powerof2(size)) 622 return size; 623 624 i = flsl(size); 625 return (1UL << i); 626 } 627 628 /* 629 * kmalloc() (SLAB ALLOCATOR) 630 * 631 * Allocate memory via the slab allocator. If the request is too large, 632 * or if it page-aligned beyond a certain size, we fall back to the 633 * KMEM subsystem. A SLAB tracking descriptor must be specified, use 634 * &SlabMisc if you don't care. 635 * 636 * M_RNOWAIT - don't block. 637 * M_NULLOK - return NULL instead of blocking. 638 * M_ZERO - zero the returned memory. 639 * M_USE_RESERVE - allow greater drawdown of the free list 640 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted 641 * M_POWEROF2 - roundup size to the nearest power of 2 642 * 643 * MPSAFE 644 */ 645 646 /* don't let kmalloc macro mess up function declaration */ 647 #undef kmalloc 648 649 #ifdef SLAB_DEBUG 650 void * 651 kmalloc_debug(unsigned long size, struct malloc_type *type, int flags, 652 const char *file, int line) 653 #else 654 void * 655 kmalloc(unsigned long size, struct malloc_type *type, int flags) 656 #endif 657 { 658 SLZone *z; 659 SLChunk *chunk; 660 SLGlobalData *slgd; 661 struct globaldata *gd; 662 unsigned long align; 663 int zi; 664 #ifdef INVARIANTS 665 int i; 666 #endif 667 668 logmemory_quick(malloc_beg); 669 gd = mycpu; 670 slgd = &gd->gd_slab; 671 672 /* 673 * XXX silly to have this in the critical path. 674 */ 675 if (type->ks_limit == 0) { 676 crit_enter(); 677 malloc_init(type); 678 crit_exit(); 679 } 680 ++type->ks_use[gd->gd_cpuid].calls; 681 682 /* 683 * Flagged for cache-alignment 684 */ 685 if (flags & M_CACHEALIGN) { 686 if (size < __VM_CACHELINE_SIZE) 687 size = __VM_CACHELINE_SIZE; 688 else if (!CAN_CACHEALIGN(size)) 689 flags |= M_POWEROF2; 690 } 691 692 /* 693 * Flagged to force nearest power-of-2 (higher or same) 694 */ 695 if (flags & M_POWEROF2) 696 size = powerof2_size(size); 697 698 /* 699 * Handle the case where the limit is reached. Panic if we can't return 700 * NULL. The original malloc code looped, but this tended to 701 * simply deadlock the computer. 702 * 703 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used 704 * to determine if a more complete limit check should be done. The 705 * actual memory use is tracked via ks_use[cpu].memuse. 706 */ 707 while (type->ks_loosememuse >= type->ks_limit) { 708 int i; 709 long ttl; 710 711 for (i = ttl = 0; i < ncpus; ++i) 712 ttl += type->ks_use[i].memuse; 713 type->ks_loosememuse = ttl; /* not MP synchronized */ 714 if ((ssize_t)ttl < 0) /* deal with occassional race */ 715 ttl = 0; 716 if (ttl >= type->ks_limit) { 717 if (flags & M_NULLOK) { 718 logmemory(malloc_end, NULL, type, size, flags); 719 return(NULL); 720 } 721 panic("%s: malloc limit exceeded", type->ks_shortdesc); 722 } 723 } 724 725 /* 726 * Handle the degenerate size == 0 case. Yes, this does happen. 727 * Return a special pointer. This is to maintain compatibility with 728 * the original malloc implementation. 
Certain devices, such as the 729 * adaptec driver, not only allocate 0 bytes, they check for NULL and 730 * also realloc() later on. Joy. 731 */ 732 if (size == 0) { 733 logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags); 734 return(ZERO_LENGTH_PTR); 735 } 736 737 /* 738 * Handle hysteresis from prior frees here in malloc(). We cannot 739 * safely manipulate the kernel_map in free() due to free() possibly 740 * being called via an IPI message or from sensitive interrupt code. 741 * 742 * NOTE: ku_pagecnt must be cleared before we free the slab or we 743 * might race another cpu allocating the kva and setting 744 * ku_pagecnt. 745 */ 746 while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) { 747 crit_enter(); 748 if (slgd->NFreeZones > ZoneRelsThresh) { /* crit sect race */ 749 int *kup; 750 751 z = TAILQ_LAST(&slgd->FreeZones, SLZoneList); 752 KKASSERT(z != NULL); 753 TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry); 754 --slgd->NFreeZones; 755 kup = btokup(z); 756 *kup = 0; 757 kmem_slab_free(z, ZoneSize); /* may block */ 758 atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024); 759 } 760 crit_exit(); 761 } 762 763 /* 764 * XXX handle oversized frees that were queued from kfree(). 765 */ 766 while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) { 767 crit_enter(); 768 if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) { 769 vm_size_t tsize; 770 771 KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC); 772 TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry); 773 tsize = z->z_ChunkSize; 774 kmem_slab_free(z, tsize); /* may block */ 775 atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024); 776 } 777 crit_exit(); 778 } 779 780 /* 781 * Handle large allocations directly. There should not be very many of 782 * these so performance is not a big issue. 783 * 784 * The backend allocator is pretty nasty on a SMP system. Use the 785 * slab allocator for one and two page-sized chunks even though we lose 786 * some efficiency. XXX maybe fix mmio and the elf loader instead. 787 */ 788 if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) { 789 int *kup; 790 791 size = round_page(size); 792 chunk = kmem_slab_alloc(size, PAGE_SIZE, flags); 793 if (chunk == NULL) { 794 logmemory(malloc_end, NULL, type, size, flags); 795 return(NULL); 796 } 797 atomic_add_int(&ZoneBigAlloc, (int)size / 1024); 798 flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */ 799 flags |= M_PASSIVE_ZERO; 800 kup = btokup(chunk); 801 *kup = size / PAGE_SIZE; 802 crit_enter(); 803 goto done; 804 } 805 806 /* 807 * Attempt to allocate out of an existing zone. First try the free list, 808 * then allocate out of unallocated space. If we find a good zone move 809 * it to the head of the list so later allocations find it quickly 810 * (we might have thousands of zones in the list). 811 * 812 * Note: zoneindex() will panic of size is too large. 813 */ 814 zi = zoneindex(&size, &align); 815 KKASSERT(zi < NZONES); 816 crit_enter(); 817 818 if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) { 819 /* 820 * Locate a chunk - we have to have at least one. If this is the 821 * last chunk go ahead and do the work to retrieve chunks freed 822 * from remote cpus, and if the zone is still empty move it off 823 * the ZoneAry. 824 */ 825 if (--z->z_NFree <= 0) { 826 KKASSERT(z->z_NFree == 0); 827 828 /* 829 * WARNING! This code competes with other cpus. It is ok 830 * for us to not drain RChunks here but we might as well, and 831 * it is ok if more accumulate after we're done. 
832 * 833 * Set RSignal before pulling rchunks off, indicating that we 834 * will be moving ourselves off of the ZoneAry. Remote ends will 835 * read RSignal before putting rchunks on thus interlocking 836 * their IPI signaling. 837 */ 838 if (z->z_RChunks == NULL) 839 atomic_swap_int(&z->z_RSignal, 1); 840 841 clean_zone_rchunks(z); 842 843 /* 844 * Remove from the zone list if no free chunks remain. 845 * Clear RSignal 846 */ 847 if (z->z_NFree == 0) { 848 TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry); 849 } else { 850 z->z_RSignal = 0; 851 } 852 } 853 854 /* 855 * Fast path, we have chunks available in z_LChunks. 856 */ 857 chunk = z->z_LChunks; 858 if (chunk) { 859 chunk_mark_allocated(z, chunk); 860 z->z_LChunks = chunk->c_Next; 861 if (z->z_LChunks == NULL) 862 z->z_LChunksp = &z->z_LChunks; 863 #ifdef SLAB_DEBUG 864 slab_record_source(z, file, line); 865 #endif 866 goto done; 867 } 868 869 /* 870 * No chunks are available in LChunks, the free chunk MUST be 871 * in the never-before-used memory area, controlled by UIndex. 872 * 873 * The consequences are very serious if our zone got corrupted so 874 * we use an explicit panic rather than a KASSERT. 875 */ 876 if (z->z_UIndex + 1 != z->z_NMax) 877 ++z->z_UIndex; 878 else 879 z->z_UIndex = 0; 880 881 if (z->z_UIndex == z->z_UEndIndex) 882 panic("slaballoc: corrupted zone"); 883 884 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 885 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 886 flags &= ~M_ZERO; 887 flags |= M_PASSIVE_ZERO; 888 } 889 chunk_mark_allocated(z, chunk); 890 #ifdef SLAB_DEBUG 891 slab_record_source(z, file, line); 892 #endif 893 goto done; 894 } 895 896 /* 897 * If all zones are exhausted we need to allocate a new zone for this 898 * index. Use M_ZERO to take advantage of pre-zerod pages. Also see 899 * UAlloc use above in regards to M_ZERO. Note that when we are reusing 900 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and 901 * we do not pre-zero it because we do not want to mess up the L1 cache. 902 * 903 * At least one subsystem, the tty code (see CROUND) expects power-of-2 904 * allocations to be power-of-2 aligned. We maintain compatibility by 905 * adjusting the base offset below. 906 */ 907 { 908 int off; 909 int *kup; 910 911 if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) { 912 TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry); 913 --slgd->NFreeZones; 914 bzero(z, sizeof(SLZone)); 915 z->z_Flags |= SLZF_UNOTZEROD; 916 } else { 917 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO); 918 if (z == NULL) 919 goto fail; 920 atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024); 921 } 922 923 /* 924 * How big is the base structure? 925 */ 926 #if defined(INVARIANTS) 927 /* 928 * Make room for z_Bitmap. An exact calculation is somewhat more 929 * complicated so don't make an exact calculation. 930 */ 931 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]); 932 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8); 933 #else 934 off = sizeof(SLZone); 935 #endif 936 937 /* 938 * Guarentee power-of-2 alignment for power-of-2-sized chunks. 939 * Otherwise properly align the data according to the chunk size. 
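 *
 * A rough example of the resulting layout (illustrative numbers only;
 * the real header size depends on the build options above): with a
 * 128K zone, 1024-byte chunks, and a header that fits in the first 1K,
 * 'align' becomes 1024, 'off' rounds the SLZone header up to the first
 * 1024-byte boundary, and
 *
 *	z_NMax    = (131072 - 1024) / 1024 = 127 chunks
 *	z_BasePtr = (char *)z + 1024
 *
 * so every chunk sits at a power-of-2 aligned offset from the start of
 * the zone, which is what the CROUND note above is about.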
940 */ 941 if (powerof2(size)) 942 align = size; 943 off = roundup2(off, align); 944 945 z->z_Magic = ZALLOC_SLAB_MAGIC; 946 z->z_ZoneIndex = zi; 947 z->z_NMax = (ZoneSize - off) / size; 948 z->z_NFree = z->z_NMax - 1; 949 z->z_BasePtr = (char *)z + off; 950 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax; 951 z->z_ChunkSize = size; 952 z->z_CpuGd = gd; 953 z->z_Cpu = gd->gd_cpuid; 954 z->z_LChunksp = &z->z_LChunks; 955 #ifdef SLAB_DEBUG 956 bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources)); 957 bzero(z->z_Sources, sizeof(z->z_Sources)); 958 #endif 959 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 960 TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry); 961 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 962 flags &= ~M_ZERO; /* already zero'd */ 963 flags |= M_PASSIVE_ZERO; 964 } 965 kup = btokup(z); 966 *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */ 967 chunk_mark_allocated(z, chunk); 968 #ifdef SLAB_DEBUG 969 slab_record_source(z, file, line); 970 #endif 971 972 /* 973 * Slide the base index for initial allocations out of the next 974 * zone we create so we do not over-weight the lower part of the 975 * cpu memory caches. 976 */ 977 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) 978 & (ZALLOC_MAX_ZONE_SIZE - 1); 979 } 980 981 done: 982 ++type->ks_use[gd->gd_cpuid].inuse; 983 type->ks_use[gd->gd_cpuid].memuse += size; 984 type->ks_use[gd->gd_cpuid].loosememuse += size; 985 if (type->ks_use[gd->gd_cpuid].loosememuse >= ZoneSize) { 986 /* not MP synchronized */ 987 type->ks_loosememuse += type->ks_use[gd->gd_cpuid].loosememuse; 988 type->ks_use[gd->gd_cpuid].loosememuse = 0; 989 } 990 crit_exit(); 991 992 if (flags & M_ZERO) 993 bzero(chunk, size); 994 #ifdef INVARIANTS 995 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) { 996 if (use_malloc_pattern) { 997 for (i = 0; i < size; i += sizeof(int)) { 998 *(int *)((char *)chunk + i) = -1; 999 } 1000 } 1001 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */ 1002 } 1003 #endif 1004 logmemory(malloc_end, chunk, type, size, flags); 1005 return(chunk); 1006 fail: 1007 crit_exit(); 1008 logmemory(malloc_end, NULL, type, size, flags); 1009 return(NULL); 1010 } 1011 1012 /* 1013 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE) 1014 * 1015 * Generally speaking this routine is not called very often and we do 1016 * not attempt to optimize it beyond reusing the same pointer if the 1017 * new size fits within the chunking of the old pointer's zone. 1018 */ 1019 #ifdef SLAB_DEBUG 1020 void * 1021 krealloc_debug(void *ptr, unsigned long size, 1022 struct malloc_type *type, int flags, 1023 const char *file, int line) 1024 #else 1025 void * 1026 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags) 1027 #endif 1028 { 1029 unsigned long osize; 1030 unsigned long align; 1031 SLZone *z; 1032 void *nptr; 1033 int *kup; 1034 1035 KKASSERT((flags & M_ZERO) == 0); /* not supported */ 1036 1037 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) 1038 return(kmalloc_debug(size, type, flags, file, line)); 1039 if (size == 0) { 1040 kfree(ptr, type); 1041 return(NULL); 1042 } 1043 1044 /* 1045 * Handle oversized allocations. XXX we really should require that a 1046 * size be passed to free() instead of this nonsense. 
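 *
 * The per-page '*kup' value (see btokup()) doubles as a type tag, so a
 * bare pointer is enough here:
 *
 *	*kup >  0	oversized allocation, value is its size in pages
 *	*kup == 0	page not owned by a live allocation (e.g. a zone
 *			parked on FreeZones)
 *	*kup <  0	slab zone, value is -(owning cpu + 1)
 *
 * which is why this function and kfree() can branch on *kup without
 * being told the original allocation size.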
1047 */ 1048 kup = btokup(ptr); 1049 if (*kup > 0) { 1050 osize = *kup << PAGE_SHIFT; 1051 if (osize == round_page(size)) 1052 return(ptr); 1053 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 1054 return(NULL); 1055 bcopy(ptr, nptr, min(size, osize)); 1056 kfree(ptr, type); 1057 return(nptr); 1058 } 1059 1060 /* 1061 * Get the original allocation's zone. If the new request winds up 1062 * using the same chunk size we do not have to do anything. 1063 */ 1064 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 1065 kup = btokup(z); 1066 KKASSERT(*kup < 0); 1067 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1068 1069 /* 1070 * Allocate memory for the new request size. Note that zoneindex has 1071 * already adjusted the request size to the appropriate chunk size, which 1072 * should optimize our bcopy(). Then copy and return the new pointer. 1073 * 1074 * Resizing a non-power-of-2 allocation to a power-of-2 size does not 1075 * necessary align the result. 1076 * 1077 * We can only zoneindex (to align size to the chunk size) if the new 1078 * size is not too large. 1079 */ 1080 if (size < ZoneLimit) { 1081 zoneindex(&size, &align); 1082 if (z->z_ChunkSize == size) 1083 return(ptr); 1084 } 1085 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 1086 return(NULL); 1087 bcopy(ptr, nptr, min(size, z->z_ChunkSize)); 1088 kfree(ptr, type); 1089 return(nptr); 1090 } 1091 1092 /* 1093 * Return the kmalloc limit for this type, in bytes. 1094 */ 1095 long 1096 kmalloc_limit(struct malloc_type *type) 1097 { 1098 if (type->ks_limit == 0) { 1099 crit_enter(); 1100 if (type->ks_limit == 0) 1101 malloc_init(type); 1102 crit_exit(); 1103 } 1104 return(type->ks_limit); 1105 } 1106 1107 /* 1108 * Allocate a copy of the specified string. 1109 * 1110 * (MP SAFE) (MAY BLOCK) 1111 */ 1112 #ifdef SLAB_DEBUG 1113 char * 1114 kstrdup_debug(const char *str, struct malloc_type *type, 1115 const char *file, int line) 1116 #else 1117 char * 1118 kstrdup(const char *str, struct malloc_type *type) 1119 #endif 1120 { 1121 int zlen; /* length inclusive of terminating NUL */ 1122 char *nstr; 1123 1124 if (str == NULL) 1125 return(NULL); 1126 zlen = strlen(str) + 1; 1127 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line); 1128 bcopy(str, nstr, zlen); 1129 return(nstr); 1130 } 1131 1132 #ifdef SLAB_DEBUG 1133 char * 1134 kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type, 1135 const char *file, int line) 1136 #else 1137 char * 1138 kstrndup(const char *str, size_t maxlen, struct malloc_type *type) 1139 #endif 1140 { 1141 int zlen; /* length inclusive of terminating NUL */ 1142 char *nstr; 1143 1144 if (str == NULL) 1145 return(NULL); 1146 zlen = strnlen(str, maxlen) + 1; 1147 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line); 1148 bcopy(str, nstr, zlen); 1149 nstr[zlen - 1] = '\0'; 1150 return(nstr); 1151 } 1152 1153 /* 1154 * Notify our cpu that a remote cpu has freed some chunks in a zone that 1155 * we own. RCount will be bumped so the memory should be good, but validate 1156 * that it really is. 
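 *
 * In other words this is the receiving half of the remote-free
 * protocol: the sender bumped z_RCount before issuing the IPI so the
 * zone cannot be torn down in transit (check_zone_free() refuses zones
 * with a non-zero RCount).  Our job here is to drop RCount, fold
 * z_RChunks back into z_LChunks, and put the zone back on the ZoneAry
 * if it had been taken off while fully allocated.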
1157 */ 1158 static void 1159 kfree_remote(void *ptr) 1160 { 1161 SLGlobalData *slgd; 1162 SLZone *z; 1163 int nfree; 1164 int *kup; 1165 1166 slgd = &mycpu->gd_slab; 1167 z = ptr; 1168 kup = btokup(z); 1169 KKASSERT(*kup == -((int)mycpuid + 1)); 1170 KKASSERT(z->z_RCount > 0); 1171 atomic_subtract_int(&z->z_RCount, 1); 1172 1173 logmemory(free_rem_beg, z, NULL, 0L, 0); 1174 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1175 KKASSERT(z->z_Cpu == mycpu->gd_cpuid); 1176 nfree = z->z_NFree; 1177 1178 /* 1179 * Indicate that we will no longer be off of the ZoneAry by 1180 * clearing RSignal. 1181 */ 1182 if (z->z_RChunks) 1183 z->z_RSignal = 0; 1184 1185 /* 1186 * Atomically extract the bchunks list and then process it back 1187 * into the lchunks list. We want to append our bchunks to the 1188 * lchunks list and not prepend since we likely do not have 1189 * cache mastership of the related data (not that it helps since 1190 * we are using c_Next). 1191 */ 1192 clean_zone_rchunks(z); 1193 if (z->z_NFree && nfree == 0) { 1194 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1195 } 1196 1197 check_zone_free(slgd, z); 1198 logmemory(free_rem_end, z, NULL, 0L, 0); 1199 } 1200 1201 /* 1202 * free (SLAB ALLOCATOR) 1203 * 1204 * Free a memory block previously allocated by malloc. 1205 * 1206 * Note: We do not attempt to update ks_loosememuse as MP races could 1207 * prevent us from checking memory limits in malloc. YYY we may 1208 * consider updating ks_cpu.loosememuse. 1209 * 1210 * MPSAFE 1211 */ 1212 void 1213 kfree(void *ptr, struct malloc_type *type) 1214 { 1215 SLZone *z; 1216 SLChunk *chunk; 1217 SLGlobalData *slgd; 1218 struct globaldata *gd; 1219 int *kup; 1220 unsigned long size; 1221 SLChunk *bchunk; 1222 int rsignal; 1223 1224 logmemory_quick(free_beg); 1225 gd = mycpu; 1226 slgd = &gd->gd_slab; 1227 1228 if (ptr == NULL) 1229 panic("trying to free NULL pointer"); 1230 1231 /* 1232 * Handle special 0-byte allocations 1233 */ 1234 if (ptr == ZERO_LENGTH_PTR) { 1235 logmemory(free_zero, ptr, type, -1UL, 0); 1236 logmemory_quick(free_end); 1237 return; 1238 } 1239 1240 /* 1241 * Panic on bad malloc type 1242 */ 1243 if (type->ks_magic != M_MAGIC) 1244 panic("free: malloc type lacks magic"); 1245 1246 /* 1247 * Handle oversized allocations. XXX we really should require that a 1248 * size be passed to free() instead of this nonsense. 1249 * 1250 * This code is never called via an ipi. 1251 */ 1252 kup = btokup(ptr); 1253 if (*kup > 0) { 1254 size = *kup << PAGE_SHIFT; 1255 *kup = 0; 1256 #ifdef INVARIANTS 1257 KKASSERT(sizeof(weirdary) <= size); 1258 bcopy(weirdary, ptr, sizeof(weirdary)); 1259 #endif 1260 /* 1261 * NOTE: For oversized allocations we do not record the 1262 * originating cpu. It gets freed on the cpu calling 1263 * kfree(). The statistics are in aggregate. 1264 * 1265 * note: XXX we have still inherited the interrupts-can't-block 1266 * assumption. An interrupt thread does not bump 1267 * gd_intr_nesting_level so check TDF_INTTHREAD. This is 1268 * primarily until we can fix softupdate's assumptions about free(). 
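 *
 * Concretely: when called from an interrupt thread the code below does
 * not call kmem_slab_free() (which may block).  Instead the oversized
 * block is overlaid with a fake zone header (z_Magic = ZALLOC_OVSZ_MAGIC,
 * z_ChunkSize = size) and parked on FreeOvZones; the next kmalloc() on
 * this cpu drains that list from a context where blocking is allowed.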
1269 */ 1270 crit_enter(); 1271 --type->ks_use[gd->gd_cpuid].inuse; 1272 type->ks_use[gd->gd_cpuid].memuse -= size; 1273 if (mycpu->gd_intr_nesting_level || 1274 (gd->gd_curthread->td_flags & TDF_INTTHREAD)) { 1275 logmemory(free_ovsz_delayed, ptr, type, size, 0); 1276 z = (SLZone *)ptr; 1277 z->z_Magic = ZALLOC_OVSZ_MAGIC; 1278 z->z_ChunkSize = size; 1279 1280 TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry); 1281 crit_exit(); 1282 } else { 1283 crit_exit(); 1284 logmemory(free_ovsz, ptr, type, size, 0); 1285 kmem_slab_free(ptr, size); /* may block */ 1286 atomic_add_int(&ZoneBigAlloc, -(int)size / 1024); 1287 } 1288 logmemory_quick(free_end); 1289 return; 1290 } 1291 1292 /* 1293 * Zone case. Figure out the zone based on the fact that it is 1294 * ZoneSize aligned. 1295 */ 1296 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 1297 kup = btokup(z); 1298 KKASSERT(*kup < 0); 1299 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1300 1301 /* 1302 * If we do not own the zone then use atomic ops to free to the 1303 * remote cpu linked list and notify the target zone using a 1304 * passive message. 1305 * 1306 * The target zone cannot be deallocated while we own a chunk of it, 1307 * so the zone header's storage is stable until the very moment 1308 * we adjust z_RChunks. After that we cannot safely dereference (z). 1309 * 1310 * (no critical section needed) 1311 */ 1312 if (z->z_CpuGd != gd) { 1313 /* 1314 * Making these adjustments now allow us to avoid passing (type) 1315 * to the remote cpu. Note that inuse/memuse is being 1316 * adjusted on OUR cpu, not the zone cpu, but it should all still 1317 * sum up properly and cancel out. 1318 */ 1319 crit_enter(); 1320 --type->ks_use[gd->gd_cpuid].inuse; 1321 type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize; 1322 crit_exit(); 1323 1324 /* 1325 * WARNING! This code competes with other cpus. Once we 1326 * successfully link the chunk to RChunks the remote 1327 * cpu can rip z's storage out from under us. 1328 * 1329 * Bumping RCount prevents z's storage from getting 1330 * ripped out. 1331 */ 1332 rsignal = z->z_RSignal; 1333 cpu_lfence(); 1334 if (rsignal) 1335 atomic_add_int(&z->z_RCount, 1); 1336 1337 chunk = ptr; 1338 for (;;) { 1339 bchunk = z->z_RChunks; 1340 cpu_ccfence(); 1341 chunk->c_Next = bchunk; 1342 cpu_sfence(); 1343 1344 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk)) 1345 break; 1346 } 1347 1348 /* 1349 * We have to signal the remote cpu if our actions will cause 1350 * the remote zone to be placed back on ZoneAry so it can 1351 * move the zone back on. 1352 * 1353 * We only need to deal with NULL->non-NULL RChunk transitions 1354 * and only if z_RSignal is set. We interlock by reading rsignal 1355 * before adding our chunk to RChunks. This should result in 1356 * virtually no IPI traffic. 1357 * 1358 * We can use a passive IPI to reduce overhead even further. 
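 *
 * The other half of the interlock lives in kmalloc(): the owning cpu
 * sets z_RSignal *before* it drains z_RChunks and takes the zone off
 * its ZoneAry, so one of the two sides is guaranteed to observe the
 * other.  The net effect is that an IPI is generated essentially once
 * per time a zone runs dry (the NULL->non-NULL RChunks transition with
 * RSignal set), not once per freed chunk.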
1359 */ 1360 if (bchunk == NULL && rsignal) { 1361 logmemory(free_request, ptr, type, 1362 (unsigned long)z->z_ChunkSize, 0); 1363 lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z); 1364 /* z can get ripped out from under us from this point on */ 1365 } else if (rsignal) { 1366 atomic_subtract_int(&z->z_RCount, 1); 1367 /* z can get ripped out from under us from this point on */ 1368 } 1369 logmemory_quick(free_end); 1370 return; 1371 } 1372 1373 /* 1374 * kfree locally 1375 */ 1376 logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0); 1377 1378 crit_enter(); 1379 chunk = ptr; 1380 chunk_mark_free(z, chunk); 1381 1382 /* 1383 * Put weird data into the memory to detect modifications after freeing, 1384 * illegal pointer use after freeing (we should fault on the odd address), 1385 * and so forth. XXX needs more work, see the old malloc code. 1386 */ 1387 #ifdef INVARIANTS 1388 if (z->z_ChunkSize < sizeof(weirdary)) 1389 bcopy(weirdary, chunk, z->z_ChunkSize); 1390 else 1391 bcopy(weirdary, chunk, sizeof(weirdary)); 1392 #endif 1393 1394 /* 1395 * Add this free non-zero'd chunk to a linked list for reuse. Add 1396 * to the front of the linked list so it is more likely to be 1397 * reallocated, since it is already in our L1 cache. 1398 */ 1399 #ifdef INVARIANTS 1400 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd) 1401 panic("BADFREE %p", chunk); 1402 #endif 1403 chunk->c_Next = z->z_LChunks; 1404 z->z_LChunks = chunk; 1405 if (chunk->c_Next == NULL) 1406 z->z_LChunksp = &chunk->c_Next; 1407 1408 #ifdef INVARIANTS 1409 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart) 1410 panic("BADFREE2"); 1411 #endif 1412 1413 /* 1414 * Bump the number of free chunks. If it becomes non-zero the zone 1415 * must be added back onto the appropriate list. A fully allocated 1416 * zone that sees its first free is considered 'mature' and is placed 1417 * at the head, giving the system time to potentially free the remaining 1418 * entries even while other allocations are going on and making the zone 1419 * freeable. 1420 */ 1421 if (z->z_NFree++ == 0) { 1422 if (SlabFreeToTail) 1423 TAILQ_INSERT_TAIL(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1424 else 1425 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1426 } 1427 1428 --type->ks_use[gd->gd_cpuid].inuse; 1429 type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize; 1430 1431 check_zone_free(slgd, z); 1432 logmemory_quick(free_end); 1433 crit_exit(); 1434 } 1435 1436 /* 1437 * Cleanup slabs which are hanging around due to RChunks or which are wholely 1438 * free and can be moved to the free list if not moved by other means. 1439 * 1440 * Called once every 10 seconds on all cpus. 1441 */ 1442 void 1443 slab_cleanup(void) 1444 { 1445 SLGlobalData *slgd = &mycpu->gd_slab; 1446 SLZone *z; 1447 int i; 1448 1449 crit_enter(); 1450 for (i = 0; i < NZONES; ++i) { 1451 if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL) 1452 continue; 1453 1454 /* 1455 * Scan zones. 1456 */ 1457 while (z) { 1458 /* 1459 * Shift all RChunks to the end of the LChunks list. This is 1460 * an O(1) operation. 1461 * 1462 * Then free the zone if possible. 
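 *
 * "Free the zone" here only means moving a completely idle zone to the
 * per-cpu FreeZones list; check_zone_free() keeps at least one zone
 * per chunk size and skips zones with an IPI still in flight.  The
 * actual unmapping back to the kernel_map happens later, in kmalloc(),
 * once FreeZones grows past ZoneRelsThresh, because check_zone_free()
 * can also be reached from an IPI callback where touching the
 * kernel_map is not allowed.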
1463 */ 1464 clean_zone_rchunks(z); 1465 z = check_zone_free(slgd, z); 1466 } 1467 } 1468 crit_exit(); 1469 } 1470 1471 #if defined(INVARIANTS) 1472 1473 /* 1474 * Helper routines for sanity checks 1475 */ 1476 static void 1477 chunk_mark_allocated(SLZone *z, void *chunk) 1478 { 1479 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1480 uint32_t *bitptr; 1481 1482 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1483 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1484 ("memory chunk %p bit index %d is illegal", chunk, bitdex)); 1485 bitptr = &z->z_Bitmap[bitdex >> 5]; 1486 bitdex &= 31; 1487 KASSERT((*bitptr & (1 << bitdex)) == 0, 1488 ("memory chunk %p is already allocated!", chunk)); 1489 *bitptr |= 1 << bitdex; 1490 } 1491 1492 static void 1493 chunk_mark_free(SLZone *z, void *chunk) 1494 { 1495 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1496 uint32_t *bitptr; 1497 1498 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1499 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1500 ("memory chunk %p bit index %d is illegal!", chunk, bitdex)); 1501 bitptr = &z->z_Bitmap[bitdex >> 5]; 1502 bitdex &= 31; 1503 KASSERT((*bitptr & (1 << bitdex)) != 0, 1504 ("memory chunk %p is already free!", chunk)); 1505 *bitptr &= ~(1 << bitdex); 1506 } 1507 1508 #endif 1509 1510 /* 1511 * kmem_slab_alloc() 1512 * 1513 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the 1514 * specified alignment. M_* flags are expected in the flags field. 1515 * 1516 * Alignment must be a multiple of PAGE_SIZE. 1517 * 1518 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(), 1519 * but when we move zalloc() over to use this function as its backend 1520 * we will have to switch to kreserve/krelease and call reserve(0) 1521 * after the new space is made available. 1522 * 1523 * Interrupt code which has preempted other code is not allowed to 1524 * use PQ_CACHE pages. However, if an interrupt thread is run 1525 * non-preemptively or blocks and then runs non-preemptively, then 1526 * it is free to use PQ_CACHE pages. <--- may not apply any longer XXX 1527 */ 1528 static void * 1529 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) 1530 { 1531 vm_size_t i; 1532 vm_offset_t addr; 1533 int count, vmflags, base_vmflags; 1534 vm_page_t mbase = NULL; 1535 vm_page_t m; 1536 thread_t td; 1537 1538 size = round_page(size); 1539 addr = vm_map_min(&kernel_map); 1540 1541 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1542 crit_enter(); 1543 vm_map_lock(&kernel_map); 1544 if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) { 1545 vm_map_unlock(&kernel_map); 1546 if ((flags & M_NULLOK) == 0) 1547 panic("kmem_slab_alloc(): kernel_map ran out of space!"); 1548 vm_map_entry_release(count); 1549 crit_exit(); 1550 return(NULL); 1551 } 1552 1553 /* 1554 * kernel_object maps 1:1 to kernel_map. 
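 *
 * That is, the page backing kernel virtual address (addr + i) lives at
 * pindex OFF_TO_IDX(addr + i) in kernel_object, which is why the
 * vm_page_alloc()/vm_page_lookup() calls below can index the object
 * directly with the virtual address.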
1555 */ 1556 vm_object_hold(&kernel_object); 1557 vm_object_reference_locked(&kernel_object); 1558 vm_map_insert(&kernel_map, &count, 1559 &kernel_object, NULL, 1560 addr, addr, addr + size, 1561 VM_MAPTYPE_NORMAL, 1562 VM_SUBSYS_KMALLOC, 1563 VM_PROT_ALL, VM_PROT_ALL, 0); 1564 vm_object_drop(&kernel_object); 1565 vm_map_set_wired_quick(&kernel_map, addr, size, &count); 1566 vm_map_unlock(&kernel_map); 1567 1568 td = curthread; 1569 1570 base_vmflags = 0; 1571 if (flags & M_ZERO) 1572 base_vmflags |= VM_ALLOC_ZERO; 1573 if (flags & M_USE_RESERVE) 1574 base_vmflags |= VM_ALLOC_SYSTEM; 1575 if (flags & M_USE_INTERRUPT_RESERVE) 1576 base_vmflags |= VM_ALLOC_INTERRUPT; 1577 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) { 1578 panic("kmem_slab_alloc: bad flags %08x (%p)", 1579 flags, ((int **)&size)[-1]); 1580 } 1581 1582 /* 1583 * Allocate the pages. Do not map them yet. VM_ALLOC_NORMAL can only 1584 * be set if we are not preempting. 1585 * 1586 * VM_ALLOC_SYSTEM is automatically set if we are preempting and 1587 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is 1588 * implied in this case), though I'm not sure if we really need to 1589 * do that. 1590 */ 1591 vmflags = base_vmflags; 1592 if (flags & M_WAITOK) { 1593 if (td->td_preempted) 1594 vmflags |= VM_ALLOC_SYSTEM; 1595 else 1596 vmflags |= VM_ALLOC_NORMAL; 1597 } 1598 1599 vm_object_hold(&kernel_object); 1600 for (i = 0; i < size; i += PAGE_SIZE) { 1601 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags); 1602 if (i == 0) 1603 mbase = m; 1604 1605 /* 1606 * If the allocation failed we either return NULL or we retry. 1607 * 1608 * If M_WAITOK is specified we wait for more memory and retry. 1609 * If M_WAITOK is specified from a preemption we yield instead of 1610 * wait. Livelock will not occur because the interrupt thread 1611 * will not be preempting anyone the second time around after the 1612 * yield. 1613 */ 1614 if (m == NULL) { 1615 if (flags & M_WAITOK) { 1616 if (td->td_preempted) { 1617 lwkt_switch(); 1618 } else { 1619 vm_wait(0); 1620 } 1621 i -= PAGE_SIZE; /* retry */ 1622 continue; 1623 } 1624 break; 1625 } 1626 } 1627 1628 /* 1629 * Check and deal with an allocation failure 1630 */ 1631 if (i != size) { 1632 while (i != 0) { 1633 i -= PAGE_SIZE; 1634 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); 1635 /* page should already be busy */ 1636 vm_page_free(m); 1637 } 1638 vm_map_lock(&kernel_map); 1639 vm_map_delete(&kernel_map, addr, addr + size, &count); 1640 vm_map_unlock(&kernel_map); 1641 vm_object_drop(&kernel_object); 1642 1643 vm_map_entry_release(count); 1644 crit_exit(); 1645 return(NULL); 1646 } 1647 1648 /* 1649 * Success! 1650 * 1651 * NOTE: The VM pages are still busied. mbase points to the first one 1652 * but we have to iterate via vm_page_next() 1653 */ 1654 vm_object_drop(&kernel_object); 1655 crit_exit(); 1656 1657 /* 1658 * Enter the pages into the pmap and deal with M_ZERO. 
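 *
 * When M_ZERO is set the loop below zeroes each page through its new
 * kernel mapping (pagezero() on the kva) after pmap_enter(), so the
 * caller always gets zeroed memory regardless of what vm_page_alloc()
 * handed back.  The kmalloc() layer above then sets M_PASSIVE_ZERO to
 * remember that memory from this path is already zero and skips its
 * own bzero().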
1659 */ 1660 m = mbase; 1661 i = 0; 1662 1663 while (i < size) { 1664 /* 1665 * page should already be busy 1666 */ 1667 m->valid = VM_PAGE_BITS_ALL; 1668 vm_page_wire(m); 1669 pmap_enter(&kernel_pmap, addr + i, m, 1670 VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL); 1671 if (flags & M_ZERO) 1672 pagezero((char *)addr + i); 1673 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED)); 1674 vm_page_flag_set(m, PG_REFERENCED); 1675 vm_page_wakeup(m); 1676 1677 i += PAGE_SIZE; 1678 vm_object_hold(&kernel_object); 1679 m = vm_page_next(m); 1680 vm_object_drop(&kernel_object); 1681 } 1682 smp_invltlb(); 1683 vm_map_entry_release(count); 1684 atomic_add_long(&SlabsAllocated, 1); 1685 return((void *)addr); 1686 } 1687 1688 /* 1689 * kmem_slab_free() 1690 */ 1691 static void 1692 kmem_slab_free(void *ptr, vm_size_t size) 1693 { 1694 crit_enter(); 1695 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); 1696 atomic_add_long(&SlabsFreed, 1); 1697 crit_exit(); 1698 } 1699