/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010-2019 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *	(n * PAGE_SIZE, n > 2) allocations go directly to kmem.
 *
 * Alignment properties:
 * - All power-of-2 sized allocations are power-of-2 aligned.
 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *   power-of-2 round up of 'size'.
 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
 *   above table 'Chunking' column).
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 * have remained compatible with the following API requirements:
 *
 * + malloc(0) is allowed and returns non-NULL (ahc driver)
 * + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)			\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)					\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
__read_frequently static int ZoneSize;
__read_frequently static int ZoneLimit;
__read_frequently static int ZonePageCount;
__read_frequently static uintptr_t ZoneMask;
__read_frequently struct malloc_type *kmemstatistics;	/* exported to vmstat */

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32	/* threshold number of zones */

#ifdef INVARIANTS
/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#endif
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);
static void kmemfinishinit(void *dummy);

char *ZeroPage;

SYSINIT(kmem1, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
SYSINIT(kmem2, SI_BOOT2_POST_SMP, SI_ORDER_FIRST, kmemfinishinit, NULL);

#ifdef INVARIANTS
/*
 * If enabled, any memory allocated without M_ZERO is initialized to -1.
 */
__read_frequently static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0,
	   "Initialize memory to -1 if M_ZERO not specified");

__read_frequently static int32_t weirdary[16];
__read_frequently static int use_weird_array;
SYSCTL_INT(_debug, OID_AUTO, use_weird_array, CTLFLAG_RW,
	   &use_weird_array, 0,
	   "Initialize memory to weird values on kfree()");
#endif

__read_frequently static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");

static struct spinlock kmemstat_spin =
			SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
254 */ 255 size_t 256 kmem_lim_size(void) 257 { 258 size_t limsize; 259 260 limsize = (size_t)vmstats.v_page_count * PAGE_SIZE; 261 if (limsize > KvaSize) 262 limsize = KvaSize; 263 return (limsize / (1024 * 1024)); 264 } 265 266 static void 267 kmeminit(void *dummy) 268 { 269 size_t limsize; 270 int usesize; 271 #ifdef INVARIANTS 272 int i; 273 #endif 274 275 limsize = kmem_lim_size(); 276 usesize = (int)(limsize * 1024); /* convert to KB */ 277 278 /* 279 * If the machine has a large KVM space and more than 8G of ram, 280 * double the zone release threshold to reduce SMP invalidations. 281 * If more than 16G of ram, do it again. 282 * 283 * The BIOS eats a little ram so add some slop. We want 8G worth of 284 * memory sticks to trigger the first adjustment. 285 */ 286 if (ZoneRelsThresh == ZONE_RELS_THRESH) { 287 if (limsize >= 7 * 1024) 288 ZoneRelsThresh *= 2; 289 if (limsize >= 15 * 1024) 290 ZoneRelsThresh *= 2; 291 if (limsize >= 31 * 1024) 292 ZoneRelsThresh *= 2; 293 if (limsize >= 63 * 1024) 294 ZoneRelsThresh *= 2; 295 if (limsize >= 127 * 1024) 296 ZoneRelsThresh *= 2; 297 } 298 299 /* 300 * Calculate the zone size. This typically calculates to 301 * ZALLOC_MAX_ZONE_SIZE 302 */ 303 ZoneSize = ZALLOC_MIN_ZONE_SIZE; 304 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize) 305 ZoneSize <<= 1; 306 ZoneLimit = ZoneSize / 4; 307 if (ZoneLimit > ZALLOC_ZONE_LIMIT) 308 ZoneLimit = ZALLOC_ZONE_LIMIT; 309 ZoneMask = ~(uintptr_t)(ZoneSize - 1); 310 ZonePageCount = ZoneSize / PAGE_SIZE; 311 312 #ifdef INVARIANTS 313 for (i = 0; i < NELEM(weirdary); ++i) 314 weirdary[i] = WEIRD_ADDR; 315 #endif 316 317 ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO); 318 319 if (bootverbose) 320 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024); 321 } 322 323 /* 324 * Once we know how many cpus are configured reduce ZoneRelsThresh 325 * based on multiples of 32 cpu threads. 326 */ 327 static void 328 kmemfinishinit(void *dummy) 329 { 330 if (ncpus > 32) 331 ZoneRelsThresh = ZoneRelsThresh * 32 / ncpus; 332 } 333 334 /* 335 * (low level) Initialize slab-related elements in the globaldata structure. 336 * 337 * Occurs after kmeminit(). 338 */ 339 void 340 slab_gdinit(globaldata_t gd) 341 { 342 SLGlobalData *slgd; 343 int i; 344 345 slgd = &gd->gd_slab; 346 for (i = 0; i < NZONES; ++i) 347 TAILQ_INIT(&slgd->ZoneAry[i]); 348 TAILQ_INIT(&slgd->FreeZones); 349 TAILQ_INIT(&slgd->FreeOvZones); 350 } 351 352 /* 353 * Initialize a malloc type tracking structure. 
 */
void
malloc_init(void *data)
{
	struct malloc_type *type = data;
	struct kmalloc_use *use;
	size_t limsize;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (vmstats.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	limsize = kmem_lim_size() * (1024 * 1024);
	type->ks_limit = limsize / 10;

	if (ncpus == 1)
		use = &type->ks_use0;
	else
		use = kmalloc(ncpus * sizeof(*use), M_TEMP, M_WAITOK | M_ZERO);

	spin_lock(&kmemstat_spin);
	type->ks_next = kmemstatistics;
	type->ks_use = use;
	kmemstatistics = type;
	spin_unlock(&kmemstat_spin);
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = data;
	struct malloc_type *t;
#ifdef INVARIANTS
	int i;
	long ttl;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (vmstats.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

	/* Make sure that all pending kfree()s are finished. */
	lwkt_synchronize_ipiqs("muninit");

#ifdef INVARIANTS
	/*
	 * memuse is only correct in aggregation.  Due to memory being
	 * allocated on one cpu and freed on another, individual array
	 * entries may be negative or positive (canceling each other out).
	 */
	for (i = ttl = 0; i < ncpus; ++i)
		ttl += type->ks_use[i].memuse;
	if (ttl) {
		kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
			ttl, type->ks_shortdesc, i);
	}
#endif
	spin_lock(&kmemstat_spin);
	if (type == kmemstatistics) {
		kmemstatistics = type->ks_next;
	} else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
	spin_unlock(&kmemstat_spin);

	if (type->ks_use != &type->ks_use0) {
		kfree(type->ks_use, M_TEMP);
		type->ks_use = NULL;
	}
}

/*
 * Reinitialize all installed malloc regions after ncpus has been
 * determined.  type->ks_use is initially set to &type->ks_use0;
 * this function will dynamically allocate it as appropriate for ncpus.
 */
void
malloc_reinit_ncpus(void)
{
	struct malloc_type *t;
	struct kmalloc_use *use;

	/*
	 * If only one cpu we can leave ks_use set to ks_use0
	 */
	if (ncpus <= 1)
		return;

	/*
	 * Expand ks_use for all kmalloc blocks
	 */
	for (t = kmemstatistics; t; t = t->ks_next) {
		KKASSERT(t->ks_use == &t->ks_use0);
		t->ks_use = kmalloc(sizeof(*use) * ncpus, M_TEMP, M_WAITOK|M_ZERO);
		t->ks_use[0] = t->ks_use0;
	}
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
	KKASSERT(type->ks_limit != 0);
	if (bytes == 0)
		bytes = KvaSize;
	if (type->ks_limit < bytes)
		type->ks_limit = bytes;
}

void
kmalloc_set_unlimited(struct malloc_type *type)
{
	type->ks_limit = kmem_lim_size() * (1024 * 1024);
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
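 *
 * Illustrative only: a minimal usage sketch of a dynamically created pool.
 * The pool pointer 'mypool', its description string and the allocation 'p'
 * are hypothetical names, not part of this file; the sketch simply pairs
 * kmalloc_create()/kmalloc_destroy() around ordinary kmalloc()/kfree()
 * calls against that pool:
 *
 *	static struct malloc_type *mypool;
 *
 *	kmalloc_create(&mypool, "hypothetical driver data");
 *	p = kmalloc(sizeof(*p), mypool, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, mypool);
 *	kmalloc_destroy(&mypool);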
492 */ 493 void 494 kmalloc_create(struct malloc_type **typep, const char *descr) 495 { 496 struct malloc_type *type; 497 498 if (*typep == NULL) { 499 type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO); 500 type->ks_magic = M_MAGIC; 501 type->ks_shortdesc = descr; 502 malloc_init(type); 503 *typep = type; 504 } 505 } 506 507 /* 508 * Destroy a dynamically created malloc pool. This function is a NOP if 509 * the pool has already been destroyed. 510 */ 511 void 512 kmalloc_destroy(struct malloc_type **typep) 513 { 514 if (*typep != NULL) { 515 malloc_uninit(*typep); 516 kfree(*typep, M_TEMP); 517 *typep = NULL; 518 } 519 } 520 521 /* 522 * Calculate the zone index for the allocation request size and set the 523 * allocation request size to that particular zone's chunk size. 524 */ 525 static __inline int 526 zoneindex(unsigned long *bytes, unsigned long *align) 527 { 528 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */ 529 530 if (n < 128) { 531 *bytes = n = (n + 7) & ~7; 532 *align = 8; 533 return(n / 8 - 1); /* 8 byte chunks, 16 zones */ 534 } 535 if (n < 256) { 536 *bytes = n = (n + 15) & ~15; 537 *align = 16; 538 return(n / 16 + 7); 539 } 540 if (n < 8192) { 541 if (n < 512) { 542 *bytes = n = (n + 31) & ~31; 543 *align = 32; 544 return(n / 32 + 15); 545 } 546 if (n < 1024) { 547 *bytes = n = (n + 63) & ~63; 548 *align = 64; 549 return(n / 64 + 23); 550 } 551 if (n < 2048) { 552 *bytes = n = (n + 127) & ~127; 553 *align = 128; 554 return(n / 128 + 31); 555 } 556 if (n < 4096) { 557 *bytes = n = (n + 255) & ~255; 558 *align = 256; 559 return(n / 256 + 39); 560 } 561 *bytes = n = (n + 511) & ~511; 562 *align = 512; 563 return(n / 512 + 47); 564 } 565 #if ZALLOC_ZONE_LIMIT > 8192 566 if (n < 16384) { 567 *bytes = n = (n + 1023) & ~1023; 568 *align = 1024; 569 return(n / 1024 + 55); 570 } 571 #endif 572 #if ZALLOC_ZONE_LIMIT > 16384 573 if (n < 32768) { 574 *bytes = n = (n + 2047) & ~2047; 575 *align = 2048; 576 return(n / 2048 + 63); 577 } 578 #endif 579 panic("Unexpected byte count %d", n); 580 return(0); 581 } 582 583 static __inline void 584 clean_zone_rchunks(SLZone *z) 585 { 586 SLChunk *bchunk; 587 588 while ((bchunk = z->z_RChunks) != NULL) { 589 cpu_ccfence(); 590 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) { 591 *z->z_LChunksp = bchunk; 592 while (bchunk) { 593 chunk_mark_free(z, bchunk); 594 z->z_LChunksp = &bchunk->c_Next; 595 bchunk = bchunk->c_Next; 596 ++z->z_NFree; 597 } 598 break; 599 } 600 /* retry */ 601 } 602 } 603 604 /* 605 * If the zone becomes totally free and is not the only zone listed for a 606 * chunk size we move it to the FreeZones list. We always leave at least 607 * one zone per chunk size listed, even if it is freeable. 608 * 609 * Do not move the zone if there is an IPI in_flight (z_RCount != 0), 610 * otherwise MP races can result in our free_remote code accessing a 611 * destroyed zone. The remote end interlocks z_RCount with z_RChunks 612 * so one has to test both z_NFree and z_RCount. 613 * 614 * Since this code can be called from an IPI callback, do *NOT* try to mess 615 * with kernel_map here. Hysteresis will be performed at kmalloc() time. 
616 */ 617 static __inline SLZone * 618 check_zone_free(SLGlobalData *slgd, SLZone *z) 619 { 620 SLZone *znext; 621 622 znext = TAILQ_NEXT(z, z_Entry); 623 if (z->z_NFree == z->z_NMax && z->z_RCount == 0 && 624 (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)) { 625 int *kup; 626 627 TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 628 629 z->z_Magic = -1; 630 TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry); 631 ++slgd->NFreeZones; 632 kup = btokup(z); 633 *kup = 0; 634 } 635 return znext; 636 } 637 638 #ifdef SLAB_DEBUG 639 /* 640 * Used to debug memory corruption issues. Record up to (typically 32) 641 * allocation sources for this zone (for a particular chunk size). 642 */ 643 644 static void 645 slab_record_source(SLZone *z, const char *file, int line) 646 { 647 int i; 648 int b = line & (SLAB_DEBUG_ENTRIES - 1); 649 650 i = b; 651 do { 652 if (z->z_Sources[i].file == file && z->z_Sources[i].line == line) 653 return; 654 if (z->z_Sources[i].file == NULL) 655 break; 656 i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1); 657 } while (i != b); 658 z->z_Sources[i].file = file; 659 z->z_Sources[i].line = line; 660 } 661 662 #endif 663 664 static __inline unsigned long 665 powerof2_size(unsigned long size) 666 { 667 int i; 668 669 if (size == 0 || powerof2(size)) 670 return size; 671 672 i = flsl(size); 673 return (1UL << i); 674 } 675 676 /* 677 * kmalloc() (SLAB ALLOCATOR) 678 * 679 * Allocate memory via the slab allocator. If the request is too large, 680 * or if it page-aligned beyond a certain size, we fall back to the 681 * KMEM subsystem. A SLAB tracking descriptor must be specified, use 682 * &SlabMisc if you don't care. 683 * 684 * M_RNOWAIT - don't block. 685 * M_NULLOK - return NULL instead of blocking. 686 * M_ZERO - zero the returned memory. 687 * M_USE_RESERVE - allow greater drawdown of the free list 688 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted 689 * M_POWEROF2 - roundup size to the nearest power of 2 690 * 691 * MPSAFE 692 */ 693 694 /* don't let kmalloc macro mess up function declaration */ 695 #undef kmalloc 696 697 #ifdef SLAB_DEBUG 698 void * 699 kmalloc_debug(unsigned long size, struct malloc_type *type, int flags, 700 const char *file, int line) 701 #else 702 void * 703 kmalloc(unsigned long size, struct malloc_type *type, int flags) 704 #endif 705 { 706 SLZone *z; 707 SLChunk *chunk; 708 SLGlobalData *slgd; 709 struct globaldata *gd; 710 unsigned long align; 711 int zi; 712 #ifdef INVARIANTS 713 int i; 714 #endif 715 716 logmemory_quick(malloc_beg); 717 gd = mycpu; 718 slgd = &gd->gd_slab; 719 720 /* 721 * XXX silly to have this in the critical path. 722 */ 723 KKASSERT(type->ks_limit != 0); 724 ++type->ks_use[gd->gd_cpuid].calls; 725 726 /* 727 * Flagged for cache-alignment 728 */ 729 if (flags & M_CACHEALIGN) { 730 if (size < __VM_CACHELINE_SIZE) 731 size = __VM_CACHELINE_SIZE; 732 else if (!CAN_CACHEALIGN(size)) 733 flags |= M_POWEROF2; 734 } 735 736 /* 737 * Flagged to force nearest power-of-2 (higher or same) 738 */ 739 if (flags & M_POWEROF2) 740 size = powerof2_size(size); 741 742 /* 743 * Handle the case where the limit is reached. Panic if we can't return 744 * NULL. The original malloc code looped, but this tended to 745 * simply deadlock the computer. 746 * 747 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used 748 * to determine if a more complete limit check should be done. The 749 * actual memory use is tracked via ks_use[cpu].memuse. 
	 */
	while (type->ks_loosememuse >= type->ks_limit) {
		int i;
		long ttl;

		for (i = ttl = 0; i < ncpus; ++i)
			ttl += type->ks_use[i].memuse;
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK) {
				logmemory(malloc_end, NULL, type, size, flags);
				return(NULL);
			}
			panic("%s: malloc limit exceeded", type->ks_shortdesc);
		}
	}

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0) {
		logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle hysteresis from prior frees here in malloc().  We cannot
	 * safely manipulate the kernel_map in free() due to free() possibly
	 * being called via an IPI message or from sensitive interrupt code.
	 *
	 * NOTE: ku_pagecnt must be cleared before we free the slab or we
	 *	 might race another cpu allocating the kva and setting
	 *	 ku_pagecnt.
	 */
	while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if (slgd->NFreeZones > ZoneRelsThresh) {  /* crit sect race */
			int *kup;

			z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
			KKASSERT(z != NULL);
			TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
			--slgd->NFreeZones;
			kup = btokup(z);
			*kup = 0;
			kmem_slab_free(z, ZoneSize);	/* may block */
		}
		crit_exit();
	}

	/*
	 * XXX handle oversized frees that were queued from kfree().
	 */
	while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
			vm_size_t tsize;

			KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
			TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
			tsize = z->z_ChunkSize;
			kmem_slab_free(z, tsize);	/* may block */
		}
		crit_exit();
	}

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on an SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.  XXX maybe fix mmio and the elf loader
	 * instead.
	 */
	if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		int *kup;

		size = round_page(size);
		chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL) {
			logmemory(malloc_end, NULL, type, size, flags);
			return(NULL);
		}
		flags &= ~M_ZERO;  /* result already zero'd if M_ZERO was set */
		flags |= M_PASSIVE_ZERO;
		kup = btokup(chunk);
		*kup = size / PAGE_SIZE;
		crit_enter();
		goto done;
	}

	/*
	 * Attempt to allocate out of an existing zone.  First try the free
	 * list, then allocate out of unallocated space.  If we find a good
	 * zone move it to the head of the list so later allocations find it
	 * quickly (we might have thousands of zones in the list).
	 *
	 * Note: zoneindex() will panic if size is too large.
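	 *
	 * (Illustrative example, derived from the chunking table at the top
	 * of this file: a 100-byte request falls in the 8-byte-chunked range,
	 * so zoneindex() rounds it up to 104 bytes, sets the alignment to 8,
	 * and returns zone index 104/8 - 1 == 12.)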
854 */ 855 zi = zoneindex(&size, &align); 856 KKASSERT(zi < NZONES); 857 crit_enter(); 858 859 if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) { 860 /* 861 * Locate a chunk - we have to have at least one. If this is the 862 * last chunk go ahead and do the work to retrieve chunks freed 863 * from remote cpus, and if the zone is still empty move it off 864 * the ZoneAry. 865 */ 866 if (--z->z_NFree <= 0) { 867 KKASSERT(z->z_NFree == 0); 868 869 /* 870 * WARNING! This code competes with other cpus. It is ok 871 * for us to not drain RChunks here but we might as well, and 872 * it is ok if more accumulate after we're done. 873 * 874 * Set RSignal before pulling rchunks off, indicating that we 875 * will be moving ourselves off of the ZoneAry. Remote ends will 876 * read RSignal before putting rchunks on thus interlocking 877 * their IPI signaling. 878 */ 879 if (z->z_RChunks == NULL) 880 atomic_swap_int(&z->z_RSignal, 1); 881 882 clean_zone_rchunks(z); 883 884 /* 885 * Remove from the zone list if no free chunks remain. 886 * Clear RSignal 887 */ 888 if (z->z_NFree == 0) { 889 TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry); 890 } else { 891 z->z_RSignal = 0; 892 } 893 } 894 895 /* 896 * Fast path, we have chunks available in z_LChunks. 897 */ 898 chunk = z->z_LChunks; 899 if (chunk) { 900 chunk_mark_allocated(z, chunk); 901 z->z_LChunks = chunk->c_Next; 902 if (z->z_LChunks == NULL) 903 z->z_LChunksp = &z->z_LChunks; 904 #ifdef SLAB_DEBUG 905 slab_record_source(z, file, line); 906 #endif 907 goto done; 908 } 909 910 /* 911 * No chunks are available in LChunks, the free chunk MUST be 912 * in the never-before-used memory area, controlled by UIndex. 913 * 914 * The consequences are very serious if our zone got corrupted so 915 * we use an explicit panic rather than a KASSERT. 916 */ 917 if (z->z_UIndex + 1 != z->z_NMax) 918 ++z->z_UIndex; 919 else 920 z->z_UIndex = 0; 921 922 if (z->z_UIndex == z->z_UEndIndex) 923 panic("slaballoc: corrupted zone"); 924 925 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 926 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 927 flags &= ~M_ZERO; 928 flags |= M_PASSIVE_ZERO; 929 } 930 chunk_mark_allocated(z, chunk); 931 #ifdef SLAB_DEBUG 932 slab_record_source(z, file, line); 933 #endif 934 goto done; 935 } 936 937 /* 938 * If all zones are exhausted we need to allocate a new zone for this 939 * index. Use M_ZERO to take advantage of pre-zerod pages. Also see 940 * UAlloc use above in regards to M_ZERO. Note that when we are reusing 941 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and 942 * we do not pre-zero it because we do not want to mess up the L1 cache. 943 * 944 * At least one subsystem, the tty code (see CROUND) expects power-of-2 945 * allocations to be power-of-2 aligned. We maintain compatibility by 946 * adjusting the base offset below. 947 */ 948 { 949 int off; 950 int *kup; 951 952 if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) { 953 TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry); 954 --slgd->NFreeZones; 955 bzero(z, sizeof(SLZone)); 956 z->z_Flags |= SLZF_UNOTZEROD; 957 } else { 958 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO); 959 if (z == NULL) 960 goto fail; 961 } 962 963 /* 964 * How big is the base structure? 965 */ 966 #if defined(INVARIANTS) 967 /* 968 * Make room for z_Bitmap. An exact calculation is somewhat more 969 * complicated so don't make an exact calculation. 
970 */ 971 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]); 972 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8); 973 #else 974 off = sizeof(SLZone); 975 #endif 976 977 /* 978 * Guarentee power-of-2 alignment for power-of-2-sized chunks. 979 * Otherwise properly align the data according to the chunk size. 980 */ 981 if (powerof2(size)) 982 align = size; 983 off = roundup2(off, align); 984 985 z->z_Magic = ZALLOC_SLAB_MAGIC; 986 z->z_ZoneIndex = zi; 987 z->z_NMax = (ZoneSize - off) / size; 988 z->z_NFree = z->z_NMax - 1; 989 z->z_BasePtr = (char *)z + off; 990 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax; 991 z->z_ChunkSize = size; 992 z->z_CpuGd = gd; 993 z->z_Cpu = gd->gd_cpuid; 994 z->z_LChunksp = &z->z_LChunks; 995 #ifdef SLAB_DEBUG 996 bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources)); 997 bzero(z->z_Sources, sizeof(z->z_Sources)); 998 #endif 999 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 1000 TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry); 1001 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 1002 flags &= ~M_ZERO; /* already zero'd */ 1003 flags |= M_PASSIVE_ZERO; 1004 } 1005 kup = btokup(z); 1006 *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */ 1007 chunk_mark_allocated(z, chunk); 1008 #ifdef SLAB_DEBUG 1009 slab_record_source(z, file, line); 1010 #endif 1011 1012 /* 1013 * Slide the base index for initial allocations out of the next 1014 * zone we create so we do not over-weight the lower part of the 1015 * cpu memory caches. 1016 */ 1017 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) 1018 & (ZALLOC_MAX_ZONE_SIZE - 1); 1019 } 1020 1021 done: 1022 ++type->ks_use[gd->gd_cpuid].inuse; 1023 type->ks_use[gd->gd_cpuid].memuse += size; 1024 type->ks_use[gd->gd_cpuid].loosememuse += size; 1025 if (type->ks_use[gd->gd_cpuid].loosememuse >= ZoneSize) { 1026 /* not MP synchronized */ 1027 type->ks_loosememuse += type->ks_use[gd->gd_cpuid].loosememuse; 1028 type->ks_use[gd->gd_cpuid].loosememuse = 0; 1029 } 1030 crit_exit(); 1031 1032 if (flags & M_ZERO) 1033 bzero(chunk, size); 1034 #ifdef INVARIANTS 1035 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) { 1036 if (use_malloc_pattern) { 1037 for (i = 0; i < size; i += sizeof(int)) { 1038 *(int *)((char *)chunk + i) = -1; 1039 } 1040 } 1041 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */ 1042 } 1043 #endif 1044 logmemory(malloc_end, chunk, type, size, flags); 1045 return(chunk); 1046 fail: 1047 crit_exit(); 1048 logmemory(malloc_end, NULL, type, size, flags); 1049 return(NULL); 1050 } 1051 1052 /* 1053 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE) 1054 * 1055 * Generally speaking this routine is not called very often and we do 1056 * not attempt to optimize it beyond reusing the same pointer if the 1057 * new size fits within the chunking of the old pointer's zone. 1058 */ 1059 #ifdef SLAB_DEBUG 1060 void * 1061 krealloc_debug(void *ptr, unsigned long size, 1062 struct malloc_type *type, int flags, 1063 const char *file, int line) 1064 #else 1065 void * 1066 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags) 1067 #endif 1068 { 1069 unsigned long osize; 1070 unsigned long align; 1071 SLZone *z; 1072 void *nptr; 1073 int *kup; 1074 1075 KKASSERT((flags & M_ZERO) == 0); /* not supported */ 1076 1077 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) 1078 return(kmalloc_debug(size, type, flags, file, line)); 1079 if (size == 0) { 1080 kfree(ptr, type); 1081 return(NULL); 1082 } 1083 1084 /* 1085 * Handle oversized allocations. 
XXX we really should require that a 1086 * size be passed to free() instead of this nonsense. 1087 */ 1088 kup = btokup(ptr); 1089 if (*kup > 0) { 1090 osize = *kup << PAGE_SHIFT; 1091 if (osize == round_page(size)) 1092 return(ptr); 1093 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 1094 return(NULL); 1095 bcopy(ptr, nptr, min(size, osize)); 1096 kfree(ptr, type); 1097 return(nptr); 1098 } 1099 1100 /* 1101 * Get the original allocation's zone. If the new request winds up 1102 * using the same chunk size we do not have to do anything. 1103 */ 1104 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 1105 kup = btokup(z); 1106 KKASSERT(*kup < 0); 1107 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1108 1109 /* 1110 * Allocate memory for the new request size. Note that zoneindex has 1111 * already adjusted the request size to the appropriate chunk size, which 1112 * should optimize our bcopy(). Then copy and return the new pointer. 1113 * 1114 * Resizing a non-power-of-2 allocation to a power-of-2 size does not 1115 * necessary align the result. 1116 * 1117 * We can only zoneindex (to align size to the chunk size) if the new 1118 * size is not too large. 1119 */ 1120 if (size < ZoneLimit) { 1121 zoneindex(&size, &align); 1122 if (z->z_ChunkSize == size) 1123 return(ptr); 1124 } 1125 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 1126 return(NULL); 1127 bcopy(ptr, nptr, min(size, z->z_ChunkSize)); 1128 kfree(ptr, type); 1129 return(nptr); 1130 } 1131 1132 /* 1133 * Return the kmalloc limit for this type, in bytes. 1134 */ 1135 long 1136 kmalloc_limit(struct malloc_type *type) 1137 { 1138 KKASSERT(type->ks_limit != 0); 1139 return(type->ks_limit); 1140 } 1141 1142 /* 1143 * Allocate a copy of the specified string. 1144 * 1145 * (MP SAFE) (MAY BLOCK) 1146 */ 1147 #ifdef SLAB_DEBUG 1148 char * 1149 kstrdup_debug(const char *str, struct malloc_type *type, 1150 const char *file, int line) 1151 #else 1152 char * 1153 kstrdup(const char *str, struct malloc_type *type) 1154 #endif 1155 { 1156 int zlen; /* length inclusive of terminating NUL */ 1157 char *nstr; 1158 1159 if (str == NULL) 1160 return(NULL); 1161 zlen = strlen(str) + 1; 1162 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line); 1163 bcopy(str, nstr, zlen); 1164 return(nstr); 1165 } 1166 1167 #ifdef SLAB_DEBUG 1168 char * 1169 kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type, 1170 const char *file, int line) 1171 #else 1172 char * 1173 kstrndup(const char *str, size_t maxlen, struct malloc_type *type) 1174 #endif 1175 { 1176 int zlen; /* length inclusive of terminating NUL */ 1177 char *nstr; 1178 1179 if (str == NULL) 1180 return(NULL); 1181 zlen = strnlen(str, maxlen) + 1; 1182 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line); 1183 bcopy(str, nstr, zlen); 1184 nstr[zlen - 1] = '\0'; 1185 return(nstr); 1186 } 1187 1188 /* 1189 * Notify our cpu that a remote cpu has freed some chunks in a zone that 1190 * we own. RCount will be bumped so the memory should be good, but validate 1191 * that it really is. 
1192 */ 1193 static void 1194 kfree_remote(void *ptr) 1195 { 1196 SLGlobalData *slgd; 1197 SLZone *z; 1198 int nfree; 1199 int *kup; 1200 1201 slgd = &mycpu->gd_slab; 1202 z = ptr; 1203 kup = btokup(z); 1204 KKASSERT(*kup == -((int)mycpuid + 1)); 1205 KKASSERT(z->z_RCount > 0); 1206 atomic_subtract_int(&z->z_RCount, 1); 1207 1208 logmemory(free_rem_beg, z, NULL, 0L, 0); 1209 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1210 KKASSERT(z->z_Cpu == mycpu->gd_cpuid); 1211 nfree = z->z_NFree; 1212 1213 /* 1214 * Indicate that we will no longer be off of the ZoneAry by 1215 * clearing RSignal. 1216 */ 1217 if (z->z_RChunks) 1218 z->z_RSignal = 0; 1219 1220 /* 1221 * Atomically extract the bchunks list and then process it back 1222 * into the lchunks list. We want to append our bchunks to the 1223 * lchunks list and not prepend since we likely do not have 1224 * cache mastership of the related data (not that it helps since 1225 * we are using c_Next). 1226 */ 1227 clean_zone_rchunks(z); 1228 if (z->z_NFree && nfree == 0) { 1229 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1230 } 1231 1232 check_zone_free(slgd, z); 1233 logmemory(free_rem_end, z, NULL, 0L, 0); 1234 } 1235 1236 /* 1237 * free (SLAB ALLOCATOR) 1238 * 1239 * Free a memory block previously allocated by malloc. 1240 * 1241 * Note: We do not attempt to update ks_loosememuse as MP races could 1242 * prevent us from checking memory limits in malloc. YYY we may 1243 * consider updating ks_cpu.loosememuse. 1244 * 1245 * MPSAFE 1246 */ 1247 void 1248 kfree(void *ptr, struct malloc_type *type) 1249 { 1250 SLZone *z; 1251 SLChunk *chunk; 1252 SLGlobalData *slgd; 1253 struct globaldata *gd; 1254 int *kup; 1255 unsigned long size; 1256 SLChunk *bchunk; 1257 int rsignal; 1258 1259 logmemory_quick(free_beg); 1260 gd = mycpu; 1261 slgd = &gd->gd_slab; 1262 1263 if (ptr == NULL) 1264 panic("trying to free NULL pointer"); 1265 1266 /* 1267 * Handle special 0-byte allocations 1268 */ 1269 if (ptr == ZERO_LENGTH_PTR) { 1270 logmemory(free_zero, ptr, type, -1UL, 0); 1271 logmemory_quick(free_end); 1272 return; 1273 } 1274 1275 /* 1276 * Panic on bad malloc type 1277 */ 1278 if (type->ks_magic != M_MAGIC) 1279 panic("free: malloc type lacks magic"); 1280 1281 /* 1282 * Handle oversized allocations. XXX we really should require that a 1283 * size be passed to free() instead of this nonsense. 1284 * 1285 * This code is never called via an ipi. 1286 */ 1287 kup = btokup(ptr); 1288 if (*kup > 0) { 1289 size = *kup << PAGE_SHIFT; 1290 *kup = 0; 1291 #ifdef INVARIANTS 1292 if (use_weird_array) { 1293 KKASSERT(sizeof(weirdary) <= size); 1294 bcopy(weirdary, ptr, sizeof(weirdary)); 1295 } 1296 #endif 1297 /* 1298 * NOTE: For oversized allocations we do not record the 1299 * originating cpu. It gets freed on the cpu calling 1300 * kfree(). The statistics are in aggregate. 1301 * 1302 * note: XXX we have still inherited the interrupts-can't-block 1303 * assumption. An interrupt thread does not bump 1304 * gd_intr_nesting_level so check TDF_INTTHREAD. This is 1305 * primarily until we can fix softupdate's assumptions about free(). 
1306 */ 1307 crit_enter(); 1308 --type->ks_use[gd->gd_cpuid].inuse; 1309 type->ks_use[gd->gd_cpuid].memuse -= size; 1310 if (mycpu->gd_intr_nesting_level || 1311 (gd->gd_curthread->td_flags & TDF_INTTHREAD)) { 1312 logmemory(free_ovsz_delayed, ptr, type, size, 0); 1313 z = (SLZone *)ptr; 1314 z->z_Magic = ZALLOC_OVSZ_MAGIC; 1315 z->z_ChunkSize = size; 1316 1317 TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry); 1318 crit_exit(); 1319 } else { 1320 crit_exit(); 1321 logmemory(free_ovsz, ptr, type, size, 0); 1322 kmem_slab_free(ptr, size); /* may block */ 1323 } 1324 logmemory_quick(free_end); 1325 return; 1326 } 1327 1328 /* 1329 * Zone case. Figure out the zone based on the fact that it is 1330 * ZoneSize aligned. 1331 */ 1332 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 1333 kup = btokup(z); 1334 KKASSERT(*kup < 0); 1335 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1336 1337 /* 1338 * If we do not own the zone then use atomic ops to free to the 1339 * remote cpu linked list and notify the target zone using a 1340 * passive message. 1341 * 1342 * The target zone cannot be deallocated while we own a chunk of it, 1343 * so the zone header's storage is stable until the very moment 1344 * we adjust z_RChunks. After that we cannot safely dereference (z). 1345 * 1346 * (no critical section needed) 1347 */ 1348 if (z->z_CpuGd != gd) { 1349 /* 1350 * Making these adjustments now allow us to avoid passing (type) 1351 * to the remote cpu. Note that inuse/memuse is being 1352 * adjusted on OUR cpu, not the zone cpu, but it should all still 1353 * sum up properly and cancel out. 1354 */ 1355 crit_enter(); 1356 --type->ks_use[gd->gd_cpuid].inuse; 1357 type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize; 1358 crit_exit(); 1359 1360 /* 1361 * WARNING! This code competes with other cpus. Once we 1362 * successfully link the chunk to RChunks the remote 1363 * cpu can rip z's storage out from under us. 1364 * 1365 * Bumping RCount prevents z's storage from getting 1366 * ripped out. 1367 */ 1368 rsignal = z->z_RSignal; 1369 cpu_lfence(); 1370 if (rsignal) 1371 atomic_add_int(&z->z_RCount, 1); 1372 1373 chunk = ptr; 1374 for (;;) { 1375 bchunk = z->z_RChunks; 1376 cpu_ccfence(); 1377 chunk->c_Next = bchunk; 1378 cpu_sfence(); 1379 1380 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk)) 1381 break; 1382 } 1383 1384 /* 1385 * We have to signal the remote cpu if our actions will cause 1386 * the remote zone to be placed back on ZoneAry so it can 1387 * move the zone back on. 1388 * 1389 * We only need to deal with NULL->non-NULL RChunk transitions 1390 * and only if z_RSignal is set. We interlock by reading rsignal 1391 * before adding our chunk to RChunks. This should result in 1392 * virtually no IPI traffic. 1393 * 1394 * We can use a passive IPI to reduce overhead even further. 
1395 */ 1396 if (bchunk == NULL && rsignal) { 1397 logmemory(free_request, ptr, type, 1398 (unsigned long)z->z_ChunkSize, 0); 1399 lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z); 1400 /* z can get ripped out from under us from this point on */ 1401 } else if (rsignal) { 1402 atomic_subtract_int(&z->z_RCount, 1); 1403 /* z can get ripped out from under us from this point on */ 1404 } 1405 logmemory_quick(free_end); 1406 return; 1407 } 1408 1409 /* 1410 * kfree locally 1411 */ 1412 logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0); 1413 1414 crit_enter(); 1415 chunk = ptr; 1416 chunk_mark_free(z, chunk); 1417 1418 /* 1419 * Put weird data into the memory to detect modifications after freeing, 1420 * illegal pointer use after freeing (we should fault on the odd address), 1421 * and so forth. XXX needs more work, see the old malloc code. 1422 */ 1423 #ifdef INVARIANTS 1424 if (use_weird_array) { 1425 if (z->z_ChunkSize < sizeof(weirdary)) 1426 bcopy(weirdary, chunk, z->z_ChunkSize); 1427 else 1428 bcopy(weirdary, chunk, sizeof(weirdary)); 1429 } 1430 #endif 1431 1432 /* 1433 * Add this free non-zero'd chunk to a linked list for reuse. Add 1434 * to the front of the linked list so it is more likely to be 1435 * reallocated, since it is already in our L1 cache. 1436 */ 1437 #ifdef INVARIANTS 1438 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd) 1439 panic("BADFREE %p", chunk); 1440 #endif 1441 chunk->c_Next = z->z_LChunks; 1442 z->z_LChunks = chunk; 1443 if (chunk->c_Next == NULL) 1444 z->z_LChunksp = &chunk->c_Next; 1445 1446 #ifdef INVARIANTS 1447 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart) 1448 panic("BADFREE2"); 1449 #endif 1450 1451 /* 1452 * Bump the number of free chunks. If it becomes non-zero the zone 1453 * must be added back onto the appropriate list. A fully allocated 1454 * zone that sees its first free is considered 'mature' and is placed 1455 * at the head, giving the system time to potentially free the remaining 1456 * entries even while other allocations are going on and making the zone 1457 * freeable. 1458 */ 1459 if (z->z_NFree++ == 0) 1460 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1461 1462 --type->ks_use[gd->gd_cpuid].inuse; 1463 type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize; 1464 1465 check_zone_free(slgd, z); 1466 logmemory_quick(free_end); 1467 crit_exit(); 1468 } 1469 1470 /* 1471 * Cleanup slabs which are hanging around due to RChunks or which are wholely 1472 * free and can be moved to the free list if not moved by other means. 1473 * 1474 * Called once every 10 seconds on all cpus. 1475 */ 1476 void 1477 slab_cleanup(void) 1478 { 1479 SLGlobalData *slgd = &mycpu->gd_slab; 1480 SLZone *z; 1481 int i; 1482 1483 crit_enter(); 1484 for (i = 0; i < NZONES; ++i) { 1485 if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL) 1486 continue; 1487 1488 /* 1489 * Scan zones. 1490 */ 1491 while (z) { 1492 /* 1493 * Shift all RChunks to the end of the LChunks list. This is 1494 * an O(1) operation. 1495 * 1496 * Then free the zone if possible. 
1497 */ 1498 clean_zone_rchunks(z); 1499 z = check_zone_free(slgd, z); 1500 } 1501 } 1502 crit_exit(); 1503 } 1504 1505 #if defined(INVARIANTS) 1506 1507 /* 1508 * Helper routines for sanity checks 1509 */ 1510 static void 1511 chunk_mark_allocated(SLZone *z, void *chunk) 1512 { 1513 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1514 uint32_t *bitptr; 1515 1516 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1517 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1518 ("memory chunk %p bit index %d is illegal", chunk, bitdex)); 1519 bitptr = &z->z_Bitmap[bitdex >> 5]; 1520 bitdex &= 31; 1521 KASSERT((*bitptr & (1 << bitdex)) == 0, 1522 ("memory chunk %p is already allocated!", chunk)); 1523 *bitptr |= 1 << bitdex; 1524 } 1525 1526 static void 1527 chunk_mark_free(SLZone *z, void *chunk) 1528 { 1529 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1530 uint32_t *bitptr; 1531 1532 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1533 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1534 ("memory chunk %p bit index %d is illegal!", chunk, bitdex)); 1535 bitptr = &z->z_Bitmap[bitdex >> 5]; 1536 bitdex &= 31; 1537 KASSERT((*bitptr & (1 << bitdex)) != 0, 1538 ("memory chunk %p is already free!", chunk)); 1539 *bitptr &= ~(1 << bitdex); 1540 } 1541 1542 #endif 1543 1544 /* 1545 * kmem_slab_alloc() 1546 * 1547 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the 1548 * specified alignment. M_* flags are expected in the flags field. 1549 * 1550 * Alignment must be a multiple of PAGE_SIZE. 1551 * 1552 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(), 1553 * but when we move zalloc() over to use this function as its backend 1554 * we will have to switch to kreserve/krelease and call reserve(0) 1555 * after the new space is made available. 1556 * 1557 * Interrupt code which has preempted other code is not allowed to 1558 * use PQ_CACHE pages. However, if an interrupt thread is run 1559 * non-preemptively or blocks and then runs non-preemptively, then 1560 * it is free to use PQ_CACHE pages. <--- may not apply any longer XXX 1561 */ 1562 static void * 1563 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) 1564 { 1565 vm_size_t i; 1566 vm_offset_t addr; 1567 int count, vmflags, base_vmflags; 1568 vm_page_t mbase = NULL; 1569 vm_page_t m; 1570 thread_t td; 1571 1572 size = round_page(size); 1573 addr = vm_map_min(&kernel_map); 1574 1575 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1576 crit_enter(); 1577 vm_map_lock(&kernel_map); 1578 if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) { 1579 vm_map_unlock(&kernel_map); 1580 if ((flags & M_NULLOK) == 0) 1581 panic("kmem_slab_alloc(): kernel_map ran out of space!"); 1582 vm_map_entry_release(count); 1583 crit_exit(); 1584 return(NULL); 1585 } 1586 1587 /* 1588 * kernel_object maps 1:1 to kernel_map. 
1589 */ 1590 vm_object_hold(&kernel_object); 1591 vm_object_reference_locked(&kernel_object); 1592 vm_map_insert(&kernel_map, &count, 1593 &kernel_object, NULL, 1594 addr, NULL, 1595 addr, addr + size, 1596 VM_MAPTYPE_NORMAL, 1597 VM_SUBSYS_KMALLOC, 1598 VM_PROT_ALL, VM_PROT_ALL, 0); 1599 vm_object_drop(&kernel_object); 1600 vm_map_set_wired_quick(&kernel_map, addr, size, &count); 1601 vm_map_unlock(&kernel_map); 1602 1603 td = curthread; 1604 1605 base_vmflags = 0; 1606 if (flags & M_ZERO) 1607 base_vmflags |= VM_ALLOC_ZERO; 1608 if (flags & M_USE_RESERVE) 1609 base_vmflags |= VM_ALLOC_SYSTEM; 1610 if (flags & M_USE_INTERRUPT_RESERVE) 1611 base_vmflags |= VM_ALLOC_INTERRUPT; 1612 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) { 1613 panic("kmem_slab_alloc: bad flags %08x (%p)", 1614 flags, ((int **)&size)[-1]); 1615 } 1616 1617 /* 1618 * Allocate the pages. Do not map them yet. VM_ALLOC_NORMAL can only 1619 * be set if we are not preempting. 1620 * 1621 * VM_ALLOC_SYSTEM is automatically set if we are preempting and 1622 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is 1623 * implied in this case), though I'm not sure if we really need to 1624 * do that. 1625 */ 1626 vmflags = base_vmflags; 1627 if (flags & M_WAITOK) { 1628 if (td->td_preempted) 1629 vmflags |= VM_ALLOC_SYSTEM; 1630 else 1631 vmflags |= VM_ALLOC_NORMAL; 1632 } 1633 1634 vm_object_hold(&kernel_object); 1635 for (i = 0; i < size; i += PAGE_SIZE) { 1636 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags); 1637 if (i == 0) 1638 mbase = m; 1639 1640 /* 1641 * If the allocation failed we either return NULL or we retry. 1642 * 1643 * If M_WAITOK is specified we wait for more memory and retry. 1644 * If M_WAITOK is specified from a preemption we yield instead of 1645 * wait. Livelock will not occur because the interrupt thread 1646 * will not be preempting anyone the second time around after the 1647 * yield. 1648 */ 1649 if (m == NULL) { 1650 if (flags & M_WAITOK) { 1651 if (td->td_preempted) { 1652 lwkt_switch(); 1653 } else { 1654 vm_wait(0); 1655 } 1656 i -= PAGE_SIZE; /* retry */ 1657 continue; 1658 } 1659 break; 1660 } 1661 } 1662 1663 /* 1664 * Check and deal with an allocation failure 1665 */ 1666 if (i != size) { 1667 while (i != 0) { 1668 i -= PAGE_SIZE; 1669 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); 1670 /* page should already be busy */ 1671 vm_page_free(m); 1672 } 1673 vm_map_lock(&kernel_map); 1674 vm_map_delete(&kernel_map, addr, addr + size, &count); 1675 vm_map_unlock(&kernel_map); 1676 vm_object_drop(&kernel_object); 1677 1678 vm_map_entry_release(count); 1679 crit_exit(); 1680 return(NULL); 1681 } 1682 1683 /* 1684 * Success! 1685 * 1686 * NOTE: The VM pages are still busied. mbase points to the first one 1687 * but we have to iterate via vm_page_next() 1688 */ 1689 vm_object_drop(&kernel_object); 1690 crit_exit(); 1691 1692 /* 1693 * Enter the pages into the pmap and deal with M_ZERO. 
1694 */ 1695 m = mbase; 1696 i = 0; 1697 1698 while (i < size) { 1699 /* 1700 * page should already be busy 1701 */ 1702 m->valid = VM_PAGE_BITS_ALL; 1703 vm_page_wire(m); 1704 pmap_enter(&kernel_pmap, addr + i, m, 1705 VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL); 1706 if (flags & M_ZERO) 1707 pagezero((char *)addr + i); 1708 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED)); 1709 vm_page_flag_set(m, PG_REFERENCED); 1710 vm_page_wakeup(m); 1711 1712 i += PAGE_SIZE; 1713 vm_object_hold(&kernel_object); 1714 m = vm_page_next(m); 1715 vm_object_drop(&kernel_object); 1716 } 1717 smp_invltlb(); 1718 vm_map_entry_release(count); 1719 return((void *)addr); 1720 } 1721 1722 /* 1723 * kmem_slab_free() 1724 */ 1725 static void 1726 kmem_slab_free(void *ptr, vm_size_t size) 1727 { 1728 crit_enter(); 1729 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); 1730 crit_exit(); 1731 } 1732