/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *	Alignment properties:
 *	- All power-of-2 sized allocations are power-of-2 aligned.
 *	- Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *	  power-of-2 round up of 'size'.
 *	- Non-power-of-2 sized allocations are zone chunk size aligned (see
 *	  the above table 'Chunking' column).
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)				\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)						\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void
				 *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32	/* threshold number of zones */

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);

#ifdef INVARIANTS
/*
 * If enabled, any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0,
	   "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
static long SlabsAllocated;
static long SlabsFreed;
SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD,
	    &SlabsAllocated, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD,
	    &SlabsFreed, 0, "");
static int SlabFreeToTail;
SYSCTL_INT(_kern, OID_AUTO, slab_freetotail, CTLFLAG_RW,
	   &SlabFreeToTail, 0, "");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
	size_t limsize;

	limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
	if (limsize > KvaSize)
		limsize = KvaSize;
	return (limsize / (1024 * 1024));
}

static void
kmeminit(void *dummy)
{
	size_t limsize;
	int usesize;
	int i;

	limsize = kmem_lim_size();
	usesize = (int)(limsize * 1024);	/* convert to KB */

	/*
	 * If the machine has a large KVM space and more than 8G of ram,
	 * double the zone release threshold to reduce SMP invalidations.
	 * If more than 16G of ram, do it again.
	 *
	 * The BIOS eats a little ram so add some slop.  We want 8G worth of
	 * memory sticks to trigger the first adjustment.
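	 *
	 * A quick worked example of the adjustment below (illustrative
	 * numbers only): with roughly 8G installed, kmem_lim_size()
	 * reports something near 8000MB, which is >= 7*1024, so the
	 * threshold doubles from 32 to 64.  With roughly 16G the
	 * >= 15*1024 test also passes and it doubles again to 128.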
273 */ 274 if (ZoneRelsThresh == ZONE_RELS_THRESH) { 275 if (limsize >= 7 * 1024) 276 ZoneRelsThresh *= 2; 277 if (limsize >= 15 * 1024) 278 ZoneRelsThresh *= 2; 279 } 280 281 /* 282 * Calculate the zone size. This typically calculates to 283 * ZALLOC_MAX_ZONE_SIZE 284 */ 285 ZoneSize = ZALLOC_MIN_ZONE_SIZE; 286 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize) 287 ZoneSize <<= 1; 288 ZoneLimit = ZoneSize / 4; 289 if (ZoneLimit > ZALLOC_ZONE_LIMIT) 290 ZoneLimit = ZALLOC_ZONE_LIMIT; 291 ZoneMask = ~(uintptr_t)(ZoneSize - 1); 292 ZonePageCount = ZoneSize / PAGE_SIZE; 293 294 for (i = 0; i < NELEM(weirdary); ++i) 295 weirdary[i] = WEIRD_ADDR; 296 297 ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO); 298 299 if (bootverbose) 300 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024); 301 } 302 303 /* 304 * (low level) Initialize slab-related elements in the globaldata structure. 305 * 306 * Occurs after kmeminit(). 307 */ 308 void 309 slab_gdinit(globaldata_t gd) 310 { 311 SLGlobalData *slgd; 312 int i; 313 314 slgd = &gd->gd_slab; 315 for (i = 0; i < NZONES; ++i) 316 TAILQ_INIT(&slgd->ZoneAry[i]); 317 TAILQ_INIT(&slgd->FreeZones); 318 TAILQ_INIT(&slgd->FreeOvZones); 319 } 320 321 /* 322 * Initialize a malloc type tracking structure. 323 */ 324 void 325 malloc_init(void *data) 326 { 327 struct malloc_type *type = data; 328 size_t limsize; 329 330 if (type->ks_magic != M_MAGIC) 331 panic("malloc type lacks magic"); 332 333 if (type->ks_limit != 0) 334 return; 335 336 if (vmstats.v_page_count == 0) 337 panic("malloc_init not allowed before vm init"); 338 339 limsize = kmem_lim_size() * (1024 * 1024); 340 type->ks_limit = limsize / 10; 341 342 type->ks_next = kmemstatistics; 343 kmemstatistics = type; 344 } 345 346 void 347 malloc_uninit(void *data) 348 { 349 struct malloc_type *type = data; 350 struct malloc_type *t; 351 #ifdef INVARIANTS 352 int i; 353 long ttl; 354 #endif 355 356 if (type->ks_magic != M_MAGIC) 357 panic("malloc type lacks magic"); 358 359 if (vmstats.v_page_count == 0) 360 panic("malloc_uninit not allowed before vm init"); 361 362 if (type->ks_limit == 0) 363 panic("malloc_uninit on uninitialized type"); 364 365 /* Make sure that all pending kfree()s are finished. */ 366 lwkt_synchronize_ipiqs("muninit"); 367 368 #ifdef INVARIANTS 369 /* 370 * memuse is only correct in aggregation. Due to memory being allocated 371 * on one cpu and freed on another individual array entries may be 372 * negative or positive (canceling each other out). 373 */ 374 for (i = ttl = 0; i < ncpus; ++i) 375 ttl += type->ks_memuse[i]; 376 if (ttl) { 377 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n", 378 ttl, type->ks_shortdesc, i); 379 } 380 #endif 381 if (type == kmemstatistics) { 382 kmemstatistics = type->ks_next; 383 } else { 384 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { 385 if (t->ks_next == type) { 386 t->ks_next = type->ks_next; 387 break; 388 } 389 } 390 } 391 type->ks_next = NULL; 392 type->ks_limit = 0; 393 } 394 395 /* 396 * Increase the kmalloc pool limit for the specified pool. No changes 397 * are the made if the pool would shrink. 398 */ 399 void 400 kmalloc_raise_limit(struct malloc_type *type, size_t bytes) 401 { 402 if (type->ks_limit == 0) 403 malloc_init(type); 404 if (bytes == 0) 405 bytes = KvaSize; 406 if (type->ks_limit < bytes) 407 type->ks_limit = bytes; 408 } 409 410 /* 411 * Dynamically create a malloc pool. This function is a NOP if *typep is 412 * already non-NULL. 
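 *
 * Typical usage sketch (the pool name and variables are hypothetical,
 * shown only to illustrate pairing with kmalloc_destroy() below):
 *
 *	static struct malloc_type *foo_pool;
 *
 *	kmalloc_create(&foo_pool, "foo structures");
 *	p = kmalloc(sizeof(*p), foo_pool, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, foo_pool);
 *	kmalloc_destroy(&foo_pool);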
413 */ 414 void 415 kmalloc_create(struct malloc_type **typep, const char *descr) 416 { 417 struct malloc_type *type; 418 419 if (*typep == NULL) { 420 type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO); 421 type->ks_magic = M_MAGIC; 422 type->ks_shortdesc = descr; 423 malloc_init(type); 424 *typep = type; 425 } 426 } 427 428 /* 429 * Destroy a dynamically created malloc pool. This function is a NOP if 430 * the pool has already been destroyed. 431 */ 432 void 433 kmalloc_destroy(struct malloc_type **typep) 434 { 435 if (*typep != NULL) { 436 malloc_uninit(*typep); 437 kfree(*typep, M_TEMP); 438 *typep = NULL; 439 } 440 } 441 442 /* 443 * Calculate the zone index for the allocation request size and set the 444 * allocation request size to that particular zone's chunk size. 445 */ 446 static __inline int 447 zoneindex(unsigned long *bytes, unsigned long *align) 448 { 449 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */ 450 if (n < 128) { 451 *bytes = n = (n + 7) & ~7; 452 *align = 8; 453 return(n / 8 - 1); /* 8 byte chunks, 16 zones */ 454 } 455 if (n < 256) { 456 *bytes = n = (n + 15) & ~15; 457 *align = 16; 458 return(n / 16 + 7); 459 } 460 if (n < 8192) { 461 if (n < 512) { 462 *bytes = n = (n + 31) & ~31; 463 *align = 32; 464 return(n / 32 + 15); 465 } 466 if (n < 1024) { 467 *bytes = n = (n + 63) & ~63; 468 *align = 64; 469 return(n / 64 + 23); 470 } 471 if (n < 2048) { 472 *bytes = n = (n + 127) & ~127; 473 *align = 128; 474 return(n / 128 + 31); 475 } 476 if (n < 4096) { 477 *bytes = n = (n + 255) & ~255; 478 *align = 256; 479 return(n / 256 + 39); 480 } 481 *bytes = n = (n + 511) & ~511; 482 *align = 512; 483 return(n / 512 + 47); 484 } 485 #if ZALLOC_ZONE_LIMIT > 8192 486 if (n < 16384) { 487 *bytes = n = (n + 1023) & ~1023; 488 *align = 1024; 489 return(n / 1024 + 55); 490 } 491 #endif 492 #if ZALLOC_ZONE_LIMIT > 16384 493 if (n < 32768) { 494 *bytes = n = (n + 2047) & ~2047; 495 *align = 2048; 496 return(n / 2048 + 63); 497 } 498 #endif 499 panic("Unexpected byte count %d", n); 500 return(0); 501 } 502 503 static __inline 504 void 505 clean_zone_rchunks(SLZone *z) 506 { 507 SLChunk *bchunk; 508 509 while ((bchunk = z->z_RChunks) != NULL) { 510 cpu_ccfence(); 511 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) { 512 *z->z_LChunksp = bchunk; 513 while (bchunk) { 514 chunk_mark_free(z, bchunk); 515 z->z_LChunksp = &bchunk->c_Next; 516 bchunk = bchunk->c_Next; 517 ++z->z_NFree; 518 } 519 break; 520 } 521 /* retry */ 522 } 523 } 524 525 /* 526 * If the zone becomes totally free and is not the only zone listed for a 527 * chunk size we move it to the FreeZones list. We always leave at least 528 * one zone per chunk size listed, even if it is freeable. 529 * 530 * Do not move the zone if there is an IPI in_flight (z_RCount != 0), 531 * otherwise MP races can result in our free_remote code accessing a 532 * destroyed zone. The remote end interlocks z_RCount with z_RChunks 533 * so one has to test both z_NFree and z_RCount. 534 * 535 * Since this code can be called from an IPI callback, do *NOT* try to mess 536 * with kernel_map here. Hysteresis will be performed at kmalloc() time. 
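 *
 * (Worked example for zoneindex() above: requests are rounded up to the
 *  zone chunk size and the chunk alignment is reported back, e.g.
 *
 *	100 bytes  -> 104 byte chunk,  align 8,   zone index 12
 *	600 bytes  -> 640 byte chunk,  align 64,  zone index 33
 *	5000 bytes -> 5120 byte chunk, align 512, zone index 57
 *
 *  These numbers follow directly from the rounding code above.)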
537 */ 538 static __inline 539 SLZone * 540 check_zone_free(SLGlobalData *slgd, SLZone *z) 541 { 542 SLZone *znext; 543 544 znext = TAILQ_NEXT(z, z_Entry); 545 if (z->z_NFree == z->z_NMax && z->z_RCount == 0 && 546 (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext) 547 ) { 548 int *kup; 549 550 TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 551 552 z->z_Magic = -1; 553 TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry); 554 ++slgd->NFreeZones; 555 kup = btokup(z); 556 *kup = 0; 557 } 558 return znext; 559 } 560 561 #ifdef SLAB_DEBUG 562 /* 563 * Used to debug memory corruption issues. Record up to (typically 32) 564 * allocation sources for this zone (for a particular chunk size). 565 */ 566 567 static void 568 slab_record_source(SLZone *z, const char *file, int line) 569 { 570 int i; 571 int b = line & (SLAB_DEBUG_ENTRIES - 1); 572 573 i = b; 574 do { 575 if (z->z_Sources[i].file == file && z->z_Sources[i].line == line) 576 return; 577 if (z->z_Sources[i].file == NULL) 578 break; 579 i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1); 580 } while (i != b); 581 z->z_Sources[i].file = file; 582 z->z_Sources[i].line = line; 583 } 584 585 #endif 586 587 static __inline unsigned long 588 powerof2_size(unsigned long size) 589 { 590 int i; 591 592 if (size == 0 || powerof2(size)) 593 return size; 594 595 i = flsl(size); 596 return (1UL << i); 597 } 598 599 /* 600 * kmalloc() (SLAB ALLOCATOR) 601 * 602 * Allocate memory via the slab allocator. If the request is too large, 603 * or if it page-aligned beyond a certain size, we fall back to the 604 * KMEM subsystem. A SLAB tracking descriptor must be specified, use 605 * &SlabMisc if you don't care. 606 * 607 * M_RNOWAIT - don't block. 608 * M_NULLOK - return NULL instead of blocking. 609 * M_ZERO - zero the returned memory. 610 * M_USE_RESERVE - allow greater drawdown of the free list 611 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted 612 * M_POWEROF2 - roundup size to the nearest power of 2 613 * 614 * MPSAFE 615 */ 616 617 #ifdef SLAB_DEBUG 618 void * 619 kmalloc_debug(unsigned long size, struct malloc_type *type, int flags, 620 const char *file, int line) 621 #else 622 void * 623 kmalloc(unsigned long size, struct malloc_type *type, int flags) 624 #endif 625 { 626 SLZone *z; 627 SLChunk *chunk; 628 SLGlobalData *slgd; 629 struct globaldata *gd; 630 unsigned long align; 631 int zi; 632 #ifdef INVARIANTS 633 int i; 634 #endif 635 636 logmemory_quick(malloc_beg); 637 gd = mycpu; 638 slgd = &gd->gd_slab; 639 640 /* 641 * XXX silly to have this in the critical path. 642 */ 643 if (type->ks_limit == 0) { 644 crit_enter(); 645 malloc_init(type); 646 crit_exit(); 647 } 648 ++type->ks_calls; 649 650 if (flags & M_POWEROF2) 651 size = powerof2_size(size); 652 653 /* 654 * Handle the case where the limit is reached. Panic if we can't return 655 * NULL. The original malloc code looped, but this tended to 656 * simply deadlock the computer. 657 * 658 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used 659 * to determine if a more complete limit check should be done. The 660 * actual memory use is tracked via ks_memuse[cpu]. 
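	 *
	 * (For reference, the M_POWEROF2 handling a few lines up simply
	 *  rounds the request, e.g. a hypothetical kmalloc(100, ...,
	 *  M_POWEROF2) is turned into a 128 byte request before any zone
	 *  selection takes place.)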
661 */ 662 while (type->ks_loosememuse >= type->ks_limit) { 663 int i; 664 long ttl; 665 666 for (i = ttl = 0; i < ncpus; ++i) 667 ttl += type->ks_memuse[i]; 668 type->ks_loosememuse = ttl; /* not MP synchronized */ 669 if ((ssize_t)ttl < 0) /* deal with occassional race */ 670 ttl = 0; 671 if (ttl >= type->ks_limit) { 672 if (flags & M_NULLOK) { 673 logmemory(malloc_end, NULL, type, size, flags); 674 return(NULL); 675 } 676 panic("%s: malloc limit exceeded", type->ks_shortdesc); 677 } 678 } 679 680 /* 681 * Handle the degenerate size == 0 case. Yes, this does happen. 682 * Return a special pointer. This is to maintain compatibility with 683 * the original malloc implementation. Certain devices, such as the 684 * adaptec driver, not only allocate 0 bytes, they check for NULL and 685 * also realloc() later on. Joy. 686 */ 687 if (size == 0) { 688 logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags); 689 return(ZERO_LENGTH_PTR); 690 } 691 692 /* 693 * Handle hysteresis from prior frees here in malloc(). We cannot 694 * safely manipulate the kernel_map in free() due to free() possibly 695 * being called via an IPI message or from sensitive interrupt code. 696 * 697 * NOTE: ku_pagecnt must be cleared before we free the slab or we 698 * might race another cpu allocating the kva and setting 699 * ku_pagecnt. 700 */ 701 while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) { 702 crit_enter(); 703 if (slgd->NFreeZones > ZoneRelsThresh) { /* crit sect race */ 704 int *kup; 705 706 z = TAILQ_LAST(&slgd->FreeZones, SLZoneList); 707 KKASSERT(z != NULL); 708 TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry); 709 --slgd->NFreeZones; 710 kup = btokup(z); 711 *kup = 0; 712 kmem_slab_free(z, ZoneSize); /* may block */ 713 atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024); 714 } 715 crit_exit(); 716 } 717 718 /* 719 * XXX handle oversized frees that were queued from kfree(). 720 */ 721 while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) { 722 crit_enter(); 723 if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) { 724 vm_size_t tsize; 725 726 KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC); 727 TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry); 728 tsize = z->z_ChunkSize; 729 kmem_slab_free(z, tsize); /* may block */ 730 atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024); 731 } 732 crit_exit(); 733 } 734 735 /* 736 * Handle large allocations directly. There should not be very many of 737 * these so performance is not a big issue. 738 * 739 * The backend allocator is pretty nasty on a SMP system. Use the 740 * slab allocator for one and two page-sized chunks even though we lose 741 * some efficiency. XXX maybe fix mmio and the elf loader instead. 742 */ 743 if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) { 744 int *kup; 745 746 size = round_page(size); 747 chunk = kmem_slab_alloc(size, PAGE_SIZE, flags); 748 if (chunk == NULL) { 749 logmemory(malloc_end, NULL, type, size, flags); 750 return(NULL); 751 } 752 atomic_add_int(&ZoneBigAlloc, (int)size / 1024); 753 flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */ 754 flags |= M_PASSIVE_ZERO; 755 kup = btokup(chunk); 756 *kup = size / PAGE_SIZE; 757 crit_enter(); 758 goto done; 759 } 760 761 /* 762 * Attempt to allocate out of an existing zone. First try the free list, 763 * then allocate out of unallocated space. If we find a good zone move 764 * it to the head of the list so later allocations find it quickly 765 * (we might have thousands of zones in the list). 
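	 *
	 * (Worked example for the oversized path above, assuming 4K pages
	 *  and a 16K ZoneLimit: a 100KB request is rounded to 25 pages,
	 *  handed directly to kmem_slab_alloc(), and *btokup(chunk) is set
	 *  to 25 so kfree()/krealloc() can recover the size later.)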
766 * 767 * Note: zoneindex() will panic of size is too large. 768 */ 769 zi = zoneindex(&size, &align); 770 KKASSERT(zi < NZONES); 771 crit_enter(); 772 773 if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) { 774 /* 775 * Locate a chunk - we have to have at least one. If this is the 776 * last chunk go ahead and do the work to retrieve chunks freed 777 * from remote cpus, and if the zone is still empty move it off 778 * the ZoneAry. 779 */ 780 if (--z->z_NFree <= 0) { 781 KKASSERT(z->z_NFree == 0); 782 783 /* 784 * WARNING! This code competes with other cpus. It is ok 785 * for us to not drain RChunks here but we might as well, and 786 * it is ok if more accumulate after we're done. 787 * 788 * Set RSignal before pulling rchunks off, indicating that we 789 * will be moving ourselves off of the ZoneAry. Remote ends will 790 * read RSignal before putting rchunks on thus interlocking 791 * their IPI signaling. 792 */ 793 if (z->z_RChunks == NULL) 794 atomic_swap_int(&z->z_RSignal, 1); 795 796 clean_zone_rchunks(z); 797 798 /* 799 * Remove from the zone list if no free chunks remain. 800 * Clear RSignal 801 */ 802 if (z->z_NFree == 0) { 803 TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry); 804 } else { 805 z->z_RSignal = 0; 806 } 807 } 808 809 /* 810 * Fast path, we have chunks available in z_LChunks. 811 */ 812 chunk = z->z_LChunks; 813 if (chunk) { 814 chunk_mark_allocated(z, chunk); 815 z->z_LChunks = chunk->c_Next; 816 if (z->z_LChunks == NULL) 817 z->z_LChunksp = &z->z_LChunks; 818 #ifdef SLAB_DEBUG 819 slab_record_source(z, file, line); 820 #endif 821 goto done; 822 } 823 824 /* 825 * No chunks are available in LChunks, the free chunk MUST be 826 * in the never-before-used memory area, controlled by UIndex. 827 * 828 * The consequences are very serious if our zone got corrupted so 829 * we use an explicit panic rather than a KASSERT. 830 */ 831 if (z->z_UIndex + 1 != z->z_NMax) 832 ++z->z_UIndex; 833 else 834 z->z_UIndex = 0; 835 836 if (z->z_UIndex == z->z_UEndIndex) 837 panic("slaballoc: corrupted zone"); 838 839 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 840 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 841 flags &= ~M_ZERO; 842 flags |= M_PASSIVE_ZERO; 843 } 844 chunk_mark_allocated(z, chunk); 845 #ifdef SLAB_DEBUG 846 slab_record_source(z, file, line); 847 #endif 848 goto done; 849 } 850 851 /* 852 * If all zones are exhausted we need to allocate a new zone for this 853 * index. Use M_ZERO to take advantage of pre-zerod pages. Also see 854 * UAlloc use above in regards to M_ZERO. Note that when we are reusing 855 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and 856 * we do not pre-zero it because we do not want to mess up the L1 cache. 857 * 858 * At least one subsystem, the tty code (see CROUND) expects power-of-2 859 * allocations to be power-of-2 aligned. We maintain compatibility by 860 * adjusting the base offset below. 861 */ 862 { 863 int off; 864 int *kup; 865 866 if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) { 867 TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry); 868 --slgd->NFreeZones; 869 bzero(z, sizeof(SLZone)); 870 z->z_Flags |= SLZF_UNOTZEROD; 871 } else { 872 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO); 873 if (z == NULL) 874 goto fail; 875 atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024); 876 } 877 878 /* 879 * How big is the base structure? 880 */ 881 #if defined(INVARIANTS) 882 /* 883 * Make room for z_Bitmap. An exact calculation is somewhat more 884 * complicated so don't make an exact calculation. 
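	 *
	 * (Rough numbers: with a 128K zone and 1024 byte chunks there are
	 *  at most 128 chunks, so z_Bitmap needs (128 + 31) / 32 = 4
	 *  uint32_t words; the calculation below over-reserves slightly
	 *  instead of accounting for the zone header exactly.)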
885 */ 886 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]); 887 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8); 888 #else 889 off = sizeof(SLZone); 890 #endif 891 892 /* 893 * Guarentee power-of-2 alignment for power-of-2-sized chunks. 894 * Otherwise properly align the data according to the chunk size. 895 */ 896 if (powerof2(size)) 897 align = size; 898 off = roundup2(off, align); 899 900 z->z_Magic = ZALLOC_SLAB_MAGIC; 901 z->z_ZoneIndex = zi; 902 z->z_NMax = (ZoneSize - off) / size; 903 z->z_NFree = z->z_NMax - 1; 904 z->z_BasePtr = (char *)z + off; 905 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax; 906 z->z_ChunkSize = size; 907 z->z_CpuGd = gd; 908 z->z_Cpu = gd->gd_cpuid; 909 z->z_LChunksp = &z->z_LChunks; 910 #ifdef SLAB_DEBUG 911 bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources)); 912 bzero(z->z_Sources, sizeof(z->z_Sources)); 913 #endif 914 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 915 TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry); 916 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 917 flags &= ~M_ZERO; /* already zero'd */ 918 flags |= M_PASSIVE_ZERO; 919 } 920 kup = btokup(z); 921 *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */ 922 chunk_mark_allocated(z, chunk); 923 #ifdef SLAB_DEBUG 924 slab_record_source(z, file, line); 925 #endif 926 927 /* 928 * Slide the base index for initial allocations out of the next 929 * zone we create so we do not over-weight the lower part of the 930 * cpu memory caches. 931 */ 932 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) 933 & (ZALLOC_MAX_ZONE_SIZE - 1); 934 } 935 936 done: 937 ++type->ks_inuse[gd->gd_cpuid]; 938 type->ks_memuse[gd->gd_cpuid] += size; 939 type->ks_loosememuse += size; /* not MP synchronized */ 940 crit_exit(); 941 942 if (flags & M_ZERO) 943 bzero(chunk, size); 944 #ifdef INVARIANTS 945 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) { 946 if (use_malloc_pattern) { 947 for (i = 0; i < size; i += sizeof(int)) { 948 *(int *)((char *)chunk + i) = -1; 949 } 950 } 951 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */ 952 } 953 #endif 954 logmemory(malloc_end, chunk, type, size, flags); 955 return(chunk); 956 fail: 957 crit_exit(); 958 logmemory(malloc_end, NULL, type, size, flags); 959 return(NULL); 960 } 961 962 /* 963 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE) 964 * 965 * Generally speaking this routine is not called very often and we do 966 * not attempt to optimize it beyond reusing the same pointer if the 967 * new size fits within the chunking of the old pointer's zone. 968 */ 969 #ifdef SLAB_DEBUG 970 void * 971 krealloc_debug(void *ptr, unsigned long size, 972 struct malloc_type *type, int flags, 973 const char *file, int line) 974 #else 975 void * 976 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags) 977 #endif 978 { 979 unsigned long osize; 980 unsigned long align; 981 SLZone *z; 982 void *nptr; 983 int *kup; 984 985 KKASSERT((flags & M_ZERO) == 0); /* not supported */ 986 987 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) 988 return(kmalloc_debug(size, type, flags, file, line)); 989 if (size == 0) { 990 kfree(ptr, type); 991 return(NULL); 992 } 993 994 /* 995 * Handle oversized allocations. XXX we really should require that a 996 * size be passed to free() instead of this nonsense. 
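	 *
	 * (Worked example with hypothetical sizes: a 100 byte allocation
	 *  lives in a 104 byte chunk, so krealloc(ptr, 103, ...) in the
	 *  zone case below just returns the same pointer, while
	 *  krealloc(ptr, 105, ...) maps to a 112 byte chunk and therefore
	 *  allocates, copies and frees.)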
997 */ 998 kup = btokup(ptr); 999 if (*kup > 0) { 1000 osize = *kup << PAGE_SHIFT; 1001 if (osize == round_page(size)) 1002 return(ptr); 1003 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 1004 return(NULL); 1005 bcopy(ptr, nptr, min(size, osize)); 1006 kfree(ptr, type); 1007 return(nptr); 1008 } 1009 1010 /* 1011 * Get the original allocation's zone. If the new request winds up 1012 * using the same chunk size we do not have to do anything. 1013 */ 1014 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 1015 kup = btokup(z); 1016 KKASSERT(*kup < 0); 1017 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1018 1019 /* 1020 * Allocate memory for the new request size. Note that zoneindex has 1021 * already adjusted the request size to the appropriate chunk size, which 1022 * should optimize our bcopy(). Then copy and return the new pointer. 1023 * 1024 * Resizing a non-power-of-2 allocation to a power-of-2 size does not 1025 * necessary align the result. 1026 * 1027 * We can only zoneindex (to align size to the chunk size) if the new 1028 * size is not too large. 1029 */ 1030 if (size < ZoneLimit) { 1031 zoneindex(&size, &align); 1032 if (z->z_ChunkSize == size) 1033 return(ptr); 1034 } 1035 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 1036 return(NULL); 1037 bcopy(ptr, nptr, min(size, z->z_ChunkSize)); 1038 kfree(ptr, type); 1039 return(nptr); 1040 } 1041 1042 /* 1043 * Return the kmalloc limit for this type, in bytes. 1044 */ 1045 long 1046 kmalloc_limit(struct malloc_type *type) 1047 { 1048 if (type->ks_limit == 0) { 1049 crit_enter(); 1050 if (type->ks_limit == 0) 1051 malloc_init(type); 1052 crit_exit(); 1053 } 1054 return(type->ks_limit); 1055 } 1056 1057 /* 1058 * Allocate a copy of the specified string. 1059 * 1060 * (MP SAFE) (MAY BLOCK) 1061 */ 1062 #ifdef SLAB_DEBUG 1063 char * 1064 kstrdup_debug(const char *str, struct malloc_type *type, 1065 const char *file, int line) 1066 #else 1067 char * 1068 kstrdup(const char *str, struct malloc_type *type) 1069 #endif 1070 { 1071 int zlen; /* length inclusive of terminating NUL */ 1072 char *nstr; 1073 1074 if (str == NULL) 1075 return(NULL); 1076 zlen = strlen(str) + 1; 1077 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line); 1078 bcopy(str, nstr, zlen); 1079 return(nstr); 1080 } 1081 1082 /* 1083 * Notify our cpu that a remote cpu has freed some chunks in a zone that 1084 * we own. RCount will be bumped so the memory should be good, but validate 1085 * that it really is. 1086 */ 1087 static 1088 void 1089 kfree_remote(void *ptr) 1090 { 1091 SLGlobalData *slgd; 1092 SLZone *z; 1093 int nfree; 1094 int *kup; 1095 1096 slgd = &mycpu->gd_slab; 1097 z = ptr; 1098 kup = btokup(z); 1099 KKASSERT(*kup == -((int)mycpuid + 1)); 1100 KKASSERT(z->z_RCount > 0); 1101 atomic_subtract_int(&z->z_RCount, 1); 1102 1103 logmemory(free_rem_beg, z, NULL, 0L, 0); 1104 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1105 KKASSERT(z->z_Cpu == mycpu->gd_cpuid); 1106 nfree = z->z_NFree; 1107 1108 /* 1109 * Indicate that we will no longer be off of the ZoneAry by 1110 * clearing RSignal. 1111 */ 1112 if (z->z_RChunks) 1113 z->z_RSignal = 0; 1114 1115 /* 1116 * Atomically extract the bchunks list and then process it back 1117 * into the lchunks list. We want to append our bchunks to the 1118 * lchunks list and not prepend since we likely do not have 1119 * cache mastership of the related data (not that it helps since 1120 * we are using c_Next). 
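	 *
	 * (Reminder of the *kup encoding asserted above: a positive value
	 *  is the page count of an oversized kmem allocation, -(cpu + 1)
	 *  marks a slab zone owned by that cpu, and 0 means the kva is
	 *  free or being torn down.)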
1121 */ 1122 clean_zone_rchunks(z); 1123 if (z->z_NFree && nfree == 0) { 1124 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1125 } 1126 1127 /* 1128 * If the zone becomes totally free and is not the only zone listed for a 1129 * chunk size we move it to the FreeZones list. We always leave at least 1130 * one zone per chunk size listed, even if it is freeable. 1131 * 1132 * Since this code can be called from an IPI callback, do *NOT* try to 1133 * mess with kernel_map here. Hysteresis will be performed at malloc() 1134 * time. 1135 * 1136 * Do not move the zone if there is an IPI in_flight (z_RCount != 0), 1137 * otherwise MP races can result in our free_remote code accessing a 1138 * destroyed zone. The remote end interlocks z_RCount with z_RChunks 1139 * so one has to test both z_NFree and z_RCount. 1140 */ 1141 if (z->z_NFree == z->z_NMax && z->z_RCount == 0 && 1142 (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || 1143 TAILQ_NEXT(z, z_Entry)) 1144 ) { 1145 int *kup; 1146 1147 TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1148 z->z_Magic = -1; 1149 TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry); 1150 ++slgd->NFreeZones; 1151 kup = btokup(z); 1152 *kup = 0; 1153 } 1154 logmemory(free_rem_end, z, NULL, 0L, 0); 1155 } 1156 1157 /* 1158 * free (SLAB ALLOCATOR) 1159 * 1160 * Free a memory block previously allocated by malloc. Note that we do not 1161 * attempt to update ks_loosememuse as MP races could prevent us from 1162 * checking memory limits in malloc. 1163 * 1164 * MPSAFE 1165 */ 1166 void 1167 kfree(void *ptr, struct malloc_type *type) 1168 { 1169 SLZone *z; 1170 SLChunk *chunk; 1171 SLGlobalData *slgd; 1172 struct globaldata *gd; 1173 int *kup; 1174 unsigned long size; 1175 SLChunk *bchunk; 1176 int rsignal; 1177 1178 logmemory_quick(free_beg); 1179 gd = mycpu; 1180 slgd = &gd->gd_slab; 1181 1182 if (ptr == NULL) 1183 panic("trying to free NULL pointer"); 1184 1185 /* 1186 * Handle special 0-byte allocations 1187 */ 1188 if (ptr == ZERO_LENGTH_PTR) { 1189 logmemory(free_zero, ptr, type, -1UL, 0); 1190 logmemory_quick(free_end); 1191 return; 1192 } 1193 1194 /* 1195 * Panic on bad malloc type 1196 */ 1197 if (type->ks_magic != M_MAGIC) 1198 panic("free: malloc type lacks magic"); 1199 1200 /* 1201 * Handle oversized allocations. XXX we really should require that a 1202 * size be passed to free() instead of this nonsense. 1203 * 1204 * This code is never called via an ipi. 1205 */ 1206 kup = btokup(ptr); 1207 if (*kup > 0) { 1208 size = *kup << PAGE_SHIFT; 1209 *kup = 0; 1210 #ifdef INVARIANTS 1211 KKASSERT(sizeof(weirdary) <= size); 1212 bcopy(weirdary, ptr, sizeof(weirdary)); 1213 #endif 1214 /* 1215 * NOTE: For oversized allocations we do not record the 1216 * originating cpu. It gets freed on the cpu calling 1217 * kfree(). The statistics are in aggregate. 1218 * 1219 * note: XXX we have still inherited the interrupts-can't-block 1220 * assumption. An interrupt thread does not bump 1221 * gd_intr_nesting_level so check TDF_INTTHREAD. This is 1222 * primarily until we can fix softupdate's assumptions about free(). 
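	 *
	 * (Behavioral note for the checks earlier in this function:
	 *  kfree(NULL, type) panics, while the ZERO_LENGTH_PTR cookie
	 *  handed out by kmalloc(0, ...) is silently accepted and
	 *  ignored.)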
1223 */ 1224 crit_enter(); 1225 --type->ks_inuse[gd->gd_cpuid]; 1226 type->ks_memuse[gd->gd_cpuid] -= size; 1227 if (mycpu->gd_intr_nesting_level || 1228 (gd->gd_curthread->td_flags & TDF_INTTHREAD)) 1229 { 1230 logmemory(free_ovsz_delayed, ptr, type, size, 0); 1231 z = (SLZone *)ptr; 1232 z->z_Magic = ZALLOC_OVSZ_MAGIC; 1233 z->z_ChunkSize = size; 1234 1235 TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry); 1236 crit_exit(); 1237 } else { 1238 crit_exit(); 1239 logmemory(free_ovsz, ptr, type, size, 0); 1240 kmem_slab_free(ptr, size); /* may block */ 1241 atomic_add_int(&ZoneBigAlloc, -(int)size / 1024); 1242 } 1243 logmemory_quick(free_end); 1244 return; 1245 } 1246 1247 /* 1248 * Zone case. Figure out the zone based on the fact that it is 1249 * ZoneSize aligned. 1250 */ 1251 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 1252 kup = btokup(z); 1253 KKASSERT(*kup < 0); 1254 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1255 1256 /* 1257 * If we do not own the zone then use atomic ops to free to the 1258 * remote cpu linked list and notify the target zone using a 1259 * passive message. 1260 * 1261 * The target zone cannot be deallocated while we own a chunk of it, 1262 * so the zone header's storage is stable until the very moment 1263 * we adjust z_RChunks. After that we cannot safely dereference (z). 1264 * 1265 * (no critical section needed) 1266 */ 1267 if (z->z_CpuGd != gd) { 1268 /* 1269 * Making these adjustments now allow us to avoid passing (type) 1270 * to the remote cpu. Note that ks_inuse/ks_memuse is being 1271 * adjusted on OUR cpu, not the zone cpu, but it should all still 1272 * sum up properly and cancel out. 1273 */ 1274 crit_enter(); 1275 --type->ks_inuse[gd->gd_cpuid]; 1276 type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize; 1277 crit_exit(); 1278 1279 /* 1280 * WARNING! This code competes with other cpus. Once we 1281 * successfully link the chunk to RChunks the remote 1282 * cpu can rip z's storage out from under us. 1283 * 1284 * Bumping RCount prevents z's storage from getting 1285 * ripped out. 1286 */ 1287 rsignal = z->z_RSignal; 1288 cpu_lfence(); 1289 if (rsignal) 1290 atomic_add_int(&z->z_RCount, 1); 1291 1292 chunk = ptr; 1293 for (;;) { 1294 bchunk = z->z_RChunks; 1295 cpu_ccfence(); 1296 chunk->c_Next = bchunk; 1297 cpu_sfence(); 1298 1299 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk)) 1300 break; 1301 } 1302 1303 /* 1304 * We have to signal the remote cpu if our actions will cause 1305 * the remote zone to be placed back on ZoneAry so it can 1306 * move the zone back on. 1307 * 1308 * We only need to deal with NULL->non-NULL RChunk transitions 1309 * and only if z_RSignal is set. We interlock by reading rsignal 1310 * before adding our chunk to RChunks. This should result in 1311 * virtually no IPI traffic. 1312 * 1313 * We can use a passive IPI to reduce overhead even further. 
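	 *
	 * (Scenario sketch: the owning cpu sets z_RSignal when it runs the
	 *  zone's local chunks dry and may take it off its ZoneAry.  A
	 *  remote cpu freeing into the zone samples rsignal, pushes onto
	 *  z_RChunks with the cmpset loop above, and only the NULL ->
	 *  non-NULL transition sends an IPI, so a burst of remote frees
	 *  costs at most one IPI.)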
1314 */ 1315 if (bchunk == NULL && rsignal) { 1316 logmemory(free_request, ptr, type, 1317 (unsigned long)z->z_ChunkSize, 0); 1318 lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z); 1319 /* z can get ripped out from under us from this point on */ 1320 } else if (rsignal) { 1321 atomic_subtract_int(&z->z_RCount, 1); 1322 /* z can get ripped out from under us from this point on */ 1323 } 1324 logmemory_quick(free_end); 1325 return; 1326 } 1327 1328 /* 1329 * kfree locally 1330 */ 1331 logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0); 1332 1333 crit_enter(); 1334 chunk = ptr; 1335 chunk_mark_free(z, chunk); 1336 1337 /* 1338 * Put weird data into the memory to detect modifications after freeing, 1339 * illegal pointer use after freeing (we should fault on the odd address), 1340 * and so forth. XXX needs more work, see the old malloc code. 1341 */ 1342 #ifdef INVARIANTS 1343 if (z->z_ChunkSize < sizeof(weirdary)) 1344 bcopy(weirdary, chunk, z->z_ChunkSize); 1345 else 1346 bcopy(weirdary, chunk, sizeof(weirdary)); 1347 #endif 1348 1349 /* 1350 * Add this free non-zero'd chunk to a linked list for reuse. Add 1351 * to the front of the linked list so it is more likely to be 1352 * reallocated, since it is already in our L1 cache. 1353 */ 1354 #ifdef INVARIANTS 1355 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd) 1356 panic("BADFREE %p", chunk); 1357 #endif 1358 chunk->c_Next = z->z_LChunks; 1359 z->z_LChunks = chunk; 1360 if (chunk->c_Next == NULL) 1361 z->z_LChunksp = &chunk->c_Next; 1362 1363 #ifdef INVARIANTS 1364 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart) 1365 panic("BADFREE2"); 1366 #endif 1367 1368 /* 1369 * Bump the number of free chunks. If it becomes non-zero the zone 1370 * must be added back onto the appropriate list. A fully allocated 1371 * zone that sees its first free is considered 'mature' and is placed 1372 * at the head, giving the system time to potentially free the remaining 1373 * entries even while other allocations are going on and making the zone 1374 * freeable. 1375 */ 1376 if (z->z_NFree++ == 0) { 1377 if (SlabFreeToTail) 1378 TAILQ_INSERT_TAIL(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1379 else 1380 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1381 } 1382 1383 --type->ks_inuse[z->z_Cpu]; 1384 type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize; 1385 1386 check_zone_free(slgd, z); 1387 logmemory_quick(free_end); 1388 crit_exit(); 1389 } 1390 1391 /* 1392 * Cleanup slabs which are hanging around due to RChunks or which are wholely 1393 * free and can be moved to the free list if not moved by other means. 1394 * 1395 * Called once every 10 seconds on all cpus. 1396 */ 1397 void 1398 slab_cleanup(void) 1399 { 1400 SLGlobalData *slgd = &mycpu->gd_slab; 1401 SLZone *z; 1402 int i; 1403 1404 crit_enter(); 1405 for (i = 0; i < NZONES; ++i) { 1406 if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL) 1407 continue; 1408 1409 /* 1410 * Scan zones. 1411 */ 1412 while (z) { 1413 /* 1414 * Shift all RChunks to the end of the LChunks list. This is 1415 * an O(1) operation. 1416 * 1417 * Then free the zone if possible. 
1418 */ 1419 clean_zone_rchunks(z); 1420 z = check_zone_free(slgd, z); 1421 } 1422 } 1423 crit_exit(); 1424 } 1425 1426 #if defined(INVARIANTS) 1427 1428 /* 1429 * Helper routines for sanity checks 1430 */ 1431 static 1432 void 1433 chunk_mark_allocated(SLZone *z, void *chunk) 1434 { 1435 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1436 uint32_t *bitptr; 1437 1438 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1439 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1440 ("memory chunk %p bit index %d is illegal", chunk, bitdex)); 1441 bitptr = &z->z_Bitmap[bitdex >> 5]; 1442 bitdex &= 31; 1443 KASSERT((*bitptr & (1 << bitdex)) == 0, 1444 ("memory chunk %p is already allocated!", chunk)); 1445 *bitptr |= 1 << bitdex; 1446 } 1447 1448 static 1449 void 1450 chunk_mark_free(SLZone *z, void *chunk) 1451 { 1452 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1453 uint32_t *bitptr; 1454 1455 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1456 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1457 ("memory chunk %p bit index %d is illegal!", chunk, bitdex)); 1458 bitptr = &z->z_Bitmap[bitdex >> 5]; 1459 bitdex &= 31; 1460 KASSERT((*bitptr & (1 << bitdex)) != 0, 1461 ("memory chunk %p is already free!", chunk)); 1462 *bitptr &= ~(1 << bitdex); 1463 } 1464 1465 #endif 1466 1467 /* 1468 * kmem_slab_alloc() 1469 * 1470 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the 1471 * specified alignment. M_* flags are expected in the flags field. 1472 * 1473 * Alignment must be a multiple of PAGE_SIZE. 1474 * 1475 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(), 1476 * but when we move zalloc() over to use this function as its backend 1477 * we will have to switch to kreserve/krelease and call reserve(0) 1478 * after the new space is made available. 1479 * 1480 * Interrupt code which has preempted other code is not allowed to 1481 * use PQ_CACHE pages. However, if an interrupt thread is run 1482 * non-preemptively or blocks and then runs non-preemptively, then 1483 * it is free to use PQ_CACHE pages. <--- may not apply any longer XXX 1484 */ 1485 static void * 1486 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) 1487 { 1488 vm_size_t i; 1489 vm_offset_t addr; 1490 int count, vmflags, base_vmflags; 1491 vm_page_t mbase = NULL; 1492 vm_page_t m; 1493 thread_t td; 1494 1495 size = round_page(size); 1496 addr = vm_map_min(&kernel_map); 1497 1498 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1499 crit_enter(); 1500 vm_map_lock(&kernel_map); 1501 if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) { 1502 vm_map_unlock(&kernel_map); 1503 if ((flags & M_NULLOK) == 0) 1504 panic("kmem_slab_alloc(): kernel_map ran out of space!"); 1505 vm_map_entry_release(count); 1506 crit_exit(); 1507 return(NULL); 1508 } 1509 1510 /* 1511 * kernel_object maps 1:1 to kernel_map. 
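	 *
	 * (For reference, the callers in this file pass page alignment for
	 *  oversized blocks, kmem_slab_alloc(size, PAGE_SIZE, flags), and
	 *  zone-sized alignment for new zones, kmem_slab_alloc(ZoneSize,
	 *  ZoneSize, flags | M_ZERO); one of M_WAITOK or M_RNOWAIT must be
	 *  present or the routine panics.)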
1512 */ 1513 vm_object_hold(&kernel_object); 1514 vm_object_reference_locked(&kernel_object); 1515 vm_map_insert(&kernel_map, &count, 1516 &kernel_object, NULL, 1517 addr, addr, addr + size, 1518 VM_MAPTYPE_NORMAL, 1519 VM_PROT_ALL, VM_PROT_ALL, 1520 0); 1521 vm_object_drop(&kernel_object); 1522 vm_map_set_wired_quick(&kernel_map, addr, size, &count); 1523 vm_map_unlock(&kernel_map); 1524 1525 td = curthread; 1526 1527 base_vmflags = 0; 1528 if (flags & M_ZERO) 1529 base_vmflags |= VM_ALLOC_ZERO; 1530 if (flags & M_USE_RESERVE) 1531 base_vmflags |= VM_ALLOC_SYSTEM; 1532 if (flags & M_USE_INTERRUPT_RESERVE) 1533 base_vmflags |= VM_ALLOC_INTERRUPT; 1534 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) { 1535 panic("kmem_slab_alloc: bad flags %08x (%p)", 1536 flags, ((int **)&size)[-1]); 1537 } 1538 1539 /* 1540 * Allocate the pages. Do not mess with the PG_ZERO flag or map 1541 * them yet. VM_ALLOC_NORMAL can only be set if we are not preempting. 1542 * 1543 * VM_ALLOC_SYSTEM is automatically set if we are preempting and 1544 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is 1545 * implied in this case), though I'm not sure if we really need to 1546 * do that. 1547 */ 1548 vmflags = base_vmflags; 1549 if (flags & M_WAITOK) { 1550 if (td->td_preempted) 1551 vmflags |= VM_ALLOC_SYSTEM; 1552 else 1553 vmflags |= VM_ALLOC_NORMAL; 1554 } 1555 1556 vm_object_hold(&kernel_object); 1557 for (i = 0; i < size; i += PAGE_SIZE) { 1558 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags); 1559 if (i == 0) 1560 mbase = m; 1561 1562 /* 1563 * If the allocation failed we either return NULL or we retry. 1564 * 1565 * If M_WAITOK is specified we wait for more memory and retry. 1566 * If M_WAITOK is specified from a preemption we yield instead of 1567 * wait. Livelock will not occur because the interrupt thread 1568 * will not be preempting anyone the second time around after the 1569 * yield. 1570 */ 1571 if (m == NULL) { 1572 if (flags & M_WAITOK) { 1573 if (td->td_preempted) { 1574 lwkt_switch(); 1575 } else { 1576 vm_wait(0); 1577 } 1578 i -= PAGE_SIZE; /* retry */ 1579 continue; 1580 } 1581 break; 1582 } 1583 } 1584 1585 /* 1586 * Check and deal with an allocation failure 1587 */ 1588 if (i != size) { 1589 while (i != 0) { 1590 i -= PAGE_SIZE; 1591 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); 1592 /* page should already be busy */ 1593 vm_page_free(m); 1594 } 1595 vm_map_lock(&kernel_map); 1596 vm_map_delete(&kernel_map, addr, addr + size, &count); 1597 vm_map_unlock(&kernel_map); 1598 vm_object_drop(&kernel_object); 1599 1600 vm_map_entry_release(count); 1601 crit_exit(); 1602 return(NULL); 1603 } 1604 1605 /* 1606 * Success! 1607 * 1608 * NOTE: The VM pages are still busied. mbase points to the first one 1609 * but we have to iterate via vm_page_next() 1610 */ 1611 vm_object_drop(&kernel_object); 1612 crit_exit(); 1613 1614 /* 1615 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO. 
	 */
	m = mbase;
	i = 0;

	while (i < size) {
		/*
		 * page should already be busy
		 */
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		pmap_enter(&kernel_pmap, addr + i, m,
			   VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL);
		if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
			bzero((char *)addr + i, PAGE_SIZE);
		vm_page_flag_clear(m, PG_ZERO);
		KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
		vm_page_flag_set(m, PG_REFERENCED);
		vm_page_wakeup(m);

		i += PAGE_SIZE;
		vm_object_hold(&kernel_object);
		m = vm_page_next(m);
		vm_object_drop(&kernel_object);
	}
	smp_invltlb();
	vm_map_entry_release(count);
	atomic_add_long(&SlabsAllocated, 1);
	return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
	crit_enter();
	vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
	atomic_add_long(&SlabsFreed, 1);
	crit_exit();
}

void *
kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
		   int flags)
{
#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

	void *ret;

	if (size_alloc < __VM_CACHELINE_SIZE)
		size_alloc = __VM_CACHELINE_SIZE;
	else if (!CAN_CACHEALIGN(size_alloc))
		flags |= M_POWEROF2;

	ret = kmalloc(size_alloc, type, flags);
	KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
		("%p(%lu) not cacheline %d aligned",
		 ret, size_alloc, __VM_CACHELINE_SIZE));
	return ret;

#undef CAN_CACHEALIGN
}
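
/*
 * Worked example of the kmalloc_cachealign() policy above on a machine
 * with 64 byte cache lines (sizes are illustrative only):
 *
 *	request 40   -> bumped to 64, a power-of-2 size and therefore
 *			64 byte aligned
 *	request 200  -> M_POWEROF2 added, rounded up to 256
 *	request 1000 -> >= 512 so left alone; the resulting 1024 byte
 *			chunk is already 128 byte aligned
 */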