/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is the chunk size multiplied
 * by the number of zones: roughly 80 zones * 128K = ~10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.   In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *	Alignment properties:
 *	- All power-of-2 sized allocations are power-of-2 aligned.
 *	- Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *	  power-of-2 round up of 'size'.
 *	- Non-power-of-2 sized allocations are zone chunk size aligned (see
 *	  the above table 'Chunking' column).
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)				\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)						\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif
/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32	/* threshold number of zones */

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0,
	   "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
static long SlabsAllocated;
static long SlabsFreed;
SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD,
	    &SlabsAllocated, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD,
	    &SlabsFreed, 0, "");
static int SlabFreeToTail;
SYSCTL_INT(_kern, OID_AUTO, slab_freetotail, CTLFLAG_RW,
	   &SlabFreeToTail, 0, "");

static struct spinlock kmemstat_spin =
	SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
	size_t limsize;

	limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
	if (limsize > KvaSize)
		limsize = KvaSize;
	return (limsize / (1024 * 1024));
}
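/*
 * Worked example (illustrative numbers only): on a machine with 16GB of
 * physical memory and a larger KVM space, v_page_count * PAGE_SIZE is the
 * smaller term and kmem_lim_size() returns 16384 (MB).  If KvaSize were
 * smaller than physical memory, KvaSize would be the limiting factor
 * instead.
 */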
static void
kmeminit(void *dummy)
{
	size_t limsize;
	int usesize;
	int i;

	limsize = kmem_lim_size();
	usesize = (int)(limsize * 1024);	/* convert to KB */

	/*
	 * If the machine has a large KVM space and more than 8G of ram,
	 * double the zone release threshold to reduce SMP invalidations.
	 * If more than 16G of ram, do it again.
	 *
	 * The BIOS eats a little ram so add some slop.  We want 8G worth of
	 * memory sticks to trigger the first adjustment.
	 */
	if (ZoneRelsThresh == ZONE_RELS_THRESH) {
		if (limsize >= 7 * 1024)
			ZoneRelsThresh *= 2;
		if (limsize >= 15 * 1024)
			ZoneRelsThresh *= 2;
	}

	/*
	 * Calculate the zone size.  This typically calculates to
	 * ZALLOC_MAX_ZONE_SIZE
	 */
	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
	while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
		ZoneSize <<= 1;
	ZoneLimit = ZoneSize / 4;
	if (ZoneLimit > ZALLOC_ZONE_LIMIT)
		ZoneLimit = ZALLOC_ZONE_LIMIT;
	ZoneMask = ~(uintptr_t)(ZoneSize - 1);
	ZonePageCount = ZoneSize / PAGE_SIZE;

	for (i = 0; i < NELEM(weirdary); ++i)
		weirdary[i] = WEIRD_ADDR;

	ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

	if (bootverbose)
		kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * (low level) Initialize slab-related elements in the globaldata structure.
 *
 * Occurs after kmeminit().
 */
void
slab_gdinit(globaldata_t gd)
{
	SLGlobalData *slgd;
	int i;

	slgd = &gd->gd_slab;
	for (i = 0; i < NZONES; ++i)
		TAILQ_INIT(&slgd->ZoneAry[i]);
	TAILQ_INIT(&slgd->FreeZones);
	TAILQ_INIT(&slgd->FreeOvZones);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
	struct malloc_type *type = data;
	size_t limsize;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (vmstats.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	limsize = kmem_lim_size() * (1024 * 1024);
	type->ks_limit = limsize / 10;

	spin_lock(&kmemstat_spin);
	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	spin_unlock(&kmemstat_spin);
}
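/*
 * Worked example (illustrative): with kmem_lim_size() returning 16384MB,
 * limsize computes to 16GB and a newly initialized malloc type receives a
 * default ks_limit of limsize / 10, i.e. roughly 1.6GB.
 */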
void
malloc_uninit(void *data)
{
	struct malloc_type *type = data;
	struct malloc_type *t;
#ifdef INVARIANTS
	int i;
	long ttl;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (vmstats.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

	/* Make sure that all pending kfree()s are finished. */
	lwkt_synchronize_ipiqs("muninit");

#ifdef INVARIANTS
	/*
	 * memuse is only correct in aggregation.  Due to memory being
	 * allocated on one cpu and freed on another, individual array
	 * entries may be negative or positive (canceling each other out).
	 */
	for (i = ttl = 0; i < ncpus; ++i)
		ttl += type->ks_use[i].memuse;
	if (ttl) {
		kprintf("malloc_uninit: %ld bytes of '%s' still allocated\n",
			ttl, type->ks_shortdesc);
	}
#endif
	spin_lock(&kmemstat_spin);
	if (type == kmemstatistics) {
		kmemstatistics = type->ks_next;
	} else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
	spin_unlock(&kmemstat_spin);
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
	if (type->ks_limit == 0)
		malloc_init(type);
	if (bytes == 0)
		bytes = KvaSize;
	if (type->ks_limit < bytes)
		type->ks_limit = bytes;
}

void
kmalloc_set_unlimited(struct malloc_type *type)
{
	type->ks_limit = kmem_lim_size() * (1024 * 1024);
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		malloc_init(type);
		*typep = type;
	}
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
	if (*typep != NULL) {
		malloc_uninit(*typep);
		kfree(*typep, M_TEMP);
		*typep = NULL;
	}
}
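/*
 * Usage sketch for the dynamic pool API (M_MYDEV and my_softc are
 * hypothetical names, not part of this file):
 *
 *	static struct malloc_type *M_MYDEV;
 *
 *	kmalloc_create(&M_MYDEV, "mydev structures");
 *	sc = kmalloc(sizeof(struct my_softc), M_MYDEV, M_WAITOK | M_ZERO);
 *	...
 *	kfree(sc, M_MYDEV);
 *	kmalloc_destroy(&M_MYDEV);
 */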
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
	unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */

	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		*align = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*align = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*align = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*align = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*align = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*align = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*align = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*align = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*align = 2048;
		return(n / 2048 + 63);
	}
#endif
	panic("Unexpected byte count %d", n);
	return(0);
}
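/*
 * Worked examples (illustrative): a request for 100 bytes falls in the
 * 8-byte chunking range, so *bytes is rounded up to 104 and zone index
 * 104 / 8 - 1 == 12 is returned with *align == 8.  A request for 3000
 * bytes uses 256-byte chunking: *bytes becomes 3072 and zone index
 * 3072 / 256 + 39 == 51 is returned with *align == 256.
 */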
static __inline
void
clean_zone_rchunks(SLZone *z)
{
	SLChunk *bchunk;

	while ((bchunk = z->z_RChunks) != NULL) {
		cpu_ccfence();
		if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
			*z->z_LChunksp = bchunk;
			while (bchunk) {
				chunk_mark_free(z, bchunk);
				z->z_LChunksp = &bchunk->c_Next;
				bchunk = bchunk->c_Next;
				++z->z_NFree;
			}
			break;
		}
		/* retry */
	}
}

/*
 * If the zone becomes totally free and is not the only zone listed for a
 * chunk size we move it to the FreeZones list.  We always leave at least
 * one zone per chunk size listed, even if it is freeable.
 *
 * Do not move the zone if there is an IPI in_flight (z_RCount != 0),
 * otherwise MP races can result in our free_remote code accessing a
 * destroyed zone.  The remote end interlocks z_RCount with z_RChunks
 * so one has to test both z_NFree and z_RCount.
 *
 * Since this code can be called from an IPI callback, do *NOT* try to mess
 * with kernel_map here.  Hysteresis will be performed at kmalloc() time.
 */
static __inline
SLZone *
check_zone_free(SLGlobalData *slgd, SLZone *z)
{
	SLZone *znext;

	znext = TAILQ_NEXT(z, z_Entry);
	if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
	    (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)
	) {
		int *kup;

		TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);

		z->z_Magic = -1;
		TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
		++slgd->NFreeZones;
		kup = btokup(z);
		*kup = 0;
	}
	return znext;
}

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
	int i;
	int b = line & (SLAB_DEBUG_ENTRIES - 1);

	i = b;
	do {
		if (z->z_Sources[i].file == file &&
		    z->z_Sources[i].line == line)
			return;
		if (z->z_Sources[i].file == NULL)
			break;
		i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
	} while (i != b);
	z->z_Sources[i].file = file;
	z->z_Sources[i].line = line;
}

#endif

static __inline unsigned long
powerof2_size(unsigned long size)
{
	int i;

	if (size == 0 || powerof2(size))
		return size;

	i = flsl(size);
	return (1UL << i);
}
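/*
 * Worked example (illustrative): powerof2_size(100) computes flsl(100) == 7
 * and returns 1UL << 7 == 128, while powerof2_size(4096) returns 4096
 * unchanged since the size is already a power of 2.
 */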
/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *	M_POWEROF2	- roundup size to the nearest power of 2
 *
 * MPSAFE
 */

#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	      const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
	SLZone *z;
	SLChunk *chunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	unsigned long align;
	int zi;
#ifdef INVARIANTS
	int i;
#endif

	logmemory_quick(malloc_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	/*
	 * XXX silly to have this in the critical path.
	 */
	if (type->ks_limit == 0) {
		crit_enter();
		malloc_init(type);
		crit_exit();
	}
	++type->ks_calls;

	if (flags & M_POWEROF2)
		size = powerof2_size(size);

	/*
	 * Handle the case where the limit is reached.  Panic if we can't
	 * return NULL.  The original malloc code looped, but this tended to
	 * simply deadlock the computer.
	 *
	 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
	 * to determine if a more complete limit check should be done.  The
	 * actual memory use is tracked via ks_use[cpu].memuse.
	 */
	while (type->ks_loosememuse >= type->ks_limit) {
		int i;
		long ttl;

		for (i = ttl = 0; i < ncpus; ++i)
			ttl += type->ks_use[i].memuse;
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK) {
				logmemory(malloc_end, NULL, type, size, flags);
				return(NULL);
			}
			panic("%s: malloc limit exceeded", type->ks_shortdesc);
		}
	}

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0) {
		logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle hysteresis from prior frees here in malloc().  We cannot
	 * safely manipulate the kernel_map in free() due to free() possibly
	 * being called via an IPI message or from sensitive interrupt code.
	 *
	 * NOTE: ku_pagecnt must be cleared before we free the slab or we
	 *	 might race another cpu allocating the kva and setting
	 *	 ku_pagecnt.
	 */
	while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if (slgd->NFreeZones > ZoneRelsThresh) {  /* crit sect race */
			int *kup;

			z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
			KKASSERT(z != NULL);
			TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
			--slgd->NFreeZones;
			kup = btokup(z);
			*kup = 0;
			kmem_slab_free(z, ZoneSize);	/* may block */
			atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
		}
		crit_exit();
	}

	/*
	 * XXX handle oversized frees that were queued from kfree().
	 */
	while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
			vm_size_t tsize;

			KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
			TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
			tsize = z->z_ChunkSize;
			kmem_slab_free(z, tsize);	/* may block */
			atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
		}
		crit_exit();
	}

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on a SMP system.   Use the
	 * slab allocator for one and two page-sized chunks even though we lose
	 * some efficiency.  XXX maybe fix mmio and the elf loader instead.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		int *kup;

		size = round_page(size);
		chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL) {
			logmemory(malloc_end, NULL, type, size, flags);
			return(NULL);
		}
		atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
		flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
		flags |= M_PASSIVE_ZERO;
		kup = btokup(chunk);
		*kup = size / PAGE_SIZE;
		crit_enter();
		goto done;
	}
	/*
	 * Attempt to allocate out of an existing zone.  First try the free
	 * list, then allocate out of unallocated space.  If we find a good
	 * zone move it to the head of the list so later allocations find it
	 * quickly (we might have thousands of zones in the list).
	 *
	 * Note: zoneindex() will panic if size is too large.
	 */
	zi = zoneindex(&size, &align);
	KKASSERT(zi < NZONES);
	crit_enter();

	if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) {
		/*
		 * Locate a chunk - we have to have at least one.  If this is
		 * the last chunk go ahead and do the work to retrieve chunks
		 * freed from remote cpus, and if the zone is still empty move
		 * it off the ZoneAry.
		 */
		if (--z->z_NFree <= 0) {
			KKASSERT(z->z_NFree == 0);

			/*
			 * WARNING! This code competes with other cpus.  It is
			 * ok for us to not drain RChunks here but we might as
			 * well, and it is ok if more accumulate after we're
			 * done.
			 *
			 * Set RSignal before pulling rchunks off, indicating
			 * that we will be moving ourselves off of the ZoneAry.
			 * Remote ends will read RSignal before putting rchunks
			 * on thus interlocking their IPI signaling.
			 */
			if (z->z_RChunks == NULL)
				atomic_swap_int(&z->z_RSignal, 1);

			clean_zone_rchunks(z);

			/*
			 * Remove from the zone list if no free chunks remain.
			 * Clear RSignal
			 */
			if (z->z_NFree == 0) {
				TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry);
			} else {
				z->z_RSignal = 0;
			}
		}

		/*
		 * Fast path, we have chunks available in z_LChunks.
		 */
		chunk = z->z_LChunks;
		if (chunk) {
			chunk_mark_allocated(z, chunk);
			z->z_LChunks = chunk->c_Next;
			if (z->z_LChunks == NULL)
				z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
			slab_record_source(z, file, line);
#endif
			goto done;
		}

		/*
		 * No chunks are available in LChunks, the free chunk MUST be
		 * in the never-before-used memory area, controlled by UIndex.
		 *
		 * The consequences are very serious if our zone got corrupted
		 * so we use an explicit panic rather than a KASSERT.
		 */
		if (z->z_UIndex + 1 != z->z_NMax)
			++z->z_UIndex;
		else
			z->z_UIndex = 0;

		if (z->z_UIndex == z->z_UEndIndex)
			panic("slaballoc: corrupted zone");

		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;
			flags |= M_PASSIVE_ZERO;
		}
		chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
		slab_record_source(z, file, line);
#endif
		goto done;
	}

	/*
	 * If all zones are exhausted we need to allocate a new zone for this
	 * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
	 * UAlloc use above in regards to M_ZERO.  Note that when we are
	 * reusing a zone from the FreeZones list UAlloc'd data will not be
	 * zero'd, and we do not pre-zero it because we do not want to mess up
	 * the L1 cache.
	 *
	 * At least one subsystem, the tty code (see CROUND) expects power-of-2
	 * allocations to be power-of-2 aligned.  We maintain compatibility by
	 * adjusting the base offset below.
	 */
	{
		int off;
		int *kup;

		if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) {
			TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
			--slgd->NFreeZones;
			bzero(z, sizeof(SLZone));
			z->z_Flags |= SLZF_UNOTZEROD;
		} else {
			z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
			if (z == NULL)
				goto fail;
			atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
		}

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is somewhat
		 * more complicated so don't bother; a conservative estimate
		 * is fine.
		 */
		off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(SLZone);
#endif

		/*
		 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
		 * Otherwise properly align the data according to the chunk
		 * size.
		 */
		if (powerof2(size))
			align = size;
		off = roundup2(off, align);

		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax - 1;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
		z->z_ChunkSize = size;
		z->z_CpuGd = gd;
		z->z_Cpu = gd->gd_cpuid;
		z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
		bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
		bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;	/* already zero'd */
			flags |= M_PASSIVE_ZERO;
		}
		kup = btokup(z);
		*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
		chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
		slab_record_source(z, file, line);
#endif

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
				& (ZALLOC_MAX_ZONE_SIZE - 1);
	}

done:
	++type->ks_use[gd->gd_cpuid].inuse;
	type->ks_use[gd->gd_cpuid].memuse += size;
	type->ks_loosememuse += size;	/* not MP synchronized */
	crit_exit();

	if (flags & M_ZERO)
		bzero(chunk, size);
#ifdef INVARIANTS
	else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		/* avoid accidental double-free check */
		chunk->c_Next = (void *)-1;
	}
#endif
	logmemory(malloc_end, chunk, type, size, flags);
	return(chunk);
fail:
	crit_exit();
	logmemory(malloc_end, NULL, type, size, flags);
	return(NULL);
}
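/*
 * Usage sketch ('buf' is a hypothetical pointer; behavior follows the flag
 * descriptions above):
 *
 *	buf = kmalloc(1024, M_TEMP, M_WAITOK | M_ZERO);
 *		(may block; returned memory is zero'd)
 *
 *	buf = kmalloc(1024, M_TEMP, M_RNOWAIT | M_NULLOK);
 *	if (buf == NULL)
 *		(caller must handle the allocation failure)
 *
 *	buf = kmalloc(100, M_TEMP, M_WAITOK | M_POWEROF2);
 *		(size is rounded up to 128 and the result is 128-byte
 *		 aligned per the alignment properties in the header)
 */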
/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
	       struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
	unsigned long osize;
	unsigned long align;
	SLZone *z;
	void *nptr;
	int *kup;

	KKASSERT((flags & M_ZERO) == 0);	/* not supported */

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
		return(kmalloc_debug(size, type, flags, file, line));
	if (size == 0) {
		kfree(ptr, type);
		return(NULL);
	}

	/*
	 * Handle oversized allocations.  XXX we really should require that a
	 * size be passed to free() instead of this nonsense.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		osize = *kup << PAGE_SHIFT;
		if (osize == round_page(size))
			return(ptr);
		nptr = kmalloc_debug(size, type, flags, file, line);
		if (nptr == NULL)
			return(NULL);
		bcopy(ptr, nptr, min(size, osize));
		kfree(ptr, type);
		return(nptr);
	}

	/*
	 * Get the original allocation's zone.  If the new request winds up
	 * using the same chunk size we do not have to do anything.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Allocate memory for the new request size.  Note that zoneindex has
	 * already adjusted the request size to the appropriate chunk size,
	 * which should optimize our bcopy().  Then copy and return the new
	 * pointer.
	 *
	 * Resizing a non-power-of-2 allocation to a power-of-2 size does not
	 * necessarily align the result.
	 *
	 * We can only zoneindex (to align size to the chunk size) if the new
	 * size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &align);
		if (z->z_ChunkSize == size)
			return(ptr);
	}
	nptr = kmalloc_debug(size, type, flags, file, line);
	if (nptr == NULL)
		return(NULL);
	bcopy(ptr, nptr, min(size, z->z_ChunkSize));
	kfree(ptr, type);
	return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
	if (type->ks_limit == 0) {
		crit_enter();
		if (type->ks_limit == 0)
			malloc_init(type);
		crit_exit();
	}
	return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
	      const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strlen(str) + 1;
	nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
	bcopy(str, nstr, zlen);
	return(nstr);
}

#ifdef SLAB_DEBUG
char *
kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type,
	       const char *file, int line)
#else
char *
kstrndup(const char *str, size_t maxlen, struct malloc_type *type)
#endif
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strnlen(str, maxlen) + 1;
	nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
	bcopy(str, nstr, zlen);
	nstr[zlen - 1] = '\0';
	return(nstr);
}
/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static
void
kfree_remote(void *ptr)
{
	SLGlobalData *slgd;
	SLZone *z;
	int nfree;
	int *kup;

	slgd = &mycpu->gd_slab;
	z = ptr;
	kup = btokup(z);
	KKASSERT(*kup == -((int)mycpuid + 1));
	KKASSERT(z->z_RCount > 0);
	atomic_subtract_int(&z->z_RCount, 1);

	logmemory(free_rem_beg, z, NULL, 0L, 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
	KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
	nfree = z->z_NFree;

	/*
	 * Indicate that we will no longer be off of the ZoneAry by
	 * clearing RSignal.
	 */
	if (z->z_RChunks)
		z->z_RSignal = 0;

	/*
	 * Atomically extract the bchunks list and then process it back
	 * into the lchunks list.  We want to append our bchunks to the
	 * lchunks list and not prepend since we likely do not have
	 * cache mastership of the related data (not that it helps since
	 * we are using c_Next).
	 */
	clean_zone_rchunks(z);
	if (z->z_NFree && nfree == 0) {
		TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
	}

	/*
	 * If the zone becomes totally free and is not the only zone listed
	 * for a chunk size we move it to the FreeZones list.  We always leave
	 * at least one zone per chunk size listed, even if it is freeable.
	 *
	 * Since this code can be called from an IPI callback, do *NOT* try to
	 * mess with kernel_map here.  Hysteresis will be performed at malloc()
	 * time.
	 *
	 * Do not move the zone if there is an IPI in_flight (z_RCount != 0),
	 * otherwise MP races can result in our free_remote code accessing a
	 * destroyed zone.  The remote end interlocks z_RCount with z_RChunks
	 * so one has to test both z_NFree and z_RCount.
	 */
	if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
	    (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z ||
	     TAILQ_NEXT(z, z_Entry))
	) {
		int *kup;

		TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
		z->z_Magic = -1;
		TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
		++slgd->NFreeZones;
		kup = btokup(z);
		*kup = 0;
	}
	logmemory(free_rem_end, z, NULL, 0L, 0);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
	SLZone *z;
	SLChunk *chunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	int *kup;
	unsigned long size;
	SLChunk *bchunk;
	int rsignal;

	logmemory_quick(free_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	if (ptr == NULL)
		panic("trying to free NULL pointer");

	/*
	 * Handle special 0-byte allocations
	 */
	if (ptr == ZERO_LENGTH_PTR) {
		logmemory(free_zero, ptr, type, -1UL, 0);
		logmemory_quick(free_end);
		return;
	}

	/*
	 * Panic on bad malloc type
	 */
	if (type->ks_magic != M_MAGIC)
		panic("free: malloc type lacks magic");

	/*
	 * Handle oversized allocations.  XXX we really should require that a
	 * size be passed to free() instead of this nonsense.
	 *
	 * This code is never called via an ipi.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		size = *kup << PAGE_SHIFT;
		*kup = 0;
#ifdef INVARIANTS
		KKASSERT(sizeof(weirdary) <= size);
		bcopy(weirdary, ptr, sizeof(weirdary));
#endif
		/*
		 * NOTE: For oversized allocations we do not record the
		 *	 originating cpu.  It gets freed on the cpu calling
		 *	 kfree().  The statistics are in aggregate.
		 *
		 * note: XXX we have still inherited the interrupts-can't-block
		 * assumption.  An interrupt thread does not bump
		 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
		 * primarily until we can fix softupdate's assumptions about
		 * free().
		 */
		crit_enter();
		--type->ks_use[gd->gd_cpuid].inuse;
		type->ks_use[gd->gd_cpuid].memuse -= size;
		if (mycpu->gd_intr_nesting_level ||
		    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
		{
			logmemory(free_ovsz_delayed, ptr, type, size, 0);
			z = (SLZone *)ptr;
			z->z_Magic = ZALLOC_OVSZ_MAGIC;
			z->z_ChunkSize = size;

			TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
			crit_exit();
		} else {
			crit_exit();
			logmemory(free_ovsz, ptr, type, size, 0);
			kmem_slab_free(ptr, size);	/* may block */
			atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
		}
		logmemory_quick(free_end);
		return;
	}
	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * If we do not own the zone then use atomic ops to free to the
	 * remote cpu linked list and notify the target zone using a
	 * passive message.
	 *
	 * The target zone cannot be deallocated while we own a chunk of it,
	 * so the zone header's storage is stable until the very moment
	 * we adjust z_RChunks.  After that we cannot safely dereference (z).
	 *
	 * (no critical section needed)
	 */
	if (z->z_CpuGd != gd) {
		/*
		 * Making these adjustments now allow us to avoid passing
		 * (type) to the remote cpu.  Note that inuse/memuse is being
		 * adjusted on OUR cpu, not the zone cpu, but it should all
		 * still sum up properly and cancel out.
		 */
		crit_enter();
		--type->ks_use[gd->gd_cpuid].inuse;
		type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
		crit_exit();

		/*
		 * WARNING! This code competes with other cpus.  Once we
		 *	    successfully link the chunk to RChunks the remote
		 *	    cpu can rip z's storage out from under us.
		 *
		 *	    Bumping RCount prevents z's storage from getting
		 *	    ripped out.
		 */
		rsignal = z->z_RSignal;
		cpu_lfence();
		if (rsignal)
			atomic_add_int(&z->z_RCount, 1);

		chunk = ptr;
		for (;;) {
			bchunk = z->z_RChunks;
			cpu_ccfence();
			chunk->c_Next = bchunk;
			cpu_sfence();

			if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
				break;
		}

		/*
		 * We have to signal the remote cpu if our actions will cause
		 * the remote zone to be placed back on ZoneAry so it can
		 * move the zone back on.
		 *
		 * We only need to deal with NULL->non-NULL RChunk transitions
		 * and only if z_RSignal is set.  We interlock by reading
		 * rsignal before adding our chunk to RChunks.  This should
		 * result in virtually no IPI traffic.
		 *
		 * We can use a passive IPI to reduce overhead even further.
		 */
		if (bchunk == NULL && rsignal) {
			logmemory(free_request, ptr, type,
				  (unsigned long)z->z_ChunkSize, 0);
			lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
			/* z can get ripped out from under us from this point on */
		} else if (rsignal) {
			atomic_subtract_int(&z->z_RCount, 1);
			/* z can get ripped out from under us from this point on */
		}
		logmemory_quick(free_end);
		return;
	}
	/*
	 * kfree locally
	 */
	logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

	crit_enter();
	chunk = ptr;
	chunk_mark_free(z, chunk);

	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on the
	 * odd address), and so forth.  XXX needs more work, see the old
	 * malloc code.
	 */
#ifdef INVARIANTS
	if (z->z_ChunkSize < sizeof(weirdary))
		bcopy(weirdary, chunk, z->z_ChunkSize);
	else
		bcopy(weirdary, chunk, sizeof(weirdary));
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse.  Add
	 * to the front of the linked list so it is more likely to be
	 * reallocated, since it is already in our L1 cache.
	 */
#ifdef INVARIANTS
	if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		panic("BADFREE %p", chunk);
#endif
	chunk->c_Next = z->z_LChunks;
	z->z_LChunks = chunk;
	if (chunk->c_Next == NULL)
		z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
	if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		panic("BADFREE2");
#endif

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.  A fully allocated
	 * zone that sees its first free is considered 'mature' and is placed
	 * at the head, giving the system time to potentially free the
	 * remaining entries even while other allocations are going on and
	 * making the zone freeable.
	 */
	if (z->z_NFree++ == 0) {
		if (SlabFreeToTail)
			TAILQ_INSERT_TAIL(&slgd->ZoneAry[z->z_ZoneIndex],
					  z, z_Entry);
		else
			TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex],
					  z, z_Entry);
	}

	--type->ks_use[z->z_Cpu].inuse;
	type->ks_use[z->z_Cpu].memuse -= z->z_ChunkSize;

	check_zone_free(slgd, z);
	logmemory_quick(free_end);
	crit_exit();
}

/*
 * Cleanup slabs which are hanging around due to RChunks or which are wholly
 * free and can be moved to the free list if not moved by other means.
 *
 * Called once every 10 seconds on all cpus.
 */
void
slab_cleanup(void)
{
	SLGlobalData *slgd = &mycpu->gd_slab;
	SLZone *z;
	int i;

	crit_enter();
	for (i = 0; i < NZONES; ++i) {
		if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL)
			continue;

		/*
		 * Scan zones.
		 */
		while (z) {
			/*
			 * Shift all RChunks to the end of the LChunks list.
			 * This is an O(1) operation.
			 *
			 * Then free the zone if possible.
			 */
			clean_zone_rchunks(z);
			z = check_zone_free(slgd, z);
		}
	}
	crit_exit();
}
#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) == 0,
		("memory chunk %p is already allocated!", chunk));
	*bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal!", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) != 0,
		("memory chunk %p is already free!", chunk));
	*bitptr &= ~(1 << bitdex);
}

#endif
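/*
 * Worked example (illustrative): in a zone with 256-byte chunks, a chunk
 * at offset 0x2100 from z_BasePtr has bitdex 0x2100 / 256 == 33, so its
 * allocation state is bit (33 & 31) == 1 of bitmap word z_Bitmap[33 >> 5],
 * i.e. bit 1 of the second word.
 */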
/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
	vm_size_t i;
	vm_offset_t addr;
	int count, vmflags, base_vmflags;
	vm_page_t mbase = NULL;
	vm_page_t m;
	thread_t td;

	size = round_page(size);
	addr = vm_map_min(&kernel_map);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	crit_enter();
	vm_map_lock(&kernel_map);
	if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
		vm_map_unlock(&kernel_map);
		if ((flags & M_NULLOK) == 0)
			panic("kmem_slab_alloc(): kernel_map ran out of space!");
		vm_map_entry_release(count);
		crit_exit();
		return(NULL);
	}

	/*
	 * kernel_object maps 1:1 to kernel_map.
	 */
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(&kernel_map, &count,
		      &kernel_object, NULL,
		      addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_SUBSYS_KMALLOC,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_object_drop(&kernel_object);
	vm_map_set_wired_quick(&kernel_map, addr, size, &count);
	vm_map_unlock(&kernel_map);

	td = curthread;

	base_vmflags = 0;
	if (flags & M_ZERO)
		base_vmflags |= VM_ALLOC_ZERO;
	if (flags & M_USE_RESERVE)
		base_vmflags |= VM_ALLOC_SYSTEM;
	if (flags & M_USE_INTERRUPT_RESERVE)
		base_vmflags |= VM_ALLOC_INTERRUPT;
	if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
		panic("kmem_slab_alloc: bad flags %08x (%p)",
		      flags, ((int **)&size)[-1]);
	}

	/*
	 * Allocate the pages.  Do not map them yet.  VM_ALLOC_NORMAL can only
	 * be set if we are not preempting.
	 *
	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
	 * implied in this case), though I'm not sure if we really need to
	 * do that.
	 */
	vmflags = base_vmflags;
	if (flags & M_WAITOK) {
		if (td->td_preempted)
			vmflags |= VM_ALLOC_SYSTEM;
		else
			vmflags |= VM_ALLOC_NORMAL;
	}

	vm_object_hold(&kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i),
				  vmflags);
		if (i == 0)
			mbase = m;

		/*
		 * If the allocation failed we either return NULL or we retry.
		 *
		 * If M_WAITOK is specified we wait for more memory and retry.
		 * If M_WAITOK is specified from a preemption we yield instead
		 * of wait.  Livelock will not occur because the interrupt
		 * thread will not be preempting anyone the second time around
		 * after the yield.
		 */
		if (m == NULL) {
			if (flags & M_WAITOK) {
				if (td->td_preempted) {
					lwkt_switch();
				} else {
					vm_wait(0);
				}
				i -= PAGE_SIZE;	/* retry */
				continue;
			}
			break;
		}
	}

	/*
	 * Check and deal with an allocation failure
	 */
	if (i != size) {
		while (i != 0) {
			i -= PAGE_SIZE;
			m = vm_page_lookup(&kernel_object,
					   OFF_TO_IDX(addr + i));
			/* page should already be busy */
			vm_page_free(m);
		}
		vm_map_lock(&kernel_map);
		vm_map_delete(&kernel_map, addr, addr + size, &count);
		vm_map_unlock(&kernel_map);
		vm_object_drop(&kernel_object);

		vm_map_entry_release(count);
		crit_exit();
		return(NULL);
	}

	/*
	 * Success!
	 *
	 * NOTE: The VM pages are still busied.  mbase points to the first one
	 *	 but we have to iterate via vm_page_next()
	 */
	vm_object_drop(&kernel_object);
	crit_exit();

	/*
	 * Enter the pages into the pmap and deal with M_ZERO.
	 */
	m = mbase;
	i = 0;

	while (i < size) {
		/*
		 * page should already be busy
		 */
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		pmap_enter(&kernel_pmap, addr + i, m,
			   VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL);
		if (flags & M_ZERO)
			pagezero((char *)addr + i);
		KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
		vm_page_flag_set(m, PG_REFERENCED);
		vm_page_wakeup(m);

		i += PAGE_SIZE;
		vm_object_hold(&kernel_object);
		m = vm_page_next(m);
		vm_object_drop(&kernel_object);
	}
	smp_invltlb();
	vm_map_entry_release(count);
	atomic_add_long(&SlabsAllocated, 1);
	return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
	crit_enter();
	vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
	atomic_add_long(&SlabsFreed, 1);
	crit_exit();
}
void *
kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
		   int flags)
{
#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

	void *ret;

	if (size_alloc < __VM_CACHELINE_SIZE)
		size_alloc = __VM_CACHELINE_SIZE;
	else if (!CAN_CACHEALIGN(size_alloc))
		flags |= M_POWEROF2;

	ret = kmalloc(size_alloc, type, flags);
	KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
		("%p(%lu) not cacheline %d aligned",
		 ret, size_alloc, __VM_CACHELINE_SIZE));
	return ret;

#undef CAN_CACHEALIGN
}
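/*
 * Usage sketch (hypothetical sizes, assuming a 64-byte cache line):
 *
 *	p = kmalloc_cachealign(40, M_TEMP, M_WAITOK);
 *		(size is bumped to 64; the power-of-2 result is cacheline
 *		 aligned)
 *
 *	p = kmalloc_cachealign(300, M_TEMP, M_WAITOK);
 *		(300 < 512 so M_POWEROF2 is added and the allocation is
 *		 rounded up to 512, which is power-of-2 aligned)
 */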