/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
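 *
 * For example (illustrative, assuming the default table below): a 100 byte
 * kmalloc() request is rounded up to a 104 byte chunk (8 byte chunking,
 * zone index 12), while a 3000 byte request is rounded up to a 3072 byte
 * chunk (256 byte chunking).  zoneindex() below is the authoritative
 * calculation.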
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *	Alignment properties:
 *	- All power-of-2 sized allocations are power-of-2 aligned.
 *	- Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *	  power-of-2 round up of 'size'.
 *	- Non-power-of-2 sized allocations are zone chunk size aligned (see
 *	  the above table 'Chunking' column).
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags) \
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name) \
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32	/* threshold number of zones */

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
    &use_malloc_pattern, 0,
    "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
static long SlabsAllocated;
static long SlabsFreed;
SYSCTL_LONG(_kern, OID_AUTO, slabs_allocated, CTLFLAG_RD, &SlabsAllocated, 0, "");
SYSCTL_LONG(_kern, OID_AUTO, slabs_freed, CTLFLAG_RD, &SlabsFreed, 0, "");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
    size_t limsize;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;
    return (limsize / (1024 * 1024));
}

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;

    limsize = kmem_lim_size();
    usesize = (int)(limsize * 1024);	/* convert to KB */

    /*
     * If the machine has a large KVM space and more than 8G of ram,
     * double the zone release threshold to reduce SMP invalidations.
     * If more than 16G of ram, do it again.
     *
     * The BIOS eats a little ram so add some slop.  We want 8G worth of
     * memory sticks to trigger the first adjustment.
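     *
     * For example (illustrative): an 8G machine reports a limsize of
     * roughly 8 * 1024 MB, which still clears the 7 * 1024 threshold
     * after BIOS slop, so ZoneRelsThresh doubles from 32 to 64; a 16G
     * machine doubles it again to 128.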
     */
    if (ZoneRelsThresh == ZONE_RELS_THRESH) {
        if (limsize >= 7 * 1024)
            ZoneRelsThresh *= 2;
        if (limsize >= 15 * 1024)
            ZoneRelsThresh *= 2;
    }

    /*
     * Calculate the zone size.  This typically calculates to
     * ZALLOC_MAX_ZONE_SIZE
     */
    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;

    for (i = 0; i < NELEM(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
        kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = kmem_lim_size() * (1024 * 1024);
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
            ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
        malloc_init(type);
    if (bytes == 0)
        bytes = KvaSize;
    if (type->ks_limit < bytes)
        type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
        type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
        type->ks_magic = M_MAGIC;
        type->ks_shortdesc = descr;
        malloc_init(type);
        *typep = type;
    }
}
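
/*
 * Example usage of the dynamic pool API (illustrative sketch only, the
 * 'mymod' names are hypothetical):
 *
 *	static struct malloc_type *mymod_pool;
 *	struct mymod_softc *p;
 *
 *	kmalloc_create(&mymod_pool, "mymod data");
 *	p = kmalloc(sizeof(*p), mymod_pool, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, mymod_pool);
 *	kmalloc_destroy(&mymod_pool);
 */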

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
        malloc_uninit(*typep);
        kfree(*typep, M_TEMP);
        *typep = NULL;
    }
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        *align = 8;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        *align = 16;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            *align = 32;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            *align = 64;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            *align = 128;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            *align = 256;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        *align = 512;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        *align = 1024;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        *align = 2048;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
    int i;
    int b = line & (SLAB_DEBUG_ENTRIES - 1);

    i = b;
    do {
        if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
            return;
        if (z->z_Sources[i].file == NULL)
            break;
        i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
    } while (i != b);
    z->z_Sources[i].file = file;
    z->z_Sources[i].line = line;
}

#endif

static __inline unsigned long
powerof2_size(unsigned long size)
{
    int i;

    if (size == 0 || powerof2(size))
        return size;

    i = flsl(size);
    return (1UL << i);
}
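
/*
 * Illustrative examples (not exhaustive): powerof2_size(100) returns 128
 * and powerof2_size(4096) returns 4096, so an M_POWEROF2 request for 100
 * bytes is treated as a 128 byte, 128 byte aligned allocation.
 */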

/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *	M_POWEROF2	- roundup size to the nearest power of 2
 *
 * MPSAFE
 */

#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	      const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
    SLZone *z;
    SLChunk *chunk;
    SLChunk *bchunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    unsigned long align;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    if (flags & M_POWEROF2)
        size = powerof2_size(size);

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;	/* not MP synchronized */
        if ((ssize_t)ttl < 0)		/* deal with occasional race */
            ttl = 0;
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK) {
                logmemory(malloc_end, NULL, type, size, flags);
                return(NULL);
            }
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
        logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
        return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     *
     * NOTE: ku_pagecnt must be cleared before we free the slab or we
     *       might race another cpu allocating the kva and setting
     *       ku_pagecnt.
     */
    while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
            int *kup;

            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kup = btokup(z);
            *kup = 0;
            kmem_slab_free(z, ZoneSize);	/* may block */
            atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
        }
        crit_exit();
    }

    /*
     * XXX handle oversized frees that were queued from kfree().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            vm_size_t tsize;

            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            tsize = z->z_ChunkSize;
            kmem_slab_free(z, tsize);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on a SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
        int *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL) {
            logmemory(malloc_end, NULL, type, size, flags);
            return(NULL);
        }
        atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        *kup = size / PAGE_SIZE;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size, &align);
    KKASSERT(zi < NZONES);
    crit_enter();

    if ((z = slgd->ZoneAry[zi]) != NULL) {
        /*
         * Locate a chunk - we have to have at least one.  If this is the
         * last chunk go ahead and do the work to retrieve chunks freed
         * from remote cpus, and if the zone is still empty move it off
         * the ZoneAry.
         */
        if (--z->z_NFree <= 0) {
            KKASSERT(z->z_NFree == 0);

            /*
             * WARNING! This code competes with other cpus.  It is ok
             * for us to not drain RChunks here but we might as well, and
             * it is ok if more accumulate after we're done.
             *
             * Set RSignal before pulling rchunks off, indicating that we
             * will be moving ourselves off of the ZoneAry.  Remote ends will
             * read RSignal before putting rchunks on thus interlocking
             * their IPI signaling.
             */
            if (z->z_RChunks == NULL)
                atomic_swap_int(&z->z_RSignal, 1);

            while ((bchunk = z->z_RChunks) != NULL) {
                cpu_ccfence();
                if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
                    *z->z_LChunksp = bchunk;
                    while (bchunk) {
                        chunk_mark_free(z, bchunk);
                        z->z_LChunksp = &bchunk->c_Next;
                        bchunk = bchunk->c_Next;
                        ++z->z_NFree;
                    }
                    break;
                }
            }

            /*
             * Remove from the zone list if no free chunks remain.
             * Clear RSignal
             */
            if (z->z_NFree == 0) {
                slgd->ZoneAry[zi] = z->z_Next;
                z->z_Next = NULL;
            } else {
                z->z_RSignal = 0;
            }
        }

        /*
         * Fast path, we have chunks available in z_LChunks.
         */
        chunk = z->z_LChunks;
        if (chunk) {
            chunk_mark_allocated(z, chunk);
            z->z_LChunks = chunk->c_Next;
            if (z->z_LChunks == NULL)
                z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
            slab_record_source(z, file, line);
#endif
            goto done;
        }

        /*
         * No chunks are available in LChunks, the free chunk MUST be
         * in the never-before-used memory area, controlled by UIndex.
         *
         * The consequences are very serious if our zone got corrupted so
         * we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            ++z->z_UIndex;
        else
            z->z_UIndex = 0;

        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");

        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
        slab_record_source(z, file, line);
#endif
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;
        int *kup;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
            atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
        }

        /*
         * How big is the base structure?
         */
#if defined(INVARIANTS)
        /*
         * Make room for z_Bitmap.  An exact calculation is somewhat more
         * complicated so don't make an exact calculation.
         */
        off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
        bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
        off = sizeof(SLZone);
#endif

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise properly align the data according to the chunk size.
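         *
         * Illustrative example (assumes the default 128KB zone and a
         * SLZone header that fits within one chunk): for 1024 byte
         * chunks, off is rounded up to 1024, giving
         * z_NMax = (131072 - 1024) / 1024 = 127 chunks per zone.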
         */
        if (powerof2(size))
            align = size;
        off = (off + align - 1) & ~(align - 1);

        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
        bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
        bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;	/* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }
        kup = btokup(z);
        *kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
        chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
        slab_record_source(z, file, line);
#endif

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                          & (ZALLOC_MAX_ZONE_SIZE - 1);
    }

done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();

    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
        if (use_malloc_pattern) {
            for (i = 0; i < size; i += sizeof(int)) {
                *(int *)((char *)chunk + i) = -1;
            }
        }
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
#endif
    logmemory(malloc_end, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc_end, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
	       struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
    unsigned long osize;
    unsigned long align;
    SLZone *z;
    void *nptr;
    int *kup;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(kmalloc_debug(size, type, flags, file, line));
    if (size == 0) {
        kfree(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
        osize = *kup << PAGE_SHIFT;
        if (osize == round_page(size))
            return(ptr);
        if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
            return(NULL);
        bcopy(ptr, nptr, min(size, osize));
        kfree(ptr, type);
        return(nptr);
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
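     *
     * For example (illustrative): growing a 200 byte allocation to 205
     * bytes is a no-op because both sizes round up to the same 208 byte
     * chunk, while growing it to 300 bytes forces a copy into a new,
     * larger chunk.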
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
        zoneindex(&size, &align);
        if (z->z_ChunkSize == size)
            return(ptr);
    }
    if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
	      const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
    bcopy(str, nstr, zlen);
    return(nstr);
}

/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static
void
kfree_remote(void *ptr)
{
    SLGlobalData *slgd;
    SLChunk *bchunk;
    SLZone *z;
    int nfree;
    int *kup;

    slgd = &mycpu->gd_slab;
    z = ptr;
    kup = btokup(z);
    KKASSERT(*kup == -((int)mycpuid + 1));
    KKASSERT(z->z_RCount > 0);
    atomic_subtract_int(&z->z_RCount, 1);

    logmemory(free_rem_beg, z, NULL, 0L, 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
    KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
    nfree = z->z_NFree;

    /*
     * Indicate that we will no longer be off of the ZoneAry by
     * clearing RSignal.
     */
    if (z->z_RChunks)
        z->z_RSignal = 0;

    /*
     * Atomically extract the bchunks list and then process it back
     * into the lchunks list.  We want to append our bchunks to the
     * lchunks list and not prepend since we likely do not have
     * cache mastership of the related data (not that it helps since
     * we are using c_Next).
     */
    while ((bchunk = z->z_RChunks) != NULL) {
        cpu_ccfence();
        if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
            *z->z_LChunksp = bchunk;
            while (bchunk) {
                chunk_mark_free(z, bchunk);
                z->z_LChunksp = &bchunk->c_Next;
                bchunk = bchunk->c_Next;
                ++z->z_NFree;
            }
            break;
        }
    }
    if (z->z_NFree && nfree == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     *
     * Do not move the zone if there is an IPI inflight, otherwise MP
     * races can result in our free_remote code accessing a destroyed
     * zone.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
        z->z_RCount == 0
    ) {
        SLZone **pz;
        int *kup;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
             z != *pz;
             pz = &(*pz)->z_Next) {
            ;
        }
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
        kup = btokup(z);
        *kup = 0;
    }
    logmemory(free_rem_end, z, bchunk, 0L, 0);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int *kup;
    unsigned long size;
    SLChunk *bchunk;
    int rsignal;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
        logmemory(free_zero, ptr, type, -1UL, 0);
        logmemory_quick(free_end);
        return;
    }

    /*
     * Panic on bad malloc type
     */
    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
        size = *kup << PAGE_SHIFT;
        *kup = 0;
#ifdef INVARIANTS
        KKASSERT(sizeof(weirdary) <= size);
        bcopy(weirdary, ptr, sizeof(weirdary));
#endif
        /*
         * NOTE: For oversized allocations we do not record the
         *       originating cpu.  It gets freed on the cpu calling
         *       kfree().  The statistics are in aggregate.
         *
         * note: XXX we have still inherited the interrupts-can't-block
         * assumption.  An interrupt thread does not bump
         * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
         * primarily until we can fix softupdate's assumptions about free().
         */
        crit_enter();
        --type->ks_inuse[gd->gd_cpuid];
        type->ks_memuse[gd->gd_cpuid] -= size;
        if (mycpu->gd_intr_nesting_level ||
            (gd->gd_curthread->td_flags & TDF_INTTHREAD))
        {
            logmemory(free_ovsz_delayed, ptr, type, size, 0);
            z = (SLZone *)ptr;
            z->z_Magic = ZALLOC_OVSZ_MAGIC;
            z->z_Next = slgd->FreeOvZones;
            z->z_ChunkSize = size;
            slgd->FreeOvZones = z;
            crit_exit();
        } else {
            crit_exit();
            logmemory(free_ovsz, ptr, type, size, 0);
            kmem_slab_free(ptr, size);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
        }
        logmemory_quick(free_end);
        return;
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then use atomic ops to free to the
     * remote cpu linked list and notify the target zone using a
     * passive message.
     *
     * The target zone cannot be deallocated while we own a chunk of it,
     * so the zone header's storage is stable until the very moment
     * we adjust z_RChunks.  After that we cannot safely dereference (z).
     *
     * (no critical section needed)
     */
    if (z->z_CpuGd != gd) {
        /*
         * Making these adjustments now allows us to avoid passing (type)
         * to the remote cpu.  Note that ks_inuse/ks_memuse is being
         * adjusted on OUR cpu, not the zone cpu, but it should all still
         * sum up properly and cancel out.
         */
        crit_enter();
        --type->ks_inuse[gd->gd_cpuid];
        type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
        crit_exit();

        /*
         * WARNING! This code competes with other cpus.  Once we
         *          successfully link the chunk to RChunks the remote
         *          cpu can rip z's storage out from under us.
         *
         *          Bumping RCount prevents z's storage from getting
         *          ripped out.
         */
        rsignal = z->z_RSignal;
        cpu_lfence();
        if (rsignal)
            atomic_add_int(&z->z_RCount, 1);

        chunk = ptr;
        for (;;) {
            bchunk = z->z_RChunks;
            cpu_ccfence();
            chunk->c_Next = bchunk;
            cpu_sfence();

            if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
                break;
        }

        /*
         * We have to signal the remote cpu if our actions will cause
         * the remote zone to be placed back on ZoneAry so it can
         * move the zone back on.
         *
         * We only need to deal with NULL->non-NULL RChunk transitions
         * and only if z_RSignal is set.  We interlock by reading rsignal
         * before adding our chunk to RChunks.  This should result in
         * virtually no IPI traffic.
         *
         * We can use a passive IPI to reduce overhead even further.
         */
        if (bchunk == NULL && rsignal) {
            logmemory(free_request, ptr, type, (unsigned long)z->z_ChunkSize, 0);
            lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
            /* z can get ripped out from under us from this point on */
        } else if (rsignal) {
            atomic_subtract_int(&z->z_RCount, 1);
            /* z can get ripped out from under us from this point on */
        }
        logmemory_quick(free_end);
        return;
    }

    /*
     * kfree locally
     */
    logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

    crit_enter();
    chunk = ptr;
    chunk_mark_free(z, chunk);

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse.  Add
     * to the front of the linked list so it is more likely to be
     * reallocated, since it is already in our L1 cache.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_LChunks;
    z->z_LChunks = chunk;
    if (chunk->c_Next == NULL)
        z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
        panic("BADFREE2");
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
        z->z_RCount == 0
    ) {
        SLZone **pz;
        int *kup;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
        kup = btokup(z);
        *kup = 0;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
            ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
            ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
            ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
            ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    vm_page_t mbase = NULL;
    vm_page_t m;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
        vm_map_unlock(&kernel_map);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        vm_map_entry_release(count);
        crit_exit();
        return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_hold(&kernel_object);
    vm_object_reference_locked(&kernel_object);
    vm_map_insert(&kernel_map, &count,
                  &kernel_object, addr, addr, addr + size,
                  VM_MAPTYPE_NORMAL,
                  VM_PROT_ALL, VM_PROT_ALL,
                  0);
    vm_object_drop(&kernel_object);
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    vm_map_unlock(&kernel_map);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
        panic("kmem_slab_alloc: bad flags %08x (%p)",
              flags, ((int **)&size)[-1]);
    }

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag or map
     * them yet.  VM_ALLOC_NORMAL can only be set if we are not preempting.
     *
     * VM_ALLOC_SYSTEM is automatically set if we are preempting and
     * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
     * implied in this case), though I'm not sure if we really need to
     * do that.
     */
    vmflags = base_vmflags;
    if (flags & M_WAITOK) {
        if (td->td_preempted)
            vmflags |= VM_ALLOC_SYSTEM;
        else
            vmflags |= VM_ALLOC_NORMAL;
    }

    vm_object_hold(&kernel_object);
    for (i = 0; i < size; i += PAGE_SIZE) {
        m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
        if (i == 0)
            mbase = m;

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    lwkt_switch();
                } else {
                    vm_wait(0);
                }
                i -= PAGE_SIZE;	/* retry */
                continue;
            }
            break;
        }
    }

    /*
     * Check and deal with an allocation failure
     */
    if (i != size) {
        while (i != 0) {
            i -= PAGE_SIZE;
            m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
            /* page should already be busy */
            vm_page_free(m);
        }
        vm_map_lock(&kernel_map);
        vm_map_delete(&kernel_map, addr, addr + size, &count);
        vm_map_unlock(&kernel_map);
        vm_object_drop(&kernel_object);

        vm_map_entry_release(count);
        crit_exit();
        return(NULL);
    }

    /*
     * Success!
     *
     * NOTE: The VM pages are still busied.  mbase points to the first one
     *       but we have to iterate via vm_page_next()
     */
    vm_object_drop(&kernel_object);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    m = mbase;
    i = 0;

    while (i < size) {
        /*
         * page should already be busy
         */
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC,
                   1, NULL);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
        vm_page_flag_set(m, PG_REFERENCED);
        vm_page_wakeup(m);

        i += PAGE_SIZE;
        vm_object_hold(&kernel_object);
        m = vm_page_next(m);
        vm_object_drop(&kernel_object);
    }
    smp_invltlb();
    vm_map_entry_release(count);
    atomic_add_long(&SlabsAllocated, 1);
    return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    atomic_add_long(&SlabsFreed, 1);
    crit_exit();
}

void *
kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type,
		   int flags)
{
#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

    void *ret;

    if (size_alloc < __VM_CACHELINE_SIZE)
        size_alloc = __VM_CACHELINE_SIZE;
    else if (!CAN_CACHEALIGN(size_alloc))
        flags |= M_POWEROF2;

    ret = kmalloc(size_alloc, type, flags);
    KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0,
            ("%p(%lu) not cacheline %d aligned",
             ret, size_alloc, __VM_CACHELINE_SIZE));
    return ret;

#undef CAN_CACHEALIGN
}