/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *	Alignment properties:
 *	- All power-of-2 sized allocations are power-of-2 aligned.
 *	- Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *	  power-of-2 round up of 'size'.
 *	- Non-power-of-2 sized allocations are zone chunk size aligned (see
 *	  the above table's 'Chunking' column).
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *	To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *	have remained compatible with the following API requirements:
 *
 *	+ malloc(0) is allowed and returns non-NULL (ahc driver)
 *	+ ability to allocate arbitrarily large chunks of memory
 */
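
/*
 * Example usage sketch (hypothetical malloc type and sizes, shown only to
 * illustrate the drop-in API described above; not part of the allocator
 * itself):
 *
 *	MALLOC_DEFINE(M_FOOBUF, "foobuf", "example buffers");
 *
 *	void *p = kmalloc(1024, M_FOOBUF, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, M_FOOBUF);
 *
 * kmalloc(0, ...) returns the special ZERO_LENGTH_PTR rather than NULL,
 * and requests at or above ZoneLimit bypass the zones and go directly to
 * the kmem backend.
 */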

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)				\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)						\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32	/* threshold number of zones */

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0,
	   "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
	size_t limsize;

	limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
	if (limsize > KvaSize)
		limsize = KvaSize;
	return (limsize / (1024 * 1024));
}

static void
kmeminit(void *dummy)
{
	size_t limsize;
	int usesize;
	int i;

	limsize = kmem_lim_size();
	usesize = (int)(limsize * 1024);	/* convert to KB */

	/*
	 * If the machine has a large KVM space and more than 8G of ram,
	 * double the zone release threshold to reduce SMP invalidations.
	 * If more than 16G of ram, do it again.
	 *
	 * The BIOS eats a little ram so add some slop.  We want 8G worth of
	 * memory sticks to trigger the first adjustment.
	 */
	if (ZoneRelsThresh == ZONE_RELS_THRESH) {
		if (limsize >= 7 * 1024)
			ZoneRelsThresh *= 2;
		if (limsize >= 15 * 1024)
			ZoneRelsThresh *= 2;
	}

	/*
	 * Calculate the zone size.  This typically calculates to
	 * ZALLOC_MAX_ZONE_SIZE
	 */
	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
	while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
		ZoneSize <<= 1;
	ZoneLimit = ZoneSize / 4;
	if (ZoneLimit > ZALLOC_ZONE_LIMIT)
		ZoneLimit = ZALLOC_ZONE_LIMIT;
	ZoneMask = ~(uintptr_t)(ZoneSize - 1);
	ZonePageCount = ZoneSize / PAGE_SIZE;

	for (i = 0; i < NELEM(weirdary); ++i)
		weirdary[i] = WEIRD_ADDR;

	ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

	if (bootverbose)
		kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
	struct malloc_type *type = data;
	size_t limsize;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (vmstats.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	limsize = kmem_lim_size() * (1024 * 1024);
	type->ks_limit = limsize / 10;

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = data;
	struct malloc_type *t;
#ifdef INVARIANTS
	int i;
	long ttl;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (vmstats.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

	/* Make sure that all pending kfree()s are finished. */
	lwkt_synchronize_ipiqs("muninit");

#ifdef INVARIANTS
	/*
	 * memuse is only correct in aggregation.  Due to memory being
	 * allocated on one cpu and freed on another, individual array
	 * entries may be negative or positive (canceling each other out).
	 */
	for (i = ttl = 0; i < ncpus; ++i)
		ttl += type->ks_memuse[i];
	if (ttl) {
		kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
			ttl, type->ks_shortdesc, i);
	}
#endif
	if (type == kmemstatistics) {
		kmemstatistics = type->ks_next;
	} else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
	if (type->ks_limit == 0)
		malloc_init(type);
	if (bytes == 0)
		bytes = KvaSize;
	if (type->ks_limit < bytes)
		type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		malloc_init(type);
		*typep = type;
	}
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
	if (*typep != NULL) {
		malloc_uninit(*typep);
		kfree(*typep, M_TEMP);
		*typep = NULL;
	}
}
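
/*
 * Example lifecycle for a dynamically created pool (hypothetical names,
 * sketch only):
 *
 *	static struct malloc_type *M_EXAMPLEPOOL;
 *
 *	kmalloc_create(&M_EXAMPLEPOOL, "examplepool");
 *	p = kmalloc(sizeof(*p), M_EXAMPLEPOOL, M_WAITOK);
 *	...
 *	kfree(p, M_EXAMPLEPOOL);
 *	kmalloc_destroy(&M_EXAMPLEPOOL);
 */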

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
	unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		*align = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*align = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*align = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*align = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*align = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*align = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*align = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*align = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*align = 2048;
		return(n / 2048 + 63);
	}
#endif
	panic("Unexpected byte count %d", n);
	return(0);
}
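
/*
 * Worked examples for zoneindex(), derived from the rounding above and
 * listed only for illustration:
 *
 *	*bytes = 100	-> rounded to 104,  *align = 8,   returns 12
 *	*bytes = 3000	-> rounded to 3072, *align = 256, returns 51
 *
 * Requests of ZoneLimit bytes or more never reach this function; kmalloc()
 * hands them to the kmem backend instead.
 */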

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
	int i;
	int b = line & (SLAB_DEBUG_ENTRIES - 1);

	i = b;
	do {
		if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
			return;
		if (z->z_Sources[i].file == NULL)
			break;
		i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
	} while (i != b);
	z->z_Sources[i].file = file;
	z->z_Sources[i].line = line;
}

#endif

static __inline unsigned long
powerof2_size(unsigned long size)
{
	int i;

	if (size == 0 || powerof2(size))
		return size;

	i = flsl(size);
	return (1UL << i);
}

/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *	M_POWEROF2	- round up size to the nearest power of 2
 *
 * MPSAFE
 */

#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	      const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
	SLZone *z;
	SLChunk *chunk;
	SLChunk *bchunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	unsigned long align;
	int zi;
#ifdef INVARIANTS
	int i;
#endif

	logmemory_quick(malloc_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	/*
	 * XXX silly to have this in the critical path.
	 */
	if (type->ks_limit == 0) {
		crit_enter();
		if (type->ks_limit == 0)
			malloc_init(type);
		crit_exit();
	}
	++type->ks_calls;

	if (flags & M_POWEROF2)
		size = powerof2_size(size);

	/*
	 * Handle the case where the limit is reached.  Panic if we can't
	 * return NULL.  The original malloc code looped, but this tended to
	 * simply deadlock the computer.
	 *
	 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
	 * to determine if a more complete limit check should be done.  The
	 * actual memory use is tracked via ks_memuse[cpu].
	 */
	while (type->ks_loosememuse >= type->ks_limit) {
		int i;
		long ttl;

		for (i = ttl = 0; i < ncpus; ++i)
			ttl += type->ks_memuse[i];
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK) {
				logmemory(malloc_end, NULL, type, size, flags);
				return(NULL);
			}
			panic("%s: malloc limit exceeded", type->ks_shortdesc);
		}
	}

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0) {
		logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle hysteresis from prior frees here in malloc().  We cannot
	 * safely manipulate the kernel_map in free() due to free() possibly
	 * being called via an IPI message or from sensitive interrupt code.
	 *
	 * NOTE: ku_pagecnt must be cleared before we free the slab or we
	 *	 might race another cpu allocating the kva and setting
	 *	 ku_pagecnt.
	 */
	while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
			int *kup;

			z = slgd->FreeZones;
			slgd->FreeZones = z->z_Next;
			--slgd->NFreeZones;
			kup = btokup(z);
			*kup = 0;
			kmem_slab_free(z, ZoneSize);	/* may block */
			atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
		}
		crit_exit();
	}

	/*
	 * XXX handle oversized frees that were queued from kfree().
	 */
	while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if ((z = slgd->FreeOvZones) != NULL) {
			vm_size_t tsize;

			KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
			slgd->FreeOvZones = z->z_Next;
			tsize = z->z_ChunkSize;
			kmem_slab_free(z, tsize);	/* may block */
			atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
		}
		crit_exit();
	}

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on an SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.  XXX maybe fix mmio and the elf loader
	 * instead.
	 */
	if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		int *kup;

		size = round_page(size);
		chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL) {
			logmemory(malloc_end, NULL, type, size, flags);
			return(NULL);
		}
		atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
		flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
		flags |= M_PASSIVE_ZERO;
		kup = btokup(chunk);
		*kup = size / PAGE_SIZE;
		crit_enter();
		goto done;
	}

	/*
	 * Attempt to allocate out of an existing zone.  First try the free
	 * list, then allocate out of unallocated space.  If we find a good
	 * zone move it to the head of the list so later allocations find it
	 * quickly (we might have thousands of zones in the list).
	 *
	 * Note: zoneindex() will panic if size is too large.
	 */
	zi = zoneindex(&size, &align);
	KKASSERT(zi < NZONES);
	crit_enter();

	if ((z = slgd->ZoneAry[zi]) != NULL) {
		/*
		 * Locate a chunk - we have to have at least one.  If this is
		 * the last chunk go ahead and do the work to retrieve chunks
		 * freed from remote cpus, and if the zone is still empty move
		 * it off the ZoneAry.
		 */
		if (--z->z_NFree <= 0) {
			KKASSERT(z->z_NFree == 0);

			/*
			 * WARNING! This code competes with other cpus.  It is
			 * ok for us to not drain RChunks here but we might as
			 * well, and it is ok if more accumulate after we're
			 * done.
			 *
			 * Set RSignal before pulling rchunks off, indicating
			 * that we will be moving ourselves off of the ZoneAry.
			 * Remote ends will read RSignal before putting rchunks
			 * on thus interlocking their IPI signaling.
			 */
			if (z->z_RChunks == NULL)
				atomic_swap_int(&z->z_RSignal, 1);

			while ((bchunk = z->z_RChunks) != NULL) {
				cpu_ccfence();
				if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
					*z->z_LChunksp = bchunk;
					while (bchunk) {
						chunk_mark_free(z, bchunk);
						z->z_LChunksp = &bchunk->c_Next;
						bchunk = bchunk->c_Next;
						++z->z_NFree;
					}
					break;
				}
			}

			/*
			 * Remove from the zone list if no free chunks remain.
			 * Clear RSignal
			 */
			if (z->z_NFree == 0) {
				slgd->ZoneAry[zi] = z->z_Next;
				z->z_Next = NULL;
			} else {
				z->z_RSignal = 0;
			}
		}

		/*
		 * Fast path, we have chunks available in z_LChunks.
		 */
		chunk = z->z_LChunks;
		if (chunk) {
			chunk_mark_allocated(z, chunk);
			z->z_LChunks = chunk->c_Next;
			if (z->z_LChunks == NULL)
				z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
			slab_record_source(z, file, line);
#endif
			goto done;
		}

		/*
		 * No chunks are available in LChunks, the free chunk MUST be
		 * in the never-before-used memory area, controlled by UIndex.
		 *
		 * The consequences are very serious if our zone got corrupted
		 * so we use an explicit panic rather than a KASSERT.
		 */
		if (z->z_UIndex + 1 != z->z_NMax)
			++z->z_UIndex;
		else
			z->z_UIndex = 0;

		if (z->z_UIndex == z->z_UEndIndex)
			panic("slaballoc: corrupted zone");

		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;
			flags |= M_PASSIVE_ZERO;
		}
		chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
		slab_record_source(z, file, line);
#endif
		goto done;
	}

	/*
	 * If all zones are exhausted we need to allocate a new zone for this
	 * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
	 * UAlloc use above in regard to M_ZERO.  Note that when we are reusing
	 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
	 * we do not pre-zero it because we do not want to mess up the L1 cache.
	 *
	 * At least one subsystem, the tty code (see CROUND) expects power-of-2
	 * allocations to be power-of-2 aligned.  We maintain compatibility by
	 * adjusting the base offset below.
	 */
	{
		int off;
		int *kup;

		if ((z = slgd->FreeZones) != NULL) {
			slgd->FreeZones = z->z_Next;
			--slgd->NFreeZones;
			bzero(z, sizeof(SLZone));
			z->z_Flags |= SLZF_UNOTZEROD;
		} else {
			z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
			if (z == NULL)
				goto fail;
			atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
		}

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is somewhat
		 * more complicated so don't make an exact calculation.
		 */
		off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(SLZone);
#endif

		/*
		 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
		 * Otherwise properly align the data according to the chunk
		 * size.
		 */
		if (powerof2(size))
			align = size;
		off = (off + align - 1) & ~(align - 1);

		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax - 1;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
		z->z_ChunkSize = size;
		z->z_CpuGd = gd;
		z->z_Cpu = gd->gd_cpuid;
		z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
		bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
		bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;	/* already zero'd */
			flags |= M_PASSIVE_ZERO;
		}
		kup = btokup(z);
		*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
		chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
		slab_record_source(z, file, line);
#endif

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
				  & (ZALLOC_MAX_ZONE_SIZE - 1);
	}

done:
	++type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] += size;
	type->ks_loosememuse += size;	/* not MP synchronized */
	crit_exit();

	if (flags & M_ZERO)
		bzero(chunk, size);
#ifdef INVARIANTS
	else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
	}
#endif
	logmemory(malloc_end, chunk, type, size, flags);
	return(chunk);
fail:
	crit_exit();
	logmemory(malloc_end, NULL, type, size, flags);
	return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
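/*
 * For example (sketch, sizes chosen only for illustration): a 100 byte
 * allocation lives in a 104 byte chunk, so a krealloc() to any size that
 * also rounds to 104 returns the original pointer, while a krealloc() to
 * 120 bytes allocates a new 120 byte chunk, copies the overlapping bytes,
 * and frees the old pointer.
 */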
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
	       struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
	unsigned long osize;
	unsigned long align;
	SLZone *z;
	void *nptr;
	int *kup;

	KKASSERT((flags & M_ZERO) == 0);	/* not supported */

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
		return(kmalloc_debug(size, type, flags, file, line));
	if (size == 0) {
		kfree(ptr, type);
		return(NULL);
	}

	/*
	 * Handle oversized allocations.  XXX we really should require that a
	 * size be passed to free() instead of this nonsense.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		osize = *kup << PAGE_SHIFT;
		if (osize == round_page(size))
			return(ptr);
		if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
			return(NULL);
		bcopy(ptr, nptr, min(size, osize));
		kfree(ptr, type);
		return(nptr);
	}

	/*
	 * Get the original allocation's zone.  If the new request winds up
	 * using the same chunk size we do not have to do anything.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Allocate memory for the new request size.  Note that zoneindex has
	 * already adjusted the request size to the appropriate chunk size,
	 * which should optimize our bcopy().  Then copy and return the new
	 * pointer.
	 *
	 * Resizing a non-power-of-2 allocation to a power-of-2 size does not
	 * necessarily align the result.
	 *
	 * We can only zoneindex (to align size to the chunk size) if the new
	 * size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &align);
		if (z->z_ChunkSize == size)
			return(ptr);
	}
	if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
		return(NULL);
	bcopy(ptr, nptr, min(size, z->z_ChunkSize));
	kfree(ptr, type);
	return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
	if (type->ks_limit == 0) {
		crit_enter();
		if (type->ks_limit == 0)
			malloc_init(type);
		crit_exit();
	}
	return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
	      const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strlen(str) + 1;
	nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
	bcopy(str, nstr, zlen);
	return(nstr);
}

/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static
void
kfree_remote(void *ptr)
{
	SLGlobalData *slgd;
	SLChunk *bchunk;
	SLZone *z;
	int nfree;
	int *kup;

	slgd = &mycpu->gd_slab;
	z = ptr;
	kup = btokup(z);
	KKASSERT(*kup == -((int)mycpuid + 1));
	KKASSERT(z->z_RCount > 0);
	atomic_subtract_int(&z->z_RCount, 1);

	logmemory(free_rem_beg, z, NULL, 0L, 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
	KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
	nfree = z->z_NFree;

	/*
	 * Indicate that we will no longer be off of the ZoneAry by
	 * clearing RSignal.
	 */
	if (z->z_RChunks)
		z->z_RSignal = 0;

	/*
	 * Atomically extract the bchunks list and then process it back
	 * into the lchunks list.  We want to append our bchunks to the
	 * lchunks list and not prepend since we likely do not have
	 * cache mastership of the related data (not that it helps since
	 * we are using c_Next).
	 */
	while ((bchunk = z->z_RChunks) != NULL) {
		cpu_ccfence();
		if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
			*z->z_LChunksp = bchunk;
			while (bchunk) {
				chunk_mark_free(z, bchunk);
				z->z_LChunksp = &bchunk->c_Next;
				bchunk = bchunk->c_Next;
				++z->z_NFree;
			}
			break;
		}
	}
	if (z->z_NFree && nfree == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free, and there are other zones we
	 * can allocate from, move this zone to the FreeZones list.  Since
	 * this code can be called from an IPI callback, do *NOT* try to mess
	 * with kernel_map here.  Hysteresis will be performed at malloc()
	 * time.
	 *
	 * Do not move the zone if there is an IPI inflight, otherwise MP
	 * races can result in our free_remote code accessing a destroyed
	 * zone.
	 */
	if (z->z_NFree == z->z_NMax &&
	    (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
	    z->z_RCount == 0
	) {
		SLZone **pz;
		int *kup;

		for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
		     z != *pz;
		     pz = &(*pz)->z_Next) {
			;
		}
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = slgd->FreeZones;
		slgd->FreeZones = z;
		++slgd->NFreeZones;
		kup = btokup(z);
		*kup = 0;
	}
	logmemory(free_rem_end, z, bchunk, 0L, 0);
}

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
	SLZone *z;
	SLChunk *chunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	int *kup;
	unsigned long size;
	SLChunk *bchunk;
	int rsignal;

	logmemory_quick(free_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	if (ptr == NULL)
		panic("trying to free NULL pointer");

	/*
	 * Handle special 0-byte allocations
	 */
	if (ptr == ZERO_LENGTH_PTR) {
		logmemory(free_zero, ptr, type, -1UL, 0);
		logmemory_quick(free_end);
		return;
	}

	/*
	 * Panic on bad malloc type
	 */
	if (type->ks_magic != M_MAGIC)
		panic("free: malloc type lacks magic");

	/*
	 * Handle oversized allocations.  XXX we really should require that a
	 * size be passed to free() instead of this nonsense.
	 *
	 * This code is never called via an ipi.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		size = *kup << PAGE_SHIFT;
		*kup = 0;
#ifdef INVARIANTS
		KKASSERT(sizeof(weirdary) <= size);
		bcopy(weirdary, ptr, sizeof(weirdary));
#endif
		/*
		 * NOTE: For oversized allocations we do not record the
		 *	 originating cpu.  It gets freed on the cpu calling
		 *	 kfree().  The statistics are in aggregate.
		 *
		 * note: XXX we have still inherited the interrupts-can't-block
		 *	 assumption.  An interrupt thread does not bump
		 *	 gd_intr_nesting_level so check TDF_INTTHREAD.  This is
		 *	 primarily until we can fix softupdate's assumptions
		 *	 about free().
		 */
		crit_enter();
		--type->ks_inuse[gd->gd_cpuid];
		type->ks_memuse[gd->gd_cpuid] -= size;
		if (mycpu->gd_intr_nesting_level ||
		    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
		{
			logmemory(free_ovsz_delayed, ptr, type, size, 0);
			z = (SLZone *)ptr;
			z->z_Magic = ZALLOC_OVSZ_MAGIC;
			z->z_Next = slgd->FreeOvZones;
			z->z_ChunkSize = size;
			slgd->FreeOvZones = z;
			crit_exit();
		} else {
			crit_exit();
			logmemory(free_ovsz, ptr, type, size, 0);
			kmem_slab_free(ptr, size);	/* may block */
			atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
		}
		logmemory_quick(free_end);
		return;
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * If we do not own the zone then use atomic ops to free to the
	 * remote cpu linked list and notify the target zone using a
	 * passive message.
	 *
	 * The target zone cannot be deallocated while we own a chunk of it,
	 * so the zone header's storage is stable until the very moment
	 * we adjust z_RChunks.  After that we cannot safely dereference (z).
	 *
	 * (no critical section needed)
	 */
	if (z->z_CpuGd != gd) {
		/*
		 * Making these adjustments now allows us to avoid passing
		 * (type) to the remote cpu.  Note that ks_inuse/ks_memuse is
		 * being adjusted on OUR cpu, not the zone cpu, but it should
		 * all still sum up properly and cancel out.
		 */
		crit_enter();
		--type->ks_inuse[gd->gd_cpuid];
		type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
		crit_exit();

		/*
		 * WARNING! This code competes with other cpus.  Once we
		 *	    successfully link the chunk to RChunks the remote
		 *	    cpu can rip z's storage out from under us.
		 *
		 *	    Bumping RCount prevents z's storage from getting
		 *	    ripped out.
		 */
		rsignal = z->z_RSignal;
		cpu_lfence();
		if (rsignal)
			atomic_add_int(&z->z_RCount, 1);

		chunk = ptr;
		for (;;) {
			bchunk = z->z_RChunks;
			cpu_ccfence();
			chunk->c_Next = bchunk;
			cpu_sfence();

			if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
				break;
		}

		/*
		 * We have to signal the remote cpu if our actions will cause
		 * the remote zone to be placed back on ZoneAry so it can
		 * move the zone back on.
		 *
		 * We only need to deal with NULL->non-NULL RChunk transitions
		 * and only if z_RSignal is set.  We interlock by reading
		 * rsignal before adding our chunk to RChunks.  This should
		 * result in virtually no IPI traffic.
		 *
		 * We can use a passive IPI to reduce overhead even further.
		 */
		if (bchunk == NULL && rsignal) {
			logmemory(free_request, ptr, type, (unsigned long)z->z_ChunkSize, 0);
			lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
			/* z can get ripped out from under us from this point on */
		} else if (rsignal) {
			atomic_subtract_int(&z->z_RCount, 1);
			/* z can get ripped out from under us from this point on */
		}
		logmemory_quick(free_end);
		return;
	}

	/*
	 * kfree locally
	 */
	logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

	crit_enter();
	chunk = ptr;
	chunk_mark_free(z, chunk);

	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on
	 * the odd address), and so forth.  XXX needs more work, see the
	 * old malloc code.
	 */
#ifdef INVARIANTS
	if (z->z_ChunkSize < sizeof(weirdary))
		bcopy(weirdary, chunk, z->z_ChunkSize);
	else
		bcopy(weirdary, chunk, sizeof(weirdary));
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse.  Add
	 * to the front of the linked list so it is more likely to be
	 * reallocated, since it is already in our L1 cache.
	 */
#ifdef INVARIANTS
	if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		panic("BADFREE %p", chunk);
#endif
	chunk->c_Next = z->z_LChunks;
	z->z_LChunks = chunk;
	if (chunk->c_Next == NULL)
		z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
	if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		panic("BADFREE2");
#endif

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	--type->ks_inuse[z->z_Cpu];
	type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

	/*
	 * If the zone becomes totally free, and there are other zones we
	 * can allocate from, move this zone to the FreeZones list.  Since
	 * this code can be called from an IPI callback, do *NOT* try to mess
	 * with kernel_map here.  Hysteresis will be performed at malloc()
	 * time.
	 */
	if (z->z_NFree == z->z_NMax &&
	    (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
	    z->z_RCount == 0
	) {
		SLZone **pz;
		int *kup;

		for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
			;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = slgd->FreeZones;
		slgd->FreeZones = z;
		++slgd->NFreeZones;
		kup = btokup(z);
		*kup = 0;
	}
	logmemory_quick(free_end);
	crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) == 0,
		("memory chunk %p is already allocated!", chunk));
	*bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal!", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) != 0,
		("memory chunk %p is already free!", chunk));
	*bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
	vm_size_t i;
	vm_offset_t addr;
	int count, vmflags, base_vmflags;
	vm_page_t mbase = NULL;
	vm_page_t m;
	thread_t td;

	size = round_page(size);
	addr = vm_map_min(&kernel_map);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	crit_enter();
	vm_map_lock(&kernel_map);
	if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
		vm_map_unlock(&kernel_map);
		if ((flags & M_NULLOK) == 0)
			panic("kmem_slab_alloc(): kernel_map ran out of space!");
		vm_map_entry_release(count);
		crit_exit();
		return(NULL);
	}

	/*
	 * kernel_object maps 1:1 to kernel_map.
	 */
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(&kernel_map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_object_drop(&kernel_object);
	vm_map_set_wired_quick(&kernel_map, addr, size, &count);
	vm_map_unlock(&kernel_map);

	td = curthread;

	base_vmflags = 0;
	if (flags & M_ZERO)
		base_vmflags |= VM_ALLOC_ZERO;
	if (flags & M_USE_RESERVE)
		base_vmflags |= VM_ALLOC_SYSTEM;
	if (flags & M_USE_INTERRUPT_RESERVE)
		base_vmflags |= VM_ALLOC_INTERRUPT;
	if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
		panic("kmem_slab_alloc: bad flags %08x (%p)",
		      flags, ((int **)&size)[-1]);
	}

	/*
	 * Allocate the pages.  Do not mess with the PG_ZERO flag or map
	 * them yet.  VM_ALLOC_NORMAL can only be set if we are not preempting.
	 *
	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
	 * implied in this case), though I'm not sure if we really need to
	 * do that.
	 */
	vmflags = base_vmflags;
	if (flags & M_WAITOK) {
		if (td->td_preempted)
			vmflags |= VM_ALLOC_SYSTEM;
		else
			vmflags |= VM_ALLOC_NORMAL;
	}

	vm_object_hold(&kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
		if (i == 0)
			mbase = m;

		/*
		 * If the allocation failed we either return NULL or we retry.
		 *
		 * If M_WAITOK is specified we wait for more memory and retry.
		 * If M_WAITOK is specified from a preemption we yield instead
		 * of waiting.  Livelock will not occur because the interrupt
		 * thread will not be preempting anyone the second time around
		 * after the yield.
		 */
		if (m == NULL) {
			if (flags & M_WAITOK) {
				if (td->td_preempted) {
					lwkt_switch();
				} else {
					vm_wait(0);
				}
				i -= PAGE_SIZE;	/* retry */
				continue;
			}
			break;
		}
	}

	/*
	 * Check and deal with an allocation failure
	 */
	if (i != size) {
		while (i != 0) {
			i -= PAGE_SIZE;
			m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
			/* page should already be busy */
			vm_page_free(m);
		}
		vm_map_lock(&kernel_map);
		vm_map_delete(&kernel_map, addr, addr + size, &count);
		vm_map_unlock(&kernel_map);
		vm_object_drop(&kernel_object);

		vm_map_entry_release(count);
		crit_exit();
		return(NULL);
	}

	/*
	 * Success!
	 *
	 * NOTE: The VM pages are still busied.  mbase points to the first one
	 *	 but we have to iterate via vm_page_next()
	 */
	vm_object_drop(&kernel_object);
	crit_exit();

	/*
	 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
	 */
	m = mbase;
	i = 0;

	while (i < size) {
		/*
		 * page should already be busy
		 */
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC,
			   1, NULL);
		if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
			bzero((char *)addr + i, PAGE_SIZE);
		vm_page_flag_clear(m, PG_ZERO);
		KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
		vm_page_flag_set(m, PG_REFERENCED);
		vm_page_wakeup(m);

		i += PAGE_SIZE;
		vm_object_hold(&kernel_object);
		m = vm_page_next(m);
		vm_object_drop(&kernel_object);
	}
	smp_invltlb();
	vm_map_entry_release(count);
	return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
	crit_enter();
	vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
	crit_exit();
}

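/*
 * kmalloc_cachealign()
 *
 * Allocate memory whose returned address is aligned to the CPU cache line
 * size (__VM_CACHELINE_SIZE).  Small requests are padded up to a full cache
 * line; larger requests that cannot otherwise guarantee the alignment are
 * rounded up with M_POWEROF2.
 */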
1540 */ 1541 m = mbase; 1542 i = 0; 1543 1544 while (i < size) { 1545 /* 1546 * page should already be busy 1547 */ 1548 m->valid = VM_PAGE_BITS_ALL; 1549 vm_page_wire(m); 1550 pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC, 1551 1, NULL); 1552 if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO)) 1553 bzero((char *)addr + i, PAGE_SIZE); 1554 vm_page_flag_clear(m, PG_ZERO); 1555 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED)); 1556 vm_page_flag_set(m, PG_REFERENCED); 1557 vm_page_wakeup(m); 1558 1559 i += PAGE_SIZE; 1560 vm_object_hold(&kernel_object); 1561 m = vm_page_next(m); 1562 vm_object_drop(&kernel_object); 1563 } 1564 smp_invltlb(); 1565 vm_map_entry_release(count); 1566 return((void *)addr); 1567 } 1568 1569 /* 1570 * kmem_slab_free() 1571 */ 1572 static void 1573 kmem_slab_free(void *ptr, vm_size_t size) 1574 { 1575 crit_enter(); 1576 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); 1577 crit_exit(); 1578 } 1579 1580 void * 1581 kmalloc_cachealign(unsigned long size_alloc, struct malloc_type *type, 1582 int flags) 1583 { 1584 #if (__VM_CACHELINE_SIZE == 32) 1585 #define CAN_CACHEALIGN(sz) ((sz) >= 256) 1586 #elif (__VM_CACHELINE_SIZE == 64) 1587 #define CAN_CACHEALIGN(sz) ((sz) >= 512) 1588 #elif (__VM_CACHELINE_SIZE == 128) 1589 #define CAN_CACHEALIGN(sz) ((sz) >= 1024) 1590 #else 1591 #error "unsupported cacheline size" 1592 #endif 1593 1594 void *ret; 1595 1596 if (size_alloc < __VM_CACHELINE_SIZE) 1597 size_alloc = __VM_CACHELINE_SIZE; 1598 else if (!CAN_CACHEALIGN(size_alloc)) 1599 flags |= M_POWEROF2; 1600 1601 ret = kmalloc(size_alloc, type, flags); 1602 KASSERT(((uintptr_t)ret & (__VM_CACHELINE_SIZE - 1)) == 0, 1603 ("%p(%lu) not cacheline %d aligned", 1604 ret, size_alloc, __VM_CACHELINE_SIZE)); 1605 return ret; 1606 1607 #undef CAN_CACHEALIGN 1608 } 1609