/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
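 *
 * As a quick illustration of the chunking table below: a 200 byte request
 * falls into the 128-255 band, is rounded up to the next 16 byte boundary
 * (208 bytes), and is served from the zone dedicated to 208 byte chunks.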
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + \
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	32		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
    &use_malloc_pattern, 0,
    "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
    size_t limsize;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;
    return (limsize / (1024 * 1024));
}

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;

    limsize = kmem_lim_size();
    usesize = (int)(limsize * 1024);	/* convert to KB */

    /*
     * If the machine has a large KVM space and more than 8G of ram,
     * double the zone release threshold to reduce SMP invalidations.
     * If more than 16G of ram, do it again.
     *
     * The BIOS eats a little ram so add some slop.  We want 8G worth of
     * memory sticks to trigger the first adjustment.
     */
    if (ZoneRelsThresh == ZONE_RELS_THRESH) {
	if (limsize >= 7 * 1024)
	    ZoneRelsThresh *= 2;
	if (limsize >= 15 * 1024)
	    ZoneRelsThresh *= 2;
    }

    /*
     * Calculate the zone size.
     * This typically calculates to ZALLOC_MAX_ZONE_SIZE.
     */
    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;

    for (i = 0; i < NELEM(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = kmem_lim_size() * (1024 * 1024);
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

#ifdef SMP
    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
	    ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
	malloc_init(type);
    if (bytes == 0)
	bytes = KvaSize;
    if (type->ks_limit < bytes)
	type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
	type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
	type->ks_magic = M_MAGIC;
	type->ks_shortdesc = descr;
	malloc_init(type);
	*typep = type;
    }
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
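 *
 * A minimal usage sketch for the create/destroy pair (illustrative only;
 * M_FOO and the surrounding driver code are hypothetical):
 *
 *	static struct malloc_type *M_FOO;
 *
 *	kmalloc_create(&M_FOO, "foo driver buffers");
 *	p = kmalloc(sizeof(*p), M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, M_FOO);
 *	kmalloc_destroy(&M_FOO);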
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
	malloc_uninit(*typep);
	kfree(*typep, M_TEMP);
	*typep = NULL;
    }
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
    int i;
    int b = line & (SLAB_DEBUG_ENTRIES - 1);

    i = b;
    do {
	if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
	    return;
	if (z->z_Sources[i].file == NULL)
	    break;
	i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
    } while (i != b);
    z->z_Sources[i].file = file;
    z->z_Sources[i].line = line;
}

#endif

/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */

#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	      const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
    SLZone *z;
    SLChunk *chunk;
#ifdef SMP
    SLChunk *bchunk;
#endif
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.
     * The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;	/* not MP synchronized */
	if ((ssize_t)ttl < 0)		/* deal with occasional race */
	    ttl = 0;
	if (ttl >= type->ks_limit) {
	    if (flags & M_NULLOK) {
		logmemory(malloc_end, NULL, type, size, flags);
		return(NULL);
	    }
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
	logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     *
     * NOTE: ku_pagecnt must be cleared before we free the slab or we
     *	     might race another cpu allocating the kva and setting
     *	     ku_pagecnt.
     */
    while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
	    int *kup;

	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    kup = btokup(z);
	    *kup = 0;
	    kmem_slab_free(z, ZoneSize);	/* may block */
	    atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
	}
	crit_exit();
    }

    /*
     * XXX handle oversized frees that were queued from kfree().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    vm_size_t tsize;

	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    tsize = z->z_ChunkSize;
	    kmem_slab_free(z, tsize);	/* may block */
	    atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
	}
	crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on a SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
	int *kup;

	size = round_page(size);
	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
	if (chunk == NULL) {
	    logmemory(malloc_end, NULL, type, size, flags);
	    return(NULL);
	}
	atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;
	kup = btokup(chunk);
	*kup = size / PAGE_SIZE;
	crit_enter();
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.
     * If we find a good zone move it to the head of the list so later
     * allocations find it quickly (we might have thousands of zones in
     * the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();

    if ((z = slgd->ZoneAry[zi]) != NULL) {
	/*
	 * Locate a chunk - we have to have at least one.  If this is the
	 * last chunk go ahead and do the work to retrieve chunks freed
	 * from remote cpus, and if the zone is still empty move it off
	 * the ZoneAry.
	 */
	if (--z->z_NFree <= 0) {
	    KKASSERT(z->z_NFree == 0);

#ifdef SMP
	    /*
	     * WARNING! This code competes with other cpus.  It is ok
	     * for us to not drain RChunks here but we might as well, and
	     * it is ok if more accumulate after we're done.
	     *
	     * Set RSignal before pulling rchunks off, indicating that we
	     * will be moving ourselves off of the ZoneAry.  Remote ends will
	     * read RSignal before putting rchunks on thus interlocking
	     * their IPI signaling.
	     */
	    if (z->z_RChunks == NULL)
		atomic_swap_int(&z->z_RSignal, 1);

	    while ((bchunk = z->z_RChunks) != NULL) {
		cpu_ccfence();
		if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
		    *z->z_LChunksp = bchunk;
		    while (bchunk) {
			chunk_mark_free(z, bchunk);
			z->z_LChunksp = &bchunk->c_Next;
			bchunk = bchunk->c_Next;
			++z->z_NFree;
		    }
		    break;
		}
	    }
#endif
	    /*
	     * Remove from the zone list if no free chunks remain.
	     * Clear RSignal.
	     */
	    if (z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	    } else {
		z->z_RSignal = 0;
	    }
	}

	/*
	 * Fast path, we have chunks available in z_LChunks.
	 */
	chunk = z->z_LChunks;
	if (chunk) {
	    chunk_mark_allocated(z, chunk);
	    z->z_LChunks = chunk->c_Next;
	    if (z->z_LChunks == NULL)
		z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
	    slab_record_source(z, file, line);
#endif
	    goto done;
	}

	/*
	 * No chunks are available in LChunks, the free chunk MUST be
	 * in the never-before-used memory area, controlled by UIndex.
	 *
	 * The consequences are very serious if our zone got corrupted so
	 * we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    ++z->z_UIndex;
	else
	    z->z_UIndex = 0;

	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");

	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
	chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
	slab_record_source(z, file, line);
#endif
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
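     *
     * (Illustrative: for size = 64, (64 | 63) + 1 == 128 == 64 << 1, so the
     * test below treats 64 as a power of 2 and rounds the base offset up to
     * a 64 byte boundary; a non-power-of-2 size such as 48 fails the test
     * and only gets the minimum 8 byte alignment.)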
     */
    {
	int off;
	int *kup;

	if ((z = slgd->FreeZones) != NULL) {
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    bzero(z, sizeof(SLZone));
	    z->z_Flags |= SLZF_UNOTZEROD;
	} else {
	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
	    if (z == NULL)
		goto fail;
	    atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
	}

	/*
	 * How big is the base structure?
	 */
#if defined(INVARIANTS)
	/*
	 * Make room for z_Bitmap.  An exact calculation is somewhat more
	 * complicated so don't make an exact calculation.
	 */
	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
	off = sizeof(SLZone);
#endif

	/*
	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
	 * Otherwise just 8-byte align the data.
	 */
	if ((size | (size - 1)) + 1 == (size << 1))
	    off = (off + size - 1) & ~(size - 1);
	else
	    off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
	z->z_Magic = ZALLOC_SLAB_MAGIC;
	z->z_ZoneIndex = zi;
	z->z_NMax = (ZoneSize - off) / size;
	z->z_NFree = z->z_NMax - 1;
	z->z_BasePtr = (char *)z + off;
	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
	z->z_ChunkSize = size;
	z->z_CpuGd = gd;
	z->z_Cpu = gd->gd_cpuid;
	z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
	bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
	bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	z->z_Next = slgd->ZoneAry[zi];
	slgd->ZoneAry[zi] = z;
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;	/* already zero'd */
	    flags |= M_PASSIVE_ZERO;
	}
	kup = btokup(z);
	*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
	chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
	slab_record_source(z, file, line);
#endif

	/*
	 * Slide the base index for initial allocations out of the next
	 * zone we create so we do not over-weight the lower part of the
	 * cpu memory caches.
	 */
	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
			  & (ZALLOC_MAX_ZONE_SIZE - 1);
    }

done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();

    if (flags & M_ZERO)
	bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
	if (use_malloc_pattern) {
	    for (i = 0; i < size; i += sizeof(int)) {
		*(int *)((char *)chunk + i) = -1;
	    }
	}
	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
    }
#endif
    logmemory(malloc_end, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc_end, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
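 *
 * A rough usage sketch (sizes are illustrative; M_TEMP is just an example
 * pool):
 *
 *	buf = kmalloc(100, M_TEMP, M_WAITOK);	     - 104 byte chunk
 *	buf = krealloc(buf, 104, M_TEMP, M_WAITOK);  - same chunk size,
 *						       pointer reused
 *	buf = krealloc(buf, 200, M_TEMP, M_WAITOK);  - different chunk size,
 *						       new chunk, data copied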
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
	       struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
    unsigned long osize;
    SLZone *z;
    void *nptr;
    int *kup;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(kmalloc_debug(size, type, flags, file, line));
    if (size == 0) {
	kfree(ptr, type);
	return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
	osize = *kup << PAGE_SHIFT;
	if (osize == round_page(size))
	    return(ptr);
	if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
	    return(NULL);
	bcopy(ptr, nptr, min(size, osize));
	kfree(ptr, type);
	return(nptr);
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
	zoneindex(&size);
	if (z->z_ChunkSize == size)
	    return(ptr);
    }
    if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
	      const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
	return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
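 *
 * Rough sequence (descriptive only): a remote cpu links the freed chunk
 * onto z_RChunks with atomic ops in kfree(), observes that z_RSignal was
 * set, and sends this passive IPI; here we drain z_RChunks into the local
 * z_LChunks list and put the zone back on the ZoneAry if it had been
 * removed.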
 */
static
void
kfree_remote(void *ptr)
{
    SLGlobalData *slgd;
    SLChunk *bchunk;
    SLZone *z;
    int nfree;
    int *kup;

    slgd = &mycpu->gd_slab;
    z = ptr;
    kup = btokup(z);
    KKASSERT(*kup == -((int)mycpuid + 1));
    KKASSERT(z->z_RCount > 0);
    atomic_subtract_int(&z->z_RCount, 1);

    logmemory(free_rem_beg, z, NULL, 0, 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
    KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
    nfree = z->z_NFree;

    /*
     * Indicate that we will no longer be off of the ZoneAry by
     * clearing RSignal.
     */
    if (z->z_RChunks)
	z->z_RSignal = 0;

    /*
     * Atomically extract the bchunks list and then process it back
     * into the lchunks list.  We want to append our bchunks to the
     * lchunks list and not prepend since we likely do not have
     * cache mastership of the related data (not that it helps since
     * we are using c_Next).
     */
    while ((bchunk = z->z_RChunks) != NULL) {
	cpu_ccfence();
	if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
	    *z->z_LChunksp = bchunk;
	    while (bchunk) {
		chunk_mark_free(z, bchunk);
		z->z_LChunksp = &bchunk->c_Next;
		bchunk = bchunk->c_Next;
		++z->z_NFree;
	    }
	    break;
	}
    }
    if (z->z_NFree && nfree == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     *
     * Do not move the zone if there is an IPI inflight, otherwise MP
     * races can result in our free_remote code accessing a destroyed
     * zone.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
	z->z_RCount == 0
    ) {
	SLZone **pz;
	int *kup;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
	     z != *pz;
	     pz = &(*pz)->z_Next) {
	    ;
	}
	*pz = z->z_Next;
	z->z_Magic = -1;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
	kup = btokup(z);
	*kup = 0;
    }
    logmemory(free_rem_end, z, bchunk, 0, 0);
}

#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int *kup;
    unsigned long size;
#ifdef SMP
    SLChunk *bchunk;
    int rsignal;
#endif

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
	panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
	logmemory(free_zero, ptr, type, -1, 0);
	logmemory_quick(free_end);
	return;
    }

    /*
     * Panic on bad malloc type
     */
    if (type->ks_magic != M_MAGIC)
	panic("free: malloc type lacks magic");

    /*
     * Handle oversized allocations.
     * XXX we really should require that a size be passed to free()
     * instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
	size = *kup << PAGE_SHIFT;
	*kup = 0;
#ifdef INVARIANTS
	KKASSERT(sizeof(weirdary) <= size);
	bcopy(weirdary, ptr, sizeof(weirdary));
#endif
	/*
	 * NOTE: For oversized allocations we do not record the
	 *	 originating cpu.  It gets freed on the cpu calling
	 *	 kfree().  The statistics are in aggregate.
	 *
	 * note: XXX we have still inherited the interrupts-can't-block
	 * assumption.  An interrupt thread does not bump
	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	 * primarily until we can fix softupdate's assumptions about free().
	 */
	crit_enter();
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= size;
	if (mycpu->gd_intr_nesting_level ||
	    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
	{
	    logmemory(free_ovsz_delayed, ptr, type, size, 0);
	    z = (SLZone *)ptr;
	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
	    z->z_Next = slgd->FreeOvZones;
	    z->z_ChunkSize = size;
	    slgd->FreeOvZones = z;
	    crit_exit();
	} else {
	    crit_exit();
	    logmemory(free_ovsz, ptr, type, size, 0);
	    kmem_slab_free(ptr, size);	/* may block */
	    atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
	}
	logmemory_quick(free_end);
	return;
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then use atomic ops to free to the
     * remote cpu linked list and notify the target zone using a
     * passive message.
     *
     * The target zone cannot be deallocated while we own a chunk of it,
     * so the zone header's storage is stable until the very moment
     * we adjust z_RChunks.  After that we cannot safely dereference (z).
     *
     * (no critical section needed)
     */
    if (z->z_CpuGd != gd) {
#ifdef SMP
	/*
	 * Making these adjustments now allows us to avoid passing (type)
	 * to the remote cpu.  Note that ks_inuse/ks_memuse is being
	 * adjusted on OUR cpu, not the zone cpu, but it should all still
	 * sum up properly and cancel out.
	 */
	crit_enter();
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
	crit_exit();

	/*
	 * WARNING! This code competes with other cpus.  Once we
	 *	    successfully link the chunk to RChunks the remote
	 *	    cpu can rip z's storage out from under us.
	 *
	 *	    Bumping RCount prevents z's storage from getting
	 *	    ripped out.
	 */
	rsignal = z->z_RSignal;
	cpu_lfence();
	if (rsignal)
	    atomic_add_int(&z->z_RCount, 1);

	chunk = ptr;
	for (;;) {
	    bchunk = z->z_RChunks;
	    cpu_ccfence();
	    chunk->c_Next = bchunk;
	    cpu_sfence();

	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
		break;
	}

	/*
	 * We have to signal the remote cpu if our actions will cause
	 * the remote zone to be placed back on ZoneAry so it can
	 * move the zone back on.
	 *
	 * We only need to deal with NULL->non-NULL RChunk transitions
	 * and only if z_RSignal is set.  We interlock by reading rsignal
	 * before adding our chunk to RChunks.
	 * This should result in virtually no IPI traffic.
	 *
	 * We can use a passive IPI to reduce overhead even further.
	 */
	if (bchunk == NULL && rsignal) {
	    logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
	    lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
	    /* z can get ripped out from under us from this point on */
	} else if (rsignal) {
	    atomic_subtract_int(&z->z_RCount, 1);
	    /* z can get ripped out from under us from this point on */
	}
#else
	panic("Corrupt SLZone");
#endif
	logmemory_quick(free_end);
	return;
    }

    /*
     * kfree locally
     */
    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    crit_enter();
    chunk = ptr;
    chunk_mark_free(z, chunk);

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse.  Add
     * to the front of the linked list so it is more likely to be
     * reallocated, since it is already in our L1 cache.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
	panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_LChunks;
    z->z_LChunks = chunk;
    if (chunk->c_Next == NULL)
	z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
	panic("BADFREE2");
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
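     *
     * (The z_Next / ZoneAry test below simply checks that this zone is not
     * the only zone left for its index, i.e. that other zones remain to
     * allocate from.)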
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
	z->z_RCount == 0
    ) {
	SLZone **pz;
	int *kup;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Magic = -1;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
	kup = btokup(z);
	*kup = 0;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
	    ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
	    ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    vm_page_t mbase = NULL;
    vm_page_t m;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
	vm_map_unlock(&kernel_map);
	if ((flags & M_NULLOK) == 0)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	vm_map_entry_release(count);
	crit_exit();
	return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
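     *
     * (That is, the offset used for pages in kernel_object is the kernel
     * virtual address itself, which is why addr is passed as the object
     * offset below and OFF_TO_IDX(addr + i) names each page.)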
     */
    vm_object_hold(&kernel_object);
    vm_object_reference_locked(&kernel_object);
    vm_map_insert(&kernel_map, &count,
		  &kernel_object, addr, addr, addr + size,
		  VM_MAPTYPE_NORMAL,
		  VM_PROT_ALL, VM_PROT_ALL,
		  0);
    vm_object_drop(&kernel_object);
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    vm_map_unlock(&kernel_map);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
	base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
	base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
	base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
	panic("kmem_slab_alloc: bad flags %08x (%p)",
	      flags, ((int **)&size)[-1]);
    }

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag or map
     * them yet.  VM_ALLOC_NORMAL can only be set if we are not preempting.
     *
     * VM_ALLOC_SYSTEM is automatically set if we are preempting and
     * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
     * implied in this case), though I'm not sure if we really need to
     * do that.
     */
    vmflags = base_vmflags;
    if (flags & M_WAITOK) {
	if (td->td_preempted)
	    vmflags |= VM_ALLOC_SYSTEM;
	else
	    vmflags |= VM_ALLOC_NORMAL;
    }

    vm_object_hold(&kernel_object);
    for (i = 0; i < size; i += PAGE_SIZE) {
	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
	if (i == 0)
	    mbase = m;

	/*
	 * If the allocation failed we either return NULL or we retry.
	 *
	 * If M_WAITOK is specified we wait for more memory and retry.
	 * If M_WAITOK is specified from a preemption we yield instead of
	 * wait.  Livelock will not occur because the interrupt thread
	 * will not be preempting anyone the second time around after the
	 * yield.
	 */
	if (m == NULL) {
	    if (flags & M_WAITOK) {
		if (td->td_preempted) {
		    lwkt_switch();
		} else {
		    vm_wait(0);
		}
		i -= PAGE_SIZE;	/* retry */
		continue;
	    }
	    break;
	}
    }

    /*
     * Check and deal with an allocation failure
     */
    if (i != size) {
	while (i != 0) {
	    i -= PAGE_SIZE;
	    m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
	    /* page should already be busy */
	    vm_page_free(m);
	}
	vm_map_lock(&kernel_map);
	vm_map_delete(&kernel_map, addr, addr + size, &count);
	vm_map_unlock(&kernel_map);
	vm_object_drop(&kernel_object);

	vm_map_entry_release(count);
	crit_exit();
	return(NULL);
    }

    /*
     * Success!
     *
     * NOTE: The VM pages are still busied.  mbase points to the first one
     *	     but we have to iterate via vm_page_next()
     */
    vm_object_drop(&kernel_object);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    m = mbase;
    i = 0;

    while (i < size) {
	/*
	 * page should already be busy
	 */
	m->valid = VM_PAGE_BITS_ALL;
	vm_page_wire(m);
	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC, 1);
	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
	    bzero((char *)addr + i, PAGE_SIZE);
	vm_page_flag_clear(m, PG_ZERO);
	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
	vm_page_flag_set(m, PG_REFERENCED);
	vm_page_wakeup(m);

	i += PAGE_SIZE;
	vm_object_hold(&kernel_object);
	m = vm_page_next(m);
	vm_object_drop(&kernel_object);
    }
    smp_invltlb();
    vm_map_entry_release(count);
    return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
}
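
/*
 * Overall usage sketch (illustrative only; M_EXAMPLE and the sizes are
 * hypothetical, the calls are the entry points defined above):
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");
 *
 *	void *p = kmalloc(300, M_EXAMPLE, M_WAITOK | M_ZERO);
 *		- rounded up to a 320 byte chunk and carved out of a
 *		  per-cpu zone; the zones themselves are backed by
 *		  kmem_slab_alloc()
 *	void *big = kmalloc(64 * 1024, M_EXAMPLE, M_WAITOK);
 *		- >= ZoneLimit, so it bypasses the zones and goes straight
 *		  to kmem_slab_alloc()
 *	kfree(p, M_EXAMPLE);
 *	kfree(big, M_EXAMPLE);
 */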