/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
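
/*
 * Illustrative examples of the chunking table above (not part of the
 * build; figures assume PAGE_SIZE is 4K and the default 128K zone):
 *
 *	kmalloc(100, ...)    -> rounded up to 104 bytes  (8-byte chunking)
 *	kmalloc(300, ...)    -> rounded up to 320 bytes  (32-byte chunking)
 *	kmalloc(3000, ...)   -> rounded up to 3072 bytes (256-byte chunking)
 *	kmalloc(65536, ...)  -> >= ZoneLimit, handed straight to kmem
 */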

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
#endif
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

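/*
 * Summary of the per-page bookkeeping (descriptive only, derived from the
 * code below; see btokup() above):  each kernel page has an associated
 * ku_pagecnt word.  For an oversized allocation the first page's count
 * holds the size in pages (> 0), a value of 0 means free/unused, and a
 * slab zone stores -(cpu + 1) so kfree() can tell which cpu owns the zone.
 * Because zones are ZoneSize-aligned, a chunk pointer is mapped back to
 * its zone header simply by masking with ZoneMask, e.g. with a 128K zone
 * (ZoneMask == ~0x1ffff) a chunk at 0xffffffe012345678 belongs to the
 * zone header at 0xffffffe012340000.
 */
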
/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8	/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	32	/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
    &use_malloc_pattern, 0,
    "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
    size_t limsize;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;
    return (limsize / (1024 * 1024));
}

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;

    limsize = kmem_lim_size();
    usesize = (int)(limsize * 1024);	/* convert to KB */

    /*
     * If the machine has a large KVM space and more than 8G of ram,
     * double the zone release threshold to reduce SMP invalidations.
     * If more than 16G of ram, do it again.
     *
     * The BIOS eats a little ram so add some slop.  We want 8G worth of
     * memory sticks to trigger the first adjustment.
     */
    if (ZoneRelsThresh == ZONE_RELS_THRESH) {
        if (limsize >= 7 * 1024)
            ZoneRelsThresh *= 2;
        if (limsize >= 15 * 1024)
            ZoneRelsThresh *= 2;
    }

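    /*
     * Worked example (illustrative only): with 16GB of usable ram,
     * kmem_lim_size() reports roughly 16384MB, so both tests above fire
     * and ZoneRelsThresh becomes 32 * 2 * 2 = 128 cached free zones.
     * The loop below then scales ZoneSize to roughly 1/1024 of memory,
     * which on any reasonably sized machine clamps at
     * ZALLOC_MAX_ZONE_SIZE (typically 128K).
     */
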
    /*
     * Calculate the zone size.  This typically calculates to
     * ZALLOC_MAX_ZONE_SIZE
     */
    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;

    for (i = 0; i < NELEM(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
        kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = kmem_lim_size() * (1024 * 1024);
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef SMP
    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        kprintf("malloc_uninit: %ld bytes of '%s' still allocated\n",
                ttl, type->ks_shortdesc);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
        malloc_init(type);
    if (bytes == 0)
        bytes = KvaSize;
    if (type->ks_limit < bytes)
        type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
        type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
        type->ks_magic = M_MAGIC;
        type->ks_shortdesc = descr;
        malloc_init(type);
        *typep = type;
    }
}

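/*
 * Typical usage of a dynamically created pool (illustrative sketch only;
 * M_EXAMPLEPOOL and the surrounding caller code are hypothetical):
 *
 *	static struct malloc_type *M_EXAMPLEPOOL;
 *
 *	kmalloc_create(&M_EXAMPLEPOOL, "examplepool");
 *	p = kmalloc(sizeof(*p), M_EXAMPLEPOOL, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, M_EXAMPLEPOOL);
 *	kmalloc_destroy(&M_EXAMPLEPOOL);
 */
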
/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
        malloc_uninit(*typep);
        kfree(*typep, M_TEMP);
        *typep = NULL;
    }
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
    int i;
    int b = line & (SLAB_DEBUG_ENTRIES - 1);

    i = b;
    do {
        if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
            return;
        if (z->z_Sources[i].file == NULL)
            break;
        i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
    } while (i != b);
    z->z_Sources[i].file = file;
    z->z_Sources[i].line = line;
}

#endif

/*
 * kmalloc() (SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */

#ifdef SLAB_DEBUG
void *
kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
              const char *file, int line)
#else
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
    SLZone *z;
    SLChunk *chunk;
#ifdef SMP
    SLChunk *bchunk;
#endif
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

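    /*
     * Note (illustrative): malloc_init() above sets the default ks_limit
     * to one tenth of kmem_lim_size(), so a machine with ~4GB of usable
     * memory gives each malloc type a cap of roughly 400MB.  The loop
     * below only pays for the expensive per-cpu summation once the
     * loosely tracked ks_loosememuse counter crosses that limit.
     */
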
    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;	/* not MP synchronized */
        if ((ssize_t)ttl < 0)		/* deal with occasional race */
            ttl = 0;
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK) {
                logmemory(malloc_end, NULL, type, size, flags);
                return(NULL);
            }
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
        logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
        return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     *
     * NOTE: ku_pagecnt must be cleared before we free the slab or we
     *	     might race another cpu allocating the kva and setting
     *	     ku_pagecnt.
     */
    while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
            int *kup;

            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kup = btokup(z);
            *kup = 0;
            kmem_slab_free(z, ZoneSize);	/* may block */
            atomic_add_int(&ZoneGenAlloc, -ZoneSize / 1024);
        }
        crit_exit();
    }

    /*
     * XXX handle oversized frees that were queued from kfree().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            vm_size_t tsize;

            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            tsize = z->z_ChunkSize;
            kmem_slab_free(z, tsize);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on a SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
        int *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL) {
            logmemory(malloc_end, NULL, type, size, flags);
            return(NULL);
        }
        atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        *kup = size / PAGE_SIZE;
        crit_enter();
        goto done;
    }

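    /*
     * Illustrative examples of the test above (assuming 4K pages and the
     * default 16K ZALLOC_ZONE_LIMIT): a 64KB request satisfies
     * size >= ZoneLimit and is handed to kmem_slab_alloc(), as is a 12KB
     * request, because 12KB is an exact page multiple larger than two
     * pages.  An 8KB (two page) request still goes through the slab zones.
     */
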
    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();

    if ((z = slgd->ZoneAry[zi]) != NULL) {
        /*
         * Locate a chunk - we have to have at least one.  If this is the
         * last chunk go ahead and do the work to retrieve chunks freed
         * from remote cpus, and if the zone is still empty move it off
         * the ZoneAry.
         */
        if (--z->z_NFree <= 0) {
            KKASSERT(z->z_NFree == 0);

#ifdef SMP
            /*
             * WARNING! This code competes with other cpus.  It is ok
             * for us to not drain RChunks here but we might as well, and
             * it is ok if more accumulate after we're done.
             *
             * Set RSignal before pulling rchunks off, indicating that we
             * will be moving ourselves off of the ZoneAry.  Remote ends will
             * read RSignal before putting rchunks on thus interlocking
             * their IPI signaling.
             */
            if (z->z_RChunks == NULL)
                atomic_swap_int(&z->z_RSignal, 1);

            while ((bchunk = z->z_RChunks) != NULL) {
                cpu_ccfence();
                if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
                    *z->z_LChunksp = bchunk;
                    while (bchunk) {
                        chunk_mark_free(z, bchunk);
                        z->z_LChunksp = &bchunk->c_Next;
                        bchunk = bchunk->c_Next;
                        ++z->z_NFree;
                    }
                    break;
                }
            }
#endif
            /*
             * Remove from the zone list if no free chunks remain.
             * Clear RSignal
             */
            if (z->z_NFree == 0) {
                slgd->ZoneAry[zi] = z->z_Next;
                z->z_Next = NULL;
            } else {
                z->z_RSignal = 0;
            }
        }

        /*
         * Fast path, we have chunks available in z_LChunks.
         */
        chunk = z->z_LChunks;
        if (chunk) {
            chunk_mark_allocated(z, chunk);
            z->z_LChunks = chunk->c_Next;
            if (z->z_LChunks == NULL)
                z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
            slab_record_source(z, file, line);
#endif
            goto done;
        }

        /*
         * No chunks are available in LChunks, the free chunk MUST be
         * in the never-before-used memory area, controlled by UIndex.
         *
         * The consequences are very serious if our zone got corrupted so
         * we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            ++z->z_UIndex;
        else
            z->z_UIndex = 0;

        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");

        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
        slab_record_source(z, file, line);
#endif
        goto done;
    }

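    /*
     * Summary (descriptive only): z_LChunks is the list of chunks freed by
     * the owning cpu and is consumed above without any atomic ops, while
     * z_RChunks collects chunks freed by other cpus and is only drained
     * with atomic_cmpset_ptr() under the RSignal interlock.  Allocations
     * never touch another cpu's zone directly.
     */
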
    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;
        int *kup;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
            atomic_add_int(&ZoneGenAlloc, ZoneSize / 1024);
        }

        /*
         * How big is the base structure?
         */
#if defined(INVARIANTS)
        /*
         * Make room for z_Bitmap.  An exact calculation is somewhat more
         * complicated, so don't attempt one here; over-estimate slightly.
         */
        off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
        bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
        off = sizeof(SLZone);
#endif

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
        bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
        bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;	/* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }
        kup = btokup(z);
        *kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
        chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
        slab_record_source(z, file, line);
#endif

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                          & (ZALLOC_MAX_ZONE_SIZE - 1);
    }

done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();

    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
        if (use_malloc_pattern) {
            for (i = 0; i < size; i += sizeof(int)) {
                *(int *)((char *)chunk + i) = -1;
            }
        }
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
#endif
    logmemory(malloc_end, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc_end, NULL, type, size, flags);
    return(NULL);
}

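/*
 * Note on the power-of-2 test used above (illustrative): for a size that
 * is a power of 2, (size | (size - 1)) + 1 == size << 1, e.g.
 * (256 | 255) + 1 == 512 == 256 << 1.  For any other size the low bits do
 * not all fill in, e.g. (320 | 319) + 1 == 384 != 640, so such chunks only
 * get MIN_CHUNK_SIZE alignment.
 */
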
/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
               struct malloc_type *type, int flags,
               const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
    unsigned long osize;
    SLZone *z;
    void *nptr;
    int *kup;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(kmalloc_debug(size, type, flags, file, line));
    if (size == 0) {
        kfree(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
        osize = *kup << PAGE_SHIFT;
        if (osize == round_page(size))
            return(ptr);
        if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
            return(NULL);
        bcopy(ptr, nptr, min(size, osize));
        kfree(ptr, type);
        return(nptr);
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
        zoneindex(&size);
        if (z->z_ChunkSize == size)
            return(ptr);
    }
    if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
              const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
    bcopy(str, nstr, zlen);
    return(nstr);
}

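/*
 * Contract notes for krealloc() above (illustrative, buf is hypothetical):
 * krealloc(NULL, 64, M_TEMP, M_WAITOK) behaves like plain kmalloc(),
 * krealloc(buf, 0, M_TEMP, M_WAITOK) frees buf and returns NULL, and
 * growing an allocation within its existing chunk, e.g. from 100 to 104
 * bytes (both round to the 104-byte chunk), returns the original pointer
 * without copying.
 */
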
#ifdef SMP
/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static
void
kfree_remote(void *ptr)
{
    SLGlobalData *slgd;
    SLChunk *bchunk;
    SLZone *z;
    int nfree;
    int *kup;

    slgd = &mycpu->gd_slab;
    z = ptr;
    kup = btokup(z);
    KKASSERT(*kup == -((int)mycpuid + 1));
    KKASSERT(z->z_RCount > 0);
    atomic_subtract_int(&z->z_RCount, 1);

    logmemory(free_rem_beg, z, NULL, 0L, 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
    KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
    nfree = z->z_NFree;

    /*
     * Indicate that we will no longer be off of the ZoneAry by
     * clearing RSignal.
     */
    if (z->z_RChunks)
        z->z_RSignal = 0;

    /*
     * Atomically extract the bchunks list and then process it back
     * into the lchunks list.  We want to append our bchunks to the
     * lchunks list and not prepend since we likely do not have
     * cache mastership of the related data (not that it helps since
     * we are using c_Next).
     */
    while ((bchunk = z->z_RChunks) != NULL) {
        cpu_ccfence();
        if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
            *z->z_LChunksp = bchunk;
            while (bchunk) {
                chunk_mark_free(z, bchunk);
                z->z_LChunksp = &bchunk->c_Next;
                bchunk = bchunk->c_Next;
                ++z->z_NFree;
            }
            break;
        }
    }
    if (z->z_NFree && nfree == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     *
     * Do not move the zone if there is an IPI inflight, otherwise MP
     * races can result in our free_remote code accessing a destroyed
     * zone.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
        z->z_RCount == 0
    ) {
        SLZone **pz;
        int *kup;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
             z != *pz;
             pz = &(*pz)->z_Next) {
            ;
        }
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
        kup = btokup(z);
        *kup = 0;
    }
    logmemory(free_rem_end, z, bchunk, 0L, 0);
}

#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int *kup;
    unsigned long size;
#ifdef SMP
    SLChunk *bchunk;
    int rsignal;
#endif

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
        logmemory(free_zero, ptr, type, -1UL, 0);
        logmemory_quick(free_end);
        return;
    }

    /*
     * Panic on bad malloc type
     */
    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
        size = *kup << PAGE_SHIFT;
        *kup = 0;
#ifdef INVARIANTS
        KKASSERT(sizeof(weirdary) <= size);
        bcopy(weirdary, ptr, sizeof(weirdary));
#endif
        /*
         * NOTE: For oversized allocations we do not record the
         *	 originating cpu.  It gets freed on the cpu calling
         *	 kfree().  The statistics are in aggregate.
         *
         * note: XXX we have still inherited the interrupts-can't-block
         *	 assumption.  An interrupt thread does not bump
         *	 gd_intr_nesting_level so check TDF_INTTHREAD.  This is
         *	 primarily until we can fix softupdate's assumptions
         *	 about free().
         */
        crit_enter();
        --type->ks_inuse[gd->gd_cpuid];
        type->ks_memuse[gd->gd_cpuid] -= size;
        if (mycpu->gd_intr_nesting_level ||
            (gd->gd_curthread->td_flags & TDF_INTTHREAD))
        {
            logmemory(free_ovsz_delayed, ptr, type, size, 0);
            z = (SLZone *)ptr;
            z->z_Magic = ZALLOC_OVSZ_MAGIC;
            z->z_Next = slgd->FreeOvZones;
            z->z_ChunkSize = size;
            slgd->FreeOvZones = z;
            crit_exit();
        } else {
            crit_exit();
            logmemory(free_ovsz, ptr, type, size, 0);
            kmem_slab_free(ptr, size);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
        }
        logmemory_quick(free_end);
        return;
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then use atomic ops to free to the
     * remote cpu linked list and notify the target zone using a
     * passive message.
     *
     * The target zone cannot be deallocated while we own a chunk of it,
     * so the zone header's storage is stable until the very moment
     * we adjust z_RChunks.  After that we cannot safely dereference (z).
     *
     * (no critical section needed)
     */
    if (z->z_CpuGd != gd) {
#ifdef SMP
        /*
         * Making these adjustments now allows us to avoid passing (type)
         * to the remote cpu.  Note that ks_inuse/ks_memuse is being
         * adjusted on OUR cpu, not the zone cpu, but it should all still
         * sum up properly and cancel out.
         */
        crit_enter();
        --type->ks_inuse[gd->gd_cpuid];
        type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
        crit_exit();

        /*
         * WARNING! This code competes with other cpus.  Once we
         *	    successfully link the chunk to RChunks the remote
         *	    cpu can rip z's storage out from under us.
         *
         *	    Bumping RCount prevents z's storage from getting
         *	    ripped out.
         */
        rsignal = z->z_RSignal;
        cpu_lfence();
        if (rsignal)
            atomic_add_int(&z->z_RCount, 1);

        chunk = ptr;
        for (;;) {
            bchunk = z->z_RChunks;
            cpu_ccfence();
            chunk->c_Next = bchunk;
            cpu_sfence();

            if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
                break;
        }

        /*
         * We have to signal the remote cpu if our actions will cause
         * the remote zone to be placed back on its ZoneAry, so the
         * remote cpu can move it back on.
         *
         * We only need to deal with NULL->non-NULL RChunk transitions
         * and only if z_RSignal is set.  We interlock by reading rsignal
         * before adding our chunk to RChunks.  This should result in
         * virtually no IPI traffic.
         *
         * We can use a passive IPI to reduce overhead even further.
         */
        if (bchunk == NULL && rsignal) {
            logmemory(free_request, ptr, type, (unsigned long)z->z_ChunkSize, 0);
            lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
            /* z can get ripped out from under us from this point on */
        } else if (rsignal) {
            atomic_subtract_int(&z->z_RCount, 1);
            /* z can get ripped out from under us from this point on */
        }
#else
        panic("Corrupt SLZone");
#endif
        logmemory_quick(free_end);
        return;
    }

    /*
     * kfree locally
     */
    logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

    crit_enter();
    chunk = ptr;
    chunk_mark_free(z, chunk);

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse.  Add
     * to the front of the linked list so it is more likely to be
     * reallocated, since it is already in our L1 cache.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_LChunks;
    z->z_LChunks = chunk;
    if (chunk->c_Next == NULL)
        z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
        panic("BADFREE2");
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
        z->z_RCount == 0
    ) {
        SLZone **pz;
        int *kup;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
        kup = btokup(z);
        *kup = 0;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
            ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
            ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
            ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
            ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    vm_page_t mbase = NULL;
    vm_page_t m;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
        vm_map_unlock(&kernel_map);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        vm_map_entry_release(count);
        crit_exit();
        return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_hold(&kernel_object);
    vm_object_reference_locked(&kernel_object);
    vm_map_insert(&kernel_map, &count,
                  &kernel_object, addr, addr, addr + size,
                  VM_MAPTYPE_NORMAL,
                  VM_PROT_ALL, VM_PROT_ALL,
                  0);
    vm_object_drop(&kernel_object);
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    vm_map_unlock(&kernel_map);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
        panic("kmem_slab_alloc: bad flags %08x (%p)",
              flags, ((int **)&size)[-1]);
    }

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag or map
     * them yet.  VM_ALLOC_NORMAL can only be set if we are not preempting.
     *
     * VM_ALLOC_SYSTEM is automatically set if we are preempting and
     * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
     * implied in this case), though I'm not sure if we really need to
     * do that.
     */
    vmflags = base_vmflags;
    if (flags & M_WAITOK) {
        if (td->td_preempted)
            vmflags |= VM_ALLOC_SYSTEM;
        else
            vmflags |= VM_ALLOC_NORMAL;
    }

    vm_object_hold(&kernel_object);
    for (i = 0; i < size; i += PAGE_SIZE) {
        m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
        if (i == 0)
            mbase = m;

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * waiting.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    lwkt_switch();
                } else {
                    vm_wait(0);
                }
                i -= PAGE_SIZE;	/* retry */
                continue;
            }
            break;
        }
    }

    /*
     * Check and deal with an allocation failure
     */
    if (i != size) {
        while (i != 0) {
            i -= PAGE_SIZE;
            m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
            /* page should already be busy */
            vm_page_free(m);
        }
        vm_map_lock(&kernel_map);
        vm_map_delete(&kernel_map, addr, addr + size, &count);
        vm_map_unlock(&kernel_map);
        vm_object_drop(&kernel_object);

        vm_map_entry_release(count);
        crit_exit();
        return(NULL);
    }

    /*
     * Success!
     *
     * NOTE: The VM pages are still busied.  mbase points to the first one
     *	     but we have to iterate via vm_page_next()
     */
    vm_object_drop(&kernel_object);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    m = mbase;
    i = 0;

    while (i < size) {
        /*
         * page should already be busy
         */
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
        vm_page_flag_set(m, PG_REFERENCED);
        vm_page_wakeup(m);

        i += PAGE_SIZE;
        vm_object_hold(&kernel_object);
        m = vm_page_next(m);
        vm_object_drop(&kernel_object);
    }
    smp_invltlb();
    vm_map_entry_release(count);
    return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
}