/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is the zone size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *    Alloc Size      Chunking        Number of zones
 *       0-127            8               16
 *     128-255           16                8
 *     256-511           32                8
 *     512-1023          64                8
 *    1024-2047         128                8
 *    2048-4095         256                8
 *    4096-8191         512                8
 *    8192-16383       1024                8
 *   16384-32767       2048                8
 *    (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *    Allocations >= ZoneLimit go directly to kmem.
 *
 *                      API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + \
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)			\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
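/*
 * Summary of the conventions for the per-page kup entry returned by
 * btokup(), as used throughout this file:
 *
 *    *kup > 0    - first page of an oversized (direct kmem) allocation;
 *                  the value is the allocation's size in pages.
 *    *kup == 0   - page is free or untracked.
 *    *kup < 0    - page belongs to a slab zone owned by cpu (-*kup - 1),
 *                  i.e. *kup is set to -(cpu + 1).
 */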
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	32		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
    &use_malloc_pattern, 0,
    "Initialize memory to -1 if M_ZERO not specified");
#endif

static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;

    for (i = 0; i < NELEM(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
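/*
 * Worked example of the sizing logic above (a sketch; the exact result
 * depends on the ZALLOC_* bounds set in slaballoc.h):
 *
 *    256MB of ram -> limsize = 256MB, usesize = 262144 (KB).
 *    ZoneSize doubles from 32K while (ZoneSize << 1) < usesize,
 *    stopping at 128K.  ZoneLimit = ZoneSize / 4 = 32K, clipped to
 *    ZALLOC_ZONE_LIMIT.  With 4K pages, ZonePageCount = 32.
 */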
263 */ 264 void 265 malloc_init(void *data) 266 { 267 struct malloc_type *type = data; 268 size_t limsize; 269 270 if (type->ks_magic != M_MAGIC) 271 panic("malloc type lacks magic"); 272 273 if (type->ks_limit != 0) 274 return; 275 276 if (vmstats.v_page_count == 0) 277 panic("malloc_init not allowed before vm init"); 278 279 limsize = (size_t)vmstats.v_page_count * PAGE_SIZE; 280 if (limsize > KvaSize) 281 limsize = KvaSize; 282 type->ks_limit = limsize / 10; 283 284 type->ks_next = kmemstatistics; 285 kmemstatistics = type; 286 } 287 288 void 289 malloc_uninit(void *data) 290 { 291 struct malloc_type *type = data; 292 struct malloc_type *t; 293 #ifdef INVARIANTS 294 int i; 295 long ttl; 296 #endif 297 298 if (type->ks_magic != M_MAGIC) 299 panic("malloc type lacks magic"); 300 301 if (vmstats.v_page_count == 0) 302 panic("malloc_uninit not allowed before vm init"); 303 304 if (type->ks_limit == 0) 305 panic("malloc_uninit on uninitialized type"); 306 307 #ifdef SMP 308 /* Make sure that all pending kfree()s are finished. */ 309 lwkt_synchronize_ipiqs("muninit"); 310 #endif 311 312 #ifdef INVARIANTS 313 /* 314 * memuse is only correct in aggregation. Due to memory being allocated 315 * on one cpu and freed on another individual array entries may be 316 * negative or positive (canceling each other out). 317 */ 318 for (i = ttl = 0; i < ncpus; ++i) 319 ttl += type->ks_memuse[i]; 320 if (ttl) { 321 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n", 322 ttl, type->ks_shortdesc, i); 323 } 324 #endif 325 if (type == kmemstatistics) { 326 kmemstatistics = type->ks_next; 327 } else { 328 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { 329 if (t->ks_next == type) { 330 t->ks_next = type->ks_next; 331 break; 332 } 333 } 334 } 335 type->ks_next = NULL; 336 type->ks_limit = 0; 337 } 338 339 /* 340 * Increase the kmalloc pool limit for the specified pool. No changes 341 * are the made if the pool would shrink. 342 */ 343 void 344 kmalloc_raise_limit(struct malloc_type *type, size_t bytes) 345 { 346 if (type->ks_limit == 0) 347 malloc_init(type); 348 if (bytes == 0) 349 bytes = KvaSize; 350 if (type->ks_limit < bytes) 351 type->ks_limit = bytes; 352 } 353 354 /* 355 * Dynamically create a malloc pool. This function is a NOP if *typep is 356 * already non-NULL. 357 */ 358 void 359 kmalloc_create(struct malloc_type **typep, const char *descr) 360 { 361 struct malloc_type *type; 362 363 if (*typep == NULL) { 364 type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO); 365 type->ks_magic = M_MAGIC; 366 type->ks_shortdesc = descr; 367 malloc_init(type); 368 *typep = type; 369 } 370 } 371 372 /* 373 * Destroy a dynamically created malloc pool. This function is a NOP if 374 * the pool has already been destroyed. 375 */ 376 void 377 kmalloc_destroy(struct malloc_type **typep) 378 { 379 if (*typep != NULL) { 380 malloc_uninit(*typep); 381 kfree(*typep, M_TEMP); 382 *typep = NULL; 383 } 384 } 385 386 /* 387 * Calculate the zone index for the allocation request size and set the 388 * allocation request size to that particular zone's chunk size. 
389 */ 390 static __inline int 391 zoneindex(unsigned long *bytes) 392 { 393 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */ 394 if (n < 128) { 395 *bytes = n = (n + 7) & ~7; 396 return(n / 8 - 1); /* 8 byte chunks, 16 zones */ 397 } 398 if (n < 256) { 399 *bytes = n = (n + 15) & ~15; 400 return(n / 16 + 7); 401 } 402 if (n < 8192) { 403 if (n < 512) { 404 *bytes = n = (n + 31) & ~31; 405 return(n / 32 + 15); 406 } 407 if (n < 1024) { 408 *bytes = n = (n + 63) & ~63; 409 return(n / 64 + 23); 410 } 411 if (n < 2048) { 412 *bytes = n = (n + 127) & ~127; 413 return(n / 128 + 31); 414 } 415 if (n < 4096) { 416 *bytes = n = (n + 255) & ~255; 417 return(n / 256 + 39); 418 } 419 *bytes = n = (n + 511) & ~511; 420 return(n / 512 + 47); 421 } 422 #if ZALLOC_ZONE_LIMIT > 8192 423 if (n < 16384) { 424 *bytes = n = (n + 1023) & ~1023; 425 return(n / 1024 + 55); 426 } 427 #endif 428 #if ZALLOC_ZONE_LIMIT > 16384 429 if (n < 32768) { 430 *bytes = n = (n + 2047) & ~2047; 431 return(n / 2048 + 63); 432 } 433 #endif 434 panic("Unexpected byte count %d", n); 435 return(0); 436 } 437 438 #ifdef SLAB_DEBUG 439 /* 440 * Used to debug memory corruption issues. Record up to (typically 32) 441 * allocation sources for this zone (for a particular chunk size). 442 */ 443 444 static void 445 slab_record_source(SLZone *z, const char *file, int line) 446 { 447 int i; 448 int b = line & (SLAB_DEBUG_ENTRIES - 1); 449 450 i = b; 451 do { 452 if (z->z_Sources[i].file == file && z->z_Sources[i].line == line) 453 return; 454 if (z->z_Sources[i].file == NULL) 455 break; 456 i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1); 457 } while (i != b); 458 z->z_Sources[i].file = file; 459 z->z_Sources[i].line = line; 460 } 461 462 #endif 463 464 /* 465 * kmalloc() (SLAB ALLOCATOR) 466 * 467 * Allocate memory via the slab allocator. If the request is too large, 468 * or if it page-aligned beyond a certain size, we fall back to the 469 * KMEM subsystem. A SLAB tracking descriptor must be specified, use 470 * &SlabMisc if you don't care. 471 * 472 * M_RNOWAIT - don't block. 473 * M_NULLOK - return NULL instead of blocking. 474 * M_ZERO - zero the returned memory. 475 * M_USE_RESERVE - allow greater drawdown of the free list 476 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted 477 * 478 * MPSAFE 479 */ 480 481 #ifdef SLAB_DEBUG 482 void * 483 kmalloc_debug(unsigned long size, struct malloc_type *type, int flags, 484 const char *file, int line) 485 #else 486 void * 487 kmalloc(unsigned long size, struct malloc_type *type, int flags) 488 #endif 489 { 490 SLZone *z; 491 SLChunk *chunk; 492 #ifdef SMP 493 SLChunk *bchunk; 494 #endif 495 SLGlobalData *slgd; 496 struct globaldata *gd; 497 int zi; 498 #ifdef INVARIANTS 499 int i; 500 #endif 501 502 logmemory_quick(malloc_beg); 503 gd = mycpu; 504 slgd = &gd->gd_slab; 505 506 /* 507 * XXX silly to have this in the critical path. 508 */ 509 if (type->ks_limit == 0) { 510 crit_enter(); 511 if (type->ks_limit == 0) 512 malloc_init(type); 513 crit_exit(); 514 } 515 ++type->ks_calls; 516 517 /* 518 * Handle the case where the limit is reached. Panic if we can't return 519 * NULL. The original malloc code looped, but this tended to 520 * simply deadlock the computer. 521 * 522 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used 523 * to determine if a more complete limit check should be done. The 524 * actual memory use is tracked via ks_memuse[cpu]. 
525 */ 526 while (type->ks_loosememuse >= type->ks_limit) { 527 int i; 528 long ttl; 529 530 for (i = ttl = 0; i < ncpus; ++i) 531 ttl += type->ks_memuse[i]; 532 type->ks_loosememuse = ttl; /* not MP synchronized */ 533 if ((ssize_t)ttl < 0) /* deal with occassional race */ 534 ttl = 0; 535 if (ttl >= type->ks_limit) { 536 if (flags & M_NULLOK) { 537 logmemory(malloc_end, NULL, type, size, flags); 538 return(NULL); 539 } 540 panic("%s: malloc limit exceeded", type->ks_shortdesc); 541 } 542 } 543 544 /* 545 * Handle the degenerate size == 0 case. Yes, this does happen. 546 * Return a special pointer. This is to maintain compatibility with 547 * the original malloc implementation. Certain devices, such as the 548 * adaptec driver, not only allocate 0 bytes, they check for NULL and 549 * also realloc() later on. Joy. 550 */ 551 if (size == 0) { 552 logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags); 553 return(ZERO_LENGTH_PTR); 554 } 555 556 /* 557 * Handle hysteresis from prior frees here in malloc(). We cannot 558 * safely manipulate the kernel_map in free() due to free() possibly 559 * being called via an IPI message or from sensitive interrupt code. 560 * 561 * NOTE: ku_pagecnt must be cleared before we free the slab or we 562 * might race another cpu allocating the kva and setting 563 * ku_pagecnt. 564 */ 565 while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) { 566 crit_enter(); 567 if (slgd->NFreeZones > ZoneRelsThresh) { /* crit sect race */ 568 int *kup; 569 570 z = slgd->FreeZones; 571 slgd->FreeZones = z->z_Next; 572 --slgd->NFreeZones; 573 kup = btokup(z); 574 *kup = 0; 575 kmem_slab_free(z, ZoneSize); /* may block */ 576 atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024); 577 } 578 crit_exit(); 579 } 580 581 /* 582 * XXX handle oversized frees that were queued from kfree(). 583 */ 584 while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) { 585 crit_enter(); 586 if ((z = slgd->FreeOvZones) != NULL) { 587 vm_size_t tsize; 588 589 KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC); 590 slgd->FreeOvZones = z->z_Next; 591 tsize = z->z_ChunkSize; 592 kmem_slab_free(z, tsize); /* may block */ 593 atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024); 594 } 595 crit_exit(); 596 } 597 598 /* 599 * Handle large allocations directly. There should not be very many of 600 * these so performance is not a big issue. 601 * 602 * The backend allocator is pretty nasty on a SMP system. Use the 603 * slab allocator for one and two page-sized chunks even though we lose 604 * some efficiency. XXX maybe fix mmio and the elf loader instead. 605 */ 606 if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) { 607 int *kup; 608 609 size = round_page(size); 610 chunk = kmem_slab_alloc(size, PAGE_SIZE, flags); 611 if (chunk == NULL) { 612 logmemory(malloc_end, NULL, type, size, flags); 613 return(NULL); 614 } 615 atomic_add_int(&ZoneBigAlloc, (int)size / 1024); 616 flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */ 617 flags |= M_PASSIVE_ZERO; 618 kup = btokup(chunk); 619 *kup = size / PAGE_SIZE; 620 crit_enter(); 621 goto done; 622 } 623 624 /* 625 * Attempt to allocate out of an existing zone. First try the free list, 626 * then allocate out of unallocated space. If we find a good zone move 627 * it to the head of the list so later allocations find it quickly 628 * (we might have thousands of zones in the list). 629 * 630 * Note: zoneindex() will panic of size is too large. 
631 */ 632 zi = zoneindex(&size); 633 KKASSERT(zi < NZONES); 634 crit_enter(); 635 636 if ((z = slgd->ZoneAry[zi]) != NULL) { 637 /* 638 * Locate a chunk - we have to have at least one. If this is the 639 * last chunk go ahead and do the work to retrieve chunks freed 640 * from remote cpus, and if the zone is still empty move it off 641 * the ZoneAry. 642 */ 643 if (--z->z_NFree <= 0) { 644 KKASSERT(z->z_NFree == 0); 645 646 #ifdef SMP 647 /* 648 * WARNING! This code competes with other cpus. It is ok 649 * for us to not drain RChunks here but we might as well, and 650 * it is ok if more accumulate after we're done. 651 * 652 * Set RSignal before pulling rchunks off, indicating that we 653 * will be moving ourselves off of the ZoneAry. Remote ends will 654 * read RSignal before putting rchunks on thus interlocking 655 * their IPI signaling. 656 */ 657 if (z->z_RChunks == NULL) 658 atomic_swap_int(&z->z_RSignal, 1); 659 660 while ((bchunk = z->z_RChunks) != NULL) { 661 cpu_ccfence(); 662 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) { 663 *z->z_LChunksp = bchunk; 664 while (bchunk) { 665 chunk_mark_free(z, bchunk); 666 z->z_LChunksp = &bchunk->c_Next; 667 bchunk = bchunk->c_Next; 668 ++z->z_NFree; 669 } 670 break; 671 } 672 } 673 #endif 674 /* 675 * Remove from the zone list if no free chunks remain. 676 * Clear RSignal 677 */ 678 if (z->z_NFree == 0) { 679 slgd->ZoneAry[zi] = z->z_Next; 680 z->z_Next = NULL; 681 } else { 682 z->z_RSignal = 0; 683 } 684 } 685 686 /* 687 * Fast path, we have chunks available in z_LChunks. 688 */ 689 chunk = z->z_LChunks; 690 if (chunk) { 691 chunk_mark_allocated(z, chunk); 692 z->z_LChunks = chunk->c_Next; 693 if (z->z_LChunks == NULL) 694 z->z_LChunksp = &z->z_LChunks; 695 #ifdef SLAB_DEBUG 696 slab_record_source(z, file, line); 697 #endif 698 goto done; 699 } 700 701 /* 702 * No chunks are available in LChunks, the free chunk MUST be 703 * in the never-before-used memory area, controlled by UIndex. 704 * 705 * The consequences are very serious if our zone got corrupted so 706 * we use an explicit panic rather than a KASSERT. 707 */ 708 if (z->z_UIndex + 1 != z->z_NMax) 709 ++z->z_UIndex; 710 else 711 z->z_UIndex = 0; 712 713 if (z->z_UIndex == z->z_UEndIndex) 714 panic("slaballoc: corrupted zone"); 715 716 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 717 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 718 flags &= ~M_ZERO; 719 flags |= M_PASSIVE_ZERO; 720 } 721 chunk_mark_allocated(z, chunk); 722 #ifdef SLAB_DEBUG 723 slab_record_source(z, file, line); 724 #endif 725 goto done; 726 } 727 728 /* 729 * If all zones are exhausted we need to allocate a new zone for this 730 * index. Use M_ZERO to take advantage of pre-zerod pages. Also see 731 * UAlloc use above in regards to M_ZERO. Note that when we are reusing 732 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and 733 * we do not pre-zero it because we do not want to mess up the L1 cache. 734 * 735 * At least one subsystem, the tty code (see CROUND) expects power-of-2 736 * allocations to be power-of-2 aligned. We maintain compatibility by 737 * adjusting the base offset below. 
738 */ 739 { 740 int off; 741 int *kup; 742 743 if ((z = slgd->FreeZones) != NULL) { 744 slgd->FreeZones = z->z_Next; 745 --slgd->NFreeZones; 746 bzero(z, sizeof(SLZone)); 747 z->z_Flags |= SLZF_UNOTZEROD; 748 } else { 749 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO); 750 if (z == NULL) 751 goto fail; 752 atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024); 753 } 754 755 /* 756 * How big is the base structure? 757 */ 758 #if defined(INVARIANTS) 759 /* 760 * Make room for z_Bitmap. An exact calculation is somewhat more 761 * complicated so don't make an exact calculation. 762 */ 763 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]); 764 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8); 765 #else 766 off = sizeof(SLZone); 767 #endif 768 769 /* 770 * Guarentee power-of-2 alignment for power-of-2-sized chunks. 771 * Otherwise just 8-byte align the data. 772 */ 773 if ((size | (size - 1)) + 1 == (size << 1)) 774 off = (off + size - 1) & ~(size - 1); 775 else 776 off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK; 777 z->z_Magic = ZALLOC_SLAB_MAGIC; 778 z->z_ZoneIndex = zi; 779 z->z_NMax = (ZoneSize - off) / size; 780 z->z_NFree = z->z_NMax - 1; 781 z->z_BasePtr = (char *)z + off; 782 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax; 783 z->z_ChunkSize = size; 784 z->z_CpuGd = gd; 785 z->z_Cpu = gd->gd_cpuid; 786 z->z_LChunksp = &z->z_LChunks; 787 #ifdef SLAB_DEBUG 788 bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources)); 789 bzero(z->z_Sources, sizeof(z->z_Sources)); 790 #endif 791 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 792 z->z_Next = slgd->ZoneAry[zi]; 793 slgd->ZoneAry[zi] = z; 794 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 795 flags &= ~M_ZERO; /* already zero'd */ 796 flags |= M_PASSIVE_ZERO; 797 } 798 kup = btokup(z); 799 *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */ 800 chunk_mark_allocated(z, chunk); 801 #ifdef SLAB_DEBUG 802 slab_record_source(z, file, line); 803 #endif 804 805 /* 806 * Slide the base index for initial allocations out of the next 807 * zone we create so we do not over-weight the lower part of the 808 * cpu memory caches. 809 */ 810 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) 811 & (ZALLOC_MAX_ZONE_SIZE - 1); 812 } 813 814 done: 815 ++type->ks_inuse[gd->gd_cpuid]; 816 type->ks_memuse[gd->gd_cpuid] += size; 817 type->ks_loosememuse += size; /* not MP synchronized */ 818 crit_exit(); 819 820 if (flags & M_ZERO) 821 bzero(chunk, size); 822 #ifdef INVARIANTS 823 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) { 824 if (use_malloc_pattern) { 825 for (i = 0; i < size; i += sizeof(int)) { 826 *(int *)((char *)chunk + i) = -1; 827 } 828 } 829 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */ 830 } 831 #endif 832 logmemory(malloc_end, chunk, type, size, flags); 833 return(chunk); 834 fail: 835 crit_exit(); 836 logmemory(malloc_end, NULL, type, size, flags); 837 return(NULL); 838 } 839 840 /* 841 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE) 842 * 843 * Generally speaking this routine is not called very often and we do 844 * not attempt to optimize it beyond reusing the same pointer if the 845 * new size fits within the chunking of the old pointer's zone. 
846 */ 847 #ifdef SLAB_DEBUG 848 void * 849 krealloc_debug(void *ptr, unsigned long size, 850 struct malloc_type *type, int flags, 851 const char *file, int line) 852 #else 853 void * 854 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags) 855 #endif 856 { 857 unsigned long osize; 858 SLZone *z; 859 void *nptr; 860 int *kup; 861 862 KKASSERT((flags & M_ZERO) == 0); /* not supported */ 863 864 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) 865 return(kmalloc_debug(size, type, flags, file, line)); 866 if (size == 0) { 867 kfree(ptr, type); 868 return(NULL); 869 } 870 871 /* 872 * Handle oversized allocations. XXX we really should require that a 873 * size be passed to free() instead of this nonsense. 874 */ 875 kup = btokup(ptr); 876 if (*kup > 0) { 877 osize = *kup << PAGE_SHIFT; 878 if (osize == round_page(size)) 879 return(ptr); 880 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 881 return(NULL); 882 bcopy(ptr, nptr, min(size, osize)); 883 kfree(ptr, type); 884 return(nptr); 885 } 886 887 /* 888 * Get the original allocation's zone. If the new request winds up 889 * using the same chunk size we do not have to do anything. 890 */ 891 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 892 kup = btokup(z); 893 KKASSERT(*kup < 0); 894 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 895 896 /* 897 * Allocate memory for the new request size. Note that zoneindex has 898 * already adjusted the request size to the appropriate chunk size, which 899 * should optimize our bcopy(). Then copy and return the new pointer. 900 * 901 * Resizing a non-power-of-2 allocation to a power-of-2 size does not 902 * necessary align the result. 903 * 904 * We can only zoneindex (to align size to the chunk size) if the new 905 * size is not too large. 906 */ 907 if (size < ZoneLimit) { 908 zoneindex(&size); 909 if (z->z_ChunkSize == size) 910 return(ptr); 911 } 912 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL) 913 return(NULL); 914 bcopy(ptr, nptr, min(size, z->z_ChunkSize)); 915 kfree(ptr, type); 916 return(nptr); 917 } 918 919 /* 920 * Return the kmalloc limit for this type, in bytes. 921 */ 922 long 923 kmalloc_limit(struct malloc_type *type) 924 { 925 if (type->ks_limit == 0) { 926 crit_enter(); 927 if (type->ks_limit == 0) 928 malloc_init(type); 929 crit_exit(); 930 } 931 return(type->ks_limit); 932 } 933 934 /* 935 * Allocate a copy of the specified string. 936 * 937 * (MP SAFE) (MAY BLOCK) 938 */ 939 #ifdef SLAB_DEBUG 940 char * 941 kstrdup_debug(const char *str, struct malloc_type *type, 942 const char *file, int line) 943 #else 944 char * 945 kstrdup(const char *str, struct malloc_type *type) 946 #endif 947 { 948 int zlen; /* length inclusive of terminating NUL */ 949 char *nstr; 950 951 if (str == NULL) 952 return(NULL); 953 zlen = strlen(str) + 1; 954 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line); 955 bcopy(str, nstr, zlen); 956 return(nstr); 957 } 958 959 #ifdef SMP 960 /* 961 * Notify our cpu that a remote cpu has freed some chunks in a zone that 962 * we own. RCount will be bumped so the memory should be good, but validate 963 * that it really is. 
964 */ 965 static 966 void 967 kfree_remote(void *ptr) 968 { 969 SLGlobalData *slgd; 970 SLChunk *bchunk; 971 SLZone *z; 972 int nfree; 973 int *kup; 974 975 slgd = &mycpu->gd_slab; 976 z = ptr; 977 kup = btokup(z); 978 KKASSERT(*kup == -((int)mycpuid + 1)); 979 KKASSERT(z->z_RCount > 0); 980 atomic_subtract_int(&z->z_RCount, 1); 981 982 logmemory(free_rem_beg, z, NULL, 0, 0); 983 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 984 KKASSERT(z->z_Cpu == mycpu->gd_cpuid); 985 nfree = z->z_NFree; 986 987 /* 988 * Indicate that we will no longer be off of the ZoneAry by 989 * clearing RSignal. 990 */ 991 if (z->z_RChunks) 992 z->z_RSignal = 0; 993 994 /* 995 * Atomically extract the bchunks list and then process it back 996 * into the lchunks list. We want to append our bchunks to the 997 * lchunks list and not prepend since we likely do not have 998 * cache mastership of the related data (not that it helps since 999 * we are using c_Next). 1000 */ 1001 while ((bchunk = z->z_RChunks) != NULL) { 1002 cpu_ccfence(); 1003 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) { 1004 *z->z_LChunksp = bchunk; 1005 while (bchunk) { 1006 chunk_mark_free(z, bchunk); 1007 z->z_LChunksp = &bchunk->c_Next; 1008 bchunk = bchunk->c_Next; 1009 ++z->z_NFree; 1010 } 1011 break; 1012 } 1013 } 1014 if (z->z_NFree && nfree == 0) { 1015 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex]; 1016 slgd->ZoneAry[z->z_ZoneIndex] = z; 1017 } 1018 1019 /* 1020 * If the zone becomes totally free, and there are other zones we 1021 * can allocate from, move this zone to the FreeZones list. Since 1022 * this code can be called from an IPI callback, do *NOT* try to mess 1023 * with kernel_map here. Hysteresis will be performed at malloc() time. 1024 * 1025 * Do not move the zone if there is an IPI inflight, otherwise MP 1026 * races can result in our free_remote code accessing a destroyed 1027 * zone. 1028 */ 1029 if (z->z_NFree == z->z_NMax && 1030 (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) && 1031 z->z_RCount == 0 1032 ) { 1033 SLZone **pz; 1034 int *kup; 1035 1036 for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; 1037 z != *pz; 1038 pz = &(*pz)->z_Next) { 1039 ; 1040 } 1041 *pz = z->z_Next; 1042 z->z_Magic = -1; 1043 z->z_Next = slgd->FreeZones; 1044 slgd->FreeZones = z; 1045 ++slgd->NFreeZones; 1046 kup = btokup(z); 1047 *kup = 0; 1048 } 1049 logmemory(free_rem_end, z, bchunk, 0, 0); 1050 } 1051 1052 #endif 1053 1054 /* 1055 * free (SLAB ALLOCATOR) 1056 * 1057 * Free a memory block previously allocated by malloc. Note that we do not 1058 * attempt to update ks_loosememuse as MP races could prevent us from 1059 * checking memory limits in malloc. 1060 * 1061 * MPSAFE 1062 */ 1063 void 1064 kfree(void *ptr, struct malloc_type *type) 1065 { 1066 SLZone *z; 1067 SLChunk *chunk; 1068 SLGlobalData *slgd; 1069 struct globaldata *gd; 1070 int *kup; 1071 unsigned long size; 1072 #ifdef SMP 1073 SLChunk *bchunk; 1074 int rsignal; 1075 #endif 1076 1077 logmemory_quick(free_beg); 1078 gd = mycpu; 1079 slgd = &gd->gd_slab; 1080 1081 if (ptr == NULL) 1082 panic("trying to free NULL pointer"); 1083 1084 /* 1085 * Handle special 0-byte allocations 1086 */ 1087 if (ptr == ZERO_LENGTH_PTR) { 1088 logmemory(free_zero, ptr, type, -1, 0); 1089 logmemory_quick(free_end); 1090 return; 1091 } 1092 1093 /* 1094 * Panic on bad malloc type 1095 */ 1096 if (type->ks_magic != M_MAGIC) 1097 panic("free: malloc type lacks magic"); 1098 1099 /* 1100 * Handle oversized allocations. 
    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    kup = btokup(ptr);
    if (*kup > 0) {
	size = *kup << PAGE_SHIFT;
	*kup = 0;
#ifdef INVARIANTS
	KKASSERT(sizeof(weirdary) <= size);
	bcopy(weirdary, ptr, sizeof(weirdary));
#endif
	/*
	 * NOTE: For oversized allocations we do not record the
	 *	 originating cpu.  It gets freed on the cpu calling
	 *	 kfree().  The statistics are in aggregate.
	 *
	 * note: XXX we have still inherited the interrupts-can't-block
	 *	 assumption.  An interrupt thread does not bump
	 *	 gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	 *	 primarily until we can fix softupdate's assumptions
	 *	 about free().
	 */
	crit_enter();
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= size;
	if (mycpu->gd_intr_nesting_level ||
	    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
	{
	    logmemory(free_ovsz_delayed, ptr, type, size, 0);
	    z = (SLZone *)ptr;
	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
	    z->z_Next = slgd->FreeOvZones;
	    z->z_ChunkSize = size;
	    slgd->FreeOvZones = z;
	    crit_exit();
	} else {
	    crit_exit();
	    logmemory(free_ovsz, ptr, type, size, 0);
	    kmem_slab_free(ptr, size);	/* may block */
	    atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
	}
	logmemory_quick(free_end);
	return;
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(*kup < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then use atomic ops to free to the
     * remote cpu linked list and notify the target zone using a
     * passive message.
     *
     * The target zone cannot be deallocated while we own a chunk of it,
     * so the zone header's storage is stable until the very moment
     * we adjust z_RChunks.  After that we cannot safely dereference (z).
     *
     * (no critical section needed)
     */
    if (z->z_CpuGd != gd) {
#ifdef SMP
	/*
	 * Making these adjustments now allows us to avoid passing (type)
	 * to the remote cpu.  Note that ks_inuse/ks_memuse is being
	 * adjusted on OUR cpu, not the zone cpu, but it should all still
	 * sum up properly and cancel out.
	 */
	crit_enter();
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
	crit_exit();

	/*
	 * WARNING! This code competes with other cpus.  Once we
	 *	    successfully link the chunk to RChunks the remote
	 *	    cpu can rip z's storage out from under us.
	 *
	 *	    Bumping RCount prevents z's storage from getting
	 *	    ripped out.
	 */
	rsignal = z->z_RSignal;
	cpu_lfence();
	if (rsignal)
	    atomic_add_int(&z->z_RCount, 1);

	chunk = ptr;
	for (;;) {
	    bchunk = z->z_RChunks;
	    cpu_ccfence();
	    chunk->c_Next = bchunk;
	    cpu_sfence();

	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
		break;
	}

	/*
	 * We have to signal the remote cpu if our actions will cause
	 * the remote zone to be placed back on ZoneAry so it can
	 * move the zone back on.
	 *
	 * We only need to deal with NULL->non-NULL RChunk transitions
	 * and only if z_RSignal is set.  We interlock by reading rsignal
	 * before adding our chunk to RChunks.
	 * This should result in virtually no IPI traffic.
	 *
	 * We can use a passive IPI to reduce overhead even further.
	 */
	if (bchunk == NULL && rsignal) {
	    logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
	    lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
	    /* z can get ripped out from under us from this point on */
	} else if (rsignal) {
	    atomic_subtract_int(&z->z_RCount, 1);
	    /* z can get ripped out from under us from this point on */
	}
#else
	panic("Corrupt SLZone");
#endif
	logmemory_quick(free_end);
	return;
    }

    /*
     * kfree locally
     */
    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    crit_enter();
    chunk = ptr;
    chunk_mark_free(z, chunk);

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse.  Add
     * to the front of the linked list so it is more likely to be
     * reallocated, since it is already in our L1 cache.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
	panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_LChunks;
    z->z_LChunks = chunk;
    if (chunk->c_Next == NULL)
	z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
	panic("BADFREE2");
#endif

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
1287 */ 1288 if (z->z_NFree == z->z_NMax && 1289 (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) && 1290 z->z_RCount == 0 1291 ) { 1292 SLZone **pz; 1293 int *kup; 1294 1295 for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next) 1296 ; 1297 *pz = z->z_Next; 1298 z->z_Magic = -1; 1299 z->z_Next = slgd->FreeZones; 1300 slgd->FreeZones = z; 1301 ++slgd->NFreeZones; 1302 kup = btokup(z); 1303 *kup = 0; 1304 } 1305 logmemory_quick(free_end); 1306 crit_exit(); 1307 } 1308 1309 #if defined(INVARIANTS) 1310 1311 /* 1312 * Helper routines for sanity checks 1313 */ 1314 static 1315 void 1316 chunk_mark_allocated(SLZone *z, void *chunk) 1317 { 1318 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1319 __uint32_t *bitptr; 1320 1321 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1322 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1323 ("memory chunk %p bit index %d is illegal", chunk, bitdex)); 1324 bitptr = &z->z_Bitmap[bitdex >> 5]; 1325 bitdex &= 31; 1326 KASSERT((*bitptr & (1 << bitdex)) == 0, 1327 ("memory chunk %p is already allocated!", chunk)); 1328 *bitptr |= 1 << bitdex; 1329 } 1330 1331 static 1332 void 1333 chunk_mark_free(SLZone *z, void *chunk) 1334 { 1335 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1336 __uint32_t *bitptr; 1337 1338 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1339 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1340 ("memory chunk %p bit index %d is illegal!", chunk, bitdex)); 1341 bitptr = &z->z_Bitmap[bitdex >> 5]; 1342 bitdex &= 31; 1343 KASSERT((*bitptr & (1 << bitdex)) != 0, 1344 ("memory chunk %p is already free!", chunk)); 1345 *bitptr &= ~(1 << bitdex); 1346 } 1347 1348 #endif 1349 1350 /* 1351 * kmem_slab_alloc() 1352 * 1353 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the 1354 * specified alignment. M_* flags are expected in the flags field. 1355 * 1356 * Alignment must be a multiple of PAGE_SIZE. 1357 * 1358 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(), 1359 * but when we move zalloc() over to use this function as its backend 1360 * we will have to switch to kreserve/krelease and call reserve(0) 1361 * after the new space is made available. 1362 * 1363 * Interrupt code which has preempted other code is not allowed to 1364 * use PQ_CACHE pages. However, if an interrupt thread is run 1365 * non-preemptively or blocks and then runs non-preemptively, then 1366 * it is free to use PQ_CACHE pages. 1367 */ 1368 static void * 1369 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) 1370 { 1371 vm_size_t i; 1372 vm_offset_t addr; 1373 int count, vmflags, base_vmflags; 1374 vm_page_t mp[ZALLOC_MAX_ZONE_SIZE / PAGE_SIZE]; 1375 thread_t td; 1376 1377 size = round_page(size); 1378 addr = vm_map_min(&kernel_map); 1379 1380 /* 1381 * Reserve properly aligned space from kernel_map. RNOWAIT allocations 1382 * cannot block. 1383 */ 1384 if (flags & M_RNOWAIT) { 1385 if (lwkt_trytoken(&vm_token) == 0) 1386 return(NULL); 1387 } else { 1388 lwkt_gettoken(&vm_token); 1389 } 1390 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1391 crit_enter(); 1392 vm_map_lock(&kernel_map); 1393 if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) { 1394 vm_map_unlock(&kernel_map); 1395 if ((flags & M_NULLOK) == 0) 1396 panic("kmem_slab_alloc(): kernel_map ran out of space!"); 1397 vm_map_entry_release(count); 1398 crit_exit(); 1399 lwkt_reltoken(&vm_token); 1400 return(NULL); 1401 } 1402 1403 /* 1404 * kernel_object maps 1:1 to kernel_map. 
1405 */ 1406 vm_object_reference(&kernel_object); 1407 vm_map_insert(&kernel_map, &count, 1408 &kernel_object, addr, addr, addr + size, 1409 VM_MAPTYPE_NORMAL, 1410 VM_PROT_ALL, VM_PROT_ALL, 1411 0); 1412 1413 td = curthread; 1414 1415 base_vmflags = 0; 1416 if (flags & M_ZERO) 1417 base_vmflags |= VM_ALLOC_ZERO; 1418 if (flags & M_USE_RESERVE) 1419 base_vmflags |= VM_ALLOC_SYSTEM; 1420 if (flags & M_USE_INTERRUPT_RESERVE) 1421 base_vmflags |= VM_ALLOC_INTERRUPT; 1422 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) { 1423 panic("kmem_slab_alloc: bad flags %08x (%p)", 1424 flags, ((int **)&size)[-1]); 1425 } 1426 1427 1428 /* 1429 * Allocate the pages. Do not mess with the PG_ZERO flag yet. 1430 */ 1431 for (i = 0; i < size; i += PAGE_SIZE) { 1432 vm_page_t m; 1433 1434 /* 1435 * VM_ALLOC_NORMAL can only be set if we are not preempting. 1436 * 1437 * VM_ALLOC_SYSTEM is automatically set if we are preempting and 1438 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is 1439 * implied in this case), though I'm not sure if we really need to 1440 * do that. 1441 */ 1442 vmflags = base_vmflags; 1443 if (flags & M_WAITOK) { 1444 if (td->td_preempted) 1445 vmflags |= VM_ALLOC_SYSTEM; 1446 else 1447 vmflags |= VM_ALLOC_NORMAL; 1448 } 1449 1450 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags); 1451 if (i / PAGE_SIZE < NELEM(mp)) 1452 mp[i / PAGE_SIZE] = m; 1453 1454 /* 1455 * If the allocation failed we either return NULL or we retry. 1456 * 1457 * If M_WAITOK is specified we wait for more memory and retry. 1458 * If M_WAITOK is specified from a preemption we yield instead of 1459 * wait. Livelock will not occur because the interrupt thread 1460 * will not be preempting anyone the second time around after the 1461 * yield. 1462 */ 1463 if (m == NULL) { 1464 if (flags & M_WAITOK) { 1465 if (td->td_preempted) { 1466 vm_map_unlock(&kernel_map); 1467 lwkt_switch(); 1468 vm_map_lock(&kernel_map); 1469 } else { 1470 vm_map_unlock(&kernel_map); 1471 vm_wait(0); 1472 vm_map_lock(&kernel_map); 1473 } 1474 i -= PAGE_SIZE; /* retry */ 1475 continue; 1476 } 1477 1478 /* 1479 * We were unable to recover, cleanup and return NULL 1480 * 1481 * (vm_token already held) 1482 */ 1483 while (i != 0) { 1484 i -= PAGE_SIZE; 1485 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); 1486 /* page should already be busy */ 1487 vm_page_free(m); 1488 } 1489 vm_map_delete(&kernel_map, addr, addr + size, &count); 1490 vm_map_unlock(&kernel_map); 1491 vm_map_entry_release(count); 1492 crit_exit(); 1493 lwkt_reltoken(&vm_token); 1494 return(NULL); 1495 } 1496 } 1497 1498 /* 1499 * Success! 1500 * 1501 * Mark the map entry as non-pageable using a routine that allows us to 1502 * populate the underlying pages. 1503 * 1504 * The pages were busied by the allocations above. 1505 */ 1506 vm_map_set_wired_quick(&kernel_map, addr, size, &count); 1507 crit_exit(); 1508 1509 /* 1510 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO. 
1511 */ 1512 for (i = 0; i < size; i += PAGE_SIZE) { 1513 vm_page_t m; 1514 1515 if (i / PAGE_SIZE < NELEM(mp)) 1516 m = mp[i / PAGE_SIZE]; 1517 else 1518 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); 1519 m->valid = VM_PAGE_BITS_ALL; 1520 /* page should already be busy */ 1521 vm_page_wire(m); 1522 pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1); 1523 if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO)) 1524 bzero((char *)addr + i, PAGE_SIZE); 1525 vm_page_flag_clear(m, PG_ZERO); 1526 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED)); 1527 vm_page_flag_set(m, PG_REFERENCED); 1528 vm_page_wakeup(m); 1529 } 1530 vm_map_unlock(&kernel_map); 1531 vm_map_entry_release(count); 1532 lwkt_reltoken(&vm_token); 1533 return((void *)addr); 1534 } 1535 1536 /* 1537 * kmem_slab_free() 1538 */ 1539 static void 1540 kmem_slab_free(void *ptr, vm_size_t size) 1541 { 1542 crit_enter(); 1543 lwkt_gettoken(&vm_token); 1544 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); 1545 lwkt_reltoken(&vm_token); 1546 crit_exit(); 1547 } 1548 1549