/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
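 *
 * As a rough illustration of the normal path (sizes per the table below):
 * a kmalloc(100, ...) request is rounded up to the nearest 8-byte boundary
 * (104 bytes) and served from the matching per-cpu zone, wasting at most
 * 7 bytes, while a request at or above ZoneLimit (e.g. 64KB) bypasses the
 * zones entirely and goes to the kmem backend.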
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) +	\
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end", 0);

#define logmemory(name, ptr, type, size, flags)				\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)						\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	&use_malloc_pattern, 0,
	"Initialize memory to -1 if M_ZERO not specified");
#endif

SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");

static void
kmeminit(void *dummy)
{
	size_t limsize;
	int usesize;
	int i;

	limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
	if (limsize > KvaSize)
		limsize = KvaSize;

	usesize = (int)(limsize / 1024);	/* convert to KB */

	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
	while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
		ZoneSize <<= 1;
	ZoneLimit = ZoneSize / 4;
	if (ZoneLimit > ZALLOC_ZONE_LIMIT)
		ZoneLimit = ZALLOC_ZONE_LIMIT;
	ZoneMask = ~(uintptr_t)(ZoneSize - 1);
	ZonePageCount = ZoneSize / PAGE_SIZE;

	for (i = 0; i < NELEM(weirdary); ++i)
		weirdary[i] = WEIRD_ADDR;

	ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

	if (bootverbose)
		kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
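 *
 * A type is considered uninitialized while its ks_limit is 0; kmalloc(),
 * kmalloc_limit() and kmalloc_create() all call malloc_init() lazily on
 * first use, so callers rarely need to invoke it directly.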
 */
void
malloc_init(void *data)
{
	struct malloc_type *type = data;
	size_t limsize;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (vmstats.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
	if (limsize > KvaSize)
		limsize = KvaSize;
	type->ks_limit = limsize / 10;

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = data;
	struct malloc_type *t;
#ifdef INVARIANTS
	int i;
	long ttl;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (vmstats.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

#ifdef SMP
	/* Make sure that all pending kfree()s are finished. */
	lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
	/*
	 * memuse is only correct in aggregation.  Due to memory being
	 * allocated on one cpu and freed on another, individual array
	 * entries may be negative or positive (canceling each other out).
	 */
	for (i = ttl = 0; i < ncpus; ++i)
		ttl += type->ks_memuse[i];
	if (ttl) {
		kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
			ttl, type->ks_shortdesc, i);
	}
#endif
	if (type == kmemstatistics) {
		kmemstatistics = type->ks_next;
	} else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
	if (type->ks_limit == 0)
		malloc_init(type);
	if (bytes == 0)
		bytes = KvaSize;
	if (type->ks_limit < bytes)
		type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		malloc_init(type);
		*typep = type;
	}
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
	if (*typep != NULL) {
		malloc_uninit(*typep);
		kfree(*typep, M_TEMP);
		*typep = NULL;
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
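 *
 * As a worked example (the values follow directly from the code below):
 * a request for 100 bytes is rounded up to a 104-byte chunk and maps to
 * zone index 12, while a request for 3000 bytes is rounded up to 3072
 * bytes (256-byte chunking) and maps to zone index 51.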
 */
static __inline int
zoneindex(unsigned long *bytes)
{
	unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		return(n / 2048 + 63);
	}
#endif
	panic("Unexpected byte count %d", n);
	return(0);
}

/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
	SLZone *z;
	SLChunk *chunk;
#ifdef SMP
	SLChunk *bchunk;
#endif
	SLGlobalData *slgd;
	struct globaldata *gd;
	int zi;
#ifdef INVARIANTS
	int i;
#endif

	logmemory_quick(malloc_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	/*
	 * XXX silly to have this in the critical path.
	 */
	if (type->ks_limit == 0) {
		crit_enter();
		if (type->ks_limit == 0)
			malloc_init(type);
		crit_exit();
	}
	++type->ks_calls;

	/*
	 * Handle the case where the limit is reached.  Panic if we can't
	 * return NULL.  The original malloc code looped, but this tended to
	 * simply deadlock the computer.
	 *
	 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
	 * to determine if a more complete limit check should be done.  The
	 * actual memory use is tracked via ks_memuse[cpu].
	 */
	while (type->ks_loosememuse >= type->ks_limit) {
		int i;
		long ttl;

		for (i = ttl = 0; i < ncpus; ++i)
			ttl += type->ks_memuse[i];
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK) {
				logmemory(malloc_end, NULL, type, size, flags);
				return(NULL);
			}
			panic("%s: malloc limit exceeded", type->ks_shortdesc);
		}
	}

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
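	 *
	 * The special ZERO_LENGTH_PTR value is recognized and handled
	 * explicitly by kfree() and krealloc() below.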
	 */
	if (size == 0) {
		logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle hysteresis from prior frees here in malloc().  We cannot
	 * safely manipulate the kernel_map in free() due to free() possibly
	 * being called via an IPI message or from sensitive interrupt code.
	 *
	 * NOTE: ku_pagecnt must be cleared before we free the slab or we
	 *	 might race another cpu allocating the kva and setting
	 *	 ku_pagecnt.
	 */
	while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
			int *kup;

			z = slgd->FreeZones;
			slgd->FreeZones = z->z_Next;
			--slgd->NFreeZones;
			kup = btokup(z);
			*kup = 0;
			kmem_slab_free(z, ZoneSize);	/* may block */
			atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
		}
		crit_exit();
	}

	/*
	 * XXX handle oversized frees that were queued from kfree().
	 */
	while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if ((z = slgd->FreeOvZones) != NULL) {
			vm_size_t tsize;

			KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
			slgd->FreeOvZones = z->z_Next;
			tsize = z->z_ChunkSize;
			kmem_slab_free(z, tsize);	/* may block */
			atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
		}
		crit_exit();
	}

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on a SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.  XXX maybe fix mmio and the elf loader
	 * instead.
	 */
	if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		int *kup;

		size = round_page(size);
		chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL) {
			logmemory(malloc_end, NULL, type, size, flags);
			return(NULL);
		}
		atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
		flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
		flags |= M_PASSIVE_ZERO;
		kup = btokup(chunk);
		*kup = size / PAGE_SIZE;
		crit_enter();
		goto done;
	}

	/*
	 * Attempt to allocate out of an existing zone.  First try the free
	 * list, then allocate out of unallocated space.  If we find a good
	 * zone move it to the head of the list so later allocations find it
	 * quickly (we might have thousands of zones in the list).
	 *
	 * Note: zoneindex() will panic if size is too large.
	 */
	zi = zoneindex(&size);
	KKASSERT(zi < NZONES);
	crit_enter();

	if ((z = slgd->ZoneAry[zi]) != NULL) {
		/*
		 * Locate a chunk - we have to have at least one.  If this is
		 * the last chunk go ahead and do the work to retrieve chunks
		 * freed from remote cpus, and if the zone is still empty move
		 * it off the ZoneAry.
		 */
		if (--z->z_NFree <= 0) {
			KKASSERT(z->z_NFree == 0);

#ifdef SMP
			/*
			 * WARNING! This code competes with other cpus.  It is ok
			 * for us to not drain RChunks here but we might as well,
			 * and it is ok if more accumulate after we're done.
			 *
			 * Set RSignal before pulling rchunks off, indicating that
			 * we will be moving ourselves off of the ZoneAry.  Remote
			 * ends will read RSignal before putting rchunks on thus
			 * interlocking their IPI signaling.
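			 *
			 * (The matching read of RSignal by the freeing cpu is in
			 * kfree() below, just before it pushes onto z_RChunks.)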
			 */
			if (z->z_RChunks == NULL)
				atomic_swap_int(&z->z_RSignal, 1);

			while ((bchunk = z->z_RChunks) != NULL) {
				cpu_ccfence();
				if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
					*z->z_LChunksp = bchunk;
					while (bchunk) {
						chunk_mark_free(z, bchunk);
						z->z_LChunksp = &bchunk->c_Next;
						bchunk = bchunk->c_Next;
						++z->z_NFree;
					}
					break;
				}
			}
#endif
			/*
			 * Remove from the zone list if no free chunks remain.
			 * Clear RSignal
			 */
			if (z->z_NFree == 0) {
				slgd->ZoneAry[zi] = z->z_Next;
				z->z_Next = NULL;
			} else {
				z->z_RSignal = 0;
			}
		}

		/*
		 * Fast path, we have chunks available in z_LChunks.
		 */
		chunk = z->z_LChunks;
		if (chunk) {
			chunk_mark_allocated(z, chunk);
			z->z_LChunks = chunk->c_Next;
			if (z->z_LChunks == NULL)
				z->z_LChunksp = &z->z_LChunks;
			goto done;
		}

		/*
		 * No chunks are available in LChunks, the free chunk MUST be
		 * in the never-before-used memory area, controlled by UIndex.
		 *
		 * The consequences are very serious if our zone got corrupted
		 * so we use an explicit panic rather than a KASSERT.
		 */
		if (z->z_UIndex + 1 != z->z_NMax)
			++z->z_UIndex;
		else
			z->z_UIndex = 0;

		if (z->z_UIndex == z->z_UEndIndex)
			panic("slaballoc: corrupted zone");

		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;
			flags |= M_PASSIVE_ZERO;
		}
		chunk_mark_allocated(z, chunk);
		goto done;
	}

	/*
	 * If all zones are exhausted we need to allocate a new zone for this
	 * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
	 * UAlloc use above in regards to M_ZERO.  Note that when we are
	 * reusing a zone from the FreeZones list, UAlloc'd data will not be
	 * zero'd, and we do not pre-zero it because we do not want to mess up
	 * the L1 cache.
	 *
	 * At least one subsystem, the tty code (see CROUND), expects power-of-2
	 * allocations to be power-of-2 aligned.  We maintain compatibility by
	 * adjusting the base offset below.
	 */
	{
		int off;
		int *kup;

		if ((z = slgd->FreeZones) != NULL) {
			slgd->FreeZones = z->z_Next;
			--slgd->NFreeZones;
			bzero(z, sizeof(SLZone));
			z->z_Flags |= SLZF_UNOTZEROD;
		} else {
			z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
			if (z == NULL)
				goto fail;
			atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
		}

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is somewhat
		 * more complicated, so just overestimate here.
		 */
		off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(SLZone);
#endif

		/*
		 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
		 * Otherwise just 8-byte align the data.
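		 *
		 * The (size | (size - 1)) + 1 == (size << 1) test below is true
		 * only for powers of 2; e.g. for size 256 it computes
		 * (256 | 255) + 1 == 512, so the header offset is rounded up to
		 * a multiple of 256 and every chunk in the zone stays 256-byte
		 * aligned.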
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = (off + size - 1) & ~(size - 1);
		else
			off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax - 1;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
		z->z_ChunkSize = size;
		z->z_CpuGd = gd;
		z->z_Cpu = gd->gd_cpuid;
		z->z_LChunksp = &z->z_LChunks;
		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;	/* already zero'd */
			flags |= M_PASSIVE_ZERO;
		}
		kup = btokup(z);
		*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
		chunk_mark_allocated(z, chunk);

		/*
		 * Slide the base index for initial allocations out of the next
		 * zone we create so we do not over-weight the lower part of
		 * the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
				  & (ZALLOC_MAX_ZONE_SIZE - 1);
	}

done:
	++type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] += size;
	type->ks_loosememuse += size;	/* not MP synchronized */
	crit_exit();

	if (flags & M_ZERO)
		bzero(chunk, size);
#ifdef INVARIANTS
	else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
	}
#endif
	logmemory(malloc_end, chunk, type, size, flags);
	return(chunk);
fail:
	crit_exit();
	logmemory(malloc_end, NULL, type, size, flags);
	return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
	unsigned long osize;
	SLZone *z;
	void *nptr;
	int *kup;

	KKASSERT((flags & M_ZERO) == 0);	/* not supported */

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
		return(kmalloc(size, type, flags));
	if (size == 0) {
		kfree(ptr, type);
		return(NULL);
	}

	/*
	 * Handle oversized allocations.  XXX we really should require that a
	 * size be passed to free() instead of this nonsense.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		osize = *kup << PAGE_SHIFT;
		if (osize == round_page(size))
			return(ptr);
		if ((nptr = kmalloc(size, type, flags)) == NULL)
			return(NULL);
		bcopy(ptr, nptr, min(size, osize));
		kfree(ptr, type);
		return(nptr);
	}

	/*
	 * Get the original allocation's zone.  If the new request winds up
	 * using the same chunk size we do not have to do anything.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Allocate memory for the new request size.  Note that zoneindex has
	 * already adjusted the request size to the appropriate chunk size,
	 * which should optimize our bcopy().  Then copy and return the new
	 * pointer.
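	 *
	 * For example, growing a 97-byte allocation (104-byte chunk) to 100
	 * bytes stays within the same chunk size and simply returns the
	 * original pointer.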
	 *
	 * Resizing a non-power-of-2 allocation to a power-of-2 size does not
	 * necessarily align the result.
	 *
	 * We can only zoneindex (to align size to the chunk size) if the new
	 * size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size);
		if (z->z_ChunkSize == size)
			return(ptr);
	}
	if ((nptr = kmalloc(size, type, flags)) == NULL)
		return(NULL);
	bcopy(ptr, nptr, min(size, z->z_ChunkSize));
	kfree(ptr, type);
	return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
	if (type->ks_limit == 0) {
		crit_enter();
		if (type->ks_limit == 0)
			malloc_init(type);
		crit_exit();
	}
	return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strlen(str) + 1;
	nstr = kmalloc(zlen, type, M_WAITOK);
	bcopy(str, nstr, zlen);
	return(nstr);
}

#ifdef SMP
/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static
void
kfree_remote(void *ptr)
{
	SLGlobalData *slgd;
	SLChunk *bchunk;
	SLZone *z;
	int nfree;
	int *kup;

	slgd = &mycpu->gd_slab;
	z = ptr;
	kup = btokup(z);
	KKASSERT(*kup == -((int)mycpuid + 1));
	KKASSERT(z->z_RCount > 0);
	atomic_subtract_int(&z->z_RCount, 1);

	logmemory(free_rem_beg, z, NULL, 0, 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
	KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
	nfree = z->z_NFree;

	/*
	 * Indicate that we will no longer be off of the ZoneAry by
	 * clearing RSignal.
	 */
	if (z->z_RChunks)
		z->z_RSignal = 0;

	/*
	 * Atomically extract the bchunks list and then process it back
	 * into the lchunks list.  We want to append our bchunks to the
	 * lchunks list and not prepend since we likely do not have
	 * cache mastership of the related data (not that it helps since
	 * we are using c_Next).
	 */
	while ((bchunk = z->z_RChunks) != NULL) {
		cpu_ccfence();
		if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
			*z->z_LChunksp = bchunk;
			while (bchunk) {
				chunk_mark_free(z, bchunk);
				z->z_LChunksp = &bchunk->c_Next;
				bchunk = bchunk->c_Next;
				++z->z_NFree;
			}
			break;
		}
	}
	if (z->z_NFree && nfree == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	/*
	 * If the zone becomes totally free, and there are other zones we
	 * can allocate from, move this zone to the FreeZones list.  Since
	 * this code can be called from an IPI callback, do *NOT* try to mess
	 * with kernel_map here.  Hysteresis will be performed at malloc()
	 * time.
	 *
	 * Do not move the zone if there is an IPI in flight, otherwise MP
	 * races can result in our free_remote code accessing a destroyed
	 * zone.
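	 *
	 * z_RCount was bumped by the freeing cpu in kfree() before it sent
	 * the IPI and is dropped at the top of this function; a non-zero
	 * count means another kfree_remote() may still reference the zone.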
	 */
	if (z->z_NFree == z->z_NMax &&
	    (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
	    z->z_RCount == 0
	) {
		SLZone **pz;
		int *kup;

		for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
		     z != *pz;
		     pz = &(*pz)->z_Next) {
			;
		}
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = slgd->FreeZones;
		slgd->FreeZones = z;
		++slgd->NFreeZones;
		kup = btokup(z);
		*kup = 0;
	}
	logmemory(free_rem_end, z, bchunk, 0, 0);
}

#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
	SLZone *z;
	SLChunk *chunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	int *kup;
	unsigned long size;
#ifdef SMP
	SLChunk *bchunk;
	int rsignal;
#endif

	logmemory_quick(free_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	if (ptr == NULL)
		panic("trying to free NULL pointer");

	/*
	 * Handle special 0-byte allocations
	 */
	if (ptr == ZERO_LENGTH_PTR) {
		logmemory(free_zero, ptr, type, -1, 0);
		logmemory_quick(free_end);
		return;
	}

	/*
	 * Panic on bad malloc type
	 */
	if (type->ks_magic != M_MAGIC)
		panic("free: malloc type lacks magic");

	/*
	 * Handle oversized allocations.  XXX we really should require that a
	 * size be passed to free() instead of this nonsense.
	 *
	 * This code is never called via an ipi.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		size = *kup << PAGE_SHIFT;
		*kup = 0;
#ifdef INVARIANTS
		KKASSERT(sizeof(weirdary) <= size);
		bcopy(weirdary, ptr, sizeof(weirdary));
#endif
		/*
		 * NOTE: For oversized allocations we do not record the
		 *	 originating cpu.  It gets freed on the cpu calling
		 *	 kfree().  The statistics are in aggregate.
		 *
		 * note: XXX we have still inherited the interrupts-can't-block
		 *	 assumption.  An interrupt thread does not bump
		 *	 gd_intr_nesting_level so check TDF_INTTHREAD.  This is
		 *	 primarily until we can fix softupdate's assumptions
		 *	 about free().
		 */
		crit_enter();
		--type->ks_inuse[gd->gd_cpuid];
		type->ks_memuse[gd->gd_cpuid] -= size;
		if (mycpu->gd_intr_nesting_level ||
		    (gd->gd_curthread->td_flags & TDF_INTTHREAD))
		{
			logmemory(free_ovsz_delayed, ptr, type, size, 0);
			z = (SLZone *)ptr;
			z->z_Magic = ZALLOC_OVSZ_MAGIC;
			z->z_Next = slgd->FreeOvZones;
			z->z_ChunkSize = size;
			slgd->FreeOvZones = z;
			crit_exit();
		} else {
			crit_exit();
			logmemory(free_ovsz, ptr, type, size, 0);
			kmem_slab_free(ptr, size);	/* may block */
			atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
		}
		logmemory_quick(free_end);
		return;
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * If we do not own the zone then use atomic ops to free to the
	 * remote cpu linked list and notify the target zone using a
	 * passive message.
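	 *
	 * Ordering matters in the lock-free push below: chunk->c_Next is
	 * written and made globally visible (cpu_sfence()) before the cmpset
	 * publishes the chunk on z_RChunks, so the owning cpu always sees a
	 * fully linked list when it swaps the list out.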
	 *
	 * The target zone cannot be deallocated while we own a chunk of it,
	 * so the zone header's storage is stable until the very moment
	 * we adjust z_RChunks.  After that we cannot safely dereference (z).
	 *
	 * (no critical section needed)
	 */
	if (z->z_CpuGd != gd) {
#ifdef SMP
		/*
		 * Making these adjustments now allows us to avoid passing
		 * (type) to the remote cpu.  Note that ks_inuse/ks_memuse is
		 * being adjusted on OUR cpu, not the zone cpu, but it should
		 * all still sum up properly and cancel out.
		 */
		crit_enter();
		--type->ks_inuse[gd->gd_cpuid];
		type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
		crit_exit();

		/*
		 * WARNING! This code competes with other cpus.  Once we
		 *	    successfully link the chunk to RChunks the remote
		 *	    cpu can rip z's storage out from under us.
		 *
		 *	    Bumping RCount prevents z's storage from getting
		 *	    ripped out.
		 */
		rsignal = z->z_RSignal;
		cpu_lfence();
		if (rsignal)
			atomic_add_int(&z->z_RCount, 1);

		chunk = ptr;
		for (;;) {
			bchunk = z->z_RChunks;
			cpu_ccfence();
			chunk->c_Next = bchunk;
			cpu_sfence();

			if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
				break;
		}

		/*
		 * We have to signal the remote cpu if our actions will cause
		 * the remote zone to be placed back on ZoneAry so it can
		 * move the zone back on.
		 *
		 * We only need to deal with NULL->non-NULL RChunk transitions
		 * and only if z_RSignal is set.  We interlock by reading
		 * rsignal before adding our chunk to RChunks.  This should
		 * result in virtually no IPI traffic.
		 *
		 * We can use a passive IPI to reduce overhead even further.
		 */
		if (bchunk == NULL && rsignal) {
			logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
			lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
			/* z can get ripped out from under us from this point on */
		} else if (rsignal) {
			atomic_subtract_int(&z->z_RCount, 1);
			/* z can get ripped out from under us from this point on */
		}
#else
		panic("Corrupt SLZone");
#endif
		logmemory_quick(free_end);
		return;
	}

	/*
	 * kfree locally
	 */
	logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

	crit_enter();
	chunk = ptr;
	chunk_mark_free(z, chunk);

	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on the
	 * odd address), and so forth.  XXX needs more work, see the old
	 * malloc code.
	 */
#ifdef INVARIANTS
	if (z->z_ChunkSize < sizeof(weirdary))
		bcopy(weirdary, chunk, z->z_ChunkSize);
	else
		bcopy(weirdary, chunk, sizeof(weirdary));
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse.  Add
	 * to the front of the linked list so it is more likely to be
	 * reallocated, since it is already in our L1 cache.
	 */
#ifdef INVARIANTS
	if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		panic("BADFREE %p", chunk);
#endif
	chunk->c_Next = z->z_LChunks;
	z->z_LChunks = chunk;
	if (chunk->c_Next == NULL)
		z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
	if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		panic("BADFREE2");
#endif

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}

	--type->ks_inuse[z->z_Cpu];
	type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

	/*
	 * If the zone becomes totally free, and there are other zones we
	 * can allocate from, move this zone to the FreeZones list.  Since
	 * this code can be called from an IPI callback, do *NOT* try to mess
	 * with kernel_map here.  Hysteresis will be performed at malloc()
	 * time.
	 */
	if (z->z_NFree == z->z_NMax &&
	    (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
	    z->z_RCount == 0
	) {
		SLZone **pz;
		int *kup;

		for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
			;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = slgd->FreeZones;
		slgd->FreeZones = z;
		++slgd->NFreeZones;
		kup = btokup(z);
		*kup = 0;
	}
	logmemory_quick(free_end);
	crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) == 0,
		("memory chunk %p is already allocated!", chunk));
	*bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal!", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) != 0,
		("memory chunk %p is already free!", chunk));
	*bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
	vm_size_t i;
	vm_offset_t addr;
	int count, vmflags, base_vmflags;
	vm_page_t mp[ZALLOC_MAX_ZONE_SIZE / PAGE_SIZE];
	thread_t td;

	size = round_page(size);
	addr = vm_map_min(&kernel_map);

	/*
	 * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
	 * cannot block.
	 */
	if (flags & M_RNOWAIT) {
		if (lwkt_trytoken(&vm_token) == 0)
			return(NULL);
	} else {
		lwkt_gettoken(&vm_token);
	}
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	crit_enter();
	vm_map_lock(&kernel_map);
	if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
		vm_map_unlock(&kernel_map);
		if ((flags & M_NULLOK) == 0)
			panic("kmem_slab_alloc(): kernel_map ran out of space!");
		vm_map_entry_release(count);
		crit_exit();
		lwkt_reltoken(&vm_token);
		return(NULL);
	}

	/*
	 * kernel_object maps 1:1 to kernel_map.
	 */
	vm_object_reference(&kernel_object);
	vm_map_insert(&kernel_map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);

	td = curthread;

	base_vmflags = 0;
	if (flags & M_ZERO)
		base_vmflags |= VM_ALLOC_ZERO;
	if (flags & M_USE_RESERVE)
		base_vmflags |= VM_ALLOC_SYSTEM;
	if (flags & M_USE_INTERRUPT_RESERVE)
		base_vmflags |= VM_ALLOC_INTERRUPT;
	if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
		panic("kmem_slab_alloc: bad flags %08x (%p)",
		      flags, ((int **)&size)[-1]);
	}

	/*
	 * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t m;

		/*
		 * VM_ALLOC_NORMAL can only be set if we are not preempting.
		 *
		 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
		 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE
		 * is implied in this case), though I'm not sure if we really
		 * need to do that.
		 */
		vmflags = base_vmflags;
		if (flags & M_WAITOK) {
			if (td->td_preempted)
				vmflags |= VM_ALLOC_SYSTEM;
			else
				vmflags |= VM_ALLOC_NORMAL;
		}

		m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
		if ((i / PAGE_SIZE) < (sizeof(mp) / sizeof(mp[0])))
			mp[i / PAGE_SIZE] = m;

		/*
		 * If the allocation failed we either return NULL or we retry.
		 *
		 * If M_WAITOK is specified we wait for more memory and retry.
		 * If M_WAITOK is specified from a preemption we yield instead
		 * of wait.  Livelock will not occur because the interrupt
		 * thread will not be preempting anyone the second time around
		 * after the yield.
		 */
		if (m == NULL) {
			if (flags & M_WAITOK) {
				if (td->td_preempted) {
					vm_map_unlock(&kernel_map);
					lwkt_switch();
					vm_map_lock(&kernel_map);
				} else {
					vm_map_unlock(&kernel_map);
					vm_wait(0);
					vm_map_lock(&kernel_map);
				}
				i -= PAGE_SIZE;	/* retry */
				continue;
			}

			/*
			 * We were unable to recover, cleanup and return NULL
			 *
			 * (vm_token already held)
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
				/* page should already be busy */
				vm_page_free(m);
			}
			vm_map_delete(&kernel_map, addr, addr + size, &count);
			vm_map_unlock(&kernel_map);
			vm_map_entry_release(count);
			crit_exit();
			lwkt_reltoken(&vm_token);
			return(NULL);
		}
	}

	/*
	 * Success!
	 *
	 * Mark the map entry as non-pageable using a routine that allows us
	 * to populate the underlying pages.
	 *
	 * The pages were busied by the allocations above.
	 */
	vm_map_set_wired_quick(&kernel_map, addr, size, &count);
	crit_exit();

	/*
	 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t m;

		if ((i / PAGE_SIZE) < (sizeof(mp) / sizeof(mp[0])))
			m = mp[i / PAGE_SIZE];
		else
			m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
		m->valid = VM_PAGE_BITS_ALL;
		/* page should already be busy */
		vm_page_wire(m);
		pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
			bzero((char *)addr + i, PAGE_SIZE);
		vm_page_flag_clear(m, PG_ZERO);
		KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
		vm_page_flag_set(m, PG_REFERENCED);
		vm_page_wakeup(m);
	}
	vm_map_unlock(&kernel_map);
	vm_map_entry_release(count);
	lwkt_reltoken(&vm_token);
	return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
	crit_enter();
	lwkt_gettoken(&vm_token);
	vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
	lwkt_reltoken(&vm_token);
	crit_exit();
}