/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010-2019 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
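 *
 * Illustrative examples of the chunking (derived from zoneindex(); see the
 * table below): a 100 byte request is rounded up to a 104 byte chunk
 * (8 byte chunking), and a 600 byte request is rounded up to a 640 byte
 * chunk (64 byte chunking).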
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *	(n * PAGE_SIZE, n > 2) allocations go directly to kmem.
 *
 * Alignment properties:
 * - All power-of-2 sized allocations are power-of-2 aligned.
 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *   power-of-2 round up of 'size'.
 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
 *   above table 'Chunking' column).
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)			\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)					\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
__read_frequently static int ZoneSize;
__read_frequently static int ZoneLimit;
__read_frequently static int ZonePageCount;
__read_frequently static uintptr_t ZoneMask;
__read_frequently struct malloc_type *kmemstatistics;	/* exported to vmstat */

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32		/* threshold number of zones */

#ifdef INVARIANTS
/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#endif
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);
static void kmemfinishinit(void *dummy);

char *ZeroPage;

SYSINIT(kmem1, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
SYSINIT(kmem2, SI_BOOT2_POST_SMP, SI_ORDER_FIRST, kmemfinishinit, NULL);

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
__read_frequently static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
    &use_malloc_pattern, 0,
    "Initialize memory to -1 if M_ZERO not specified");

__read_frequently static int32_t weirdary[16];
__read_frequently static int use_weird_array;
SYSCTL_INT(_debug, OID_AUTO, use_weird_array, CTLFLAG_RW,
    &use_weird_array, 0,
    "Initialize memory to weird values on kfree()");
#endif

__read_frequently static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
__read_frequently static int kzone_pollfreq = 1;
SYSCTL_INT(_kern, OID_AUTO, kzone_pollfreq, CTLFLAG_RW, &kzone_pollfreq, 0, "");

static struct spinlock kmemstat_spin =
    SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");
static struct malloc_type *kmemstat_poll;

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
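 *
 * For example (illustrative), a machine with 8GB of physical memory and a
 * larger KVM space yields roughly 8192; the exact value depends on
 * vmstats.v_page_count at the time of the call.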
255 */ 256 size_t 257 kmem_lim_size(void) 258 { 259 size_t limsize; 260 261 limsize = (size_t)vmstats.v_page_count * PAGE_SIZE; 262 if (limsize > KvaSize) 263 limsize = KvaSize; 264 return (limsize / (1024 * 1024)); 265 } 266 267 static void 268 kmeminit(void *dummy) 269 { 270 size_t limsize; 271 int usesize; 272 #ifdef INVARIANTS 273 int i; 274 #endif 275 276 limsize = kmem_lim_size(); 277 usesize = (int)(limsize * 1024); /* convert to KB */ 278 279 /* 280 * If the machine has a large KVM space and more than 8G of ram, 281 * double the zone release threshold to reduce SMP invalidations. 282 * If more than 16G of ram, do it again. 283 * 284 * The BIOS eats a little ram so add some slop. We want 8G worth of 285 * memory sticks to trigger the first adjustment. 286 */ 287 if (ZoneRelsThresh == ZONE_RELS_THRESH) { 288 if (limsize >= 7 * 1024) 289 ZoneRelsThresh *= 2; 290 if (limsize >= 15 * 1024) 291 ZoneRelsThresh *= 2; 292 if (limsize >= 31 * 1024) 293 ZoneRelsThresh *= 2; 294 if (limsize >= 63 * 1024) 295 ZoneRelsThresh *= 2; 296 if (limsize >= 127 * 1024) 297 ZoneRelsThresh *= 2; 298 } 299 300 /* 301 * Calculate the zone size. This typically calculates to 302 * ZALLOC_MAX_ZONE_SIZE 303 */ 304 ZoneSize = ZALLOC_MIN_ZONE_SIZE; 305 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize) 306 ZoneSize <<= 1; 307 ZoneLimit = ZoneSize / 4; 308 if (ZoneLimit > ZALLOC_ZONE_LIMIT) 309 ZoneLimit = ZALLOC_ZONE_LIMIT; 310 ZoneMask = ~(uintptr_t)(ZoneSize - 1); 311 ZonePageCount = ZoneSize / PAGE_SIZE; 312 313 #ifdef INVARIANTS 314 for (i = 0; i < NELEM(weirdary); ++i) 315 weirdary[i] = WEIRD_ADDR; 316 #endif 317 318 ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO); 319 320 if (bootverbose) 321 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024); 322 } 323 324 /* 325 * Once we know how many cpus are configured reduce ZoneRelsThresh 326 * based on multiples of 32 cpu threads. 327 */ 328 static void 329 kmemfinishinit(void *dummy) 330 { 331 if (ncpus > 32) 332 ZoneRelsThresh = ZoneRelsThresh * 32 / ncpus; 333 } 334 335 /* 336 * (low level) Initialize slab-related elements in the globaldata structure. 337 * 338 * Occurs after kmeminit(). 339 */ 340 void 341 slab_gdinit(globaldata_t gd) 342 { 343 SLGlobalData *slgd; 344 int i; 345 346 slgd = &gd->gd_slab; 347 for (i = 0; i < NZONES; ++i) 348 TAILQ_INIT(&slgd->ZoneAry[i]); 349 TAILQ_INIT(&slgd->FreeZones); 350 TAILQ_INIT(&slgd->FreeOvZones); 351 } 352 353 /* 354 * Initialize a malloc type tracking structure. 
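 *
 * Statically declared pools are normally registered through MALLOC_DEFINE()
 * (as with M_TEMP and friends above); dynamically created pools reach this
 * function via kmalloc_create().  A minimal, hypothetical static declaration
 * (illustrative only, see sys/malloc.h for the exact hookup):
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "example foo buffers");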
355 */ 356 void 357 malloc_init(void *data) 358 { 359 struct malloc_type *type = data; 360 struct kmalloc_use *use; 361 size_t limsize; 362 int n; 363 364 if (type->ks_magic != M_MAGIC) 365 panic("malloc type lacks magic"); 366 367 if (type->ks_limit != 0) 368 return; 369 370 if (vmstats.v_page_count == 0) 371 panic("malloc_init not allowed before vm init"); 372 373 limsize = kmem_lim_size() * (1024 * 1024); 374 type->ks_limit = limsize / 10; 375 if (type->ks_flags & KSF_OBJSIZE) 376 malloc_mgt_init(type, &type->ks_mgt, type->ks_objsize); 377 378 if (ncpus == 1) 379 use = &type->ks_use0; 380 else 381 use = kmalloc(ncpus * sizeof(*use), M_TEMP, M_WAITOK | M_ZERO); 382 if (type->ks_flags & KSF_OBJSIZE) { 383 for (n = 0; n < ncpus; ++n) 384 malloc_mgt_init(type, &use[n].mgt, type->ks_objsize); 385 } 386 387 spin_lock(&kmemstat_spin); 388 type->ks_next = kmemstatistics; 389 type->ks_use = use; 390 kmemstatistics = type; 391 spin_unlock(&kmemstat_spin); 392 } 393 394 void 395 malloc_uninit(void *data) 396 { 397 struct malloc_type *type = data; 398 struct malloc_type *t; 399 int i; 400 #ifdef INVARIANTS 401 long ttl; 402 #endif 403 404 if (type->ks_magic != M_MAGIC) 405 panic("malloc type lacks magic"); 406 407 if (vmstats.v_page_count == 0) 408 panic("malloc_uninit not allowed before vm init"); 409 410 if (type->ks_limit == 0) 411 panic("malloc_uninit on uninitialized type"); 412 413 /* Make sure that all pending kfree()s are finished. */ 414 lwkt_synchronize_ipiqs("muninit"); 415 416 /* 417 * Remove from the kmemstatistics list, blocking if the removal races 418 * the kmalloc poller. 419 * 420 * Advance kmemstat_poll if necessary. 421 */ 422 spin_lock(&kmemstat_spin); 423 while (type->ks_flags & KSF_POLLING) 424 ssleep(type, &kmemstat_spin, 0, "kmuninit", 0); 425 426 if (kmemstat_poll == type) 427 kmemstat_poll = type->ks_next; 428 429 if (kmemstatistics == type) { 430 kmemstatistics = type->ks_next; 431 } else { 432 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { 433 if (t->ks_next == type) { 434 t->ks_next = type->ks_next; 435 break; 436 } 437 } 438 } 439 type->ks_next = NULL; 440 type->ks_limit = 0; 441 spin_unlock(&kmemstat_spin); 442 443 /* 444 * memuse is only correct in aggregation. Due to memory being allocated 445 * on one cpu and freed on another individual array entries may be 446 * negative or positive (canceling each other out). 447 */ 448 #ifdef INVARIANTS 449 ttl = 0; 450 #endif 451 for (i = 0; i < ncpus; ++i) { 452 #ifdef INVARIANTS 453 ttl += type->ks_use[i].memuse; 454 #endif 455 if (type->ks_flags & KSF_OBJSIZE) 456 malloc_mgt_uninit(type, &type->ks_use[i].mgt); 457 } 458 if (type->ks_flags & KSF_OBJSIZE) 459 malloc_mgt_uninit(type, &type->ks_mgt); 460 #ifdef INVARIANTS 461 if (ttl) { 462 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n", 463 ttl, type->ks_shortdesc, i); 464 } 465 #endif 466 467 if (type->ks_use != &type->ks_use0) { 468 kfree(type->ks_use, M_TEMP); 469 type->ks_use = NULL; 470 } 471 } 472 473 /* 474 * Slowly polls all kmalloc zones for cleanup 475 */ 476 static void 477 kmalloc_poller_thread(void) 478 { 479 struct malloc_type *type; 480 481 for (;;) { 482 /* 483 * Very slow poll by default, adjustable with sysctl 484 */ 485 int sticks; 486 487 sticks = kzone_pollfreq; 488 cpu_ccfence(); 489 if (sticks > 0) 490 sticks = hz / sticks + 1; /* approximate */ 491 else 492 sticks = hz; /* safety */ 493 tsleep((caddr_t)&sticks, 0, "kmslp", sticks); 494 495 /* 496 * [re]poll one zone each period. 
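		 * With the default kzone_pollfreq of 1 the sleep computed
		 * above works out to hz + 1 ticks, i.e. roughly one malloc
		 * type is polled per second (illustrative arithmetic).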
		 */
		spin_lock(&kmemstat_spin);
		type = kmemstat_poll;

		if (type == NULL)
			type = kmemstatistics;
		if (type) {
			atomic_set_int(&type->ks_flags, KSF_POLLING);
			spin_unlock(&kmemstat_spin);
			if (malloc_mgt_poll(type)) {
				spin_lock(&kmemstat_spin);
				kmemstat_poll = type->ks_next;
			} else {
				spin_lock(&kmemstat_spin);
			}
			atomic_clear_int(&type->ks_flags, KSF_POLLING);
			wakeup(type);
		} else {
			kmemstat_poll = NULL;
		}
		spin_unlock(&kmemstat_spin);
	}
}

static struct thread *kmalloc_poller_td;
static struct kproc_desc kmalloc_poller_kp = {
	"kmalloc_poller",
	kmalloc_poller_thread,
	&kmalloc_poller_td
};
SYSINIT(kmalloc_poller, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST,
	kproc_start, &kmalloc_poller_kp);

/*
 * Reinitialize all installed malloc regions after ncpus has been
 * determined.  type->ks_use is initially set to &type->ks_use0,
 * this function will dynamically allocate it as appropriate for ncpus.
 */
void
malloc_reinit_ncpus(void)
{
	struct malloc_type *t;
	struct kmalloc_use *use;
	int n;

	/*
	 * If only one cpu we can leave ks_use set to ks_use0
	 */
	if (ncpus <= 1)
		return;

	/*
	 * Expand ks_use for all kmalloc blocks
	 */
	for (t = kmemstatistics; t; t = t->ks_next) {
		KKASSERT(t->ks_use == &t->ks_use0);
		t->ks_use = kmalloc(sizeof(*use) * ncpus, M_TEMP, M_WAITOK|M_ZERO);
		t->ks_use[0] = t->ks_use0;
		if (t->ks_flags & KSF_OBJSIZE) {
			malloc_mgt_relocate(&t->ks_use0.mgt, &t->ks_use[0].mgt);
			for (n = 1; n < ncpus; ++n)
				malloc_mgt_init(t, &t->ks_use[n].mgt, t->ks_objsize);
		}
	}
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
	KKASSERT(type->ks_limit != 0);
	if (bytes == 0)
		bytes = KvaSize;
	if (type->ks_limit < bytes)
		type->ks_limit = bytes;
}

void
kmalloc_set_unlimited(struct malloc_type *type)
{
	type->ks_limit = kmem_lim_size() * (1024 * 1024);
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		malloc_init(type);
		*typep = type;
	}
}

void
_kmalloc_create_obj(struct malloc_type **typep, const char *descr,
		    size_t objsize)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		type->ks_flags = KSF_OBJSIZE;
		type->ks_objsize = __VM_CACHELINE_ALIGN(objsize);
		malloc_init(type);
		*typep = type;
	}
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 *
 * WARNING! For kmalloc_obj's, the exis state for related slabs is ignored,
 *	    only call once all references are 100% known to be gone.
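 *
 * Illustrative (hypothetical) lifetime of a dynamically created pool:
 *
 *	static struct malloc_type *M_FOO;	(hypothetical pool)
 *
 *	kmalloc_create(&M_FOO, "foodata");
 *	p = kmalloc(sizeof(*p), M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, M_FOO);
 *	kmalloc_destroy(&M_FOO);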
624 */ 625 void 626 kmalloc_destroy(struct malloc_type **typep) 627 { 628 if (*typep != NULL) { 629 malloc_uninit(*typep); 630 kfree(*typep, M_TEMP); 631 *typep = NULL; 632 } 633 } 634 635 /* 636 * Calculate the zone index for the allocation request size and set the 637 * allocation request size to that particular zone's chunk size. 638 */ 639 static __inline int 640 zoneindex(unsigned long *bytes, unsigned long *align) 641 { 642 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */ 643 644 if (n < 128) { 645 *bytes = n = (n + 7) & ~7; 646 *align = 8; 647 return(n / 8 - 1); /* 8 byte chunks, 16 zones */ 648 } 649 if (n < 256) { 650 *bytes = n = (n + 15) & ~15; 651 *align = 16; 652 return(n / 16 + 7); 653 } 654 if (n < 8192) { 655 if (n < 512) { 656 *bytes = n = (n + 31) & ~31; 657 *align = 32; 658 return(n / 32 + 15); 659 } 660 if (n < 1024) { 661 *bytes = n = (n + 63) & ~63; 662 *align = 64; 663 return(n / 64 + 23); 664 } 665 if (n < 2048) { 666 *bytes = n = (n + 127) & ~127; 667 *align = 128; 668 return(n / 128 + 31); 669 } 670 if (n < 4096) { 671 *bytes = n = (n + 255) & ~255; 672 *align = 256; 673 return(n / 256 + 39); 674 } 675 *bytes = n = (n + 511) & ~511; 676 *align = 512; 677 return(n / 512 + 47); 678 } 679 #if ZALLOC_ZONE_LIMIT > 8192 680 if (n < 16384) { 681 *bytes = n = (n + 1023) & ~1023; 682 *align = 1024; 683 return(n / 1024 + 55); 684 } 685 #endif 686 #if ZALLOC_ZONE_LIMIT > 16384 687 if (n < 32768) { 688 *bytes = n = (n + 2047) & ~2047; 689 *align = 2048; 690 return(n / 2048 + 63); 691 } 692 #endif 693 panic("Unexpected byte count %d", n); 694 return(0); 695 } 696 697 static __inline void 698 clean_zone_rchunks(SLZone *z) 699 { 700 SLChunk *bchunk; 701 702 while ((bchunk = z->z_RChunks) != NULL) { 703 cpu_ccfence(); 704 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) { 705 *z->z_LChunksp = bchunk; 706 while (bchunk) { 707 chunk_mark_free(z, bchunk); 708 z->z_LChunksp = &bchunk->c_Next; 709 bchunk = bchunk->c_Next; 710 ++z->z_NFree; 711 } 712 break; 713 } 714 /* retry */ 715 } 716 } 717 718 /* 719 * If the zone becomes totally free and is not the only zone listed for a 720 * chunk size we move it to the FreeZones list. We always leave at least 721 * one zone per chunk size listed, even if it is freeable. 722 * 723 * Do not move the zone if there is an IPI in_flight (z_RCount != 0), 724 * otherwise MP races can result in our free_remote code accessing a 725 * destroyed zone. The remote end interlocks z_RCount with z_RChunks 726 * so one has to test both z_NFree and z_RCount. 727 * 728 * Since this code can be called from an IPI callback, do *NOT* try to mess 729 * with kernel_map here. Hysteresis will be performed at kmalloc() time. 730 */ 731 static __inline SLZone * 732 check_zone_free(SLGlobalData *slgd, SLZone *z) 733 { 734 SLZone *znext; 735 736 znext = TAILQ_NEXT(z, z_Entry); 737 if (z->z_NFree == z->z_NMax && z->z_RCount == 0 && 738 (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)) { 739 int *kup; 740 741 TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 742 743 z->z_Magic = -1; 744 TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry); 745 ++slgd->NFreeZones; 746 kup = btokup(z); 747 *kup = 0; 748 } 749 return znext; 750 } 751 752 #ifdef SLAB_DEBUG 753 /* 754 * Used to debug memory corruption issues. Record up to (typically 32) 755 * allocation sources for this zone (for a particular chunk size). 
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
	int i;
	int b = line & (SLAB_DEBUG_ENTRIES - 1);

	i = b;
	do {
		if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
			return;
		if (z->z_Sources[i].file == NULL)
			break;
		i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
	} while (i != b);
	z->z_Sources[i].file = file;
	z->z_Sources[i].line = line;
}

#endif

static __inline unsigned long
powerof2_size(unsigned long size)
{
	int i;

	if (size == 0 || powerof2(size))
		return size;

	i = flsl(size);
	return (1UL << i);
}

/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *	M_POWEROF2	- round up size to the nearest power of 2
 *
 * MPSAFE
 */

/* don't let kmalloc macro mess up function declaration */
#undef kmalloc

#ifdef SLAB_DEBUG
void *
_kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
_kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
	SLZone *z;
	SLChunk *chunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	unsigned long align;
	int zi;
#ifdef INVARIANTS
	int i;
#endif

	logmemory_quick(malloc_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	/*
	 * XXX silly to have this in the critical path.
	 */
	KKASSERT(type->ks_limit != 0);
	++type->ks_use[gd->gd_cpuid].calls;

	/*
	 * Flagged for cache-alignment
	 */
	if (flags & M_CACHEALIGN) {
		if (size < __VM_CACHELINE_SIZE)
			size = __VM_CACHELINE_SIZE;
		else if (!CAN_CACHEALIGN(size))
			flags |= M_POWEROF2;
	}

	/*
	 * Flagged to force nearest power-of-2 (higher or same)
	 */
	if (flags & M_POWEROF2)
		size = powerof2_size(size);

	/*
	 * Handle the case where the limit is reached.  Panic if we can't return
	 * NULL.  The original malloc code looped, but this tended to
	 * simply deadlock the computer.
	 *
	 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
	 * to determine if a more complete limit check should be done.  The
	 * actual memory use is tracked via ks_use[cpu].memuse.
	 */
	while (type->ks_loosememuse >= type->ks_limit) {
		int i;
		long ttl;

		for (i = ttl = 0; i < ncpus; ++i)
			ttl += type->ks_use[i].memuse;
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK) {
				logmemory(malloc_end, NULL, type, size, flags);
				return(NULL);
			}
			panic("%s: malloc limit exceeded", type->ks_shortdesc);
		}
	}

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.
	 * Certain devices, such as the adaptec driver, not only allocate
	 * 0 bytes, they check for NULL and also realloc() later on.  Joy.
	 */
	if (size == 0) {
		logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle hysteresis from prior frees here in malloc().  We cannot
	 * safely manipulate the kernel_map in free() due to free() possibly
	 * being called via an IPI message or from sensitive interrupt code.
	 *
	 * NOTE: ku_pagecnt must be cleared before we free the slab or we
	 *	 might race another cpu allocating the kva and setting
	 *	 ku_pagecnt.
	 */
	while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
			int *kup;

			z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
			KKASSERT(z != NULL);
			TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
			--slgd->NFreeZones;
			kup = btokup(z);
			*kup = 0;
			kmem_slab_free(z, ZoneSize);	/* may block */
		}
		crit_exit();
	}

	/*
	 * XXX handle oversized frees that were queued from kfree().
	 */
	while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
			vm_size_t tsize;

			KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
			TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
			tsize = z->z_ChunkSize;
			kmem_slab_free(z, tsize);	/* may block */
		}
		crit_exit();
	}

	/*
	 * Handle large allocations directly.  There should not be very many of
	 * these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on an SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we lose
	 * some efficiency.  XXX maybe fix mmio and the elf loader instead.
	 */
	if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		int *kup;

		size = round_page(size);
		chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL) {
			logmemory(malloc_end, NULL, type, size, flags);
			return(NULL);
		}
		flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
		flags |= M_PASSIVE_ZERO;
		kup = btokup(chunk);
		*kup = size / PAGE_SIZE;
		crit_enter();
		goto done;
	}

	/*
	 * Attempt to allocate out of an existing zone.  First try the free list,
	 * then allocate out of unallocated space.  If we find a good zone move
	 * it to the head of the list so later allocations find it quickly
	 * (we might have thousands of zones in the list).
	 *
	 * Note: zoneindex() will panic if size is too large.
	 */
	zi = zoneindex(&size, &align);
	KKASSERT(zi < NZONES);
	crit_enter();

	if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) {
		/*
		 * Locate a chunk - we have to have at least one.  If this is the
		 * last chunk go ahead and do the work to retrieve chunks freed
		 * from remote cpus, and if the zone is still empty move it off
		 * the ZoneAry.
		 */
		if (--z->z_NFree <= 0) {
			KKASSERT(z->z_NFree == 0);

			/*
			 * WARNING! This code competes with other cpus.  It is ok
			 * for us to not drain RChunks here but we might as well, and
			 * it is ok if more accumulate after we're done.
			 *
			 * Set RSignal before pulling rchunks off, indicating that we
			 * will be moving ourselves off of the ZoneAry.
			 * Remote ends will read RSignal before putting rchunks on,
			 * thus interlocking their IPI signaling.
			 */
			if (z->z_RChunks == NULL)
				atomic_swap_int(&z->z_RSignal, 1);

			clean_zone_rchunks(z);

			/*
			 * Remove from the zone list if no free chunks remain.
			 * Clear RSignal
			 */
			if (z->z_NFree == 0) {
				TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry);
			} else {
				z->z_RSignal = 0;
			}
		}

		/*
		 * Fast path, we have chunks available in z_LChunks.
		 */
		chunk = z->z_LChunks;
		if (chunk) {
			chunk_mark_allocated(z, chunk);
			z->z_LChunks = chunk->c_Next;
			if (z->z_LChunks == NULL)
				z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
			slab_record_source(z, file, line);
#endif
			goto done;
		}

		/*
		 * No chunks are available in LChunks, the free chunk MUST be
		 * in the never-before-used memory area, controlled by UIndex.
		 *
		 * The consequences are very serious if our zone got corrupted so
		 * we use an explicit panic rather than a KASSERT.
		 */
		if (z->z_UIndex + 1 != z->z_NMax)
			++z->z_UIndex;
		else
			z->z_UIndex = 0;

		if (z->z_UIndex == z->z_UEndIndex)
			panic("slaballoc: corrupted zone");

		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;
			flags |= M_PASSIVE_ZERO;
		}
		chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
		slab_record_source(z, file, line);
#endif
		goto done;
	}

	/*
	 * If all zones are exhausted we need to allocate a new zone for this
	 * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
	 * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
	 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
	 * we do not pre-zero it because we do not want to mess up the L1 cache.
	 *
	 * At least one subsystem, the tty code (see CROUND) expects power-of-2
	 * allocations to be power-of-2 aligned.  We maintain compatibility by
	 * adjusting the base offset below.
	 */
	{
		int off;
		int *kup;

		if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) {
			TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
			--slgd->NFreeZones;
			bzero(z, sizeof(SLZone));
			z->z_Flags |= SLZF_UNOTZEROD;
		} else {
			z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
			if (z == NULL)
				goto fail;
		}

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is somewhat more
		 * complicated so don't make an exact calculation.
		 */
		off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(SLZone);
#endif

		/*
		 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
		 * Otherwise properly align the data according to the chunk size.
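		 *
		 * For example (illustrative): a 1024 byte chunk size forces
		 * 'align' to 1024 so the base offset is rounded to a 1024 byte
		 * boundary, while a 1536 byte chunk size keeps the 128 byte
		 * alignment returned by zoneindex().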
1094 */ 1095 if (powerof2(size)) 1096 align = size; 1097 off = roundup2(off, align); 1098 1099 z->z_Magic = ZALLOC_SLAB_MAGIC; 1100 z->z_ZoneIndex = zi; 1101 z->z_NMax = (ZoneSize - off) / size; 1102 z->z_NFree = z->z_NMax - 1; 1103 z->z_BasePtr = (char *)z + off; 1104 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax; 1105 z->z_ChunkSize = size; 1106 z->z_CpuGd = gd; 1107 z->z_Cpu = gd->gd_cpuid; 1108 z->z_LChunksp = &z->z_LChunks; 1109 #ifdef SLAB_DEBUG 1110 bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources)); 1111 bzero(z->z_Sources, sizeof(z->z_Sources)); 1112 #endif 1113 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 1114 TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry); 1115 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 1116 flags &= ~M_ZERO; /* already zero'd */ 1117 flags |= M_PASSIVE_ZERO; 1118 } 1119 kup = btokup(z); 1120 *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */ 1121 chunk_mark_allocated(z, chunk); 1122 #ifdef SLAB_DEBUG 1123 slab_record_source(z, file, line); 1124 #endif 1125 1126 /* 1127 * Slide the base index for initial allocations out of the next 1128 * zone we create so we do not over-weight the lower part of the 1129 * cpu memory caches. 1130 */ 1131 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) 1132 & (ZALLOC_MAX_ZONE_SIZE - 1); 1133 } 1134 1135 done: 1136 ++type->ks_use[gd->gd_cpuid].inuse; 1137 type->ks_use[gd->gd_cpuid].memuse += size; 1138 type->ks_use[gd->gd_cpuid].loosememuse += size; 1139 if (type->ks_use[gd->gd_cpuid].loosememuse >= ZoneSize) { 1140 /* not MP synchronized */ 1141 type->ks_loosememuse += type->ks_use[gd->gd_cpuid].loosememuse; 1142 type->ks_use[gd->gd_cpuid].loosememuse = 0; 1143 } 1144 crit_exit(); 1145 1146 if (flags & M_ZERO) 1147 bzero(chunk, size); 1148 #ifdef INVARIANTS 1149 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) { 1150 if (use_malloc_pattern) { 1151 for (i = 0; i < size; i += sizeof(int)) { 1152 *(int *)((char *)chunk + i) = -1; 1153 } 1154 } 1155 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */ 1156 } 1157 #endif 1158 logmemory(malloc_end, chunk, type, size, flags); 1159 return(chunk); 1160 fail: 1161 crit_exit(); 1162 logmemory(malloc_end, NULL, type, size, flags); 1163 return(NULL); 1164 } 1165 1166 /* 1167 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE) 1168 * 1169 * Generally speaking this routine is not called very often and we do 1170 * not attempt to optimize it beyond reusing the same pointer if the 1171 * new size fits within the chunking of the old pointer's zone. 1172 */ 1173 #ifdef SLAB_DEBUG 1174 void * 1175 krealloc_debug(void *ptr, unsigned long size, 1176 struct malloc_type *type, int flags, 1177 const char *file, int line) 1178 #else 1179 void * 1180 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags) 1181 #endif 1182 { 1183 unsigned long osize; 1184 unsigned long align; 1185 SLZone *z; 1186 void *nptr; 1187 int *kup; 1188 1189 KKASSERT((flags & M_ZERO) == 0); /* not supported */ 1190 1191 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) 1192 return(_kmalloc_debug(size, type, flags, file, line)); 1193 if (size == 0) { 1194 kfree(ptr, type); 1195 return(NULL); 1196 } 1197 1198 /* 1199 * Handle oversized allocations. XXX we really should require that a 1200 * size be passed to free() instead of this nonsense. 
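	 *
	 * Recap of the btokup() convention used below (and in kfree()): a
	 * positive *kup is the page count of an oversized, kmem-backed
	 * allocation, while a negative *kup indicates a slab zone owned by
	 * cpu (-*kup - 1).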
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		osize = *kup << PAGE_SHIFT;
		if (osize == round_page(size))
			return(ptr);
		if ((nptr = _kmalloc_debug(size, type, flags, file, line)) == NULL)
			return(NULL);
		bcopy(ptr, nptr, min(size, osize));
		kfree(ptr, type);
		return(nptr);
	}

	/*
	 * Get the original allocation's zone.  If the new request winds up
	 * using the same chunk size we do not have to do anything.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Allocate memory for the new request size.  Note that zoneindex has
	 * already adjusted the request size to the appropriate chunk size, which
	 * should optimize our bcopy().  Then copy and return the new pointer.
	 *
	 * Resizing a non-power-of-2 allocation to a power-of-2 size does not
	 * necessarily align the result.
	 *
	 * We can only zoneindex (to align size to the chunk size) if the new
	 * size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &align);
		if (z->z_ChunkSize == size)
			return(ptr);
	}
	if ((nptr = _kmalloc_debug(size, type, flags, file, line)) == NULL)
		return(NULL);
	bcopy(ptr, nptr, min(size, z->z_ChunkSize));
	kfree(ptr, type);
	return(nptr);
}

size_t
kmalloc_usable_size(const void *ptr)
{
	unsigned long size;
	SLZone *z;
	int *kup;

	if (ptr == NULL)
		return 0;
	if (ptr == ZERO_LENGTH_PTR)
		return 0;

	/*
	 * Check to see if the pointer belongs to an oversized segment
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		size = *kup << PAGE_SHIFT;
		return size;
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	return (z->z_ChunkSize);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
	KKASSERT(type->ks_limit != 0);
	return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
	      const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strlen(str) + 1;
	nstr = _kmalloc_debug(zlen, type, M_WAITOK, file, line);
	bcopy(str, nstr, zlen);
	return(nstr);
}

#ifdef SLAB_DEBUG
char *
kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type,
	       const char *file, int line)
#else
char *
kstrndup(const char *str, size_t maxlen, struct malloc_type *type)
#endif
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strnlen(str, maxlen) + 1;
	nstr = _kmalloc_debug(zlen, type, M_WAITOK, file, line);
	bcopy(str, nstr, zlen);
	nstr[zlen - 1] = '\0';
	return(nstr);
}

/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.
RCount will be bumped so the memory should be good, but validate 1336 * that it really is. 1337 */ 1338 static void 1339 kfree_remote(void *ptr) 1340 { 1341 SLGlobalData *slgd; 1342 SLZone *z; 1343 int nfree; 1344 int *kup; 1345 1346 slgd = &mycpu->gd_slab; 1347 z = ptr; 1348 kup = btokup(z); 1349 KKASSERT(*kup == -((int)mycpuid + 1)); 1350 KKASSERT(z->z_RCount > 0); 1351 atomic_subtract_int(&z->z_RCount, 1); 1352 1353 logmemory(free_rem_beg, z, NULL, 0L, 0); 1354 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1355 KKASSERT(z->z_Cpu == mycpu->gd_cpuid); 1356 nfree = z->z_NFree; 1357 1358 /* 1359 * Indicate that we will no longer be off of the ZoneAry by 1360 * clearing RSignal. 1361 */ 1362 if (z->z_RChunks) 1363 z->z_RSignal = 0; 1364 1365 /* 1366 * Atomically extract the bchunks list and then process it back 1367 * into the lchunks list. We want to append our bchunks to the 1368 * lchunks list and not prepend since we likely do not have 1369 * cache mastership of the related data (not that it helps since 1370 * we are using c_Next). 1371 */ 1372 clean_zone_rchunks(z); 1373 if (z->z_NFree && nfree == 0) { 1374 TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry); 1375 } 1376 1377 check_zone_free(slgd, z); 1378 logmemory(free_rem_end, z, NULL, 0L, 0); 1379 } 1380 1381 /* 1382 * free (SLAB ALLOCATOR) 1383 * 1384 * Free a memory block previously allocated by malloc. 1385 * 1386 * Note: We do not attempt to update ks_loosememuse as MP races could 1387 * prevent us from checking memory limits in malloc. YYY we may 1388 * consider updating ks_cpu.loosememuse. 1389 * 1390 * MPSAFE 1391 */ 1392 void 1393 _kfree(void *ptr, struct malloc_type *type) 1394 { 1395 SLZone *z; 1396 SLChunk *chunk; 1397 SLGlobalData *slgd; 1398 struct globaldata *gd; 1399 int *kup; 1400 unsigned long size; 1401 SLChunk *bchunk; 1402 int rsignal; 1403 1404 logmemory_quick(free_beg); 1405 gd = mycpu; 1406 slgd = &gd->gd_slab; 1407 1408 if (ptr == NULL) 1409 panic("trying to free NULL pointer"); 1410 1411 /* 1412 * Handle special 0-byte allocations 1413 */ 1414 if (ptr == ZERO_LENGTH_PTR) { 1415 logmemory(free_zero, ptr, type, -1UL, 0); 1416 logmemory_quick(free_end); 1417 return; 1418 } 1419 1420 /* 1421 * Panic on bad malloc type 1422 */ 1423 if (type->ks_magic != M_MAGIC) 1424 panic("free: malloc type lacks magic"); 1425 1426 /* 1427 * Handle oversized allocations. XXX we really should require that a 1428 * size be passed to free() instead of this nonsense. 1429 * 1430 * This code is never called via an ipi. 1431 */ 1432 kup = btokup(ptr); 1433 if (*kup > 0) { 1434 size = *kup << PAGE_SHIFT; 1435 *kup = 0; 1436 #ifdef INVARIANTS 1437 if (use_weird_array) { 1438 KKASSERT(sizeof(weirdary) <= size); 1439 bcopy(weirdary, ptr, sizeof(weirdary)); 1440 } 1441 #endif 1442 /* 1443 * NOTE: For oversized allocations we do not record the 1444 * originating cpu. It gets freed on the cpu calling 1445 * kfree(). The statistics are in aggregate. 1446 * 1447 * note: XXX we have still inherited the interrupts-can't-block 1448 * assumption. An interrupt thread does not bump 1449 * gd_intr_nesting_level so check TDF_INTTHREAD. This is 1450 * primarily until we can fix softupdate's assumptions about free(). 
1451 */ 1452 crit_enter(); 1453 --type->ks_use[gd->gd_cpuid].inuse; 1454 type->ks_use[gd->gd_cpuid].memuse -= size; 1455 if (mycpu->gd_intr_nesting_level || 1456 (gd->gd_curthread->td_flags & TDF_INTTHREAD)) { 1457 logmemory(free_ovsz_delayed, ptr, type, size, 0); 1458 z = (SLZone *)ptr; 1459 z->z_Magic = ZALLOC_OVSZ_MAGIC; 1460 z->z_ChunkSize = size; 1461 1462 TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry); 1463 crit_exit(); 1464 } else { 1465 crit_exit(); 1466 logmemory(free_ovsz, ptr, type, size, 0); 1467 kmem_slab_free(ptr, size); /* may block */ 1468 } 1469 logmemory_quick(free_end); 1470 return; 1471 } 1472 1473 /* 1474 * Zone case. Figure out the zone based on the fact that it is 1475 * ZoneSize aligned. 1476 */ 1477 z = (SLZone *)((uintptr_t)ptr & ZoneMask); 1478 kup = btokup(z); 1479 KKASSERT(*kup < 0); 1480 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 1481 1482 /* 1483 * If we do not own the zone then use atomic ops to free to the 1484 * remote cpu linked list and notify the target zone using a 1485 * passive message. 1486 * 1487 * The target zone cannot be deallocated while we own a chunk of it, 1488 * so the zone header's storage is stable until the very moment 1489 * we adjust z_RChunks. After that we cannot safely dereference (z). 1490 * 1491 * (no critical section needed) 1492 */ 1493 if (z->z_CpuGd != gd) { 1494 /* 1495 * Making these adjustments now allow us to avoid passing (type) 1496 * to the remote cpu. Note that inuse/memuse is being 1497 * adjusted on OUR cpu, not the zone cpu, but it should all still 1498 * sum up properly and cancel out. 1499 */ 1500 crit_enter(); 1501 --type->ks_use[gd->gd_cpuid].inuse; 1502 type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize; 1503 crit_exit(); 1504 1505 /* 1506 * WARNING! This code competes with other cpus. Once we 1507 * successfully link the chunk to RChunks the remote 1508 * cpu can rip z's storage out from under us. 1509 * 1510 * Bumping RCount prevents z's storage from getting 1511 * ripped out. 1512 */ 1513 rsignal = z->z_RSignal; 1514 cpu_lfence(); 1515 if (rsignal) 1516 atomic_add_int(&z->z_RCount, 1); 1517 1518 chunk = ptr; 1519 for (;;) { 1520 bchunk = z->z_RChunks; 1521 cpu_ccfence(); 1522 chunk->c_Next = bchunk; 1523 cpu_sfence(); 1524 1525 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk)) 1526 break; 1527 } 1528 1529 /* 1530 * We have to signal the remote cpu if our actions will cause 1531 * the remote zone to be placed back on ZoneAry so it can 1532 * move the zone back on. 1533 * 1534 * We only need to deal with NULL->non-NULL RChunk transitions 1535 * and only if z_RSignal is set. We interlock by reading rsignal 1536 * before adding our chunk to RChunks. This should result in 1537 * virtually no IPI traffic. 1538 * 1539 * We can use a passive IPI to reduce overhead even further. 
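		 *
		 * Summary of the handshake (illustrative): the owning cpu sets
		 * z_RSignal when it takes the zone off its ZoneAry; a remote
		 * free that observes z_RSignal bumps z_RCount (keeping the
		 * zone's storage alive) and, on a NULL->non-NULL z_RChunks
		 * transition, sends kfree_remote() so the owner can drain
		 * z_RChunks and put the zone back on the list.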
		 */
		if (bchunk == NULL && rsignal) {
			logmemory(free_request, ptr, type,
				  (unsigned long)z->z_ChunkSize, 0);
			lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
			/* z can get ripped out from under us from this point on */
		} else if (rsignal) {
			atomic_subtract_int(&z->z_RCount, 1);
			/* z can get ripped out from under us from this point on */
		}
		logmemory_quick(free_end);
		return;
	}

	/*
	 * kfree locally
	 */
	logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

	crit_enter();
	chunk = ptr;
	chunk_mark_free(z, chunk);

	/*
	 * Put weird data into the memory to detect modifications after freeing,
	 * illegal pointer use after freeing (we should fault on the odd address),
	 * and so forth.  XXX needs more work, see the old malloc code.
	 */
#ifdef INVARIANTS
	if (use_weird_array) {
		if (z->z_ChunkSize < sizeof(weirdary))
			bcopy(weirdary, chunk, z->z_ChunkSize);
		else
			bcopy(weirdary, chunk, sizeof(weirdary));
	}
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse.  Add
	 * to the front of the linked list so it is more likely to be
	 * reallocated, since it is already in our L1 cache.
	 */
#ifdef INVARIANTS
	if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		panic("BADFREE %p", chunk);
#endif
	chunk->c_Next = z->z_LChunks;
	z->z_LChunks = chunk;
	if (chunk->c_Next == NULL)
		z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
	if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		panic("BADFREE2");
#endif

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.  A fully allocated
	 * zone that sees its first free is considered 'mature' and is placed
	 * at the head, giving the system time to potentially free the remaining
	 * entries even while other allocations are going on and making the zone
	 * freeable.
	 */
	if (z->z_NFree++ == 0)
		TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);

	--type->ks_use[gd->gd_cpuid].inuse;
	type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;

	check_zone_free(slgd, z);
	logmemory_quick(free_end);
	crit_exit();
}

/*
 * Cleanup slabs which are hanging around due to RChunks or which are wholly
 * free and can be moved to the free list if not moved by other means.
 *
 * Called once every 10 seconds on all cpus.
 */
void
slab_cleanup(void)
{
	SLGlobalData *slgd = &mycpu->gd_slab;
	SLZone *z;
	int i;

	crit_enter();
	for (i = 0; i < NZONES; ++i) {
		if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL)
			continue;

		/*
		 * Scan zones.
		 */
		while (z) {
			/*
			 * Shift all RChunks to the end of the LChunks list.  This is
			 * an O(1) operation.
			 *
			 * Then free the zone if possible.
1642 */ 1643 clean_zone_rchunks(z); 1644 z = check_zone_free(slgd, z); 1645 } 1646 } 1647 crit_exit(); 1648 } 1649 1650 #if defined(INVARIANTS) 1651 1652 /* 1653 * Helper routines for sanity checks 1654 */ 1655 static void 1656 chunk_mark_allocated(SLZone *z, void *chunk) 1657 { 1658 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1659 uint32_t *bitptr; 1660 1661 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1662 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1663 ("memory chunk %p bit index %d is illegal", chunk, bitdex)); 1664 bitptr = &z->z_Bitmap[bitdex >> 5]; 1665 bitdex &= 31; 1666 KASSERT((*bitptr & (1 << bitdex)) == 0, 1667 ("memory chunk %p is already allocated!", chunk)); 1668 *bitptr |= 1 << bitdex; 1669 } 1670 1671 static void 1672 chunk_mark_free(SLZone *z, void *chunk) 1673 { 1674 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1675 uint32_t *bitptr; 1676 1677 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0); 1678 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, 1679 ("memory chunk %p bit index %d is illegal!", chunk, bitdex)); 1680 bitptr = &z->z_Bitmap[bitdex >> 5]; 1681 bitdex &= 31; 1682 KASSERT((*bitptr & (1 << bitdex)) != 0, 1683 ("memory chunk %p is already free!", chunk)); 1684 *bitptr &= ~(1 << bitdex); 1685 } 1686 1687 #endif 1688 1689 /* 1690 * kmem_slab_alloc() 1691 * 1692 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the 1693 * specified alignment. M_* flags are expected in the flags field. 1694 * 1695 * Alignment must be a multiple of PAGE_SIZE. 1696 * 1697 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(), 1698 * but when we move zalloc() over to use this function as its backend 1699 * we will have to switch to kreserve/krelease and call reserve(0) 1700 * after the new space is made available. 1701 * 1702 * Interrupt code which has preempted other code is not allowed to 1703 * use PQ_CACHE pages. However, if an interrupt thread is run 1704 * non-preemptively or blocks and then runs non-preemptively, then 1705 * it is free to use PQ_CACHE pages. <--- may not apply any longer XXX 1706 */ 1707 void * 1708 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) 1709 { 1710 vm_size_t i; 1711 vm_offset_t addr; 1712 int count, vmflags, base_vmflags; 1713 vm_page_t mbase = NULL; 1714 vm_page_t m; 1715 thread_t td; 1716 1717 size = round_page(size); 1718 addr = vm_map_min(kernel_map); 1719 1720 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1721 crit_enter(); 1722 vm_map_lock(kernel_map); 1723 if (vm_map_findspace(kernel_map, addr, size, align, 0, &addr)) { 1724 vm_map_unlock(kernel_map); 1725 if ((flags & M_NULLOK) == 0) 1726 panic("kmem_slab_alloc(): kernel_map ran out of space!"); 1727 vm_map_entry_release(count); 1728 crit_exit(); 1729 return(NULL); 1730 } 1731 1732 /* 1733 * kernel_object maps 1:1 to kernel_map. 
1734 */ 1735 vm_object_hold(kernel_object); 1736 vm_object_reference_locked(kernel_object); 1737 vm_map_insert(kernel_map, &count, 1738 kernel_object, NULL, 1739 addr, NULL, 1740 addr, addr + size, 1741 VM_MAPTYPE_NORMAL, 1742 VM_SUBSYS_KMALLOC, 1743 VM_PROT_ALL, VM_PROT_ALL, 0); 1744 vm_object_drop(kernel_object); 1745 vm_map_set_wired_quick(kernel_map, addr, size, &count); 1746 vm_map_unlock(kernel_map); 1747 1748 td = curthread; 1749 1750 base_vmflags = 0; 1751 if (flags & M_ZERO) 1752 base_vmflags |= VM_ALLOC_ZERO; 1753 if (flags & M_USE_RESERVE) 1754 base_vmflags |= VM_ALLOC_SYSTEM; 1755 if (flags & M_USE_INTERRUPT_RESERVE) 1756 base_vmflags |= VM_ALLOC_INTERRUPT; 1757 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) { 1758 panic("kmem_slab_alloc: bad flags %08x (%p)", 1759 flags, ((int **)&size)[-1]); 1760 } 1761 1762 /* 1763 * Allocate the pages. Do not map them yet. VM_ALLOC_NORMAL can only 1764 * be set if we are not preempting. 1765 * 1766 * VM_ALLOC_SYSTEM is automatically set if we are preempting and 1767 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is 1768 * implied in this case), though I'm not sure if we really need to 1769 * do that. 1770 */ 1771 vmflags = base_vmflags; 1772 if (flags & M_WAITOK) { 1773 if (td->td_preempted) 1774 vmflags |= VM_ALLOC_SYSTEM; 1775 else 1776 vmflags |= VM_ALLOC_NORMAL; 1777 } 1778 1779 vm_object_hold(kernel_object); 1780 for (i = 0; i < size; i += PAGE_SIZE) { 1781 m = vm_page_alloc(kernel_object, OFF_TO_IDX(addr + i), vmflags); 1782 if (i == 0) 1783 mbase = m; 1784 1785 /* 1786 * If the allocation failed we either return NULL or we retry. 1787 * 1788 * If M_WAITOK is specified we wait for more memory and retry. 1789 * If M_WAITOK is specified from a preemption we yield instead of 1790 * wait. Livelock will not occur because the interrupt thread 1791 * will not be preempting anyone the second time around after the 1792 * yield. 1793 */ 1794 if (m == NULL) { 1795 if (flags & M_WAITOK) { 1796 if (td->td_preempted) { 1797 lwkt_switch(); 1798 } else { 1799 vm_wait(0); 1800 } 1801 i -= PAGE_SIZE; /* retry */ 1802 continue; 1803 } 1804 break; 1805 } 1806 } 1807 1808 /* 1809 * Check and deal with an allocation failure 1810 */ 1811 if (i != size) { 1812 while (i != 0) { 1813 i -= PAGE_SIZE; 1814 m = vm_page_lookup(kernel_object, OFF_TO_IDX(addr + i)); 1815 /* page should already be busy */ 1816 vm_page_free(m); 1817 } 1818 vm_map_lock(kernel_map); 1819 vm_map_delete(kernel_map, addr, addr + size, &count); 1820 vm_map_unlock(kernel_map); 1821 vm_object_drop(kernel_object); 1822 1823 vm_map_entry_release(count); 1824 crit_exit(); 1825 return(NULL); 1826 } 1827 1828 /* 1829 * Success! 1830 * 1831 * NOTE: The VM pages are still busied. mbase points to the first one 1832 * but we have to iterate via vm_page_next() 1833 */ 1834 vm_object_drop(kernel_object); 1835 crit_exit(); 1836 1837 /* 1838 * Enter the pages into the pmap and deal with M_ZERO. 
1839 */ 1840 m = mbase; 1841 i = 0; 1842 1843 while (i < size) { 1844 /* 1845 * page should already be busy 1846 */ 1847 m->valid = VM_PAGE_BITS_ALL; 1848 vm_page_wire(m); 1849 pmap_enter(kernel_pmap, addr + i, m, 1850 VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL); 1851 if (flags & M_ZERO) 1852 pagezero((char *)addr + i); 1853 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED)); 1854 vm_page_flag_set(m, PG_REFERENCED); 1855 vm_page_wakeup(m); 1856 1857 i += PAGE_SIZE; 1858 vm_object_hold(kernel_object); 1859 m = vm_page_next(m); 1860 vm_object_drop(kernel_object); 1861 } 1862 smp_invltlb(); 1863 vm_map_entry_release(count); 1864 return((void *)addr); 1865 } 1866 1867 /* 1868 * kmem_slab_free() 1869 */ 1870 void 1871 kmem_slab_free(void *ptr, vm_size_t size) 1872 { 1873 crit_enter(); 1874 vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); 1875 crit_exit(); 1876 } 1877