/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010-2019 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *	(n * PAGE_SIZE, n > 2) allocations go directly to kmem.
 *
 * Alignment properties:
 * - All power-of-2 sized allocations are power-of-2 aligned.
 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *   power-of-2 round up of 'size'.
 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
 *   above table 'Chunking' column).
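 *
 * For example (derived from the table above): a 100-byte request falls
 * in the 0-127 row, is rounded up to the 8-byte chunking (104 bytes)
 * and is 8-byte aligned, while a 600-byte request is rounded up to
 * 640 bytes (64-byte chunking) and is 64-byte aligned.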
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif
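
/*
 * btokup() returns a pointer to the kmem usage page-count entry backing
 * a kernel va.  A summary of the conventions used by the code below:
 * a positive count is the page count of an oversized, direct kmem
 * allocation; a negative value -(N+1) encodes cpu N as the owner of a
 * slab zone; zero means the page is not accounted for.
 */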
#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)		\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)				\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
__read_frequently static int ZoneSize;
__read_frequently static int ZoneLimit;
__read_frequently static int ZonePageCount;
__read_frequently static uintptr_t ZoneMask;
__read_frequently struct malloc_type *kmemstatistics;	/* exported to vmstat */

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32	/* threshold number of zones */

#ifdef INVARIANTS
/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#endif
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
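
/*
 * Illustrative sketch only (not compiled): how a subsystem typically
 * declares one of these buckets and allocates from it.  M_FOODATA and
 * foo_example() are hypothetical names, not part of this file.
 */
#if 0
MALLOC_DEFINE(M_FOODATA, "foodata", "example subsystem buffers");

static void
foo_example(void)
{
	char *buf;

	/* sleep until memory is available, return zeroed memory */
	buf = kmalloc(1024, M_FOODATA, M_WAITOK | M_ZERO);
	kfree(buf, M_FOODATA);
}
#endif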
/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);
static void kmemfinishinit(void *dummy);

char *ZeroPage;

SYSINIT(kmem1, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
SYSINIT(kmem2, SI_BOOT2_POST_SMP, SI_ORDER_FIRST, kmemfinishinit, NULL);

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
__read_frequently static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0,
	   "Initialize memory to -1 if M_ZERO not specified");

__read_frequently static int32_t weirdary[16];
__read_frequently static int use_weird_array;
SYSCTL_INT(_debug, OID_AUTO, use_weird_array, CTLFLAG_RW,
	   &use_weird_array, 0,
	   "Initialize memory to weird values on kfree()");
#endif

__read_frequently static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
__read_frequently static int kzone_pollfreq = 1;
SYSCTL_INT(_kern, OID_AUTO, kzone_pollfreq, CTLFLAG_RW, &kzone_pollfreq, 0, "");

static struct spinlock kmemstat_spin =
			SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");
static struct malloc_type *kmemstat_poll;

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
	size_t limsize;

	limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
	if (limsize > KvaSize)
		limsize = KvaSize;
	return (limsize / (1024 * 1024));
}

static void
kmeminit(void *dummy)
{
	size_t limsize;
	int usesize;
#ifdef INVARIANTS
	int i;
#endif

	limsize = kmem_lim_size();
	usesize = (int)(limsize * 1024);	/* convert to KB */

	/*
	 * If the machine has a large KVM space and more than 8G of ram,
	 * double the zone release threshold to reduce SMP invalidations.
	 * If more than 16G of ram, do it again.
	 *
	 * The BIOS eats a little ram so add some slop.  We want 8G worth of
	 * memory sticks to trigger the first adjustment.
	 */
	if (ZoneRelsThresh == ZONE_RELS_THRESH) {
		if (limsize >= 7 * 1024)
			ZoneRelsThresh *= 2;
		if (limsize >= 15 * 1024)
			ZoneRelsThresh *= 2;
		if (limsize >= 31 * 1024)
			ZoneRelsThresh *= 2;
		if (limsize >= 63 * 1024)
			ZoneRelsThresh *= 2;
		if (limsize >= 127 * 1024)
			ZoneRelsThresh *= 2;
	}

	/*
	 * Calculate the zone size.  This typically calculates to
	 * ZALLOC_MAX_ZONE_SIZE
	 */
	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
	while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
		ZoneSize <<= 1;
	ZoneLimit = ZoneSize / 4;
	if (ZoneLimit > ZALLOC_ZONE_LIMIT)
		ZoneLimit = ZALLOC_ZONE_LIMIT;
	ZoneMask = ~(uintptr_t)(ZoneSize - 1);
	ZonePageCount = ZoneSize / PAGE_SIZE;

#ifdef INVARIANTS
	for (i = 0; i < NELEM(weirdary); ++i)
		weirdary[i] = WEIRD_ADDR;
#endif

	ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

	if (bootverbose)
		kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
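
/*
 * Worked example for the calculation above (assuming the typical
 * 32K/128K bounds noted earlier): with 256MB of ram kmem_lim_size()
 * returns 256, so usesize = 262144 (KB).  ZoneSize then doubles
 * 32K -> 64K -> 128K and stops at ZALLOC_MAX_ZONE_SIZE; ZoneLimit
 * becomes ZoneSize / 4, capped to ZALLOC_ZONE_LIMIT.
 */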
/*
 * Once we know how many cpus are configured reduce ZoneRelsThresh
 * based on multiples of 32 cpu threads.
 */
static void
kmemfinishinit(void *dummy)
{
	if (ncpus > 32)
		ZoneRelsThresh = ZoneRelsThresh * 32 / ncpus;
}

/*
 * (low level) Initialize slab-related elements in the globaldata structure.
 *
 * Occurs after kmeminit().
 */
void
slab_gdinit(globaldata_t gd)
{
	SLGlobalData *slgd;
	int i;

	slgd = &gd->gd_slab;
	for (i = 0; i < NZONES; ++i)
		TAILQ_INIT(&slgd->ZoneAry[i]);
	TAILQ_INIT(&slgd->FreeZones);
	TAILQ_INIT(&slgd->FreeOvZones);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
	struct malloc_type *type = data;
	struct kmalloc_use *use;
	size_t limsize;
	int n;

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (vmstats.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	limsize = kmem_lim_size() * (1024 * 1024);
	type->ks_limit = limsize / 10;
	if (type->ks_flags & KSF_OBJSIZE)
		malloc_mgt_init(type, &type->ks_mgt, type->ks_objsize);

	if (ncpus == 1)
		use = &type->ks_use0;
	else
		use = kmalloc(ncpus * sizeof(*use), M_TEMP, M_WAITOK | M_ZERO);
	if (type->ks_flags & KSF_OBJSIZE) {
		for (n = 0; n < ncpus; ++n)
			malloc_mgt_init(type, &use[n].mgt, type->ks_objsize);
	}

	spin_lock(&kmemstat_spin);
	type->ks_next = kmemstatistics;
	type->ks_use = use;
	kmemstatistics = type;
	spin_unlock(&kmemstat_spin);
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = data;
	struct malloc_type *t;
	int i;
#ifdef INVARIANTS
	long ttl;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (vmstats.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

	/* Make sure that all pending kfree()s are finished. */
	lwkt_synchronize_ipiqs("muninit");

	/*
	 * Remove from the kmemstatistics list, blocking if the removal races
	 * the kmalloc poller.
	 *
	 * Advance kmemstat_poll if necessary.
	 */
	spin_lock(&kmemstat_spin);
	while (type->ks_flags & KSF_POLLING)
		ssleep(type, &kmemstat_spin, 0, "kmuninit", 0);

	if (kmemstat_poll == type)
		kmemstat_poll = type->ks_next;

	if (kmemstatistics == type) {
		kmemstatistics = type->ks_next;
	} else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
	spin_unlock(&kmemstat_spin);

	/*
	 * memuse is only correct in aggregation.  Due to memory being
	 * allocated on one cpu and freed on another, individual array
	 * entries may be negative or positive (canceling each other out).
	 */
#ifdef INVARIANTS
	ttl = 0;
#endif
	for (i = 0; i < ncpus; ++i) {
#ifdef INVARIANTS
		ttl += type->ks_use[i].memuse;
#endif
		if (type->ks_flags & KSF_OBJSIZE)
			malloc_mgt_uninit(type, &type->ks_use[i].mgt);
	}
	if (type->ks_flags & KSF_OBJSIZE)
		malloc_mgt_uninit(type, &type->ks_mgt);
#ifdef INVARIANTS
	if (ttl) {
		kprintf("malloc_uninit: %ld bytes of '%s' still allocated\n",
			ttl, type->ks_shortdesc);
	}
#endif

	if (type->ks_use != &type->ks_use0) {
		kfree(type->ks_use, M_TEMP);
		type->ks_use = NULL;
	}
}

/*
 * Slowly polls all kmalloc zones for cleanup
 */
static void
kmalloc_poller_thread(void)
{
	struct malloc_type *type;

	for (;;) {
		/*
		 * Very slow poll by default, adjustable with sysctl
		 */
		int sticks;

		sticks = kzone_pollfreq;
		cpu_ccfence();
		if (sticks > 0)
			sticks = hz / sticks + 1;	/* approximate */
		else
			sticks = hz;			/* safety */
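		/*
		 * e.g. with hz = 100 and the default kzone_pollfreq of 1
		 * this works out to roughly one poll cycle per second
		 * (hz value illustrative only).
		 */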
		tsleep((caddr_t)&sticks, 0, "kmslp", sticks);

		/*
		 * [re]poll one zone each period.
		 */
		spin_lock(&kmemstat_spin);
		type = kmemstat_poll;

		if (type == NULL)
			type = kmemstatistics;
		if (type) {
			atomic_set_int(&type->ks_flags, KSF_POLLING);
			spin_unlock(&kmemstat_spin);
			if (malloc_mgt_poll(type)) {
				spin_lock(&kmemstat_spin);
				kmemstat_poll = type->ks_next;
			} else {
				spin_lock(&kmemstat_spin);
			}
			atomic_clear_int(&type->ks_flags, KSF_POLLING);
			wakeup(type);
		} else {
			kmemstat_poll = NULL;
		}
		spin_unlock(&kmemstat_spin);
	}
}

static struct thread *kmalloc_poller_td;
static struct kproc_desc kmalloc_poller_kp = {
	"kmalloc_poller",
	kmalloc_poller_thread,
	&kmalloc_poller_td
};
SYSINIT(kmalloc_poller, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST,
	kproc_start, &kmalloc_poller_kp);

/*
 * Reinitialize all installed malloc regions after ncpus has been
 * determined.  type->ks_use is initially set to &type->ks_use0,
 * this function will dynamically allocate it as appropriate for ncpus.
 */
void
malloc_reinit_ncpus(void)
{
	struct malloc_type *t;
	struct kmalloc_use *use;
	int n;

	/*
	 * If only one cpu we can leave ks_use set to ks_use0
	 */
	if (ncpus <= 1)
		return;

	/*
	 * Expand ks_use for all kmalloc blocks
	 */
	for (t = kmemstatistics; t; t = t->ks_next) {
		KKASSERT(t->ks_use == &t->ks_use0);
		t->ks_use = kmalloc(sizeof(*use) * ncpus, M_TEMP,
				    M_WAITOK|M_ZERO);
		t->ks_use[0] = t->ks_use0;
		if (t->ks_flags & KSF_OBJSIZE) {
			malloc_mgt_relocate(&t->ks_use0.mgt, &t->ks_use[0].mgt);
			for (n = 1; n < ncpus; ++n)
				malloc_mgt_init(t, &t->ks_use[n].mgt,
						t->ks_objsize);
		}
	}
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
	KKASSERT(type->ks_limit != 0);
	if (bytes == 0)
		bytes = KvaSize;
	if (type->ks_limit < bytes)
		type->ks_limit = bytes;
}

void
kmalloc_set_unlimited(struct malloc_type *type)
{
	type->ks_limit = kmem_lim_size() * (1024 * 1024);
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		malloc_init(type);
		*typep = type;
	}
}
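
/*
 * Illustrative sketch only (not compiled): typical life cycle of a
 * dynamically created pool.  M_FOOBUF and the foo_*() hooks are
 * hypothetical names, not part of this file.
 */
#if 0
static struct malloc_type *M_FOOBUF;

static void
foo_attach(void)
{
	kmalloc_create(&M_FOOBUF, "foo buffers");
	/* optionally allow this pool to grow beyond the default limit */
	kmalloc_raise_limit(M_FOOBUF, 32 * 1024 * 1024);
}

static void
foo_detach(void)
{
	/* all allocations must have been kfree()'d first */
	kmalloc_destroy(&M_FOOBUF);
}
#endif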

void
_kmalloc_create_obj(struct malloc_type **typep, const char *descr,
		    size_t objsize)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		type->ks_flags = KSF_OBJSIZE;
		type->ks_objsize = __VM_CACHELINE_ALIGN(objsize);
		malloc_init(type);
		*typep = type;
	}
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 *
 * WARNING! For kmalloc_obj's, the exis state for related slabs is ignored,
 *	    only call once all references are 100% known to be gone.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
	if (*typep != NULL) {
		malloc_uninit(*typep);
		kfree(*typep, M_TEMP);
		*typep = NULL;
	}
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
	unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */

	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		*align = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*align = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*align = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*align = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*align = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*align = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*align = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*align = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*align = 2048;
		return(n / 2048 + 63);
	}
#endif
	panic("Unexpected byte count %d", n);
	return(0);
}
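
/*
 * Atomically claim a zone's entire remote-free list (z_RChunks) and
 * splice it onto the tail of the local free list (z_LChunks), marking
 * each chunk free and adjusting z_NFree.  The cmpset loop retries if a
 * remote cpu queues additional chunks while we are claiming the list.
 */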
static __inline void
clean_zone_rchunks(SLZone *z)
{
	SLChunk *bchunk;

	while ((bchunk = z->z_RChunks) != NULL) {
		cpu_ccfence();
		if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
			*z->z_LChunksp = bchunk;
			while (bchunk) {
				chunk_mark_free(z, bchunk);
				z->z_LChunksp = &bchunk->c_Next;
				bchunk = bchunk->c_Next;
				++z->z_NFree;
			}
			break;
		}
		/* retry */
	}
}

/*
 * If the zone becomes totally free and is not the only zone listed for a
 * chunk size we move it to the FreeZones list.  We always leave at least
 * one zone per chunk size listed, even if it is freeable.
 *
 * Do not move the zone if there is an IPI in_flight (z_RCount != 0),
 * otherwise MP races can result in our free_remote code accessing a
 * destroyed zone.  The remote end interlocks z_RCount with z_RChunks
 * so one has to test both z_NFree and z_RCount.
 *
 * Since this code can be called from an IPI callback, do *NOT* try to mess
 * with kernel_map here.  Hysteresis will be performed at kmalloc() time.
 */
static __inline SLZone *
check_zone_free(SLGlobalData *slgd, SLZone *z)
{
	SLZone *znext;

	znext = TAILQ_NEXT(z, z_Entry);
	if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
	    (TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)) {
		int *kup;

		TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);

		z->z_Magic = -1;
		TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
		++slgd->NFreeZones;
		kup = btokup(z);
		*kup = 0;
	}
	return znext;
}

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
	int i;
	int b = line & (SLAB_DEBUG_ENTRIES - 1);

	i = b;
	do {
		if (z->z_Sources[i].file == file &&
		    z->z_Sources[i].line == line)
			return;
		if (z->z_Sources[i].file == NULL)
			break;
		i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
	} while (i != b);
	z->z_Sources[i].file = file;
	z->z_Sources[i].line = line;
}

#endif

/*
 * Round the requested size up to the next power of 2 (returns the size
 * unchanged if it is zero or already a power of 2).
 */
static __inline unsigned long
powerof2_size(unsigned long size)
{
	int i;

	if (size == 0 || powerof2(size))
		return size;

	i = flsl(size);
	return (1UL << i);
}

/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too
 *	large, or if it is page-aligned beyond a certain size, we fall back
 *	to the KMEM subsystem.  A SLAB tracking descriptor must be
 *	specified, use &SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE
 *			- allow the freelist to be exhausted
 *	M_POWEROF2	- roundup size to the nearest power of 2
 *
 * MPSAFE
 */
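
/*
 * Illustrative sketch only (not compiled): common flag combinations
 * accepted by the allocator below.  M_FOODATA is a hypothetical pool.
 */
#if 0
	void *a, *b;

	/* block until memory is available, return zeroed memory */
	a = kmalloc(512, M_FOODATA, M_WAITOK | M_ZERO);

	/* never block; caller must handle a NULL return */
	b = kmalloc(4096, M_FOODATA, M_RNOWAIT | M_NULLOK);
	if (b == NULL)
		return;

	/* round 1500 up to 2048 and obtain power-of-2 alignment */
	a = kmalloc(1500, M_FOODATA, M_WAITOK | M_POWEROF2);
#endif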

/* don't let kmalloc macro mess up function declaration */
#undef kmalloc

#ifdef SLAB_DEBUG
void *
_kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
_kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
{
	SLZone *z;
	SLChunk *chunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	unsigned long align;
	int zi;
#ifdef INVARIANTS
	int i;
#endif

	logmemory_quick(malloc_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	/*
	 * XXX silly to have this in the critical path.
	 */
	KKASSERT(type->ks_limit != 0);
	++type->ks_use[gd->gd_cpuid].calls;

	/*
	 * Flagged for cache-alignment
	 */
	if (flags & M_CACHEALIGN) {
		if (size < __VM_CACHELINE_SIZE)
			size = __VM_CACHELINE_SIZE;
		else if (!CAN_CACHEALIGN(size))
			flags |= M_POWEROF2;
	}

	/*
	 * Flagged to force nearest power-of-2 (higher or same)
	 */
	if (flags & M_POWEROF2)
		size = powerof2_size(size);

	/*
	 * Handle the case where the limit is reached.  Panic if we can't
	 * return NULL.  The original malloc code looped, but this tended
	 * to simply deadlock the computer.
	 *
	 * ks_loosememuse is an up-only limit that is NOT MP-synchronized,
	 * used to determine if a more complete limit check should be done.
	 * The actual memory use is tracked via ks_use[cpu].memuse.
	 */
	while (type->ks_loosememuse >= type->ks_limit) {
		int i;
		long ttl;

		for (i = ttl = 0; i < ncpus; ++i)
			ttl += type->ks_use[i].memuse;
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK) {
				logmemory(malloc_end, NULL, type, size, flags);
				return(NULL);
			}
			panic("%s: malloc limit exceeded", type->ks_shortdesc);
		}
	}

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0) {
		logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
		return(ZERO_LENGTH_PTR);
	}

	/*
	 * Handle hysteresis from prior frees here in malloc().  We cannot
	 * safely manipulate the kernel_map in free() due to free() possibly
	 * being called via an IPI message or from sensitive interrupt code.
	 *
	 * NOTE: ku_pagecnt must be cleared before we free the slab or we
	 *	 might race another cpu allocating the kva and setting
	 *	 ku_pagecnt.
	 */
	while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if (slgd->NFreeZones > ZoneRelsThresh) {  /* crit sect race */
			int *kup;

			z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
			KKASSERT(z != NULL);
			TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
			--slgd->NFreeZones;
			kup = btokup(z);
			*kup = 0;
			kmem_slab_free(z, ZoneSize);	/* may block */
		}
		crit_exit();
	}

	/*
	 * XXX handle oversized frees that were queued from kfree().
	 */
	while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
		crit_enter();
		if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
			vm_size_t tsize;

			KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
			TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
			tsize = z->z_ChunkSize;
			kmem_slab_free(z, tsize);	/* may block */
		}
		crit_exit();
	}

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on a SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.  XXX maybe fix mmio and the elf loader
	 * instead.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		int *kup;

		size = round_page(size);
		chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL) {
			logmemory(malloc_end, NULL, type, size, flags);
			return(NULL);
		}
		flags &= ~M_ZERO;  /* result already zero'd if M_ZERO was set */
		flags |= M_PASSIVE_ZERO;
		kup = btokup(chunk);
		*kup = size / PAGE_SIZE;
		crit_enter();
		goto done;
	}
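	/*
	 * (The positive page count stored via btokup() above is how
	 * _kfree() and krealloc() later recover the size of an oversized
	 * allocation.)
	 */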
	/*
	 * Attempt to allocate out of an existing zone.  First try the free
	 * list, then allocate out of unallocated space.  If we find a good
	 * zone move it to the head of the list so later allocations find
	 * it quickly (we might have thousands of zones in the list).
	 *
	 * Note: zoneindex() will panic if size is too large.
	 */
	zi = zoneindex(&size, &align);
	KKASSERT(zi < NZONES);
	crit_enter();

	if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) {
		/*
		 * Locate a chunk - we have to have at least one.  If this
		 * is the last chunk go ahead and do the work to retrieve
		 * chunks freed from remote cpus, and if the zone is still
		 * empty move it off the ZoneAry.
		 */
		if (--z->z_NFree <= 0) {
			KKASSERT(z->z_NFree == 0);

			/*
			 * WARNING! This code competes with other cpus.  It
			 * is ok for us to not drain RChunks here but we
			 * might as well, and it is ok if more accumulate
			 * after we're done.
			 *
			 * Set RSignal before pulling rchunks off, indicating
			 * that we will be moving ourselves off of the
			 * ZoneAry.  Remote ends will read RSignal before
			 * putting rchunks on thus interlocking their IPI
			 * signaling.
			 */
			if (z->z_RChunks == NULL)
				atomic_swap_int(&z->z_RSignal, 1);

			clean_zone_rchunks(z);

			/*
			 * Remove from the zone list if no free chunks remain.
			 * Clear RSignal
			 */
			if (z->z_NFree == 0) {
				TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry);
			} else {
				z->z_RSignal = 0;
			}
		}

		/*
		 * Fast path, we have chunks available in z_LChunks.
		 */
		chunk = z->z_LChunks;
		if (chunk) {
			chunk_mark_allocated(z, chunk);
			z->z_LChunks = chunk->c_Next;
			if (z->z_LChunks == NULL)
				z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
			slab_record_source(z, file, line);
#endif
			goto done;
		}

		/*
		 * No chunks are available in LChunks, the free chunk MUST be
		 * in the never-before-used memory area, controlled by UIndex.
		 *
		 * The consequences are very serious if our zone got corrupted
		 * so we use an explicit panic rather than a KASSERT.
		 */
		if (z->z_UIndex + 1 != z->z_NMax)
			++z->z_UIndex;
		else
			z->z_UIndex = 0;

		if (z->z_UIndex == z->z_UEndIndex)
			panic("slaballoc: corrupted zone");

		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;
			flags |= M_PASSIVE_ZERO;
		}
		chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
		slab_record_source(z, file, line);
#endif
		goto done;
	}

	/*
	 * If all zones are exhausted we need to allocate a new zone for this
	 * index.  Use M_ZERO to take advantage of pre-zero'd pages.  Also
	 * see UAlloc use above in regards to M_ZERO.  Note that when we are
	 * reusing a zone from the FreeZones list UAlloc'd data will not be
	 * zero'd, and we do not pre-zero it because we do not want to mess
	 * up the L1 cache.
	 *
	 * At least one subsystem, the tty code (see CROUND) expects
	 * power-of-2 allocations to be power-of-2 aligned.  We maintain
	 * compatibility by adjusting the base offset below.
	 */
	{
		int off;
		int *kup;

		if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) {
			TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
			--slgd->NFreeZones;
			bzero(z, sizeof(SLZone));
			z->z_Flags |= SLZF_UNOTZEROD;
		} else {
			z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
			if (z == NULL)
				goto fail;
		}

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is somewhat
		 * more complicated so don't make an exact calculation.
		 */
		off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(SLZone);
#endif

		/*
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise properly align the data according
		 * to the chunk size.
		 */
		if (powerof2(size))
			align = size;
		off = roundup2(off, align);

		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax - 1;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
		z->z_ChunkSize = size;
		z->z_CpuGd = gd;
		z->z_Cpu = gd->gd_cpuid;
		z->z_LChunksp = &z->z_LChunks;
#ifdef SLAB_DEBUG
		bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
		bzero(z->z_Sources, sizeof(z->z_Sources));
#endif
		chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
		TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~M_ZERO;	/* already zero'd */
			flags |= M_PASSIVE_ZERO;
		}
		kup = btokup(z);
		*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
		chunk_mark_allocated(z, chunk);
#ifdef SLAB_DEBUG
		slab_record_source(z, file, line);
#endif

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
				  & (ZALLOC_MAX_ZONE_SIZE - 1);
	}

done:
	++type->ks_use[gd->gd_cpuid].inuse;
	type->ks_use[gd->gd_cpuid].memuse += size;
	type->ks_use[gd->gd_cpuid].loosememuse += size;
	if (type->ks_use[gd->gd_cpuid].loosememuse >= ZoneSize) {
		/* not MP synchronized */
		type->ks_loosememuse += type->ks_use[gd->gd_cpuid].loosememuse;
		type->ks_use[gd->gd_cpuid].loosememuse = 0;
	}
	crit_exit();

	if (flags & M_ZERO)
		bzero(chunk, size);
#ifdef INVARIANTS
	else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		/* avoid accidental double-free check */
		chunk->c_Next = (void *)-1;
	}
#endif
	logmemory(malloc_end, chunk, type, size, flags);
	return(chunk);
fail:
	crit_exit();
	logmemory(malloc_end, NULL, type, size, flags);
	return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
#ifdef SLAB_DEBUG
void *
krealloc_debug(void *ptr, unsigned long size,
	       struct malloc_type *type, int flags,
	       const char *file, int line)
#else
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
#endif
{
	unsigned long osize;
	unsigned long align;
	SLZone *z;
	void *nptr;
	int *kup;

	KKASSERT((flags & M_ZERO) == 0);	/* not supported */

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
		return(_kmalloc_debug(size, type, flags, file, line));
	if (size == 0) {
		kfree(ptr, type);
		return(NULL);
	}

	/*
	 * Handle oversized allocations.  XXX we really should require
	 * that a size be passed to free() instead of this nonsense.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		osize = *kup << PAGE_SHIFT;
		if (osize == round_page(size))
			return(ptr);
		if ((nptr = _kmalloc_debug(size, type, flags,
					   file, line)) == NULL)
			return(NULL);
		bcopy(ptr, nptr, min(size, osize));
		kfree(ptr, type);
		return(nptr);
	}

	/*
	 * Get the original allocation's zone.  If the new request winds up
	 * using the same chunk size we do not have to do anything.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Allocate memory for the new request size.  Note that zoneindex has
	 * already adjusted the request size to the appropriate chunk size,
	 * which should optimize our bcopy().  Then copy and return the new
	 * pointer.
	 *
	 * Resizing a non-power-of-2 allocation to a power-of-2 size does not
	 * necessarily align the result.
	 *
	 * We can only zoneindex (to align size to the chunk size) if the new
	 * size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &align);
		if (z->z_ChunkSize == size)
			return(ptr);
	}
	if ((nptr = _kmalloc_debug(size, type, flags, file, line)) == NULL)
		return(NULL);
	bcopy(ptr, nptr, min(size, z->z_ChunkSize));
	kfree(ptr, type);
	return(nptr);
}
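
/*
 * Illustrative sketch only (not compiled): resizing behavior of
 * krealloc() above.  M_FOODATA is a hypothetical pool.
 */
#if 0
	char *p;

	p = kmalloc(100, M_FOODATA, M_WAITOK);	   /* 104-byte chunk */
	p = krealloc(p, 104, M_FOODATA, M_WAITOK); /* same chunk, same ptr */
	p = krealloc(p, 200, M_FOODATA, M_WAITOK); /* copied to a new chunk */
	kfree(p, M_FOODATA);
#endif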

size_t
kmalloc_usable_size(const void *ptr)
{
	unsigned long size;
	SLZone *z;
	int *kup;

	if (ptr == NULL)
		return 0;
	if (ptr == ZERO_LENGTH_PTR)
		return 0;

	/*
	 * Check to see if the pointer belongs to an oversized segment
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		size = *kup << PAGE_SHIFT;
		return size;
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	return (z->z_ChunkSize);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
	KKASSERT(type->ks_limit != 0);
	return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
#ifdef SLAB_DEBUG
char *
kstrdup_debug(const char *str, struct malloc_type *type,
	      const char *file, int line)
#else
char *
kstrdup(const char *str, struct malloc_type *type)
#endif
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strlen(str) + 1;
	nstr = _kmalloc_debug(zlen, type, M_WAITOK, file, line);
	bcopy(str, nstr, zlen);
	return(nstr);
}

#ifdef SLAB_DEBUG
char *
kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type,
	       const char *file, int line)
#else
char *
kstrndup(const char *str, size_t maxlen, struct malloc_type *type)
#endif
{
	int zlen;	/* length inclusive of terminating NUL */
	char *nstr;

	if (str == NULL)
		return(NULL);
	zlen = strnlen(str, maxlen) + 1;
	nstr = _kmalloc_debug(zlen, type, M_WAITOK, file, line);
	bcopy(str, nstr, zlen);
	nstr[zlen - 1] = '\0';
	return(nstr);
}

/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  RCount will be bumped so the memory should be good, but validate
 * that it really is.
 */
static void
kfree_remote(void *ptr)
{
	SLGlobalData *slgd;
	SLZone *z;
	int nfree;
	int *kup;

	slgd = &mycpu->gd_slab;
	z = ptr;
	kup = btokup(z);
	KKASSERT(*kup == -((int)mycpuid + 1));
	KKASSERT(z->z_RCount > 0);
	atomic_subtract_int(&z->z_RCount, 1);

	logmemory(free_rem_beg, z, NULL, 0L, 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
	KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
	nfree = z->z_NFree;

	/*
	 * Indicate that we will no longer be off of the ZoneAry by
	 * clearing RSignal.
	 */
	if (z->z_RChunks)
		z->z_RSignal = 0;

	/*
	 * Atomically extract the bchunks list and then process it back
	 * into the lchunks list.  We want to append our bchunks to the
	 * lchunks list and not prepend since we likely do not have
	 * cache mastership of the related data (not that it helps since
	 * we are using c_Next).
	 */
	clean_zone_rchunks(z);
	if (z->z_NFree && nfree == 0) {
		TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
	}

	check_zone_free(slgd, z);
	logmemory(free_rem_end, z, NULL, 0L, 0);
}
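
/*
 * Summary of the cross-cpu free handshake used by kfree_remote() above
 * and _kfree() below: the owning cpu sets z_RSignal when it takes a zone
 * off its ZoneAry.  A freeing cpu reads z_RSignal, bumps z_RCount if it
 * was set (pinning the zone's storage), pushes the chunk onto z_RChunks
 * with atomic ops, and sends a passive IPI to kfree_remote() only on a
 * NULL->non-NULL z_RChunks transition with z_RSignal set.  The owning
 * cpu then splices z_RChunks into z_LChunks, clears z_RSignal, and
 * drops z_RCount.
 */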
/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.
 *
 * Note: We do not attempt to update ks_loosememuse as MP races could
 *	 prevent us from checking memory limits in malloc.  YYY we may
 *	 consider updating ks_cpu.loosememuse.
 *
 * MPSAFE
 */
void
_kfree(void *ptr, struct malloc_type *type)
{
	SLZone *z;
	SLChunk *chunk;
	SLGlobalData *slgd;
	struct globaldata *gd;
	int *kup;
	unsigned long size;
	SLChunk *bchunk;
	int rsignal;

	logmemory_quick(free_beg);
	gd = mycpu;
	slgd = &gd->gd_slab;

	if (ptr == NULL)
		panic("trying to free NULL pointer");

	/*
	 * Handle special 0-byte allocations
	 */
	if (ptr == ZERO_LENGTH_PTR) {
		logmemory(free_zero, ptr, type, -1UL, 0);
		logmemory_quick(free_end);
		return;
	}

	/*
	 * Panic on bad malloc type
	 */
	if (type->ks_magic != M_MAGIC)
		panic("free: malloc type lacks magic");

	/*
	 * Handle oversized allocations.  XXX we really should require
	 * that a size be passed to free() instead of this nonsense.
	 *
	 * This code is never called via an ipi.
	 */
	kup = btokup(ptr);
	if (*kup > 0) {
		size = *kup << PAGE_SHIFT;
		*kup = 0;
#ifdef INVARIANTS
		if (use_weird_array) {
			KKASSERT(sizeof(weirdary) <= size);
			bcopy(weirdary, ptr, sizeof(weirdary));
		}
#endif
		/*
		 * NOTE: For oversized allocations we do not record the
		 *	 originating cpu.  It gets freed on the cpu calling
		 *	 kfree().  The statistics are in aggregate.
		 *
		 * note: XXX we have still inherited the interrupts-can't-block
		 *	 assumption.  An interrupt thread does not bump
		 *	 gd_intr_nesting_level so check TDF_INTTHREAD.  This is
		 *	 primarily until we can fix softupdate's assumptions
		 *	 about free().
		 */
		crit_enter();
		--type->ks_use[gd->gd_cpuid].inuse;
		type->ks_use[gd->gd_cpuid].memuse -= size;
		if (mycpu->gd_intr_nesting_level ||
		    (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
			logmemory(free_ovsz_delayed, ptr, type, size, 0);
			z = (SLZone *)ptr;
			z->z_Magic = ZALLOC_OVSZ_MAGIC;
			z->z_ChunkSize = size;

			TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
			crit_exit();
		} else {
			crit_exit();
			logmemory(free_ovsz, ptr, type, size, 0);
			kmem_slab_free(ptr, size);	/* may block */
		}
		logmemory_quick(free_end);
		return;
	}

	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (SLZone *)((uintptr_t)ptr & ZoneMask);
	kup = btokup(z);
	KKASSERT(*kup < 0);
	KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * If we do not own the zone then use atomic ops to free to the
	 * remote cpu linked list and notify the target zone using a
	 * passive message.
	 *
	 * The target zone cannot be deallocated while we own a chunk of it,
	 * so the zone header's storage is stable until the very moment
	 * we adjust z_RChunks.  After that we cannot safely dereference (z).
	 *
	 * (no critical section needed)
	 */
	if (z->z_CpuGd != gd) {
		/*
		 * Making these adjustments now allows us to avoid passing
		 * (type) to the remote cpu.  Note that inuse/memuse is being
		 * adjusted on OUR cpu, not the zone cpu, but it should all
		 * still sum up properly and cancel out.
		 */
		crit_enter();
		--type->ks_use[gd->gd_cpuid].inuse;
		type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
		crit_exit();

		/*
		 * WARNING! This code competes with other cpus.  Once we
		 *	    successfully link the chunk to RChunks the remote
		 *	    cpu can rip z's storage out from under us.
		 *
		 *	    Bumping RCount prevents z's storage from getting
		 *	    ripped out.
		 */
		rsignal = z->z_RSignal;
		cpu_lfence();
		if (rsignal)
			atomic_add_int(&z->z_RCount, 1);

		chunk = ptr;
		for (;;) {
			bchunk = z->z_RChunks;
			cpu_ccfence();
			chunk->c_Next = bchunk;
			cpu_sfence();

			if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
				break;
		}

		/*
		 * We have to signal the remote cpu if our actions will cause
		 * the remote zone to be placed back on ZoneAry so it can
		 * move the zone back on.
		 *
		 * We only need to deal with NULL->non-NULL RChunk transitions
		 * and only if z_RSignal is set.  We interlock by reading
		 * rsignal before adding our chunk to RChunks.  This should
		 * result in virtually no IPI traffic.
		 *
		 * We can use a passive IPI to reduce overhead even further.
		 */
		if (bchunk == NULL && rsignal) {
			logmemory(free_request, ptr, type,
				  (unsigned long)z->z_ChunkSize, 0);
			lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
			/* z can get ripped out from under us from this point on */
		} else if (rsignal) {
			atomic_subtract_int(&z->z_RCount, 1);
			/* z can get ripped out from under us from this point on */
		}
		logmemory_quick(free_end);
		return;
	}

	/*
	 * kfree locally
	 */
	logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);

	crit_enter();
	chunk = ptr;
	chunk_mark_free(z, chunk);

	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on
	 * the odd address), and so forth.  XXX needs more work, see the
	 * old malloc code.
	 */
#ifdef INVARIANTS
	if (use_weird_array) {
		if (z->z_ChunkSize < sizeof(weirdary))
			bcopy(weirdary, chunk, z->z_ChunkSize);
		else
			bcopy(weirdary, chunk, sizeof(weirdary));
	}
#endif

	/*
	 * Add this free non-zero'd chunk to a linked list for reuse.  Add
	 * to the front of the linked list so it is more likely to be
	 * reallocated, since it is already in our L1 cache.
	 */
#ifdef INVARIANTS
	if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		panic("BADFREE %p", chunk);
#endif
	chunk->c_Next = z->z_LChunks;
	z->z_LChunks = chunk;
	if (chunk->c_Next == NULL)
		z->z_LChunksp = &chunk->c_Next;

#ifdef INVARIANTS
	if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		panic("BADFREE2");
#endif

	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.  A fully allocated
	 * zone that sees its first free is considered 'mature' and is placed
	 * at the head, giving the system time to potentially free the
	 * remaining entries even while other allocations are going on and
	 * making the zone freeable.
	 */
	if (z->z_NFree++ == 0)
		TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);

	--type->ks_use[gd->gd_cpuid].inuse;
	type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;

	check_zone_free(slgd, z);
	logmemory_quick(free_end);
	crit_exit();
}

/*
 * Cleanup slabs which are hanging around due to RChunks or which are
 * wholly free and can be moved to the free list if not moved by other
 * means.
 *
 * Called once every 10 seconds on all cpus.
 */
void
slab_cleanup(void)
{
	SLGlobalData *slgd = &mycpu->gd_slab;
	SLZone *z;
	int i;

	crit_enter();
	for (i = 0; i < NZONES; ++i) {
		if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL)
			continue;

		/*
		 * Scan zones.
		 */
		while (z) {
			/*
			 * Shift all RChunks to the end of the LChunks list.
			 * This is an O(1) operation.
			 *
			 * Then free the zone if possible.
			 */
			clean_zone_rchunks(z);
			z = check_zone_free(slgd, z);
		}
	}
	crit_exit();
}

#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static void
chunk_mark_allocated(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) == 0,
		("memory chunk %p is already allocated!", chunk));
	*bitptr |= 1 << bitdex;
}

static void
chunk_mark_free(SLZone *z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	uint32_t *bitptr;

	KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
	KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
		("memory chunk %p bit index %d is illegal!", chunk, bitdex));
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	KASSERT((*bitptr & (1 << bitdex)) != 0,
		("memory chunk %p is already free!", chunk));
	*bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
 */
void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
	vm_size_t i;
	vm_offset_t addr;
	int count, vmflags, base_vmflags;
	vm_page_t mbase = NULL;
	vm_page_t m;
	thread_t td;

	size = round_page(size);
	addr = vm_map_min(kernel_map);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	crit_enter();
	vm_map_lock(kernel_map);
	if (vm_map_findspace(kernel_map, addr, size, align, 0, &addr)) {
		vm_map_unlock(kernel_map);
		if ((flags & M_NULLOK) == 0)
			panic("kmem_slab_alloc(): kernel_map ran out of space!");
		vm_map_entry_release(count);
		crit_exit();
		return(NULL);
	}

	/*
	 * kernel_object maps 1:1 to kernel_map.
	 */
	vm_object_hold(kernel_object);
	vm_object_reference_locked(kernel_object);
	vm_map_insert(kernel_map, &count,
		      kernel_object, NULL,
		      addr, NULL,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_SUBSYS_KMALLOC,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_object_drop(kernel_object);
	vm_map_set_wired_quick(kernel_map, addr, size, &count);
	vm_map_unlock(kernel_map);

	td = curthread;

	base_vmflags = 0;
	if (flags & M_ZERO)
		base_vmflags |= VM_ALLOC_ZERO;
	if (flags & M_USE_RESERVE)
		base_vmflags |= VM_ALLOC_SYSTEM;
	if (flags & M_USE_INTERRUPT_RESERVE)
		base_vmflags |= VM_ALLOC_INTERRUPT;
	if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
		panic("kmem_slab_alloc: bad flags %08x (%p)",
		      flags, ((int **)&size)[-1]);
	}

	/*
	 * Allocate the pages.  Do not map them yet.  VM_ALLOC_NORMAL can only
	 * be set if we are not preempting.
	 *
	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
	 * implied in this case), though I'm not sure if we really need to
	 * do that.
	 */
	vmflags = base_vmflags;
	if (flags & M_WAITOK) {
		if (td->td_preempted)
			vmflags |= VM_ALLOC_SYSTEM;
		else
			vmflags |= VM_ALLOC_NORMAL;
	}

	vm_object_hold(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc(kernel_object, OFF_TO_IDX(addr + i), vmflags);
		if (i == 0)
			mbase = m;

		/*
		 * If the allocation failed we either return NULL or we retry.
		 *
		 * If M_WAITOK is specified we wait for more memory and retry.
		 * If M_WAITOK is specified from a preemption we yield instead
		 * of waiting.  Livelock will not occur because the interrupt
		 * thread will not be preempting anyone the second time around
		 * after the yield.
		 */
		if (m == NULL) {
			if (flags & M_WAITOK) {
				if (td->td_preempted) {
					lwkt_switch();
				} else {
					vm_wait(0);
				}
				i -= PAGE_SIZE;	/* retry */
				continue;
			}
			break;
		}
	}

	/*
	 * Check and deal with an allocation failure
	 */
	if (i != size) {
		while (i != 0) {
			i -= PAGE_SIZE;
			m = vm_page_lookup(kernel_object, OFF_TO_IDX(addr + i));
			/* page should already be busy */
			vm_page_free(m);
		}
		vm_map_lock(kernel_map);
		vm_map_delete(kernel_map, addr, addr + size, &count);
		vm_map_unlock(kernel_map);
		vm_object_drop(kernel_object);

		vm_map_entry_release(count);
		crit_exit();
		return(NULL);
	}

	/*
	 * Success!
	 *
	 * NOTE: The VM pages are still busied.  mbase points to the first
	 *	 one but we have to iterate via vm_page_next()
	 */
	vm_object_drop(kernel_object);
	crit_exit();

	/*
	 * Enter the pages into the pmap and deal with M_ZERO.
	 */
	m = mbase;
	i = 0;

	while (i < size) {
		/*
		 * page should already be busy
		 */
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		pmap_enter(kernel_pmap, addr + i, m,
			   VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL);
		if (flags & M_ZERO)
			pagezero((char *)addr + i);
		KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
		vm_page_flag_set(m, PG_REFERENCED);
		vm_page_wakeup(m);

		i += PAGE_SIZE;
		vm_object_hold(kernel_object);
		m = vm_page_next(m);
		vm_object_drop(kernel_object);
	}
	smp_invltlb();
	vm_map_entry_release(count);
	return((void *)addr);
}

/*
 * kmem_slab_free()
 */
void
kmem_slab_free(void *ptr, vm_size_t size)
{
	crit_enter();
	vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
	crit_exit();
}
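
/*
 * Illustrative sketch only (not compiled): the raw backend interface
 * used by the slab code above.  Sizes are rounded up to whole pages and
 * the same size must be passed back to kmem_slab_free().
 */
#if 0
	void *base;

	base = kmem_slab_alloc(ZoneSize, ZoneSize, M_WAITOK | M_ZERO);
	kmem_slab_free(base, ZoneSize);
#endif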