/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.43 2006/09/11 20:25:01 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + \
			sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	&use_malloc_pattern, 0, "");
#endif

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
        printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
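
/*
 * Worked example of the sizing logic above (added for illustration only;
 * the numbers assume 4K pages and the typical 32K/128K zone bounds and the
 * ~16K ZALLOC_ZONE_LIMIT mentioned elsewhere in this file, and are not
 * additional configuration):
 *
 *	256MB of ram	-> limsize = 256MB, usesize = 262144 (memory in KB)
 *	ZoneSize	doubles 32K -> 64K -> 128K, stopping at the maximum
 *	ZoneLimit	= ZoneSize / 4 = 32K, clamped to ZALLOC_ZONE_LIMIT
 *	ZoneMask	= 0x1ffff
 *	ZonePageCount	= 32
 *
 * With those values any request of ZoneLimit bytes or more, or any exact
 * multiple of PAGE_SIZE, bypasses the zones and goes to kmem_slab_alloc()
 * directly.
 */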

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
            ttl, type->ks_shortdesc);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
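
/*
 * Illustrative examples of the mapping performed by zoneindex() (derived
 * from the chunking table at the top of the file; these are worked numbers,
 * not additional code):
 *
 *	request  100 bytes -> rounded to  104 (8-byte chunking),   zi = 12
 *	request  200 bytes -> rounded to  208 (16-byte chunking),  zi = 20
 *	request 1000 bytes -> rounded to 1024 (64-byte chunking),  zi = 39
 *	request 3000 bytes -> rounded to 3072 (256-byte chunking), zi = 51
 *
 * The rounded size becomes the zone's chunk size, so every chunk hanging
 * off ZoneAry[zi] is exactly that size.
 */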

/*
 * malloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */

void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;	/* not MP synchronized */
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK) {
                logmemory(malloc, NULL, type, size, flags);
                return(NULL);
            }
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
        logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
        return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kmem_slab_free(z, ZoneSize);	/* may block */
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            kmem_slab_free(z, z->z_ChunkSize);	/* may block */
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL) {
            logmemory(malloc, NULL, type, size, flags);
            return(NULL);
        }
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        kup->ku_cpu = gd->gd_cpuid;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);

        /*
         * Remove us from the ZoneAry[] when we become empty
         */
        if (--z->z_NFree == 0) {
            slgd->ZoneAry[zi] = z->z_Next;
            z->z_Next = NULL;
        }

        /*
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
         */
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
                /*
                 * Diagnostic: c_Next is not total garbage.
                 */
                KKASSERT(chunk->c_Next == NULL ||
                    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
                if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
                chunk_mark_allocated(z, chunk);
#endif
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
                goto done;
            }
            ++z->z_FirstFreePg;
        }

        /*
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        else
            z->z_UIndex = 0;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
#if defined(INVARIANTS)
        chunk_mark_allocated(z, chunk);
#endif
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.
     * We maintain compatibility by adjusting the base offset below.
     */
    {
        int off;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
        }

        /*
         * How big is the base structure?
         */
#if defined(INVARIANTS)
        /*
         * Make room for z_Bitmap.  An exact calculation is somewhat more
         * complicated so don't make an exact calculation.
         */
        off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
        bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
        off = sizeof(SLZone);
#endif

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_FirstFreePg = ZonePageCount;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;	/* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }
#if defined(INVARIANTS)
        chunk_mark_allocated(z, chunk);
#endif

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                        & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();
    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
        if (use_malloc_pattern) {
            for (i = 0; i < size; i += sizeof(int)) {
                *(int *)((char *)chunk + i) = -1;
            }
        }
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
#endif
    logmemory(malloc, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(kmalloc(size, type, flags));
    if (size == 0) {
        kfree(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
        struct kmemusage *kup;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
                return(ptr);
            if ((nptr = kmalloc(size, type, flags)) == NULL)
                return(NULL);
            bcopy(ptr, nptr, min(size, osize));
            kfree(ptr, type);
            return(nptr);
        }
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
        return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = kmalloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free()	(SLAB ALLOCATOR)
 *
 *	Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
    kfree(ptr, *(struct malloc_type **)ptr);
}

#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
        logmemory(free_zero, ptr, type, -1, 0);
        logmemory_quick(free_end);
        return;
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
        struct kmemusage *kup;
        unsigned long size;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            kup->ku_pagecnt = 0;
#ifdef INVARIANTS
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));
#endif
            /*
             * note: we always adjust our cpu's slot, not the originating
             * cpu (kup->ku_cpu).  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
             */
            crit_enter();
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level ||
                (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
                logmemory(free_ovsz_delayed, ptr, type, size, 0);
                z = (SLZone *)ptr;
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
                crit_exit();
            } else {
                crit_exit();
                logmemory(free_ovsz, ptr, type, size, 0);
                kmem_slab_free(ptr, size);	/* may block */
            }
            logmemory_quick(free_end);
            return;
        }
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * message is sent.
     */
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
#ifdef SMP
        logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
        lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
#else
        panic("Corrupt SLZone");
#endif
        logmemory_quick(free_end);
        return;
    }

    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        SLChunk *scan;

        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
            if (scan == chunk)
                panic("Double free at %p", chunk);
        }
    }
    chunk_mark_free(z, chunk);
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
    }
    logmemory_quick(free_end);
    crit_exit();
}
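
/*
 * Sketch of the pointer-to-zone arithmetic used by kfree() above (added for
 * illustration only; the code above is the authoritative version).  Because
 * zones are allocated with ZoneSize alignment, masking a chunk address
 * recovers its zone header in O(1) and also tells us which cpu owns it:
 *
 *	SLZone *z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
 *
 *	if (z->z_CpuGd != mycpu)
 *		... queue the free to the owning cpu via a passive IPI
 *		    (free_remote() completes it there) ...
 *	else
 *		... link the chunk back onto z->z_PageAry[] locally ...
 *
 * For example, with ZoneSize = 128K (ZoneMask = 0x1ffff), a chunk at
 * 0xc1234567 maps back to a zone header at 0xc1220000.
 */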

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif
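
/*
 * Example of the bitmap indexing used by the helpers above (illustrative
 * numbers only, not additional code): in a zone with z_ChunkSize = 64, a
 * chunk 6592 bytes past z_BasePtr has bitdex = 6592 / 64 = 103, so its
 * allocation state is bit (103 & 31) == 7 of z_Bitmap[103 >> 5] == z_Bitmap[3].
 */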

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 *
 *	This routine will currently obtain the BGL.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    vm_offset_t offset;
    int count, vmflags, base_vmflags;
    thread_t td;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * cannot block.
     */
    if (flags & M_RNOWAIT) {
        if (try_mplock() == 0)
            return(NULL);
    } else {
        get_mplock();
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(map);
    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
        vm_map_unlock(map);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        crit_exit();
        vm_map_entry_release(count);
        rel_mplock();
        return(NULL);
    }
    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
                  kernel_object, offset, addr, addr + size,
                  VM_MAPTYPE_NORMAL,
                  VM_PROT_ALL, VM_PROT_ALL,
                  0);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
        panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;
        vm_pindex_t idx = OFF_TO_IDX(offset + i);

        /*
         * VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
         */
        vmflags = base_vmflags;
        if (flags & M_WAITOK) {
            if (td->td_preempted)
                vmflags |= VM_ALLOC_SYSTEM;
            else
                vmflags |= VM_ALLOC_NORMAL;
        }

        m = vm_page_alloc(kernel_object, idx, vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    vm_map_unlock(map);
                    lwkt_yield();
                    vm_map_lock(map);
                } else {
                    vm_map_unlock(map);
                    vm_wait();
                    vm_map_lock(map);
                }
                i -= PAGE_SIZE;	/* retry */
                continue;
            }

            /*
             * We were unable to recover, cleanup and return NULL
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
                vm_page_free(m);
            }
            vm_map_delete(map, addr, addr + size, &count);
            vm_map_unlock(map);
            crit_exit();
            vm_map_entry_release(count);
            rel_mplock();
            return(NULL);
        }
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     */
    vm_map_set_wired_quick(map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
    }
    vm_map_unlock(map);
    vm_map_entry_release(count);
    rel_mplock();
    return((void *)addr);
}

/*
 * kmem_slab_free()
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    get_mplock();
    crit_enter();
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
    rel_mplock();
}
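
/*
 * Typical use of this allocator from kernel code, shown as an illustrative
 * sketch only (the malloc type "M_EXAMPLE" and struct foo are hypothetical
 * and are not part of this file):
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");
 *
 *	struct foo *fp;
 *
 *	fp = kmalloc(sizeof(*fp), M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	kfree(fp, M_EXAMPLE);
 *
 * Code that cannot block should pass M_RNOWAIT (usually together with
 * M_NULLOK) and must be prepared for a NULL return.
 */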