/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.24 2004/07/29 08:50:09 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical, so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
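 *
 * (Cross-cpu frees are detected in free() below by comparing the zone's
 * z_CpuGd against the current cpu; the request is then forwarded to the
 * owning cpu via lwkt_send_ipiq()/free_remote().)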
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZonePageLimit;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
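 *
 * As a rough example (assuming the typical 32K/128K bounds): with 256MB of
 * ram limsize/1024 is 256K, so the doubling loop below stops at the 128K
 * maximum; with 32MB of ram it stops at the 32K minimum.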
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
	limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageLimit = PAGE_SIZE * 4;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
	printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
	limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	printf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
	    ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
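 *
 * For example, a 100 byte request is rounded up to the 104 byte chunk
 * (zone index 12) and a 300 byte request is rounded up to the 320 byte
 * chunk (zone index 25).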
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

/*
 * malloc() (SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 *	M_FAILSAFE	- Failsafe allocation.  When the allocation must
 *			  succeed, attempt to get out of any preemption
 *			  context and allocate from the cache, else block
 *			  (even though we might be blocking from an
 *			  interrupt), or panic.
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;

    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  XXX the original malloc code looped, but this tended to
     * simply deadlock the computer.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;
	if (ttl >= type->ks_limit) {
	    if (flags & (M_RNOWAIT|M_NULLOK))
		return(NULL);
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0)
	return(ZERO_LENGTH_PTR);

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
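     *
     * Completely-free zones accumulated by free() are trimmed back down to
     * ZONE_RELS_THRESH here, where it is safe to call kmem_slab_free().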
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    kmem_slab_free(z, ZoneSize);	/* may block */
	}
	crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    kmem_slab_free(z, z->z_ChunkSize);	/* may block */
	}
	crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these, so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE.
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
	struct kmemusage *kup;

	size = round_page(size);
	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
	if (chunk == NULL)
	    return(NULL);
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;
	kup = btokup(chunk);
	kup->ku_pagecnt = size / PAGE_SIZE;
	kup->ku_cpu = gd->gd_cpuid;
	crit_enter();
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
	KKASSERT(z->z_NFree > 0);

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (--z->z_NFree == 0) {
	    slgd->ZoneAry[zi] = z->z_Next;
	    z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
		/*
		 * Diagnostic: c_Next is not total garbage.
		 */
		KKASSERT(chunk->c_Next == NULL ||
			((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
		if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
		    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
		if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
		    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
#endif
		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
		goto done;
	    }
	    ++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory, so
	 * it must be available in the never-before-used-memory area
	 * governed by UIndex.  The consequences are very serious if our zone
	 * got corrupted so we use an explicit panic rather than a KASSERT.
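	 *
	 * z_UIndex advances with wrap-around at z_NMax; coming all the way
	 * back around to z_UEndIndex means every chunk has supposedly been
	 * handed out already, contradicting the non-zero z_NFree, hence the
	 * panic.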
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    z->z_UIndex = z->z_UIndex + 1;
	else
	    z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * the UIndex allocation above in regards to M_ZERO.  Note that when we
     * are reusing a zone from the FreeZones list the UIndex'd data will not
     * be zero'd, and we do not pre-zero it because we do not want to mess
     * up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND), expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
	int off;

	if ((z = slgd->FreeZones) != NULL) {
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    bzero(z, sizeof(SLZone));
	    z->z_Flags |= SLZF_UNOTZEROD;
	} else {
	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
	    if (z == NULL)
		goto fail;
	}

	/*
	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
	 * Otherwise just 8-byte align the data.
	 */
	if ((size | (size - 1)) + 1 == (size << 1))
	    off = (sizeof(SLZone) + size - 1) & ~(size - 1);
	else
	    off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
	z->z_Magic = ZALLOC_SLAB_MAGIC;
	z->z_ZoneIndex = zi;
	z->z_NMax = (ZoneSize - off) / size;
	z->z_NFree = z->z_NMax - 1;
	z->z_BasePtr = (char *)z + off;
	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
	z->z_ChunkSize = size;
	z->z_FirstFreePg = ZonePageCount;
	z->z_CpuGd = gd;
	z->z_Cpu = gd->gd_cpuid;
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	z->z_Next = slgd->ZoneAry[zi];
	slgd->ZoneAry[zi] = z;
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;	/* already zero'd */
	    flags |= M_PASSIVE_ZERO;
	}

	/*
	 * Slide the base index for initial allocations out of the next
	 * zone we create so we do not over-weight the lower part of the
	 * cpu memory caches.
	 */
	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
			& (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;
    crit_exit();
    if (flags & M_ZERO)
	bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
	chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
#endif
    return(chunk);
fail:
    crit_exit();
    return(NULL);
}

void *
realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(malloc(size, type, flags));
    if (size == 0) {
	free(ptr, type);
	return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
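     *
     * btokup() maps the address to its struct kmemusage entry; a non-zero
     * ku_pagecnt identifies an oversized, kmem-backed allocation and gives
     * back its size in pages.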
     */
    {
	struct kmemusage *kup;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    osize = kup->ku_pagecnt << PAGE_SHIFT;
	    if (osize == round_page(size))
		return(ptr);
	    if ((nptr = malloc(size, type, flags)) == NULL)
		return(NULL);
	    bcopy(ptr, nptr, min(size, osize));
	    free(ptr, type);
	    return(nptr);
	}
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
	return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = malloc(size, type, flags)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    free(ptr, type);
    return(nptr);
}

char *
strdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
	return(NULL);
    zlen = strlen(str) + 1;
    nstr = malloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free() (SLAB ALLOCATOR)
 *
 *	Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    free(ptr, *(struct malloc_type **)ptr);
}

#endif

void
free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
	panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR)
	return;

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
	struct kmemusage *kup;
	unsigned long size;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    size = kup->ku_pagecnt << PAGE_SHIFT;
	    kup->ku_pagecnt = 0;
#ifdef INVARIANTS
	    KKASSERT(sizeof(weirdary) <= size);
	    bcopy(weirdary, ptr, sizeof(weirdary));
#endif
	    /*
	     * note: we always adjust our cpu's slot, not the originating
	     * cpu (kup->ku_cpu).  The statistics are in aggregate.
	     *
	     * note: XXX we have still inherited the interrupts-can't-block
	     * assumption.  An interrupt thread does not bump
	     * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	     * primarily until we can fix softupdates' assumptions about free().
	     */
	    crit_enter();
	    --type->ks_inuse[gd->gd_cpuid];
	    type->ks_memuse[gd->gd_cpuid] -= size;
	    if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
		z = (SLZone *)ptr;
		z->z_Magic = ZALLOC_OVSZ_MAGIC;
		z->z_Next = slgd->FreeOvZones;
		z->z_ChunkSize = size;
		slgd->FreeOvZones = z;
		crit_exit();
	    } else {
		crit_exit();
		kmem_slab_free(ptr, size);	/* may block */
	    }
	    return;
	}
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
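     *
     * For example, with a 128K ZoneSize, masking the pointer with
     * ~(uintptr_t)ZoneMask (i.e. ~0x1ffff) recovers the SLZone header at
     * the base of the zone the chunk was allocated from.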
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.
     */
    if (z->z_CpuGd != gd) {
	*(struct malloc_type **)ptr = type;
#ifdef SMP
	lwkt_send_ipiq(z->z_CpuGd, free_remote, ptr);
#else
	panic("Corrupt SLZone");
#endif
	return;
    }

    if (type->ks_magic != M_MAGIC)
	panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
	SLChunk *scan;
	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
	    if (scan == chunk)
		panic("Double free at %p", chunk);
	}
    }
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
	z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
	SLZone **pz;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Magic = -1;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
    }
    crit_exit();
}

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
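 *
 *	The allocation proceeds in three steps: reserve a properly aligned
 *	range in kernel_map, allocate (and wait/retry for) each backing
 *	page, then wire and pmap_enter the pages, zeroing them if M_ZERO
 *	was requested and PG_ZERO was not already set.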
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	mess with CACHE pages, but if M_FAILSAFE is set we can do a
 *	yield to become non-preempting and try again inclusive of
 *	cache pages.
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    vm_offset_t offset;
    int count;
    thread_t td;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);

    /*
     * Reserve properly aligned space from kernel_map
     */
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(map);
    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
	vm_map_unlock(map);
	if ((flags & (M_RNOWAIT|M_NULLOK)) == 0)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	crit_exit();
	vm_map_entry_release(count);
	if ((flags & (M_FAILSAFE|M_NULLOK)) == M_FAILSAFE)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	return(NULL);
    }
    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
		    kernel_object, offset, addr, addr + size,
		    VM_PROT_ALL, VM_PROT_ALL, 0);

    td = curthread;

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;
	vm_pindex_t idx = OFF_TO_IDX(offset + i);
	int vmflags = 0;

	if (flags & M_ZERO)
	    vmflags |= VM_ALLOC_ZERO;
	if (flags & M_USE_RESERVE)
	    vmflags |= VM_ALLOC_SYSTEM;
	if (flags & M_USE_INTERRUPT_RESERVE)
	    vmflags |= VM_ALLOC_INTERRUPT;
	if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
	    panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

	/*
	 * Never set VM_ALLOC_NORMAL during a preemption because this allows
	 * allocation out of the VM page cache and could cause mainline kernel
	 * code working on VM objects to get confused.
	 */
	if (flags & (M_FAILSAFE|M_WAITOK)) {
	    if (td->td_preempted) {
		vmflags |= VM_ALLOC_SYSTEM;
	    } else {
		vmflags |= VM_ALLOC_NORMAL;
	    }
	}

	m = vm_page_alloc(kernel_object, idx, vmflags);

	/*
	 * If the allocation failed we either return NULL or we retry.
	 *
	 * If M_WAITOK or M_FAILSAFE is set we retry.  Note that M_WAITOK
	 * (and M_FAILSAFE) can be specified from an interrupt.  M_FAILSAFE
	 * generates a warning or a panic.
	 *
	 * If we are preempting a thread we yield instead of blocking.  Both
	 * get us out from under a preemption, but yielding will get the cpu
	 * back more quickly.  Livelock does not occur because we will not
	 * be preempting anyone the second time around.
	 */
	if (m == NULL) {
	    if (flags & (M_FAILSAFE|M_WAITOK)) {
		if (td->td_preempted) {
		    if (flags & M_FAILSAFE) {
			printf("malloc: M_WAITOK from preemption would block"
				" try failsafe yield/block\n");
		    }
		    vm_map_unlock(map);
		    lwkt_yield();
		    vm_map_lock(map);
		} else {
		    vm_map_unlock(map);
		    vm_wait();
		    vm_map_lock(map);
		}
		i -= PAGE_SIZE;	/* retry */
		continue;
	    }

	    /*
	     * We were unable to recover, cleanup and return NULL
	     */
	    while (i != 0) {
		i -= PAGE_SIZE;
		m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
		vm_page_free(m);
	    }
	    vm_map_delete(map, addr, addr + size, &count);
	    vm_map_unlock(map);
	    crit_exit();
	    vm_map_entry_release(count);
	    return(NULL);
	}
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     */
    vm_map_set_wired_quick(map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
	m->valid = VM_PAGE_BITS_ALL;
	vm_page_wire(m);
	vm_page_wakeup(m);
	pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
	    bzero((char *)addr + i, PAGE_SIZE);
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
    }
    vm_map_unlock(map);
    vm_map_entry_release(count);
    return((void *)addr);
}

static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
}