/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.19 2004/03/12 22:29:21 joerg Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is the VM reserved for the
 * zones: ~80 zones * 128K = 10MB of VM per cpu.  In a kernel
 * implementation all this memory will be physical, so the zone size is
 * adjusted downward on machines with less physical memory.  The upside
 * is that overhead is bounded... this is the *worst* case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *    Alloc Size      Chunking        Number of zones
 *    0-127           8               16
 *    128-255         16              8
 *    256-511         32              8
 *    512-1023        64              8
 *    1024-2047       128             8
 *    2048-4095       256             8
 *    4096-8191       512             8
 *    8192-16383      1024            8
 *    16384-32767     2048            8
 *    (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *    Allocations >= ZoneLimit go directly to kmem.
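 *
 *    As a worked example (illustrative only, following the table above):
 *    a 100 byte request falls in the 0-127 range with 8-byte chunking,
 *    so it is rounded up to 104 bytes and served from the 104-byte chunk
 *    zone; a 200 byte request rounds up to the 208-byte chunk zone.
 *    zoneindex() below performs exactly this mapping.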
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)    (sizeof(ary)/sizeof((ary)[0]))

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZonePageLimit;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE      8           /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH    2           /* threshold number of zones */
#define IN_SAME_PAGE_MASK   (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR      0xdeadc0de
#define MAX_COPY        sizeof(weirdary)
#define ZERO_LENGTH_PTR ((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
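 *
 * Tracing the sizing loop below (illustrative, assuming the typical
 * 32K min / 128K max bounds): with 256MB of ram, usesize = 262144,
 * and ZoneSize doubles from 32K to 64K to 128K, stopping at the
 * ZALLOC_MAX_ZONE_SIZE cap.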
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);    /* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageLimit = PAGE_SIZE * 4;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
        printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        printf("malloc_uninit: %ld bytes of '%s' still allocated (aggregated over all cpus)\n",
            ttl, type->ks_shortdesc);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
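 *
 * Each range rounds the request up with (n + mask) & ~mask, the usual
 * bit trick for rounding up to a power-of-2 multiple: for example,
 * (200 + 15) & ~15 yields 208, the next multiple of 16.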
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;  /* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);              /* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %u", n);
    return(0);
}

/*
 * malloc() (SLAB ALLOCATOR)
 *
 *    Allocate memory via the slab allocator.  If the request is too large,
 *    or if its size is an exact multiple of PAGE_SIZE, we fall back to the
 *    KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *    &SlabMisc if you don't care.
 *
 *    M_RNOWAIT       - return NULL instead of blocking.
 *    M_ZERO          - zero the returned memory.
 *    M_USE_RESERVE   - allow greater drawdown of the free list
 *    M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 *    M_FAILSAFE      - Failsafe allocation: when the allocation must
 *                      succeed, attempt to get out of any preemption context
 *                      and allocate from the cache, else block (even though
 *                      we might be blocking from an interrupt), or panic.
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;

    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  XXX the original malloc code looped, but this tended to
     * simply deadlock the computer.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;
        if (ttl >= type->ks_limit) {
            if (flags & (M_RNOWAIT|M_NULLOK))
                return(NULL);
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * Adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0)
        return(ZERO_LENGTH_PTR);

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
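     *
     * The NFreeZones test is repeated inside the critical section below
     * because we can be raced between the unprotected outer test and
     * crit_enter(); the "crit sect race" note in the code marks the
     * re-test.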
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {      /* crit sect race */
            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kmem_slab_free(z, ZoneSize);        /* may block */
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            kmem_slab_free(z, z->z_ChunkSize);  /* may block */
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE.
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL)
            return(NULL);
        flags &= ~M_ZERO;       /* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        kup->ku_cpu = gd->gd_cpuid;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);

        /*
         * Remove us from the ZoneAry[] when we become empty
         */
        if (--z->z_NFree == 0) {
            slgd->ZoneAry[zi] = z->z_Next;
            z->z_Next = NULL;
        }

        /*
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
         */
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
                /*
                 * Diagnostic: c_Next is not total garbage.
                 */
                KKASSERT(chunk->c_Next == NULL ||
                        ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                        ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
                if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
#endif
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
                goto done;
            }
            ++z->z_FirstFreePg;
        }

        /*
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
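         *
         * UIndex advances modulo z_NMax.  Arriving back at UEndIndex (the
         * index we started handing out chunks from) would mean every chunk
         * has been used even though z_NFree claimed otherwise, hence the
         * panic below.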
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        else
            z->z_UIndex = 0;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * the UIndex use above in regards to M_ZERO.  Note that when we are
     * reusing a zone from the FreeZones list UAlloc'd data will not be
     * zero'd, and we do not pre-zero it because we do not want to mess up
     * the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
        }

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (sizeof(SLZone) + size - 1) & ~(size - 1);
        else
            off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_FirstFreePg = ZonePageCount;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;   /* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                        & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;
    crit_exit();
    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
        chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
#endif
    return(chunk);
fail:
    crit_exit();
    return(NULL);
}

void *
realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(malloc(size, type, flags));
    if (size == 0) {
        free(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
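     *
     * An allocation that went directly to kmem is recognized by a nonzero
     * ku_pagecnt in the kmemusage entry looked up via btokup().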
     */
    {
        struct kmemusage *kup;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
                return(ptr);
            if ((nptr = malloc(size, type, flags)) == NULL)
                return(NULL);
            bcopy(ptr, nptr, min(size, osize));
            free(ptr, type);
            return(nptr);
        }
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
        return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = malloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    free(ptr, type);
    return(nptr);
}

#ifdef SMP
/*
 * free() (SLAB ALLOCATOR)
 *
 *    Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    free(ptr, *(struct malloc_type **)ptr);
}

#endif

void
free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR)
        return;

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an IPI.
     */
    {
        struct kmemusage *kup;
        unsigned long size;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            kup->ku_pagecnt = 0;
#ifdef INVARIANTS
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));
#endif
            /*
             * note: we always adjust our cpu's slot, not the originating
             * cpu (kup->ku_cpu).  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
             */
            crit_enter();
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
                z = (SLZone *)ptr;
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
                crit_exit();
            } else {
                crit_exit();
                kmem_slab_free(ptr, size);      /* may block */
            }
            return;
        }
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  The freeing code does not need the byte count
     * unless DIAGNOSTIC is set.
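     *
     * The malloc_type pointer is stashed in the first bytes of the freed
     * chunk itself, so free_remote() on the owning cpu can recover it from
     * the single IPI argument.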
     */
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
#ifdef SMP
        lwkt_send_ipiq(z->z_CpuGd, free_remote, ptr);
#else
        panic("Corrupt SLZone");
#endif
        return;
    }

    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        SLChunk *scan;
        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
            if (scan == chunk)
                panic("Double free at %p", chunk);
        }
    }
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE %p\n", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
    }
    crit_exit();
}

/*
 * kmem_slab_alloc()
 *
 *    Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *    specified alignment.  M_* flags are expected in the flags field.
 *
 *    Alignment must be a multiple of PAGE_SIZE.
 *
 *    NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *    but when we move zalloc() over to use this function as its backend
 *    we will have to switch to kreserve/krelease and call reserve(0)
 *    after the new space is made available.
 *
 *    Interrupt code which has preempted other code is not allowed to
 *    mess with CACHE pages, but if M_FAILSAFE is set we can do a
 *    yield to become non-preempting and try again inclusive of
 *    cache pages.
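 *
 *    The M_* reserve flags are translated to VM_ALLOC_* levels in the
 *    page allocation loop below: M_USE_RESERVE maps to VM_ALLOC_SYSTEM
 *    and M_USE_INTERRUPT_RESERVE to VM_ALLOC_INTERRUPT.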
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    vm_offset_t offset;
    int count;
    thread_t td;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);

    /*
     * Reserve properly aligned space from kernel_map
     */
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(map);
    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
        vm_map_unlock(map);
        if ((flags & (M_RNOWAIT|M_NULLOK)) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        crit_exit();
        vm_map_entry_release(count);
        if ((flags & (M_FAILSAFE|M_NULLOK)) == M_FAILSAFE)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        return(NULL);
    }
    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
                    kernel_object, offset, addr, addr + size,
                    VM_PROT_ALL, VM_PROT_ALL, 0);

    td = curthread;

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;
        vm_pindex_t idx = OFF_TO_IDX(offset + i);
        int vmflags = 0;

        if (flags & M_ZERO)
            vmflags |= VM_ALLOC_ZERO;
        if (flags & M_USE_RESERVE)
            vmflags |= VM_ALLOC_SYSTEM;
        if (flags & M_USE_INTERRUPT_RESERVE)
            vmflags |= VM_ALLOC_INTERRUPT;
        if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
            panic("kmem_slab_alloc: bad flags %08x (%p)\n", flags, ((int **)&size)[-1]);

        /*
         * Never set VM_ALLOC_NORMAL during a preemption because this allows
         * allocation out of the VM page cache and could cause mainline kernel
         * code working on VM objects to get confused.
         */
        if (flags & (M_FAILSAFE|M_WAITOK)) {
            if (td->td_preempted) {
                vmflags |= VM_ALLOC_SYSTEM;
            } else {
                vmflags |= VM_ALLOC_NORMAL;
            }
        }

        m = vm_page_alloc(kernel_object, idx, vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK or M_FAILSAFE is set we retry.  Note that M_WAITOK
         * (and M_FAILSAFE) can be specified from an interrupt.  M_FAILSAFE
         * generates a warning or a panic.
         *
         * If we are preempting a thread we yield instead of block.  Both
         * get us out from under a preemption but yielding will get the cpu
         * back more quickly.  Livelock does not occur because we will not
         * be preempting anyone the second time around.
         */
        if (m == NULL) {
            if (flags & (M_FAILSAFE|M_WAITOK)) {
                if (td->td_preempted) {
                    if (flags & M_FAILSAFE) {
                        printf("malloc: M_WAITOK from preemption would block,"
                                " try failsafe yield/block\n");
                    }
                    vm_map_unlock(map);
                    lwkt_yield();
                    vm_map_lock(map);
                } else {
                    vm_map_unlock(map);
                    vm_wait();
                    vm_map_lock(map);
                }
                i -= PAGE_SIZE; /* retry */
                continue;
            }

            /*
             * We were unable to recover, cleanup and return NULL
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
                vm_page_free(m);
            }
            vm_map_delete(map, addr, addr + size, &count);
            vm_map_unlock(map);
            crit_exit();
            vm_map_entry_release(count);
            return(NULL);
        }
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
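     *
     * The pages allocated above are not yet mapped; the loop below marks
     * them valid, wires them, and enters them into the kernel_pmap,
     * zeroing by hand only when M_ZERO is set and the page did not come
     * back pre-zeroed (PG_ZERO).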
     */
    vm_map_set_wired_quick(map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
    }
    vm_map_unlock(map);
    vm_map_entry_release(count);
    return((void *)addr);
}

static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
}
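
/*
 * Example usage (an illustrative sketch only, not part of the compiled
 * code; M_FOOBUF and the byte counts are hypothetical):
 *
 *    MALLOC_DEFINE(M_FOOBUF, "foobuf", "foo driver buffers");
 *
 *    void *buf;
 *
 *    buf = malloc(100, M_FOOBUF, M_WAITOK|M_ZERO);
 *        (served from a slab zone, rounded up to the 104-byte chunk size)
 *
 *    buf = realloc(buf, 8192, M_FOOBUF, M_WAITOK);
 *        (an exact multiple of PAGE_SIZE, so it goes directly to kmem)
 *
 *    free(buf, M_FOOBUF);
 */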