/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.25 2004/11/17 23:36:17 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *    Alloc Size     Chunking     Number of zones
 *    0-127          8            16
 *    128-255        16           8
 *    256-511        32           8
 *    512-1023       64           8
 *    1024-2047      128          8
 *    2048-4095      256          8
 *    4096-8191      512          8
 *    8192-16383     1024         8
 *    16384-32767    2048         8
 *    (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *    Allocations >= ZoneLimit go directly to kmem.
 *
 *              API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)    (sizeof(ary)/sizeof((ary)[0]))

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE      8           /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH    2           /* threshold number of zones */
#define IN_SAME_PAGE_MASK   (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR      0xdeadc0de
#define MAX_COPY        sizeof(weirdary)
#define ZERO_LENGTH_PTR ((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);    /* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
                                PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
        printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
            ttl, type->ks_shortdesc);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
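 *
 * For example, a 100-byte request is rounded up to a 104-byte chunk in
 * zone index 12, a 200-byte request to a 208-byte chunk in zone index 20,
 * and a 1000-byte request to a 1024-byte chunk in zone index 39.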
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;  /* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);              /* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

/*
 * malloc() (SLAB ALLOCATOR)
 *
 *    Allocate memory via the slab allocator.  If the request is too large,
 *    or if it is page-aligned beyond a certain size, we fall back to the
 *    KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *    &SlabMisc if you don't care.
 *
 *    M_RNOWAIT               - return NULL instead of blocking.
 *    M_ZERO                  - zero the returned memory.
 *    M_USE_RESERVE           - allow greater drawdown of the free list
 *    M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;

    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  XXX the original malloc code looped, but this tended to
     * simply deadlock the computer.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;
        if (ttl >= type->ks_limit) {
            if (flags & (M_RNOWAIT|M_NULLOK))
                return(NULL);
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0)
        return(ZERO_LENGTH_PTR);

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
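     * Only up to ZONE_RELS_THRESH free zones are kept cached on the
     * FreeZones list; any excess is returned to the kernel_map here,
     * where blocking is safe.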
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {  /* crit sect race */
            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kmem_slab_free(z, ZoneSize);    /* may block */
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            kmem_slab_free(z, z->z_ChunkSize);  /* may block */
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL)
            return(NULL);
        flags &= ~M_ZERO;       /* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        kup->ku_cpu = gd->gd_cpuid;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);

        /*
         * Remove us from the ZoneAry[] when we become empty
         */
        if (--z->z_NFree == 0) {
            slgd->ZoneAry[zi] = z->z_Next;
            z->z_Next = NULL;
        }

        /*
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
         */
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
                /*
                 * Diagnostic: c_Next is not total garbage.
                 */
                KKASSERT(chunk->c_Next == NULL ||
                        ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                        ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
                if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
#endif
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
                goto done;
            }
            ++z->z_FirstFreePg;
        }

        /*
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        else
            z->z_UIndex = 0;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * the z_UIndex use above in regards to M_ZERO.  Note that when we are
     * reusing a zone from the FreeZones list the never-used area will not
     * be zero'd, and we do not pre-zero it because we do not want to mess
     * up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
        }

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (sizeof(SLZone) + size - 1) & ~(size - 1);
        else
            off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_FirstFreePg = ZonePageCount;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;       /* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                        & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;
    crit_exit();
    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
        chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
#endif
    return(chunk);
fail:
    crit_exit();
    return(NULL);
}

void *
realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);    /* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(malloc(size, type, flags));
    if (size == 0) {
        free(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
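     *
     * btokup() looks up the kmemusage entry for the backing page; a
     * non-zero ku_pagecnt identifies an allocation that bypassed the
     * zones and went directly to kmem_slab_alloc().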
     */
    {
        struct kmemusage *kup;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
                return(ptr);
            if ((nptr = malloc(size, type, flags)) == NULL)
                return(NULL);
            bcopy(ptr, nptr, min(size, osize));
            free(ptr, type);
            return(nptr);
        }
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
        return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = malloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    free(ptr, type);
    return(nptr);
}

char *
strdup(const char *str, struct malloc_type *type)
{
    int zlen;   /* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = malloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free() (SLAB ALLOCATOR)
 *
 *    Free the specified chunk of memory.
 */
static void
free_remote(void *ptr)
{
    free(ptr, *(struct malloc_type **)ptr);
}

#endif

void
free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR)
        return;

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
        struct kmemusage *kup;
        unsigned long size;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            kup->ku_pagecnt = 0;
#ifdef INVARIANTS
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));
#endif
            /*
             * note: we always adjust our cpu's slot, not the originating
             * cpu (kup->ku_cpu).  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
             */
            crit_enter();
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
                z = (SLZone *)ptr;
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
                crit_exit();
            } else {
                crit_exit();
                kmem_slab_free(ptr, size);  /* may block */
            }
            return;
        }
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
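     *
     * For example, with a 128K ZoneSize the low 17 bits of the chunk
     * address are masked off (ZoneMask == ZoneSize - 1) to recover the
     * SLZone header at the base of the zone.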
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.
     */
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
#ifdef SMP
        lwkt_send_ipiq(z->z_CpuGd, free_remote, ptr);
#else
        panic("Corrupt SLZone");
#endif
        return;
    }

    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        SLChunk *scan;
        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
            if (scan == chunk)
                panic("Double free at %p", chunk);
        }
    }
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
    }
    crit_exit();
}

/*
 * kmem_slab_alloc()
 *
 *    Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *    specified alignment.  M_* flags are expected in the flags field.
 *
 *    Alignment must be a multiple of PAGE_SIZE.
 *
 *    NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *    but when we move zalloc() over to use this function as its backend
 *    we will have to switch to kreserve/krelease and call reserve(0)
 *    after the new space is made available.
 *
 *    Interrupt code which has preempted other code is not allowed to
 *    use PQ_CACHE pages.
 *    However, if an interrupt thread is run non-preemptively or blocks
 *    and then runs non-preemptively, then it is free to use PQ_CACHE
 *    pages.
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    vm_offset_t offset;
    int count;
    thread_t td;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);

    /*
     * Reserve properly aligned space from kernel_map
     */
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(map);
    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
        vm_map_unlock(map);
        if ((flags & (M_RNOWAIT|M_NULLOK)) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        crit_exit();
        vm_map_entry_release(count);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        return(NULL);
    }
    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
                    kernel_object, offset, addr, addr + size,
                    VM_PROT_ALL, VM_PROT_ALL, 0);

    td = curthread;

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;
        vm_pindex_t idx = OFF_TO_IDX(offset + i);
        int vmflags = 0;

        if (flags & M_ZERO)
            vmflags |= VM_ALLOC_ZERO;
        if (flags & M_USE_RESERVE)
            vmflags |= VM_ALLOC_SYSTEM;
        if (flags & M_USE_INTERRUPT_RESERVE)
            vmflags |= VM_ALLOC_INTERRUPT;
        if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
            panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

        /*
         * VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
         */
        if (flags & M_WAITOK) {
            if (td->td_preempted) {
                vmflags |= VM_ALLOC_SYSTEM;
            } else {
                vmflags |= VM_ALLOC_NORMAL;
            }
        }

        m = vm_page_alloc(kernel_object, idx, vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    vm_map_unlock(map);
                    lwkt_yield();
                    vm_map_lock(map);
                } else {
                    vm_map_unlock(map);
                    vm_wait();
                    vm_map_lock(map);
                }
                i -= PAGE_SIZE; /* retry */
                continue;
            }

            /*
             * We were unable to recover, cleanup and return NULL
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
                vm_page_free(m);
            }
            vm_map_delete(map, addr, addr + size, &count);
            vm_map_unlock(map);
            crit_exit();
            vm_map_entry_release(count);
            return(NULL);
        }
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     */
    vm_map_set_wired_quick(map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
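     * Pages which the VM system has already zeroed (PG_ZERO set) are not
     * zeroed again when M_ZERO is requested.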
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
    }
    vm_map_unlock(map);
    vm_map_entry_release(count);
    return((void *)addr);
}

static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
}
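
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * how a subsystem would declare its own malloc type and allocate through
 * the slab allocator above.  M_FOOBUF, struct foodata, foodata_create()
 * and foodata_destroy() are made-up names for this example.
 */
#if 0
MALLOC_DEFINE(M_FOOBUF, "foobuf", "example foo buffers");

struct foodata {
    int fd_count;
    char fd_name[16];
};

static struct foodata *
foodata_create(const char *name)
{
    struct foodata *fd;

    /* M_WAITOK may block; M_ZERO returns zeroed memory */
    fd = malloc(sizeof(*fd), M_FOOBUF, M_WAITOK | M_ZERO);
    snprintf(fd->fd_name, sizeof(fd->fd_name), "%s", name);
    return(fd);
}

static void
foodata_destroy(struct foodata *fd)
{
    free(fd, M_FOOBUF);
}
#endif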