1 /* 2 * KERN_SLABALLOC.C - Kernel SLAB memory allocator 3 * 4 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $ 37 * 38 * This module implements a slab allocator drop-in replacement for the 39 * kernel malloc(). 40 * 41 * A slab allocator reserves a ZONE for each chunk size, then lays the 42 * chunks out in an array within the zone. Allocation and deallocation 43 * is nearly instantanious, and fragmentation/overhead losses are limited 44 * to a fixed worst-case amount. 45 * 46 * The downside of this slab implementation is in the chunk size 47 * multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu. 48 * In a kernel implementation all this memory will be physical so 49 * the zone size is adjusted downward on machines with less physical 50 * memory. The upside is that overhead is bounded... this is the *worst* 51 * case overhead. 52 * 53 * Slab management is done on a per-cpu basis and no locking or mutexes 54 * are required, only a critical section. When one cpu frees memory 55 * belonging to another cpu's slab manager an asynchronous IPI message 56 * will be queued to execute the operation. In addition, both the 57 * high level slab allocator and the low level zone allocator optimize 58 * M_ZERO requests, and the slab allocator does not have to pre initialize 59 * the linked list of chunks. 60 * 61 * XXX Balancing is needed between cpus. Balance will be handled through 62 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks. 63 * 64 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of 65 * the new zone should be restricted to M_USE_RESERVE requests only. 66 * 67 * Alloc Size Chunking Number of zones 68 * 0-127 8 16 69 * 128-255 16 8 70 * 256-511 32 8 71 * 512-1023 64 8 72 * 1024-2047 128 8 73 * 2048-4095 256 8 74 * 4096-8191 512 8 75 * 8192-16383 1024 8 76 * 16384-32767 2048 8 77 * (if PAGE_SIZE is 4K the maximum zone allocation is 16383) 78 * 79 * Allocations >= ZoneLimit go directly to kmem. 80 * 81 * API REQUIREMENTS AND SIDE EFFECTS 82 * 83 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we 84 * have remained compatible with the following API requirements: 85 * 86 * + small power-of-2 sized allocations are power-of-2 aligned (kern_tty) 87 * + all power-of-2 sized allocations are power-of-2 aligned (twe) 88 * + malloc(0) is allowed and returns non-NULL (ahc driver) 89 * + ability to allocate arbitrarily large chunks of memory 90 */ 91 92 #include "opt_vm.h" 93 94 #include <sys/param.h> 95 #include <sys/systm.h> 96 #include <sys/kernel.h> 97 #include <sys/slaballoc.h> 98 #include <sys/mbuf.h> 99 #include <sys/vmmeter.h> 100 #include <sys/lock.h> 101 #include <sys/thread.h> 102 #include <sys/globaldata.h> 103 #include <sys/sysctl.h> 104 #include <sys/ktr.h> 105 106 #include <vm/vm.h> 107 #include <vm/vm_param.h> 108 #include <vm/vm_kern.h> 109 #include <vm/vm_extern.h> 110 #include <vm/vm_object.h> 111 #include <vm/pmap.h> 112 #include <vm/vm_map.h> 113 #include <vm/vm_page.h> 114 #include <vm/vm_pageout.h> 115 116 #include <machine/cpu.h> 117 118 #include <sys/thread2.h> 119 120 #define arysize(ary) (sizeof(ary)/sizeof((ary)[0])) 121 122 #define MEMORY_STRING "ptr=%p type=%p size=%d flags=%04x" 123 #define MEMORY_ARG_SIZE (sizeof(void *) * 2 + sizeof(unsigned long) + \ 124 sizeof(int)) 125 126 #if !defined(KTR_MEMORY) 127 #define KTR_MEMORY KTR_ALL 128 #endif 129 KTR_INFO_MASTER(memory); 130 KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE); 131 KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE); 132 KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE); 133 KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE); 134 KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE); 135 #ifdef SMP 136 KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE); 137 KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE); 138 #endif 139 KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0); 140 KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0); 141 KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0); 142 143 #define logmemory(name, ptr, type, size, flags) \ 144 KTR_LOG(memory_ ## name, ptr, type, size, flags) 145 #define logmemory_quick(name) \ 146 KTR_LOG(memory_ ## name) 147 148 /* 149 * Fixed globals (not per-cpu) 150 */ 151 static int ZoneSize; 152 static int ZoneLimit; 153 static int ZonePageCount; 154 static int ZoneMask; 155 struct malloc_type *kmemstatistics; /* exported to vmstat */ 156 static struct kmemusage *kmemusage; 157 static int32_t weirdary[16]; 158 159 static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags); 160 static void kmem_slab_free(void *ptr, vm_size_t bytes); 161 #if defined(INVARIANTS) 162 static void chunk_mark_allocated(SLZone *z, void *chunk); 163 static void chunk_mark_free(SLZone *z, void *chunk); 164 #endif 165 166 /* 167 * Misc constants. Note that allocations that are exact multiples of 168 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module. 169 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists. 170 */ 171 #define MIN_CHUNK_SIZE 8 /* in bytes */ 172 #define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1) 173 #define ZONE_RELS_THRESH 2 /* threshold number of zones */ 174 #define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK) 175 176 /* 177 * The WEIRD_ADDR is used as known text to copy into free objects to 178 * try to create deterministic failure cases if the data is accessed after 179 * free. 180 */ 181 #define WEIRD_ADDR 0xdeadc0de 182 #define MAX_COPY sizeof(weirdary) 183 #define ZERO_LENGTH_PTR ((void *)-8) 184 185 /* 186 * Misc global malloc buckets 187 */ 188 189 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); 190 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); 191 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); 192 193 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); 194 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); 195 196 /* 197 * Initialize the slab memory allocator. We have to choose a zone size based 198 * on available physical memory. We choose a zone side which is approximately 199 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of 200 * 128K. The zone size is limited to the bounds set in slaballoc.h 201 * (typically 32K min, 128K max). 202 */ 203 static void kmeminit(void *dummy); 204 205 char *ZeroPage; 206 207 SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL) 208 209 #ifdef INVARIANTS 210 /* 211 * If enabled any memory allocated without M_ZERO is initialized to -1. 212 */ 213 static int use_malloc_pattern; 214 SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW, 215 &use_malloc_pattern, 0, ""); 216 #endif 217 218 static void 219 kmeminit(void *dummy) 220 { 221 vm_poff_t limsize; 222 int usesize; 223 int i; 224 vm_pindex_t npg; 225 226 limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE; 227 if (limsize > KvaSize) 228 limsize = KvaSize; 229 230 usesize = (int)(limsize / 1024); /* convert to KB */ 231 232 ZoneSize = ZALLOC_MIN_ZONE_SIZE; 233 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize) 234 ZoneSize <<= 1; 235 ZoneLimit = ZoneSize / 4; 236 if (ZoneLimit > ZALLOC_ZONE_LIMIT) 237 ZoneLimit = ZALLOC_ZONE_LIMIT; 238 ZoneMask = ZoneSize - 1; 239 ZonePageCount = ZoneSize / PAGE_SIZE; 240 241 npg = KvaSize / PAGE_SIZE; 242 kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), 243 PAGE_SIZE, M_WAITOK|M_ZERO); 244 245 for (i = 0; i < arysize(weirdary); ++i) 246 weirdary[i] = WEIRD_ADDR; 247 248 ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO); 249 250 if (bootverbose) 251 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024); 252 } 253 254 /* 255 * Initialize a malloc type tracking structure. 256 */ 257 void 258 malloc_init(void *data) 259 { 260 struct malloc_type *type = data; 261 vm_poff_t limsize; 262 263 if (type->ks_magic != M_MAGIC) 264 panic("malloc type lacks magic"); 265 266 if (type->ks_limit != 0) 267 return; 268 269 if (vmstats.v_page_count == 0) 270 panic("malloc_init not allowed before vm init"); 271 272 limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE; 273 if (limsize > KvaSize) 274 limsize = KvaSize; 275 type->ks_limit = limsize / 10; 276 277 type->ks_next = kmemstatistics; 278 kmemstatistics = type; 279 } 280 281 void 282 malloc_uninit(void *data) 283 { 284 struct malloc_type *type = data; 285 struct malloc_type *t; 286 #ifdef INVARIANTS 287 int i; 288 long ttl; 289 #endif 290 291 if (type->ks_magic != M_MAGIC) 292 panic("malloc type lacks magic"); 293 294 if (vmstats.v_page_count == 0) 295 panic("malloc_uninit not allowed before vm init"); 296 297 if (type->ks_limit == 0) 298 panic("malloc_uninit on uninitialized type"); 299 300 #ifdef SMP 301 /* Make sure that all pending kfree()s are finished. */ 302 lwkt_synchronize_ipiqs("muninit"); 303 #endif 304 305 #ifdef INVARIANTS 306 /* 307 * memuse is only correct in aggregation. Due to memory being allocated 308 * on one cpu and freed on another individual array entries may be 309 * negative or positive (canceling each other out). 310 */ 311 for (i = ttl = 0; i < ncpus; ++i) 312 ttl += type->ks_memuse[i]; 313 if (ttl) { 314 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n", 315 ttl, type->ks_shortdesc, i); 316 } 317 #endif 318 if (type == kmemstatistics) { 319 kmemstatistics = type->ks_next; 320 } else { 321 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { 322 if (t->ks_next == type) { 323 t->ks_next = type->ks_next; 324 break; 325 } 326 } 327 } 328 type->ks_next = NULL; 329 type->ks_limit = 0; 330 } 331 332 /* 333 * Increase the kmalloc pool limit for the specified pool. No changes 334 * are the made if the pool would shrink. 335 */ 336 void 337 kmalloc_raise_limit(struct malloc_type *type, size_t bytes) 338 { 339 if (type->ks_limit == 0) 340 malloc_init(type); 341 if (type->ks_limit < bytes) 342 type->ks_limit = bytes; 343 } 344 345 /* 346 * Dynamically create a malloc pool. This function is a NOP if *typep is 347 * already non-NULL. 348 */ 349 void 350 kmalloc_create(struct malloc_type **typep, const char *descr) 351 { 352 struct malloc_type *type; 353 354 if (*typep == NULL) { 355 type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO); 356 type->ks_magic = M_MAGIC; 357 type->ks_shortdesc = descr; 358 malloc_init(type); 359 *typep = type; 360 } 361 } 362 363 /* 364 * Destroy a dynamically created malloc pool. This function is a NOP if 365 * the pool has already been destroyed. 366 */ 367 void 368 kmalloc_destroy(struct malloc_type **typep) 369 { 370 if (*typep != NULL) { 371 malloc_uninit(*typep); 372 kfree(*typep, M_TEMP); 373 *typep = NULL; 374 } 375 } 376 377 /* 378 * Calculate the zone index for the allocation request size and set the 379 * allocation request size to that particular zone's chunk size. 380 */ 381 static __inline int 382 zoneindex(unsigned long *bytes) 383 { 384 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */ 385 if (n < 128) { 386 *bytes = n = (n + 7) & ~7; 387 return(n / 8 - 1); /* 8 byte chunks, 16 zones */ 388 } 389 if (n < 256) { 390 *bytes = n = (n + 15) & ~15; 391 return(n / 16 + 7); 392 } 393 if (n < 8192) { 394 if (n < 512) { 395 *bytes = n = (n + 31) & ~31; 396 return(n / 32 + 15); 397 } 398 if (n < 1024) { 399 *bytes = n = (n + 63) & ~63; 400 return(n / 64 + 23); 401 } 402 if (n < 2048) { 403 *bytes = n = (n + 127) & ~127; 404 return(n / 128 + 31); 405 } 406 if (n < 4096) { 407 *bytes = n = (n + 255) & ~255; 408 return(n / 256 + 39); 409 } 410 *bytes = n = (n + 511) & ~511; 411 return(n / 512 + 47); 412 } 413 #if ZALLOC_ZONE_LIMIT > 8192 414 if (n < 16384) { 415 *bytes = n = (n + 1023) & ~1023; 416 return(n / 1024 + 55); 417 } 418 #endif 419 #if ZALLOC_ZONE_LIMIT > 16384 420 if (n < 32768) { 421 *bytes = n = (n + 2047) & ~2047; 422 return(n / 2048 + 63); 423 } 424 #endif 425 panic("Unexpected byte count %d", n); 426 return(0); 427 } 428 429 /* 430 * malloc() (SLAB ALLOCATOR) 431 * 432 * Allocate memory via the slab allocator. If the request is too large, 433 * or if it page-aligned beyond a certain size, we fall back to the 434 * KMEM subsystem. A SLAB tracking descriptor must be specified, use 435 * &SlabMisc if you don't care. 436 * 437 * M_RNOWAIT - don't block. 438 * M_NULLOK - return NULL instead of blocking. 439 * M_ZERO - zero the returned memory. 440 * M_USE_RESERVE - allow greater drawdown of the free list 441 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted 442 * 443 * MPSAFE 444 */ 445 446 void * 447 kmalloc(unsigned long size, struct malloc_type *type, int flags) 448 { 449 SLZone *z; 450 SLChunk *chunk; 451 SLGlobalData *slgd; 452 struct globaldata *gd; 453 int zi; 454 #ifdef INVARIANTS 455 int i; 456 #endif 457 458 logmemory_quick(malloc_beg); 459 gd = mycpu; 460 slgd = &gd->gd_slab; 461 462 /* 463 * XXX silly to have this in the critical path. 464 */ 465 if (type->ks_limit == 0) { 466 crit_enter(); 467 if (type->ks_limit == 0) 468 malloc_init(type); 469 crit_exit(); 470 } 471 ++type->ks_calls; 472 473 /* 474 * Handle the case where the limit is reached. Panic if we can't return 475 * NULL. The original malloc code looped, but this tended to 476 * simply deadlock the computer. 477 * 478 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used 479 * to determine if a more complete limit check should be done. The 480 * actual memory use is tracked via ks_memuse[cpu]. 481 */ 482 while (type->ks_loosememuse >= type->ks_limit) { 483 int i; 484 long ttl; 485 486 for (i = ttl = 0; i < ncpus; ++i) 487 ttl += type->ks_memuse[i]; 488 type->ks_loosememuse = ttl; /* not MP synchronized */ 489 if (ttl >= type->ks_limit) { 490 if (flags & M_NULLOK) { 491 logmemory(malloc, NULL, type, size, flags); 492 return(NULL); 493 } 494 panic("%s: malloc limit exceeded", type->ks_shortdesc); 495 } 496 } 497 498 /* 499 * Handle the degenerate size == 0 case. Yes, this does happen. 500 * Return a special pointer. This is to maintain compatibility with 501 * the original malloc implementation. Certain devices, such as the 502 * adaptec driver, not only allocate 0 bytes, they check for NULL and 503 * also realloc() later on. Joy. 504 */ 505 if (size == 0) { 506 logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags); 507 return(ZERO_LENGTH_PTR); 508 } 509 510 /* 511 * Handle hysteresis from prior frees here in malloc(). We cannot 512 * safely manipulate the kernel_map in free() due to free() possibly 513 * being called via an IPI message or from sensitive interrupt code. 514 */ 515 while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) { 516 crit_enter(); 517 if (slgd->NFreeZones > ZONE_RELS_THRESH) { /* crit sect race */ 518 z = slgd->FreeZones; 519 slgd->FreeZones = z->z_Next; 520 --slgd->NFreeZones; 521 kmem_slab_free(z, ZoneSize); /* may block */ 522 } 523 crit_exit(); 524 } 525 /* 526 * XXX handle oversized frees that were queued from free(). 527 */ 528 while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) { 529 crit_enter(); 530 if ((z = slgd->FreeOvZones) != NULL) { 531 KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC); 532 slgd->FreeOvZones = z->z_Next; 533 kmem_slab_free(z, z->z_ChunkSize); /* may block */ 534 } 535 crit_exit(); 536 } 537 538 /* 539 * Handle large allocations directly. There should not be very many of 540 * these so performance is not a big issue. 541 * 542 * The backend allocator is pretty nasty on a SMP system. Use the 543 * slab allocator for one and two page-sized chunks even though we lose 544 * some efficiency. XXX maybe fix mmio and the elf loader instead. 545 */ 546 if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) { 547 struct kmemusage *kup; 548 549 size = round_page(size); 550 chunk = kmem_slab_alloc(size, PAGE_SIZE, flags); 551 if (chunk == NULL) { 552 logmemory(malloc, NULL, type, size, flags); 553 return(NULL); 554 } 555 flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */ 556 flags |= M_PASSIVE_ZERO; 557 kup = btokup(chunk); 558 kup->ku_pagecnt = size / PAGE_SIZE; 559 kup->ku_cpu = gd->gd_cpuid; 560 crit_enter(); 561 goto done; 562 } 563 564 /* 565 * Attempt to allocate out of an existing zone. First try the free list, 566 * then allocate out of unallocated space. If we find a good zone move 567 * it to the head of the list so later allocations find it quickly 568 * (we might have thousands of zones in the list). 569 * 570 * Note: zoneindex() will panic of size is too large. 571 */ 572 zi = zoneindex(&size); 573 KKASSERT(zi < NZONES); 574 crit_enter(); 575 if ((z = slgd->ZoneAry[zi]) != NULL) { 576 KKASSERT(z->z_NFree > 0); 577 578 /* 579 * Remove us from the ZoneAry[] when we become empty 580 */ 581 if (--z->z_NFree == 0) { 582 slgd->ZoneAry[zi] = z->z_Next; 583 z->z_Next = NULL; 584 } 585 586 /* 587 * Locate a chunk in a free page. This attempts to localize 588 * reallocations into earlier pages without us having to sort 589 * the chunk list. A chunk may still overlap a page boundary. 590 */ 591 while (z->z_FirstFreePg < ZonePageCount) { 592 if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) { 593 #ifdef DIAGNOSTIC 594 /* 595 * Diagnostic: c_Next is not total garbage. 596 */ 597 KKASSERT(chunk->c_Next == NULL || 598 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) == 599 ((intptr_t)chunk & IN_SAME_PAGE_MASK)); 600 #endif 601 #ifdef INVARIANTS 602 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd) 603 panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount); 604 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart) 605 panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount); 606 chunk_mark_allocated(z, chunk); 607 #endif 608 z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next; 609 goto done; 610 } 611 ++z->z_FirstFreePg; 612 } 613 614 /* 615 * No chunks are available but NFree said we had some memory, so 616 * it must be available in the never-before-used-memory area 617 * governed by UIndex. The consequences are very serious if our zone 618 * got corrupted so we use an explicit panic rather then a KASSERT. 619 */ 620 if (z->z_UIndex + 1 != z->z_NMax) 621 z->z_UIndex = z->z_UIndex + 1; 622 else 623 z->z_UIndex = 0; 624 if (z->z_UIndex == z->z_UEndIndex) 625 panic("slaballoc: corrupted zone"); 626 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 627 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 628 flags &= ~M_ZERO; 629 flags |= M_PASSIVE_ZERO; 630 } 631 #if defined(INVARIANTS) 632 chunk_mark_allocated(z, chunk); 633 #endif 634 goto done; 635 } 636 637 /* 638 * If all zones are exhausted we need to allocate a new zone for this 639 * index. Use M_ZERO to take advantage of pre-zerod pages. Also see 640 * UAlloc use above in regards to M_ZERO. Note that when we are reusing 641 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and 642 * we do not pre-zero it because we do not want to mess up the L1 cache. 643 * 644 * At least one subsystem, the tty code (see CROUND) expects power-of-2 645 * allocations to be power-of-2 aligned. We maintain compatibility by 646 * adjusting the base offset below. 647 */ 648 { 649 int off; 650 651 if ((z = slgd->FreeZones) != NULL) { 652 slgd->FreeZones = z->z_Next; 653 --slgd->NFreeZones; 654 bzero(z, sizeof(SLZone)); 655 z->z_Flags |= SLZF_UNOTZEROD; 656 } else { 657 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO); 658 if (z == NULL) 659 goto fail; 660 } 661 662 /* 663 * How big is the base structure? 664 */ 665 #if defined(INVARIANTS) 666 /* 667 * Make room for z_Bitmap. An exact calculation is somewhat more 668 * complicated so don't make an exact calculation. 669 */ 670 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]); 671 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8); 672 #else 673 off = sizeof(SLZone); 674 #endif 675 676 /* 677 * Guarentee power-of-2 alignment for power-of-2-sized chunks. 678 * Otherwise just 8-byte align the data. 679 */ 680 if ((size | (size - 1)) + 1 == (size << 1)) 681 off = (off + size - 1) & ~(size - 1); 682 else 683 off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK; 684 z->z_Magic = ZALLOC_SLAB_MAGIC; 685 z->z_ZoneIndex = zi; 686 z->z_NMax = (ZoneSize - off) / size; 687 z->z_NFree = z->z_NMax - 1; 688 z->z_BasePtr = (char *)z + off; 689 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax; 690 z->z_ChunkSize = size; 691 z->z_FirstFreePg = ZonePageCount; 692 z->z_CpuGd = gd; 693 z->z_Cpu = gd->gd_cpuid; 694 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); 695 z->z_Next = slgd->ZoneAry[zi]; 696 slgd->ZoneAry[zi] = z; 697 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { 698 flags &= ~M_ZERO; /* already zero'd */ 699 flags |= M_PASSIVE_ZERO; 700 } 701 #if defined(INVARIANTS) 702 chunk_mark_allocated(z, chunk); 703 #endif 704 705 /* 706 * Slide the base index for initial allocations out of the next 707 * zone we create so we do not over-weight the lower part of the 708 * cpu memory caches. 709 */ 710 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) 711 & (ZALLOC_MAX_ZONE_SIZE - 1); 712 } 713 done: 714 ++type->ks_inuse[gd->gd_cpuid]; 715 type->ks_memuse[gd->gd_cpuid] += size; 716 type->ks_loosememuse += size; /* not MP synchronized */ 717 crit_exit(); 718 if (flags & M_ZERO) 719 bzero(chunk, size); 720 #ifdef INVARIANTS 721 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) { 722 if (use_malloc_pattern) { 723 for (i = 0; i < size; i += sizeof(int)) { 724 *(int *)((char *)chunk + i) = -1; 725 } 726 } 727 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */ 728 } 729 #endif 730 logmemory(malloc, chunk, type, size, flags); 731 return(chunk); 732 fail: 733 crit_exit(); 734 logmemory(malloc, NULL, type, size, flags); 735 return(NULL); 736 } 737 738 /* 739 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE) 740 * 741 * Generally speaking this routine is not called very often and we do 742 * not attempt to optimize it beyond reusing the same pointer if the 743 * new size fits within the chunking of the old pointer's zone. 744 */ 745 void * 746 krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags) 747 { 748 SLZone *z; 749 void *nptr; 750 unsigned long osize; 751 752 KKASSERT((flags & M_ZERO) == 0); /* not supported */ 753 754 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) 755 return(kmalloc(size, type, flags)); 756 if (size == 0) { 757 kfree(ptr, type); 758 return(NULL); 759 } 760 761 /* 762 * Handle oversized allocations. XXX we really should require that a 763 * size be passed to free() instead of this nonsense. 764 */ 765 { 766 struct kmemusage *kup; 767 768 kup = btokup(ptr); 769 if (kup->ku_pagecnt) { 770 osize = kup->ku_pagecnt << PAGE_SHIFT; 771 if (osize == round_page(size)) 772 return(ptr); 773 if ((nptr = kmalloc(size, type, flags)) == NULL) 774 return(NULL); 775 bcopy(ptr, nptr, min(size, osize)); 776 kfree(ptr, type); 777 return(nptr); 778 } 779 } 780 781 /* 782 * Get the original allocation's zone. If the new request winds up 783 * using the same chunk size we do not have to do anything. 784 */ 785 z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask); 786 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 787 788 /* 789 * Allocate memory for the new request size. Note that zoneindex has 790 * already adjusted the request size to the appropriate chunk size, which 791 * should optimize our bcopy(). Then copy and return the new pointer. 792 * 793 * Resizing a non-power-of-2 allocation to a power-of-2 size does not 794 * necessary align the result. 795 * 796 * We can only zoneindex (to align size to the chunk size) if the new 797 * size is not too large. 798 */ 799 if (size < ZoneLimit) { 800 zoneindex(&size); 801 if (z->z_ChunkSize == size) 802 return(ptr); 803 } 804 if ((nptr = kmalloc(size, type, flags)) == NULL) 805 return(NULL); 806 bcopy(ptr, nptr, min(size, z->z_ChunkSize)); 807 kfree(ptr, type); 808 return(nptr); 809 } 810 811 /* 812 * Return the kmalloc limit for this type, in bytes. 813 */ 814 long 815 kmalloc_limit(struct malloc_type *type) 816 { 817 if (type->ks_limit == 0) { 818 crit_enter(); 819 if (type->ks_limit == 0) 820 malloc_init(type); 821 crit_exit(); 822 } 823 return(type->ks_limit); 824 } 825 826 /* 827 * Allocate a copy of the specified string. 828 * 829 * (MP SAFE) (MAY BLOCK) 830 */ 831 char * 832 kstrdup(const char *str, struct malloc_type *type) 833 { 834 int zlen; /* length inclusive of terminating NUL */ 835 char *nstr; 836 837 if (str == NULL) 838 return(NULL); 839 zlen = strlen(str) + 1; 840 nstr = kmalloc(zlen, type, M_WAITOK); 841 bcopy(str, nstr, zlen); 842 return(nstr); 843 } 844 845 #ifdef SMP 846 /* 847 * free() (SLAB ALLOCATOR) 848 * 849 * Free the specified chunk of memory. 850 */ 851 static 852 void 853 free_remote(void *ptr) 854 { 855 logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0); 856 kfree(ptr, *(struct malloc_type **)ptr); 857 } 858 859 #endif 860 861 /* 862 * free (SLAB ALLOCATOR) 863 * 864 * Free a memory block previously allocated by malloc. Note that we do not 865 * attempt to uplodate ks_loosememuse as MP races could prevent us from 866 * checking memory limits in malloc. 867 * 868 * MPSAFE 869 */ 870 void 871 kfree(void *ptr, struct malloc_type *type) 872 { 873 SLZone *z; 874 SLChunk *chunk; 875 SLGlobalData *slgd; 876 struct globaldata *gd; 877 int pgno; 878 879 logmemory_quick(free_beg); 880 gd = mycpu; 881 slgd = &gd->gd_slab; 882 883 if (ptr == NULL) 884 panic("trying to free NULL pointer"); 885 886 /* 887 * Handle special 0-byte allocations 888 */ 889 if (ptr == ZERO_LENGTH_PTR) { 890 logmemory(free_zero, ptr, type, -1, 0); 891 logmemory_quick(free_end); 892 return; 893 } 894 895 /* 896 * Handle oversized allocations. XXX we really should require that a 897 * size be passed to free() instead of this nonsense. 898 * 899 * This code is never called via an ipi. 900 */ 901 { 902 struct kmemusage *kup; 903 unsigned long size; 904 905 kup = btokup(ptr); 906 if (kup->ku_pagecnt) { 907 size = kup->ku_pagecnt << PAGE_SHIFT; 908 kup->ku_pagecnt = 0; 909 #ifdef INVARIANTS 910 KKASSERT(sizeof(weirdary) <= size); 911 bcopy(weirdary, ptr, sizeof(weirdary)); 912 #endif 913 /* 914 * note: we always adjust our cpu's slot, not the originating 915 * cpu (kup->ku_cpuid). The statistics are in aggregate. 916 * 917 * note: XXX we have still inherited the interrupts-can't-block 918 * assumption. An interrupt thread does not bump 919 * gd_intr_nesting_level so check TDF_INTTHREAD. This is 920 * primarily until we can fix softupdate's assumptions about free(). 921 */ 922 crit_enter(); 923 --type->ks_inuse[gd->gd_cpuid]; 924 type->ks_memuse[gd->gd_cpuid] -= size; 925 if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) { 926 logmemory(free_ovsz_delayed, ptr, type, size, 0); 927 z = (SLZone *)ptr; 928 z->z_Magic = ZALLOC_OVSZ_MAGIC; 929 z->z_Next = slgd->FreeOvZones; 930 z->z_ChunkSize = size; 931 slgd->FreeOvZones = z; 932 crit_exit(); 933 } else { 934 crit_exit(); 935 logmemory(free_ovsz, ptr, type, size, 0); 936 kmem_slab_free(ptr, size); /* may block */ 937 } 938 logmemory_quick(free_end); 939 return; 940 } 941 } 942 943 /* 944 * Zone case. Figure out the zone based on the fact that it is 945 * ZoneSize aligned. 946 */ 947 z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask); 948 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); 949 950 /* 951 * If we do not own the zone then forward the request to the 952 * cpu that does. Since the timing is non-critical, a passive 953 * message is sent. 954 */ 955 if (z->z_CpuGd != gd) { 956 *(struct malloc_type **)ptr = type; 957 #ifdef SMP 958 logmemory(free_request, ptr, type, z->z_ChunkSize, 0); 959 lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr); 960 #else 961 panic("Corrupt SLZone"); 962 #endif 963 logmemory_quick(free_end); 964 return; 965 } 966 967 logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0); 968 969 if (type->ks_magic != M_MAGIC) 970 panic("free: malloc type lacks magic"); 971 972 crit_enter(); 973 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT; 974 chunk = ptr; 975 976 #ifdef INVARIANTS 977 /* 978 * Attempt to detect a double-free. To reduce overhead we only check 979 * if there appears to be link pointer at the base of the data. 980 */ 981 if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) { 982 SLChunk *scan; 983 for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) { 984 if (scan == chunk) 985 panic("Double free at %p", chunk); 986 } 987 } 988 chunk_mark_free(z, chunk); 989 #endif 990 991 /* 992 * Put weird data into the memory to detect modifications after freeing, 993 * illegal pointer use after freeing (we should fault on the odd address), 994 * and so forth. XXX needs more work, see the old malloc code. 995 */ 996 #ifdef INVARIANTS 997 if (z->z_ChunkSize < sizeof(weirdary)) 998 bcopy(weirdary, chunk, z->z_ChunkSize); 999 else 1000 bcopy(weirdary, chunk, sizeof(weirdary)); 1001 #endif 1002 1003 /* 1004 * Add this free non-zero'd chunk to a linked list for reuse, adjust 1005 * z_FirstFreePg. 1006 */ 1007 #ifdef INVARIANTS 1008 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd) 1009 panic("BADFREE %p", chunk); 1010 #endif 1011 chunk->c_Next = z->z_PageAry[pgno]; 1012 z->z_PageAry[pgno] = chunk; 1013 #ifdef INVARIANTS 1014 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart) 1015 panic("BADFREE2"); 1016 #endif 1017 if (z->z_FirstFreePg > pgno) 1018 z->z_FirstFreePg = pgno; 1019 1020 /* 1021 * Bump the number of free chunks. If it becomes non-zero the zone 1022 * must be added back onto the appropriate list. 1023 */ 1024 if (z->z_NFree++ == 0) { 1025 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex]; 1026 slgd->ZoneAry[z->z_ZoneIndex] = z; 1027 } 1028 1029 --type->ks_inuse[z->z_Cpu]; 1030 type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize; 1031 1032 /* 1033 * If the zone becomes totally free, and there are other zones we 1034 * can allocate from, move this zone to the FreeZones list. Since 1035 * this code can be called from an IPI callback, do *NOT* try to mess 1036 * with kernel_map here. Hysteresis will be performed at malloc() time. 1037 */ 1038 if (z->z_NFree == z->z_NMax && 1039 (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) 1040 ) { 1041 SLZone **pz; 1042 1043 for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next) 1044 ; 1045 *pz = z->z_Next; 1046 z->z_Magic = -1; 1047 z->z_Next = slgd->FreeZones; 1048 slgd->FreeZones = z; 1049 ++slgd->NFreeZones; 1050 } 1051 logmemory_quick(free_end); 1052 crit_exit(); 1053 } 1054 1055 #if defined(INVARIANTS) 1056 /* 1057 * Helper routines for sanity checks 1058 */ 1059 static 1060 void 1061 chunk_mark_allocated(SLZone *z, void *chunk) 1062 { 1063 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1064 __uint32_t *bitptr; 1065 1066 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex)); 1067 bitptr = &z->z_Bitmap[bitdex >> 5]; 1068 bitdex &= 31; 1069 KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk)); 1070 *bitptr |= 1 << bitdex; 1071 } 1072 1073 static 1074 void 1075 chunk_mark_free(SLZone *z, void *chunk) 1076 { 1077 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; 1078 __uint32_t *bitptr; 1079 1080 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex)); 1081 bitptr = &z->z_Bitmap[bitdex >> 5]; 1082 bitdex &= 31; 1083 KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk)); 1084 *bitptr &= ~(1 << bitdex); 1085 } 1086 1087 #endif 1088 1089 /* 1090 * kmem_slab_alloc() 1091 * 1092 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the 1093 * specified alignment. M_* flags are expected in the flags field. 1094 * 1095 * Alignment must be a multiple of PAGE_SIZE. 1096 * 1097 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(), 1098 * but when we move zalloc() over to use this function as its backend 1099 * we will have to switch to kreserve/krelease and call reserve(0) 1100 * after the new space is made available. 1101 * 1102 * Interrupt code which has preempted other code is not allowed to 1103 * use PQ_CACHE pages. However, if an interrupt thread is run 1104 * non-preemptively or blocks and then runs non-preemptively, then 1105 * it is free to use PQ_CACHE pages. 1106 * 1107 * This routine will currently obtain the BGL. 1108 * 1109 * MPALMOSTSAFE - acquires mplock 1110 */ 1111 static void * 1112 kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) 1113 { 1114 vm_size_t i; 1115 vm_offset_t addr; 1116 int count, vmflags, base_vmflags; 1117 thread_t td; 1118 1119 size = round_page(size); 1120 addr = vm_map_min(&kernel_map); 1121 1122 /* 1123 * Reserve properly aligned space from kernel_map. RNOWAIT allocations 1124 * cannot block. 1125 */ 1126 if (flags & M_RNOWAIT) { 1127 if (try_mplock() == 0) 1128 return(NULL); 1129 } else { 1130 get_mplock(); 1131 } 1132 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1133 crit_enter(); 1134 vm_map_lock(&kernel_map); 1135 if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) { 1136 vm_map_unlock(&kernel_map); 1137 if ((flags & M_NULLOK) == 0) 1138 panic("kmem_slab_alloc(): kernel_map ran out of space!"); 1139 crit_exit(); 1140 vm_map_entry_release(count); 1141 rel_mplock(); 1142 return(NULL); 1143 } 1144 1145 /* 1146 * kernel_object maps 1:1 to kernel_map. 1147 */ 1148 vm_object_reference(&kernel_object); 1149 vm_map_insert(&kernel_map, &count, 1150 &kernel_object, addr, addr, addr + size, 1151 VM_MAPTYPE_NORMAL, 1152 VM_PROT_ALL, VM_PROT_ALL, 1153 0); 1154 1155 td = curthread; 1156 1157 base_vmflags = 0; 1158 if (flags & M_ZERO) 1159 base_vmflags |= VM_ALLOC_ZERO; 1160 if (flags & M_USE_RESERVE) 1161 base_vmflags |= VM_ALLOC_SYSTEM; 1162 if (flags & M_USE_INTERRUPT_RESERVE) 1163 base_vmflags |= VM_ALLOC_INTERRUPT; 1164 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) 1165 panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]); 1166 1167 1168 /* 1169 * Allocate the pages. Do not mess with the PG_ZERO flag yet. 1170 */ 1171 for (i = 0; i < size; i += PAGE_SIZE) { 1172 vm_page_t m; 1173 1174 /* 1175 * VM_ALLOC_NORMAL can only be set if we are not preempting. 1176 * 1177 * VM_ALLOC_SYSTEM is automatically set if we are preempting and 1178 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is 1179 * implied in this case), though I'm not sure if we really need to 1180 * do that. 1181 */ 1182 vmflags = base_vmflags; 1183 if (flags & M_WAITOK) { 1184 if (td->td_preempted) 1185 vmflags |= VM_ALLOC_SYSTEM; 1186 else 1187 vmflags |= VM_ALLOC_NORMAL; 1188 } 1189 1190 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags); 1191 1192 /* 1193 * If the allocation failed we either return NULL or we retry. 1194 * 1195 * If M_WAITOK is specified we wait for more memory and retry. 1196 * If M_WAITOK is specified from a preemption we yield instead of 1197 * wait. Livelock will not occur because the interrupt thread 1198 * will not be preempting anyone the second time around after the 1199 * yield. 1200 */ 1201 if (m == NULL) { 1202 if (flags & M_WAITOK) { 1203 if (td->td_preempted) { 1204 vm_map_unlock(&kernel_map); 1205 lwkt_yield(); 1206 vm_map_lock(&kernel_map); 1207 } else { 1208 vm_map_unlock(&kernel_map); 1209 vm_wait(0); 1210 vm_map_lock(&kernel_map); 1211 } 1212 i -= PAGE_SIZE; /* retry */ 1213 continue; 1214 } 1215 1216 /* 1217 * We were unable to recover, cleanup and return NULL 1218 */ 1219 while (i != 0) { 1220 i -= PAGE_SIZE; 1221 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); 1222 /* page should already be busy */ 1223 vm_page_free(m); 1224 } 1225 vm_map_delete(&kernel_map, addr, addr + size, &count); 1226 vm_map_unlock(&kernel_map); 1227 crit_exit(); 1228 vm_map_entry_release(count); 1229 rel_mplock(); 1230 return(NULL); 1231 } 1232 } 1233 1234 /* 1235 * Success! 1236 * 1237 * Mark the map entry as non-pageable using a routine that allows us to 1238 * populate the underlying pages. 1239 * 1240 * The pages were busied by the allocations above. 1241 */ 1242 vm_map_set_wired_quick(&kernel_map, addr, size, &count); 1243 crit_exit(); 1244 1245 /* 1246 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO. 1247 */ 1248 for (i = 0; i < size; i += PAGE_SIZE) { 1249 vm_page_t m; 1250 1251 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); 1252 m->valid = VM_PAGE_BITS_ALL; 1253 /* page should already be busy */ 1254 vm_page_wire(m); 1255 vm_page_wakeup(m); 1256 pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1); 1257 if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO)) 1258 bzero((char *)addr + i, PAGE_SIZE); 1259 vm_page_flag_clear(m, PG_ZERO); 1260 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED)); 1261 vm_page_flag_set(m, PG_REFERENCED); 1262 } 1263 vm_map_unlock(&kernel_map); 1264 vm_map_entry_release(count); 1265 rel_mplock(); 1266 return((void *)addr); 1267 } 1268 1269 /* 1270 * kmem_slab_free() 1271 * 1272 * MPALMOSTSAFE - acquires mplock 1273 */ 1274 static void 1275 kmem_slab_free(void *ptr, vm_size_t size) 1276 { 1277 get_mplock(); 1278 crit_enter(); 1279 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); 1280 crit_exit(); 1281 rel_mplock(); 1282 } 1283 1284