/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) +	\
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);

#define logmemory(name, ptr, type, size, flags)				\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)						\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#endif
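
/*
 * Example (illustrative, derived from the code below): because every zone
 * is allocated with ZoneSize alignment, the zone header that owns a chunk
 * can be recovered by simply masking the chunk address:
 *
 *	SLZone *z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
 *
 * This is how kfree() and krealloc() locate the owning zone without
 * requiring the caller to pass a size.
 */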

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0, "");
#endif

SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;
    vm_offset_t npg;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = KvaSize / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
                                PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
        kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
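 *
 * A malloc type is normally declared statically with MALLOC_DEFINE() (see
 * the M_CACHE/M_DEVBUF/M_TEMP pools above) and handed to kmalloc()/kfree().
 * A minimal usage sketch, using a hypothetical M_FOO pool:
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "example foo structures");
 *	...
 *	p = kmalloc(sizeof(*p), M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, M_FOO);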
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef SMP
    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
            ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
        malloc_init(type);
    if (bytes == 0)
        bytes = KvaSize;
    if (type->ks_limit < bytes)
        type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
        type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
        type->ks_magic = M_MAGIC;
        type->ks_shortdesc = descr;
        malloc_init(type);
        *typep = type;
    }
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
        malloc_uninit(*typep);
        kfree(*typep, M_TEMP);
        *typep = NULL;
    }
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
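 *
 * For example (illustrative arithmetic, following the chunking table in
 * the file header): a 100 byte request is in the 8-byte chunking band and
 * is rounded up to 104 bytes, yielding zone index 104/8 - 1 = 12; a 200
 * byte request is rounded up to 208 bytes, yielding zone index
 * 208/16 + 7 = 20.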
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

/*
 * malloc() (SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */

void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;	/* not MP synchronized */
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK) {
                logmemory(malloc, NULL, type, size, flags);
                return(NULL);
            }
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
        logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
        return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().
     * We cannot safely manipulate the kernel_map in free() due to free()
     * possibly being called via an IPI message or from sensitive interrupt
     * code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kmem_slab_free(z, ZoneSize);	/* may block */
            atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            vm_size_t tsize;

            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            tsize = z->z_ChunkSize;
            kmem_slab_free(z, tsize);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on an SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL) {
            logmemory(malloc, NULL, type, size, flags);
            return(NULL);
        }
        atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);

        /*
         * Remove us from the ZoneAry[] when we become empty
         */
        if (--z->z_NFree == 0) {
            slgd->ZoneAry[zi] = z->z_Next;
            z->z_Next = NULL;
        }

        /*
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
         */
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
                /*
                 * Diagnostic: c_Next is not total garbage.
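                 * The masked compare below verifies that c_Next points
                 * into the same page as the chunk itself; a per-page free
                 * list only links chunks whose base addresses lie in the
                 * same page, which is what IN_SAME_PAGE_MASK is designed
                 * to check.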
                 */
                KKASSERT(chunk->c_Next == NULL ||
                        ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                        ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
                if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
                chunk_mark_allocated(z, chunk);
#endif
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
                goto done;
            }
            ++z->z_FirstFreePg;
        }

        /*
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        else
            z->z_UIndex = 0;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
#if defined(INVARIANTS)
        chunk_mark_allocated(z, chunk);
#endif
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
            atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
        }

        /*
         * How big is the base structure?
         */
#if defined(INVARIANTS)
        /*
         * Make room for z_Bitmap.  An exact calculation is somewhat more
         * complicated so don't make an exact calculation.
         */
        off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
        bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
        off = sizeof(SLZone);
#endif

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
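         *
         * The (size | (size - 1)) + 1 == (size << 1) test below holds only
         * for powers of 2.  For example, size 64: (64 | 63) + 1 == 128 ==
         * 64 << 1, so the base offset is rounded up to a 64 byte boundary;
         * size 96: (96 | 95) + 1 == 128 != 192, so only the default 8 byte
         * (MIN_CHUNK_SIZE) alignment is applied.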
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_FirstFreePg = ZonePageCount;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;	/* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }
#if defined(INVARIANTS)
        chunk_mark_allocated(z, chunk);
#endif

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                        & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();
    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
        if (use_malloc_pattern) {
            for (i = 0; i < size; i += sizeof(int)) {
                *(int *)((char *)chunk + i) = -1;
            }
        }
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
#endif
    logmemory(malloc, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(kmalloc(size, type, flags));
    if (size == 0) {
        kfree(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
        struct kmemusage *kup;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
                return(ptr);
            if ((nptr = kmalloc(size, type, flags)) == NULL)
                return(NULL);
            bcopy(ptr, nptr, min(size, osize));
            kfree(ptr, type);
            return(nptr);
        }
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
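     *
     * For example (illustrative): an original 100 byte allocation lives in
     * a 104 byte chunk, so a krealloc() to any size from 97 to 104 bytes
     * simply returns the original pointer, while a krealloc() to 96 or 112
     * bytes allocates a new chunk and copies the data.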
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
        zoneindex(&size);
        if (z->z_ChunkSize == size)
            return(ptr);
    }
    if ((nptr = kmalloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free()	(SLAB ALLOCATOR)
 *
 * Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
    kfree(ptr, *(struct malloc_type **)ptr);
}

#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
        logmemory(free_zero, ptr, type, -1, 0);
        logmemory_quick(free_end);
        return;
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
        struct kmemusage *kup;
        unsigned long size;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            kup->ku_pagecnt = 0;
#ifdef INVARIANTS
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));
#endif
            /*
             * NOTE: For oversized allocations we do not record the
             *	     originating cpu.  It gets freed on the cpu calling
             *	     kfree().  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
             */
            crit_enter();
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level ||
                (gd->gd_curthread->td_flags & TDF_INTTHREAD))
            {
                logmemory(free_ovsz_delayed, ptr, type, size, 0);
                z = (SLZone *)ptr;
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
                crit_exit();
            } else {
                crit_exit();
                logmemory(free_ovsz, ptr, type, size, 0);
                kmem_slab_free(ptr, size);	/* may block */
                atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
            }
            logmemory_quick(free_end);
            return;
        }
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * message is sent.
     */
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
#ifdef SMP
        logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
        lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
#else
        panic("Corrupt SLZone");
#endif
        logmemory_quick(free_end);
        return;
    }

    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        SLChunk *scan;
        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
            if (scan == chunk)
                panic("Double free at %p", chunk);
        }
    }
    chunk_mark_free(z, chunk);
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
        panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.
     * Since this code can be called from an IPI callback, do *NOT* try to
     * mess with kernel_map here.  Hysteresis will be performed at malloc()
     * time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 *
 *	This routine will currently obtain the BGL.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * cannot block.
     */
    if (flags & M_RNOWAIT) {
        if (try_mplock() == 0)
            return(NULL);
    } else {
        get_mplock();
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
        vm_map_unlock(&kernel_map);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        crit_exit();
        vm_map_entry_release(count);
        rel_mplock();
        return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
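     * The kernel virtual address can therefore be used directly as the
     * offset into kernel_object, which is why vm_map_insert() below is
     * passed 'addr' as the object offset and the page loop uses
     * OFF_TO_IDX(addr + i).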
     */
    vm_object_reference(&kernel_object);
    vm_map_insert(&kernel_map, &count,
                  &kernel_object, addr, addr, addr + size,
                  VM_MAPTYPE_NORMAL,
                  VM_PROT_ALL, VM_PROT_ALL,
                  0);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
        panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        /*
         * VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
         */
        vmflags = base_vmflags;
        if (flags & M_WAITOK) {
            if (td->td_preempted)
                vmflags |= VM_ALLOC_SYSTEM;
            else
                vmflags |= VM_ALLOC_NORMAL;
        }

        m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    vm_map_unlock(&kernel_map);
                    lwkt_yield();
                    vm_map_lock(&kernel_map);
                } else {
                    vm_map_unlock(&kernel_map);
                    vm_wait(0);
                    vm_map_lock(&kernel_map);
                }
                i -= PAGE_SIZE;	/* retry */
                continue;
            }

            /*
             * We were unable to recover; clean up and return NULL.
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
                /* page should already be busy */
                vm_page_free(m);
            }
            vm_map_delete(&kernel_map, addr, addr + size, &count);
            vm_map_unlock(&kernel_map);
            crit_exit();
            vm_map_entry_release(count);
            rel_mplock();
            return(NULL);
        }
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     *
     * The pages were busied by the allocations above.
     */
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
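     * Pages that already carry PG_ZERO satisfy an M_ZERO request without
     * the bzero(); the flag is cleared unconditionally afterwards because
     * the page can no longer be assumed to be zero once it is handed out.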
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
        m->valid = VM_PAGE_BITS_ALL;
        /* page should already be busy */
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
        vm_page_flag_set(m, PG_REFERENCED);
    }
    vm_map_unlock(&kernel_map);
    vm_map_entry_release(count);
    rel_mplock();
    return((void *)addr);
}

/*
 * kmem_slab_free()
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    get_mplock();
    crit_enter();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
    rel_mplock();
}