/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is the reserved memory: the
 * zone size multiplied by the number of zones, ~80 zones * 128K = 10MB of
 * VM per cpu.  In a kernel implementation all of this memory will be
 * physical, so the zone size is adjusted downward on machines with less
 * physical memory.  The upside is that overhead is bounded... this is the
 * *worst* case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) +	\
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);

#define logmemory(name, ptr, type, size, flags)				\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)						\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0, "");
#endif

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = KvaSize / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
				PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

#ifdef SMP
    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
		ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
	malloc_init(type);
    if (type->ks_limit < bytes)
	type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
	type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
	type->ks_magic = M_MAGIC;
	type->ks_shortdesc = descr;
	malloc_init(type);
	*typep = type;
    }
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
	malloc_uninit(*typep);
	kfree(*typep, M_TEMP);
	*typep = NULL;
    }
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
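 *
 * For example (illustrative numbers, following the chunking table in the
 * header comment): a 100-byte request is rounded up to the 104-byte chunk
 * size and maps to zone index (104 / 8) - 1 == 12.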
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

/*
 * malloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */

void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;	/* not MP synchronized */
	if (ttl >= type->ks_limit) {
	    if (flags & M_NULLOK) {
		logmemory(malloc, NULL, type, size, flags);
		return(NULL);
	    }
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
	logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().
     * We cannot safely manipulate the kernel_map in free() due to free()
     * possibly being called via an IPI message or from sensitive interrupt
     * code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    kmem_slab_free(z, ZoneSize);	/* may block */
	}
	crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    kmem_slab_free(z, z->z_ChunkSize);	/* may block */
	}
	crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on an SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
	struct kmemusage *kup;

	size = round_page(size);
	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
	if (chunk == NULL) {
	    logmemory(malloc, NULL, type, size, flags);
	    return(NULL);
	}
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;
	kup = btokup(chunk);
	kup->ku_pagecnt = size / PAGE_SIZE;
	kup->ku_cpu = gd->gd_cpuid;
	crit_enter();
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
	KKASSERT(z->z_NFree > 0);

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (--z->z_NFree == 0) {
	    slgd->ZoneAry[zi] = z->z_Next;
	    z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
		/*
		 * Diagnostic: c_Next is not total garbage.
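		 *
		 * (Illustrative note: IN_SAME_PAGE_MASK keeps the page-number
		 * bits plus the low MIN_CHUNK_MASK bits, so the assertion
		 * below checks that c_Next lies in the same page as the chunk
		 * and shares its 8-byte alignment.)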
		 */
		KKASSERT(chunk->c_Next == NULL ||
			 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
		if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
		    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
		if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
		    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
		chunk_mark_allocated(z, chunk);
#endif
		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
		goto done;
	    }
	    ++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory, so
	 * it must be available in the never-before-used-memory area
	 * governed by UIndex.  The consequences are very serious if our zone
	 * got corrupted so we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    z->z_UIndex = z->z_UIndex + 1;
	else
	    z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * the UAlloc use above in regard to M_ZERO.  Note that when we are
     * reusing a zone from the FreeZones list UAlloc'd data will not be
     * zero'd, and we do not pre-zero it because we do not want to mess up
     * the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND), expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
	int off;

	if ((z = slgd->FreeZones) != NULL) {
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    bzero(z, sizeof(SLZone));
	    z->z_Flags |= SLZF_UNOTZEROD;
	} else {
	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
	    if (z == NULL)
		goto fail;
	}

	/*
	 * How big is the base structure?
	 */
#if defined(INVARIANTS)
	/*
	 * Make room for z_Bitmap.  An exact calculation is somewhat more
	 * complicated so don't make an exact calculation.
	 */
	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
	off = sizeof(SLZone);
#endif

	/*
	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
	 * Otherwise just 8-byte align the data.
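	 *
	 * Illustrative note on the power-of-2 test below: for size == 64,
	 * (64 | 63) + 1 == 128 == (64 << 1), so the offset is rounded up to
	 * a 64-byte boundary; for size == 96, (96 | 95) + 1 == 128 != 192,
	 * so only 8-byte alignment is applied.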
	 */
	if ((size | (size - 1)) + 1 == (size << 1))
	    off = (off + size - 1) & ~(size - 1);
	else
	    off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
	z->z_Magic = ZALLOC_SLAB_MAGIC;
	z->z_ZoneIndex = zi;
	z->z_NMax = (ZoneSize - off) / size;
	z->z_NFree = z->z_NMax - 1;
	z->z_BasePtr = (char *)z + off;
	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
	z->z_ChunkSize = size;
	z->z_FirstFreePg = ZonePageCount;
	z->z_CpuGd = gd;
	z->z_Cpu = gd->gd_cpuid;
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	z->z_Next = slgd->ZoneAry[zi];
	slgd->ZoneAry[zi] = z;
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;	/* already zero'd */
	    flags |= M_PASSIVE_ZERO;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif

	/*
	 * Slide the base index for initial allocations out of the next
	 * zone we create so we do not over-weight the lower part of the
	 * cpu memory caches.
	 */
	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
			  & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();
    if (flags & M_ZERO)
	bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
	if (use_malloc_pattern) {
	    for (i = 0; i < size; i += sizeof(int)) {
		*(int *)((char *)chunk + i) = -1;
	    }
	}
	chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
#endif
    logmemory(malloc, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(kmalloc(size, type, flags));
    if (size == 0) {
	kfree(ptr, type);
	return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
	struct kmemusage *kup;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    osize = kup->ku_pagecnt << PAGE_SHIFT;
	    if (osize == round_page(size))
		return(ptr);
	    if ((nptr = kmalloc(size, type, flags)) == NULL)
		return(NULL);
	    bcopy(ptr, nptr, min(size, osize));
	    kfree(ptr, type);
	    return(nptr);
	}
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
	return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = kmalloc(size, type, flags)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
	return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free_remote()	(SLAB ALLOCATOR)
 *
 * Free the specified chunk of memory on behalf of another cpu.  This is
 * the IPI callback used when kfree() is handed memory owned by a remote
 * cpu's slab manager.
 */
static
void
free_remote(void *ptr)
{
    logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
    kfree(ptr, *(struct malloc_type **)ptr);
}

#endif

/*
 * kfree()	(SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
	panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
	logmemory(free_zero, ptr, type, -1, 0);
	logmemory_quick(free_end);
	return;
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
	struct kmemusage *kup;
	unsigned long size;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    size = kup->ku_pagecnt << PAGE_SHIFT;
	    kup->ku_pagecnt = 0;
#ifdef INVARIANTS
	    KKASSERT(sizeof(weirdary) <= size);
	    bcopy(weirdary, ptr, sizeof(weirdary));
#endif
	    /*
	     * note: we always adjust our cpu's slot, not the originating
	     * cpu (kup->ku_cpu).  The statistics are in aggregate.
	     *
	     * note: XXX we have still inherited the interrupts-can't-block
	     * assumption.  An interrupt thread does not bump
	     * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	     * primarily until we can fix softupdate's assumptions about free().
	     */
	    crit_enter();
	    --type->ks_inuse[gd->gd_cpuid];
	    type->ks_memuse[gd->gd_cpuid] -= size;
	    if (mycpu->gd_intr_nesting_level ||
		(gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
		logmemory(free_ovsz_delayed, ptr, type, size, 0);
		z = (SLZone *)ptr;
		z->z_Magic = ZALLOC_OVSZ_MAGIC;
		z->z_Next = slgd->FreeOvZones;
		z->z_ChunkSize = size;
		slgd->FreeOvZones = z;
		crit_exit();
	    } else {
		crit_exit();
		logmemory(free_ovsz, ptr, type, size, 0);
		kmem_slab_free(ptr, size);	/* may block */
	    }
	    logmemory_quick(free_end);
	    return;
	}
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
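     *
     * (Illustrative example: with a 128KB zone, ZoneMask is 0x1ffff and the
     * expression below strips the low 17 bits of the pointer, yielding the
     * address of the SLZone header at the base of the zone.)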
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * message is sent.
     */
    if (z->z_CpuGd != gd) {
	*(struct malloc_type **)ptr = type;
#ifdef SMP
	logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
	lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
#else
	panic("Corrupt SLZone");
#endif
	logmemory_quick(free_end);
	return;
    }

    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    if (type->ks_magic != M_MAGIC)
	panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
	SLChunk *scan;
	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
	    if (scan == chunk)
		panic("Double free at %p", chunk);
	}
    }
    chunk_mark_free(z, chunk);
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
	panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
	panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
	z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
	SLZone **pz;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Magic = -1;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
	    ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
	    ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 *
 *	This routine will currently obtain the BGL.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * cannot block.
     */
    if (flags & M_RNOWAIT) {
	if (try_mplock() == 0)
	    return(NULL);
    } else {
	get_mplock();
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, &addr)) {
	vm_map_unlock(&kernel_map);
	if ((flags & M_NULLOK) == 0)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	crit_exit();
	vm_map_entry_release(count);
	rel_mplock();
	return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_reference(&kernel_object);
    vm_map_insert(&kernel_map, &count,
		  &kernel_object, addr, addr, addr + size,
		  VM_MAPTYPE_NORMAL,
		  VM_PROT_ALL, VM_PROT_ALL,
		  0);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
	base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
	base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
	base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
	panic("kmem_slab_alloc: bad flags %08x (%p)",
	      flags, ((int **)&size)[-1]);

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	/*
	 * VM_ALLOC_NORMAL can only be set if we are not preempting.
	 *
	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
	 * implied in this case), though I'm not sure if we really need to
	 * do that.
	 */
	vmflags = base_vmflags;
	if (flags & M_WAITOK) {
	    if (td->td_preempted)
		vmflags |= VM_ALLOC_SYSTEM;
	    else
		vmflags |= VM_ALLOC_NORMAL;
	}

	m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);

	/*
	 * If the allocation failed we either return NULL or we retry.
	 *
	 * If M_WAITOK is specified we wait for more memory and retry.
	 * If M_WAITOK is specified from a preemption we yield instead of
	 * wait.  Livelock will not occur because the interrupt thread
	 * will not be preempting anyone the second time around after the
	 * yield.
	 */
	if (m == NULL) {
	    if (flags & M_WAITOK) {
		if (td->td_preempted) {
		    vm_map_unlock(&kernel_map);
		    lwkt_yield();
		    vm_map_lock(&kernel_map);
		} else {
		    vm_map_unlock(&kernel_map);
		    vm_wait(0);
		    vm_map_lock(&kernel_map);
		}
		i -= PAGE_SIZE;	/* retry */
		continue;
	    }

	    /*
	     * We were unable to recover, cleanup and return NULL
	     */
	    while (i != 0) {
		i -= PAGE_SIZE;
		m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
		/* page should already be busy */
		vm_page_free(m);
	    }
	    vm_map_delete(&kernel_map, addr, addr + size, &count);
	    vm_map_unlock(&kernel_map);
	    crit_exit();
	    vm_map_entry_release(count);
	    rel_mplock();
	    return(NULL);
	}
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     *
     * The pages were busied by the allocations above.
     */
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
	m->valid = VM_PAGE_BITS_ALL;
	/* page should already be busy */
	vm_page_wire(m);
	vm_page_wakeup(m);
	pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
	    bzero((char *)addr + i, PAGE_SIZE);
	vm_page_flag_clear(m, PG_ZERO);
	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
	vm_page_flag_set(m, PG_REFERENCED);
    }
    vm_map_unlock(&kernel_map);
    vm_map_entry_release(count);
    rel_mplock();
    return((void *)addr);
}

/*
 * kmem_slab_free()
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    get_mplock();
    crit_enter();
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
    rel_mplock();
}
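
/*
 * Illustrative usage sketch (not part of the original file).  A typical
 * consumer declares its own malloc type and pairs kmalloc()/kfree() calls
 * against it; the names M_MYDRV and struct mydrv_softc below are
 * hypothetical:
 *
 *	MALLOC_DEFINE(M_MYDRV, "mydrv", "example driver structures");
 *
 *	struct mydrv_softc *sc;
 *
 *	sc = kmalloc(sizeof(*sc), M_MYDRV, M_WAITOK | M_ZERO);
 *	...
 *	kfree(sc, M_MYDRV);
 *
 * Dynamically created pools behave the same way via kmalloc_create() and
 * kmalloc_destroy(), with a struct malloc_type ** handle taking the place
 * of the statically defined type.
 */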