/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 *
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

#include <machine/md_var.h>

#ifdef INVARIANTS
#define	UMA_ALWAYS_CTORDTOR	1
#else
#define	UMA_ALWAYS_CTORDTOR	0
#endif

/*
 * This is the zone and keg from which all zones are spawned.
 */
static uma_zone_t kegs;
static uma_zone_t zones;

/*
 * These are the two zones from which all offpage uma_slab_ts are allocated.
 *
 * One zone is for slab headers that can represent a larger number of items,
 * making the slabs themselves more efficient, and the other zone is for
 * headers that are smaller and represent fewer items, making the headers more
 * efficient.
 */
#define	SLABZONE_SIZE(setsize)					\
    (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
#define	SLABZONE0_SETSIZE	(PAGE_SIZE / 16)
#define	SLABZONE1_SETSIZE	SLAB_MAX_SETSIZE
#define	SLABZONE0_SIZE	SLABZONE_SIZE(SLABZONE0_SETSIZE)
#define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
static uma_zone_t slabzones[2];

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;

/*
 * First available virtual address for boot time allocations.
 */
static vm_offset_t bootstart;
static vm_offset_t bootmem;

static struct sx uma_reclaim_lock;

/*
 * kmem soft limit, initialized by uma_set_limit().  Ensure that early
 * allocations don't trigger a wakeup of the reclaim thread.
 */
unsigned long uma_kmem_limit = LONG_MAX;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
    "UMA kernel memory soft limit");
unsigned long uma_kmem_total;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
    "UMA kernel memory usage");

/* Is the VM done starting up? */
static enum {
	BOOT_COLD,
	BOOT_KVA,
	BOOT_RUNNING,
	BOOT_SHUTDOWN,
} booted = BOOT_COLD;

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)
#define	BUCKET_MIN	BUCKET_SIZE(4)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};
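/*
 * Worked example: BUCKET_SIZE(n) yields the number of item pointers that
 * fit when the bucket header and the pointer array together occupy n
 * pointers' worth of space.  Assuming a 24-byte struct uma_bucket on LP64
 * (16-byte list linkage plus the counts and SMR sequence number; an
 * assumption made for this example only), BUCKET_SIZE(128) =
 * (128 * 8 - 24) / 8 = 125, so the "128 Bucket" zone actually holds 125
 * items per bucket.
 */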
/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip {
	SKIP_NONE =	0,
	SKIP_CNT =	0x00000001,
	SKIP_DTOR =	0x00010000,
	SKIP_FINI =	0x00020000,
};

/* Prototypes. */

void	uma_startup1(vm_offset_t);
void	uma_startup2(void);

static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static void pcpu_page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_reclaim(uma_zone_t zone, bool);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static inline void item_dtor(uma_zone_t zone, void *item, int size,
    void *udata, enum zfreeskip skip);
static int zero_init(void *, int, int);
static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_timeout(uma_zone_t zone, void *);
static int hash_alloc(struct uma_hash *, u_int);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void uma_shutdown(void);
static void *zone_alloc_item(uma_zone_t, void *, int, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
static void zone_free_limit(uma_zone_t zone, int count);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(void *, void **, int, int, int);
static void zone_release(void *, void **, int);
static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);

static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);

static uint64_t uma_zone_get_allocs(uma_zone_t zone);

#ifdef INVARIANTS
static uint64_t uma_keg_get_allocs(uma_keg_t zone);
static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);

static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);

static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
    "Memory allocation debugging");

static u_int dbg_divisor = 1;
SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
    "Debug & thrash every nth item in the memory allocator");

static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
    &uma_dbg_cnt, "memory items debugged");
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
    &uma_skip_cnt, "memory items skipped, not debugged");
#endif

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW, 0, "Universal Memory Allocator");

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

/*
 * Select the slab zone for an offpage slab with the given maximum item count.
 */
static inline uma_zone_t
slabzone(int ipers)
{

	return (slabzones[ipers > SLABZONE0_SETSIZE]);
}

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{

	KASSERT(booted >= BOOT_KVA, ("Bucket enable before init"));
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
		    UMA_ZONE_FIRSTTOUCH);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}
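/*
 * Example: the lookup returns the first (smallest) bucket zone whose
 * ubz_entries satisfies the request.  Note that ubz_entries is
 * BUCKET_SIZE(n), which is slightly less than n because the bucket header
 * consumes part of the allocation, so a request for "n" items may map to
 * a nominally larger bucket zone.  A request exceeding BUCKET_MAX falls
 * off the end of the table and returns the largest zone.
 */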
static struct uma_bucket_zone *
bucket_zone_max(uma_zone_t zone, int nitems)
{
	struct uma_bucket_zone *ubz;
	int bpcpu;

	bpcpu = 2;
	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
		/* Count the cross-domain bucket. */
		bpcpu++;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries * bpcpu * mp_ncpus > nitems)
			break;
	if (ubz == &bucket_zones[0])
		ubz = NULL;
	else
		ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}
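/*
 * Example: for items larger than the first zone's 4096-byte maximum,
 * bucket_select() scales the entry count down proportionally, e.g. an
 * 8192-byte item gets MAX((4096 * BUCKET_SIZE(4)) / 8192, 1) entries.
 * For smaller items the scan picks the last zone whose ubz_maxsize still
 * covers the item size, so a 1000-byte item selects the "16 Bucket" zone
 * (maxsize 1024) and returns its entry count.
 */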
static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * Don't allocate buckets early in boot.
	 */
	if (__predict_false(booted < BOOT_KVA))
		return (NULL);

	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_bucket_size);
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
		bucket->ub_seq = SMR_SEQ_INVALID;
		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
		    zone->uz_name, zone, bucket);
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
	    ("bucket_free: Freeing an SMR bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
}

/*
 * Attempt to satisfy an allocation by retrieving a full bucket from one of the
 * zone's caches.  If a bucket is found the zone is not locked on return.
 */
static uma_bucket_t
zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
{
	uma_bucket_t bucket;
	int i;
	bool dtor = false;

	ZONE_LOCK_ASSERT(zone);

	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
		return (NULL);

	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
	    bucket->ub_seq != SMR_SEQ_INVALID) {
		if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
			return (NULL);
		bucket->ub_seq = SMR_SEQ_INVALID;
		dtor = (zone->uz_dtor != NULL) | UMA_ALWAYS_CTORDTOR;
	}
	MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
	TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
	zdom->uzd_nitems -= bucket->ub_cnt;
	if (zdom->uzd_imin > zdom->uzd_nitems)
		zdom->uzd_imin = zdom->uzd_nitems;
	zone->uz_bkt_count -= bucket->ub_cnt;
	ZONE_UNLOCK(zone);
	if (dtor)
		for (i = 0; i < bucket->ub_cnt; i++)
			item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
			    NULL, SKIP_NONE);

	return (bucket);
}

/*
 * Insert a full bucket into the specified cache.  The "ws" parameter indicates
 * whether the bucket's contents should be counted as part of the zone's
 * working set.
 */
static void
zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
    const bool ws)
{

	ZONE_LOCK_ASSERT(zone);
	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
	    ("%s: zone %p overflow", __func__, zone));

	if (ws && bucket->ub_seq == SMR_SEQ_INVALID)
		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
	else
		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
	zdom->uzd_nitems += bucket->ub_cnt;
	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
		zdom->uzd_imax = zdom->uzd_nitems;
	zone->uz_bkt_count += bucket->ub_cnt;
}
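/*
 * Placement note: zone_fetch_bucket() always takes the bucket at the head
 * of the queue.  Working-set buckets with no pending SMR grace period are
 * therefore inserted at the head so they are reused first, LIFO-style,
 * while their items are still cache-warm; SMR buckets and buckets
 * excluded from the working set go to the tail, which gives an SMR
 * sequence number the longest possible time to expire before the bucket
 * is fetched again.
 */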
/* Pops an item out of a per-cpu cache bucket. */
static inline void *
cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
{
	void *item;

	CRITICAL_ASSERT(curthread);

	bucket->ucb_cnt--;
	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
#ifdef INVARIANTS
	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
#endif
	cache->uc_allocs++;

	return (item);
}

/* Pushes an item into a per-cpu cache bucket. */
static inline void
cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
{

	CRITICAL_ASSERT(curthread);
	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
	    ("uma_zfree: Freeing to non free bucket index."));

	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
	bucket->ucb_cnt++;
	cache->uc_frees++;
}

/*
 * Unload a UMA bucket from a per-cpu cache.
 */
static inline uma_bucket_t
cache_bucket_unload(uma_cache_bucket_t bucket)
{
	uma_bucket_t b;

	b = bucket->ucb_bucket;
	if (b != NULL) {
		MPASS(b->ub_entries == bucket->ucb_entries);
		b->ub_cnt = bucket->ucb_cnt;
		bucket->ucb_bucket = NULL;
		bucket->ucb_entries = bucket->ucb_cnt = 0;
	}

	return (b);
}

static inline uma_bucket_t
cache_bucket_unload_alloc(uma_cache_t cache)
{

	return (cache_bucket_unload(&cache->uc_allocbucket));
}

static inline uma_bucket_t
cache_bucket_unload_free(uma_cache_t cache)
{

	return (cache_bucket_unload(&cache->uc_freebucket));
}

static inline uma_bucket_t
cache_bucket_unload_cross(uma_cache_t cache)
{

	return (cache_bucket_unload(&cache->uc_crossbucket));
}

/*
 * Load a bucket into a per-cpu cache bucket.
 */
static inline void
cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
{

	CRITICAL_ASSERT(curthread);
	MPASS(bucket->ucb_bucket == NULL);

	bucket->ucb_bucket = b;
	bucket->ucb_cnt = b->ub_cnt;
	bucket->ucb_entries = b->ub_entries;
}

static inline void
cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b)
{

	cache_bucket_load(&cache->uc_allocbucket, b);
}

static inline void
cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b)
{

	cache_bucket_load(&cache->uc_freebucket, b);
}

#ifdef NUMA
static inline void
cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b)
{

	cache_bucket_load(&cache->uc_crossbucket, b);
}
#endif

/*
 * Copy and preserve ucb_spare.
 */
static inline void
cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
{

	b1->ucb_bucket = b2->ucb_bucket;
	b1->ucb_entries = b2->ucb_entries;
	b1->ucb_cnt = b2->ucb_cnt;
}

/*
 * Swap two cache buckets.
 */
static inline void
cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
{
	struct uma_cache_bucket b3;

	CRITICAL_ASSERT(curthread);

	cache_bucket_copy(&b3, b1);
	cache_bucket_copy(b1, b2);
	cache_bucket_copy(b2, &b3);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout, NULL);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Update the working set size estimate for the zone's bucket cache.
 * The constants chosen here are somewhat arbitrary.  With an update period of
 * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
 * last 100s.
 */
static void
zone_domain_update_wss(uma_zone_domain_t zdom)
{
	long wss;

	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
	wss = zdom->uzd_imax - zdom->uzd_imin;
	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
}
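/*
 * Worked example: uzd_wss is an exponentially weighted moving average
 * with a weight of 4/5 on the newest sample.  If one 20s interval sees an
 * imax - imin swing of 100 items and the next interval sees none, the
 * estimate goes from (4 * 100 + 0) / 5 = 80 to (4 * 0 + 80) / 5 = 16, so
 * a burst decays to under a fifth of its size within two intervals and is
 * mostly forgotten after roughly five (~100s).
 */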
/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
zone_timeout(uma_zone_t zone, void *unused)
{
	uma_keg_t keg;
	u_int slabs, pages;

	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
		goto update_wss;

	keg = zone->uz_keg;

	/*
	 * Hash zones are non-numa by definition so the first domain
	 * is the only one present.
	 */
	KEG_LOCK(keg, 0);
	pages = keg->uk_domain[0].ud_pages;

	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		KEG_UNLOCK(keg, 0);
		ret = hash_alloc(&newhash, 1 << fls(slabs));
		KEG_LOCK(keg, 0);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg, 0);
			hash_free(&oldhash);
			goto update_wss;
		}
	}
	KEG_UNLOCK(keg, 0);

update_wss:
	ZONE_LOCK(zone);
	for (int i = 0; i < vm_ndomains; i++)
		zone_domain_update_wss(&zone->uz_domain[i]);
	ZONE_UNLOCK(zone);
}
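/*
 * Sizing note: the expansion above requests 1 << fls(slabs) entries, the
 * next power of two above the slab count.  For example, 300 slabs yield
 * fls(300) = 9 and a new hash size of 512, aiming for roughly one slab
 * per hash chain.
 */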
/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash, u_int size)
{
	size_t alloc;

	KASSERT(powerof2(size), ("hash size must be power of 2"));
	if (size > UMA_HASH_SIZE_INIT) {
		hash->uh_hashsize = size;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    UMA_ANYDOMAIN, M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the expansion was performed, 0 otherwise.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_hash_slab_t slab;
	u_int hval;
	u_int idx;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
			LIST_REMOVE(slab, uhs_hlink);
			hval = UMA_HASH(newhash, slab->uhs_data);
			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, uhs_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash table whose bucket storage is being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone    The zone to free to, must be unlocked.
 *	bucket  The free/alloc bucket with items.
 *
 * Returns:
 *	Nothing
 */
static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL || bucket->ub_cnt == 0)
		return;

	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
	    bucket->ub_seq != SMR_SEQ_INVALID) {
		smr_wait(zone->uz_smr, bucket->ub_seq);
		for (i = 0; i < bucket->ub_cnt; i++)
			item_dtor(zone, bucket->ub_bucket[i],
			    zone->uz_size, NULL, SKIP_NONE);
		bucket->ub_seq = SMR_SEQ_INVALID;
	}
	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	if (zone->uz_max_items > 0)
		zone_free_limit(zone, bucket->ub_cnt);
#ifdef INVARIANTS
	bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
#endif
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t bucket;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket = cache_bucket_unload_alloc(cache);
		if (bucket != NULL) {
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
		}
		bucket = cache_bucket_unload_free(cache);
		if (bucket != NULL) {
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
		}
		bucket = cache_bucket_unload_cross(cache);
		if (bucket != NULL) {
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
		}
	}
	bucket_cache_reclaim(zone, true);
}

static void
cache_shrink(uma_zone_t zone, void *unused)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_bucket_size =
	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone, void *unused)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2, b3;
	int domain;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = b3 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
		domain = PCPU_GET(domain);
	else
		domain = 0;
	cache = &zone->uz_cpu[curcpu];
	b1 = cache_bucket_unload_alloc(cache);
	if (b1 != NULL && b1->ub_cnt != 0) {
		zone_put_bucket(zone, &zone->uz_domain[domain], b1, false);
		b1 = NULL;
	}

	/*
	 * Don't flush SMR zone buckets.  This leaves the zone without a
	 * bucket and forces every free to synchronize().
	 */
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
		goto out;
	b2 = cache_bucket_unload_free(cache);
	if (b2 != NULL && b2->ub_cnt != 0) {
		zone_put_bucket(zone, &zone->uz_domain[domain], b2, false);
		b2 = NULL;
	}
	b3 = cache_bucket_unload_cross(cache);

out:
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
	if (b3) {
		bucket_drain(zone, b3);
		bucket_free(zone, b3, NULL);
	}
}

/*
 * Safely drain the per-CPU caches of a zone (or of all zones) into the zone
 * bucket cache.  This is an expensive call because it needs to bind to all
 * CPUs one by one and enter a critical section on each of them in order to
 * safely access their cache buckets.
 * The zone lock must not be held when calling this function.
 */
static void
pcpu_cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Politely shrinking the bucket sizes was not enough; shrink them
	 * aggressively.
	 */
	if (zone)
		cache_shrink(zone, NULL);
	else
		zone_foreach(cache_shrink, NULL);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone, NULL);
		else
			zone_foreach(cache_drain_safe_cpu, NULL);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}
/*
 * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
 * requested a drain, otherwise the per-domain caches are trimmed to the
 * zone's estimated working set size.
 */
static void
bucket_cache_reclaim(uma_zone_t zone, bool drain)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	long target, tofree;
	int i;

	for (i = 0; i < vm_ndomains; i++) {
		/*
		 * The cross bucket is partially filled and not part of
		 * the item count.  Reclaim it individually here.
		 */
		zdom = &zone->uz_domain[i];
		ZONE_CROSS_LOCK(zone);
		bucket = zdom->uzd_cross;
		zdom->uzd_cross = NULL;
		ZONE_CROSS_UNLOCK(zone);
		if (bucket != NULL) {
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
		}

		/*
		 * Shrink the zone bucket size to ensure that the per-CPU
		 * caches don't grow too large.
		 */
		ZONE_LOCK(zone);
		if (i == 0 && zone->uz_bucket_size > zone->uz_bucket_size_min)
			zone->uz_bucket_size--;

		/*
		 * If we were asked to drain the zone, we are done only once
		 * this bucket cache is empty.  Otherwise, we reclaim items in
		 * excess of the zone's estimated working set size.  If the
		 * difference nitems - imin is larger than the WSS estimate,
		 * then the estimate will grow at the end of this interval and
		 * we ignore the historical average.
		 */
		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
		    zdom->uzd_imin);
		while (zdom->uzd_nitems > target) {
			bucket = TAILQ_FIRST(&zdom->uzd_buckets);
			if (bucket == NULL)
				break;
			tofree = bucket->ub_cnt;
			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
			zdom->uzd_nitems -= tofree;

			/*
			 * Shift the bounds of the current WSS interval to
			 * avoid perturbing the estimate.
			 */
			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);

			ZONE_UNLOCK(zone);
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
			ZONE_LOCK(zone);
		}
		ZONE_UNLOCK(zone);
	}
}
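/*
 * Worked example for the trim target above: with uzd_wss = 100,
 * uzd_nitems = 200 and uzd_imin = 50, nitems - imin = 150 exceeds the WSS
 * estimate, so target = 150 and roughly 50 items' worth of buckets are
 * freed.  Had imin been 150, the historical estimate of 100 would have
 * governed instead and the cache would be trimmed toward it.
 */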
static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab_data(slab, keg);
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
#ifdef INVARIANTS
		/*
		 * trash_fini implies that dtor was trash_dtor.  trash_fini
		 * would check that memory hasn't been modified since free,
		 * which executed trash_dtor.
		 * That's why we need to run uma_dbg_kskip() check here,
		 * although we don't make the skip check for other init/fini
		 * invocations.
		 */
		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
		    keg->uk_fini != trash_fini)
#endif
			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
		    NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_domain_t dom;
	uma_slab_t slab, tmp;
	int i, n;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	for (i = 0; i < vm_ndomains; i++) {
		dom = &keg->uk_domain[i];
		CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
		    keg->uk_name, keg, i, dom->ud_free);
		n = 0;
		KEG_LOCK(keg, i);
		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
			if (keg->uk_flags & UMA_ZFLAG_HASH)
				UMA_HASH_REMOVE(&keg->uk_hash, slab);
			n++;
			LIST_REMOVE(slab, us_link);
			LIST_INSERT_HEAD(&freeslabs, slab, us_link);
		}
		dom->ud_pages -= n * keg->uk_ppera;
		dom->ud_free -= n * keg->uk_ipers;
		KEG_UNLOCK(keg, i);
	}

	while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
		LIST_REMOVE(slab, us_link);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_reclaim(uma_zone_t zone, int waitok, bool drain)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, &zone->uz_lock, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
	ZONE_UNLOCK(zone);
	bucket_cache_reclaim(zone, drain);

	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
		keg_drain(zone->uz_keg);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

static void
zone_drain(uma_zone_t zone, void *unused)
{

	zone_reclaim(zone, M_NOWAIT, true);
}

static void
zone_trim(uma_zone_t zone, void *unused)
{

	zone_reclaim(zone, M_NOWAIT, false);
}

/*
 * Allocate a new slab for a keg and insert it into the partial slab list.
 * The keg should be unlocked on entry.  If the allocation succeeds it will
 * be locked on return.
 *
 * Arguments:
 *	flags   Wait flags for the item initialization routine
 *	aflags  Wait flags for the slab allocation
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
    int aflags)
{
	uma_domain_t dom;
	uma_alloc allocf;
	uma_slab_t slab;
	unsigned long size;
	uint8_t *mem;
	uint8_t sflags;
	int i;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("keg_alloc_slab: domain %d out of range", domain));

	allocf = keg->uk_allocf;
	slab = NULL;
	mem = NULL;
	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
		uma_hash_slab_t hslab;
		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
		    domain, aflags);
		if (hslab == NULL)
			goto fail;
		slab = &hslab->uhs_slab;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		aflags |= M_ZERO;
	else
		aflags &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		aflags |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	size = keg->uk_ppera * PAGE_SIZE;
	mem = allocf(zone, size, domain, &sflags, aflags);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
			zone_free_item(slabzone(keg->uk_ipers),
			    slab_tohashslab(slab), NULL, SKIP_NONE);
		goto fail;
	}
	uma_total_inc(size);

	/* For HASH zones all pages go to the same uma_domain. */
	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
		domain = 0;

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
		slab = (uma_slab_t)(mem + keg->uk_pgoff);
	else
		slab_tohashslab(slab)->uhs_data = mem;

	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
			    zone, slab);

	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = sflags;
	slab->us_domain = domain;

	BIT_FILL(keg->uk_ipers, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab_item(slab, keg, i),
			    keg->uk_size, flags) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			goto fail;
		}
	}
	KEG_LOCK(keg, domain);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (keg->uk_flags & UMA_ZFLAG_HASH)
		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

	/*
	 * If we got a slab here it's safe to mark it partially used
	 * and return.  We assume that the caller is going to remove
	 * at least one item.
	 */
	dom = &keg->uk_domain[domain];
	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
	dom->ud_pages += keg->uk_ppera;
	dom->ud_free += keg->uk_ipers;

	return (slab);

fail:
	return (NULL);
}

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	void *mem;
	int pages;
	int i;

	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));

	*pflag = UMA_SLAB_BOOT;
	m = vm_page_alloc_contig_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, pages,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	pa = VM_PAGE_TO_PHYS(m);
	for (i = 0; i < pages; i++, pa += PAGE_SIZE) {
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
		if ((wait & M_NODUMP) == 0)
			dump_add_page(pa);
#endif
	}
	/* Allocate KVA and indirectly advance bootmem. */
	mem = (void *)pmap_map(&bootmem, m->phys_addr,
	    m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE);
	if ((wait & M_ZERO) != 0)
		bzero(mem, pages * PAGE_SIZE);

	return (mem);
}

static void
startup_free(void *mem, vm_size_t bytes)
{
	vm_offset_t va;
	vm_page_t m;

	va = (vm_offset_t)mem;
	m = PHYS_TO_VM_PAGE(pmap_kextract(va));
	pmap_remove(kernel_pmap, va, va + bytes);
	for (; bytes != 0; bytes -= PAGE_SIZE, m++) {
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
		dump_drop_page(VM_PAGE_TO_PHYS(m));
#endif
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KERNEL;
	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);

	return (p);
}

static void *
pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	struct pglist alloctail;
	vm_offset_t addr, zkva;
	int cpu, flags;
	vm_page_t p, p_next;
#ifdef NUMA
	struct pcpu *pc;
#endif

	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);

	TAILQ_INIT(&alloctail);
	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    malloc2vm_flags(wait);
	*pflag = UMA_SLAB_KERNEL;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu)) {
			p = vm_page_alloc(NULL, 0, flags);
		} else {
#ifndef NUMA
			p = vm_page_alloc(NULL, 0, flags);
#else
			pc = pcpu_find(cpu);
			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
				p = NULL;
			else
				p = vm_page_alloc_domain(NULL, 0,
				    pc->pc_domain, flags);
			if (__predict_false(p == NULL))
				p = vm_page_alloc(NULL, 0, flags);
#endif
		}
		if (__predict_false(p == NULL))
			goto fail;
		TAILQ_INSERT_TAIL(&alloctail, p, listq);
	}
	if ((addr = kva_alloc(bytes)) == 0)
		goto fail;
	zkva = addr;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}
	return ((void*)addr);
fail:
	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
		vm_page_unwire_noq(p);
		vm_page_free(p);
	}
	return (NULL);
}

/*
 * Allocates a number of pages not belonging to a VM object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{
	TAILQ_HEAD(, vm_page) alloctail;
	u_long npages;
	vm_offset_t retkva, zkva;
	vm_page_t p, p_next;
	uma_keg_t keg;

	TAILQ_INIT(&alloctail);
	keg = zone->uz_keg;

	npages = howmany(bytes, PAGE_SIZE);
	while (npages > 0) {
		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
		    VM_ALLOC_NOWAIT));
		if (p != NULL) {
			/*
			 * Since the page does not belong to an object, its
			 * listq is unused.
			 */
			TAILQ_INSERT_TAIL(&alloctail, p, listq);
			npages--;
			continue;
		}
		/*
		 * Page allocation failed, free intermediate pages and
		 * exit.
		 */
		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
			vm_page_unwire_noq(p);
			vm_page_free(p);
		}
		return (NULL);
	}
	*flags = UMA_SLAB_PRIV;
	zkva = keg->uk_kva +
	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
	retkva = zkva;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem    A pointer to the memory to be freed
 *	size   The size of the memory being freed
 *	flags  The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, vm_size_t size, uint8_t flags)
{

	if ((flags & UMA_SLAB_BOOT) != 0) {
		startup_free(mem, size);
		return;
	}

	if ((flags & UMA_SLAB_KERNEL) == 0)
		panic("UMA: page_free used with invalid flags %x", flags);

	kmem_free((vm_offset_t)mem, size);
}

/*
 * Frees pcpu zone allocations
 *
 * Arguments:
 *	mem    A pointer to the memory to be freed
 *	size   The size of the memory being freed
 *	flags  The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
{
	vm_offset_t sva, curva;
	vm_paddr_t paddr;
	vm_page_t m;

	MPASS(size == (mp_maxid + 1) * PAGE_SIZE);
	sva = (vm_offset_t)mem;
	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
		paddr = pmap_kextract(curva);
		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	pmap_qremove(sva, size >> PAGE_SHIFT);
	kva_free(sva, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

#ifdef INVARIANTS
struct noslabbits *
slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
{

	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
}
#endif

/*
 * Actual size of embedded struct slab (!OFFPAGE).
 */
size_t
slab_sizeof(int nitems)
{
	size_t s;

	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
	return (roundup(s, UMA_ALIGN_PTR + 1));
}

/*
 * Size of memory for embedded slabs (!OFFPAGE).
 */
size_t
slab_space(int nitems)
{
	return (UMA_SLAB_SIZE - slab_sizeof(nitems));
}

#define	UMA_FIXPT_SHIFT	31
#define	UMA_FRAC_FIXPT(n, d)						\
	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
#define	UMA_FIXPT_PCT(f)						\
	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
#define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
#define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
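/*
 * Worked example: these macros implement unsigned Q0.31 fixed point.
 * UMA_FRAC_FIXPT(1, 4) = (1 << 31) / 4 = 0x20000000 represents 0.25, and
 * UMA_FIXPT_PCT(0x20000000) = (100 * 0x20000000) >> 31 = 25 converts it
 * back to a percentage.  Assuming UMA_MAX_WASTE is 10, as defined in
 * uma_int.h, UMA_MIN_EFF is the fixed-point encoding of 90%.
 */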
/*
 * Compute the number of items that will fit in a slab.  If hdr is true, the
 * item count may be limited to provide space in the slab for an inline slab
 * header.  Otherwise, all slab space will be provided for item storage.
 */
static u_int
slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
{
	u_int ipers;
	u_int padpi;

	/* The padding between items is not needed after the last item. */
	padpi = rsize - size;

	if (hdr) {
		/*
		 * Start with the maximum item count and remove items until
		 * the slab header fits alongside the allocatable memory.
		 */
		for (ipers = MIN(SLAB_MAX_SETSIZE,
		    (slabsize + padpi - slab_sizeof(1)) / rsize);
		    ipers > 0 &&
		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
		    ipers--)
			continue;
	} else {
		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
	}

	return (ipers);
}

/*
 * Compute the number of items that will fit in a slab for a startup zone.
 */
int
slab_ipers(size_t size, int align)
{
	int rsize;

	rsize = roundup(size, align + 1);	/* Assume no CACHESPREAD */
	return (slab_ipers_hdr(size, rsize, UMA_SLAB_SIZE, true));
}
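/*
 * Worked example for slab_ipers_hdr(): for 256-byte items with no
 * alignment padding (rsize == size, padpi == 0) in a single 4096-byte
 * slab, the loop starts at (4096 - slab_sizeof(1)) / 256 = 15 and keeps
 * 15 as long as 15 * 256 + slab_sizeof(15) <= 4096, i.e. as long as the
 * slab header plus its 15-bit free bitset fits in the remaining 256
 * bytes.
 */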
/*
 * Determine the format of a uma keg.  This determines where the slab header
 * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
 *
 * Arguments
 *	keg  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_layout(uma_keg_t keg)
{
	u_int alignsize;
	u_int eff;
	u_int eff_offpage;
	u_int format;
	u_int ipers;
	u_int ipers_offpage;
	u_int pages;
	u_int rsize;
	u_int slabsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
	    (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
	     (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
	    ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
	     __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
	     PRINT_UMA_ZFLAGS));
	KASSERT((keg->uk_flags &
	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) == 0 ||
	    (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
	    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
	     PRINT_UMA_ZFLAGS));

	alignsize = keg->uk_align + 1;
	format = 0;
	ipers = 0;

	/*
	 * Calculate the size of each allocation (rsize) according to
	 * alignment.  If the requested size is smaller than we have
	 * allocation bits for we round it up.
	 */
	rsize = MAX(keg->uk_size, UMA_SMALLEST_UNIT);
	rsize = roundup2(rsize, alignsize);

	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) {
		slabsize = UMA_PCPU_ALLOC_SIZE;
		pages = mp_maxid + 1;
	} else if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
		/*
		 * We want one item to start on every align boundary in a page.
		 * To do this we will span pages.  We will also extend the item
		 * by the size of align if it is an even multiple of align.
		 * Otherwise, it would fall on the same boundary every time.
		 */
		if ((rsize & alignsize) == 0)
			rsize += alignsize;
		slabsize = rsize * (PAGE_SIZE / alignsize);
		slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE);
		slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE);
		pages = howmany(slabsize, PAGE_SIZE);
		slabsize = ptoa(pages);
	} else {
		/*
		 * Choose a slab size of as many pages as it takes to represent
		 * a single item.  We will then try to fit as many additional
		 * items into the slab as possible.  At some point, we may want
		 * to increase the slab size for awkward item sizes in order to
		 * increase efficiency.
		 */
		pages = howmany(keg->uk_size, PAGE_SIZE);
		slabsize = ptoa(pages);
	}

	/* Evaluate an inline slab layout. */
	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
		ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize, true);

	/* TODO: vm_page-embedded slab. */

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags &
	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) != 0) {
		if (ipers == 0) {
			/* We need an extra page for the slab header. */
			pages++;
			slabsize = ptoa(pages);
			ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize,
			    true);
		}
		goto out;
	}

	/*
	 * See if using an OFFPAGE slab will improve our efficiency.
	 * Only do this if we are below our efficiency threshold.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 *     Historically this was not done because the VM could not
	 *     efficiently handle contiguous allocations.
	 */
	eff = UMA_FRAC_FIXPT(ipers * rsize, slabsize);
	ipers_offpage = slab_ipers_hdr(keg->uk_size, rsize, slabsize, false);
	eff_offpage = UMA_FRAC_FIXPT(ipers_offpage * rsize,
	    slabsize + slabzone(ipers_offpage)->uz_keg->uk_rsize);
	if (ipers == 0 || (eff < UMA_MIN_EFF && eff < eff_offpage)) {
		CTR5(KTR_UMA, "UMA decided we need offpage slab headers for "
		    "keg: %s(%p), minimum efficiency allowed = %u%%, "
		    "old efficiency = %u%%, offpage efficiency = %u%%",
		    keg->uk_name, keg, UMA_FIXPT_PCT(UMA_MIN_EFF),
		    UMA_FIXPT_PCT(eff), UMA_FIXPT_PCT(eff_offpage));
		format = UMA_ZFLAG_OFFPAGE;
		ipers = ipers_offpage;
	}

out:
	/*
	 * How do we find the slab header if it is offpage or if not all item
	 * start addresses are in the same page?  We could solve the latter
	 * case with vaddr alignment, but we don't.
	 */
	if ((format & UMA_ZFLAG_OFFPAGE) != 0 ||
	    (ipers - 1) * rsize >= PAGE_SIZE) {
		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
			format |= UMA_ZFLAG_HASH;
		else
			format |= UMA_ZFLAG_VTOSLAB;
	}
	keg->uk_ipers = ipers;
	keg->uk_rsize = rsize;
	keg->uk_flags |= format;
	keg->uk_ppera = pages;
	CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u",
	    __func__, keg->uk_name, keg->uk_flags, rsize, ipers, pages);
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
	    ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__,
	     keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize, ipers,
	     pages));
}
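/*
 * Worked example for keg_layout()'s offpage decision: a 2048-byte item
 * with rsize == 2048 gets slabsize = 4096.  Inline, the slab header
 * leaves room for only one item, so eff = 2048 / 4096 = 50%, below the
 * ~90% threshold implied by UMA_MAX_WASTE = 10 (assumed here).  Offpage,
 * both items fit, and eff_offpage = 4096 / (4096 + hdr_rsize) stays well
 * above 50% for any reasonable offpage header size, so the keg is
 * switched to UMA_ZFLAG_OFFPAGE.
 */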
1990 */ 1991 keg->uk_dr.dr_policy = DOMAINSET_RR(); 1992 keg->uk_dr.dr_iter = 0; 1993 1994 /* 1995 * The master zone is passed to us at keg-creation time. 1996 */ 1997 zone = arg->zone; 1998 keg->uk_name = zone->uz_name; 1999 2000 if (arg->flags & UMA_ZONE_VM) 2001 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 2002 2003 if (arg->flags & UMA_ZONE_ZINIT) 2004 keg->uk_init = zero_init; 2005 2006 if (arg->flags & UMA_ZONE_MALLOC) 2007 keg->uk_flags |= UMA_ZFLAG_VTOSLAB; 2008 2009 #ifndef SMP 2010 keg->uk_flags &= ~UMA_ZONE_PCPU; 2011 #endif 2012 2013 keg_layout(keg); 2014 2015 /* 2016 * Use a first-touch NUMA policy for all kegs that pmap_extract() 2017 * will work on, with the exception of critical VM structures 2018 * necessary for paging. 2019 * 2020 * Zones may override the default by specifying either policy flag 2021 * explicitly. 2022 */ 2022 #ifdef NUMA 2023 if ((keg->uk_flags & 2024 (UMA_ZFLAG_HASH | UMA_ZONE_VM | UMA_ZONE_ROUNDROBIN)) == 0) 2025 keg->uk_flags |= UMA_ZONE_FIRSTTOUCH; 2026 else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0) 2027 keg->uk_flags |= UMA_ZONE_ROUNDROBIN; 2028 #endif 2029 2030 /* 2031 * If we haven't booted yet, we need allocations to go through the 2032 * startup cache until the VM is ready. 2033 */ 2034 #ifdef UMA_MD_SMALL_ALLOC 2035 if (keg->uk_ppera == 1) 2036 keg->uk_allocf = uma_small_alloc; 2037 else 2038 #endif 2039 if (booted < BOOT_KVA) 2040 keg->uk_allocf = startup_alloc; 2041 else if (keg->uk_flags & UMA_ZONE_PCPU) 2042 keg->uk_allocf = pcpu_page_alloc; 2043 else 2044 keg->uk_allocf = page_alloc; 2045 #ifdef UMA_MD_SMALL_ALLOC 2046 if (keg->uk_ppera == 1) 2047 keg->uk_freef = uma_small_free; 2048 else 2049 #endif 2050 if (keg->uk_flags & UMA_ZONE_PCPU) 2051 keg->uk_freef = pcpu_page_free; 2052 else 2053 keg->uk_freef = page_free; 2054 2055 /* 2056 * Initialize keg's locks. 2057 */ 2058 for (i = 0; i < vm_ndomains; i++) 2059 KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS)); 2060 2061 /* 2062 * If we're putting the slab header in the actual page, we need to 2063 * figure out where in each page it goes. See slab_sizeof 2064 * definition. 2065 */ 2066 if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) { 2067 size_t shsize; 2068 2069 shsize = slab_sizeof(keg->uk_ipers); 2070 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize; 2071 /* 2072 * The only way the following is possible is if our 2073 * UMA_ALIGN_PTR adjustments have made us bigger than 2074 * UMA_SLAB_SIZE. I haven't checked whether this is 2075 * mathematically possible for all cases, so we make 2076 * sure here anyway.
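* For illustration only (hypothetical numbers): with uk_ppera == 1 on a 4 KB page and a 48-byte slab_sizeof(uk_ipers), uk_pgoff computes to 4096 - 48 == 4048, i.e. the header occupies the tail of the page just past the item area.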
2077 */ 2078 KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera, 2079 ("zone %s ipers %d rsize %d size %d slab won't fit", 2080 zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size)); 2081 } 2082 2083 if (keg->uk_flags & UMA_ZFLAG_HASH) 2084 hash_alloc(&keg->uk_hash, 0); 2085 2086 CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)", keg, zone->uz_name, zone); 2087 2088 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 2089 2090 rw_wlock(&uma_rwlock); 2091 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 2092 rw_wunlock(&uma_rwlock); 2093 return (0); 2094 } 2095 2096 static void 2097 zone_kva_available(uma_zone_t zone, void *unused) 2098 { 2099 uma_keg_t keg; 2100 2101 if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0) 2102 return; 2103 KEG_GET(zone, keg); 2104 if (keg->uk_flags & UMA_ZONE_PCPU) 2105 keg->uk_allocf = pcpu_page_alloc; 2106 else if (keg->uk_allocf == startup_alloc) 2107 keg->uk_allocf = page_alloc; 2108 } 2109 2110 static void 2111 zone_alloc_counters(uma_zone_t zone, void *unused) 2112 { 2113 2114 zone->uz_allocs = counter_u64_alloc(M_WAITOK); 2115 zone->uz_frees = counter_u64_alloc(M_WAITOK); 2116 zone->uz_fails = counter_u64_alloc(M_WAITOK); 2117 } 2118 2119 static void 2120 zone_alloc_sysctl(uma_zone_t zone, void *unused) 2121 { 2122 uma_zone_domain_t zdom; 2123 uma_domain_t dom; 2124 uma_keg_t keg; 2125 struct sysctl_oid *oid, *domainoid; 2126 int domains, i, cnt; 2127 static const char *nokeg = "cache zone"; 2128 char *c; 2129 2130 /* 2131 * Make a sysctl safe copy of the zone name by removing 2132 * any special characters and handling dups by appending 2133 * an index. 2134 */ 2135 if (zone->uz_namecnt != 0) { 2136 /* Count the number of decimal digits and '_' separator. */ 2137 for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++) 2138 cnt /= 10; 2139 zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1, 2140 M_UMA, M_WAITOK); 2141 sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name, 2142 zone->uz_namecnt); 2143 } else 2144 zone->uz_ctlname = strdup(zone->uz_name, M_UMA); 2145 for (c = zone->uz_ctlname; *c != '\0'; c++) 2146 if (strchr("./\\ -", *c) != NULL) 2147 *c = '_'; 2148 2149 /* 2150 * Basic parameters at the root. 2151 */ 2152 zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma), 2153 OID_AUTO, zone->uz_ctlname, CTLFLAG_RD, NULL, ""); 2154 oid = zone->uz_oid; 2155 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2156 "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size"); 2157 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2158 "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, 2159 zone, 0, sysctl_handle_uma_zone_flags, "A", 2160 "Allocator configuration flags"); 2161 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2162 "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0, 2163 "Desired per-cpu cache size"); 2164 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2165 "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0, 2166 "Maximum allowed per-cpu cache size"); 2167 2168 /* 2169 * keg if present. 
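* Pure cache zones have no keg backing them; they take the else branch below and export only the placeholder name "cache zone".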
2170 */ 2171 if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0) 2172 domains = vm_ndomains; 2173 else 2174 domains = 1; 2175 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2176 "keg", CTLFLAG_RD, NULL, ""); 2177 keg = zone->uz_keg; 2178 if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) { 2179 SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2180 "name", CTLFLAG_RD, keg->uk_name, "Keg name"); 2181 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2182 "rsize", CTLFLAG_RD, &keg->uk_rsize, 0, 2183 "Real object size with alignment"); 2184 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2185 "ppera", CTLFLAG_RD, &keg->uk_ppera, 0, 2186 "pages per-slab allocation"); 2187 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2188 "ipers", CTLFLAG_RD, &keg->uk_ipers, 0, 2189 "items available per-slab"); 2190 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2191 "align", CTLFLAG_RD, &keg->uk_align, 0, 2192 "item alignment mask"); 2193 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2194 "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE, 2195 keg, 0, sysctl_handle_uma_slab_efficiency, "I", 2196 "Slab utilization (100 - internal fragmentation %)"); 2197 domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid), 2198 OID_AUTO, "domain", CTLFLAG_RD, NULL, ""); 2199 for (i = 0; i < domains; i++) { 2200 dom = &keg->uk_domain[i]; 2201 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), 2202 OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, 2203 NULL, ""); 2204 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2205 "pages", CTLFLAG_RD, &dom->ud_pages, 0, 2206 "Total pages currently allocated from VM"); 2207 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2208 "free", CTLFLAG_RD, &dom->ud_free, 0, 2209 "items free in the slab layer"); 2210 } 2211 } else 2212 SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2213 "name", CTLFLAG_RD, nokeg, "Keg name"); 2214 2215 /* 2216 * Information about zone limits. 2217 */ 2218 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2219 "limit", CTLFLAG_RD, NULL, ""); 2220 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2221 "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2222 zone, 0, sysctl_handle_uma_zone_items, "QU", 2223 "current number of allocated items if limit is set"); 2224 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2225 "max_items", CTLFLAG_RD, &zone->uz_max_items, 0, 2226 "Maximum number of cached items"); 2227 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2228 "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0, 2229 "Number of threads sleeping at limit"); 2230 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2231 "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0, 2232 "Total zone limit sleeps"); 2233 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2234 "bucket_max", CTLFLAG_RD, &zone->uz_bkt_max, 0, 2235 "Maximum number of items in the bucket cache"); 2236 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2237 "bucket_cnt", CTLFLAG_RD, &zone->uz_bkt_count, 0, 2238 "Number of items in the bucket cache"); 2239 2240 /* 2241 * Per-domain zone information. 
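* The imin/imax extremes exported here are sampled over each period by the periodic UMA timer, and the working set size (wss) estimate is derived from their difference.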
2242 */ 2243 domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), 2244 OID_AUTO, "domain", CTLFLAG_RD, NULL, ""); 2245 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0) 2246 domains = 1; 2247 for (i = 0; i < domains; i++) { 2248 zdom = &zone->uz_domain[i]; 2249 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), 2250 OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, ""); 2251 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2252 "nitems", CTLFLAG_RD, &zdom->uzd_nitems, 2253 "number of items in this domain"); 2254 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2255 "imax", CTLFLAG_RD, &zdom->uzd_imax, 2256 "maximum item count in this period"); 2257 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2258 "imin", CTLFLAG_RD, &zdom->uzd_imin, 2259 "minimum item count in this period"); 2260 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2261 "wss", CTLFLAG_RD, &zdom->uzd_wss, 2262 "Working set size"); 2263 } 2264 2265 /* 2266 * General statistics. 2267 */ 2268 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2269 "stats", CTLFLAG_RD, NULL, ""); 2270 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2271 "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE, 2272 zone, 1, sysctl_handle_uma_zone_cur, "I", 2273 "Current number of allocated items"); 2274 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2275 "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2276 zone, 0, sysctl_handle_uma_zone_allocs, "QU", 2277 "Total allocation calls"); 2278 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2279 "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2280 zone, 0, sysctl_handle_uma_zone_frees, "QU", 2281 "Total free calls"); 2282 SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2283 "fails", CTLFLAG_RD, &zone->uz_fails, 2284 "Number of allocation failures"); 2285 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2286 "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0, 2287 "Free calls from the wrong domain"); 2288 } 2289 2290 struct uma_zone_count { 2291 const char *name; 2292 int count; 2293 }; 2294 2295 static void 2296 zone_count(uma_zone_t zone, void *arg) 2297 { 2298 struct uma_zone_count *cnt; 2299 2300 cnt = arg; 2301 /* 2302 * Some zones are rapidly created with identical names and 2303 * destroyed out of order. This can lead to gaps in the count. 2304 * Use one greater than the maximum observed for this name. 2305 */ 2306 if (strcmp(zone->uz_name, cnt->name) == 0) 2307 cnt->count = MAX(cnt->count, 2308 zone->uz_namecnt + 1); 2309 } 2310 2311 static void 2312 zone_update_caches(uma_zone_t zone) 2313 { 2314 int i; 2315 2316 for (i = 0; i <= mp_maxid; i++) { 2317 cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size); 2318 cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags); 2319 } 2320 } 2321 2322 /* 2323 * Zone header ctor. This initializes all fields, locks, etc. 
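* Unlike keg_ctor(), this routine may also create the backing keg itself, via uma_kcreate() below, when the caller supplies neither an import function nor an existing keg.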
2324 * 2325 * Arguments/Returns follow uma_ctor specifications 2326 * udata Actually uma_zctor_args 2327 */ 2328 static int 2329 zone_ctor(void *mem, int size, void *udata, int flags) 2330 { 2331 struct uma_zone_count cnt; 2332 struct uma_zctor_args *arg = udata; 2333 uma_zone_t zone = mem; 2334 uma_zone_t z; 2335 uma_keg_t keg; 2336 int i; 2337 2338 bzero(zone, size); 2339 zone->uz_name = arg->name; 2340 zone->uz_ctor = arg->ctor; 2341 zone->uz_dtor = arg->dtor; 2342 zone->uz_init = NULL; 2343 zone->uz_fini = NULL; 2344 zone->uz_sleeps = 0; 2345 zone->uz_xdomain = 0; 2346 zone->uz_bucket_size = 0; 2347 zone->uz_bucket_size_min = 0; 2348 zone->uz_bucket_size_max = BUCKET_MAX; 2349 zone->uz_flags = (arg->flags & UMA_ZONE_SMR); 2350 zone->uz_warning = NULL; 2351 /* The domain structures follow the cpu structures. */ 2352 zone->uz_domain = 2353 (struct uma_zone_domain *)&zone->uz_cpu[mp_maxid + 1]; 2354 zone->uz_bkt_max = ULONG_MAX; 2355 timevalclear(&zone->uz_ratecheck); 2356 2357 /* Count the number of duplicate names. */ 2358 cnt.name = arg->name; 2359 cnt.count = 0; 2360 zone_foreach(zone_count, &cnt); 2361 zone->uz_namecnt = cnt.count; 2362 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); 2363 ZONE_CROSS_LOCK_INIT(zone); 2364 2365 for (i = 0; i < vm_ndomains; i++) 2366 TAILQ_INIT(&zone->uz_domain[i].uzd_buckets); 2367 2368 #ifdef INVARIANTS 2369 if (arg->uminit == trash_init && arg->fini == trash_fini) 2370 zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR; 2371 #endif 2372 2373 /* 2374 * This is a pure cache zone, no kegs. 2375 */ 2376 if (arg->import) { 2377 KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0, 2378 ("zone_ctor: Import specified for non-cache zone.")); 2379 if (arg->flags & UMA_ZONE_VM) 2380 arg->flags |= UMA_ZFLAG_CACHEONLY; 2381 zone->uz_flags = arg->flags; 2382 zone->uz_size = arg->size; 2383 zone->uz_import = arg->import; 2384 zone->uz_release = arg->release; 2385 zone->uz_arg = arg->arg; 2386 rw_wlock(&uma_rwlock); 2387 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 2388 rw_wunlock(&uma_rwlock); 2389 goto out; 2390 } 2391 2392 /* 2393 * Use the regular zone/keg/slab allocator. 2394 */ 2395 zone->uz_import = zone_import; 2396 zone->uz_release = zone_release; 2397 zone->uz_arg = zone; 2398 keg = arg->keg; 2399 2400 if (arg->flags & UMA_ZONE_SECONDARY) { 2401 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 2402 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 2403 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 2404 zone->uz_init = arg->uminit; 2405 zone->uz_fini = arg->fini; 2406 zone->uz_flags |= UMA_ZONE_SECONDARY; 2407 rw_wlock(&uma_rwlock); 2408 ZONE_LOCK(zone); 2409 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 2410 if (LIST_NEXT(z, uz_link) == NULL) { 2411 LIST_INSERT_AFTER(z, zone, uz_link); 2412 break; 2413 } 2414 } 2415 ZONE_UNLOCK(zone); 2416 rw_wunlock(&uma_rwlock); 2417 } else if (keg == NULL) { 2418 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 2419 arg->align, arg->flags)) == NULL) 2420 return (ENOMEM); 2421 } else { 2422 struct uma_kctor_args karg; 2423 int error; 2424 2425 /* We should only be here from uma_startup() */ 2426 karg.size = arg->size; 2427 karg.uminit = arg->uminit; 2428 karg.fini = arg->fini; 2429 karg.align = arg->align; 2430 karg.flags = (arg->flags & ~UMA_ZONE_SMR); 2431 karg.zone = zone; 2432 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 2433 flags); 2434 if (error) 2435 return (error); 2436 } 2437 2438 /* Inherit properties from the keg. 
*/ 2439 zone->uz_keg = keg; 2440 zone->uz_size = keg->uk_size; 2441 zone->uz_flags |= (keg->uk_flags & 2442 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 2443 2444 out: 2445 if (__predict_true(booted >= BOOT_RUNNING)) { 2446 zone_alloc_counters(zone, NULL); 2447 zone_alloc_sysctl(zone, NULL); 2448 } else { 2449 zone->uz_allocs = EARLY_COUNTER; 2450 zone->uz_frees = EARLY_COUNTER; 2451 zone->uz_fails = EARLY_COUNTER; 2452 } 2453 2454 /* Caller requests a private SMR context. */ 2455 if ((zone->uz_flags & UMA_ZONE_SMR) != 0) 2456 zone->uz_smr = smr_create(zone->uz_name); 2457 2458 KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) != 2459 (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET), 2460 ("Invalid zone flag combination")); 2461 if (arg->flags & UMA_ZFLAG_INTERNAL) 2462 zone->uz_bucket_size_max = zone->uz_bucket_size = 0; 2463 if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) 2464 zone->uz_bucket_size = BUCKET_MAX; 2465 else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0) 2466 zone->uz_bucket_size_max = zone->uz_bucket_size = BUCKET_MIN; 2467 else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0) 2468 zone->uz_bucket_size = 0; 2469 else 2470 zone->uz_bucket_size = bucket_select(zone->uz_size); 2471 zone->uz_bucket_size_min = zone->uz_bucket_size; 2472 if (zone->uz_dtor != NULL || zone->uz_ctor != NULL) 2473 zone->uz_flags |= UMA_ZFLAG_CTORDTOR; 2474 zone_update_caches(zone); 2475 2476 return (0); 2477 } 2478 2479 /* 2480 * Keg header dtor. This frees all data, destroys locks, frees the hash 2481 * table and removes the keg from the global list. 2482 * 2483 * Arguments/Returns follow uma_dtor specifications 2484 * udata unused 2485 */ 2486 static void 2487 keg_dtor(void *arg, int size, void *udata) 2488 { 2489 uma_keg_t keg; 2490 uint32_t free, pages; 2491 int i; 2492 2493 keg = (uma_keg_t)arg; 2494 free = pages = 0; 2495 for (i = 0; i < vm_ndomains; i++) { 2496 free += keg->uk_domain[i].ud_free; 2497 pages += keg->uk_domain[i].ud_pages; 2498 KEG_LOCK_FINI(keg, i); 2499 } 2500 if (pages != 0) 2501 printf("Freed UMA keg (%s) was not empty (%u items). " 2502 "Lost %u pages of memory.\n", 2503 keg->uk_name ? keg->uk_name : "", 2504 pages / keg->uk_ppera * keg->uk_ipers - free, pages); 2505 2506 hash_free(&keg->uk_hash); 2507 } 2508 2509 /* 2510 * Zone header dtor. 2511 * 2512 * Arguments/Returns follow uma_dtor specifications 2513 * udata unused 2514 */ 2515 static void 2516 zone_dtor(void *arg, int size, void *udata) 2517 { 2518 uma_zone_t zone; 2519 uma_keg_t keg; 2520 2521 zone = (uma_zone_t)arg; 2522 2523 sysctl_remove_oid(zone->uz_oid, 1, 1); 2524 2525 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 2526 cache_drain(zone); 2527 2528 rw_wlock(&uma_rwlock); 2529 LIST_REMOVE(zone, uz_link); 2530 rw_wunlock(&uma_rwlock); 2531 /* 2532 * XXX there are some races here where 2533 * the zone can be drained but the zone lock 2534 * released and then refilled before we 2535 * remove it... we don't care for now 2536 */ 2537 zone_reclaim(zone, M_WAITOK, true); 2538 /* 2539 * We only destroy kegs from non-secondary/non-cache zones.
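* A secondary zone shares the master zone's keg and a cache zone never had one, so in both cases the keg must be left intact here.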
2540 */ 2541 if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { 2542 keg = zone->uz_keg; 2543 rw_wlock(&uma_rwlock); 2544 LIST_REMOVE(keg, uk_link); 2545 rw_wunlock(&uma_rwlock); 2546 zone_free_item(kegs, keg, NULL, SKIP_NONE); 2547 } 2548 counter_u64_free(zone->uz_allocs); 2549 counter_u64_free(zone->uz_frees); 2550 counter_u64_free(zone->uz_fails); 2551 free(zone->uz_ctlname, M_UMA); 2552 ZONE_LOCK_FINI(zone); 2553 ZONE_CROSS_LOCK_FINI(zone); 2554 } 2555 2556 static void 2557 zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg) 2558 { 2559 uma_keg_t keg; 2560 uma_zone_t zone; 2561 2562 LIST_FOREACH(keg, &uma_kegs, uk_link) { 2563 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 2564 zfunc(zone, arg); 2565 } 2566 LIST_FOREACH(zone, &uma_cachezones, uz_link) 2567 zfunc(zone, arg); 2568 } 2569 2570 /* 2571 * Traverses every zone in the system and calls a callback 2572 * 2573 * Arguments: 2574 * zfunc A pointer to a function which accepts a zone 2575 * as an argument. 2576 * 2577 * Returns: 2578 * Nothing 2579 */ 2580 static void 2581 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg) 2582 { 2583 2584 rw_rlock(&uma_rwlock); 2585 zone_foreach_unlocked(zfunc, arg); 2586 rw_runlock(&uma_rwlock); 2587 } 2588 2589 /* 2590 * Initialize the kernel memory allocator. This is done after pages can be 2591 * allocated but before general KVA is available. 2592 */ 2593 void 2594 uma_startup1(vm_offset_t virtual_avail) 2595 { 2596 struct uma_zctor_args args; 2597 size_t ksize, zsize, size; 2598 uma_keg_t masterkeg; 2599 uintptr_t m; 2600 uint8_t pflag; 2601 2602 bootstart = bootmem = virtual_avail; 2603 2604 rw_init(&uma_rwlock, "UMA lock"); 2605 sx_init(&uma_reclaim_lock, "umareclaim"); 2606 2607 ksize = sizeof(struct uma_keg) + 2608 (sizeof(struct uma_domain) * vm_ndomains); 2609 ksize = roundup(ksize, UMA_SUPER_ALIGN); 2610 zsize = sizeof(struct uma_zone) + 2611 (sizeof(struct uma_cache) * (mp_maxid + 1)) + 2612 (sizeof(struct uma_zone_domain) * vm_ndomains); 2613 zsize = roundup(zsize, UMA_SUPER_ALIGN); 2614 2615 /* Allocate the zone of zones, zone of kegs, and zone of zones keg. 
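* All three come from a single startup_alloc() block; the pointer arithmetic below places zones at offset 0, kegs at zsize, and the master keg at 2 * zsize.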
*/ 2616 size = (zsize * 2) + ksize; 2617 m = (uintptr_t)startup_alloc(NULL, size, 0, &pflag, M_NOWAIT | M_ZERO); 2618 zones = (uma_zone_t)m; 2619 m += zsize; 2620 kegs = (uma_zone_t)m; 2621 m += zsize; 2622 masterkeg = (uma_keg_t)m; 2623 2624 /* "manually" create the initial zone */ 2625 memset(&args, 0, sizeof(args)); 2626 args.name = "UMA Kegs"; 2627 args.size = ksize; 2628 args.ctor = keg_ctor; 2629 args.dtor = keg_dtor; 2630 args.uminit = zero_init; 2631 args.fini = NULL; 2632 args.keg = masterkeg; 2633 args.align = UMA_SUPER_ALIGN - 1; 2634 args.flags = UMA_ZFLAG_INTERNAL; 2635 zone_ctor(kegs, zsize, &args, M_WAITOK); 2636 2637 args.name = "UMA Zones"; 2638 args.size = zsize; 2639 args.ctor = zone_ctor; 2640 args.dtor = zone_dtor; 2641 args.uminit = zero_init; 2642 args.fini = NULL; 2643 args.keg = NULL; 2644 args.align = UMA_SUPER_ALIGN - 1; 2645 args.flags = UMA_ZFLAG_INTERNAL; 2646 zone_ctor(zones, zsize, &args, M_WAITOK); 2647 2648 /* Now make zones for slab headers */ 2649 slabzones[0] = uma_zcreate("UMA Slabs 0", SLABZONE0_SIZE, 2650 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2651 slabzones[1] = uma_zcreate("UMA Slabs 1", SLABZONE1_SIZE, 2652 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2653 2654 hashzone = uma_zcreate("UMA Hash", 2655 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 2656 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2657 2658 bucket_init(); 2659 smr_init(); 2660 } 2661 2662 #ifndef UMA_MD_SMALL_ALLOC 2663 extern void vm_radix_reserve_kva(void); 2664 #endif 2665 2666 /* 2667 * Advertise the availability of normal kva allocations and switch to 2668 * the default back-end allocator. Marks the KVA we consumed on startup 2669 * as used in the map. 2670 */ 2671 void 2672 uma_startup2(void) 2673 { 2674 2675 if (bootstart != bootmem) { 2676 vm_map_lock(kernel_map); 2677 (void)vm_map_insert(kernel_map, NULL, 0, bootstart, bootmem, 2678 VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT); 2679 vm_map_unlock(kernel_map); 2680 } 2681 2682 #ifndef UMA_MD_SMALL_ALLOC 2683 /* Set up radix zone to use noobj_alloc. */ 2684 vm_radix_reserve_kva(); 2685 #endif 2686 2687 booted = BOOT_KVA; 2688 zone_foreach_unlocked(zone_kva_available, NULL); 2689 bucket_enable(); 2690 } 2691 2692 /* 2693 * Finish our initialization steps. 2694 */ 2695 static void 2696 uma_startup3(void) 2697 { 2698 2699 #ifdef INVARIANTS 2700 TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor); 2701 uma_dbg_cnt = counter_u64_alloc(M_WAITOK); 2702 uma_skip_cnt = counter_u64_alloc(M_WAITOK); 2703 #endif 2704 zone_foreach_unlocked(zone_alloc_counters, NULL); 2705 zone_foreach_unlocked(zone_alloc_sysctl, NULL); 2706 callout_init(&uma_callout, 1); 2707 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 2708 booted = BOOT_RUNNING; 2709 2710 EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL, 2711 EVENTHANDLER_PRI_FIRST); 2712 } 2713 2714 static void 2715 uma_shutdown(void) 2716 { 2717 2718 booted = BOOT_SHUTDOWN; 2719 } 2720 2721 static uma_keg_t 2722 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 2723 int align, uint32_t flags) 2724 { 2725 struct uma_kctor_args args; 2726 2727 args.size = size; 2728 args.uminit = uminit; 2729 args.fini = fini; 2730 args.align = (align == UMA_ALIGN_CACHE) ? 
uma_align_cache : align; 2731 args.flags = flags; 2732 args.zone = zone; 2733 return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK)); 2734 } 2735 2736 /* Public functions */ 2737 /* See uma.h */ 2738 void 2739 uma_set_align(int align) 2740 { 2741 2742 if (align != UMA_ALIGN_CACHE) 2743 uma_align_cache = align; 2744 } 2745 2746 /* See uma.h */ 2747 uma_zone_t 2748 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 2749 uma_init uminit, uma_fini fini, int align, uint32_t flags) 2750 2751 { 2752 struct uma_zctor_args args; 2753 uma_zone_t res; 2754 2755 KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", 2756 align, name)); 2757 2758 /* This stuff is essential for the zone ctor */ 2759 memset(&args, 0, sizeof(args)); 2760 args.name = name; 2761 args.size = size; 2762 args.ctor = ctor; 2763 args.dtor = dtor; 2764 args.uminit = uminit; 2765 args.fini = fini; 2766 #ifdef INVARIANTS 2767 /* 2768 * Inject procedures which check for memory use after free if we are 2769 * allowed to scramble the memory while it is not allocated. This 2770 * requires that: UMA is actually able to access the memory, no init 2771 * or fini procedures, no dependency on the initial value of the 2772 * memory, and no (legitimate) use of the memory after free. Note, 2773 * the ctor and dtor do not need to be empty. 2774 */ 2775 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH | 2776 UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) { 2777 args.uminit = trash_init; 2778 args.fini = trash_fini; 2779 } 2780 #endif 2781 args.align = align; 2782 args.flags = flags; 2783 args.keg = NULL; 2784 2785 sx_slock(&uma_reclaim_lock); 2786 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 2787 sx_sunlock(&uma_reclaim_lock); 2788 2789 return (res); 2790 } 2791 2792 /* See uma.h */ 2793 uma_zone_t 2794 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 2795 uma_init zinit, uma_fini zfini, uma_zone_t master) 2796 { 2797 struct uma_zctor_args args; 2798 uma_keg_t keg; 2799 uma_zone_t res; 2800 2801 keg = master->uz_keg; 2802 memset(&args, 0, sizeof(args)); 2803 args.name = name; 2804 args.size = keg->uk_size; 2805 args.ctor = ctor; 2806 args.dtor = dtor; 2807 args.uminit = zinit; 2808 args.fini = zfini; 2809 args.align = keg->uk_align; 2810 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 2811 args.keg = keg; 2812 2813 sx_slock(&uma_reclaim_lock); 2814 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 2815 sx_sunlock(&uma_reclaim_lock); 2816 2817 return (res); 2818 } 2819 2820 /* See uma.h */ 2821 uma_zone_t 2822 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 2823 uma_init zinit, uma_fini zfini, uma_import zimport, 2824 uma_release zrelease, void *arg, int flags) 2825 { 2826 struct uma_zctor_args args; 2827 2828 memset(&args, 0, sizeof(args)); 2829 args.name = name; 2830 args.size = size; 2831 args.ctor = ctor; 2832 args.dtor = dtor; 2833 args.uminit = zinit; 2834 args.fini = zfini; 2835 args.import = zimport; 2836 args.release = zrelease; 2837 args.arg = arg; 2838 args.align = 0; 2839 args.flags = flags | UMA_ZFLAG_CACHE; 2840 2841 return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); 2842 } 2843 2844 /* See uma.h */ 2845 void 2846 uma_zdestroy(uma_zone_t zone) 2847 { 2848 2849 /* 2850 * Large slabs are expensive to reclaim, so don't bother doing 2851 * unnecessary work if we're shutting down. 
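* Such zones are simply abandoned: with no fini and the stock zone_release, tearing them down would free nothing that the imminent shutdown does not already reclaim.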
2852 */ 2853 if (booted == BOOT_SHUTDOWN && 2854 zone->uz_fini == NULL && zone->uz_release == zone_release) 2855 return; 2856 sx_slock(&uma_reclaim_lock); 2857 zone_free_item(zones, zone, NULL, SKIP_NONE); 2858 sx_sunlock(&uma_reclaim_lock); 2859 } 2860 2861 void 2862 uma_zwait(uma_zone_t zone) 2863 { 2864 void *item; 2865 2866 item = uma_zalloc_arg(zone, NULL, M_WAITOK); 2867 uma_zfree(zone, item); 2868 } 2869 2870 void * 2871 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags) 2872 { 2873 void *item; 2874 #ifdef SMP 2875 int i; 2876 2877 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 2878 #endif 2879 item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO); 2880 if (item != NULL && (flags & M_ZERO)) { 2881 #ifdef SMP 2882 for (i = 0; i <= mp_maxid; i++) 2883 bzero(zpcpu_get_cpu(item, i), zone->uz_size); 2884 #else 2885 bzero(item, zone->uz_size); 2886 #endif 2887 } 2888 return (item); 2889 } 2890 2891 /* 2892 * A stub while both regular and pcpu cases are identical. 2893 */ 2894 void 2895 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata) 2896 { 2897 2898 #ifdef SMP 2899 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 2900 #endif 2901 uma_zfree_arg(zone, item, udata); 2902 } 2903 2904 static inline void * 2905 item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags, 2906 void *item) 2907 { 2908 #ifdef INVARIANTS 2909 bool skipdbg; 2910 2911 skipdbg = uma_dbg_zskip(zone, item); 2912 if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 && 2913 zone->uz_ctor != trash_ctor) 2914 trash_ctor(item, size, udata, flags); 2915 #endif 2916 /* Check flags before loading ctor pointer. */ 2917 if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) && 2918 __predict_false(zone->uz_ctor != NULL) && 2919 zone->uz_ctor(item, size, udata, flags) != 0) { 2920 counter_u64_add(zone->uz_fails, 1); 2921 zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT); 2922 return (NULL); 2923 } 2924 #ifdef INVARIANTS 2925 if (!skipdbg) 2926 uma_dbg_alloc(zone, NULL, item); 2927 #endif 2928 if (flags & M_ZERO) 2929 bzero(item, size); 2930 2931 return (item); 2932 } 2933 2934 static inline void 2935 item_dtor(uma_zone_t zone, void *item, int size, void *udata, 2936 enum zfreeskip skip) 2937 { 2938 #ifdef INVARIANTS 2939 bool skipdbg; 2940 2941 skipdbg = uma_dbg_zskip(zone, item); 2942 if (skip == SKIP_NONE && !skipdbg) { 2943 if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0) 2944 uma_dbg_free(zone, udata, item); 2945 else 2946 uma_dbg_free(zone, NULL, item); 2947 } 2948 #endif 2949 if (__predict_true(skip < SKIP_DTOR)) { 2950 if (zone->uz_dtor != NULL) 2951 zone->uz_dtor(item, size, udata); 2952 #ifdef INVARIANTS 2953 if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 && 2954 zone->uz_dtor != trash_dtor) 2955 trash_dtor(item, size, udata); 2956 #endif 2957 } 2958 } 2959 2960 #if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS) 2961 #define UMA_ZALLOC_DEBUG 2962 static int 2963 uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags) 2964 { 2965 int error; 2966 2967 error = 0; 2968 #ifdef WITNESS 2969 if (flags & M_WAITOK) { 2970 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2971 "uma_zalloc_debug: zone \"%s\"", zone->uz_name); 2972 } 2973 #endif 2974 2975 #ifdef INVARIANTS 2976 KASSERT((flags & M_EXEC) == 0, 2977 ("uma_zalloc_debug: called with M_EXEC")); 2978 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 2979 ("uma_zalloc_debug: called within spinlock or critical section")); 2980 KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0, 
2981 ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO")); 2982 #endif 2983 2984 #ifdef DEBUG_MEMGUARD 2985 if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) { 2986 void *item; 2987 item = memguard_alloc(zone->uz_size, flags); 2988 if (item != NULL) { 2989 error = EJUSTRETURN; 2990 if (zone->uz_init != NULL && 2991 zone->uz_init(item, zone->uz_size, flags) != 0) { 2992 *itemp = NULL; 2993 return (error); 2994 } 2995 if (zone->uz_ctor != NULL && 2996 zone->uz_ctor(item, zone->uz_size, udata, 2997 flags) != 0) { 2998 counter_u64_add(zone->uz_fails, 1); 2999 zone->uz_fini(item, zone->uz_size); 3000 *itemp = NULL; 3001 return (error); 3002 } 3003 *itemp = item; 3004 return (error); 3005 } 3006 /* This is unfortunate but should not be fatal. */ 3007 } 3008 #endif 3009 return (error); 3010 } 3011 3012 static int 3013 uma_zfree_debug(uma_zone_t zone, void *item, void *udata) 3014 { 3015 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3016 ("uma_zfree_debug: called with spinlock or critical section held")); 3017 3018 #ifdef DEBUG_MEMGUARD 3019 if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && is_memguard_addr(item)) { 3020 if (zone->uz_dtor != NULL) 3021 zone->uz_dtor(item, zone->uz_size, udata); 3022 if (zone->uz_fini != NULL) 3023 zone->uz_fini(item, zone->uz_size); 3024 memguard_free(item); 3025 return (EJUSTRETURN); 3026 } 3027 #endif 3028 return (0); 3029 } 3030 #endif 3031 3032 static __noinline void * 3033 uma_zalloc_single(uma_zone_t zone, void *udata, int flags) 3034 { 3035 int domain; 3036 3037 /* 3038 * We can not get a bucket so try to return a single item. 3039 */ 3040 if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH) 3041 domain = PCPU_GET(domain); 3042 else 3043 domain = UMA_ANYDOMAIN; 3044 return (zone_alloc_item(zone, udata, domain, flags)); 3045 } 3046 3047 /* See uma.h */ 3048 void * 3049 uma_zalloc_smr(uma_zone_t zone, int flags) 3050 { 3051 uma_cache_bucket_t bucket; 3052 uma_cache_t cache; 3053 void *item; 3054 int size, uz_flags; 3055 3056 #ifdef UMA_ZALLOC_DEBUG 3057 KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0, 3058 ("uma_zalloc_arg: called with non-SMR zone.\n")); 3059 if (uma_zalloc_debug(zone, &item, NULL, flags) == EJUSTRETURN) 3060 return (item); 3061 #endif 3062 3063 critical_enter(); 3064 do { 3065 cache = &zone->uz_cpu[curcpu]; 3066 bucket = &cache->uc_allocbucket; 3067 size = cache_uz_size(cache); 3068 uz_flags = cache_uz_flags(cache); 3069 if (__predict_true(bucket->ucb_cnt != 0)) { 3070 item = cache_bucket_pop(cache, bucket); 3071 critical_exit(); 3072 return (item_ctor(zone, uz_flags, size, NULL, flags, 3073 item)); 3074 } 3075 } while (cache_alloc(zone, cache, NULL, flags)); 3076 critical_exit(); 3077 3078 return (uma_zalloc_single(zone, NULL, flags)); 3079 } 3080 3081 /* See uma.h */ 3082 void * 3083 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 3084 { 3085 uma_cache_bucket_t bucket; 3086 uma_cache_t cache; 3087 void *item; 3088 int size, uz_flags; 3089 3090 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3091 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 3092 3093 /* This is the fast path allocation */ 3094 CTR3(KTR_UMA, "uma_zalloc_arg zone %s(%p) flags %d", zone->uz_name, 3095 zone, flags); 3096 3097 #ifdef UMA_ZALLOC_DEBUG 3098 KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, 3099 ("uma_zalloc_arg: called with SMR zone.\n")); 3100 if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN) 3101 return (item); 3102 #endif 3103 3104 /* 3105 * If possible, allocate from the per-CPU cache. 
There are two 3106 * requirements for safe access to the per-CPU cache: (1) the thread 3107 * accessing the cache must not be preempted or yield during access, 3108 * and (2) the thread must not migrate CPUs without switching which 3109 * cache it accesses. We rely on a critical section to prevent 3110 * preemption and migration. We release the critical section in 3111 * order to acquire the zone mutex if we are unable to allocate from 3112 * the current cache; when we re-acquire the critical section, we 3113 * must detect and handle migration if it has occurred. 3114 */ 3115 critical_enter(); 3116 do { 3117 cache = &zone->uz_cpu[curcpu]; 3118 bucket = &cache->uc_allocbucket; 3119 size = cache_uz_size(cache); 3120 uz_flags = cache_uz_flags(cache); 3121 if (__predict_true(bucket->ucb_cnt != 0)) { 3122 item = cache_bucket_pop(cache, bucket); 3123 critical_exit(); 3124 return (item_ctor(zone, uz_flags, size, udata, flags, 3125 item)); 3126 } 3127 } while (cache_alloc(zone, cache, udata, flags)); 3128 critical_exit(); 3129 3130 return (uma_zalloc_single(zone, udata, flags)); 3131 } 3132 3133 /* 3134 * Replenish an alloc bucket and possibly restore an old one. Called in 3135 * a critical section. Returns in a critical section. 3136 * 3137 * A false return value indicates an allocation failure. 3138 * A true return value indicates success and the caller should retry. 3139 */ 3140 static __noinline bool 3141 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags) 3142 { 3143 uma_zone_domain_t zdom; 3144 uma_bucket_t bucket; 3145 int domain; 3146 bool lockfail; 3147 3148 CRITICAL_ASSERT(curthread); 3149 3150 /* 3151 * If we have run out of items in our alloc bucket, see 3152 * if we can switch with the free bucket. 3153 * 3154 * SMR Zones can't re-use the free bucket until the sequence has 3155 * expired. 3156 */ 3157 if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && 3158 cache->uc_freebucket.ucb_cnt != 0) { 3159 cache_bucket_swap(&cache->uc_freebucket, 3160 &cache->uc_allocbucket); 3161 return (true); 3162 } 3163 3164 /* 3165 * Discard any empty allocation bucket while we hold no locks. 3166 */ 3167 bucket = cache_bucket_unload_alloc(cache); 3168 critical_exit(); 3169 if (bucket != NULL) 3170 bucket_free(zone, bucket, udata); 3171 3172 /* Short-circuit for zones without buckets and low memory. */ 3173 if (zone->uz_bucket_size == 0 || bucketdisable) { 3174 critical_enter(); 3175 return (false); 3176 } 3177 3178 /* 3179 * The attempt to retrieve the item from the per-CPU cache has failed, 3180 * so we must go back to the zone. This requires the zone lock, so we 3181 * must drop the critical section, then re-acquire it when we go back 3182 * to the cache. Since the critical section is released, we may be 3183 * preempted or migrate. As such, make sure not to maintain any 3184 * thread-local state specific to the cache from prior to releasing 3185 * the critical section. 3186 */ 3187 lockfail = false; 3188 if (ZONE_TRYLOCK(zone) == 0) { 3189 /* Record contention to size the buckets. */ 3190 ZONE_LOCK(zone); 3191 lockfail = true; 3192 } 3193 3194 /* See if we lost the race to fill the cache. */ 3195 critical_enter(); 3196 cache = &zone->uz_cpu[curcpu]; 3197 if (cache->uc_allocbucket.ucb_bucket != NULL) { 3198 ZONE_UNLOCK(zone); 3199 return (true); 3200 } 3201 3202 /* 3203 * Check the zone's cache of buckets.
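* First-touch zones prefer the bucket list of the current CPU's domain; round-robin zones keep a single list in uz_domain[0], which is why domain is left as UMA_ANYDOMAIN in that case.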
3204 */ 3205 if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH) { 3206 domain = PCPU_GET(domain); 3207 zdom = &zone->uz_domain[domain]; 3208 } else { 3209 domain = UMA_ANYDOMAIN; 3210 zdom = &zone->uz_domain[0]; 3211 } 3212 3213 if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) { 3214 KASSERT(bucket->ub_cnt != 0, 3215 ("uma_zalloc_arg: Returning an empty bucket.")); 3216 cache_bucket_load_alloc(cache, bucket); 3217 return (true); 3218 } 3219 /* We are no longer associated with this CPU. */ 3220 critical_exit(); 3221 3222 /* 3223 * We bump the uz count when the cache size is insufficient to 3224 * handle the working set. 3225 */ 3226 if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max) 3227 zone->uz_bucket_size++; 3228 ZONE_UNLOCK(zone); 3229 3230 /* 3231 * Fill a bucket and attempt to use it as the alloc bucket. 3232 */ 3233 bucket = zone_alloc_bucket(zone, udata, domain, flags); 3234 CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", 3235 zone->uz_name, zone, bucket); 3236 if (bucket == NULL) { 3237 critical_enter(); 3238 return (false); 3239 } 3240 3241 /* 3242 * See if we lost the race or were migrated. Cache the 3243 * initialized bucket to make this less likely or claim 3244 * the memory directly. 3245 */ 3246 ZONE_LOCK(zone); 3247 critical_enter(); 3248 cache = &zone->uz_cpu[curcpu]; 3249 if (cache->uc_allocbucket.ucb_bucket == NULL && 3250 ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0 || 3251 domain == PCPU_GET(domain))) { 3252 cache_bucket_load_alloc(cache, bucket); 3253 zdom->uzd_imax += bucket->ub_cnt; 3254 } else if (zone->uz_bkt_count >= zone->uz_bkt_max) { 3255 critical_exit(); 3256 ZONE_UNLOCK(zone); 3257 bucket_drain(zone, bucket); 3258 bucket_free(zone, bucket, udata); 3259 critical_enter(); 3260 return (true); 3261 } else 3262 zone_put_bucket(zone, zdom, bucket, false); 3263 ZONE_UNLOCK(zone); 3264 return (true); 3265 } 3266 3267 void * 3268 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) 3269 { 3270 3271 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3272 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 3273 3274 /* This is the fast path allocation */ 3275 CTR4(KTR_UMA, "uma_zalloc_domain zone %s(%p) domain %d flags %d", 3276 zone->uz_name, zone, domain, flags); 3277 3278 if (flags & M_WAITOK) { 3279 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3280 "uma_zalloc_domain: zone \"%s\"", zone->uz_name); 3281 } 3282 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3283 ("uma_zalloc_domain: called with spinlock or critical section held")); 3284 3285 return (zone_alloc_item(zone, udata, domain, flags)); 3286 } 3287 3288 /* 3289 * Find a slab with some space. Prefer slabs that are partially used over those 3290 * that are totally full. This helps to reduce fragmentation. 3291 * 3292 * If 'rr' is 1, search all domains starting from 'domain'. Otherwise check 3293 * only 'domain'. 
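* When 'rr' is true the do/while loop below visits domain, domain + 1, ... modulo vm_ndomains and gives up once it wraps around to the starting domain.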
3294 */ 3295 static uma_slab_t 3296 keg_first_slab(uma_keg_t keg, int domain, bool rr) 3297 { 3298 uma_domain_t dom; 3299 uma_slab_t slab; 3300 int start; 3301 3302 KASSERT(domain >= 0 && domain < vm_ndomains, 3303 ("keg_first_slab: domain %d out of range", domain)); 3304 KEG_LOCK_ASSERT(keg, domain); 3305 3306 slab = NULL; 3307 start = domain; 3308 do { 3309 dom = &keg->uk_domain[domain]; 3310 if (!LIST_EMPTY(&dom->ud_part_slab)) 3311 return (LIST_FIRST(&dom->ud_part_slab)); 3312 if (!LIST_EMPTY(&dom->ud_free_slab)) { 3313 slab = LIST_FIRST(&dom->ud_free_slab); 3314 LIST_REMOVE(slab, us_link); 3315 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 3316 return (slab); 3317 } 3318 if (rr) 3319 domain = (domain + 1) % vm_ndomains; 3320 } while (domain != start); 3321 3322 return (NULL); 3323 } 3324 3325 /* 3326 * Fetch an existing slab from a free or partial list. Returns with the 3327 * keg domain lock held if a slab was found or unlocked if not. 3328 */ 3329 static uma_slab_t 3330 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags) 3331 { 3332 uma_slab_t slab; 3333 uint32_t reserve; 3334 3335 /* HASH has a single free list. */ 3336 if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) 3337 domain = 0; 3338 3339 KEG_LOCK(keg, domain); 3340 reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve; 3341 if (keg->uk_domain[domain].ud_free <= reserve || 3342 (slab = keg_first_slab(keg, domain, rr)) == NULL) { 3343 KEG_UNLOCK(keg, domain); 3344 return (NULL); 3345 } 3346 return (slab); 3347 } 3348 3349 static uma_slab_t 3350 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags) 3351 { 3352 struct vm_domainset_iter di; 3353 uma_slab_t slab; 3354 int aflags, domain; 3355 bool rr; 3356 3357 restart: 3358 /* 3359 * Use the keg's policy if upper layers haven't already specified a 3360 * domain (as happens with first-touch zones). 3361 * 3362 * To avoid races we run the iterator with the keg lock held, but that 3363 * means that we cannot allow the vm_domainset layer to sleep. Thus, 3364 * clear M_WAITOK and handle low memory conditions locally. 3365 */ 3366 rr = rdomain == UMA_ANYDOMAIN; 3367 if (rr) { 3368 aflags = (flags & ~M_WAITOK) | M_NOWAIT; 3369 vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, 3370 &aflags); 3371 } else { 3372 aflags = flags; 3373 domain = rdomain; 3374 } 3375 3376 for (;;) { 3377 slab = keg_fetch_free_slab(keg, domain, rr, flags); 3378 if (slab != NULL) 3379 return (slab); 3380 3381 /* 3382 * M_NOVM means don't ask at all! 3383 */ 3384 if (flags & M_NOVM) 3385 break; 3386 3387 slab = keg_alloc_slab(keg, zone, domain, flags, aflags); 3388 if (slab != NULL) 3389 return (slab); 3390 if (!rr && (flags & M_WAITOK) == 0) 3391 break; 3392 if (rr && vm_domainset_iter_policy(&di, &domain) != 0) { 3393 if ((flags & M_WAITOK) != 0) { 3394 vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask); 3395 goto restart; 3396 } 3397 break; 3398 } 3399 } 3400 3401 /* 3402 * We might not have been able to get a slab but another cpu 3403 * could have while we were unlocked. Check again before we 3404 * fail. 
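* keg_fetch_free_slab() retakes the keg lock itself, so a successful last-chance fetch still returns with the appropriate domain lock held, as our callers expect.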
3405 */ 3406 if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) 3407 return (slab); 3408 3409 return (NULL); 3410 } 3411 3412 static void * 3413 slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 3414 { 3415 uma_domain_t dom; 3416 void *item; 3417 int freei; 3418 3419 KEG_LOCK_ASSERT(keg, slab->us_domain); 3420 3421 dom = &keg->uk_domain[slab->us_domain]; 3422 freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1; 3423 BIT_CLR(keg->uk_ipers, freei, &slab->us_free); 3424 item = slab_item(slab, keg, freei); 3425 slab->us_freecount--; 3426 dom->ud_free--; 3427 3428 /* Move this slab to the full list */ 3429 if (slab->us_freecount == 0) { 3430 LIST_REMOVE(slab, us_link); 3431 LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); 3432 } 3433 3434 return (item); 3435 } 3436 3437 static int 3438 zone_import(void *arg, void **bucket, int max, int domain, int flags) 3439 { 3440 uma_domain_t dom; 3441 uma_zone_t zone; 3442 uma_slab_t slab; 3443 uma_keg_t keg; 3444 #ifdef NUMA 3445 int stripe; 3446 #endif 3447 int i; 3448 3449 zone = arg; 3450 slab = NULL; 3451 keg = zone->uz_keg; 3452 /* Try to keep the buckets totally full */ 3453 for (i = 0; i < max; ) { 3454 if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL) 3455 break; 3456 #ifdef NUMA 3457 stripe = howmany(max, vm_ndomains); 3458 #endif 3459 dom = &keg->uk_domain[slab->us_domain]; 3460 while (slab->us_freecount && i < max) { 3461 bucket[i++] = slab_alloc_item(keg, slab); 3462 if (dom->ud_free <= keg->uk_reserve) 3463 break; 3464 #ifdef NUMA 3465 /* 3466 * If the zone is striped we pick a new slab for every 3467 * N allocations. Eliminating this conditional will 3468 * instead pick a new domain for each bucket rather 3469 * than stripe within each bucket. The current option 3470 * produces more fragmentation and requires more cpu 3471 * time but yields better distribution. 3472 */ 3473 if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 && 3474 vm_ndomains > 1 && --stripe == 0) 3475 break; 3476 #endif 3477 } 3478 KEG_UNLOCK(keg, slab->us_domain); 3479 /* Don't block if we allocated any successfully. */ 3480 flags &= ~M_WAITOK; 3481 flags |= M_NOWAIT; 3482 } 3483 3484 return (i); 3485 } 3486 3487 static int 3488 zone_alloc_limit_hard(uma_zone_t zone, int count, int flags) 3489 { 3490 uint64_t old, new, total, max; 3491 3492 /* 3493 * The hard case. We're going to sleep because there were existing 3494 * sleepers or because we ran out of items. This routine enforces 3495 * fairness by keeping fifo order. 3496 * 3497 * First release our ill-gotten gains and make some noise. 3498 */ 3499 for (;;) { 3500 zone_free_limit(zone, count); 3501 zone_log_warning(zone); 3502 zone_maxaction(zone); 3503 if (flags & M_NOWAIT) 3504 return (0); 3505 3506 /* 3507 * We need to allocate an item or set ourselves as a sleeper 3508 * while the sleepq lock is held to avoid wakeup races. This 3509 * is essentially a home-rolled semaphore. 3510 */ 3511 sleepq_lock(&zone->uz_max_items); 3512 old = zone->uz_items; 3513 do { 3514 MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX); 3515 /* Cache the max since we will evaluate twice. */ 3516 max = zone->uz_max_items; 3517 if (UZ_ITEMS_SLEEPERS(old) != 0 || 3518 UZ_ITEMS_COUNT(old) >= max) 3519 new = old + UZ_ITEMS_SLEEPER; 3520 else 3521 new = old + MIN(count, max - old); 3522 } while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0); 3523 3524 /* We may have successfully allocated under the sleepq lock.
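* In that case the fcmpset loop above granted us (new - old) items rather than adding a sleeper bit, so UZ_ITEMS_SLEEPERS(new) is zero and we can return that count directly.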
*/ 3525 if (UZ_ITEMS_SLEEPERS(new) == 0) { 3526 sleepq_release(&zone->uz_max_items); 3527 return (new - old); 3528 } 3529 3530 /* 3531 * This is in a different cacheline from uz_items so that we 3532 * don't constantly invalidate the fastpath cacheline when we 3533 * adjust item counts. This could be limited to toggling on 3534 * transitions. 3535 */ 3536 atomic_add_32(&zone->uz_sleepers, 1); 3537 atomic_add_64(&zone->uz_sleeps, 1); 3538 3539 /* 3540 * We have added ourselves as a sleeper. The sleepq lock 3541 * protects us from wakeup races. Sleep now and then retry. 3542 */ 3543 sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0); 3544 sleepq_wait(&zone->uz_max_items, PVM); 3545 3546 /* 3547 * After wakeup, remove ourselves as a sleeper and try 3548 * again. We no longer have the sleepq lock for protection. 3549 * 3550 * Subtract ourselves as a sleeper while attempting to add 3551 * our count. 3552 */ 3553 atomic_subtract_32(&zone->uz_sleepers, 1); 3554 old = atomic_fetchadd_64(&zone->uz_items, 3555 -(UZ_ITEMS_SLEEPER - count)); 3556 /* We're no longer a sleeper. */ 3557 old -= UZ_ITEMS_SLEEPER; 3558 3559 /* 3560 * If we're still at the limit, restart. Notably do not 3561 * block on other sleepers. Cache the max value to protect 3562 * against changes via sysctl. 3563 */ 3564 total = UZ_ITEMS_COUNT(old); 3565 max = zone->uz_max_items; 3566 if (total >= max) 3567 continue; 3568 /* Truncate if necessary, otherwise wake other sleepers. */ 3569 if (total + count > max) { 3570 zone_free_limit(zone, total + count - max); 3571 count = max - total; 3572 } else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0) 3573 wakeup_one(&zone->uz_max_items); 3574 3575 return (count); 3576 } 3577 } 3578 3579 /* 3580 * Allocate 'count' items from our max_items limit. Returns the number 3581 * available. If M_NOWAIT is not specified it will sleep until at least 3582 * one item can be allocated. 3583 */ 3584 static int 3585 zone_alloc_limit(uma_zone_t zone, int count, int flags) 3586 { 3587 uint64_t old; 3588 uint64_t max; 3589 3590 max = zone->uz_max_items; 3591 MPASS(max > 0); 3592 3593 /* 3594 * We expect normal allocations to succeed with a simple 3595 * fetchadd. 3596 */ 3597 old = atomic_fetchadd_64(&zone->uz_items, count); 3598 if (__predict_true(old + count <= max)) 3599 return (count); 3600 3601 /* 3602 * If we had some items and no sleepers, just return the 3603 * truncated value. We have to release the excess space 3604 * though because that may wake sleepers who weren't woken 3605 * because we were temporarily over the limit. 3606 */ 3607 if (old < max) { 3608 zone_free_limit(zone, (old + count) - max); 3609 return (max - old); 3610 } 3611 return (zone_alloc_limit_hard(zone, count, flags)); 3612 } 3613 3614 /* 3615 * Free a number of items back to the limit. 3616 */ 3617 static void 3618 zone_free_limit(uma_zone_t zone, int count) 3619 { 3620 uint64_t old; 3621 3622 MPASS(count > 0); 3623 3624 /* 3625 * In the common case we either have no sleepers or 3626 * are still over the limit and can just return. 3627 */ 3628 old = atomic_fetchadd_64(&zone->uz_items, -count); 3629 if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 || 3630 UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items)) 3631 return; 3632 3633 /* 3634 * Moderate the rate of wakeups. Sleepers will continue 3635 * to generate wakeups if necessary.
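* Only one sleeper is woken per free; as each woken thread leaves zone_alloc_limit_hard() it wakes the next one if room remains, so the sleep queue drains without a thundering herd.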
3636 */ 3637 wakeup_one(&zone->uz_max_items); 3638 } 3639 3640 static uma_bucket_t 3641 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) 3642 { 3643 uma_bucket_t bucket; 3644 int maxbucket, cnt; 3645 3646 CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name, 3647 zone, domain); 3648 3649 /* Avoid allocs targeting empty domains. */ 3650 if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain)) 3651 domain = UMA_ANYDOMAIN; 3652 3653 if (zone->uz_max_items > 0) 3654 maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size, 3655 M_NOWAIT); 3656 else 3657 maxbucket = zone->uz_bucket_size; 3658 if (maxbucket == 0) 3659 return (NULL); 3660 3661 /* Don't wait for buckets, preserve caller's NOVM setting. */ 3662 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 3663 if (bucket == NULL) { 3664 cnt = 0; 3665 goto out; 3666 } 3667 3668 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 3669 MIN(maxbucket, bucket->ub_entries), domain, flags); 3670 3671 /* 3672 * Initialize the memory if necessary. 3673 */ 3674 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 3675 int i; 3676 3677 for (i = 0; i < bucket->ub_cnt; i++) 3678 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 3679 flags) != 0) 3680 break; 3681 /* 3682 * If we couldn't initialize the whole bucket, put the 3683 * rest back onto the freelist. 3684 */ 3685 if (i != bucket->ub_cnt) { 3686 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 3687 bucket->ub_cnt - i); 3688 #ifdef INVARIANTS 3689 bzero(&bucket->ub_bucket[i], 3690 sizeof(void *) * (bucket->ub_cnt - i)); 3691 #endif 3692 bucket->ub_cnt = i; 3693 } 3694 } 3695 3696 cnt = bucket->ub_cnt; 3697 if (bucket->ub_cnt == 0) { 3698 bucket_free(zone, bucket, udata); 3699 counter_u64_add(zone->uz_fails, 1); 3700 bucket = NULL; 3701 } 3702 out: 3703 if (zone->uz_max_items > 0 && cnt < maxbucket) 3704 zone_free_limit(zone, maxbucket - cnt); 3705 3706 return (bucket); 3707 } 3708 3709 /* 3710 * Allocates a single item from a zone. 3711 * 3712 * Arguments 3713 * zone The zone to alloc for. 3714 * udata The data to be passed to the constructor. 3715 * domain The domain to allocate from or UMA_ANYDOMAIN. 3716 * flags M_WAITOK, M_NOWAIT, M_ZERO. 3717 * 3718 * Returns 3719 * NULL if there is no memory and M_NOWAIT is set 3720 * An item if successful 3721 */ 3722 3723 static void * 3724 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) 3725 { 3726 void *item; 3727 3728 if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0) 3729 return (NULL); 3730 3731 /* Avoid allocs targeting empty domains. */ 3732 if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain)) 3733 domain = UMA_ANYDOMAIN; 3734 3735 if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) 3736 goto fail_cnt; 3737 3738 /* 3739 * We have to call both the zone's init (not the keg's init) 3740 * and the zone's ctor. This is because the item is going from 3741 * a keg slab directly to the user, and the user is expecting it 3742 * to be both zone-init'd as well as zone-ctor'd.
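* (The keg-level uk_init, by contrast, was already applied when the backing slab was created, so it is not repeated on this path.)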
3743 */ 3744 if (zone->uz_init != NULL) { 3745 if (zone->uz_init(item, zone->uz_size, flags) != 0) { 3746 zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT); 3747 goto fail_cnt; 3748 } 3749 } 3750 item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, flags, 3751 item); 3752 if (item == NULL) 3753 goto fail; 3754 3755 counter_u64_add(zone->uz_allocs, 1); 3756 CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, 3757 zone->uz_name, zone); 3758 3759 return (item); 3760 3761 fail_cnt: 3762 counter_u64_add(zone->uz_fails, 1); 3763 fail: 3764 if (zone->uz_max_items > 0) 3765 zone_free_limit(zone, 1); 3766 CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", 3767 zone->uz_name, zone); 3768 3769 return (NULL); 3770 } 3771 3772 /* See uma.h */ 3773 void 3774 uma_zfree_smr(uma_zone_t zone, void *item) 3775 { 3776 uma_cache_t cache; 3777 uma_cache_bucket_t bucket; 3778 int domain, itemdomain, uz_flags; 3779 3780 #ifdef UMA_ZALLOC_DEBUG 3781 KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0, 3782 ("uma_zfree_smr: called with non-SMR zone.\n")); 3783 KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer.")); 3784 if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN) 3785 return; 3786 #endif 3787 cache = &zone->uz_cpu[curcpu]; 3788 uz_flags = cache_uz_flags(cache); 3789 domain = itemdomain = 0; 3790 #ifdef NUMA 3791 if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) 3792 itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); 3793 #endif 3794 critical_enter(); 3795 do { 3796 cache = &zone->uz_cpu[curcpu]; 3797 /* SMR Zones must free to the free bucket. */ 3798 bucket = &cache->uc_freebucket; 3799 #ifdef NUMA 3800 domain = PCPU_GET(domain); 3801 if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && 3802 domain != itemdomain) { 3803 bucket = &cache->uc_crossbucket; 3804 } 3805 #endif 3806 if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) { 3807 cache_bucket_push(cache, bucket, item); 3808 critical_exit(); 3809 return; 3810 } 3811 } while (cache_free(zone, cache, NULL, item, itemdomain)); 3812 critical_exit(); 3813 3814 /* 3815 * If nothing else caught this, we'll just do an internal free. 3816 */ 3817 zone_free_item(zone, item, NULL, SKIP_NONE); 3818 } 3819 3820 /* See uma.h */ 3821 void 3822 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 3823 { 3824 uma_cache_t cache; 3825 uma_cache_bucket_t bucket; 3826 int domain, itemdomain, uz_flags; 3827 3828 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3829 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 3830 3831 CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone); 3832 3833 #ifdef UMA_ZALLOC_DEBUG 3834 KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, 3835 ("uma_zfree_arg: called with SMR zone.\n")); 3836 if (uma_zfree_debug(zone, item, udata) == EJUSTRETURN) 3837 return; 3838 #endif 3839 /* uma_zfree(..., NULL) does nothing, to match free(9). */ 3840 if (item == NULL) 3841 return; 3842 3843 /* 3844 * We are accessing the per-cpu cache without a critical section to 3845 * fetch size and flags. This is acceptable; if we are preempted we 3846 * will simply read another cpu's line. 3847 */ 3848 cache = &zone->uz_cpu[curcpu]; 3849 uz_flags = cache_uz_flags(cache); 3850 if (UMA_ALWAYS_CTORDTOR || 3851 __predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0)) 3852 item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE); 3853 3854 /* 3855 * The race here is acceptable. If we miss it we'll just have to wait 3856 * a little longer for the limits to be reset.
3857 */ 3858 if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) { 3859 if (zone->uz_sleepers > 0) 3860 goto zfree_item; 3861 } 3862 3863 /* 3864 * If possible, free to the per-CPU cache. There are two 3865 * requirements for safe access to the per-CPU cache: (1) the thread 3866 * accessing the cache must not be preempted or yield during access, 3867 * and (2) the thread must not migrate CPUs without switching which 3868 * cache it accesses. We rely on a critical section to prevent 3869 * preemption and migration. We release the critical section in 3870 * order to acquire the zone mutex if we are unable to free to the 3871 * current cache; when we re-acquire the critical section, we must 3872 * detect and handle migration if it has occurred. 3873 */ 3874 domain = itemdomain = 0; 3875 #ifdef NUMA 3876 if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) 3877 itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item)); 3878 #endif 3879 critical_enter(); 3880 do { 3881 cache = &zone->uz_cpu[curcpu]; 3882 /* 3883 * Try to free into the allocbucket first to give LIFO 3884 * ordering for cache-hot datastructures. Spill over 3885 * into the freebucket if necessary. Alloc will swap 3886 * them if one runs dry. 3887 */ 3888 bucket = &cache->uc_allocbucket; 3889 #ifdef NUMA 3890 domain = PCPU_GET(domain); 3891 if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && 3892 domain != itemdomain) { 3893 bucket = &cache->uc_crossbucket; 3894 } else 3895 #endif 3896 if (bucket->ucb_cnt >= bucket->ucb_entries) 3897 bucket = &cache->uc_freebucket; 3898 if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) { 3899 cache_bucket_push(cache, bucket, item); 3900 critical_exit(); 3901 return; 3902 } 3903 } while (cache_free(zone, cache, udata, item, itemdomain)); 3904 critical_exit(); 3905 3906 /* 3907 * If nothing else caught this, we'll just do an internal free. 3908 */ 3909 zfree_item: 3910 zone_free_item(zone, item, udata, SKIP_DTOR); 3911 } 3912 3913 #ifdef NUMA 3914 /* 3915 * sort crossdomain free buckets to domain correct buckets and cache 3916 * them. 3917 */ 3918 static void 3919 zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata) 3920 { 3921 struct uma_bucketlist fullbuckets; 3922 uma_zone_domain_t zdom; 3923 uma_bucket_t b; 3924 void *item; 3925 int domain; 3926 3927 CTR3(KTR_UMA, 3928 "uma_zfree: zone %s(%p) draining cross bucket %p", 3929 zone->uz_name, zone, bucket); 3930 3931 TAILQ_INIT(&fullbuckets); 3932 3933 /* 3934 * To avoid having ndomain * ndomain buckets for sorting we have a 3935 * lock on the current crossfree bucket. A full matrix with 3936 * per-domain locking could be used if necessary. 
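	 *
	 * For example, on a hypothetical four-domain machine, a cross bucket
	 * freed from a CPU in domain 1 may hold items backed by pages in
	 * domains 0, 2 and 3.  Each item is pushed onto the single
	 * uzd_cross bucket for its home domain under ZONE_CROSS_LOCK(); any
	 * bucket that fills up is queued on "fullbuckets" and later moved to
	 * that domain's cache under the zone lock.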
 */
	ZONE_CROSS_LOCK(zone);
	while (bucket->ub_cnt > 0) {
		item = bucket->ub_bucket[bucket->ub_cnt - 1];
		domain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
		zdom = &zone->uz_domain[domain];
		if (zdom->uzd_cross == NULL) {
			zdom->uzd_cross = bucket_alloc(zone, udata, M_NOWAIT);
			if (zdom->uzd_cross == NULL)
				break;
		}
		zdom->uzd_cross->ub_bucket[zdom->uzd_cross->ub_cnt++] = item;
		if (zdom->uzd_cross->ub_cnt == zdom->uzd_cross->ub_entries) {
			TAILQ_INSERT_HEAD(&fullbuckets, zdom->uzd_cross,
			    ub_link);
			zdom->uzd_cross = NULL;
		}
		bucket->ub_cnt--;
	}
	ZONE_CROSS_UNLOCK(zone);
	if (!TAILQ_EMPTY(&fullbuckets)) {
		ZONE_LOCK(zone);
		while ((b = TAILQ_FIRST(&fullbuckets)) != NULL) {
			if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
				b->ub_seq = smr_current(zone->uz_smr);
			TAILQ_REMOVE(&fullbuckets, b, ub_link);
			if (zone->uz_bkt_count >= zone->uz_bkt_max) {
				ZONE_UNLOCK(zone);
				bucket_drain(zone, b);
				bucket_free(zone, b, udata);
				ZONE_LOCK(zone);
			} else {
				domain = _vm_phys_domain(
				    pmap_kextract(
				    (vm_offset_t)b->ub_bucket[0]));
				zdom = &zone->uz_domain[domain];
				zone_put_bucket(zone, zdom, b, true);
			}
		}
		ZONE_UNLOCK(zone);
	}
	if (bucket->ub_cnt != 0)
		bucket_drain(zone, bucket);
	bucket->ub_seq = SMR_SEQ_INVALID;
	bucket_free(zone, bucket, udata);
}
#endif

static void
zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
    int domain, int itemdomain)
{
	uma_zone_domain_t zdom;

#ifdef NUMA
	/*
	 * Buckets coming from the wrong domain will be entirely for the
	 * only other domain on two-domain systems.  In this case we can
	 * simply cache them.  Otherwise we need to sort them back to
	 * correct domains.
	 */
	if (domain != itemdomain && vm_ndomains > 2) {
		zone_free_cross(zone, bucket, udata);
		return;
	}
#endif

	/*
	 * Attempt to save the bucket in the zone's domain bucket cache.
	 *
	 * We bump the uz count when the cache size is insufficient to
	 * handle the working set.
	 */
	if (ZONE_TRYLOCK(zone) == 0) {
		/* Record contention to size the buckets. */
		ZONE_LOCK(zone);
		if (zone->uz_bucket_size < zone->uz_bucket_size_max)
			zone->uz_bucket_size++;
	}

	CTR3(KTR_UMA,
	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
	    zone->uz_name, zone, bucket);
	/* ub_cnt is pointing to the last free item */
	KASSERT(bucket->ub_cnt == bucket->ub_entries,
	    ("uma_zfree: Attempting to insert partial bucket onto the full list.\n"));
	if (zone->uz_bkt_count >= zone->uz_bkt_max) {
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, udata);
	} else {
		zdom = &zone->uz_domain[itemdomain];
		zone_put_bucket(zone, zdom, bucket, true);
		ZONE_UNLOCK(zone);
	}
}

/*
 * Populate a free or cross bucket for the current cpu cache.  Free any
 * existing full bucket either to the zone cache or back to the slab layer.
 *
 * Enters and returns in a critical section.  false return indicates that
 * we can not satisfy this free in the cache layer.  true indicates that
 * the caller should retry.
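 *
 * The expected calling pattern, as used by uma_zfree_arg() and
 * uma_zfree_smr() above, is:
 *
 *	critical_enter();
 *	do {
 *		cache = &zone->uz_cpu[curcpu];
 *		(try to push the item onto a per-CPU cache bucket)
 *	} while (cache_free(zone, cache, udata, item, itemdomain));
 *	critical_exit();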
4041 */ 4042 static __noinline bool 4043 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item, 4044 int itemdomain) 4045 { 4046 uma_cache_bucket_t cbucket; 4047 uma_bucket_t newbucket, bucket; 4048 int domain; 4049 4050 CRITICAL_ASSERT(curthread); 4051 4052 if (zone->uz_bucket_size == 0) 4053 return false; 4054 4055 cache = &zone->uz_cpu[curcpu]; 4056 newbucket = NULL; 4057 4058 /* 4059 * FIRSTTOUCH domains need to free to the correct zdom. When 4060 * enabled this is the zdom of the item. The bucket is the 4061 * cross bucket if the current domain and itemdomain do not match. 4062 */ 4063 cbucket = &cache->uc_freebucket; 4064 #ifdef NUMA 4065 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) { 4066 domain = PCPU_GET(domain); 4067 if (domain != itemdomain) { 4068 cbucket = &cache->uc_crossbucket; 4069 if (cbucket->ucb_cnt != 0) 4070 atomic_add_64(&zone->uz_xdomain, 4071 cbucket->ucb_cnt); 4072 } 4073 } else 4074 #endif 4075 itemdomain = domain = 0; 4076 bucket = cache_bucket_unload(cbucket); 4077 4078 /* We are no longer associated with this CPU. */ 4079 critical_exit(); 4080 4081 /* 4082 * Don't let SMR zones operate without a free bucket. Force 4083 * a synchronize and re-use this one. We will only degrade 4084 * to a synchronize every bucket_size items rather than every 4085 * item if we fail to allocate a bucket. 4086 */ 4087 if ((zone->uz_flags & UMA_ZONE_SMR) != 0) { 4088 if (bucket != NULL) 4089 bucket->ub_seq = smr_advance(zone->uz_smr); 4090 newbucket = bucket_alloc(zone, udata, M_NOWAIT); 4091 if (newbucket == NULL && bucket != NULL) { 4092 bucket_drain(zone, bucket); 4093 newbucket = bucket; 4094 bucket = NULL; 4095 } 4096 } else if (!bucketdisable) 4097 newbucket = bucket_alloc(zone, udata, M_NOWAIT); 4098 4099 if (bucket != NULL) 4100 zone_free_bucket(zone, bucket, udata, domain, itemdomain); 4101 4102 critical_enter(); 4103 if ((bucket = newbucket) == NULL) 4104 return (false); 4105 cache = &zone->uz_cpu[curcpu]; 4106 #ifdef NUMA 4107 /* 4108 * Check to see if we should be populating the cross bucket. If it 4109 * is already populated we will fall through and attempt to populate 4110 * the free bucket. 4111 */ 4112 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) { 4113 domain = PCPU_GET(domain); 4114 if (domain != itemdomain && 4115 cache->uc_crossbucket.ucb_bucket == NULL) { 4116 cache_bucket_load_cross(cache, bucket); 4117 return (true); 4118 } 4119 } 4120 #endif 4121 /* 4122 * We may have lost the race to fill the bucket or switched CPUs. 4123 */ 4124 if (cache->uc_freebucket.ucb_bucket != NULL) { 4125 critical_exit(); 4126 bucket_free(zone, bucket, udata); 4127 critical_enter(); 4128 } else 4129 cache_bucket_load_free(cache, bucket); 4130 4131 return (true); 4132 } 4133 4134 void 4135 uma_zfree_domain(uma_zone_t zone, void *item, void *udata) 4136 { 4137 4138 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 4139 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 4140 4141 CTR2(KTR_UMA, "uma_zfree_domain zone %s(%p)", zone->uz_name, zone); 4142 4143 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 4144 ("uma_zfree_domain: called with spinlock or critical section held")); 4145 4146 /* uma_zfree(..., NULL) does nothing, to match free(9). 
*/ 4147 if (item == NULL) 4148 return; 4149 zone_free_item(zone, item, udata, SKIP_NONE); 4150 } 4151 4152 static void 4153 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item) 4154 { 4155 uma_keg_t keg; 4156 uma_domain_t dom; 4157 int freei; 4158 4159 keg = zone->uz_keg; 4160 KEG_LOCK_ASSERT(keg, slab->us_domain); 4161 4162 /* Do we need to remove from any lists? */ 4163 dom = &keg->uk_domain[slab->us_domain]; 4164 if (slab->us_freecount+1 == keg->uk_ipers) { 4165 LIST_REMOVE(slab, us_link); 4166 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); 4167 } else if (slab->us_freecount == 0) { 4168 LIST_REMOVE(slab, us_link); 4169 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 4170 } 4171 4172 /* Slab management. */ 4173 freei = slab_item_index(slab, keg, item); 4174 BIT_SET(keg->uk_ipers, freei, &slab->us_free); 4175 slab->us_freecount++; 4176 4177 /* Keg statistics. */ 4178 dom->ud_free++; 4179 } 4180 4181 static void 4182 zone_release(void *arg, void **bucket, int cnt) 4183 { 4184 struct mtx *lock; 4185 uma_zone_t zone; 4186 uma_slab_t slab; 4187 uma_keg_t keg; 4188 uint8_t *mem; 4189 void *item; 4190 int i; 4191 4192 zone = arg; 4193 keg = zone->uz_keg; 4194 lock = NULL; 4195 if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0)) 4196 lock = KEG_LOCK(keg, 0); 4197 for (i = 0; i < cnt; i++) { 4198 item = bucket[i]; 4199 if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) { 4200 slab = vtoslab((vm_offset_t)item); 4201 } else { 4202 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 4203 if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0) 4204 slab = hash_sfind(&keg->uk_hash, mem); 4205 else 4206 slab = (uma_slab_t)(mem + keg->uk_pgoff); 4207 } 4208 if (lock != KEG_LOCKPTR(keg, slab->us_domain)) { 4209 if (lock != NULL) 4210 mtx_unlock(lock); 4211 lock = KEG_LOCK(keg, slab->us_domain); 4212 } 4213 slab_free_item(zone, slab, item); 4214 } 4215 if (lock != NULL) 4216 mtx_unlock(lock); 4217 } 4218 4219 /* 4220 * Frees a single item to any zone. 4221 * 4222 * Arguments: 4223 * zone The zone to free to 4224 * item The item we're freeing 4225 * udata User supplied data for the dtor 4226 * skip Skip dtors and finis 4227 */ 4228 static void 4229 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 4230 { 4231 4232 /* 4233 * If a free is sent directly to an SMR zone we have to 4234 * synchronize immediately because the item can instantly 4235 * be reallocated. This should only happen in degenerate 4236 * cases when no memory is available for per-cpu caches. 4237 */ 4238 if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_NONE) 4239 smr_synchronize(zone->uz_smr); 4240 4241 item_dtor(zone, item, zone->uz_size, udata, skip); 4242 4243 if (skip < SKIP_FINI && zone->uz_fini) 4244 zone->uz_fini(item, zone->uz_size); 4245 4246 zone->uz_release(zone->uz_arg, &item, 1); 4247 4248 if (skip & SKIP_CNT) 4249 return; 4250 4251 counter_u64_add(zone->uz_frees, 1); 4252 4253 if (zone->uz_max_items > 0) 4254 zone_free_limit(zone, 1); 4255 } 4256 4257 /* See uma.h */ 4258 int 4259 uma_zone_set_max(uma_zone_t zone, int nitems) 4260 { 4261 struct uma_bucket_zone *ubz; 4262 int count; 4263 4264 /* 4265 * XXX This can misbehave if the zone has any allocations with 4266 * no limit and a limit is imposed. There is currently no 4267 * way to clear a limit. 4268 */ 4269 ZONE_LOCK(zone); 4270 ubz = bucket_zone_max(zone, nitems); 4271 count = ubz != NULL ? 
ubz->ubz_entries : 0; 4272 zone->uz_bucket_size_max = zone->uz_bucket_size = count; 4273 if (zone->uz_bucket_size_min > zone->uz_bucket_size_max) 4274 zone->uz_bucket_size_min = zone->uz_bucket_size_max; 4275 zone->uz_max_items = nitems; 4276 zone->uz_flags |= UMA_ZFLAG_LIMIT; 4277 zone_update_caches(zone); 4278 /* We may need to wake waiters. */ 4279 wakeup(&zone->uz_max_items); 4280 ZONE_UNLOCK(zone); 4281 4282 return (nitems); 4283 } 4284 4285 /* See uma.h */ 4286 void 4287 uma_zone_set_maxcache(uma_zone_t zone, int nitems) 4288 { 4289 struct uma_bucket_zone *ubz; 4290 int bpcpu; 4291 4292 ZONE_LOCK(zone); 4293 ubz = bucket_zone_max(zone, nitems); 4294 if (ubz != NULL) { 4295 bpcpu = 2; 4296 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) 4297 /* Count the cross-domain bucket. */ 4298 bpcpu++; 4299 nitems -= ubz->ubz_entries * bpcpu * mp_ncpus; 4300 zone->uz_bucket_size_max = ubz->ubz_entries; 4301 } else { 4302 zone->uz_bucket_size_max = zone->uz_bucket_size = 0; 4303 } 4304 if (zone->uz_bucket_size_min > zone->uz_bucket_size_max) 4305 zone->uz_bucket_size_min = zone->uz_bucket_size_max; 4306 zone->uz_bkt_max = nitems; 4307 ZONE_UNLOCK(zone); 4308 } 4309 4310 /* See uma.h */ 4311 int 4312 uma_zone_get_max(uma_zone_t zone) 4313 { 4314 int nitems; 4315 4316 nitems = atomic_load_64(&zone->uz_max_items); 4317 4318 return (nitems); 4319 } 4320 4321 /* See uma.h */ 4322 void 4323 uma_zone_set_warning(uma_zone_t zone, const char *warning) 4324 { 4325 4326 ZONE_ASSERT_COLD(zone); 4327 zone->uz_warning = warning; 4328 } 4329 4330 /* See uma.h */ 4331 void 4332 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) 4333 { 4334 4335 ZONE_ASSERT_COLD(zone); 4336 TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); 4337 } 4338 4339 /* See uma.h */ 4340 int 4341 uma_zone_get_cur(uma_zone_t zone) 4342 { 4343 int64_t nitems; 4344 u_int i; 4345 4346 nitems = 0; 4347 if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER) 4348 nitems = counter_u64_fetch(zone->uz_allocs) - 4349 counter_u64_fetch(zone->uz_frees); 4350 CPU_FOREACH(i) 4351 nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) - 4352 atomic_load_64(&zone->uz_cpu[i].uc_frees); 4353 4354 return (nitems < 0 ? 0 : nitems); 4355 } 4356 4357 static uint64_t 4358 uma_zone_get_allocs(uma_zone_t zone) 4359 { 4360 uint64_t nitems; 4361 u_int i; 4362 4363 nitems = 0; 4364 if (zone->uz_allocs != EARLY_COUNTER) 4365 nitems = counter_u64_fetch(zone->uz_allocs); 4366 CPU_FOREACH(i) 4367 nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs); 4368 4369 return (nitems); 4370 } 4371 4372 static uint64_t 4373 uma_zone_get_frees(uma_zone_t zone) 4374 { 4375 uint64_t nitems; 4376 u_int i; 4377 4378 nitems = 0; 4379 if (zone->uz_frees != EARLY_COUNTER) 4380 nitems = counter_u64_fetch(zone->uz_frees); 4381 CPU_FOREACH(i) 4382 nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees); 4383 4384 return (nitems); 4385 } 4386 4387 #ifdef INVARIANTS 4388 /* Used only for KEG_ASSERT_COLD(). 
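 * A keg is considered "cold" when no zone backed by it has ever satisfied
 * an allocation; summing the per-zone allocation counters is a cheap way to
 * check that (the assumption being that KEG_ASSERT_COLD() compares this sum
 * against zero).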
*/ 4389 static uint64_t 4390 uma_keg_get_allocs(uma_keg_t keg) 4391 { 4392 uma_zone_t z; 4393 uint64_t nitems; 4394 4395 nitems = 0; 4396 LIST_FOREACH(z, &keg->uk_zones, uz_link) 4397 nitems += uma_zone_get_allocs(z); 4398 4399 return (nitems); 4400 } 4401 #endif 4402 4403 /* See uma.h */ 4404 void 4405 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 4406 { 4407 uma_keg_t keg; 4408 4409 KEG_GET(zone, keg); 4410 KEG_ASSERT_COLD(keg); 4411 keg->uk_init = uminit; 4412 } 4413 4414 /* See uma.h */ 4415 void 4416 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 4417 { 4418 uma_keg_t keg; 4419 4420 KEG_GET(zone, keg); 4421 KEG_ASSERT_COLD(keg); 4422 keg->uk_fini = fini; 4423 } 4424 4425 /* See uma.h */ 4426 void 4427 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 4428 { 4429 4430 ZONE_ASSERT_COLD(zone); 4431 zone->uz_init = zinit; 4432 } 4433 4434 /* See uma.h */ 4435 void 4436 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 4437 { 4438 4439 ZONE_ASSERT_COLD(zone); 4440 zone->uz_fini = zfini; 4441 } 4442 4443 /* See uma.h */ 4444 void 4445 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 4446 { 4447 uma_keg_t keg; 4448 4449 KEG_GET(zone, keg); 4450 KEG_ASSERT_COLD(keg); 4451 keg->uk_freef = freef; 4452 } 4453 4454 /* See uma.h */ 4455 void 4456 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 4457 { 4458 uma_keg_t keg; 4459 4460 KEG_GET(zone, keg); 4461 KEG_ASSERT_COLD(keg); 4462 keg->uk_allocf = allocf; 4463 } 4464 4465 /* See uma.h */ 4466 void 4467 uma_zone_set_smr(uma_zone_t zone, smr_t smr) 4468 { 4469 4470 ZONE_ASSERT_COLD(zone); 4471 4472 zone->uz_flags |= UMA_ZONE_SMR; 4473 zone->uz_smr = smr; 4474 zone_update_caches(zone); 4475 } 4476 4477 smr_t 4478 uma_zone_get_smr(uma_zone_t zone) 4479 { 4480 4481 return (zone->uz_smr); 4482 } 4483 4484 /* See uma.h */ 4485 void 4486 uma_zone_reserve(uma_zone_t zone, int items) 4487 { 4488 uma_keg_t keg; 4489 4490 KEG_GET(zone, keg); 4491 KEG_ASSERT_COLD(keg); 4492 keg->uk_reserve = items; 4493 } 4494 4495 /* See uma.h */ 4496 int 4497 uma_zone_reserve_kva(uma_zone_t zone, int count) 4498 { 4499 uma_keg_t keg; 4500 vm_offset_t kva; 4501 u_int pages; 4502 4503 KEG_GET(zone, keg); 4504 KEG_ASSERT_COLD(keg); 4505 ZONE_ASSERT_COLD(zone); 4506 4507 pages = howmany(count, keg->uk_ipers) * keg->uk_ppera; 4508 4509 #ifdef UMA_MD_SMALL_ALLOC 4510 if (keg->uk_ppera > 1) { 4511 #else 4512 if (1) { 4513 #endif 4514 kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); 4515 if (kva == 0) 4516 return (0); 4517 } else 4518 kva = 0; 4519 4520 ZONE_LOCK(zone); 4521 MPASS(keg->uk_kva == 0); 4522 keg->uk_kva = kva; 4523 keg->uk_offset = 0; 4524 zone->uz_max_items = pages * keg->uk_ipers; 4525 #ifdef UMA_MD_SMALL_ALLOC 4526 keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; 4527 #else 4528 keg->uk_allocf = noobj_alloc; 4529 #endif 4530 keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; 4531 zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE; 4532 zone_update_caches(zone); 4533 ZONE_UNLOCK(zone); 4534 4535 return (1); 4536 } 4537 4538 /* See uma.h */ 4539 void 4540 uma_prealloc(uma_zone_t zone, int items) 4541 { 4542 struct vm_domainset_iter di; 4543 uma_domain_t dom; 4544 uma_slab_t slab; 4545 uma_keg_t keg; 4546 int aflags, domain, slabs; 4547 4548 KEG_GET(zone, keg); 4549 slabs = howmany(items, keg->uk_ipers); 4550 while (slabs-- > 0) { 4551 aflags = M_NOWAIT; 4552 vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, 4553 &aflags); 4554 for (;;) { 4555 slab = keg_alloc_slab(keg, zone, domain, M_WAITOK, 4556 aflags); 4557 if (slab != NULL) { 4558 dom = &keg->uk_domain[slab->us_domain]; 4559 LIST_REMOVE(slab, us_link); 4560 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, 4561 us_link); 4562 KEG_UNLOCK(keg, slab->us_domain); 4563 break; 4564 } 4565 if (vm_domainset_iter_policy(&di, &domain) != 0) 4566 vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask); 4567 } 4568 } 4569 } 4570 4571 /* See uma.h */ 4572 void 4573 uma_reclaim(int req) 4574 { 4575 4576 CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); 4577 sx_xlock(&uma_reclaim_lock); 4578 bucket_enable(); 4579 4580 switch (req) { 4581 case UMA_RECLAIM_TRIM: 4582 zone_foreach(zone_trim, NULL); 4583 break; 4584 case UMA_RECLAIM_DRAIN: 4585 case UMA_RECLAIM_DRAIN_CPU: 4586 zone_foreach(zone_drain, NULL); 4587 if (req == UMA_RECLAIM_DRAIN_CPU) { 4588 pcpu_cache_drain_safe(NULL); 4589 zone_foreach(zone_drain, NULL); 4590 } 4591 break; 4592 default: 4593 panic("unhandled reclamation request %d", req); 4594 } 4595 4596 /* 4597 * Some slabs may have been freed but this zone will be visited early 4598 * we visit again so that we can free pages that are empty once other 4599 * zones are drained. We have to do the same for buckets. 4600 */ 4601 zone_drain(slabzones[0], NULL); 4602 zone_drain(slabzones[1], NULL); 4603 bucket_zone_drain(); 4604 sx_xunlock(&uma_reclaim_lock); 4605 } 4606 4607 static volatile int uma_reclaim_needed; 4608 4609 void 4610 uma_reclaim_wakeup(void) 4611 { 4612 4613 if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) 4614 wakeup(uma_reclaim); 4615 } 4616 4617 void 4618 uma_reclaim_worker(void *arg __unused) 4619 { 4620 4621 for (;;) { 4622 sx_xlock(&uma_reclaim_lock); 4623 while (atomic_load_int(&uma_reclaim_needed) == 0) 4624 sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl", 4625 hz); 4626 sx_xunlock(&uma_reclaim_lock); 4627 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); 4628 uma_reclaim(UMA_RECLAIM_DRAIN_CPU); 4629 atomic_store_int(&uma_reclaim_needed, 0); 4630 /* Don't fire more than once per-second. 
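 * Wakeups that arrive while a drain is already in progress are coalesced:
 * uma_reclaim_needed is cleared after the drain completes, and this pause()
 * limits the worker to one full pass per second.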
*/ 4631 pause("umarclslp", hz); 4632 } 4633 } 4634 4635 /* See uma.h */ 4636 void 4637 uma_zone_reclaim(uma_zone_t zone, int req) 4638 { 4639 4640 switch (req) { 4641 case UMA_RECLAIM_TRIM: 4642 zone_trim(zone, NULL); 4643 break; 4644 case UMA_RECLAIM_DRAIN: 4645 zone_drain(zone, NULL); 4646 break; 4647 case UMA_RECLAIM_DRAIN_CPU: 4648 pcpu_cache_drain_safe(zone); 4649 zone_drain(zone, NULL); 4650 break; 4651 default: 4652 panic("unhandled reclamation request %d", req); 4653 } 4654 } 4655 4656 /* See uma.h */ 4657 int 4658 uma_zone_exhausted(uma_zone_t zone) 4659 { 4660 4661 return (atomic_load_32(&zone->uz_sleepers) > 0); 4662 } 4663 4664 unsigned long 4665 uma_limit(void) 4666 { 4667 4668 return (uma_kmem_limit); 4669 } 4670 4671 void 4672 uma_set_limit(unsigned long limit) 4673 { 4674 4675 uma_kmem_limit = limit; 4676 } 4677 4678 unsigned long 4679 uma_size(void) 4680 { 4681 4682 return (atomic_load_long(&uma_kmem_total)); 4683 } 4684 4685 long 4686 uma_avail(void) 4687 { 4688 4689 return (uma_kmem_limit - uma_size()); 4690 } 4691 4692 #ifdef DDB 4693 /* 4694 * Generate statistics across both the zone and its per-cpu cache's. Return 4695 * desired statistics if the pointer is non-NULL for that statistic. 4696 * 4697 * Note: does not update the zone statistics, as it can't safely clear the 4698 * per-CPU cache statistic. 4699 * 4700 */ 4701 static void 4702 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp, 4703 uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp) 4704 { 4705 uma_cache_t cache; 4706 uint64_t allocs, frees, sleeps, xdomain; 4707 int cachefree, cpu; 4708 4709 allocs = frees = sleeps = xdomain = 0; 4710 cachefree = 0; 4711 CPU_FOREACH(cpu) { 4712 cache = &z->uz_cpu[cpu]; 4713 cachefree += cache->uc_allocbucket.ucb_cnt; 4714 cachefree += cache->uc_freebucket.ucb_cnt; 4715 xdomain += cache->uc_crossbucket.ucb_cnt; 4716 cachefree += cache->uc_crossbucket.ucb_cnt; 4717 allocs += cache->uc_allocs; 4718 frees += cache->uc_frees; 4719 } 4720 allocs += counter_u64_fetch(z->uz_allocs); 4721 frees += counter_u64_fetch(z->uz_frees); 4722 sleeps += z->uz_sleeps; 4723 xdomain += z->uz_xdomain; 4724 if (cachefreep != NULL) 4725 *cachefreep = cachefree; 4726 if (allocsp != NULL) 4727 *allocsp = allocs; 4728 if (freesp != NULL) 4729 *freesp = frees; 4730 if (sleepsp != NULL) 4731 *sleepsp = sleeps; 4732 if (xdomainp != NULL) 4733 *xdomainp = xdomain; 4734 } 4735 #endif /* DDB */ 4736 4737 static int 4738 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 4739 { 4740 uma_keg_t kz; 4741 uma_zone_t z; 4742 int count; 4743 4744 count = 0; 4745 rw_rlock(&uma_rwlock); 4746 LIST_FOREACH(kz, &uma_kegs, uk_link) { 4747 LIST_FOREACH(z, &kz->uk_zones, uz_link) 4748 count++; 4749 } 4750 LIST_FOREACH(z, &uma_cachezones, uz_link) 4751 count++; 4752 4753 rw_runlock(&uma_rwlock); 4754 return (sysctl_handle_int(oidp, &count, 0, req)); 4755 } 4756 4757 static void 4758 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf, 4759 struct uma_percpu_stat *ups, bool internal) 4760 { 4761 uma_zone_domain_t zdom; 4762 uma_cache_t cache; 4763 int i; 4764 4765 4766 for (i = 0; i < vm_ndomains; i++) { 4767 zdom = &z->uz_domain[i]; 4768 uth->uth_zone_free += zdom->uzd_nitems; 4769 } 4770 uth->uth_allocs = counter_u64_fetch(z->uz_allocs); 4771 uth->uth_frees = counter_u64_fetch(z->uz_frees); 4772 uth->uth_fails = counter_u64_fetch(z->uz_fails); 4773 uth->uth_sleeps = z->uz_sleeps; 4774 uth->uth_xdomain = z->uz_xdomain; 4775 4776 /* 4777 * While it is not normally safe to access the 
cache bucket pointers
 * while not on the CPU that owns the cache, we only allow the pointers
 * to be exchanged without the zone lock held, not invalidated, so
 * accept the possible race associated with bucket exchange during
 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
 * are loaded only once.
 */
	for (i = 0; i < mp_maxid + 1; i++) {
		bzero(&ups[i], sizeof(*ups));
		if (internal || CPU_ABSENT(i))
			continue;
		cache = &z->uz_cpu[i];
		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
		ups[i].ups_allocs = cache->uc_allocs;
		ups[i].ups_frees = cache->uc_frees;
	}
}

static int
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	struct uma_percpu_stat *ups;
	struct sbuf sbuf;
	uma_keg_t kz;
	uma_zone_t z;
	uint64_t items;
	uint32_t kfree, pages;
	int count, error, i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);

	count = 0;
	rw_rlock(&uma_rwlock);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}

	LIST_FOREACH(z, &uma_cachezones, uz_link)
		count++;

	/*
	 * Insert stream header.
	 */
	bzero(&ush, sizeof(ush));
	ush.ush_version = UMA_STREAM_VERSION;
	ush.ush_maxcpus = (mp_maxid + 1);
	ush.ush_count = count;
	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		kfree = pages = 0;
		for (i = 0; i < vm_ndomains; i++) {
			kfree += kz->uk_domain[i].ud_free;
			pages += kz->uk_domain[i].ud_pages;
		}
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			bzero(&uth, sizeof(uth));
			ZONE_LOCK(z);
			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
			uth.uth_align = kz->uk_align;
			uth.uth_size = kz->uk_size;
			uth.uth_rsize = kz->uk_rsize;
			if (z->uz_max_items > 0) {
				items = UZ_ITEMS_COUNT(z->uz_items);
				uth.uth_pages = (items / kz->uk_ipers) *
				    kz->uk_ppera;
			} else
				uth.uth_pages = pages;
			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
			    kz->uk_ppera;
			uth.uth_limit = z->uz_max_items;
			uth.uth_keg_free = kfree;

			/*
			 * A zone is secondary if it is not the first entry
			 * on the keg's zone list.
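			 * (Secondary zones are stacked on an existing keg
			 * with uma_zsecond_create(); they share the keg's
			 * slabs but keep their own ctor/dtor and statistics,
			 * which is why they are flagged separately for
			 * consumers of this stream such as vmstat -z.)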
4863 */ 4864 if ((z->uz_flags & UMA_ZONE_SECONDARY) && 4865 (LIST_FIRST(&kz->uk_zones) != z)) 4866 uth.uth_zone_flags = UTH_ZONE_SECONDARY; 4867 uma_vm_zone_stats(&uth, z, &sbuf, ups, 4868 kz->uk_flags & UMA_ZFLAG_INTERNAL); 4869 ZONE_UNLOCK(z); 4870 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 4871 for (i = 0; i < mp_maxid + 1; i++) 4872 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); 4873 } 4874 } 4875 LIST_FOREACH(z, &uma_cachezones, uz_link) { 4876 bzero(&uth, sizeof(uth)); 4877 ZONE_LOCK(z); 4878 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 4879 uth.uth_size = z->uz_size; 4880 uma_vm_zone_stats(&uth, z, &sbuf, ups, false); 4881 ZONE_UNLOCK(z); 4882 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 4883 for (i = 0; i < mp_maxid + 1; i++) 4884 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); 4885 } 4886 4887 rw_runlock(&uma_rwlock); 4888 error = sbuf_finish(&sbuf); 4889 sbuf_delete(&sbuf); 4890 free(ups, M_TEMP); 4891 return (error); 4892 } 4893 4894 int 4895 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) 4896 { 4897 uma_zone_t zone = *(uma_zone_t *)arg1; 4898 int error, max; 4899 4900 max = uma_zone_get_max(zone); 4901 error = sysctl_handle_int(oidp, &max, 0, req); 4902 if (error || !req->newptr) 4903 return (error); 4904 4905 uma_zone_set_max(zone, max); 4906 4907 return (0); 4908 } 4909 4910 int 4911 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) 4912 { 4913 uma_zone_t zone; 4914 int cur; 4915 4916 /* 4917 * Some callers want to add sysctls for global zones that 4918 * may not yet exist so they pass a pointer to a pointer. 4919 */ 4920 if (arg2 == 0) 4921 zone = *(uma_zone_t *)arg1; 4922 else 4923 zone = arg1; 4924 cur = uma_zone_get_cur(zone); 4925 return (sysctl_handle_int(oidp, &cur, 0, req)); 4926 } 4927 4928 static int 4929 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS) 4930 { 4931 uma_zone_t zone = arg1; 4932 uint64_t cur; 4933 4934 cur = uma_zone_get_allocs(zone); 4935 return (sysctl_handle_64(oidp, &cur, 0, req)); 4936 } 4937 4938 static int 4939 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS) 4940 { 4941 uma_zone_t zone = arg1; 4942 uint64_t cur; 4943 4944 cur = uma_zone_get_frees(zone); 4945 return (sysctl_handle_64(oidp, &cur, 0, req)); 4946 } 4947 4948 static int 4949 sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS) 4950 { 4951 struct sbuf sbuf; 4952 uma_zone_t zone = arg1; 4953 int error; 4954 4955 sbuf_new_for_sysctl(&sbuf, NULL, 0, req); 4956 if (zone->uz_flags != 0) 4957 sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS); 4958 else 4959 sbuf_printf(&sbuf, "0"); 4960 error = sbuf_finish(&sbuf); 4961 sbuf_delete(&sbuf); 4962 4963 return (error); 4964 } 4965 4966 static int 4967 sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS) 4968 { 4969 uma_keg_t keg = arg1; 4970 int avail, effpct, total; 4971 4972 total = keg->uk_ppera * PAGE_SIZE; 4973 if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0) 4974 total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize; 4975 /* 4976 * We consider the client's requested size and alignment here, not the 4977 * real size determination uk_rsize, because we also adjust the real 4978 * size for internal implementation reasons (max bitset size). 
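	 *
	 * For example (hypothetical numbers): a keg that packs 7 items of a
	 * 536-byte, 8-byte-aligned type into one 4096-byte page reports
	 * 100 * (7 * 536) / 4096 = 91% efficiency.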
4979 */ 4980 avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1); 4981 if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) 4982 avail *= mp_maxid + 1; 4983 effpct = 100 * avail / total; 4984 return (sysctl_handle_int(oidp, &effpct, 0, req)); 4985 } 4986 4987 static int 4988 sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS) 4989 { 4990 uma_zone_t zone = arg1; 4991 uint64_t cur; 4992 4993 cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items)); 4994 return (sysctl_handle_64(oidp, &cur, 0, req)); 4995 } 4996 4997 #ifdef INVARIANTS 4998 static uma_slab_t 4999 uma_dbg_getslab(uma_zone_t zone, void *item) 5000 { 5001 uma_slab_t slab; 5002 uma_keg_t keg; 5003 uint8_t *mem; 5004 5005 /* 5006 * It is safe to return the slab here even though the 5007 * zone is unlocked because the item's allocation state 5008 * essentially holds a reference. 5009 */ 5010 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 5011 if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0) 5012 return (NULL); 5013 if (zone->uz_flags & UMA_ZFLAG_VTOSLAB) 5014 return (vtoslab((vm_offset_t)mem)); 5015 keg = zone->uz_keg; 5016 if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0) 5017 return ((uma_slab_t)(mem + keg->uk_pgoff)); 5018 KEG_LOCK(keg, 0); 5019 slab = hash_sfind(&keg->uk_hash, mem); 5020 KEG_UNLOCK(keg, 0); 5021 5022 return (slab); 5023 } 5024 5025 static bool 5026 uma_dbg_zskip(uma_zone_t zone, void *mem) 5027 { 5028 5029 if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0) 5030 return (true); 5031 5032 return (uma_dbg_kskip(zone->uz_keg, mem)); 5033 } 5034 5035 static bool 5036 uma_dbg_kskip(uma_keg_t keg, void *mem) 5037 { 5038 uintptr_t idx; 5039 5040 if (dbg_divisor == 0) 5041 return (true); 5042 5043 if (dbg_divisor == 1) 5044 return (false); 5045 5046 idx = (uintptr_t)mem >> PAGE_SHIFT; 5047 if (keg->uk_ipers > 1) { 5048 idx *= keg->uk_ipers; 5049 idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize; 5050 } 5051 5052 if ((idx / dbg_divisor) * dbg_divisor != idx) { 5053 counter_u64_add(uma_skip_cnt, 1); 5054 return (true); 5055 } 5056 counter_u64_add(uma_dbg_cnt, 1); 5057 5058 return (false); 5059 } 5060 5061 /* 5062 * Set up the slab's freei data such that uma_dbg_free can function. 5063 * 5064 */ 5065 static void 5066 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item) 5067 { 5068 uma_keg_t keg; 5069 int freei; 5070 5071 if (slab == NULL) { 5072 slab = uma_dbg_getslab(zone, item); 5073 if (slab == NULL) 5074 panic("uma: item %p did not belong to zone %s\n", 5075 item, zone->uz_name); 5076 } 5077 keg = zone->uz_keg; 5078 freei = slab_item_index(slab, keg, item); 5079 5080 if (BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg))) 5081 panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n", 5082 item, zone, zone->uz_name, slab, freei); 5083 BIT_SET_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)); 5084 } 5085 5086 /* 5087 * Verifies freed addresses. Checks for alignment, valid slab membership 5088 * and duplicate frees. 
5089 * 5090 */ 5091 static void 5092 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item) 5093 { 5094 uma_keg_t keg; 5095 int freei; 5096 5097 if (slab == NULL) { 5098 slab = uma_dbg_getslab(zone, item); 5099 if (slab == NULL) 5100 panic("uma: Freed item %p did not belong to zone %s\n", 5101 item, zone->uz_name); 5102 } 5103 keg = zone->uz_keg; 5104 freei = slab_item_index(slab, keg, item); 5105 5106 if (freei >= keg->uk_ipers) 5107 panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n", 5108 item, zone, zone->uz_name, slab, freei); 5109 5110 if (slab_item(slab, keg, freei) != item) 5111 panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n", 5112 item, zone, zone->uz_name, slab, freei); 5113 5114 if (!BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg))) 5115 panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n", 5116 item, zone, zone->uz_name, slab, freei); 5117 5118 BIT_CLR_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)); 5119 } 5120 #endif /* INVARIANTS */ 5121 5122 #ifdef DDB 5123 static int64_t 5124 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used, 5125 uint64_t *sleeps, long *cachefree, uint64_t *xdomain) 5126 { 5127 uint64_t frees; 5128 int i; 5129 5130 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { 5131 *allocs = counter_u64_fetch(z->uz_allocs); 5132 frees = counter_u64_fetch(z->uz_frees); 5133 *sleeps = z->uz_sleeps; 5134 *cachefree = 0; 5135 *xdomain = 0; 5136 } else 5137 uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps, 5138 xdomain); 5139 for (i = 0; i < vm_ndomains; i++) { 5140 *cachefree += z->uz_domain[i].uzd_nitems; 5141 if (!((z->uz_flags & UMA_ZONE_SECONDARY) && 5142 (LIST_FIRST(&kz->uk_zones) != z))) 5143 *cachefree += kz->uk_domain[i].ud_free; 5144 } 5145 *used = *allocs - frees; 5146 return (((int64_t)*used + *cachefree) * kz->uk_size); 5147 } 5148 5149 DB_SHOW_COMMAND(uma, db_show_uma) 5150 { 5151 const char *fmt_hdr, *fmt_entry; 5152 uma_keg_t kz; 5153 uma_zone_t z; 5154 uint64_t allocs, used, sleeps, xdomain; 5155 long cachefree; 5156 /* variables for sorting */ 5157 uma_keg_t cur_keg; 5158 uma_zone_t cur_zone, last_zone; 5159 int64_t cur_size, last_size, size; 5160 int ties; 5161 5162 /* /i option produces machine-parseable CSV output */ 5163 if (modif[0] == 'i') { 5164 fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n"; 5165 fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n"; 5166 } else { 5167 fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n"; 5168 fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n"; 5169 } 5170 5171 db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests", 5172 "Sleeps", "Bucket", "Total Mem", "XFree"); 5173 5174 /* Sort the zones with largest size first. */ 5175 last_zone = NULL; 5176 last_size = INT64_MAX; 5177 for (;;) { 5178 cur_zone = NULL; 5179 cur_size = -1; 5180 ties = 0; 5181 LIST_FOREACH(kz, &uma_kegs, uk_link) { 5182 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 5183 /* 5184 * In the case of size ties, print out zones 5185 * in the order they are encountered. That is, 5186 * when we encounter the most recently output 5187 * zone, we have already printed all preceding 5188 * ties, and we must print all following ties. 
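				 *
				 * For example, with zone sizes 40, 24, 24 and
				 * 8 in list order, the pass after printing
				 * the first 24 sets "ties" once that zone is
				 * re-encountered, so the second 24 satisfies
				 * "size < last_size + ties" and is printed
				 * before 8.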
5189 */ 5190 if (z == last_zone) { 5191 ties = 1; 5192 continue; 5193 } 5194 size = get_uma_stats(kz, z, &allocs, &used, 5195 &sleeps, &cachefree, &xdomain); 5196 if (size > cur_size && size < last_size + ties) 5197 { 5198 cur_size = size; 5199 cur_zone = z; 5200 cur_keg = kz; 5201 } 5202 } 5203 } 5204 if (cur_zone == NULL) 5205 break; 5206 5207 size = get_uma_stats(cur_keg, cur_zone, &allocs, &used, 5208 &sleeps, &cachefree, &xdomain); 5209 db_printf(fmt_entry, cur_zone->uz_name, 5210 (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree, 5211 (uintmax_t)allocs, (uintmax_t)sleeps, 5212 (unsigned)cur_zone->uz_bucket_size, (intmax_t)size, 5213 xdomain); 5214 5215 if (db_pager_quit) 5216 return; 5217 last_zone = cur_zone; 5218 last_size = cur_size; 5219 } 5220 } 5221 5222 DB_SHOW_COMMAND(umacache, db_show_umacache) 5223 { 5224 uma_zone_t z; 5225 uint64_t allocs, frees; 5226 long cachefree; 5227 int i; 5228 5229 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", 5230 "Requests", "Bucket"); 5231 LIST_FOREACH(z, &uma_cachezones, uz_link) { 5232 uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL); 5233 for (i = 0; i < vm_ndomains; i++) 5234 cachefree += z->uz_domain[i].uzd_nitems; 5235 db_printf("%18s %8ju %8jd %8ld %12ju %8u\n", 5236 z->uz_name, (uintmax_t)z->uz_size, 5237 (intmax_t)(allocs - frees), cachefree, 5238 (uintmax_t)allocs, z->uz_bucket_size); 5239 if (db_pager_quit) 5240 return; 5241 } 5242 } 5243 #endif /* DDB */ 5244
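
/*
 * Example usage of the DDB commands defined above (illustrative only; the
 * commands exist only in kernels built with DDB), from the debugger prompt:
 *
 *	db> show uma		(zones sorted by total memory use)
 *	db> show uma /i		(machine-parseable CSV output)
 *	db> show umacache	(cache-only zones with no keg behind them)
 */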