xref: /freebsd/sys/vm/uma_core.c (revision c25a30e2)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/asan.h>
62 #include <sys/bitset.h>
63 #include <sys/domainset.h>
64 #include <sys/eventhandler.h>
65 #include <sys/kernel.h>
66 #include <sys/types.h>
67 #include <sys/limits.h>
68 #include <sys/queue.h>
69 #include <sys/malloc.h>
70 #include <sys/ktr.h>
71 #include <sys/lock.h>
72 #include <sys/msan.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/random.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/sched.h>
79 #include <sys/sleepqueue.h>
80 #include <sys/smp.h>
81 #include <sys/smr.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
84 #include <sys/vmmeter.h>
85 
86 #include <vm/vm.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_domainset.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_pageout.h>
92 #include <vm/vm_phys.h>
93 #include <vm/vm_pagequeue.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_dumpset.h>
98 #include <vm/uma.h>
99 #include <vm/uma_int.h>
100 #include <vm/uma_dbg.h>
101 
102 #include <ddb/ddb.h>
103 
104 #ifdef DEBUG_MEMGUARD
105 #include <vm/memguard.h>
106 #endif
107 
108 #include <machine/md_var.h>
109 
110 #ifdef INVARIANTS
111 #define	UMA_ALWAYS_CTORDTOR	1
112 #else
113 #define	UMA_ALWAYS_CTORDTOR	0
114 #endif
115 
116 /*
117  * This is the zone and keg from which all zones are spawned.
118  */
119 static uma_zone_t kegs;
120 static uma_zone_t zones;
121 
122 /*
123  * On INVARIANTS builds, the slab contains a second bitset of the same size,
124  * "dbg_bits", which is laid out immediately after us_free.
125  */
126 #ifdef INVARIANTS
127 #define	SLAB_BITSETS	2
128 #else
129 #define	SLAB_BITSETS	1
130 #endif
131 
132 /*
133  * These are the two zones from which all offpage uma_slab_ts are allocated.
134  *
135  * One zone is for slab headers that can represent a larger number of items,
136  * making the slabs themselves more efficient, and the other zone is for
137  * headers that are smaller and represent fewer items, making the headers more
138  * efficient.
139  */
140 #define	SLABZONE_SIZE(setsize)					\
141     (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
142 #define	SLABZONE0_SETSIZE	(PAGE_SIZE / 16)
143 #define	SLABZONE1_SETSIZE	SLAB_MAX_SETSIZE
144 #define	SLABZONE0_SIZE	SLABZONE_SIZE(SLABZONE0_SETSIZE)
145 #define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
146 static uma_zone_t slabzones[2];
147 
148 /*
149  * The initial hash tables come out of this zone so they can be allocated
150  * prior to malloc coming up.
151  */
152 static uma_zone_t hashzone;
153 
154 /* The boot-time adjusted value for cache line alignment. */
155 int uma_align_cache = 64 - 1;
156 
157 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
158 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
159 
160 /*
161  * Are we allowed to allocate buckets?
162  */
163 static int bucketdisable = 1;
164 
165 /* Linked list of all kegs in the system */
166 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
167 
168 /* Linked list of all cache-only zones in the system */
169 static LIST_HEAD(,uma_zone) uma_cachezones =
170     LIST_HEAD_INITIALIZER(uma_cachezones);
171 
172 /*
173  * Mutex for global lists: uma_kegs, uma_cachezones, and the per-keg list of
174  * zones.
175  */
176 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
177 
178 static struct sx uma_reclaim_lock;
179 
180 /*
181  * First available virual address for boot time allocations.
182  */
183 static vm_offset_t bootstart;
184 static vm_offset_t bootmem;
185 
186 /*
187  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
188  * allocations don't trigger a wakeup of the reclaim thread.
189  */
190 unsigned long uma_kmem_limit = LONG_MAX;
191 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
192     "UMA kernel memory soft limit");
193 unsigned long uma_kmem_total;
194 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
195     "UMA kernel memory usage");
196 
197 /* Is the VM done starting up? */
198 static enum {
199 	BOOT_COLD,
200 	BOOT_KVA,
201 	BOOT_PCPU,
202 	BOOT_RUNNING,
203 	BOOT_SHUTDOWN,
204 } booted = BOOT_COLD;
205 
206 /*
207  * This is the handle used to schedule events that need to happen
208  * outside of the allocation fast path.
209  */
210 static struct callout uma_callout;
211 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
212 
213 /*
214  * This structure is passed as the zone ctor arg so that I don't have to create
215  * a special allocation function just for zones.
216  */
217 struct uma_zctor_args {
218 	const char *name;
219 	size_t size;
220 	uma_ctor ctor;
221 	uma_dtor dtor;
222 	uma_init uminit;
223 	uma_fini fini;
224 	uma_import import;
225 	uma_release release;
226 	void *arg;
227 	uma_keg_t keg;
228 	int align;
229 	uint32_t flags;
230 };
231 
232 struct uma_kctor_args {
233 	uma_zone_t zone;
234 	size_t size;
235 	uma_init uminit;
236 	uma_fini fini;
237 	int align;
238 	uint32_t flags;
239 };
240 
241 struct uma_bucket_zone {
242 	uma_zone_t	ubz_zone;
243 	const char	*ubz_name;
244 	int		ubz_entries;	/* Number of items it can hold. */
245 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
246 };
247 
248 /*
249  * Compute the actual number of bucket entries to pack them in power
250  * of two sizes for more efficient space utilization.
251  */
252 #define	BUCKET_SIZE(n)						\
253     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
254 
255 #define	BUCKET_MAX	BUCKET_SIZE(256)
256 
257 struct uma_bucket_zone bucket_zones[] = {
258 	/* Literal bucket sizes. */
259 	{ NULL, "2 Bucket", 2, 4096 },
260 	{ NULL, "4 Bucket", 4, 3072 },
261 	{ NULL, "8 Bucket", 8, 2048 },
262 	{ NULL, "16 Bucket", 16, 1024 },
263 	/* Rounded down power of 2 sizes for efficiency. */
264 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
265 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
266 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
267 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
268 	{ NULL, NULL, 0}
269 };
270 
271 /*
272  * Flags and enumerations to be passed to internal functions.
273  */
274 enum zfreeskip {
275 	SKIP_NONE =	0,
276 	SKIP_CNT =	0x00000001,
277 	SKIP_DTOR =	0x00010000,
278 	SKIP_FINI =	0x00020000,
279 };
280 
281 /* Prototypes.. */
282 
283 void	uma_startup1(vm_offset_t);
284 void	uma_startup2(void);
285 
286 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
287 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
288 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
289 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
290 static void *contig_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
291 static void page_free(void *, vm_size_t, uint8_t);
292 static void pcpu_page_free(void *, vm_size_t, uint8_t);
293 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
294 static void cache_drain(uma_zone_t);
295 static void bucket_drain(uma_zone_t, uma_bucket_t);
296 static void bucket_cache_reclaim(uma_zone_t zone, bool, int);
297 static bool bucket_cache_reclaim_domain(uma_zone_t, bool, bool, int);
298 static int keg_ctor(void *, int, void *, int);
299 static void keg_dtor(void *, int, void *);
300 static void keg_drain(uma_keg_t keg, int domain);
301 static int zone_ctor(void *, int, void *, int);
302 static void zone_dtor(void *, int, void *);
303 static inline void item_dtor(uma_zone_t zone, void *item, int size,
304     void *udata, enum zfreeskip skip);
305 static int zero_init(void *, int, int);
306 static void zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
307     int itemdomain, bool ws);
308 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
309 static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
310 static void zone_timeout(uma_zone_t zone, void *);
311 static int hash_alloc(struct uma_hash *, u_int);
312 static int hash_expand(struct uma_hash *, struct uma_hash *);
313 static void hash_free(struct uma_hash *hash);
314 static void uma_timeout(void *);
315 static void uma_shutdown(void);
316 static void *zone_alloc_item(uma_zone_t, void *, int, int);
317 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
318 static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
319 static void zone_free_limit(uma_zone_t zone, int count);
320 static void bucket_enable(void);
321 static void bucket_init(void);
322 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
323 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
324 static void bucket_zone_drain(int domain);
325 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
326 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
327 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
328 static size_t slab_sizeof(int nitems);
329 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
330     uma_fini fini, int align, uint32_t flags);
331 static int zone_import(void *, void **, int, int, int);
332 static void zone_release(void *, void **, int);
333 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
334 static bool cache_free(uma_zone_t, uma_cache_t, void *, int);
335 
336 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
337 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
338 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
339 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
340 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
341 static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
342 static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);
343 
344 static uint64_t uma_zone_get_allocs(uma_zone_t zone);
345 
346 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
347     "Memory allocation debugging");
348 
349 #ifdef INVARIANTS
350 static uint64_t uma_keg_get_allocs(uma_keg_t zone);
351 static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
352 
353 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
354 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
355 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
356 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
357 
358 static u_int dbg_divisor = 1;
359 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
360     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
361     "Debug & thrash every this item in memory allocator");
362 
363 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
364 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
365 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
366     &uma_dbg_cnt, "memory items debugged");
367 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
368     &uma_skip_cnt, "memory items skipped, not debugged");
369 #endif
370 
371 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
372     "Universal Memory Allocator");
373 
374 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_INT,
375     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
376 
377 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_STRUCT,
378     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
379 
380 static int zone_warnings = 1;
381 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
382     "Warn when UMA zones becomes full");
383 
384 static int multipage_slabs = 1;
385 TUNABLE_INT("vm.debug.uma_multipage_slabs", &multipage_slabs);
386 SYSCTL_INT(_vm_debug, OID_AUTO, uma_multipage_slabs,
387     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &multipage_slabs, 0,
388     "UMA may choose larger slab sizes for better efficiency");
389 
390 /*
391  * Select the slab zone for an offpage slab with the given maximum item count.
392  */
393 static inline uma_zone_t
394 slabzone(int ipers)
395 {
396 
397 	return (slabzones[ipers > SLABZONE0_SETSIZE]);
398 }
399 
400 /*
401  * This routine checks to see whether or not it's safe to enable buckets.
402  */
403 static void
404 bucket_enable(void)
405 {
406 
407 	KASSERT(booted >= BOOT_KVA, ("Bucket enable before init"));
408 	bucketdisable = vm_page_count_min();
409 }
410 
411 /*
412  * Initialize bucket_zones, the array of zones of buckets of various sizes.
413  *
414  * For each zone, calculate the memory required for each bucket, consisting
415  * of the header and an array of pointers.
416  */
417 static void
418 bucket_init(void)
419 {
420 	struct uma_bucket_zone *ubz;
421 	int size;
422 
423 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
424 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
425 		size += sizeof(void *) * ubz->ubz_entries;
426 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
427 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
428 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
429 		    UMA_ZONE_FIRSTTOUCH);
430 	}
431 }
432 
433 /*
434  * Given a desired number of entries for a bucket, return the zone from which
435  * to allocate the bucket.
436  */
437 static struct uma_bucket_zone *
438 bucket_zone_lookup(int entries)
439 {
440 	struct uma_bucket_zone *ubz;
441 
442 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
443 		if (ubz->ubz_entries >= entries)
444 			return (ubz);
445 	ubz--;
446 	return (ubz);
447 }
448 
449 static int
450 bucket_select(int size)
451 {
452 	struct uma_bucket_zone *ubz;
453 
454 	ubz = &bucket_zones[0];
455 	if (size > ubz->ubz_maxsize)
456 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
457 
458 	for (; ubz->ubz_entries != 0; ubz++)
459 		if (ubz->ubz_maxsize < size)
460 			break;
461 	ubz--;
462 	return (ubz->ubz_entries);
463 }
464 
465 static uma_bucket_t
466 bucket_alloc(uma_zone_t zone, void *udata, int flags)
467 {
468 	struct uma_bucket_zone *ubz;
469 	uma_bucket_t bucket;
470 
471 	/*
472 	 * Don't allocate buckets early in boot.
473 	 */
474 	if (__predict_false(booted < BOOT_KVA))
475 		return (NULL);
476 
477 	/*
478 	 * To limit bucket recursion we store the original zone flags
479 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
480 	 * NOVM flag to persist even through deep recursions.  We also
481 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
482 	 * a bucket for a bucket zone so we do not allow infinite bucket
483 	 * recursion.  This cookie will even persist to frees of unused
484 	 * buckets via the allocation path or bucket allocations in the
485 	 * free path.
486 	 */
487 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
488 		udata = (void *)(uintptr_t)zone->uz_flags;
489 	else {
490 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
491 			return (NULL);
492 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
493 	}
494 	if (((uintptr_t)udata & UMA_ZONE_VM) != 0)
495 		flags |= M_NOVM;
496 	ubz = bucket_zone_lookup(atomic_load_16(&zone->uz_bucket_size));
497 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
498 		ubz++;
499 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
500 	if (bucket) {
501 #ifdef INVARIANTS
502 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
503 #endif
504 		bucket->ub_cnt = 0;
505 		bucket->ub_entries = min(ubz->ubz_entries,
506 		    zone->uz_bucket_size_max);
507 		bucket->ub_seq = SMR_SEQ_INVALID;
508 		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
509 		    zone->uz_name, zone, bucket);
510 	}
511 
512 	return (bucket);
513 }
514 
515 static void
516 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
517 {
518 	struct uma_bucket_zone *ubz;
519 
520 	if (bucket->ub_cnt != 0)
521 		bucket_drain(zone, bucket);
522 
523 	KASSERT(bucket->ub_cnt == 0,
524 	    ("bucket_free: Freeing a non free bucket."));
525 	KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
526 	    ("bucket_free: Freeing an SMR bucket."));
527 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
528 		udata = (void *)(uintptr_t)zone->uz_flags;
529 	ubz = bucket_zone_lookup(bucket->ub_entries);
530 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
531 }
532 
533 static void
534 bucket_zone_drain(int domain)
535 {
536 	struct uma_bucket_zone *ubz;
537 
538 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
539 		uma_zone_reclaim_domain(ubz->ubz_zone, UMA_RECLAIM_DRAIN,
540 		    domain);
541 }
542 
543 #ifdef KASAN
544 _Static_assert(UMA_SMALLEST_UNIT % KASAN_SHADOW_SCALE == 0,
545     "Base UMA allocation size not a multiple of the KASAN scale factor");
546 
547 static void
548 kasan_mark_item_valid(uma_zone_t zone, void *item)
549 {
550 	void *pcpu_item;
551 	size_t sz, rsz;
552 	int i;
553 
554 	if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
555 		return;
556 
557 	sz = zone->uz_size;
558 	rsz = roundup2(sz, KASAN_SHADOW_SCALE);
559 	if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
560 		kasan_mark(item, sz, rsz, KASAN_GENERIC_REDZONE);
561 	} else {
562 		pcpu_item = zpcpu_base_to_offset(item);
563 		for (i = 0; i <= mp_maxid; i++)
564 			kasan_mark(zpcpu_get_cpu(pcpu_item, i), sz, rsz,
565 			    KASAN_GENERIC_REDZONE);
566 	}
567 }
568 
569 static void
570 kasan_mark_item_invalid(uma_zone_t zone, void *item)
571 {
572 	void *pcpu_item;
573 	size_t sz;
574 	int i;
575 
576 	if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
577 		return;
578 
579 	sz = roundup2(zone->uz_size, KASAN_SHADOW_SCALE);
580 	if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
581 		kasan_mark(item, 0, sz, KASAN_UMA_FREED);
582 	} else {
583 		pcpu_item = zpcpu_base_to_offset(item);
584 		for (i = 0; i <= mp_maxid; i++)
585 			kasan_mark(zpcpu_get_cpu(pcpu_item, i), 0, sz,
586 			    KASAN_UMA_FREED);
587 	}
588 }
589 
590 static void
591 kasan_mark_slab_valid(uma_keg_t keg, void *mem)
592 {
593 	size_t sz;
594 
595 	if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
596 		sz = keg->uk_ppera * PAGE_SIZE;
597 		kasan_mark(mem, sz, sz, 0);
598 	}
599 }
600 
601 static void
602 kasan_mark_slab_invalid(uma_keg_t keg, void *mem)
603 {
604 	size_t sz;
605 
606 	if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
607 		if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
608 			sz = keg->uk_ppera * PAGE_SIZE;
609 		else
610 			sz = keg->uk_pgoff;
611 		kasan_mark(mem, 0, sz, KASAN_UMA_FREED);
612 	}
613 }
614 #else /* !KASAN */
615 static void
616 kasan_mark_item_valid(uma_zone_t zone __unused, void *item __unused)
617 {
618 }
619 
620 static void
621 kasan_mark_item_invalid(uma_zone_t zone __unused, void *item __unused)
622 {
623 }
624 
625 static void
626 kasan_mark_slab_valid(uma_keg_t keg __unused, void *mem __unused)
627 {
628 }
629 
630 static void
631 kasan_mark_slab_invalid(uma_keg_t keg __unused, void *mem __unused)
632 {
633 }
634 #endif /* KASAN */
635 
636 #ifdef KMSAN
637 static inline void
638 kmsan_mark_item_uninitialized(uma_zone_t zone, void *item)
639 {
640 	void *pcpu_item;
641 	size_t sz;
642 	int i;
643 
644 	if ((zone->uz_flags &
645 	    (UMA_ZFLAG_CACHE | UMA_ZONE_SECONDARY | UMA_ZONE_MALLOC)) != 0) {
646 		/*
647 		 * Cache zones should not be instrumented by default, as UMA
648 		 * does not have enough information to do so correctly.
649 		 * Consumers can mark items themselves if it makes sense to do
650 		 * so.
651 		 *
652 		 * Items from secondary zones are initialized by the parent
653 		 * zone and thus cannot safely be marked by UMA.
654 		 *
655 		 * malloc zones are handled directly by malloc(9) and friends,
656 		 * since they can provide more precise origin tracking.
657 		 */
658 		return;
659 	}
660 	if (zone->uz_keg->uk_init != NULL) {
661 		/*
662 		 * By definition, initialized items cannot be marked.  The
663 		 * best we can do is mark items from these zones after they
664 		 * are freed to the keg.
665 		 */
666 		return;
667 	}
668 
669 	sz = zone->uz_size;
670 	if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
671 		kmsan_orig(item, sz, KMSAN_TYPE_UMA, KMSAN_RET_ADDR);
672 		kmsan_mark(item, sz, KMSAN_STATE_UNINIT);
673 	} else {
674 		pcpu_item = zpcpu_base_to_offset(item);
675 		for (i = 0; i <= mp_maxid; i++) {
676 			kmsan_orig(zpcpu_get_cpu(pcpu_item, i), sz,
677 			    KMSAN_TYPE_UMA, KMSAN_RET_ADDR);
678 			kmsan_mark(zpcpu_get_cpu(pcpu_item, i), sz,
679 			    KMSAN_STATE_INITED);
680 		}
681 	}
682 }
683 #else /* !KMSAN */
684 static inline void
685 kmsan_mark_item_uninitialized(uma_zone_t zone __unused, void *item __unused)
686 {
687 }
688 #endif /* KMSAN */
689 
690 /*
691  * Acquire the domain lock and record contention.
692  */
693 static uma_zone_domain_t
694 zone_domain_lock(uma_zone_t zone, int domain)
695 {
696 	uma_zone_domain_t zdom;
697 	bool lockfail;
698 
699 	zdom = ZDOM_GET(zone, domain);
700 	lockfail = false;
701 	if (ZDOM_OWNED(zdom))
702 		lockfail = true;
703 	ZDOM_LOCK(zdom);
704 	/* This is unsynchronized.  The counter does not need to be precise. */
705 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
706 		zone->uz_bucket_size++;
707 	return (zdom);
708 }
709 
710 /*
711  * Search for the domain with the least cached items and return it if it
712  * is out of balance with the preferred domain.
713  */
714 static __noinline int
715 zone_domain_lowest(uma_zone_t zone, int pref)
716 {
717 	long least, nitems, prefitems;
718 	int domain;
719 	int i;
720 
721 	prefitems = least = LONG_MAX;
722 	domain = 0;
723 	for (i = 0; i < vm_ndomains; i++) {
724 		nitems = ZDOM_GET(zone, i)->uzd_nitems;
725 		if (nitems < least) {
726 			domain = i;
727 			least = nitems;
728 		}
729 		if (domain == pref)
730 			prefitems = nitems;
731 	}
732 	if (prefitems < least * 2)
733 		return (pref);
734 
735 	return (domain);
736 }
737 
738 /*
739  * Search for the domain with the most cached items and return it or the
740  * preferred domain if it has enough to proceed.
741  */
742 static __noinline int
743 zone_domain_highest(uma_zone_t zone, int pref)
744 {
745 	long most, nitems;
746 	int domain;
747 	int i;
748 
749 	if (ZDOM_GET(zone, pref)->uzd_nitems > BUCKET_MAX)
750 		return (pref);
751 
752 	most = 0;
753 	domain = 0;
754 	for (i = 0; i < vm_ndomains; i++) {
755 		nitems = ZDOM_GET(zone, i)->uzd_nitems;
756 		if (nitems > most) {
757 			domain = i;
758 			most = nitems;
759 		}
760 	}
761 
762 	return (domain);
763 }
764 
765 /*
766  * Set the maximum imax value.
767  */
768 static void
769 zone_domain_imax_set(uma_zone_domain_t zdom, int nitems)
770 {
771 	long old;
772 
773 	old = zdom->uzd_imax;
774 	do {
775 		if (old >= nitems)
776 			return;
777 	} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, nitems) == 0);
778 
779 	/*
780 	 * We are at new maximum, so do the last WSS update for the old
781 	 * bimin and prepare to measure next allocation batch.
782 	 */
783 	if (zdom->uzd_wss < old - zdom->uzd_bimin)
784 		zdom->uzd_wss = old - zdom->uzd_bimin;
785 	zdom->uzd_bimin = nitems;
786 }
787 
788 /*
789  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
790  * zone's caches.  If a bucket is found the zone is not locked on return.
791  */
792 static uma_bucket_t
793 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim)
794 {
795 	uma_bucket_t bucket;
796 	long cnt;
797 	int i;
798 	bool dtor = false;
799 
800 	ZDOM_LOCK_ASSERT(zdom);
801 
802 	if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
803 		return (NULL);
804 
805 	/* SMR Buckets can not be re-used until readers expire. */
806 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
807 	    bucket->ub_seq != SMR_SEQ_INVALID) {
808 		if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
809 			return (NULL);
810 		bucket->ub_seq = SMR_SEQ_INVALID;
811 		dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR;
812 		if (STAILQ_NEXT(bucket, ub_link) != NULL)
813 			zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
814 	}
815 	STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
816 
817 	KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
818 	    ("%s: item count underflow (%ld, %d)",
819 	    __func__, zdom->uzd_nitems, bucket->ub_cnt));
820 	KASSERT(bucket->ub_cnt > 0,
821 	    ("%s: empty bucket in bucket cache", __func__));
822 	zdom->uzd_nitems -= bucket->ub_cnt;
823 
824 	if (reclaim) {
825 		/*
826 		 * Shift the bounds of the current WSS interval to avoid
827 		 * perturbing the estimates.
828 		 */
829 		cnt = lmin(zdom->uzd_bimin, bucket->ub_cnt);
830 		atomic_subtract_long(&zdom->uzd_imax, cnt);
831 		zdom->uzd_bimin -= cnt;
832 		zdom->uzd_imin -= lmin(zdom->uzd_imin, bucket->ub_cnt);
833 		if (zdom->uzd_limin >= bucket->ub_cnt) {
834 			zdom->uzd_limin -= bucket->ub_cnt;
835 		} else {
836 			zdom->uzd_limin = 0;
837 			zdom->uzd_timin = 0;
838 		}
839 	} else if (zdom->uzd_bimin > zdom->uzd_nitems) {
840 		zdom->uzd_bimin = zdom->uzd_nitems;
841 		if (zdom->uzd_imin > zdom->uzd_nitems)
842 			zdom->uzd_imin = zdom->uzd_nitems;
843 	}
844 
845 	ZDOM_UNLOCK(zdom);
846 	if (dtor)
847 		for (i = 0; i < bucket->ub_cnt; i++)
848 			item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
849 			    NULL, SKIP_NONE);
850 
851 	return (bucket);
852 }
853 
854 /*
855  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
856  * whether the bucket's contents should be counted as part of the zone's working
857  * set.  The bucket may be freed if it exceeds the bucket limit.
858  */
859 static void
860 zone_put_bucket(uma_zone_t zone, int domain, uma_bucket_t bucket, void *udata,
861     const bool ws)
862 {
863 	uma_zone_domain_t zdom;
864 
865 	/* We don't cache empty buckets.  This can happen after a reclaim. */
866 	if (bucket->ub_cnt == 0)
867 		goto out;
868 	zdom = zone_domain_lock(zone, domain);
869 
870 	/*
871 	 * Conditionally set the maximum number of items.
872 	 */
873 	zdom->uzd_nitems += bucket->ub_cnt;
874 	if (__predict_true(zdom->uzd_nitems < zone->uz_bucket_max)) {
875 		if (ws) {
876 			zone_domain_imax_set(zdom, zdom->uzd_nitems);
877 		} else {
878 			/*
879 			 * Shift the bounds of the current WSS interval to
880 			 * avoid perturbing the estimates.
881 			 */
882 			atomic_add_long(&zdom->uzd_imax, bucket->ub_cnt);
883 			zdom->uzd_imin += bucket->ub_cnt;
884 			zdom->uzd_bimin += bucket->ub_cnt;
885 			zdom->uzd_limin += bucket->ub_cnt;
886 		}
887 		if (STAILQ_EMPTY(&zdom->uzd_buckets))
888 			zdom->uzd_seq = bucket->ub_seq;
889 
890 		/*
891 		 * Try to promote reuse of recently used items.  For items
892 		 * protected by SMR, try to defer reuse to minimize polling.
893 		 */
894 		if (bucket->ub_seq == SMR_SEQ_INVALID)
895 			STAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
896 		else
897 			STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
898 		ZDOM_UNLOCK(zdom);
899 		return;
900 	}
901 	zdom->uzd_nitems -= bucket->ub_cnt;
902 	ZDOM_UNLOCK(zdom);
903 out:
904 	bucket_free(zone, bucket, udata);
905 }
906 
907 /* Pops an item out of a per-cpu cache bucket. */
908 static inline void *
909 cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
910 {
911 	void *item;
912 
913 	CRITICAL_ASSERT(curthread);
914 
915 	bucket->ucb_cnt--;
916 	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
917 #ifdef INVARIANTS
918 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
919 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
920 #endif
921 	cache->uc_allocs++;
922 
923 	return (item);
924 }
925 
926 /* Pushes an item into a per-cpu cache bucket. */
927 static inline void
928 cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
929 {
930 
931 	CRITICAL_ASSERT(curthread);
932 	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
933 	    ("uma_zfree: Freeing to non free bucket index."));
934 
935 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
936 	bucket->ucb_cnt++;
937 	cache->uc_frees++;
938 }
939 
940 /*
941  * Unload a UMA bucket from a per-cpu cache.
942  */
943 static inline uma_bucket_t
944 cache_bucket_unload(uma_cache_bucket_t bucket)
945 {
946 	uma_bucket_t b;
947 
948 	b = bucket->ucb_bucket;
949 	if (b != NULL) {
950 		MPASS(b->ub_entries == bucket->ucb_entries);
951 		b->ub_cnt = bucket->ucb_cnt;
952 		bucket->ucb_bucket = NULL;
953 		bucket->ucb_entries = bucket->ucb_cnt = 0;
954 	}
955 
956 	return (b);
957 }
958 
959 static inline uma_bucket_t
960 cache_bucket_unload_alloc(uma_cache_t cache)
961 {
962 
963 	return (cache_bucket_unload(&cache->uc_allocbucket));
964 }
965 
966 static inline uma_bucket_t
967 cache_bucket_unload_free(uma_cache_t cache)
968 {
969 
970 	return (cache_bucket_unload(&cache->uc_freebucket));
971 }
972 
973 static inline uma_bucket_t
974 cache_bucket_unload_cross(uma_cache_t cache)
975 {
976 
977 	return (cache_bucket_unload(&cache->uc_crossbucket));
978 }
979 
980 /*
981  * Load a bucket into a per-cpu cache bucket.
982  */
983 static inline void
984 cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
985 {
986 
987 	CRITICAL_ASSERT(curthread);
988 	MPASS(bucket->ucb_bucket == NULL);
989 	MPASS(b->ub_seq == SMR_SEQ_INVALID);
990 
991 	bucket->ucb_bucket = b;
992 	bucket->ucb_cnt = b->ub_cnt;
993 	bucket->ucb_entries = b->ub_entries;
994 }
995 
996 static inline void
997 cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b)
998 {
999 
1000 	cache_bucket_load(&cache->uc_allocbucket, b);
1001 }
1002 
1003 static inline void
1004 cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b)
1005 {
1006 
1007 	cache_bucket_load(&cache->uc_freebucket, b);
1008 }
1009 
1010 #ifdef NUMA
1011 static inline void
1012 cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b)
1013 {
1014 
1015 	cache_bucket_load(&cache->uc_crossbucket, b);
1016 }
1017 #endif
1018 
1019 /*
1020  * Copy and preserve ucb_spare.
1021  */
1022 static inline void
1023 cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
1024 {
1025 
1026 	b1->ucb_bucket = b2->ucb_bucket;
1027 	b1->ucb_entries = b2->ucb_entries;
1028 	b1->ucb_cnt = b2->ucb_cnt;
1029 }
1030 
1031 /*
1032  * Swap two cache buckets.
1033  */
1034 static inline void
1035 cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
1036 {
1037 	struct uma_cache_bucket b3;
1038 
1039 	CRITICAL_ASSERT(curthread);
1040 
1041 	cache_bucket_copy(&b3, b1);
1042 	cache_bucket_copy(b1, b2);
1043 	cache_bucket_copy(b2, &b3);
1044 }
1045 
1046 /*
1047  * Attempt to fetch a bucket from a zone on behalf of the current cpu cache.
1048  */
1049 static uma_bucket_t
1050 cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain)
1051 {
1052 	uma_zone_domain_t zdom;
1053 	uma_bucket_t bucket;
1054 
1055 	/*
1056 	 * Avoid the lock if possible.
1057 	 */
1058 	zdom = ZDOM_GET(zone, domain);
1059 	if (zdom->uzd_nitems == 0)
1060 		return (NULL);
1061 
1062 	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) != 0 &&
1063 	    !smr_poll(zone->uz_smr, zdom->uzd_seq, false))
1064 		return (NULL);
1065 
1066 	/*
1067 	 * Check the zone's cache of buckets.
1068 	 */
1069 	zdom = zone_domain_lock(zone, domain);
1070 	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
1071 		return (bucket);
1072 	ZDOM_UNLOCK(zdom);
1073 
1074 	return (NULL);
1075 }
1076 
1077 static void
1078 zone_log_warning(uma_zone_t zone)
1079 {
1080 	static const struct timeval warninterval = { 300, 0 };
1081 
1082 	if (!zone_warnings || zone->uz_warning == NULL)
1083 		return;
1084 
1085 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
1086 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
1087 }
1088 
1089 static inline void
1090 zone_maxaction(uma_zone_t zone)
1091 {
1092 
1093 	if (zone->uz_maxaction.ta_func != NULL)
1094 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
1095 }
1096 
1097 /*
1098  * Routine called by timeout which is used to fire off some time interval
1099  * based calculations.  (stats, hash size, etc.)
1100  *
1101  * Arguments:
1102  *	arg   Unused
1103  *
1104  * Returns:
1105  *	Nothing
1106  */
1107 static void
1108 uma_timeout(void *unused)
1109 {
1110 	bucket_enable();
1111 	zone_foreach(zone_timeout, NULL);
1112 
1113 	/* Reschedule this event */
1114 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1115 }
1116 
1117 /*
1118  * Update the working set size estimates for the zone's bucket cache.
1119  * The constants chosen here are somewhat arbitrary.
1120  */
1121 static void
1122 zone_domain_update_wss(uma_zone_domain_t zdom)
1123 {
1124 	long m;
1125 
1126 	ZDOM_LOCK_ASSERT(zdom);
1127 	MPASS(zdom->uzd_imax >= zdom->uzd_nitems);
1128 	MPASS(zdom->uzd_nitems >= zdom->uzd_bimin);
1129 	MPASS(zdom->uzd_bimin >= zdom->uzd_imin);
1130 
1131 	/*
1132 	 * Estimate WSS as modified moving average of biggest allocation
1133 	 * batches for each period over few minutes (UMA_TIMEOUT of 20s).
1134 	 */
1135 	zdom->uzd_wss = lmax(zdom->uzd_wss * 3 / 4,
1136 	    zdom->uzd_imax - zdom->uzd_bimin);
1137 
1138 	/*
1139 	 * Estimate longtime minimum item count as a combination of recent
1140 	 * minimum item count, adjusted by WSS for safety, and the modified
1141 	 * moving average over the last several hours (UMA_TIMEOUT of 20s).
1142 	 * timin measures time since limin tried to go negative, that means
1143 	 * we were dangerously close to or got out of cache.
1144 	 */
1145 	m = zdom->uzd_imin - zdom->uzd_wss;
1146 	if (m >= 0) {
1147 		if (zdom->uzd_limin >= m)
1148 			zdom->uzd_limin = m;
1149 		else
1150 			zdom->uzd_limin = (m + zdom->uzd_limin * 255) / 256;
1151 		zdom->uzd_timin++;
1152 	} else {
1153 		zdom->uzd_limin = 0;
1154 		zdom->uzd_timin = 0;
1155 	}
1156 
1157 	/* To reduce period edge effects on WSS keep half of the imax. */
1158 	atomic_subtract_long(&zdom->uzd_imax,
1159 	    (zdom->uzd_imax - zdom->uzd_nitems + 1) / 2);
1160 	zdom->uzd_imin = zdom->uzd_bimin = zdom->uzd_nitems;
1161 }
1162 
1163 /*
1164  * Routine to perform timeout driven calculations.  This expands the
1165  * hashes and does per cpu statistics aggregation.
1166  *
1167  *  Returns nothing.
1168  */
1169 static void
1170 zone_timeout(uma_zone_t zone, void *unused)
1171 {
1172 	uma_keg_t keg;
1173 	u_int slabs, pages;
1174 
1175 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
1176 		goto trim;
1177 
1178 	keg = zone->uz_keg;
1179 
1180 	/*
1181 	 * Hash zones are non-numa by definition so the first domain
1182 	 * is the only one present.
1183 	 */
1184 	KEG_LOCK(keg, 0);
1185 	pages = keg->uk_domain[0].ud_pages;
1186 
1187 	/*
1188 	 * Expand the keg hash table.
1189 	 *
1190 	 * This is done if the number of slabs is larger than the hash size.
1191 	 * What I'm trying to do here is completely reduce collisions.  This
1192 	 * may be a little aggressive.  Should I allow for two collisions max?
1193 	 */
1194 	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
1195 		struct uma_hash newhash;
1196 		struct uma_hash oldhash;
1197 		int ret;
1198 
1199 		/*
1200 		 * This is so involved because allocating and freeing
1201 		 * while the keg lock is held will lead to deadlock.
1202 		 * I have to do everything in stages and check for
1203 		 * races.
1204 		 */
1205 		KEG_UNLOCK(keg, 0);
1206 		ret = hash_alloc(&newhash, 1 << fls(slabs));
1207 		KEG_LOCK(keg, 0);
1208 		if (ret) {
1209 			if (hash_expand(&keg->uk_hash, &newhash)) {
1210 				oldhash = keg->uk_hash;
1211 				keg->uk_hash = newhash;
1212 			} else
1213 				oldhash = newhash;
1214 
1215 			KEG_UNLOCK(keg, 0);
1216 			hash_free(&oldhash);
1217 			goto trim;
1218 		}
1219 	}
1220 	KEG_UNLOCK(keg, 0);
1221 
1222 trim:
1223 	/* Trim caches not used for a long time. */
1224 	for (int i = 0; i < vm_ndomains; i++) {
1225 		if (bucket_cache_reclaim_domain(zone, false, false, i) &&
1226 		    (zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1227 			keg_drain(zone->uz_keg, i);
1228 	}
1229 }
1230 
1231 /*
1232  * Allocate and zero fill the next sized hash table from the appropriate
1233  * backing store.
1234  *
1235  * Arguments:
1236  *	hash  A new hash structure with the old hash size in uh_hashsize
1237  *
1238  * Returns:
1239  *	1 on success and 0 on failure.
1240  */
1241 static int
1242 hash_alloc(struct uma_hash *hash, u_int size)
1243 {
1244 	size_t alloc;
1245 
1246 	KASSERT(powerof2(size), ("hash size must be power of 2"));
1247 	if (size > UMA_HASH_SIZE_INIT)  {
1248 		hash->uh_hashsize = size;
1249 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
1250 		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
1251 	} else {
1252 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
1253 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
1254 		    UMA_ANYDOMAIN, M_WAITOK);
1255 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
1256 	}
1257 	if (hash->uh_slab_hash) {
1258 		bzero(hash->uh_slab_hash, alloc);
1259 		hash->uh_hashmask = hash->uh_hashsize - 1;
1260 		return (1);
1261 	}
1262 
1263 	return (0);
1264 }
1265 
1266 /*
1267  * Expands the hash table for HASH zones.  This is done from zone_timeout
1268  * to reduce collisions.  This must not be done in the regular allocation
1269  * path, otherwise, we can recurse on the vm while allocating pages.
1270  *
1271  * Arguments:
1272  *	oldhash  The hash you want to expand
1273  *	newhash  The hash structure for the new table
1274  *
1275  * Returns:
1276  *	Nothing
1277  *
1278  * Discussion:
1279  */
1280 static int
1281 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
1282 {
1283 	uma_hash_slab_t slab;
1284 	u_int hval;
1285 	u_int idx;
1286 
1287 	if (!newhash->uh_slab_hash)
1288 		return (0);
1289 
1290 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
1291 		return (0);
1292 
1293 	/*
1294 	 * I need to investigate hash algorithms for resizing without a
1295 	 * full rehash.
1296 	 */
1297 
1298 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
1299 		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
1300 			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
1301 			LIST_REMOVE(slab, uhs_hlink);
1302 			hval = UMA_HASH(newhash, slab->uhs_data);
1303 			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
1304 			    slab, uhs_hlink);
1305 		}
1306 
1307 	return (1);
1308 }
1309 
1310 /*
1311  * Free the hash bucket to the appropriate backing store.
1312  *
1313  * Arguments:
1314  *	slab_hash  The hash bucket we're freeing
1315  *	hashsize   The number of entries in that hash bucket
1316  *
1317  * Returns:
1318  *	Nothing
1319  */
1320 static void
1321 hash_free(struct uma_hash *hash)
1322 {
1323 	if (hash->uh_slab_hash == NULL)
1324 		return;
1325 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
1326 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
1327 	else
1328 		free(hash->uh_slab_hash, M_UMAHASH);
1329 }
1330 
1331 /*
1332  * Frees all outstanding items in a bucket
1333  *
1334  * Arguments:
1335  *	zone   The zone to free to, must be unlocked.
1336  *	bucket The free/alloc bucket with items.
1337  *
1338  * Returns:
1339  *	Nothing
1340  */
1341 static void
1342 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
1343 {
1344 	int i;
1345 
1346 	if (bucket->ub_cnt == 0)
1347 		return;
1348 
1349 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
1350 	    bucket->ub_seq != SMR_SEQ_INVALID) {
1351 		smr_wait(zone->uz_smr, bucket->ub_seq);
1352 		bucket->ub_seq = SMR_SEQ_INVALID;
1353 		for (i = 0; i < bucket->ub_cnt; i++)
1354 			item_dtor(zone, bucket->ub_bucket[i],
1355 			    zone->uz_size, NULL, SKIP_NONE);
1356 	}
1357 	if (zone->uz_fini)
1358 		for (i = 0; i < bucket->ub_cnt; i++) {
1359 			kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
1360 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
1361 			kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
1362 		}
1363 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
1364 	if (zone->uz_max_items > 0)
1365 		zone_free_limit(zone, bucket->ub_cnt);
1366 #ifdef INVARIANTS
1367 	bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
1368 #endif
1369 	bucket->ub_cnt = 0;
1370 }
1371 
1372 /*
1373  * Drains the per cpu caches for a zone.
1374  *
1375  * NOTE: This may only be called while the zone is being torn down, and not
1376  * during normal operation.  This is necessary in order that we do not have
1377  * to migrate CPUs to drain the per-CPU caches.
1378  *
1379  * Arguments:
1380  *	zone     The zone to drain, must be unlocked.
1381  *
1382  * Returns:
1383  *	Nothing
1384  */
1385 static void
1386 cache_drain(uma_zone_t zone)
1387 {
1388 	uma_cache_t cache;
1389 	uma_bucket_t bucket;
1390 	smr_seq_t seq;
1391 	int cpu;
1392 
1393 	/*
1394 	 * XXX: It is safe to not lock the per-CPU caches, because we're
1395 	 * tearing down the zone anyway.  I.e., there will be no further use
1396 	 * of the caches at this point.
1397 	 *
1398 	 * XXX: It would good to be able to assert that the zone is being
1399 	 * torn down to prevent improper use of cache_drain().
1400 	 */
1401 	seq = SMR_SEQ_INVALID;
1402 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
1403 		seq = smr_advance(zone->uz_smr);
1404 	CPU_FOREACH(cpu) {
1405 		cache = &zone->uz_cpu[cpu];
1406 		bucket = cache_bucket_unload_alloc(cache);
1407 		if (bucket != NULL)
1408 			bucket_free(zone, bucket, NULL);
1409 		bucket = cache_bucket_unload_free(cache);
1410 		if (bucket != NULL) {
1411 			bucket->ub_seq = seq;
1412 			bucket_free(zone, bucket, NULL);
1413 		}
1414 		bucket = cache_bucket_unload_cross(cache);
1415 		if (bucket != NULL) {
1416 			bucket->ub_seq = seq;
1417 			bucket_free(zone, bucket, NULL);
1418 		}
1419 	}
1420 	bucket_cache_reclaim(zone, true, UMA_ANYDOMAIN);
1421 }
1422 
1423 static void
1424 cache_shrink(uma_zone_t zone, void *unused)
1425 {
1426 
1427 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1428 		return;
1429 
1430 	ZONE_LOCK(zone);
1431 	zone->uz_bucket_size =
1432 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
1433 	ZONE_UNLOCK(zone);
1434 }
1435 
1436 static void
1437 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
1438 {
1439 	uma_cache_t cache;
1440 	uma_bucket_t b1, b2, b3;
1441 	int domain;
1442 
1443 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1444 		return;
1445 
1446 	b1 = b2 = b3 = NULL;
1447 	critical_enter();
1448 	cache = &zone->uz_cpu[curcpu];
1449 	domain = PCPU_GET(domain);
1450 	b1 = cache_bucket_unload_alloc(cache);
1451 
1452 	/*
1453 	 * Don't flush SMR zone buckets.  This leaves the zone without a
1454 	 * bucket and forces every free to synchronize().
1455 	 */
1456 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0) {
1457 		b2 = cache_bucket_unload_free(cache);
1458 		b3 = cache_bucket_unload_cross(cache);
1459 	}
1460 	critical_exit();
1461 
1462 	if (b1 != NULL)
1463 		zone_free_bucket(zone, b1, NULL, domain, false);
1464 	if (b2 != NULL)
1465 		zone_free_bucket(zone, b2, NULL, domain, false);
1466 	if (b3 != NULL) {
1467 		/* Adjust the domain so it goes to zone_free_cross. */
1468 		domain = (domain + 1) % vm_ndomains;
1469 		zone_free_bucket(zone, b3, NULL, domain, false);
1470 	}
1471 }
1472 
1473 /*
1474  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
1475  * This is an expensive call because it needs to bind to all CPUs
1476  * one by one and enter a critical section on each of them in order
1477  * to safely access their cache buckets.
1478  * Zone lock must not be held on call this function.
1479  */
1480 static void
1481 pcpu_cache_drain_safe(uma_zone_t zone)
1482 {
1483 	int cpu;
1484 
1485 	/*
1486 	 * Polite bucket sizes shrinking was not enough, shrink aggressively.
1487 	 */
1488 	if (zone)
1489 		cache_shrink(zone, NULL);
1490 	else
1491 		zone_foreach(cache_shrink, NULL);
1492 
1493 	CPU_FOREACH(cpu) {
1494 		thread_lock(curthread);
1495 		sched_bind(curthread, cpu);
1496 		thread_unlock(curthread);
1497 
1498 		if (zone)
1499 			cache_drain_safe_cpu(zone, NULL);
1500 		else
1501 			zone_foreach(cache_drain_safe_cpu, NULL);
1502 	}
1503 	thread_lock(curthread);
1504 	sched_unbind(curthread);
1505 	thread_unlock(curthread);
1506 }
1507 
1508 /*
1509  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
1510  * requested a drain, otherwise the per-domain caches are trimmed to either
1511  * estimated working set size.
1512  */
1513 static bool
1514 bucket_cache_reclaim_domain(uma_zone_t zone, bool drain, bool trim, int domain)
1515 {
1516 	uma_zone_domain_t zdom;
1517 	uma_bucket_t bucket;
1518 	long target;
1519 	bool done = false;
1520 
1521 	/*
1522 	 * The cross bucket is partially filled and not part of
1523 	 * the item count.  Reclaim it individually here.
1524 	 */
1525 	zdom = ZDOM_GET(zone, domain);
1526 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 || drain) {
1527 		ZONE_CROSS_LOCK(zone);
1528 		bucket = zdom->uzd_cross;
1529 		zdom->uzd_cross = NULL;
1530 		ZONE_CROSS_UNLOCK(zone);
1531 		if (bucket != NULL)
1532 			bucket_free(zone, bucket, NULL);
1533 	}
1534 
1535 	/*
1536 	 * If we were asked to drain the zone, we are done only once
1537 	 * this bucket cache is empty.  If trim, we reclaim items in
1538 	 * excess of the zone's estimated working set size.  Multiple
1539 	 * consecutive calls will shrink the WSS and so reclaim more.
1540 	 * If neither drain nor trim, then voluntarily reclaim 1/4
1541 	 * (to reduce first spike) of items not used for a long time.
1542 	 */
1543 	ZDOM_LOCK(zdom);
1544 	zone_domain_update_wss(zdom);
1545 	if (drain)
1546 		target = 0;
1547 	else if (trim)
1548 		target = zdom->uzd_wss;
1549 	else if (zdom->uzd_timin > 900 / UMA_TIMEOUT)
1550 		target = zdom->uzd_nitems - zdom->uzd_limin / 4;
1551 	else {
1552 		ZDOM_UNLOCK(zdom);
1553 		return (done);
1554 	}
1555 	while ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) != NULL &&
1556 	    zdom->uzd_nitems >= target + bucket->ub_cnt) {
1557 		bucket = zone_fetch_bucket(zone, zdom, true);
1558 		if (bucket == NULL)
1559 			break;
1560 		bucket_free(zone, bucket, NULL);
1561 		done = true;
1562 		ZDOM_LOCK(zdom);
1563 	}
1564 	ZDOM_UNLOCK(zdom);
1565 	return (done);
1566 }
1567 
1568 static void
1569 bucket_cache_reclaim(uma_zone_t zone, bool drain, int domain)
1570 {
1571 	int i;
1572 
1573 	/*
1574 	 * Shrink the zone bucket size to ensure that the per-CPU caches
1575 	 * don't grow too large.
1576 	 */
1577 	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
1578 		zone->uz_bucket_size--;
1579 
1580 	if (domain != UMA_ANYDOMAIN &&
1581 	    (zone->uz_flags & UMA_ZONE_ROUNDROBIN) == 0) {
1582 		bucket_cache_reclaim_domain(zone, drain, true, domain);
1583 	} else {
1584 		for (i = 0; i < vm_ndomains; i++)
1585 			bucket_cache_reclaim_domain(zone, drain, true, i);
1586 	}
1587 }
1588 
1589 static void
1590 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
1591 {
1592 	uint8_t *mem;
1593 	size_t size;
1594 	int i;
1595 	uint8_t flags;
1596 
1597 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
1598 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
1599 
1600 	mem = slab_data(slab, keg);
1601 	size = PAGE_SIZE * keg->uk_ppera;
1602 
1603 	kasan_mark_slab_valid(keg, mem);
1604 	if (keg->uk_fini != NULL) {
1605 		for (i = start - 1; i > -1; i--)
1606 #ifdef INVARIANTS
1607 		/*
1608 		 * trash_fini implies that dtor was trash_dtor. trash_fini
1609 		 * would check that memory hasn't been modified since free,
1610 		 * which executed trash_dtor.
1611 		 * That's why we need to run uma_dbg_kskip() check here,
1612 		 * albeit we don't make skip check for other init/fini
1613 		 * invocations.
1614 		 */
1615 		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
1616 		    keg->uk_fini != trash_fini)
1617 #endif
1618 			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
1619 	}
1620 	flags = slab->us_flags;
1621 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1622 		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
1623 		    NULL, SKIP_NONE);
1624 	}
1625 	keg->uk_freef(mem, size, flags);
1626 	uma_total_dec(size);
1627 }
1628 
1629 static void
1630 keg_drain_domain(uma_keg_t keg, int domain)
1631 {
1632 	struct slabhead freeslabs;
1633 	uma_domain_t dom;
1634 	uma_slab_t slab, tmp;
1635 	uint32_t i, stofree, stokeep, partial;
1636 
1637 	dom = &keg->uk_domain[domain];
1638 	LIST_INIT(&freeslabs);
1639 
1640 	CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
1641 	    keg->uk_name, keg, domain, dom->ud_free_items);
1642 
1643 	KEG_LOCK(keg, domain);
1644 
1645 	/*
1646 	 * Are the free items in partially allocated slabs sufficient to meet
1647 	 * the reserve? If not, compute the number of fully free slabs that must
1648 	 * be kept.
1649 	 */
1650 	partial = dom->ud_free_items - dom->ud_free_slabs * keg->uk_ipers;
1651 	if (partial < keg->uk_reserve) {
1652 		stokeep = min(dom->ud_free_slabs,
1653 		    howmany(keg->uk_reserve - partial, keg->uk_ipers));
1654 	} else {
1655 		stokeep = 0;
1656 	}
1657 	stofree = dom->ud_free_slabs - stokeep;
1658 
1659 	/*
1660 	 * Partition the free slabs into two sets: those that must be kept in
1661 	 * order to maintain the reserve, and those that may be released back to
1662 	 * the system.  Since one set may be much larger than the other,
1663 	 * populate the smaller of the two sets and swap them if necessary.
1664 	 */
1665 	for (i = min(stofree, stokeep); i > 0; i--) {
1666 		slab = LIST_FIRST(&dom->ud_free_slab);
1667 		LIST_REMOVE(slab, us_link);
1668 		LIST_INSERT_HEAD(&freeslabs, slab, us_link);
1669 	}
1670 	if (stofree > stokeep)
1671 		LIST_SWAP(&freeslabs, &dom->ud_free_slab, uma_slab, us_link);
1672 
1673 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) {
1674 		LIST_FOREACH(slab, &freeslabs, us_link)
1675 			UMA_HASH_REMOVE(&keg->uk_hash, slab);
1676 	}
1677 	dom->ud_free_items -= stofree * keg->uk_ipers;
1678 	dom->ud_free_slabs -= stofree;
1679 	dom->ud_pages -= stofree * keg->uk_ppera;
1680 	KEG_UNLOCK(keg, domain);
1681 
1682 	LIST_FOREACH_SAFE(slab, &freeslabs, us_link, tmp)
1683 		keg_free_slab(keg, slab, keg->uk_ipers);
1684 }
1685 
1686 /*
1687  * Frees pages from a keg back to the system.  This is done on demand from
1688  * the pageout daemon.
1689  *
1690  * Returns nothing.
1691  */
1692 static void
1693 keg_drain(uma_keg_t keg, int domain)
1694 {
1695 	int i;
1696 
1697 	if ((keg->uk_flags & UMA_ZONE_NOFREE) != 0)
1698 		return;
1699 	if (domain != UMA_ANYDOMAIN) {
1700 		keg_drain_domain(keg, domain);
1701 	} else {
1702 		for (i = 0; i < vm_ndomains; i++)
1703 			keg_drain_domain(keg, i);
1704 	}
1705 }
1706 
1707 static void
1708 zone_reclaim(uma_zone_t zone, int domain, int waitok, bool drain)
1709 {
1710 	/*
1711 	 * Count active reclaim operations in order to interlock with
1712 	 * zone_dtor(), which removes the zone from global lists before
1713 	 * attempting to reclaim items itself.
1714 	 *
1715 	 * The zone may be destroyed while sleeping, so only zone_dtor() should
1716 	 * specify M_WAITOK.
1717 	 */
1718 	ZONE_LOCK(zone);
1719 	if (waitok == M_WAITOK) {
1720 		while (zone->uz_reclaimers > 0)
1721 			msleep(zone, ZONE_LOCKPTR(zone), PVM, "zonedrain", 1);
1722 	}
1723 	zone->uz_reclaimers++;
1724 	ZONE_UNLOCK(zone);
1725 	bucket_cache_reclaim(zone, drain, domain);
1726 
1727 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1728 		keg_drain(zone->uz_keg, domain);
1729 	ZONE_LOCK(zone);
1730 	zone->uz_reclaimers--;
1731 	if (zone->uz_reclaimers == 0)
1732 		wakeup(zone);
1733 	ZONE_UNLOCK(zone);
1734 }
1735 
1736 static void
1737 zone_drain(uma_zone_t zone, void *arg)
1738 {
1739 	int domain;
1740 
1741 	domain = (int)(uintptr_t)arg;
1742 	zone_reclaim(zone, domain, M_NOWAIT, true);
1743 }
1744 
1745 static void
1746 zone_trim(uma_zone_t zone, void *arg)
1747 {
1748 	int domain;
1749 
1750 	domain = (int)(uintptr_t)arg;
1751 	zone_reclaim(zone, domain, M_NOWAIT, false);
1752 }
1753 
1754 /*
1755  * Allocate a new slab for a keg and inserts it into the partial slab list.
1756  * The keg should be unlocked on entry.  If the allocation succeeds it will
1757  * be locked on return.
1758  *
1759  * Arguments:
1760  *	flags   Wait flags for the item initialization routine
1761  *	aflags  Wait flags for the slab allocation
1762  *
1763  * Returns:
1764  *	The slab that was allocated or NULL if there is no memory and the
1765  *	caller specified M_NOWAIT.
1766  */
1767 static uma_slab_t
1768 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1769     int aflags)
1770 {
1771 	uma_domain_t dom;
1772 	uma_slab_t slab;
1773 	unsigned long size;
1774 	uint8_t *mem;
1775 	uint8_t sflags;
1776 	int i;
1777 
1778 	KASSERT(domain >= 0 && domain < vm_ndomains,
1779 	    ("keg_alloc_slab: domain %d out of range", domain));
1780 
1781 	slab = NULL;
1782 	mem = NULL;
1783 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1784 		uma_hash_slab_t hslab;
1785 		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
1786 		    domain, aflags);
1787 		if (hslab == NULL)
1788 			goto fail;
1789 		slab = &hslab->uhs_slab;
1790 	}
1791 
1792 	/*
1793 	 * This reproduces the old vm_zone behavior of zero filling pages the
1794 	 * first time they are added to a zone.
1795 	 *
1796 	 * Malloced items are zeroed in uma_zalloc.
1797 	 */
1798 
1799 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1800 		aflags |= M_ZERO;
1801 	else
1802 		aflags &= ~M_ZERO;
1803 
1804 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1805 		aflags |= M_NODUMP;
1806 
1807 	/* zone is passed for legacy reasons. */
1808 	size = keg->uk_ppera * PAGE_SIZE;
1809 	mem = keg->uk_allocf(zone, size, domain, &sflags, aflags);
1810 	if (mem == NULL) {
1811 		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1812 			zone_free_item(slabzone(keg->uk_ipers),
1813 			    slab_tohashslab(slab), NULL, SKIP_NONE);
1814 		goto fail;
1815 	}
1816 	uma_total_inc(size);
1817 
1818 	/* For HASH zones all pages go to the same uma_domain. */
1819 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
1820 		domain = 0;
1821 
1822 	/* Point the slab into the allocated memory */
1823 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
1824 		slab = (uma_slab_t)(mem + keg->uk_pgoff);
1825 	else
1826 		slab_tohashslab(slab)->uhs_data = mem;
1827 
1828 	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
1829 		for (i = 0; i < keg->uk_ppera; i++)
1830 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1831 			    zone, slab);
1832 
1833 	slab->us_freecount = keg->uk_ipers;
1834 	slab->us_flags = sflags;
1835 	slab->us_domain = domain;
1836 
1837 	BIT_FILL(keg->uk_ipers, &slab->us_free);
1838 #ifdef INVARIANTS
1839 	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
1840 #endif
1841 
1842 	if (keg->uk_init != NULL) {
1843 		for (i = 0; i < keg->uk_ipers; i++)
1844 			if (keg->uk_init(slab_item(slab, keg, i),
1845 			    keg->uk_size, flags) != 0)
1846 				break;
1847 		if (i != keg->uk_ipers) {
1848 			keg_free_slab(keg, slab, i);
1849 			goto fail;
1850 		}
1851 	}
1852 	kasan_mark_slab_invalid(keg, mem);
1853 	KEG_LOCK(keg, domain);
1854 
1855 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1856 	    slab, keg->uk_name, keg);
1857 
1858 	if (keg->uk_flags & UMA_ZFLAG_HASH)
1859 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1860 
1861 	/*
1862 	 * If we got a slab here it's safe to mark it partially used
1863 	 * and return.  We assume that the caller is going to remove
1864 	 * at least one item.
1865 	 */
1866 	dom = &keg->uk_domain[domain];
1867 	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
1868 	dom->ud_pages += keg->uk_ppera;
1869 	dom->ud_free_items += keg->uk_ipers;
1870 
1871 	return (slab);
1872 
1873 fail:
1874 	return (NULL);
1875 }
1876 
1877 /*
1878  * This function is intended to be used early on in place of page_alloc().  It
1879  * performs contiguous physical memory allocations and uses a bump allocator for
1880  * KVA, so is usable before the kernel map is initialized.
1881  */
1882 static void *
1883 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1884     int wait)
1885 {
1886 	vm_paddr_t pa;
1887 	vm_page_t m;
1888 	int i, pages;
1889 
1890 	pages = howmany(bytes, PAGE_SIZE);
1891 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1892 
1893 	*pflag = UMA_SLAB_BOOT;
1894 	m = vm_page_alloc_noobj_contig_domain(domain, malloc2vm_flags(wait) |
1895 	    VM_ALLOC_WIRED, pages, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
1896 	    VM_MEMATTR_DEFAULT);
1897 	if (m == NULL)
1898 		return (NULL);
1899 
1900 	pa = VM_PAGE_TO_PHYS(m);
1901 	for (i = 0; i < pages; i++, pa += PAGE_SIZE) {
1902 #if defined(__aarch64__) || defined(__amd64__) || \
1903     defined(__riscv) || defined(__powerpc64__)
1904 		if ((wait & M_NODUMP) == 0)
1905 			dump_add_page(pa);
1906 #endif
1907 	}
1908 
1909 	/* Allocate KVA and indirectly advance bootmem. */
1910 	return ((void *)pmap_map(&bootmem, m->phys_addr,
1911 	    m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE));
1912 }
1913 
1914 static void
1915 startup_free(void *mem, vm_size_t bytes)
1916 {
1917 	vm_offset_t va;
1918 	vm_page_t m;
1919 
1920 	va = (vm_offset_t)mem;
1921 	m = PHYS_TO_VM_PAGE(pmap_kextract(va));
1922 
1923 	/*
1924 	 * startup_alloc() returns direct-mapped slabs on some platforms.  Avoid
1925 	 * unmapping ranges of the direct map.
1926 	 */
1927 	if (va >= bootstart && va + bytes <= bootmem)
1928 		pmap_remove(kernel_pmap, va, va + bytes);
1929 	for (; bytes != 0; bytes -= PAGE_SIZE, m++) {
1930 #if defined(__aarch64__) || defined(__amd64__) || \
1931     defined(__riscv) || defined(__powerpc64__)
1932 		dump_drop_page(VM_PAGE_TO_PHYS(m));
1933 #endif
1934 		vm_page_unwire_noq(m);
1935 		vm_page_free(m);
1936 	}
1937 }
1938 
1939 /*
1940  * Allocates a number of pages from the system
1941  *
1942  * Arguments:
1943  *	bytes  The number of bytes requested
1944  *	wait  Shall we wait?
1945  *
1946  * Returns:
1947  *	A pointer to the alloced memory or possibly
1948  *	NULL if M_NOWAIT is set.
1949  */
1950 static void *
1951 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1952     int wait)
1953 {
1954 	void *p;	/* Returned page */
1955 
1956 	*pflag = UMA_SLAB_KERNEL;
1957 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1958 
1959 	return (p);
1960 }
1961 
1962 static void *
1963 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1964     int wait)
1965 {
1966 	struct pglist alloctail;
1967 	vm_offset_t addr, zkva;
1968 	int cpu, flags;
1969 	vm_page_t p, p_next;
1970 #ifdef NUMA
1971 	struct pcpu *pc;
1972 #endif
1973 
1974 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1975 
1976 	TAILQ_INIT(&alloctail);
1977 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | malloc2vm_flags(wait);
1978 	*pflag = UMA_SLAB_KERNEL;
1979 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1980 		if (CPU_ABSENT(cpu)) {
1981 			p = vm_page_alloc_noobj(flags);
1982 		} else {
1983 #ifndef NUMA
1984 			p = vm_page_alloc_noobj(flags);
1985 #else
1986 			pc = pcpu_find(cpu);
1987 			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
1988 				p = NULL;
1989 			else
1990 				p = vm_page_alloc_noobj_domain(pc->pc_domain,
1991 				    flags);
1992 			if (__predict_false(p == NULL))
1993 				p = vm_page_alloc_noobj(flags);
1994 #endif
1995 		}
1996 		if (__predict_false(p == NULL))
1997 			goto fail;
1998 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1999 	}
2000 	if ((addr = kva_alloc(bytes)) == 0)
2001 		goto fail;
2002 	zkva = addr;
2003 	TAILQ_FOREACH(p, &alloctail, listq) {
2004 		pmap_qenter(zkva, &p, 1);
2005 		zkva += PAGE_SIZE;
2006 	}
2007 	return ((void*)addr);
2008 fail:
2009 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
2010 		vm_page_unwire_noq(p);
2011 		vm_page_free(p);
2012 	}
2013 	return (NULL);
2014 }
2015 
2016 /*
2017  * Allocates a number of pages not belonging to a VM object
2018  *
2019  * Arguments:
2020  *	bytes  The number of bytes requested
2021  *	wait   Shall we wait?
2022  *
2023  * Returns:
2024  *	A pointer to the alloced memory or possibly
2025  *	NULL if M_NOWAIT is set.
2026  */
2027 static void *
2028 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
2029     int wait)
2030 {
2031 	TAILQ_HEAD(, vm_page) alloctail;
2032 	u_long npages;
2033 	vm_offset_t retkva, zkva;
2034 	vm_page_t p, p_next;
2035 	uma_keg_t keg;
2036 	int req;
2037 
2038 	TAILQ_INIT(&alloctail);
2039 	keg = zone->uz_keg;
2040 	req = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
2041 	if ((wait & M_WAITOK) != 0)
2042 		req |= VM_ALLOC_WAITOK;
2043 
2044 	npages = howmany(bytes, PAGE_SIZE);
2045 	while (npages > 0) {
2046 		p = vm_page_alloc_noobj_domain(domain, req);
2047 		if (p != NULL) {
2048 			/*
2049 			 * Since the page does not belong to an object, its
2050 			 * listq is unused.
2051 			 */
2052 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
2053 			npages--;
2054 			continue;
2055 		}
2056 		/*
2057 		 * Page allocation failed, free intermediate pages and
2058 		 * exit.
2059 		 */
2060 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
2061 			vm_page_unwire_noq(p);
2062 			vm_page_free(p);
2063 		}
2064 		return (NULL);
2065 	}
2066 	*flags = UMA_SLAB_PRIV;
2067 	zkva = keg->uk_kva +
2068 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
2069 	retkva = zkva;
2070 	TAILQ_FOREACH(p, &alloctail, listq) {
2071 		pmap_qenter(zkva, &p, 1);
2072 		zkva += PAGE_SIZE;
2073 	}
2074 
2075 	return ((void *)retkva);
2076 }
2077 
2078 /*
2079  * Allocate physically contiguous pages.
2080  */
2081 static void *
2082 contig_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
2083     int wait)
2084 {
2085 
2086 	*pflag = UMA_SLAB_KERNEL;
2087 	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
2088 	    bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
2089 }
2090 
2091 /*
2092  * Frees a number of pages to the system
2093  *
2094  * Arguments:
2095  *	mem   A pointer to the memory to be freed
2096  *	size  The size of the memory being freed
2097  *	flags The original p->us_flags field
2098  *
2099  * Returns:
2100  *	Nothing
2101  */
2102 static void
2103 page_free(void *mem, vm_size_t size, uint8_t flags)
2104 {
2105 
2106 	if ((flags & UMA_SLAB_BOOT) != 0) {
2107 		startup_free(mem, size);
2108 		return;
2109 	}
2110 
2111 	KASSERT((flags & UMA_SLAB_KERNEL) != 0,
2112 	    ("UMA: page_free used with invalid flags %x", flags));
2113 
2114 	kmem_free((vm_offset_t)mem, size);
2115 }
2116 
2117 /*
2118  * Frees pcpu zone allocations
2119  *
2120  * Arguments:
2121  *	mem   A pointer to the memory to be freed
2122  *	size  The size of the memory being freed
2123  *	flags The original p->us_flags field
2124  *
2125  * Returns:
2126  *	Nothing
2127  */
2128 static void
2129 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
2130 {
2131 	vm_offset_t sva, curva;
2132 	vm_paddr_t paddr;
2133 	vm_page_t m;
2134 
2135 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
2136 
2137 	if ((flags & UMA_SLAB_BOOT) != 0) {
2138 		startup_free(mem, size);
2139 		return;
2140 	}
2141 
2142 	sva = (vm_offset_t)mem;
2143 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
2144 		paddr = pmap_kextract(curva);
2145 		m = PHYS_TO_VM_PAGE(paddr);
2146 		vm_page_unwire_noq(m);
2147 		vm_page_free(m);
2148 	}
2149 	pmap_qremove(sva, size >> PAGE_SHIFT);
2150 	kva_free(sva, size);
2151 }
2152 
2153 /*
2154  * Zero fill initializer
2155  *
2156  * Arguments/Returns follow uma_init specifications
2157  */
2158 static int
2159 zero_init(void *mem, int size, int flags)
2160 {
2161 	bzero(mem, size);
2162 	return (0);
2163 }
2164 
2165 #ifdef INVARIANTS
2166 static struct noslabbits *
2167 slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
2168 {
2169 
2170 	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
2171 }
2172 #endif
2173 
2174 /*
2175  * Actual size of embedded struct slab (!OFFPAGE).
2176  */
2177 static size_t
2178 slab_sizeof(int nitems)
2179 {
2180 	size_t s;
2181 
2182 	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
2183 	return (roundup(s, UMA_ALIGN_PTR + 1));
2184 }
2185 
2186 #define	UMA_FIXPT_SHIFT	31
2187 #define	UMA_FRAC_FIXPT(n, d)						\
2188 	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
2189 #define	UMA_FIXPT_PCT(f)						\
2190 	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
2191 #define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
2192 #define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
2193 
2194 /*
2195  * Compute the number of items that will fit in a slab.  If hdr is true, the
2196  * item count may be limited to provide space in the slab for an inline slab
2197  * header.  Otherwise, all slab space will be provided for item storage.
2198  */
2199 static u_int
2200 slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
2201 {
2202 	u_int ipers;
2203 	u_int padpi;
2204 
2205 	/* The padding between items is not needed after the last item. */
2206 	padpi = rsize - size;
2207 
2208 	if (hdr) {
2209 		/*
2210 		 * Start with the maximum item count and remove items until
2211 		 * the slab header first alongside the allocatable memory.
2212 		 */
2213 		for (ipers = MIN(SLAB_MAX_SETSIZE,
2214 		    (slabsize + padpi - slab_sizeof(1)) / rsize);
2215 		    ipers > 0 &&
2216 		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
2217 		    ipers--)
2218 			continue;
2219 	} else {
2220 		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
2221 	}
2222 
2223 	return (ipers);
2224 }
2225 
2226 struct keg_layout_result {
2227 	u_int format;
2228 	u_int slabsize;
2229 	u_int ipers;
2230 	u_int eff;
2231 };
2232 
2233 static void
2234 keg_layout_one(uma_keg_t keg, u_int rsize, u_int slabsize, u_int fmt,
2235     struct keg_layout_result *kl)
2236 {
2237 	u_int total;
2238 
2239 	kl->format = fmt;
2240 	kl->slabsize = slabsize;
2241 
2242 	/* Handle INTERNAL as inline with an extra page. */
2243 	if ((fmt & UMA_ZFLAG_INTERNAL) != 0) {
2244 		kl->format &= ~UMA_ZFLAG_INTERNAL;
2245 		kl->slabsize += PAGE_SIZE;
2246 	}
2247 
2248 	kl->ipers = slab_ipers_hdr(keg->uk_size, rsize, kl->slabsize,
2249 	    (fmt & UMA_ZFLAG_OFFPAGE) == 0);
2250 
2251 	/* Account for memory used by an offpage slab header. */
2252 	total = kl->slabsize;
2253 	if ((fmt & UMA_ZFLAG_OFFPAGE) != 0)
2254 		total += slabzone(kl->ipers)->uz_keg->uk_rsize;
2255 
2256 	kl->eff = UMA_FRAC_FIXPT(kl->ipers * rsize, total);
2257 }
2258 
2259 /*
2260  * Determine the format of a uma keg.  This determines where the slab header
2261  * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
2262  *
2263  * Arguments
2264  *	keg  The zone we should initialize
2265  *
2266  * Returns
2267  *	Nothing
2268  */
2269 static void
2270 keg_layout(uma_keg_t keg)
2271 {
2272 	struct keg_layout_result kl = {}, kl_tmp;
2273 	u_int fmts[2];
2274 	u_int alignsize;
2275 	u_int nfmt;
2276 	u_int pages;
2277 	u_int rsize;
2278 	u_int slabsize;
2279 	u_int i, j;
2280 
2281 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
2282 	    (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
2283 	     (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
2284 	    ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
2285 	     __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
2286 	     PRINT_UMA_ZFLAGS));
2287 	KASSERT((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) == 0 ||
2288 	    (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
2289 	    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
2290 	     PRINT_UMA_ZFLAGS));
2291 
2292 	alignsize = keg->uk_align + 1;
2293 #ifdef KASAN
2294 	/*
2295 	 * ASAN requires that each allocation be aligned to the shadow map
2296 	 * scale factor.
2297 	 */
2298 	if (alignsize < KASAN_SHADOW_SCALE)
2299 		alignsize = KASAN_SHADOW_SCALE;
2300 #endif
2301 
2302 	/*
2303 	 * Calculate the size of each allocation (rsize) according to
2304 	 * alignment.  If the requested size is smaller than we have
2305 	 * allocation bits for we round it up.
2306 	 */
2307 	rsize = MAX(keg->uk_size, UMA_SMALLEST_UNIT);
2308 	rsize = roundup2(rsize, alignsize);
2309 
2310 	if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
2311 		/*
2312 		 * We want one item to start on every align boundary in a page.
2313 		 * To do this we will span pages.  We will also extend the item
2314 		 * by the size of align if it is an even multiple of align.
2315 		 * Otherwise, it would fall on the same boundary every time.
2316 		 */
2317 		if ((rsize & alignsize) == 0)
2318 			rsize += alignsize;
2319 		slabsize = rsize * (PAGE_SIZE / alignsize);
2320 		slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE);
2321 		slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE);
2322 		slabsize = round_page(slabsize);
2323 	} else {
2324 		/*
2325 		 * Start with a slab size of as many pages as it takes to
2326 		 * represent a single item.  We will try to fit as many
2327 		 * additional items into the slab as possible.
2328 		 */
2329 		slabsize = round_page(keg->uk_size);
2330 	}
2331 
2332 	/* Build a list of all of the available formats for this keg. */
2333 	nfmt = 0;
2334 
2335 	/* Evaluate an inline slab layout. */
2336 	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
2337 		fmts[nfmt++] = 0;
2338 
2339 	/* TODO: vm_page-embedded slab. */
2340 
2341 	/*
2342 	 * We can't do OFFPAGE if we're internal or if we've been
2343 	 * asked to not go to the VM for buckets.  If we do this we
2344 	 * may end up going to the VM for slabs which we do not want
2345 	 * to do if we're UMA_ZONE_VM, which clearly forbids it.
2346 	 * In those cases, evaluate a pseudo-format called INTERNAL
2347 	 * which has an inline slab header and one extra page to
2348 	 * guarantee that it fits.
2349 	 *
2350 	 * Otherwise, see if using an OFFPAGE slab will improve our
2351 	 * efficiency.
2352 	 */
2353 	if ((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) != 0)
2354 		fmts[nfmt++] = UMA_ZFLAG_INTERNAL;
2355 	else
2356 		fmts[nfmt++] = UMA_ZFLAG_OFFPAGE;
2357 
2358 	/*
2359 	 * Choose a slab size and format which satisfy the minimum efficiency.
2360 	 * Prefer the smallest slab size that meets the constraints.
2361 	 *
2362 	 * Start with a minimum slab size, to accommodate CACHESPREAD.  Then,
2363 	 * for small items (up to PAGE_SIZE), the iteration increment is one
2364 	 * page; and for large items, the increment is one item.
2365 	 */
2366 	i = (slabsize + rsize - keg->uk_size) / MAX(PAGE_SIZE, rsize);
2367 	KASSERT(i >= 1, ("keg %s(%p) flags=0x%b slabsize=%u, rsize=%u, i=%u",
2368 	    keg->uk_name, keg, keg->uk_flags, PRINT_UMA_ZFLAGS, slabsize,
2369 	    rsize, i));
2370 	for ( ; ; i++) {
2371 		slabsize = (rsize <= PAGE_SIZE) ? ptoa(i) :
2372 		    round_page(rsize * (i - 1) + keg->uk_size);
2373 
2374 		for (j = 0; j < nfmt; j++) {
2375 			/* Only if we have no viable format yet. */
2376 			if ((fmts[j] & UMA_ZFLAG_INTERNAL) != 0 &&
2377 			    kl.ipers > 0)
2378 				continue;
2379 
2380 			keg_layout_one(keg, rsize, slabsize, fmts[j], &kl_tmp);
2381 			if (kl_tmp.eff <= kl.eff)
2382 				continue;
2383 
2384 			kl = kl_tmp;
2385 
2386 			CTR6(KTR_UMA, "keg %s layout: format %#x "
2387 			    "(ipers %u * rsize %u) / slabsize %#x = %u%% eff",
2388 			    keg->uk_name, kl.format, kl.ipers, rsize,
2389 			    kl.slabsize, UMA_FIXPT_PCT(kl.eff));
2390 
2391 			/* Stop when we reach the minimum efficiency. */
2392 			if (kl.eff >= UMA_MIN_EFF)
2393 				break;
2394 		}
2395 
2396 		if (kl.eff >= UMA_MIN_EFF || !multipage_slabs ||
2397 		    slabsize >= SLAB_MAX_SETSIZE * rsize ||
2398 		    (keg->uk_flags & (UMA_ZONE_PCPU | UMA_ZONE_CONTIG)) != 0)
2399 			break;
2400 	}
2401 
2402 	pages = atop(kl.slabsize);
2403 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
2404 		pages *= mp_maxid + 1;
2405 
2406 	keg->uk_rsize = rsize;
2407 	keg->uk_ipers = kl.ipers;
2408 	keg->uk_ppera = pages;
2409 	keg->uk_flags |= kl.format;
2410 
2411 	/*
2412 	 * How do we find the slab header if it is offpage or if not all item
2413 	 * start addresses are in the same page?  We could solve the latter
2414 	 * case with vaddr alignment, but we don't.
2415 	 */
2416 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 ||
2417 	    (keg->uk_ipers - 1) * rsize >= PAGE_SIZE) {
2418 		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
2419 			keg->uk_flags |= UMA_ZFLAG_HASH;
2420 		else
2421 			keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
2422 	}
2423 
2424 	CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u",
2425 	    __func__, keg->uk_name, keg->uk_flags, rsize, keg->uk_ipers,
2426 	    pages);
2427 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
2428 	    ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__,
2429 	     keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize,
2430 	     keg->uk_ipers, pages));
2431 }
2432 
2433 /*
2434  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
2435  * the keg onto the global keg list.
2436  *
2437  * Arguments/Returns follow uma_ctor specifications
2438  *	udata  Actually uma_kctor_args
2439  */
2440 static int
2441 keg_ctor(void *mem, int size, void *udata, int flags)
2442 {
2443 	struct uma_kctor_args *arg = udata;
2444 	uma_keg_t keg = mem;
2445 	uma_zone_t zone;
2446 	int i;
2447 
2448 	bzero(keg, size);
2449 	keg->uk_size = arg->size;
2450 	keg->uk_init = arg->uminit;
2451 	keg->uk_fini = arg->fini;
2452 	keg->uk_align = arg->align;
2453 	keg->uk_reserve = 0;
2454 	keg->uk_flags = arg->flags;
2455 
2456 	/*
2457 	 * We use a global round-robin policy by default.  Zones with
2458 	 * UMA_ZONE_FIRSTTOUCH set will use first-touch instead, in which
2459 	 * case the iterator is never run.
2460 	 */
2461 	keg->uk_dr.dr_policy = DOMAINSET_RR();
2462 	keg->uk_dr.dr_iter = 0;
2463 
2464 	/*
2465 	 * The primary zone is passed to us at keg-creation time.
2466 	 */
2467 	zone = arg->zone;
2468 	keg->uk_name = zone->uz_name;
2469 
2470 	if (arg->flags & UMA_ZONE_ZINIT)
2471 		keg->uk_init = zero_init;
2472 
2473 	if (arg->flags & UMA_ZONE_MALLOC)
2474 		keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
2475 
2476 #ifndef SMP
2477 	keg->uk_flags &= ~UMA_ZONE_PCPU;
2478 #endif
2479 
2480 	keg_layout(keg);
2481 
2482 	/*
2483 	 * Use a first-touch NUMA policy for kegs that pmap_extract() will
2484 	 * work on.  Use round-robin for everything else.
2485 	 *
2486 	 * Zones may override the default by specifying either.
2487 	 */
2488 #ifdef NUMA
2489 	if ((keg->uk_flags &
2490 	    (UMA_ZONE_ROUNDROBIN | UMA_ZFLAG_CACHE | UMA_ZONE_NOTPAGE)) == 0)
2491 		keg->uk_flags |= UMA_ZONE_FIRSTTOUCH;
2492 	else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2493 		keg->uk_flags |= UMA_ZONE_ROUNDROBIN;
2494 #endif
2495 
2496 	/*
2497 	 * If we haven't booted yet we need allocations to go through the
2498 	 * startup cache until the vm is ready.
2499 	 */
2500 #ifdef UMA_MD_SMALL_ALLOC
2501 	if (keg->uk_ppera == 1)
2502 		keg->uk_allocf = uma_small_alloc;
2503 	else
2504 #endif
2505 	if (booted < BOOT_KVA)
2506 		keg->uk_allocf = startup_alloc;
2507 	else if (keg->uk_flags & UMA_ZONE_PCPU)
2508 		keg->uk_allocf = pcpu_page_alloc;
2509 	else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 && keg->uk_ppera > 1)
2510 		keg->uk_allocf = contig_alloc;
2511 	else
2512 		keg->uk_allocf = page_alloc;
2513 #ifdef UMA_MD_SMALL_ALLOC
2514 	if (keg->uk_ppera == 1)
2515 		keg->uk_freef = uma_small_free;
2516 	else
2517 #endif
2518 	if (keg->uk_flags & UMA_ZONE_PCPU)
2519 		keg->uk_freef = pcpu_page_free;
2520 	else
2521 		keg->uk_freef = page_free;
2522 
2523 	/*
2524 	 * Initialize keg's locks.
2525 	 */
2526 	for (i = 0; i < vm_ndomains; i++)
2527 		KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS));
2528 
2529 	/*
2530 	 * If we're putting the slab header in the actual page we need to
2531 	 * figure out where in each page it goes.  See slab_sizeof
2532 	 * definition.
2533 	 */
2534 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) {
2535 		size_t shsize;
2536 
2537 		shsize = slab_sizeof(keg->uk_ipers);
2538 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
2539 		/*
2540 		 * The only way the following is possible is if with our
2541 		 * UMA_ALIGN_PTR adjustments we are now bigger than
2542 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
2543 		 * mathematically possible for all cases, so we make
2544 		 * sure here anyway.
2545 		 */
2546 		KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
2547 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
2548 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
2549 	}
2550 
2551 	if (keg->uk_flags & UMA_ZFLAG_HASH)
2552 		hash_alloc(&keg->uk_hash, 0);
2553 
2554 	CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)", keg, zone->uz_name, zone);
2555 
2556 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
2557 
2558 	rw_wlock(&uma_rwlock);
2559 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
2560 	rw_wunlock(&uma_rwlock);
2561 	return (0);
2562 }
2563 
2564 static void
2565 zone_kva_available(uma_zone_t zone, void *unused)
2566 {
2567 	uma_keg_t keg;
2568 
2569 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
2570 		return;
2571 	KEG_GET(zone, keg);
2572 
2573 	if (keg->uk_allocf == startup_alloc) {
2574 		/* Switch to the real allocator. */
2575 		if (keg->uk_flags & UMA_ZONE_PCPU)
2576 			keg->uk_allocf = pcpu_page_alloc;
2577 		else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 &&
2578 		    keg->uk_ppera > 1)
2579 			keg->uk_allocf = contig_alloc;
2580 		else
2581 			keg->uk_allocf = page_alloc;
2582 	}
2583 }
2584 
2585 static void
2586 zone_alloc_counters(uma_zone_t zone, void *unused)
2587 {
2588 
2589 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
2590 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
2591 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
2592 	zone->uz_xdomain = counter_u64_alloc(M_WAITOK);
2593 }
2594 
2595 static void
2596 zone_alloc_sysctl(uma_zone_t zone, void *unused)
2597 {
2598 	uma_zone_domain_t zdom;
2599 	uma_domain_t dom;
2600 	uma_keg_t keg;
2601 	struct sysctl_oid *oid, *domainoid;
2602 	int domains, i, cnt;
2603 	static const char *nokeg = "cache zone";
2604 	char *c;
2605 
2606 	/*
2607 	 * Make a sysctl safe copy of the zone name by removing
2608 	 * any special characters and handling dups by appending
2609 	 * an index.
2610 	 */
2611 	if (zone->uz_namecnt != 0) {
2612 		/* Count the number of decimal digits and '_' separator. */
2613 		for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++)
2614 			cnt /= 10;
2615 		zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1,
2616 		    M_UMA, M_WAITOK);
2617 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
2618 		    zone->uz_namecnt);
2619 	} else
2620 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
2621 	for (c = zone->uz_ctlname; *c != '\0'; c++)
2622 		if (strchr("./\\ -", *c) != NULL)
2623 			*c = '_';
2624 
2625 	/*
2626 	 * Basic parameters at the root.
2627 	 */
2628 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
2629 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2630 	oid = zone->uz_oid;
2631 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2632 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
2633 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2634 	    "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE,
2635 	    zone, 0, sysctl_handle_uma_zone_flags, "A",
2636 	    "Allocator configuration flags");
2637 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2638 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
2639 	    "Desired per-cpu cache size");
2640 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2641 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
2642 	    "Maximum allowed per-cpu cache size");
2643 
2644 	/*
2645 	 * keg if present.
2646 	 */
2647 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
2648 		domains = vm_ndomains;
2649 	else
2650 		domains = 1;
2651 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2652 	    "keg", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2653 	keg = zone->uz_keg;
2654 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) {
2655 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2656 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
2657 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2658 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
2659 		    "Real object size with alignment");
2660 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2661 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
2662 		    "pages per-slab allocation");
2663 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2664 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
2665 		    "items available per-slab");
2666 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2667 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
2668 		    "item alignment mask");
2669 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2670 		    "reserve", CTLFLAG_RD, &keg->uk_reserve, 0,
2671 		    "number of reserved items");
2672 		SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2673 		    "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2674 		    keg, 0, sysctl_handle_uma_slab_efficiency, "I",
2675 		    "Slab utilization (100 - internal fragmentation %)");
2676 		domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid),
2677 		    OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2678 		for (i = 0; i < domains; i++) {
2679 			dom = &keg->uk_domain[i];
2680 			oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2681 			    OID_AUTO, VM_DOMAIN(i)->vmd_name,
2682 			    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2683 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2684 			    "pages", CTLFLAG_RD, &dom->ud_pages, 0,
2685 			    "Total pages currently allocated from VM");
2686 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2687 			    "free_items", CTLFLAG_RD, &dom->ud_free_items, 0,
2688 			    "Items free in the slab layer");
2689 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2690 			    "free_slabs", CTLFLAG_RD, &dom->ud_free_slabs, 0,
2691 			    "Unused slabs");
2692 		}
2693 	} else
2694 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2695 		    "name", CTLFLAG_RD, nokeg, "Keg name");
2696 
2697 	/*
2698 	 * Information about zone limits.
2699 	 */
2700 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2701 	    "limit", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2702 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2703 	    "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2704 	    zone, 0, sysctl_handle_uma_zone_items, "QU",
2705 	    "Current number of allocated items if limit is set");
2706 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2707 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
2708 	    "Maximum number of allocated and cached items");
2709 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2710 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
2711 	    "Number of threads sleeping at limit");
2712 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2713 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
2714 	    "Total zone limit sleeps");
2715 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2716 	    "bucket_max", CTLFLAG_RD, &zone->uz_bucket_max, 0,
2717 	    "Maximum number of items in each domain's bucket cache");
2718 
2719 	/*
2720 	 * Per-domain zone information.
2721 	 */
2722 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
2723 	    OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2724 	for (i = 0; i < domains; i++) {
2725 		zdom = ZDOM_GET(zone, i);
2726 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2727 		    OID_AUTO, VM_DOMAIN(i)->vmd_name,
2728 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2729 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2730 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
2731 		    "number of items in this domain");
2732 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2733 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
2734 		    "maximum item count in this period");
2735 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2736 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
2737 		    "minimum item count in this period");
2738 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2739 		    "bimin", CTLFLAG_RD, &zdom->uzd_bimin,
2740 		    "Minimum item count in this batch");
2741 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2742 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
2743 		    "Working set size");
2744 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2745 		    "limin", CTLFLAG_RD, &zdom->uzd_limin,
2746 		    "Long time minimum item count");
2747 		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2748 		    "timin", CTLFLAG_RD, &zdom->uzd_timin, 0,
2749 		    "Time since zero long time minimum item count");
2750 	}
2751 
2752 	/*
2753 	 * General statistics.
2754 	 */
2755 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2756 	    "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2757 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2758 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2759 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
2760 	    "Current number of allocated items");
2761 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2762 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2763 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
2764 	    "Total allocation calls");
2765 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2766 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2767 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
2768 	    "Total free calls");
2769 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2770 	    "fails", CTLFLAG_RD, &zone->uz_fails,
2771 	    "Number of allocation failures");
2772 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2773 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain,
2774 	    "Free calls from the wrong domain");
2775 }
2776 
2777 struct uma_zone_count {
2778 	const char	*name;
2779 	int		count;
2780 };
2781 
2782 static void
2783 zone_count(uma_zone_t zone, void *arg)
2784 {
2785 	struct uma_zone_count *cnt;
2786 
2787 	cnt = arg;
2788 	/*
2789 	 * Some zones are rapidly created with identical names and
2790 	 * destroyed out of order.  This can lead to gaps in the count.
2791 	 * Use one greater than the maximum observed for this name.
2792 	 */
2793 	if (strcmp(zone->uz_name, cnt->name) == 0)
2794 		cnt->count = MAX(cnt->count,
2795 		    zone->uz_namecnt + 1);
2796 }
2797 
2798 static void
2799 zone_update_caches(uma_zone_t zone)
2800 {
2801 	int i;
2802 
2803 	for (i = 0; i <= mp_maxid; i++) {
2804 		cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size);
2805 		cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags);
2806 	}
2807 }
2808 
2809 /*
2810  * Zone header ctor.  This initializes all fields, locks, etc.
2811  *
2812  * Arguments/Returns follow uma_ctor specifications
2813  *	udata  Actually uma_zctor_args
2814  */
2815 static int
2816 zone_ctor(void *mem, int size, void *udata, int flags)
2817 {
2818 	struct uma_zone_count cnt;
2819 	struct uma_zctor_args *arg = udata;
2820 	uma_zone_domain_t zdom;
2821 	uma_zone_t zone = mem;
2822 	uma_zone_t z;
2823 	uma_keg_t keg;
2824 	int i;
2825 
2826 	bzero(zone, size);
2827 	zone->uz_name = arg->name;
2828 	zone->uz_ctor = arg->ctor;
2829 	zone->uz_dtor = arg->dtor;
2830 	zone->uz_init = NULL;
2831 	zone->uz_fini = NULL;
2832 	zone->uz_sleeps = 0;
2833 	zone->uz_bucket_size = 0;
2834 	zone->uz_bucket_size_min = 0;
2835 	zone->uz_bucket_size_max = BUCKET_MAX;
2836 	zone->uz_flags = (arg->flags & UMA_ZONE_SMR);
2837 	zone->uz_warning = NULL;
2838 	/* The domain structures follow the cpu structures. */
2839 	zone->uz_bucket_max = ULONG_MAX;
2840 	timevalclear(&zone->uz_ratecheck);
2841 
2842 	/* Count the number of duplicate names. */
2843 	cnt.name = arg->name;
2844 	cnt.count = 0;
2845 	zone_foreach(zone_count, &cnt);
2846 	zone->uz_namecnt = cnt.count;
2847 	ZONE_CROSS_LOCK_INIT(zone);
2848 
2849 	for (i = 0; i < vm_ndomains; i++) {
2850 		zdom = ZDOM_GET(zone, i);
2851 		ZDOM_LOCK_INIT(zone, zdom, (arg->flags & UMA_ZONE_MTXCLASS));
2852 		STAILQ_INIT(&zdom->uzd_buckets);
2853 	}
2854 
2855 #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
2856 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2857 		zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR;
2858 #elif defined(KASAN)
2859 	if ((arg->flags & (UMA_ZONE_NOFREE | UMA_ZFLAG_CACHE)) != 0)
2860 		arg->flags |= UMA_ZONE_NOKASAN;
2861 #endif
2862 
2863 	/*
2864 	 * This is a pure cache zone, no kegs.
2865 	 */
2866 	if (arg->import) {
2867 		KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0,
2868 		    ("zone_ctor: Import specified for non-cache zone."));
2869 		zone->uz_flags = arg->flags;
2870 		zone->uz_size = arg->size;
2871 		zone->uz_import = arg->import;
2872 		zone->uz_release = arg->release;
2873 		zone->uz_arg = arg->arg;
2874 #ifdef NUMA
2875 		/*
2876 		 * Cache zones are round-robin unless a policy is
2877 		 * specified because they may have incompatible
2878 		 * constraints.
2879 		 */
2880 		if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2881 			zone->uz_flags |= UMA_ZONE_ROUNDROBIN;
2882 #endif
2883 		rw_wlock(&uma_rwlock);
2884 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2885 		rw_wunlock(&uma_rwlock);
2886 		goto out;
2887 	}
2888 
2889 	/*
2890 	 * Use the regular zone/keg/slab allocator.
2891 	 */
2892 	zone->uz_import = zone_import;
2893 	zone->uz_release = zone_release;
2894 	zone->uz_arg = zone;
2895 	keg = arg->keg;
2896 
2897 	if (arg->flags & UMA_ZONE_SECONDARY) {
2898 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
2899 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
2900 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2901 		zone->uz_init = arg->uminit;
2902 		zone->uz_fini = arg->fini;
2903 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2904 		rw_wlock(&uma_rwlock);
2905 		ZONE_LOCK(zone);
2906 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2907 			if (LIST_NEXT(z, uz_link) == NULL) {
2908 				LIST_INSERT_AFTER(z, zone, uz_link);
2909 				break;
2910 			}
2911 		}
2912 		ZONE_UNLOCK(zone);
2913 		rw_wunlock(&uma_rwlock);
2914 	} else if (keg == NULL) {
2915 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2916 		    arg->align, arg->flags)) == NULL)
2917 			return (ENOMEM);
2918 	} else {
2919 		struct uma_kctor_args karg;
2920 		int error;
2921 
2922 		/* We should only be here from uma_startup() */
2923 		karg.size = arg->size;
2924 		karg.uminit = arg->uminit;
2925 		karg.fini = arg->fini;
2926 		karg.align = arg->align;
2927 		karg.flags = (arg->flags & ~UMA_ZONE_SMR);
2928 		karg.zone = zone;
2929 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2930 		    flags);
2931 		if (error)
2932 			return (error);
2933 	}
2934 
2935 	/* Inherit properties from the keg. */
2936 	zone->uz_keg = keg;
2937 	zone->uz_size = keg->uk_size;
2938 	zone->uz_flags |= (keg->uk_flags &
2939 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2940 
2941 out:
2942 	if (booted >= BOOT_PCPU) {
2943 		zone_alloc_counters(zone, NULL);
2944 		if (booted >= BOOT_RUNNING)
2945 			zone_alloc_sysctl(zone, NULL);
2946 	} else {
2947 		zone->uz_allocs = EARLY_COUNTER;
2948 		zone->uz_frees = EARLY_COUNTER;
2949 		zone->uz_fails = EARLY_COUNTER;
2950 	}
2951 
2952 	/* Caller requests a private SMR context. */
2953 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
2954 		zone->uz_smr = smr_create(zone->uz_name, 0, 0);
2955 
2956 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2957 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2958 	    ("Invalid zone flag combination"));
2959 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2960 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2961 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2962 		zone->uz_bucket_size = BUCKET_MAX;
2963 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2964 		zone->uz_bucket_size = 0;
2965 	else
2966 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2967 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2968 	if (zone->uz_dtor != NULL || zone->uz_ctor != NULL)
2969 		zone->uz_flags |= UMA_ZFLAG_CTORDTOR;
2970 	zone_update_caches(zone);
2971 
2972 	return (0);
2973 }
2974 
2975 /*
2976  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2977  * table and removes the keg from the global list.
2978  *
2979  * Arguments/Returns follow uma_dtor specifications
2980  *	udata  unused
2981  */
2982 static void
2983 keg_dtor(void *arg, int size, void *udata)
2984 {
2985 	uma_keg_t keg;
2986 	uint32_t free, pages;
2987 	int i;
2988 
2989 	keg = (uma_keg_t)arg;
2990 	free = pages = 0;
2991 	for (i = 0; i < vm_ndomains; i++) {
2992 		free += keg->uk_domain[i].ud_free_items;
2993 		pages += keg->uk_domain[i].ud_pages;
2994 		KEG_LOCK_FINI(keg, i);
2995 	}
2996 	if (pages != 0)
2997 		printf("Freed UMA keg (%s) was not empty (%u items). "
2998 		    " Lost %u pages of memory.\n",
2999 		    keg->uk_name ? keg->uk_name : "",
3000 		    pages / keg->uk_ppera * keg->uk_ipers - free, pages);
3001 
3002 	hash_free(&keg->uk_hash);
3003 }
3004 
3005 /*
3006  * Zone header dtor.
3007  *
3008  * Arguments/Returns follow uma_dtor specifications
3009  *	udata  unused
3010  */
3011 static void
3012 zone_dtor(void *arg, int size, void *udata)
3013 {
3014 	uma_zone_t zone;
3015 	uma_keg_t keg;
3016 	int i;
3017 
3018 	zone = (uma_zone_t)arg;
3019 
3020 	sysctl_remove_oid(zone->uz_oid, 1, 1);
3021 
3022 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
3023 		cache_drain(zone);
3024 
3025 	rw_wlock(&uma_rwlock);
3026 	LIST_REMOVE(zone, uz_link);
3027 	rw_wunlock(&uma_rwlock);
3028 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
3029 		keg = zone->uz_keg;
3030 		keg->uk_reserve = 0;
3031 	}
3032 	zone_reclaim(zone, UMA_ANYDOMAIN, M_WAITOK, true);
3033 
3034 	/*
3035 	 * We only destroy kegs from non secondary/non cache zones.
3036 	 */
3037 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
3038 		keg = zone->uz_keg;
3039 		rw_wlock(&uma_rwlock);
3040 		LIST_REMOVE(keg, uk_link);
3041 		rw_wunlock(&uma_rwlock);
3042 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
3043 	}
3044 	counter_u64_free(zone->uz_allocs);
3045 	counter_u64_free(zone->uz_frees);
3046 	counter_u64_free(zone->uz_fails);
3047 	counter_u64_free(zone->uz_xdomain);
3048 	free(zone->uz_ctlname, M_UMA);
3049 	for (i = 0; i < vm_ndomains; i++)
3050 		ZDOM_LOCK_FINI(ZDOM_GET(zone, i));
3051 	ZONE_CROSS_LOCK_FINI(zone);
3052 }
3053 
3054 static void
3055 zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg)
3056 {
3057 	uma_keg_t keg;
3058 	uma_zone_t zone;
3059 
3060 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
3061 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
3062 			zfunc(zone, arg);
3063 	}
3064 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
3065 		zfunc(zone, arg);
3066 }
3067 
3068 /*
3069  * Traverses every zone in the system and calls a callback
3070  *
3071  * Arguments:
3072  *	zfunc  A pointer to a function which accepts a zone
3073  *		as an argument.
3074  *
3075  * Returns:
3076  *	Nothing
3077  */
3078 static void
3079 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
3080 {
3081 
3082 	rw_rlock(&uma_rwlock);
3083 	zone_foreach_unlocked(zfunc, arg);
3084 	rw_runlock(&uma_rwlock);
3085 }
3086 
3087 /*
3088  * Initialize the kernel memory allocator.  This is done after pages can be
3089  * allocated but before general KVA is available.
3090  */
3091 void
3092 uma_startup1(vm_offset_t virtual_avail)
3093 {
3094 	struct uma_zctor_args args;
3095 	size_t ksize, zsize, size;
3096 	uma_keg_t primarykeg;
3097 	uintptr_t m;
3098 	int domain;
3099 	uint8_t pflag;
3100 
3101 	bootstart = bootmem = virtual_avail;
3102 
3103 	rw_init(&uma_rwlock, "UMA lock");
3104 	sx_init(&uma_reclaim_lock, "umareclaim");
3105 
3106 	ksize = sizeof(struct uma_keg) +
3107 	    (sizeof(struct uma_domain) * vm_ndomains);
3108 	ksize = roundup(ksize, UMA_SUPER_ALIGN);
3109 	zsize = sizeof(struct uma_zone) +
3110 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
3111 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
3112 	zsize = roundup(zsize, UMA_SUPER_ALIGN);
3113 
3114 	/* Allocate the zone of zones, zone of kegs, and zone of zones keg. */
3115 	size = (zsize * 2) + ksize;
3116 	for (domain = 0; domain < vm_ndomains; domain++) {
3117 		m = (uintptr_t)startup_alloc(NULL, size, domain, &pflag,
3118 		    M_NOWAIT | M_ZERO);
3119 		if (m != 0)
3120 			break;
3121 	}
3122 	zones = (uma_zone_t)m;
3123 	m += zsize;
3124 	kegs = (uma_zone_t)m;
3125 	m += zsize;
3126 	primarykeg = (uma_keg_t)m;
3127 
3128 	/* "manually" create the initial zone */
3129 	memset(&args, 0, sizeof(args));
3130 	args.name = "UMA Kegs";
3131 	args.size = ksize;
3132 	args.ctor = keg_ctor;
3133 	args.dtor = keg_dtor;
3134 	args.uminit = zero_init;
3135 	args.fini = NULL;
3136 	args.keg = primarykeg;
3137 	args.align = UMA_SUPER_ALIGN - 1;
3138 	args.flags = UMA_ZFLAG_INTERNAL;
3139 	zone_ctor(kegs, zsize, &args, M_WAITOK);
3140 
3141 	args.name = "UMA Zones";
3142 	args.size = zsize;
3143 	args.ctor = zone_ctor;
3144 	args.dtor = zone_dtor;
3145 	args.uminit = zero_init;
3146 	args.fini = NULL;
3147 	args.keg = NULL;
3148 	args.align = UMA_SUPER_ALIGN - 1;
3149 	args.flags = UMA_ZFLAG_INTERNAL;
3150 	zone_ctor(zones, zsize, &args, M_WAITOK);
3151 
3152 	/* Now make zones for slab headers */
3153 	slabzones[0] = uma_zcreate("UMA Slabs 0", SLABZONE0_SIZE,
3154 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3155 	slabzones[1] = uma_zcreate("UMA Slabs 1", SLABZONE1_SIZE,
3156 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3157 
3158 	hashzone = uma_zcreate("UMA Hash",
3159 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
3160 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3161 
3162 	bucket_init();
3163 	smr_init();
3164 }
3165 
3166 #ifndef UMA_MD_SMALL_ALLOC
3167 extern void vm_radix_reserve_kva(void);
3168 #endif
3169 
3170 /*
3171  * Advertise the availability of normal kva allocations and switch to
3172  * the default back-end allocator.  Marks the KVA we consumed on startup
3173  * as used in the map.
3174  */
3175 void
3176 uma_startup2(void)
3177 {
3178 
3179 	if (bootstart != bootmem) {
3180 		vm_map_lock(kernel_map);
3181 		(void)vm_map_insert(kernel_map, NULL, 0, bootstart, bootmem,
3182 		    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
3183 		vm_map_unlock(kernel_map);
3184 	}
3185 
3186 #ifndef UMA_MD_SMALL_ALLOC
3187 	/* Set up radix zone to use noobj_alloc. */
3188 	vm_radix_reserve_kva();
3189 #endif
3190 
3191 	booted = BOOT_KVA;
3192 	zone_foreach_unlocked(zone_kva_available, NULL);
3193 	bucket_enable();
3194 }
3195 
3196 /*
3197  * Allocate counters as early as possible so that boot-time allocations are
3198  * accounted more precisely.
3199  */
3200 static void
3201 uma_startup_pcpu(void *arg __unused)
3202 {
3203 
3204 	zone_foreach_unlocked(zone_alloc_counters, NULL);
3205 	booted = BOOT_PCPU;
3206 }
3207 SYSINIT(uma_startup_pcpu, SI_SUB_COUNTER, SI_ORDER_ANY, uma_startup_pcpu, NULL);
3208 
3209 /*
3210  * Finish our initialization steps.
3211  */
3212 static void
3213 uma_startup3(void *arg __unused)
3214 {
3215 
3216 #ifdef INVARIANTS
3217 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
3218 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
3219 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
3220 #endif
3221 	zone_foreach_unlocked(zone_alloc_sysctl, NULL);
3222 	callout_init(&uma_callout, 1);
3223 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
3224 	booted = BOOT_RUNNING;
3225 
3226 	EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL,
3227 	    EVENTHANDLER_PRI_FIRST);
3228 }
3229 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
3230 
3231 static void
3232 uma_shutdown(void)
3233 {
3234 
3235 	booted = BOOT_SHUTDOWN;
3236 }
3237 
3238 static uma_keg_t
3239 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
3240 		int align, uint32_t flags)
3241 {
3242 	struct uma_kctor_args args;
3243 
3244 	args.size = size;
3245 	args.uminit = uminit;
3246 	args.fini = fini;
3247 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
3248 	args.flags = flags;
3249 	args.zone = zone;
3250 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
3251 }
3252 
3253 /* Public functions */
3254 /* See uma.h */
3255 void
3256 uma_set_align(int align)
3257 {
3258 
3259 	if (align != UMA_ALIGN_CACHE)
3260 		uma_align_cache = align;
3261 }
3262 
3263 /* See uma.h */
3264 uma_zone_t
3265 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
3266 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
3267 
3268 {
3269 	struct uma_zctor_args args;
3270 	uma_zone_t res;
3271 
3272 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
3273 	    align, name));
3274 
3275 	/* This stuff is essential for the zone ctor */
3276 	memset(&args, 0, sizeof(args));
3277 	args.name = name;
3278 	args.size = size;
3279 	args.ctor = ctor;
3280 	args.dtor = dtor;
3281 	args.uminit = uminit;
3282 	args.fini = fini;
3283 #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
3284 	/*
3285 	 * Inject procedures which check for memory use after free if we are
3286 	 * allowed to scramble the memory while it is not allocated.  This
3287 	 * requires that: UMA is actually able to access the memory, no init
3288 	 * or fini procedures, no dependency on the initial value of the
3289 	 * memory, and no (legitimate) use of the memory after free.  Note,
3290 	 * the ctor and dtor do not need to be empty.
3291 	 */
3292 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH |
3293 	    UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) {
3294 		args.uminit = trash_init;
3295 		args.fini = trash_fini;
3296 	}
3297 #endif
3298 	args.align = align;
3299 	args.flags = flags;
3300 	args.keg = NULL;
3301 
3302 	sx_xlock(&uma_reclaim_lock);
3303 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
3304 	sx_xunlock(&uma_reclaim_lock);
3305 
3306 	return (res);
3307 }
3308 
3309 /* See uma.h */
3310 uma_zone_t
3311 uma_zsecond_create(const char *name, uma_ctor ctor, uma_dtor dtor,
3312     uma_init zinit, uma_fini zfini, uma_zone_t primary)
3313 {
3314 	struct uma_zctor_args args;
3315 	uma_keg_t keg;
3316 	uma_zone_t res;
3317 
3318 	keg = primary->uz_keg;
3319 	memset(&args, 0, sizeof(args));
3320 	args.name = name;
3321 	args.size = keg->uk_size;
3322 	args.ctor = ctor;
3323 	args.dtor = dtor;
3324 	args.uminit = zinit;
3325 	args.fini = zfini;
3326 	args.align = keg->uk_align;
3327 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
3328 	args.keg = keg;
3329 
3330 	sx_xlock(&uma_reclaim_lock);
3331 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
3332 	sx_xunlock(&uma_reclaim_lock);
3333 
3334 	return (res);
3335 }
3336 
3337 /* See uma.h */
3338 uma_zone_t
3339 uma_zcache_create(const char *name, int size, uma_ctor ctor, uma_dtor dtor,
3340     uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease,
3341     void *arg, int flags)
3342 {
3343 	struct uma_zctor_args args;
3344 
3345 	memset(&args, 0, sizeof(args));
3346 	args.name = name;
3347 	args.size = size;
3348 	args.ctor = ctor;
3349 	args.dtor = dtor;
3350 	args.uminit = zinit;
3351 	args.fini = zfini;
3352 	args.import = zimport;
3353 	args.release = zrelease;
3354 	args.arg = arg;
3355 	args.align = 0;
3356 	args.flags = flags | UMA_ZFLAG_CACHE;
3357 
3358 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
3359 }
3360 
3361 /* See uma.h */
3362 void
3363 uma_zdestroy(uma_zone_t zone)
3364 {
3365 
3366 	/*
3367 	 * Large slabs are expensive to reclaim, so don't bother doing
3368 	 * unnecessary work if we're shutting down.
3369 	 */
3370 	if (booted == BOOT_SHUTDOWN &&
3371 	    zone->uz_fini == NULL && zone->uz_release == zone_release)
3372 		return;
3373 	sx_xlock(&uma_reclaim_lock);
3374 	zone_free_item(zones, zone, NULL, SKIP_NONE);
3375 	sx_xunlock(&uma_reclaim_lock);
3376 }
3377 
3378 void
3379 uma_zwait(uma_zone_t zone)
3380 {
3381 
3382 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
3383 		uma_zfree_smr(zone, uma_zalloc_smr(zone, M_WAITOK));
3384 	else if ((zone->uz_flags & UMA_ZONE_PCPU) != 0)
3385 		uma_zfree_pcpu(zone, uma_zalloc_pcpu(zone, M_WAITOK));
3386 	else
3387 		uma_zfree(zone, uma_zalloc(zone, M_WAITOK));
3388 }
3389 
3390 void *
3391 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
3392 {
3393 	void *item, *pcpu_item;
3394 #ifdef SMP
3395 	int i;
3396 
3397 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
3398 #endif
3399 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
3400 	if (item == NULL)
3401 		return (NULL);
3402 	pcpu_item = zpcpu_base_to_offset(item);
3403 	if (flags & M_ZERO) {
3404 #ifdef SMP
3405 		for (i = 0; i <= mp_maxid; i++)
3406 			bzero(zpcpu_get_cpu(pcpu_item, i), zone->uz_size);
3407 #else
3408 		bzero(item, zone->uz_size);
3409 #endif
3410 	}
3411 	return (pcpu_item);
3412 }
3413 
3414 /*
3415  * A stub while both regular and pcpu cases are identical.
3416  */
3417 void
3418 uma_zfree_pcpu_arg(uma_zone_t zone, void *pcpu_item, void *udata)
3419 {
3420 	void *item;
3421 
3422 #ifdef SMP
3423 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
3424 #endif
3425 
3426         /* uma_zfree_pcu_*(..., NULL) does nothing, to match free(9). */
3427         if (pcpu_item == NULL)
3428                 return;
3429 
3430 	item = zpcpu_offset_to_base(pcpu_item);
3431 	uma_zfree_arg(zone, item, udata);
3432 }
3433 
3434 static inline void *
3435 item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags,
3436     void *item)
3437 {
3438 #ifdef INVARIANTS
3439 	bool skipdbg;
3440 #endif
3441 
3442 	kasan_mark_item_valid(zone, item);
3443 	kmsan_mark_item_uninitialized(zone, item);
3444 
3445 #ifdef INVARIANTS
3446 	skipdbg = uma_dbg_zskip(zone, item);
3447 	if (!skipdbg && (uz_flags & UMA_ZFLAG_TRASH) != 0 &&
3448 	    zone->uz_ctor != trash_ctor)
3449 		trash_ctor(item, size, udata, flags);
3450 #endif
3451 
3452 	/* Check flags before loading ctor pointer. */
3453 	if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) &&
3454 	    __predict_false(zone->uz_ctor != NULL) &&
3455 	    zone->uz_ctor(item, size, udata, flags) != 0) {
3456 		counter_u64_add(zone->uz_fails, 1);
3457 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
3458 		return (NULL);
3459 	}
3460 #ifdef INVARIANTS
3461 	if (!skipdbg)
3462 		uma_dbg_alloc(zone, NULL, item);
3463 #endif
3464 	if (__predict_false(flags & M_ZERO))
3465 		return (memset(item, 0, size));
3466 
3467 	return (item);
3468 }
3469 
3470 static inline void
3471 item_dtor(uma_zone_t zone, void *item, int size, void *udata,
3472     enum zfreeskip skip)
3473 {
3474 #ifdef INVARIANTS
3475 	bool skipdbg;
3476 
3477 	skipdbg = uma_dbg_zskip(zone, item);
3478 	if (skip == SKIP_NONE && !skipdbg) {
3479 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
3480 			uma_dbg_free(zone, udata, item);
3481 		else
3482 			uma_dbg_free(zone, NULL, item);
3483 	}
3484 #endif
3485 	if (__predict_true(skip < SKIP_DTOR)) {
3486 		if (zone->uz_dtor != NULL)
3487 			zone->uz_dtor(item, size, udata);
3488 #ifdef INVARIANTS
3489 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
3490 		    zone->uz_dtor != trash_dtor)
3491 			trash_dtor(item, size, udata);
3492 #endif
3493 	}
3494 	kasan_mark_item_invalid(zone, item);
3495 }
3496 
3497 #ifdef NUMA
3498 static int
3499 item_domain(void *item)
3500 {
3501 	int domain;
3502 
3503 	domain = vm_phys_domain(vtophys(item));
3504 	KASSERT(domain >= 0 && domain < vm_ndomains,
3505 	    ("%s: unknown domain for item %p", __func__, item));
3506 	return (domain);
3507 }
3508 #endif
3509 
3510 #if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS)
3511 #define	UMA_ZALLOC_DEBUG
3512 static int
3513 uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags)
3514 {
3515 	int error;
3516 
3517 	error = 0;
3518 #ifdef WITNESS
3519 	if (flags & M_WAITOK) {
3520 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3521 		    "uma_zalloc_debug: zone \"%s\"", zone->uz_name);
3522 	}
3523 #endif
3524 
3525 #ifdef INVARIANTS
3526 	KASSERT((flags & M_EXEC) == 0,
3527 	    ("uma_zalloc_debug: called with M_EXEC"));
3528 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3529 	    ("uma_zalloc_debug: called within spinlock or critical section"));
3530 	KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0,
3531 	    ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO"));
3532 #endif
3533 
3534 #ifdef DEBUG_MEMGUARD
3535 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) {
3536 		void *item;
3537 		item = memguard_alloc(zone->uz_size, flags);
3538 		if (item != NULL) {
3539 			error = EJUSTRETURN;
3540 			if (zone->uz_init != NULL &&
3541 			    zone->uz_init(item, zone->uz_size, flags) != 0) {
3542 				*itemp = NULL;
3543 				return (error);
3544 			}
3545 			if (zone->uz_ctor != NULL &&
3546 			    zone->uz_ctor(item, zone->uz_size, udata,
3547 			    flags) != 0) {
3548 				counter_u64_add(zone->uz_fails, 1);
3549 			    	zone->uz_fini(item, zone->uz_size);
3550 				*itemp = NULL;
3551 				return (error);
3552 			}
3553 			*itemp = item;
3554 			return (error);
3555 		}
3556 		/* This is unfortunate but should not be fatal. */
3557 	}
3558 #endif
3559 	return (error);
3560 }
3561 
3562 static int
3563 uma_zfree_debug(uma_zone_t zone, void *item, void *udata)
3564 {
3565 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3566 	    ("uma_zfree_debug: called with spinlock or critical section held"));
3567 
3568 #ifdef DEBUG_MEMGUARD
3569 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && is_memguard_addr(item)) {
3570 		if (zone->uz_dtor != NULL)
3571 			zone->uz_dtor(item, zone->uz_size, udata);
3572 		if (zone->uz_fini != NULL)
3573 			zone->uz_fini(item, zone->uz_size);
3574 		memguard_free(item);
3575 		return (EJUSTRETURN);
3576 	}
3577 #endif
3578 	return (0);
3579 }
3580 #endif
3581 
3582 static inline void *
3583 cache_alloc_item(uma_zone_t zone, uma_cache_t cache, uma_cache_bucket_t bucket,
3584     void *udata, int flags)
3585 {
3586 	void *item;
3587 	int size, uz_flags;
3588 
3589 	item = cache_bucket_pop(cache, bucket);
3590 	size = cache_uz_size(cache);
3591 	uz_flags = cache_uz_flags(cache);
3592 	critical_exit();
3593 	return (item_ctor(zone, uz_flags, size, udata, flags, item));
3594 }
3595 
3596 static __noinline void *
3597 cache_alloc_retry(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3598 {
3599 	uma_cache_bucket_t bucket;
3600 	int domain;
3601 
3602 	while (cache_alloc(zone, cache, udata, flags)) {
3603 		cache = &zone->uz_cpu[curcpu];
3604 		bucket = &cache->uc_allocbucket;
3605 		if (__predict_false(bucket->ucb_cnt == 0))
3606 			continue;
3607 		return (cache_alloc_item(zone, cache, bucket, udata, flags));
3608 	}
3609 	critical_exit();
3610 
3611 	/*
3612 	 * We can not get a bucket so try to return a single item.
3613 	 */
3614 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
3615 		domain = PCPU_GET(domain);
3616 	else
3617 		domain = UMA_ANYDOMAIN;
3618 	return (zone_alloc_item(zone, udata, domain, flags));
3619 }
3620 
3621 /* See uma.h */
3622 void *
3623 uma_zalloc_smr(uma_zone_t zone, int flags)
3624 {
3625 	uma_cache_bucket_t bucket;
3626 	uma_cache_t cache;
3627 
3628 	CTR3(KTR_UMA, "uma_zalloc_smr zone %s(%p) flags %d", zone->uz_name,
3629 	    zone, flags);
3630 
3631 #ifdef UMA_ZALLOC_DEBUG
3632 	void *item;
3633 
3634 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
3635 	    ("uma_zalloc_arg: called with non-SMR zone."));
3636 	if (uma_zalloc_debug(zone, &item, NULL, flags) == EJUSTRETURN)
3637 		return (item);
3638 #endif
3639 
3640 	critical_enter();
3641 	cache = &zone->uz_cpu[curcpu];
3642 	bucket = &cache->uc_allocbucket;
3643 	if (__predict_false(bucket->ucb_cnt == 0))
3644 		return (cache_alloc_retry(zone, cache, NULL, flags));
3645 	return (cache_alloc_item(zone, cache, bucket, NULL, flags));
3646 }
3647 
3648 /* See uma.h */
3649 void *
3650 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
3651 {
3652 	uma_cache_bucket_t bucket;
3653 	uma_cache_t cache;
3654 
3655 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3656 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3657 
3658 	/* This is the fast path allocation */
3659 	CTR3(KTR_UMA, "uma_zalloc_arg zone %s(%p) flags %d", zone->uz_name,
3660 	    zone, flags);
3661 
3662 #ifdef UMA_ZALLOC_DEBUG
3663 	void *item;
3664 
3665 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3666 	    ("uma_zalloc_arg: called with SMR zone."));
3667 	if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN)
3668 		return (item);
3669 #endif
3670 
3671 	/*
3672 	 * If possible, allocate from the per-CPU cache.  There are two
3673 	 * requirements for safe access to the per-CPU cache: (1) the thread
3674 	 * accessing the cache must not be preempted or yield during access,
3675 	 * and (2) the thread must not migrate CPUs without switching which
3676 	 * cache it accesses.  We rely on a critical section to prevent
3677 	 * preemption and migration.  We release the critical section in
3678 	 * order to acquire the zone mutex if we are unable to allocate from
3679 	 * the current cache; when we re-acquire the critical section, we
3680 	 * must detect and handle migration if it has occurred.
3681 	 */
3682 	critical_enter();
3683 	cache = &zone->uz_cpu[curcpu];
3684 	bucket = &cache->uc_allocbucket;
3685 	if (__predict_false(bucket->ucb_cnt == 0))
3686 		return (cache_alloc_retry(zone, cache, udata, flags));
3687 	return (cache_alloc_item(zone, cache, bucket, udata, flags));
3688 }
3689 
3690 /*
3691  * Replenish an alloc bucket and possibly restore an old one.  Called in
3692  * a critical section.  Returns in a critical section.
3693  *
3694  * A false return value indicates an allocation failure.
3695  * A true return value indicates success and the caller should retry.
3696  */
3697 static __noinline bool
3698 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3699 {
3700 	uma_bucket_t bucket;
3701 	int curdomain, domain;
3702 	bool new;
3703 
3704 	CRITICAL_ASSERT(curthread);
3705 
3706 	/*
3707 	 * If we have run out of items in our alloc bucket see
3708 	 * if we can switch with the free bucket.
3709 	 *
3710 	 * SMR Zones can't re-use the free bucket until the sequence has
3711 	 * expired.
3712 	 */
3713 	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) == 0 &&
3714 	    cache->uc_freebucket.ucb_cnt != 0) {
3715 		cache_bucket_swap(&cache->uc_freebucket,
3716 		    &cache->uc_allocbucket);
3717 		return (true);
3718 	}
3719 
3720 	/*
3721 	 * Discard any empty allocation bucket while we hold no locks.
3722 	 */
3723 	bucket = cache_bucket_unload_alloc(cache);
3724 	critical_exit();
3725 
3726 	if (bucket != NULL) {
3727 		KASSERT(bucket->ub_cnt == 0,
3728 		    ("cache_alloc: Entered with non-empty alloc bucket."));
3729 		bucket_free(zone, bucket, udata);
3730 	}
3731 
3732 	/*
3733 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
3734 	 * we must go back to the zone.  This requires the zdom lock, so we
3735 	 * must drop the critical section, then re-acquire it when we go back
3736 	 * to the cache.  Since the critical section is released, we may be
3737 	 * preempted or migrate.  As such, make sure not to maintain any
3738 	 * thread-local state specific to the cache from prior to releasing
3739 	 * the critical section.
3740 	 */
3741 	domain = PCPU_GET(domain);
3742 	if ((cache_uz_flags(cache) & UMA_ZONE_ROUNDROBIN) != 0 ||
3743 	    VM_DOMAIN_EMPTY(domain))
3744 		domain = zone_domain_highest(zone, domain);
3745 	bucket = cache_fetch_bucket(zone, cache, domain);
3746 	if (bucket == NULL && zone->uz_bucket_size != 0 && !bucketdisable) {
3747 		bucket = zone_alloc_bucket(zone, udata, domain, flags);
3748 		new = true;
3749 	} else {
3750 		new = false;
3751 	}
3752 
3753 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
3754 	    zone->uz_name, zone, bucket);
3755 	if (bucket == NULL) {
3756 		critical_enter();
3757 		return (false);
3758 	}
3759 
3760 	/*
3761 	 * See if we lost the race or were migrated.  Cache the
3762 	 * initialized bucket to make this less likely or claim
3763 	 * the memory directly.
3764 	 */
3765 	critical_enter();
3766 	cache = &zone->uz_cpu[curcpu];
3767 	if (cache->uc_allocbucket.ucb_bucket == NULL &&
3768 	    ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) == 0 ||
3769 	    (curdomain = PCPU_GET(domain)) == domain ||
3770 	    VM_DOMAIN_EMPTY(curdomain))) {
3771 		if (new)
3772 			atomic_add_long(&ZDOM_GET(zone, domain)->uzd_imax,
3773 			    bucket->ub_cnt);
3774 		cache_bucket_load_alloc(cache, bucket);
3775 		return (true);
3776 	}
3777 
3778 	/*
3779 	 * We lost the race, release this bucket and start over.
3780 	 */
3781 	critical_exit();
3782 	zone_put_bucket(zone, domain, bucket, udata, !new);
3783 	critical_enter();
3784 
3785 	return (true);
3786 }
3787 
3788 void *
3789 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
3790 {
3791 #ifdef NUMA
3792 	uma_bucket_t bucket;
3793 	uma_zone_domain_t zdom;
3794 	void *item;
3795 #endif
3796 
3797 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3798 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3799 
3800 	/* This is the fast path allocation */
3801 	CTR4(KTR_UMA, "uma_zalloc_domain zone %s(%p) domain %d flags %d",
3802 	    zone->uz_name, zone, domain, flags);
3803 
3804 	if (flags & M_WAITOK) {
3805 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3806 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
3807 	}
3808 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3809 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
3810 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3811 	    ("uma_zalloc_domain: called with SMR zone."));
3812 #ifdef NUMA
3813 	KASSERT((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0,
3814 	    ("uma_zalloc_domain: called with non-FIRSTTOUCH zone."));
3815 
3816 	if (vm_ndomains == 1)
3817 		return (uma_zalloc_arg(zone, udata, flags));
3818 
3819 	/*
3820 	 * Try to allocate from the bucket cache before falling back to the keg.
3821 	 * We could try harder and attempt to allocate from per-CPU caches or
3822 	 * the per-domain cross-domain buckets, but the complexity is probably
3823 	 * not worth it.  It is more important that frees of previous
3824 	 * cross-domain allocations do not blow up the cache.
3825 	 */
3826 	zdom = zone_domain_lock(zone, domain);
3827 	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
3828 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
3829 #ifdef INVARIANTS
3830 		bucket->ub_bucket[bucket->ub_cnt - 1] = NULL;
3831 #endif
3832 		bucket->ub_cnt--;
3833 		zone_put_bucket(zone, domain, bucket, udata, true);
3834 		item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata,
3835 		    flags, item);
3836 		if (item != NULL) {
3837 			KASSERT(item_domain(item) == domain,
3838 			    ("%s: bucket cache item %p from wrong domain",
3839 			    __func__, item));
3840 			counter_u64_add(zone->uz_allocs, 1);
3841 		}
3842 		return (item);
3843 	}
3844 	ZDOM_UNLOCK(zdom);
3845 	return (zone_alloc_item(zone, udata, domain, flags));
3846 #else
3847 	return (uma_zalloc_arg(zone, udata, flags));
3848 #endif
3849 }
3850 
3851 /*
3852  * Find a slab with some space.  Prefer slabs that are partially used over those
3853  * that are totally full.  This helps to reduce fragmentation.
3854  *
3855  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
3856  * only 'domain'.
3857  */
3858 static uma_slab_t
3859 keg_first_slab(uma_keg_t keg, int domain, bool rr)
3860 {
3861 	uma_domain_t dom;
3862 	uma_slab_t slab;
3863 	int start;
3864 
3865 	KASSERT(domain >= 0 && domain < vm_ndomains,
3866 	    ("keg_first_slab: domain %d out of range", domain));
3867 	KEG_LOCK_ASSERT(keg, domain);
3868 
3869 	slab = NULL;
3870 	start = domain;
3871 	do {
3872 		dom = &keg->uk_domain[domain];
3873 		if ((slab = LIST_FIRST(&dom->ud_part_slab)) != NULL)
3874 			return (slab);
3875 		if ((slab = LIST_FIRST(&dom->ud_free_slab)) != NULL) {
3876 			LIST_REMOVE(slab, us_link);
3877 			dom->ud_free_slabs--;
3878 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3879 			return (slab);
3880 		}
3881 		if (rr)
3882 			domain = (domain + 1) % vm_ndomains;
3883 	} while (domain != start);
3884 
3885 	return (NULL);
3886 }
3887 
3888 /*
3889  * Fetch an existing slab from a free or partial list.  Returns with the
3890  * keg domain lock held if a slab was found or unlocked if not.
3891  */
3892 static uma_slab_t
3893 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
3894 {
3895 	uma_slab_t slab;
3896 	uint32_t reserve;
3897 
3898 	/* HASH has a single free list. */
3899 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
3900 		domain = 0;
3901 
3902 	KEG_LOCK(keg, domain);
3903 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
3904 	if (keg->uk_domain[domain].ud_free_items <= reserve ||
3905 	    (slab = keg_first_slab(keg, domain, rr)) == NULL) {
3906 		KEG_UNLOCK(keg, domain);
3907 		return (NULL);
3908 	}
3909 	return (slab);
3910 }
3911 
3912 static uma_slab_t
3913 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
3914 {
3915 	struct vm_domainset_iter di;
3916 	uma_slab_t slab;
3917 	int aflags, domain;
3918 	bool rr;
3919 
3920 	KASSERT((flags & (M_WAITOK | M_NOVM)) != (M_WAITOK | M_NOVM),
3921 	    ("%s: invalid flags %#x", __func__, flags));
3922 
3923 restart:
3924 	/*
3925 	 * Use the keg's policy if upper layers haven't already specified a
3926 	 * domain (as happens with first-touch zones).
3927 	 *
3928 	 * To avoid races we run the iterator with the keg lock held, but that
3929 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
3930 	 * clear M_WAITOK and handle low memory conditions locally.
3931 	 */
3932 	rr = rdomain == UMA_ANYDOMAIN;
3933 	if (rr) {
3934 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
3935 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3936 		    &aflags);
3937 	} else {
3938 		aflags = flags;
3939 		domain = rdomain;
3940 	}
3941 
3942 	for (;;) {
3943 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3944 		if (slab != NULL)
3945 			return (slab);
3946 
3947 		/*
3948 		 * M_NOVM is used to break the recursion that can otherwise
3949 		 * occur if low-level memory management routines use UMA.
3950 		 */
3951 		if ((flags & M_NOVM) == 0) {
3952 			slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3953 			if (slab != NULL)
3954 				return (slab);
3955 		}
3956 
3957 		if (!rr) {
3958 			if ((flags & M_USE_RESERVE) != 0) {
3959 				/*
3960 				 * Drain reserves from other domains before
3961 				 * giving up or sleeping.  It may be useful to
3962 				 * support per-domain reserves eventually.
3963 				 */
3964 				rdomain = UMA_ANYDOMAIN;
3965 				goto restart;
3966 			}
3967 			if ((flags & M_WAITOK) == 0)
3968 				break;
3969 			vm_wait_domain(domain);
3970 		} else if (vm_domainset_iter_policy(&di, &domain) != 0) {
3971 			if ((flags & M_WAITOK) != 0) {
3972 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
3973 				goto restart;
3974 			}
3975 			break;
3976 		}
3977 	}
3978 
3979 	/*
3980 	 * We might not have been able to get a slab but another cpu
3981 	 * could have while we were unlocked.  Check again before we
3982 	 * fail.
3983 	 */
3984 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL)
3985 		return (slab);
3986 
3987 	return (NULL);
3988 }
3989 
3990 static void *
3991 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3992 {
3993 	uma_domain_t dom;
3994 	void *item;
3995 	int freei;
3996 
3997 	KEG_LOCK_ASSERT(keg, slab->us_domain);
3998 
3999 	dom = &keg->uk_domain[slab->us_domain];
4000 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
4001 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
4002 	item = slab_item(slab, keg, freei);
4003 	slab->us_freecount--;
4004 	dom->ud_free_items--;
4005 
4006 	/*
4007 	 * Move this slab to the full list.  It must be on the partial list, so
4008 	 * we do not need to update the free slab count.  In particular,
4009 	 * keg_fetch_slab() always returns slabs on the partial list.
4010 	 */
4011 	if (slab->us_freecount == 0) {
4012 		LIST_REMOVE(slab, us_link);
4013 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
4014 	}
4015 
4016 	return (item);
4017 }
4018 
4019 static int
4020 zone_import(void *arg, void **bucket, int max, int domain, int flags)
4021 {
4022 	uma_domain_t dom;
4023 	uma_zone_t zone;
4024 	uma_slab_t slab;
4025 	uma_keg_t keg;
4026 #ifdef NUMA
4027 	int stripe;
4028 #endif
4029 	int i;
4030 
4031 	zone = arg;
4032 	slab = NULL;
4033 	keg = zone->uz_keg;
4034 	/* Try to keep the buckets totally full */
4035 	for (i = 0; i < max; ) {
4036 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
4037 			break;
4038 #ifdef NUMA
4039 		stripe = howmany(max, vm_ndomains);
4040 #endif
4041 		dom = &keg->uk_domain[slab->us_domain];
4042 		do {
4043 			bucket[i++] = slab_alloc_item(keg, slab);
4044 			if (keg->uk_reserve > 0 &&
4045 			    dom->ud_free_items <= keg->uk_reserve) {
4046 				/*
4047 				 * Avoid depleting the reserve after a
4048 				 * successful item allocation, even if
4049 				 * M_USE_RESERVE is specified.
4050 				 */
4051 				KEG_UNLOCK(keg, slab->us_domain);
4052 				goto out;
4053 			}
4054 #ifdef NUMA
4055 			/*
4056 			 * If the zone is striped we pick a new slab for every
4057 			 * N allocations.  Eliminating this conditional will
4058 			 * instead pick a new domain for each bucket rather
4059 			 * than stripe within each bucket.  The current option
4060 			 * produces more fragmentation and requires more cpu
4061 			 * time but yields better distribution.
4062 			 */
4063 			if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 &&
4064 			    vm_ndomains > 1 && --stripe == 0)
4065 				break;
4066 #endif
4067 		} while (slab->us_freecount != 0 && i < max);
4068 		KEG_UNLOCK(keg, slab->us_domain);
4069 
4070 		/* Don't block if we allocated any successfully. */
4071 		flags &= ~M_WAITOK;
4072 		flags |= M_NOWAIT;
4073 	}
4074 out:
4075 	return i;
4076 }
4077 
4078 static int
4079 zone_alloc_limit_hard(uma_zone_t zone, int count, int flags)
4080 {
4081 	uint64_t old, new, total, max;
4082 
4083 	/*
4084 	 * The hard case.  We're going to sleep because there were existing
4085 	 * sleepers or because we ran out of items.  This routine enforces
4086 	 * fairness by keeping fifo order.
4087 	 *
4088 	 * First release our ill gotten gains and make some noise.
4089 	 */
4090 	for (;;) {
4091 		zone_free_limit(zone, count);
4092 		zone_log_warning(zone);
4093 		zone_maxaction(zone);
4094 		if (flags & M_NOWAIT)
4095 			return (0);
4096 
4097 		/*
4098 		 * We need to allocate an item or set ourself as a sleeper
4099 		 * while the sleepq lock is held to avoid wakeup races.  This
4100 		 * is essentially a home rolled semaphore.
4101 		 */
4102 		sleepq_lock(&zone->uz_max_items);
4103 		old = zone->uz_items;
4104 		do {
4105 			MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX);
4106 			/* Cache the max since we will evaluate twice. */
4107 			max = zone->uz_max_items;
4108 			if (UZ_ITEMS_SLEEPERS(old) != 0 ||
4109 			    UZ_ITEMS_COUNT(old) >= max)
4110 				new = old + UZ_ITEMS_SLEEPER;
4111 			else
4112 				new = old + MIN(count, max - old);
4113 		} while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0);
4114 
4115 		/* We may have successfully allocated under the sleepq lock. */
4116 		if (UZ_ITEMS_SLEEPERS(new) == 0) {
4117 			sleepq_release(&zone->uz_max_items);
4118 			return (new - old);
4119 		}
4120 
4121 		/*
4122 		 * This is in a different cacheline from uz_items so that we
4123 		 * don't constantly invalidate the fastpath cacheline when we
4124 		 * adjust item counts.  This could be limited to toggling on
4125 		 * transitions.
4126 		 */
4127 		atomic_add_32(&zone->uz_sleepers, 1);
4128 		atomic_add_64(&zone->uz_sleeps, 1);
4129 
4130 		/*
4131 		 * We have added ourselves as a sleeper.  The sleepq lock
4132 		 * protects us from wakeup races.  Sleep now and then retry.
4133 		 */
4134 		sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0);
4135 		sleepq_wait(&zone->uz_max_items, PVM);
4136 
4137 		/*
4138 		 * After wakeup, remove ourselves as a sleeper and try
4139 		 * again.  We no longer have the sleepq lock for protection.
4140 		 *
4141 		 * Subract ourselves as a sleeper while attempting to add
4142 		 * our count.
4143 		 */
4144 		atomic_subtract_32(&zone->uz_sleepers, 1);
4145 		old = atomic_fetchadd_64(&zone->uz_items,
4146 		    -(UZ_ITEMS_SLEEPER - count));
4147 		/* We're no longer a sleeper. */
4148 		old -= UZ_ITEMS_SLEEPER;
4149 
4150 		/*
4151 		 * If we're still at the limit, restart.  Notably do not
4152 		 * block on other sleepers.  Cache the max value to protect
4153 		 * against changes via sysctl.
4154 		 */
4155 		total = UZ_ITEMS_COUNT(old);
4156 		max = zone->uz_max_items;
4157 		if (total >= max)
4158 			continue;
4159 		/* Truncate if necessary, otherwise wake other sleepers. */
4160 		if (total + count > max) {
4161 			zone_free_limit(zone, total + count - max);
4162 			count = max - total;
4163 		} else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0)
4164 			wakeup_one(&zone->uz_max_items);
4165 
4166 		return (count);
4167 	}
4168 }
4169 
4170 /*
4171  * Allocate 'count' items from our max_items limit.  Returns the number
4172  * available.  If M_NOWAIT is not specified it will sleep until at least
4173  * one item can be allocated.
4174  */
4175 static int
4176 zone_alloc_limit(uma_zone_t zone, int count, int flags)
4177 {
4178 	uint64_t old;
4179 	uint64_t max;
4180 
4181 	max = zone->uz_max_items;
4182 	MPASS(max > 0);
4183 
4184 	/*
4185 	 * We expect normal allocations to succeed with a simple
4186 	 * fetchadd.
4187 	 */
4188 	old = atomic_fetchadd_64(&zone->uz_items, count);
4189 	if (__predict_true(old + count <= max))
4190 		return (count);
4191 
4192 	/*
4193 	 * If we had some items and no sleepers just return the
4194 	 * truncated value.  We have to release the excess space
4195 	 * though because that may wake sleepers who weren't woken
4196 	 * because we were temporarily over the limit.
4197 	 */
4198 	if (old < max) {
4199 		zone_free_limit(zone, (old + count) - max);
4200 		return (max - old);
4201 	}
4202 	return (zone_alloc_limit_hard(zone, count, flags));
4203 }
4204 
4205 /*
4206  * Free a number of items back to the limit.
4207  */
4208 static void
4209 zone_free_limit(uma_zone_t zone, int count)
4210 {
4211 	uint64_t old;
4212 
4213 	MPASS(count > 0);
4214 
4215 	/*
4216 	 * In the common case we either have no sleepers or
4217 	 * are still over the limit and can just return.
4218 	 */
4219 	old = atomic_fetchadd_64(&zone->uz_items, -count);
4220 	if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 ||
4221 	   UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items))
4222 		return;
4223 
4224 	/*
4225 	 * Moderate the rate of wakeups.  Sleepers will continue
4226 	 * to generate wakeups if necessary.
4227 	 */
4228 	wakeup_one(&zone->uz_max_items);
4229 }
4230 
4231 static uma_bucket_t
4232 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
4233 {
4234 	uma_bucket_t bucket;
4235 	int error, maxbucket, cnt;
4236 
4237 	CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name,
4238 	    zone, domain);
4239 
4240 	/* Avoid allocs targeting empty domains. */
4241 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
4242 		domain = UMA_ANYDOMAIN;
4243 	else if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
4244 		domain = UMA_ANYDOMAIN;
4245 
4246 	if (zone->uz_max_items > 0)
4247 		maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size,
4248 		    M_NOWAIT);
4249 	else
4250 		maxbucket = zone->uz_bucket_size;
4251 	if (maxbucket == 0)
4252 		return (false);
4253 
4254 	/* Don't wait for buckets, preserve caller's NOVM setting. */
4255 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
4256 	if (bucket == NULL) {
4257 		cnt = 0;
4258 		goto out;
4259 	}
4260 
4261 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
4262 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
4263 
4264 	/*
4265 	 * Initialize the memory if necessary.
4266 	 */
4267 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
4268 		int i;
4269 
4270 		for (i = 0; i < bucket->ub_cnt; i++) {
4271 			kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
4272 			error = zone->uz_init(bucket->ub_bucket[i],
4273 			    zone->uz_size, flags);
4274 			kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
4275 			if (error != 0)
4276 				break;
4277 		}
4278 
4279 		/*
4280 		 * If we couldn't initialize the whole bucket, put the
4281 		 * rest back onto the freelist.
4282 		 */
4283 		if (i != bucket->ub_cnt) {
4284 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
4285 			    bucket->ub_cnt - i);
4286 #ifdef INVARIANTS
4287 			bzero(&bucket->ub_bucket[i],
4288 			    sizeof(void *) * (bucket->ub_cnt - i));
4289 #endif
4290 			bucket->ub_cnt = i;
4291 		}
4292 	}
4293 
4294 	cnt = bucket->ub_cnt;
4295 	if (bucket->ub_cnt == 0) {
4296 		bucket_free(zone, bucket, udata);
4297 		counter_u64_add(zone->uz_fails, 1);
4298 		bucket = NULL;
4299 	}
4300 out:
4301 	if (zone->uz_max_items > 0 && cnt < maxbucket)
4302 		zone_free_limit(zone, maxbucket - cnt);
4303 
4304 	return (bucket);
4305 }
4306 
4307 /*
4308  * Allocates a single item from a zone.
4309  *
4310  * Arguments
4311  *	zone   The zone to alloc for.
4312  *	udata  The data to be passed to the constructor.
4313  *	domain The domain to allocate from or UMA_ANYDOMAIN.
4314  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
4315  *
4316  * Returns
4317  *	NULL if there is no memory and M_NOWAIT is set
4318  *	An item if successful
4319  */
4320 
4321 static void *
4322 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
4323 {
4324 	void *item;
4325 
4326 	if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0) {
4327 		counter_u64_add(zone->uz_fails, 1);
4328 		return (NULL);
4329 	}
4330 
4331 	/* Avoid allocs targeting empty domains. */
4332 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
4333 		domain = UMA_ANYDOMAIN;
4334 
4335 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
4336 		goto fail_cnt;
4337 
4338 	/*
4339 	 * We have to call both the zone's init (not the keg's init)
4340 	 * and the zone's ctor.  This is because the item is going from
4341 	 * a keg slab directly to the user, and the user is expecting it
4342 	 * to be both zone-init'd as well as zone-ctor'd.
4343 	 */
4344 	if (zone->uz_init != NULL) {
4345 		int error;
4346 
4347 		kasan_mark_item_valid(zone, item);
4348 		error = zone->uz_init(item, zone->uz_size, flags);
4349 		kasan_mark_item_invalid(zone, item);
4350 		if (error != 0) {
4351 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
4352 			goto fail_cnt;
4353 		}
4354 	}
4355 	item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, flags,
4356 	    item);
4357 	if (item == NULL)
4358 		goto fail;
4359 
4360 	counter_u64_add(zone->uz_allocs, 1);
4361 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
4362 	    zone->uz_name, zone);
4363 
4364 	return (item);
4365 
4366 fail_cnt:
4367 	counter_u64_add(zone->uz_fails, 1);
4368 fail:
4369 	if (zone->uz_max_items > 0)
4370 		zone_free_limit(zone, 1);
4371 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
4372 	    zone->uz_name, zone);
4373 
4374 	return (NULL);
4375 }
4376 
4377 /* See uma.h */
4378 void
4379 uma_zfree_smr(uma_zone_t zone, void *item)
4380 {
4381 	uma_cache_t cache;
4382 	uma_cache_bucket_t bucket;
4383 	int itemdomain, uz_flags;
4384 
4385 	CTR3(KTR_UMA, "uma_zfree_smr zone %s(%p) item %p",
4386 	    zone->uz_name, zone, item);
4387 
4388 #ifdef UMA_ZALLOC_DEBUG
4389 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
4390 	    ("uma_zfree_smr: called with non-SMR zone."));
4391 	KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer."));
4392 	SMR_ASSERT_NOT_ENTERED(zone->uz_smr);
4393 	if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN)
4394 		return;
4395 #endif
4396 	cache = &zone->uz_cpu[curcpu];
4397 	uz_flags = cache_uz_flags(cache);
4398 	itemdomain = 0;
4399 #ifdef NUMA
4400 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4401 		itemdomain = item_domain(item);
4402 #endif
4403 	critical_enter();
4404 	do {
4405 		cache = &zone->uz_cpu[curcpu];
4406 		/* SMR Zones must free to the free bucket. */
4407 		bucket = &cache->uc_freebucket;
4408 #ifdef NUMA
4409 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4410 		    PCPU_GET(domain) != itemdomain) {
4411 			bucket = &cache->uc_crossbucket;
4412 		}
4413 #endif
4414 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4415 			cache_bucket_push(cache, bucket, item);
4416 			critical_exit();
4417 			return;
4418 		}
4419 	} while (cache_free(zone, cache, NULL, itemdomain));
4420 	critical_exit();
4421 
4422 	/*
4423 	 * If nothing else caught this, we'll just do an internal free.
4424 	 */
4425 	zone_free_item(zone, item, NULL, SKIP_NONE);
4426 }
4427 
4428 /* See uma.h */
4429 void
4430 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
4431 {
4432 	uma_cache_t cache;
4433 	uma_cache_bucket_t bucket;
4434 	int itemdomain, uz_flags;
4435 
4436 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
4437 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
4438 
4439 	CTR3(KTR_UMA, "uma_zfree_arg zone %s(%p) item %p",
4440 	    zone->uz_name, zone, item);
4441 
4442 #ifdef UMA_ZALLOC_DEBUG
4443 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
4444 	    ("uma_zfree_arg: called with SMR zone."));
4445 	if (uma_zfree_debug(zone, item, udata) == EJUSTRETURN)
4446 		return;
4447 #endif
4448         /* uma_zfree(..., NULL) does nothing, to match free(9). */
4449         if (item == NULL)
4450                 return;
4451 
4452 	/*
4453 	 * We are accessing the per-cpu cache without a critical section to
4454 	 * fetch size and flags.  This is acceptable, if we are preempted we
4455 	 * will simply read another cpu's line.
4456 	 */
4457 	cache = &zone->uz_cpu[curcpu];
4458 	uz_flags = cache_uz_flags(cache);
4459 	if (UMA_ALWAYS_CTORDTOR ||
4460 	    __predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0))
4461 		item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE);
4462 
4463 	/*
4464 	 * The race here is acceptable.  If we miss it we'll just have to wait
4465 	 * a little longer for the limits to be reset.
4466 	 */
4467 	if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) {
4468 		if (atomic_load_32(&zone->uz_sleepers) > 0)
4469 			goto zfree_item;
4470 	}
4471 
4472 	/*
4473 	 * If possible, free to the per-CPU cache.  There are two
4474 	 * requirements for safe access to the per-CPU cache: (1) the thread
4475 	 * accessing the cache must not be preempted or yield during access,
4476 	 * and (2) the thread must not migrate CPUs without switching which
4477 	 * cache it accesses.  We rely on a critical section to prevent
4478 	 * preemption and migration.  We release the critical section in
4479 	 * order to acquire the zone mutex if we are unable to free to the
4480 	 * current cache; when we re-acquire the critical section, we must
4481 	 * detect and handle migration if it has occurred.
4482 	 */
4483 	itemdomain = 0;
4484 #ifdef NUMA
4485 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4486 		itemdomain = item_domain(item);
4487 #endif
4488 	critical_enter();
4489 	do {
4490 		cache = &zone->uz_cpu[curcpu];
4491 		/*
4492 		 * Try to free into the allocbucket first to give LIFO
4493 		 * ordering for cache-hot datastructures.  Spill over
4494 		 * into the freebucket if necessary.  Alloc will swap
4495 		 * them if one runs dry.
4496 		 */
4497 		bucket = &cache->uc_allocbucket;
4498 #ifdef NUMA
4499 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4500 		    PCPU_GET(domain) != itemdomain) {
4501 			bucket = &cache->uc_crossbucket;
4502 		} else
4503 #endif
4504 		if (bucket->ucb_cnt == bucket->ucb_entries &&
4505 		   cache->uc_freebucket.ucb_cnt <
4506 		   cache->uc_freebucket.ucb_entries)
4507 			cache_bucket_swap(&cache->uc_freebucket,
4508 			    &cache->uc_allocbucket);
4509 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4510 			cache_bucket_push(cache, bucket, item);
4511 			critical_exit();
4512 			return;
4513 		}
4514 	} while (cache_free(zone, cache, udata, itemdomain));
4515 	critical_exit();
4516 
4517 	/*
4518 	 * If nothing else caught this, we'll just do an internal free.
4519 	 */
4520 zfree_item:
4521 	zone_free_item(zone, item, udata, SKIP_DTOR);
4522 }
4523 
4524 #ifdef NUMA
4525 /*
4526  * sort crossdomain free buckets to domain correct buckets and cache
4527  * them.
4528  */
4529 static void
4530 zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
4531 {
4532 	struct uma_bucketlist emptybuckets, fullbuckets;
4533 	uma_zone_domain_t zdom;
4534 	uma_bucket_t b;
4535 	smr_seq_t seq;
4536 	void *item;
4537 	int domain;
4538 
4539 	CTR3(KTR_UMA,
4540 	    "uma_zfree: zone %s(%p) draining cross bucket %p",
4541 	    zone->uz_name, zone, bucket);
4542 
4543 	/*
4544 	 * It is possible for buckets to arrive here out of order so we fetch
4545 	 * the current smr seq rather than accepting the bucket's.
4546 	 */
4547 	seq = SMR_SEQ_INVALID;
4548 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
4549 		seq = smr_advance(zone->uz_smr);
4550 
4551 	/*
4552 	 * To avoid having ndomain * ndomain buckets for sorting we have a
4553 	 * lock on the current crossfree bucket.  A full matrix with
4554 	 * per-domain locking could be used if necessary.
4555 	 */
4556 	STAILQ_INIT(&emptybuckets);
4557 	STAILQ_INIT(&fullbuckets);
4558 	ZONE_CROSS_LOCK(zone);
4559 	for (; bucket->ub_cnt > 0; bucket->ub_cnt--) {
4560 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
4561 		domain = item_domain(item);
4562 		zdom = ZDOM_GET(zone, domain);
4563 		if (zdom->uzd_cross == NULL) {
4564 			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
4565 				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4566 				zdom->uzd_cross = b;
4567 			} else {
4568 				/*
4569 				 * Avoid allocating a bucket with the cross lock
4570 				 * held, since allocation can trigger a
4571 				 * cross-domain free and bucket zones may
4572 				 * allocate from each other.
4573 				 */
4574 				ZONE_CROSS_UNLOCK(zone);
4575 				b = bucket_alloc(zone, udata, M_NOWAIT);
4576 				if (b == NULL)
4577 					goto out;
4578 				ZONE_CROSS_LOCK(zone);
4579 				if (zdom->uzd_cross != NULL) {
4580 					STAILQ_INSERT_HEAD(&emptybuckets, b,
4581 					    ub_link);
4582 				} else {
4583 					zdom->uzd_cross = b;
4584 				}
4585 			}
4586 		}
4587 		b = zdom->uzd_cross;
4588 		b->ub_bucket[b->ub_cnt++] = item;
4589 		b->ub_seq = seq;
4590 		if (b->ub_cnt == b->ub_entries) {
4591 			STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link);
4592 			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL)
4593 				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4594 			zdom->uzd_cross = b;
4595 		}
4596 	}
4597 	ZONE_CROSS_UNLOCK(zone);
4598 out:
4599 	if (bucket->ub_cnt == 0)
4600 		bucket->ub_seq = SMR_SEQ_INVALID;
4601 	bucket_free(zone, bucket, udata);
4602 
4603 	while ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
4604 		STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4605 		bucket_free(zone, b, udata);
4606 	}
4607 	while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) {
4608 		STAILQ_REMOVE_HEAD(&fullbuckets, ub_link);
4609 		domain = item_domain(b->ub_bucket[0]);
4610 		zone_put_bucket(zone, domain, b, udata, true);
4611 	}
4612 }
4613 #endif
4614 
4615 static void
4616 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
4617     int itemdomain, bool ws)
4618 {
4619 
4620 #ifdef NUMA
4621 	/*
4622 	 * Buckets coming from the wrong domain will be entirely for the
4623 	 * only other domain on two domain systems.  In this case we can
4624 	 * simply cache them.  Otherwise we need to sort them back to
4625 	 * correct domains.
4626 	 */
4627 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4628 	    vm_ndomains > 2 && PCPU_GET(domain) != itemdomain) {
4629 		zone_free_cross(zone, bucket, udata);
4630 		return;
4631 	}
4632 #endif
4633 
4634 	/*
4635 	 * Attempt to save the bucket in the zone's domain bucket cache.
4636 	 */
4637 	CTR3(KTR_UMA,
4638 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
4639 	    zone->uz_name, zone, bucket);
4640 	/* ub_cnt is pointing to the last free item */
4641 	if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
4642 		itemdomain = zone_domain_lowest(zone, itemdomain);
4643 	zone_put_bucket(zone, itemdomain, bucket, udata, ws);
4644 }
4645 
4646 /*
4647  * Populate a free or cross bucket for the current cpu cache.  Free any
4648  * existing full bucket either to the zone cache or back to the slab layer.
4649  *
4650  * Enters and returns in a critical section.  false return indicates that
4651  * we can not satisfy this free in the cache layer.  true indicates that
4652  * the caller should retry.
4653  */
4654 static __noinline bool
4655 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, int itemdomain)
4656 {
4657 	uma_cache_bucket_t cbucket;
4658 	uma_bucket_t newbucket, bucket;
4659 
4660 	CRITICAL_ASSERT(curthread);
4661 
4662 	if (zone->uz_bucket_size == 0)
4663 		return false;
4664 
4665 	cache = &zone->uz_cpu[curcpu];
4666 	newbucket = NULL;
4667 
4668 	/*
4669 	 * FIRSTTOUCH domains need to free to the correct zdom.  When
4670 	 * enabled this is the zdom of the item.   The bucket is the
4671 	 * cross bucket if the current domain and itemdomain do not match.
4672 	 */
4673 	cbucket = &cache->uc_freebucket;
4674 #ifdef NUMA
4675 	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
4676 		if (PCPU_GET(domain) != itemdomain) {
4677 			cbucket = &cache->uc_crossbucket;
4678 			if (cbucket->ucb_cnt != 0)
4679 				counter_u64_add(zone->uz_xdomain,
4680 				    cbucket->ucb_cnt);
4681 		}
4682 	}
4683 #endif
4684 	bucket = cache_bucket_unload(cbucket);
4685 	KASSERT(bucket == NULL || bucket->ub_cnt == bucket->ub_entries,
4686 	    ("cache_free: Entered with non-full free bucket."));
4687 
4688 	/* We are no longer associated with this CPU. */
4689 	critical_exit();
4690 
4691 	/*
4692 	 * Don't let SMR zones operate without a free bucket.  Force
4693 	 * a synchronize and re-use this one.  We will only degrade
4694 	 * to a synchronize every bucket_size items rather than every
4695 	 * item if we fail to allocate a bucket.
4696 	 */
4697 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
4698 		if (bucket != NULL)
4699 			bucket->ub_seq = smr_advance(zone->uz_smr);
4700 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4701 		if (newbucket == NULL && bucket != NULL) {
4702 			bucket_drain(zone, bucket);
4703 			newbucket = bucket;
4704 			bucket = NULL;
4705 		}
4706 	} else if (!bucketdisable)
4707 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4708 
4709 	if (bucket != NULL)
4710 		zone_free_bucket(zone, bucket, udata, itemdomain, true);
4711 
4712 	critical_enter();
4713 	if ((bucket = newbucket) == NULL)
4714 		return (false);
4715 	cache = &zone->uz_cpu[curcpu];
4716 #ifdef NUMA
4717 	/*
4718 	 * Check to see if we should be populating the cross bucket.  If it
4719 	 * is already populated we will fall through and attempt to populate
4720 	 * the free bucket.
4721 	 */
4722 	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
4723 		if (PCPU_GET(domain) != itemdomain &&
4724 		    cache->uc_crossbucket.ucb_bucket == NULL) {
4725 			cache_bucket_load_cross(cache, bucket);
4726 			return (true);
4727 		}
4728 	}
4729 #endif
4730 	/*
4731 	 * We may have lost the race to fill the bucket or switched CPUs.
4732 	 */
4733 	if (cache->uc_freebucket.ucb_bucket != NULL) {
4734 		critical_exit();
4735 		bucket_free(zone, bucket, udata);
4736 		critical_enter();
4737 	} else
4738 		cache_bucket_load_free(cache, bucket);
4739 
4740 	return (true);
4741 }
4742 
4743 static void
4744 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
4745 {
4746 	uma_keg_t keg;
4747 	uma_domain_t dom;
4748 	int freei;
4749 
4750 	keg = zone->uz_keg;
4751 	KEG_LOCK_ASSERT(keg, slab->us_domain);
4752 
4753 	/* Do we need to remove from any lists? */
4754 	dom = &keg->uk_domain[slab->us_domain];
4755 	if (slab->us_freecount + 1 == keg->uk_ipers) {
4756 		LIST_REMOVE(slab, us_link);
4757 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
4758 		dom->ud_free_slabs++;
4759 	} else if (slab->us_freecount == 0) {
4760 		LIST_REMOVE(slab, us_link);
4761 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
4762 	}
4763 
4764 	/* Slab management. */
4765 	freei = slab_item_index(slab, keg, item);
4766 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
4767 	slab->us_freecount++;
4768 
4769 	/* Keg statistics. */
4770 	dom->ud_free_items++;
4771 }
4772 
4773 static void
4774 zone_release(void *arg, void **bucket, int cnt)
4775 {
4776 	struct mtx *lock;
4777 	uma_zone_t zone;
4778 	uma_slab_t slab;
4779 	uma_keg_t keg;
4780 	uint8_t *mem;
4781 	void *item;
4782 	int i;
4783 
4784 	zone = arg;
4785 	keg = zone->uz_keg;
4786 	lock = NULL;
4787 	if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0))
4788 		lock = KEG_LOCK(keg, 0);
4789 	for (i = 0; i < cnt; i++) {
4790 		item = bucket[i];
4791 		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
4792 			slab = vtoslab((vm_offset_t)item);
4793 		} else {
4794 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4795 			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
4796 				slab = hash_sfind(&keg->uk_hash, mem);
4797 			else
4798 				slab = (uma_slab_t)(mem + keg->uk_pgoff);
4799 		}
4800 		if (lock != KEG_LOCKPTR(keg, slab->us_domain)) {
4801 			if (lock != NULL)
4802 				mtx_unlock(lock);
4803 			lock = KEG_LOCK(keg, slab->us_domain);
4804 		}
4805 		slab_free_item(zone, slab, item);
4806 	}
4807 	if (lock != NULL)
4808 		mtx_unlock(lock);
4809 }
4810 
4811 /*
4812  * Frees a single item to any zone.
4813  *
4814  * Arguments:
4815  *	zone   The zone to free to
4816  *	item   The item we're freeing
4817  *	udata  User supplied data for the dtor
4818  *	skip   Skip dtors and finis
4819  */
4820 static __noinline void
4821 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
4822 {
4823 
4824 	/*
4825 	 * If a free is sent directly to an SMR zone we have to
4826 	 * synchronize immediately because the item can instantly
4827 	 * be reallocated. This should only happen in degenerate
4828 	 * cases when no memory is available for per-cpu caches.
4829 	 */
4830 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_NONE)
4831 		smr_synchronize(zone->uz_smr);
4832 
4833 	item_dtor(zone, item, zone->uz_size, udata, skip);
4834 
4835 	if (skip < SKIP_FINI && zone->uz_fini) {
4836 		kasan_mark_item_valid(zone, item);
4837 		zone->uz_fini(item, zone->uz_size);
4838 		kasan_mark_item_invalid(zone, item);
4839 	}
4840 
4841 	zone->uz_release(zone->uz_arg, &item, 1);
4842 
4843 	if (skip & SKIP_CNT)
4844 		return;
4845 
4846 	counter_u64_add(zone->uz_frees, 1);
4847 
4848 	if (zone->uz_max_items > 0)
4849 		zone_free_limit(zone, 1);
4850 }
4851 
4852 /* See uma.h */
4853 int
4854 uma_zone_set_max(uma_zone_t zone, int nitems)
4855 {
4856 
4857 	/*
4858 	 * If the limit is small, we may need to constrain the maximum per-CPU
4859 	 * cache size, or disable caching entirely.
4860 	 */
4861 	uma_zone_set_maxcache(zone, nitems);
4862 
4863 	/*
4864 	 * XXX This can misbehave if the zone has any allocations with
4865 	 * no limit and a limit is imposed.  There is currently no
4866 	 * way to clear a limit.
4867 	 */
4868 	ZONE_LOCK(zone);
4869 	zone->uz_max_items = nitems;
4870 	zone->uz_flags |= UMA_ZFLAG_LIMIT;
4871 	zone_update_caches(zone);
4872 	/* We may need to wake waiters. */
4873 	wakeup(&zone->uz_max_items);
4874 	ZONE_UNLOCK(zone);
4875 
4876 	return (nitems);
4877 }
4878 
4879 /* See uma.h */
4880 void
4881 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
4882 {
4883 	int bpcpu, bpdom, bsize, nb;
4884 
4885 	ZONE_LOCK(zone);
4886 
4887 	/*
4888 	 * Compute a lower bound on the number of items that may be cached in
4889 	 * the zone.  Each CPU gets at least two buckets, and for cross-domain
4890 	 * frees we use an additional bucket per CPU and per domain.  Select the
4891 	 * largest bucket size that does not exceed half of the requested limit,
4892 	 * with the left over space given to the full bucket cache.
4893 	 */
4894 	bpdom = 0;
4895 	bpcpu = 2;
4896 #ifdef NUMA
4897 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && vm_ndomains > 1) {
4898 		bpcpu++;
4899 		bpdom++;
4900 	}
4901 #endif
4902 	nb = bpcpu * mp_ncpus + bpdom * vm_ndomains;
4903 	bsize = nitems / nb / 2;
4904 	if (bsize > BUCKET_MAX)
4905 		bsize = BUCKET_MAX;
4906 	else if (bsize == 0 && nitems / nb > 0)
4907 		bsize = 1;
4908 	zone->uz_bucket_size_max = zone->uz_bucket_size = bsize;
4909 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
4910 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
4911 	zone->uz_bucket_max = nitems - nb * bsize;
4912 	ZONE_UNLOCK(zone);
4913 }
4914 
4915 /* See uma.h */
4916 int
4917 uma_zone_get_max(uma_zone_t zone)
4918 {
4919 	int nitems;
4920 
4921 	nitems = atomic_load_64(&zone->uz_max_items);
4922 
4923 	return (nitems);
4924 }
4925 
4926 /* See uma.h */
4927 void
4928 uma_zone_set_warning(uma_zone_t zone, const char *warning)
4929 {
4930 
4931 	ZONE_ASSERT_COLD(zone);
4932 	zone->uz_warning = warning;
4933 }
4934 
4935 /* See uma.h */
4936 void
4937 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
4938 {
4939 
4940 	ZONE_ASSERT_COLD(zone);
4941 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
4942 }
4943 
4944 /* See uma.h */
4945 int
4946 uma_zone_get_cur(uma_zone_t zone)
4947 {
4948 	int64_t nitems;
4949 	u_int i;
4950 
4951 	nitems = 0;
4952 	if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER)
4953 		nitems = counter_u64_fetch(zone->uz_allocs) -
4954 		    counter_u64_fetch(zone->uz_frees);
4955 	CPU_FOREACH(i)
4956 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) -
4957 		    atomic_load_64(&zone->uz_cpu[i].uc_frees);
4958 
4959 	return (nitems < 0 ? 0 : nitems);
4960 }
4961 
4962 static uint64_t
4963 uma_zone_get_allocs(uma_zone_t zone)
4964 {
4965 	uint64_t nitems;
4966 	u_int i;
4967 
4968 	nitems = 0;
4969 	if (zone->uz_allocs != EARLY_COUNTER)
4970 		nitems = counter_u64_fetch(zone->uz_allocs);
4971 	CPU_FOREACH(i)
4972 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs);
4973 
4974 	return (nitems);
4975 }
4976 
4977 static uint64_t
4978 uma_zone_get_frees(uma_zone_t zone)
4979 {
4980 	uint64_t nitems;
4981 	u_int i;
4982 
4983 	nitems = 0;
4984 	if (zone->uz_frees != EARLY_COUNTER)
4985 		nitems = counter_u64_fetch(zone->uz_frees);
4986 	CPU_FOREACH(i)
4987 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees);
4988 
4989 	return (nitems);
4990 }
4991 
4992 #ifdef INVARIANTS
4993 /* Used only for KEG_ASSERT_COLD(). */
4994 static uint64_t
4995 uma_keg_get_allocs(uma_keg_t keg)
4996 {
4997 	uma_zone_t z;
4998 	uint64_t nitems;
4999 
5000 	nitems = 0;
5001 	LIST_FOREACH(z, &keg->uk_zones, uz_link)
5002 		nitems += uma_zone_get_allocs(z);
5003 
5004 	return (nitems);
5005 }
5006 #endif
5007 
5008 /* See uma.h */
5009 void
5010 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
5011 {
5012 	uma_keg_t keg;
5013 
5014 	KEG_GET(zone, keg);
5015 	KEG_ASSERT_COLD(keg);
5016 	keg->uk_init = uminit;
5017 }
5018 
5019 /* See uma.h */
5020 void
5021 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
5022 {
5023 	uma_keg_t keg;
5024 
5025 	KEG_GET(zone, keg);
5026 	KEG_ASSERT_COLD(keg);
5027 	keg->uk_fini = fini;
5028 }
5029 
5030 /* See uma.h */
5031 void
5032 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
5033 {
5034 
5035 	ZONE_ASSERT_COLD(zone);
5036 	zone->uz_init = zinit;
5037 }
5038 
5039 /* See uma.h */
5040 void
5041 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
5042 {
5043 
5044 	ZONE_ASSERT_COLD(zone);
5045 	zone->uz_fini = zfini;
5046 }
5047 
5048 /* See uma.h */
5049 void
5050 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
5051 {
5052 	uma_keg_t keg;
5053 
5054 	KEG_GET(zone, keg);
5055 	KEG_ASSERT_COLD(keg);
5056 	keg->uk_freef = freef;
5057 }
5058 
5059 /* See uma.h */
5060 void
5061 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
5062 {
5063 	uma_keg_t keg;
5064 
5065 	KEG_GET(zone, keg);
5066 	KEG_ASSERT_COLD(keg);
5067 	keg->uk_allocf = allocf;
5068 }
5069 
5070 /* See uma.h */
5071 void
5072 uma_zone_set_smr(uma_zone_t zone, smr_t smr)
5073 {
5074 
5075 	ZONE_ASSERT_COLD(zone);
5076 
5077 	KASSERT(smr != NULL, ("Got NULL smr"));
5078 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
5079 	    ("zone %p (%s) already uses SMR", zone, zone->uz_name));
5080 	zone->uz_flags |= UMA_ZONE_SMR;
5081 	zone->uz_smr = smr;
5082 	zone_update_caches(zone);
5083 }
5084 
5085 smr_t
5086 uma_zone_get_smr(uma_zone_t zone)
5087 {
5088 
5089 	return (zone->uz_smr);
5090 }
5091 
5092 /* See uma.h */
5093 void
5094 uma_zone_reserve(uma_zone_t zone, int items)
5095 {
5096 	uma_keg_t keg;
5097 
5098 	KEG_GET(zone, keg);
5099 	KEG_ASSERT_COLD(keg);
5100 	keg->uk_reserve = items;
5101 }
5102 
5103 /* See uma.h */
5104 int
5105 uma_zone_reserve_kva(uma_zone_t zone, int count)
5106 {
5107 	uma_keg_t keg;
5108 	vm_offset_t kva;
5109 	u_int pages;
5110 
5111 	KEG_GET(zone, keg);
5112 	KEG_ASSERT_COLD(keg);
5113 	ZONE_ASSERT_COLD(zone);
5114 
5115 	pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
5116 
5117 #ifdef UMA_MD_SMALL_ALLOC
5118 	if (keg->uk_ppera > 1) {
5119 #else
5120 	if (1) {
5121 #endif
5122 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
5123 		if (kva == 0)
5124 			return (0);
5125 	} else
5126 		kva = 0;
5127 
5128 	MPASS(keg->uk_kva == 0);
5129 	keg->uk_kva = kva;
5130 	keg->uk_offset = 0;
5131 	zone->uz_max_items = pages * keg->uk_ipers;
5132 #ifdef UMA_MD_SMALL_ALLOC
5133 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
5134 #else
5135 	keg->uk_allocf = noobj_alloc;
5136 #endif
5137 	keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
5138 	zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
5139 	zone_update_caches(zone);
5140 
5141 	return (1);
5142 }
5143 
5144 /* See uma.h */
5145 void
5146 uma_prealloc(uma_zone_t zone, int items)
5147 {
5148 	struct vm_domainset_iter di;
5149 	uma_domain_t dom;
5150 	uma_slab_t slab;
5151 	uma_keg_t keg;
5152 	int aflags, domain, slabs;
5153 
5154 	KEG_GET(zone, keg);
5155 	slabs = howmany(items, keg->uk_ipers);
5156 	while (slabs-- > 0) {
5157 		aflags = M_NOWAIT;
5158 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
5159 		    &aflags);
5160 		for (;;) {
5161 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
5162 			    aflags);
5163 			if (slab != NULL) {
5164 				dom = &keg->uk_domain[slab->us_domain];
5165 				/*
5166 				 * keg_alloc_slab() always returns a slab on the
5167 				 * partial list.
5168 				 */
5169 				LIST_REMOVE(slab, us_link);
5170 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
5171 				    us_link);
5172 				dom->ud_free_slabs++;
5173 				KEG_UNLOCK(keg, slab->us_domain);
5174 				break;
5175 			}
5176 			if (vm_domainset_iter_policy(&di, &domain) != 0)
5177 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
5178 		}
5179 	}
5180 }
5181 
5182 /*
5183  * Returns a snapshot of memory consumption in bytes.
5184  */
5185 size_t
5186 uma_zone_memory(uma_zone_t zone)
5187 {
5188 	size_t sz;
5189 	int i;
5190 
5191 	sz = 0;
5192 	if (zone->uz_flags & UMA_ZFLAG_CACHE) {
5193 		for (i = 0; i < vm_ndomains; i++)
5194 			sz += ZDOM_GET(zone, i)->uzd_nitems;
5195 		return (sz * zone->uz_size);
5196 	}
5197 	for (i = 0; i < vm_ndomains; i++)
5198 		sz += zone->uz_keg->uk_domain[i].ud_pages;
5199 
5200 	return (sz * PAGE_SIZE);
5201 }
5202 
5203 /* See uma.h */
5204 void
5205 uma_reclaim(int req)
5206 {
5207 	uma_reclaim_domain(req, UMA_ANYDOMAIN);
5208 }
5209 
5210 void
5211 uma_reclaim_domain(int req, int domain)
5212 {
5213 	void *arg;
5214 
5215 	bucket_enable();
5216 
5217 	arg = (void *)(uintptr_t)domain;
5218 	sx_slock(&uma_reclaim_lock);
5219 	switch (req) {
5220 	case UMA_RECLAIM_TRIM:
5221 		zone_foreach(zone_trim, arg);
5222 		break;
5223 	case UMA_RECLAIM_DRAIN:
5224 		zone_foreach(zone_drain, arg);
5225 		break;
5226 	case UMA_RECLAIM_DRAIN_CPU:
5227 		zone_foreach(zone_drain, arg);
5228 		pcpu_cache_drain_safe(NULL);
5229 		zone_foreach(zone_drain, arg);
5230 		break;
5231 	default:
5232 		panic("unhandled reclamation request %d", req);
5233 	}
5234 
5235 	/*
5236 	 * Some slabs may have been freed but this zone will be visited early
5237 	 * we visit again so that we can free pages that are empty once other
5238 	 * zones are drained.  We have to do the same for buckets.
5239 	 */
5240 	zone_drain(slabzones[0], arg);
5241 	zone_drain(slabzones[1], arg);
5242 	bucket_zone_drain(domain);
5243 	sx_sunlock(&uma_reclaim_lock);
5244 }
5245 
5246 static volatile int uma_reclaim_needed;
5247 
5248 void
5249 uma_reclaim_wakeup(void)
5250 {
5251 
5252 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
5253 		wakeup(uma_reclaim);
5254 }
5255 
5256 void
5257 uma_reclaim_worker(void *arg __unused)
5258 {
5259 
5260 	for (;;) {
5261 		sx_xlock(&uma_reclaim_lock);
5262 		while (atomic_load_int(&uma_reclaim_needed) == 0)
5263 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
5264 			    hz);
5265 		sx_xunlock(&uma_reclaim_lock);
5266 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
5267 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
5268 		atomic_store_int(&uma_reclaim_needed, 0);
5269 		/* Don't fire more than once per-second. */
5270 		pause("umarclslp", hz);
5271 	}
5272 }
5273 
5274 /* See uma.h */
5275 void
5276 uma_zone_reclaim(uma_zone_t zone, int req)
5277 {
5278 	uma_zone_reclaim_domain(zone, req, UMA_ANYDOMAIN);
5279 }
5280 
5281 void
5282 uma_zone_reclaim_domain(uma_zone_t zone, int req, int domain)
5283 {
5284 	void *arg;
5285 
5286 	arg = (void *)(uintptr_t)domain;
5287 	switch (req) {
5288 	case UMA_RECLAIM_TRIM:
5289 		zone_trim(zone, arg);
5290 		break;
5291 	case UMA_RECLAIM_DRAIN:
5292 		zone_drain(zone, arg);
5293 		break;
5294 	case UMA_RECLAIM_DRAIN_CPU:
5295 		pcpu_cache_drain_safe(zone);
5296 		zone_drain(zone, arg);
5297 		break;
5298 	default:
5299 		panic("unhandled reclamation request %d", req);
5300 	}
5301 }
5302 
5303 /* See uma.h */
5304 int
5305 uma_zone_exhausted(uma_zone_t zone)
5306 {
5307 
5308 	return (atomic_load_32(&zone->uz_sleepers) > 0);
5309 }
5310 
5311 unsigned long
5312 uma_limit(void)
5313 {
5314 
5315 	return (uma_kmem_limit);
5316 }
5317 
5318 void
5319 uma_set_limit(unsigned long limit)
5320 {
5321 
5322 	uma_kmem_limit = limit;
5323 }
5324 
5325 unsigned long
5326 uma_size(void)
5327 {
5328 
5329 	return (atomic_load_long(&uma_kmem_total));
5330 }
5331 
5332 long
5333 uma_avail(void)
5334 {
5335 
5336 	return (uma_kmem_limit - uma_size());
5337 }
5338 
5339 #ifdef DDB
5340 /*
5341  * Generate statistics across both the zone and its per-cpu cache's.  Return
5342  * desired statistics if the pointer is non-NULL for that statistic.
5343  *
5344  * Note: does not update the zone statistics, as it can't safely clear the
5345  * per-CPU cache statistic.
5346  *
5347  */
5348 static void
5349 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
5350     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
5351 {
5352 	uma_cache_t cache;
5353 	uint64_t allocs, frees, sleeps, xdomain;
5354 	int cachefree, cpu;
5355 
5356 	allocs = frees = sleeps = xdomain = 0;
5357 	cachefree = 0;
5358 	CPU_FOREACH(cpu) {
5359 		cache = &z->uz_cpu[cpu];
5360 		cachefree += cache->uc_allocbucket.ucb_cnt;
5361 		cachefree += cache->uc_freebucket.ucb_cnt;
5362 		xdomain += cache->uc_crossbucket.ucb_cnt;
5363 		cachefree += cache->uc_crossbucket.ucb_cnt;
5364 		allocs += cache->uc_allocs;
5365 		frees += cache->uc_frees;
5366 	}
5367 	allocs += counter_u64_fetch(z->uz_allocs);
5368 	frees += counter_u64_fetch(z->uz_frees);
5369 	xdomain += counter_u64_fetch(z->uz_xdomain);
5370 	sleeps += z->uz_sleeps;
5371 	if (cachefreep != NULL)
5372 		*cachefreep = cachefree;
5373 	if (allocsp != NULL)
5374 		*allocsp = allocs;
5375 	if (freesp != NULL)
5376 		*freesp = frees;
5377 	if (sleepsp != NULL)
5378 		*sleepsp = sleeps;
5379 	if (xdomainp != NULL)
5380 		*xdomainp = xdomain;
5381 }
5382 #endif /* DDB */
5383 
5384 static int
5385 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
5386 {
5387 	uma_keg_t kz;
5388 	uma_zone_t z;
5389 	int count;
5390 
5391 	count = 0;
5392 	rw_rlock(&uma_rwlock);
5393 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5394 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
5395 			count++;
5396 	}
5397 	LIST_FOREACH(z, &uma_cachezones, uz_link)
5398 		count++;
5399 
5400 	rw_runlock(&uma_rwlock);
5401 	return (sysctl_handle_int(oidp, &count, 0, req));
5402 }
5403 
5404 static void
5405 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
5406     struct uma_percpu_stat *ups, bool internal)
5407 {
5408 	uma_zone_domain_t zdom;
5409 	uma_cache_t cache;
5410 	int i;
5411 
5412 	for (i = 0; i < vm_ndomains; i++) {
5413 		zdom = ZDOM_GET(z, i);
5414 		uth->uth_zone_free += zdom->uzd_nitems;
5415 	}
5416 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
5417 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
5418 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
5419 	uth->uth_xdomain = counter_u64_fetch(z->uz_xdomain);
5420 	uth->uth_sleeps = z->uz_sleeps;
5421 
5422 	for (i = 0; i < mp_maxid + 1; i++) {
5423 		bzero(&ups[i], sizeof(*ups));
5424 		if (internal || CPU_ABSENT(i))
5425 			continue;
5426 		cache = &z->uz_cpu[i];
5427 		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
5428 		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
5429 		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
5430 		ups[i].ups_allocs = cache->uc_allocs;
5431 		ups[i].ups_frees = cache->uc_frees;
5432 	}
5433 }
5434 
5435 static int
5436 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
5437 {
5438 	struct uma_stream_header ush;
5439 	struct uma_type_header uth;
5440 	struct uma_percpu_stat *ups;
5441 	struct sbuf sbuf;
5442 	uma_keg_t kz;
5443 	uma_zone_t z;
5444 	uint64_t items;
5445 	uint32_t kfree, pages;
5446 	int count, error, i;
5447 
5448 	error = sysctl_wire_old_buffer(req, 0);
5449 	if (error != 0)
5450 		return (error);
5451 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
5452 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
5453 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
5454 
5455 	count = 0;
5456 	rw_rlock(&uma_rwlock);
5457 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5458 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
5459 			count++;
5460 	}
5461 
5462 	LIST_FOREACH(z, &uma_cachezones, uz_link)
5463 		count++;
5464 
5465 	/*
5466 	 * Insert stream header.
5467 	 */
5468 	bzero(&ush, sizeof(ush));
5469 	ush.ush_version = UMA_STREAM_VERSION;
5470 	ush.ush_maxcpus = (mp_maxid + 1);
5471 	ush.ush_count = count;
5472 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
5473 
5474 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5475 		kfree = pages = 0;
5476 		for (i = 0; i < vm_ndomains; i++) {
5477 			kfree += kz->uk_domain[i].ud_free_items;
5478 			pages += kz->uk_domain[i].ud_pages;
5479 		}
5480 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
5481 			bzero(&uth, sizeof(uth));
5482 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
5483 			uth.uth_align = kz->uk_align;
5484 			uth.uth_size = kz->uk_size;
5485 			uth.uth_rsize = kz->uk_rsize;
5486 			if (z->uz_max_items > 0) {
5487 				items = UZ_ITEMS_COUNT(z->uz_items);
5488 				uth.uth_pages = (items / kz->uk_ipers) *
5489 					kz->uk_ppera;
5490 			} else
5491 				uth.uth_pages = pages;
5492 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
5493 			    kz->uk_ppera;
5494 			uth.uth_limit = z->uz_max_items;
5495 			uth.uth_keg_free = kfree;
5496 
5497 			/*
5498 			 * A zone is secondary is it is not the first entry
5499 			 * on the keg's zone list.
5500 			 */
5501 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
5502 			    (LIST_FIRST(&kz->uk_zones) != z))
5503 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
5504 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
5505 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
5506 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
5507 			for (i = 0; i < mp_maxid + 1; i++)
5508 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
5509 		}
5510 	}
5511 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5512 		bzero(&uth, sizeof(uth));
5513 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
5514 		uth.uth_size = z->uz_size;
5515 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
5516 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
5517 		for (i = 0; i < mp_maxid + 1; i++)
5518 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
5519 	}
5520 
5521 	rw_runlock(&uma_rwlock);
5522 	error = sbuf_finish(&sbuf);
5523 	sbuf_delete(&sbuf);
5524 	free(ups, M_TEMP);
5525 	return (error);
5526 }
5527 
5528 int
5529 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
5530 {
5531 	uma_zone_t zone = *(uma_zone_t *)arg1;
5532 	int error, max;
5533 
5534 	max = uma_zone_get_max(zone);
5535 	error = sysctl_handle_int(oidp, &max, 0, req);
5536 	if (error || !req->newptr)
5537 		return (error);
5538 
5539 	uma_zone_set_max(zone, max);
5540 
5541 	return (0);
5542 }
5543 
5544 int
5545 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
5546 {
5547 	uma_zone_t zone;
5548 	int cur;
5549 
5550 	/*
5551 	 * Some callers want to add sysctls for global zones that
5552 	 * may not yet exist so they pass a pointer to a pointer.
5553 	 */
5554 	if (arg2 == 0)
5555 		zone = *(uma_zone_t *)arg1;
5556 	else
5557 		zone = arg1;
5558 	cur = uma_zone_get_cur(zone);
5559 	return (sysctl_handle_int(oidp, &cur, 0, req));
5560 }
5561 
5562 static int
5563 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
5564 {
5565 	uma_zone_t zone = arg1;
5566 	uint64_t cur;
5567 
5568 	cur = uma_zone_get_allocs(zone);
5569 	return (sysctl_handle_64(oidp, &cur, 0, req));
5570 }
5571 
5572 static int
5573 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
5574 {
5575 	uma_zone_t zone = arg1;
5576 	uint64_t cur;
5577 
5578 	cur = uma_zone_get_frees(zone);
5579 	return (sysctl_handle_64(oidp, &cur, 0, req));
5580 }
5581 
5582 static int
5583 sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
5584 {
5585 	struct sbuf sbuf;
5586 	uma_zone_t zone = arg1;
5587 	int error;
5588 
5589 	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
5590 	if (zone->uz_flags != 0)
5591 		sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
5592 	else
5593 		sbuf_printf(&sbuf, "0");
5594 	error = sbuf_finish(&sbuf);
5595 	sbuf_delete(&sbuf);
5596 
5597 	return (error);
5598 }
5599 
5600 static int
5601 sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS)
5602 {
5603 	uma_keg_t keg = arg1;
5604 	int avail, effpct, total;
5605 
5606 	total = keg->uk_ppera * PAGE_SIZE;
5607 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
5608 		total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize;
5609 	/*
5610 	 * We consider the client's requested size and alignment here, not the
5611 	 * real size determination uk_rsize, because we also adjust the real
5612 	 * size for internal implementation reasons (max bitset size).
5613 	 */
5614 	avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1);
5615 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
5616 		avail *= mp_maxid + 1;
5617 	effpct = 100 * avail / total;
5618 	return (sysctl_handle_int(oidp, &effpct, 0, req));
5619 }
5620 
5621 static int
5622 sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS)
5623 {
5624 	uma_zone_t zone = arg1;
5625 	uint64_t cur;
5626 
5627 	cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items));
5628 	return (sysctl_handle_64(oidp, &cur, 0, req));
5629 }
5630 
5631 #ifdef INVARIANTS
5632 static uma_slab_t
5633 uma_dbg_getslab(uma_zone_t zone, void *item)
5634 {
5635 	uma_slab_t slab;
5636 	uma_keg_t keg;
5637 	uint8_t *mem;
5638 
5639 	/*
5640 	 * It is safe to return the slab here even though the
5641 	 * zone is unlocked because the item's allocation state
5642 	 * essentially holds a reference.
5643 	 */
5644 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
5645 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5646 		return (NULL);
5647 	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
5648 		return (vtoslab((vm_offset_t)mem));
5649 	keg = zone->uz_keg;
5650 	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
5651 		return ((uma_slab_t)(mem + keg->uk_pgoff));
5652 	KEG_LOCK(keg, 0);
5653 	slab = hash_sfind(&keg->uk_hash, mem);
5654 	KEG_UNLOCK(keg, 0);
5655 
5656 	return (slab);
5657 }
5658 
5659 static bool
5660 uma_dbg_zskip(uma_zone_t zone, void *mem)
5661 {
5662 
5663 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5664 		return (true);
5665 
5666 	return (uma_dbg_kskip(zone->uz_keg, mem));
5667 }
5668 
5669 static bool
5670 uma_dbg_kskip(uma_keg_t keg, void *mem)
5671 {
5672 	uintptr_t idx;
5673 
5674 	if (dbg_divisor == 0)
5675 		return (true);
5676 
5677 	if (dbg_divisor == 1)
5678 		return (false);
5679 
5680 	idx = (uintptr_t)mem >> PAGE_SHIFT;
5681 	if (keg->uk_ipers > 1) {
5682 		idx *= keg->uk_ipers;
5683 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
5684 	}
5685 
5686 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
5687 		counter_u64_add(uma_skip_cnt, 1);
5688 		return (true);
5689 	}
5690 	counter_u64_add(uma_dbg_cnt, 1);
5691 
5692 	return (false);
5693 }
5694 
5695 /*
5696  * Set up the slab's freei data such that uma_dbg_free can function.
5697  *
5698  */
5699 static void
5700 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
5701 {
5702 	uma_keg_t keg;
5703 	int freei;
5704 
5705 	if (slab == NULL) {
5706 		slab = uma_dbg_getslab(zone, item);
5707 		if (slab == NULL)
5708 			panic("uma: item %p did not belong to zone %s",
5709 			    item, zone->uz_name);
5710 	}
5711 	keg = zone->uz_keg;
5712 	freei = slab_item_index(slab, keg, item);
5713 
5714 	if (BIT_TEST_SET_ATOMIC(keg->uk_ipers, freei,
5715 	    slab_dbg_bits(slab, keg)))
5716 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)",
5717 		    item, zone, zone->uz_name, slab, freei);
5718 }
5719 
5720 /*
5721  * Verifies freed addresses.  Checks for alignment, valid slab membership
5722  * and duplicate frees.
5723  *
5724  */
5725 static void
5726 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
5727 {
5728 	uma_keg_t keg;
5729 	int freei;
5730 
5731 	if (slab == NULL) {
5732 		slab = uma_dbg_getslab(zone, item);
5733 		if (slab == NULL)
5734 			panic("uma: Freed item %p did not belong to zone %s",
5735 			    item, zone->uz_name);
5736 	}
5737 	keg = zone->uz_keg;
5738 	freei = slab_item_index(slab, keg, item);
5739 
5740 	if (freei >= keg->uk_ipers)
5741 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)",
5742 		    item, zone, zone->uz_name, slab, freei);
5743 
5744 	if (slab_item(slab, keg, freei) != item)
5745 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)",
5746 		    item, zone, zone->uz_name, slab, freei);
5747 
5748 	if (!BIT_TEST_CLR_ATOMIC(keg->uk_ipers, freei,
5749 	    slab_dbg_bits(slab, keg)))
5750 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)",
5751 		    item, zone, zone->uz_name, slab, freei);
5752 }
5753 #endif /* INVARIANTS */
5754 
5755 #ifdef DDB
5756 static int64_t
5757 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
5758     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
5759 {
5760 	uint64_t frees;
5761 	int i;
5762 
5763 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
5764 		*allocs = counter_u64_fetch(z->uz_allocs);
5765 		frees = counter_u64_fetch(z->uz_frees);
5766 		*sleeps = z->uz_sleeps;
5767 		*cachefree = 0;
5768 		*xdomain = 0;
5769 	} else
5770 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
5771 		    xdomain);
5772 	for (i = 0; i < vm_ndomains; i++) {
5773 		*cachefree += ZDOM_GET(z, i)->uzd_nitems;
5774 		if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
5775 		    (LIST_FIRST(&kz->uk_zones) != z)))
5776 			*cachefree += kz->uk_domain[i].ud_free_items;
5777 	}
5778 	*used = *allocs - frees;
5779 	return (((int64_t)*used + *cachefree) * kz->uk_size);
5780 }
5781 
5782 DB_SHOW_COMMAND(uma, db_show_uma)
5783 {
5784 	const char *fmt_hdr, *fmt_entry;
5785 	uma_keg_t kz;
5786 	uma_zone_t z;
5787 	uint64_t allocs, used, sleeps, xdomain;
5788 	long cachefree;
5789 	/* variables for sorting */
5790 	uma_keg_t cur_keg;
5791 	uma_zone_t cur_zone, last_zone;
5792 	int64_t cur_size, last_size, size;
5793 	int ties;
5794 
5795 	/* /i option produces machine-parseable CSV output */
5796 	if (modif[0] == 'i') {
5797 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
5798 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
5799 	} else {
5800 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
5801 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
5802 	}
5803 
5804 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
5805 	    "Sleeps", "Bucket", "Total Mem", "XFree");
5806 
5807 	/* Sort the zones with largest size first. */
5808 	last_zone = NULL;
5809 	last_size = INT64_MAX;
5810 	for (;;) {
5811 		cur_zone = NULL;
5812 		cur_size = -1;
5813 		ties = 0;
5814 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
5815 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
5816 				/*
5817 				 * In the case of size ties, print out zones
5818 				 * in the order they are encountered.  That is,
5819 				 * when we encounter the most recently output
5820 				 * zone, we have already printed all preceding
5821 				 * ties, and we must print all following ties.
5822 				 */
5823 				if (z == last_zone) {
5824 					ties = 1;
5825 					continue;
5826 				}
5827 				size = get_uma_stats(kz, z, &allocs, &used,
5828 				    &sleeps, &cachefree, &xdomain);
5829 				if (size > cur_size && size < last_size + ties)
5830 				{
5831 					cur_size = size;
5832 					cur_zone = z;
5833 					cur_keg = kz;
5834 				}
5835 			}
5836 		}
5837 		if (cur_zone == NULL)
5838 			break;
5839 
5840 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
5841 		    &sleeps, &cachefree, &xdomain);
5842 		db_printf(fmt_entry, cur_zone->uz_name,
5843 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
5844 		    (uintmax_t)allocs, (uintmax_t)sleeps,
5845 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
5846 		    xdomain);
5847 
5848 		if (db_pager_quit)
5849 			return;
5850 		last_zone = cur_zone;
5851 		last_size = cur_size;
5852 	}
5853 }
5854 
5855 DB_SHOW_COMMAND(umacache, db_show_umacache)
5856 {
5857 	uma_zone_t z;
5858 	uint64_t allocs, frees;
5859 	long cachefree;
5860 	int i;
5861 
5862 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
5863 	    "Requests", "Bucket");
5864 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5865 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
5866 		for (i = 0; i < vm_ndomains; i++)
5867 			cachefree += ZDOM_GET(z, i)->uzd_nitems;
5868 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
5869 		    z->uz_name, (uintmax_t)z->uz_size,
5870 		    (intmax_t)(allocs - frees), cachefree,
5871 		    (uintmax_t)allocs, z->uz_bucket_size);
5872 		if (db_pager_quit)
5873 			return;
5874 	}
5875 }
5876 #endif	/* DDB */
5877