xref: /freebsd/sys/vm/uma_core.c (revision f56f82e0)
1 /*-
2  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4  * Copyright (c) 2004-2006 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uma_core.c  Implementation of the Universal Memory allocator
31  *
32  * This allocator is intended to replace the multitude of similar object caches
33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
34  * efficient.  A primary design goal is to return unused memory to the rest of
35  * the system.  This will make the system as a whole more flexible due to the
36  * ability to move memory to subsystems which most need it instead of leaving
37  * pools of reserved memory unused.
38  *
39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
40  * are well known.
41  *
42  */
43 
44 /*
45  * TODO:
46  *	- Improve memory usage for large allocations
47  *	- Investigate cache size adjustments
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 #include "opt_ddb.h"
54 #include "opt_param.h"
55 #include "opt_vm.h"
56 
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/bitset.h>
60 #include <sys/eventhandler.h>
61 #include <sys/kernel.h>
62 #include <sys/types.h>
63 #include <sys/queue.h>
64 #include <sys/malloc.h>
65 #include <sys/ktr.h>
66 #include <sys/lock.h>
67 #include <sys/sysctl.h>
68 #include <sys/mutex.h>
69 #include <sys/proc.h>
70 #include <sys/random.h>
71 #include <sys/rwlock.h>
72 #include <sys/sbuf.h>
73 #include <sys/sched.h>
74 #include <sys/smp.h>
75 #include <sys/taskqueue.h>
76 #include <sys/vmmeter.h>
77 
78 #include <vm/vm.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_pageout.h>
82 #include <vm/vm_param.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_kern.h>
85 #include <vm/vm_extern.h>
86 #include <vm/uma.h>
87 #include <vm/uma_int.h>
88 #include <vm/uma_dbg.h>
89 
90 #include <ddb/ddb.h>
91 
92 #ifdef DEBUG_MEMGUARD
93 #include <vm/memguard.h>
94 #endif
95 
96 /*
97  * This is the zone and keg from which all zones are spawned.  The idea is that
98  * even the zone & keg heads are allocated from the allocator, so we use the
99  * bss section to bootstrap us.
100  */
101 static struct uma_keg masterkeg;
102 static struct uma_zone masterzone_k;
103 static struct uma_zone masterzone_z;
104 static uma_zone_t kegs = &masterzone_k;
105 static uma_zone_t zones = &masterzone_z;
106 
107 /* This is the zone from which all of uma_slab_t's are allocated. */
108 static uma_zone_t slabzone;
109 
110 /*
111  * The initial hash tables come out of this zone so they can be allocated
112  * prior to malloc coming up.
113  */
114 static uma_zone_t hashzone;
115 
116 /* The boot-time adjusted value for cache line alignment. */
117 int uma_align_cache = 64 - 1;
118 
119 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
120 
121 /*
122  * Are we allowed to allocate buckets?
123  */
124 static int bucketdisable = 1;
125 
126 /* Linked list of all kegs in the system */
127 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
128 
129 /* Linked list of all cache-only zones in the system */
130 static LIST_HEAD(,uma_zone) uma_cachezones =
131     LIST_HEAD_INITIALIZER(uma_cachezones);
132 
133 /* This RW lock protects the keg list */
134 static struct rwlock_padalign uma_rwlock;
135 
136 /*
137  * Pointer and counter to pool of pages, that is preallocated at
138  * startup to bootstrap UMA.  Early zones continue to use the pool
139  * until it is depleted, so allocations may happen after boot, thus
140  * we need a mutex to protect it.
141  */
142 static char *bootmem;
143 static int boot_pages;
144 static struct mtx uma_boot_pages_mtx;
145 
146 static struct sx uma_drain_lock;
147 
148 /* Is the VM done starting up? */
149 static int booted = 0;
150 #define	UMA_STARTUP	1
151 #define	UMA_STARTUP2	2
152 
153 /*
154  * This is the handle used to schedule events that need to happen
155  * outside of the allocation fast path.
156  */
157 static struct callout uma_callout;
158 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
159 
160 /*
161  * This structure is passed as the zone ctor arg so that I don't have to create
162  * a special allocation function just for zones.
163  */
164 struct uma_zctor_args {
165 	const char *name;
166 	size_t size;
167 	uma_ctor ctor;
168 	uma_dtor dtor;
169 	uma_init uminit;
170 	uma_fini fini;
171 	uma_import import;
172 	uma_release release;
173 	void *arg;
174 	uma_keg_t keg;
175 	int align;
176 	uint32_t flags;
177 };
178 
179 struct uma_kctor_args {
180 	uma_zone_t zone;
181 	size_t size;
182 	uma_init uminit;
183 	uma_fini fini;
184 	int align;
185 	uint32_t flags;
186 };
187 
188 struct uma_bucket_zone {
189 	uma_zone_t	ubz_zone;
190 	char		*ubz_name;
191 	int		ubz_entries;	/* Number of items it can hold. */
192 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
193 };
194 
195 /*
196  * Compute the actual number of bucket entries to pack them in power
197  * of two sizes for more efficient space utilization.
198  */
199 #define	BUCKET_SIZE(n)						\
200     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
201 
202 #define	BUCKET_MAX	BUCKET_SIZE(256)
203 
204 struct uma_bucket_zone bucket_zones[] = {
205 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
206 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
207 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
208 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
209 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
210 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
211 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
212 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
213 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
214 	{ NULL, NULL, 0}
215 };
216 
217 /*
218  * Flags and enumerations to be passed to internal functions.
219  */
220 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
221 
222 /* Prototypes.. */
223 
224 static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
225 static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
226 static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
227 static void page_free(void *, vm_size_t, uint8_t);
228 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
229 static void cache_drain(uma_zone_t);
230 static void bucket_drain(uma_zone_t, uma_bucket_t);
231 static void bucket_cache_drain(uma_zone_t zone);
232 static int keg_ctor(void *, int, void *, int);
233 static void keg_dtor(void *, int, void *);
234 static int zone_ctor(void *, int, void *, int);
235 static void zone_dtor(void *, int, void *);
236 static int zero_init(void *, int, int);
237 static void keg_small_init(uma_keg_t keg);
238 static void keg_large_init(uma_keg_t keg);
239 static void zone_foreach(void (*zfunc)(uma_zone_t));
240 static void zone_timeout(uma_zone_t zone);
241 static int hash_alloc(struct uma_hash *);
242 static int hash_expand(struct uma_hash *, struct uma_hash *);
243 static void hash_free(struct uma_hash *hash);
244 static void uma_timeout(void *);
245 static void uma_startup3(void);
246 static void *zone_alloc_item(uma_zone_t, void *, int);
247 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
248 static void bucket_enable(void);
249 static void bucket_init(void);
250 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
251 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
252 static void bucket_zone_drain(void);
253 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
254 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
255 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
256 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
257 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
258 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
259     uma_fini fini, int align, uint32_t flags);
260 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
261 static void zone_release(uma_zone_t zone, void **bucket, int cnt);
262 static void uma_zero_item(void *item, uma_zone_t zone);
263 
264 void uma_print_zone(uma_zone_t);
265 void uma_print_stats(void);
266 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
267 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
268 
269 #ifdef INVARIANTS
270 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
271 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
272 #endif
273 
274 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
275 
276 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
277     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
278 
279 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
280     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
281 
282 static int zone_warnings = 1;
283 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
284     "Warn when UMA zones becomes full");
285 
286 /*
287  * This routine checks to see whether or not it's safe to enable buckets.
288  */
289 static void
290 bucket_enable(void)
291 {
292 	bucketdisable = vm_page_count_min();
293 }
294 
295 /*
296  * Initialize bucket_zones, the array of zones of buckets of various sizes.
297  *
298  * For each zone, calculate the memory required for each bucket, consisting
299  * of the header and an array of pointers.
300  */
301 static void
302 bucket_init(void)
303 {
304 	struct uma_bucket_zone *ubz;
305 	int size;
306 
307 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
308 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
309 		size += sizeof(void *) * ubz->ubz_entries;
310 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
311 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
312 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
313 	}
314 }
315 
316 /*
317  * Given a desired number of entries for a bucket, return the zone from which
318  * to allocate the bucket.
319  */
320 static struct uma_bucket_zone *
321 bucket_zone_lookup(int entries)
322 {
323 	struct uma_bucket_zone *ubz;
324 
325 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
326 		if (ubz->ubz_entries >= entries)
327 			return (ubz);
328 	ubz--;
329 	return (ubz);
330 }
331 
332 static int
333 bucket_select(int size)
334 {
335 	struct uma_bucket_zone *ubz;
336 
337 	ubz = &bucket_zones[0];
338 	if (size > ubz->ubz_maxsize)
339 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
340 
341 	for (; ubz->ubz_entries != 0; ubz++)
342 		if (ubz->ubz_maxsize < size)
343 			break;
344 	ubz--;
345 	return (ubz->ubz_entries);
346 }
347 
348 static uma_bucket_t
349 bucket_alloc(uma_zone_t zone, void *udata, int flags)
350 {
351 	struct uma_bucket_zone *ubz;
352 	uma_bucket_t bucket;
353 
354 	/*
355 	 * This is to stop us from allocating per cpu buckets while we're
356 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
357 	 * boot pages.  This also prevents us from allocating buckets in
358 	 * low memory situations.
359 	 */
360 	if (bucketdisable)
361 		return (NULL);
362 	/*
363 	 * To limit bucket recursion we store the original zone flags
364 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
365 	 * NOVM flag to persist even through deep recursions.  We also
366 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
367 	 * a bucket for a bucket zone so we do not allow infinite bucket
368 	 * recursion.  This cookie will even persist to frees of unused
369 	 * buckets via the allocation path or bucket allocations in the
370 	 * free path.
371 	 */
372 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
373 		udata = (void *)(uintptr_t)zone->uz_flags;
374 	else {
375 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
376 			return (NULL);
377 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
378 	}
379 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
380 		flags |= M_NOVM;
381 	ubz = bucket_zone_lookup(zone->uz_count);
382 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
383 		ubz++;
384 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
385 	if (bucket) {
386 #ifdef INVARIANTS
387 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
388 #endif
389 		bucket->ub_cnt = 0;
390 		bucket->ub_entries = ubz->ubz_entries;
391 	}
392 
393 	return (bucket);
394 }
395 
396 static void
397 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
398 {
399 	struct uma_bucket_zone *ubz;
400 
401 	KASSERT(bucket->ub_cnt == 0,
402 	    ("bucket_free: Freeing a non free bucket."));
403 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
404 		udata = (void *)(uintptr_t)zone->uz_flags;
405 	ubz = bucket_zone_lookup(bucket->ub_entries);
406 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
407 }
408 
409 static void
410 bucket_zone_drain(void)
411 {
412 	struct uma_bucket_zone *ubz;
413 
414 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
415 		zone_drain(ubz->ubz_zone);
416 }
417 
418 static void
419 zone_log_warning(uma_zone_t zone)
420 {
421 	static const struct timeval warninterval = { 300, 0 };
422 
423 	if (!zone_warnings || zone->uz_warning == NULL)
424 		return;
425 
426 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
427 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
428 }
429 
430 static inline void
431 zone_maxaction(uma_zone_t zone)
432 {
433 
434 	if (zone->uz_maxaction.ta_func != NULL)
435 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
436 }
437 
438 static void
439 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
440 {
441 	uma_klink_t klink;
442 
443 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
444 		kegfn(klink->kl_keg);
445 }
446 
447 /*
448  * Routine called by timeout which is used to fire off some time interval
449  * based calculations.  (stats, hash size, etc.)
450  *
451  * Arguments:
452  *	arg   Unused
453  *
454  * Returns:
455  *	Nothing
456  */
457 static void
458 uma_timeout(void *unused)
459 {
460 	bucket_enable();
461 	zone_foreach(zone_timeout);
462 
463 	/* Reschedule this event */
464 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
465 }
466 
467 /*
468  * Routine to perform timeout driven calculations.  This expands the
469  * hashes and does per cpu statistics aggregation.
470  *
471  *  Returns nothing.
472  */
473 static void
474 keg_timeout(uma_keg_t keg)
475 {
476 
477 	KEG_LOCK(keg);
478 	/*
479 	 * Expand the keg hash table.
480 	 *
481 	 * This is done if the number of slabs is larger than the hash size.
482 	 * What I'm trying to do here is completely reduce collisions.  This
483 	 * may be a little aggressive.  Should I allow for two collisions max?
484 	 */
485 	if (keg->uk_flags & UMA_ZONE_HASH &&
486 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
487 		struct uma_hash newhash;
488 		struct uma_hash oldhash;
489 		int ret;
490 
491 		/*
492 		 * This is so involved because allocating and freeing
493 		 * while the keg lock is held will lead to deadlock.
494 		 * I have to do everything in stages and check for
495 		 * races.
496 		 */
497 		newhash = keg->uk_hash;
498 		KEG_UNLOCK(keg);
499 		ret = hash_alloc(&newhash);
500 		KEG_LOCK(keg);
501 		if (ret) {
502 			if (hash_expand(&keg->uk_hash, &newhash)) {
503 				oldhash = keg->uk_hash;
504 				keg->uk_hash = newhash;
505 			} else
506 				oldhash = newhash;
507 
508 			KEG_UNLOCK(keg);
509 			hash_free(&oldhash);
510 			return;
511 		}
512 	}
513 	KEG_UNLOCK(keg);
514 }
515 
516 static void
517 zone_timeout(uma_zone_t zone)
518 {
519 
520 	zone_foreach_keg(zone, &keg_timeout);
521 }
522 
523 /*
524  * Allocate and zero fill the next sized hash table from the appropriate
525  * backing store.
526  *
527  * Arguments:
528  *	hash  A new hash structure with the old hash size in uh_hashsize
529  *
530  * Returns:
531  *	1 on success and 0 on failure.
532  */
533 static int
534 hash_alloc(struct uma_hash *hash)
535 {
536 	int oldsize;
537 	int alloc;
538 
539 	oldsize = hash->uh_hashsize;
540 
541 	/* We're just going to go to a power of two greater */
542 	if (oldsize)  {
543 		hash->uh_hashsize = oldsize * 2;
544 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
545 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
546 		    M_UMAHASH, M_NOWAIT);
547 	} else {
548 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
549 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
550 		    M_WAITOK);
551 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
552 	}
553 	if (hash->uh_slab_hash) {
554 		bzero(hash->uh_slab_hash, alloc);
555 		hash->uh_hashmask = hash->uh_hashsize - 1;
556 		return (1);
557 	}
558 
559 	return (0);
560 }
561 
562 /*
563  * Expands the hash table for HASH zones.  This is done from zone_timeout
564  * to reduce collisions.  This must not be done in the regular allocation
565  * path, otherwise, we can recurse on the vm while allocating pages.
566  *
567  * Arguments:
568  *	oldhash  The hash you want to expand
569  *	newhash  The hash structure for the new table
570  *
571  * Returns:
572  *	Nothing
573  *
574  * Discussion:
575  */
576 static int
577 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
578 {
579 	uma_slab_t slab;
580 	int hval;
581 	int i;
582 
583 	if (!newhash->uh_slab_hash)
584 		return (0);
585 
586 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
587 		return (0);
588 
589 	/*
590 	 * I need to investigate hash algorithms for resizing without a
591 	 * full rehash.
592 	 */
593 
594 	for (i = 0; i < oldhash->uh_hashsize; i++)
595 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
596 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
597 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
598 			hval = UMA_HASH(newhash, slab->us_data);
599 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
600 			    slab, us_hlink);
601 		}
602 
603 	return (1);
604 }
605 
606 /*
607  * Free the hash bucket to the appropriate backing store.
608  *
609  * Arguments:
610  *	slab_hash  The hash bucket we're freeing
611  *	hashsize   The number of entries in that hash bucket
612  *
613  * Returns:
614  *	Nothing
615  */
616 static void
617 hash_free(struct uma_hash *hash)
618 {
619 	if (hash->uh_slab_hash == NULL)
620 		return;
621 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
622 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
623 	else
624 		free(hash->uh_slab_hash, M_UMAHASH);
625 }
626 
627 /*
628  * Frees all outstanding items in a bucket
629  *
630  * Arguments:
631  *	zone   The zone to free to, must be unlocked.
632  *	bucket The free/alloc bucket with items, cpu queue must be locked.
633  *
634  * Returns:
635  *	Nothing
636  */
637 
638 static void
639 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
640 {
641 	int i;
642 
643 	if (bucket == NULL)
644 		return;
645 
646 	if (zone->uz_fini)
647 		for (i = 0; i < bucket->ub_cnt; i++)
648 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
649 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
650 	bucket->ub_cnt = 0;
651 }
652 
653 /*
654  * Drains the per cpu caches for a zone.
655  *
656  * NOTE: This may only be called while the zone is being turn down, and not
657  * during normal operation.  This is necessary in order that we do not have
658  * to migrate CPUs to drain the per-CPU caches.
659  *
660  * Arguments:
661  *	zone     The zone to drain, must be unlocked.
662  *
663  * Returns:
664  *	Nothing
665  */
666 static void
667 cache_drain(uma_zone_t zone)
668 {
669 	uma_cache_t cache;
670 	int cpu;
671 
672 	/*
673 	 * XXX: It is safe to not lock the per-CPU caches, because we're
674 	 * tearing down the zone anyway.  I.e., there will be no further use
675 	 * of the caches at this point.
676 	 *
677 	 * XXX: It would good to be able to assert that the zone is being
678 	 * torn down to prevent improper use of cache_drain().
679 	 *
680 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
681 	 * it is used elsewhere.  Should the tear-down path be made special
682 	 * there in some form?
683 	 */
684 	CPU_FOREACH(cpu) {
685 		cache = &zone->uz_cpu[cpu];
686 		bucket_drain(zone, cache->uc_allocbucket);
687 		bucket_drain(zone, cache->uc_freebucket);
688 		if (cache->uc_allocbucket != NULL)
689 			bucket_free(zone, cache->uc_allocbucket, NULL);
690 		if (cache->uc_freebucket != NULL)
691 			bucket_free(zone, cache->uc_freebucket, NULL);
692 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
693 	}
694 	ZONE_LOCK(zone);
695 	bucket_cache_drain(zone);
696 	ZONE_UNLOCK(zone);
697 }
698 
699 static void
700 cache_shrink(uma_zone_t zone)
701 {
702 
703 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
704 		return;
705 
706 	ZONE_LOCK(zone);
707 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
708 	ZONE_UNLOCK(zone);
709 }
710 
711 static void
712 cache_drain_safe_cpu(uma_zone_t zone)
713 {
714 	uma_cache_t cache;
715 	uma_bucket_t b1, b2;
716 
717 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
718 		return;
719 
720 	b1 = b2 = NULL;
721 	ZONE_LOCK(zone);
722 	critical_enter();
723 	cache = &zone->uz_cpu[curcpu];
724 	if (cache->uc_allocbucket) {
725 		if (cache->uc_allocbucket->ub_cnt != 0)
726 			LIST_INSERT_HEAD(&zone->uz_buckets,
727 			    cache->uc_allocbucket, ub_link);
728 		else
729 			b1 = cache->uc_allocbucket;
730 		cache->uc_allocbucket = NULL;
731 	}
732 	if (cache->uc_freebucket) {
733 		if (cache->uc_freebucket->ub_cnt != 0)
734 			LIST_INSERT_HEAD(&zone->uz_buckets,
735 			    cache->uc_freebucket, ub_link);
736 		else
737 			b2 = cache->uc_freebucket;
738 		cache->uc_freebucket = NULL;
739 	}
740 	critical_exit();
741 	ZONE_UNLOCK(zone);
742 	if (b1)
743 		bucket_free(zone, b1, NULL);
744 	if (b2)
745 		bucket_free(zone, b2, NULL);
746 }
747 
748 /*
749  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
750  * This is an expensive call because it needs to bind to all CPUs
751  * one by one and enter a critical section on each of them in order
752  * to safely access their cache buckets.
753  * Zone lock must not be held on call this function.
754  */
755 static void
756 cache_drain_safe(uma_zone_t zone)
757 {
758 	int cpu;
759 
760 	/*
761 	 * Polite bucket sizes shrinking was not enouth, shrink aggressively.
762 	 */
763 	if (zone)
764 		cache_shrink(zone);
765 	else
766 		zone_foreach(cache_shrink);
767 
768 	CPU_FOREACH(cpu) {
769 		thread_lock(curthread);
770 		sched_bind(curthread, cpu);
771 		thread_unlock(curthread);
772 
773 		if (zone)
774 			cache_drain_safe_cpu(zone);
775 		else
776 			zone_foreach(cache_drain_safe_cpu);
777 	}
778 	thread_lock(curthread);
779 	sched_unbind(curthread);
780 	thread_unlock(curthread);
781 }
782 
783 /*
784  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
785  */
786 static void
787 bucket_cache_drain(uma_zone_t zone)
788 {
789 	uma_bucket_t bucket;
790 
791 	/*
792 	 * Drain the bucket queues and free the buckets, we just keep two per
793 	 * cpu (alloc/free).
794 	 */
795 	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
796 		LIST_REMOVE(bucket, ub_link);
797 		ZONE_UNLOCK(zone);
798 		bucket_drain(zone, bucket);
799 		bucket_free(zone, bucket, NULL);
800 		ZONE_LOCK(zone);
801 	}
802 
803 	/*
804 	 * Shrink further bucket sizes.  Price of single zone lock collision
805 	 * is probably lower then price of global cache drain.
806 	 */
807 	if (zone->uz_count > zone->uz_count_min)
808 		zone->uz_count--;
809 }
810 
811 static void
812 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
813 {
814 	uint8_t *mem;
815 	int i;
816 	uint8_t flags;
817 
818 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
819 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
820 
821 	mem = slab->us_data;
822 	flags = slab->us_flags;
823 	i = start;
824 	if (keg->uk_fini != NULL) {
825 		for (i--; i > -1; i--)
826 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
827 			    keg->uk_size);
828 	}
829 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
830 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
831 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
832 }
833 
834 /*
835  * Frees pages from a keg back to the system.  This is done on demand from
836  * the pageout daemon.
837  *
838  * Returns nothing.
839  */
840 static void
841 keg_drain(uma_keg_t keg)
842 {
843 	struct slabhead freeslabs = { 0 };
844 	uma_slab_t slab, tmp;
845 
846 	/*
847 	 * We don't want to take pages from statically allocated kegs at this
848 	 * time
849 	 */
850 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
851 		return;
852 
853 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
854 	    keg->uk_name, keg, keg->uk_free);
855 	KEG_LOCK(keg);
856 	if (keg->uk_free == 0)
857 		goto finished;
858 
859 	LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
860 		/* We have nowhere to free these to. */
861 		if (slab->us_flags & UMA_SLAB_BOOT)
862 			continue;
863 
864 		LIST_REMOVE(slab, us_link);
865 		keg->uk_pages -= keg->uk_ppera;
866 		keg->uk_free -= keg->uk_ipers;
867 
868 		if (keg->uk_flags & UMA_ZONE_HASH)
869 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
870 
871 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
872 	}
873 finished:
874 	KEG_UNLOCK(keg);
875 
876 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
877 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
878 		keg_free_slab(keg, slab, keg->uk_ipers);
879 	}
880 }
881 
882 static void
883 zone_drain_wait(uma_zone_t zone, int waitok)
884 {
885 
886 	/*
887 	 * Set draining to interlock with zone_dtor() so we can release our
888 	 * locks as we go.  Only dtor() should do a WAITOK call since it
889 	 * is the only call that knows the structure will still be available
890 	 * when it wakes up.
891 	 */
892 	ZONE_LOCK(zone);
893 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
894 		if (waitok == M_NOWAIT)
895 			goto out;
896 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
897 	}
898 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
899 	bucket_cache_drain(zone);
900 	ZONE_UNLOCK(zone);
901 	/*
902 	 * The DRAINING flag protects us from being freed while
903 	 * we're running.  Normally the uma_rwlock would protect us but we
904 	 * must be able to release and acquire the right lock for each keg.
905 	 */
906 	zone_foreach_keg(zone, &keg_drain);
907 	ZONE_LOCK(zone);
908 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
909 	wakeup(zone);
910 out:
911 	ZONE_UNLOCK(zone);
912 }
913 
914 void
915 zone_drain(uma_zone_t zone)
916 {
917 
918 	zone_drain_wait(zone, M_NOWAIT);
919 }
920 
921 /*
922  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
923  *
924  * Arguments:
925  *	wait  Shall we wait?
926  *
927  * Returns:
928  *	The slab that was allocated or NULL if there is no memory and the
929  *	caller specified M_NOWAIT.
930  */
931 static uma_slab_t
932 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
933 {
934 	uma_alloc allocf;
935 	uma_slab_t slab;
936 	uint8_t *mem;
937 	uint8_t flags;
938 	int i;
939 
940 	mtx_assert(&keg->uk_lock, MA_OWNED);
941 	slab = NULL;
942 	mem = NULL;
943 
944 	allocf = keg->uk_allocf;
945 	KEG_UNLOCK(keg);
946 
947 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
948 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
949 		if (slab == NULL)
950 			goto out;
951 	}
952 
953 	/*
954 	 * This reproduces the old vm_zone behavior of zero filling pages the
955 	 * first time they are added to a zone.
956 	 *
957 	 * Malloced items are zeroed in uma_zalloc.
958 	 */
959 
960 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
961 		wait |= M_ZERO;
962 	else
963 		wait &= ~M_ZERO;
964 
965 	if (keg->uk_flags & UMA_ZONE_NODUMP)
966 		wait |= M_NODUMP;
967 
968 	/* zone is passed for legacy reasons. */
969 	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
970 	if (mem == NULL) {
971 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
972 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
973 		slab = NULL;
974 		goto out;
975 	}
976 
977 	/* Point the slab into the allocated memory */
978 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
979 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
980 
981 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
982 		for (i = 0; i < keg->uk_ppera; i++)
983 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
984 
985 	slab->us_keg = keg;
986 	slab->us_data = mem;
987 	slab->us_freecount = keg->uk_ipers;
988 	slab->us_flags = flags;
989 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
990 #ifdef INVARIANTS
991 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
992 #endif
993 
994 	if (keg->uk_init != NULL) {
995 		for (i = 0; i < keg->uk_ipers; i++)
996 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
997 			    keg->uk_size, wait) != 0)
998 				break;
999 		if (i != keg->uk_ipers) {
1000 			keg_free_slab(keg, slab, i);
1001 			slab = NULL;
1002 			goto out;
1003 		}
1004 	}
1005 out:
1006 	KEG_LOCK(keg);
1007 
1008 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1009 	    slab, keg->uk_name, keg);
1010 
1011 	if (slab != NULL) {
1012 		if (keg->uk_flags & UMA_ZONE_HASH)
1013 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1014 
1015 		keg->uk_pages += keg->uk_ppera;
1016 		keg->uk_free += keg->uk_ipers;
1017 	}
1018 
1019 	return (slab);
1020 }
1021 
1022 /*
1023  * This function is intended to be used early on in place of page_alloc() so
1024  * that we may use the boot time page cache to satisfy allocations before
1025  * the VM is ready.
1026  */
1027 static void *
1028 startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1029 {
1030 	uma_keg_t keg;
1031 	void *mem;
1032 	int pages;
1033 
1034 	keg = zone_first_keg(zone);
1035 	pages = howmany(bytes, PAGE_SIZE);
1036 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1037 
1038 	/*
1039 	 * Check our small startup cache to see if it has pages remaining.
1040 	 */
1041 	mtx_lock(&uma_boot_pages_mtx);
1042 	if (pages <= boot_pages) {
1043 		mem = bootmem;
1044 		boot_pages -= pages;
1045 		bootmem += pages * PAGE_SIZE;
1046 		mtx_unlock(&uma_boot_pages_mtx);
1047 		*pflag = UMA_SLAB_BOOT;
1048 		return (mem);
1049 	}
1050 	mtx_unlock(&uma_boot_pages_mtx);
1051 	if (booted < UMA_STARTUP2)
1052 		panic("UMA: Increase vm.boot_pages");
1053 	/*
1054 	 * Now that we've booted reset these users to their real allocator.
1055 	 */
1056 #ifdef UMA_MD_SMALL_ALLOC
1057 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1058 #else
1059 	keg->uk_allocf = page_alloc;
1060 #endif
1061 	return keg->uk_allocf(zone, bytes, pflag, wait);
1062 }
1063 
1064 /*
1065  * Allocates a number of pages from the system
1066  *
1067  * Arguments:
1068  *	bytes  The number of bytes requested
1069  *	wait  Shall we wait?
1070  *
1071  * Returns:
1072  *	A pointer to the alloced memory or possibly
1073  *	NULL if M_NOWAIT is set.
1074  */
1075 static void *
1076 page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1077 {
1078 	void *p;	/* Returned page */
1079 
1080 	*pflag = UMA_SLAB_KMEM;
1081 	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1082 
1083 	return (p);
1084 }
1085 
1086 /*
1087  * Allocates a number of pages from within an object
1088  *
1089  * Arguments:
1090  *	bytes  The number of bytes requested
1091  *	wait   Shall we wait?
1092  *
1093  * Returns:
1094  *	A pointer to the alloced memory or possibly
1095  *	NULL if M_NOWAIT is set.
1096  */
1097 static void *
1098 noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
1099 {
1100 	TAILQ_HEAD(, vm_page) alloctail;
1101 	u_long npages;
1102 	vm_offset_t retkva, zkva;
1103 	vm_page_t p, p_next;
1104 	uma_keg_t keg;
1105 
1106 	TAILQ_INIT(&alloctail);
1107 	keg = zone_first_keg(zone);
1108 
1109 	npages = howmany(bytes, PAGE_SIZE);
1110 	while (npages > 0) {
1111 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1112 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1113 		if (p != NULL) {
1114 			/*
1115 			 * Since the page does not belong to an object, its
1116 			 * listq is unused.
1117 			 */
1118 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1119 			npages--;
1120 			continue;
1121 		}
1122 		if (wait & M_WAITOK) {
1123 			VM_WAIT;
1124 			continue;
1125 		}
1126 
1127 		/*
1128 		 * Page allocation failed, free intermediate pages and
1129 		 * exit.
1130 		 */
1131 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1132 			vm_page_unwire(p, PQ_NONE);
1133 			vm_page_free(p);
1134 		}
1135 		return (NULL);
1136 	}
1137 	*flags = UMA_SLAB_PRIV;
1138 	zkva = keg->uk_kva +
1139 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1140 	retkva = zkva;
1141 	TAILQ_FOREACH(p, &alloctail, listq) {
1142 		pmap_qenter(zkva, &p, 1);
1143 		zkva += PAGE_SIZE;
1144 	}
1145 
1146 	return ((void *)retkva);
1147 }
1148 
1149 /*
1150  * Frees a number of pages to the system
1151  *
1152  * Arguments:
1153  *	mem   A pointer to the memory to be freed
1154  *	size  The size of the memory being freed
1155  *	flags The original p->us_flags field
1156  *
1157  * Returns:
1158  *	Nothing
1159  */
1160 static void
1161 page_free(void *mem, vm_size_t size, uint8_t flags)
1162 {
1163 	struct vmem *vmem;
1164 
1165 	if (flags & UMA_SLAB_KMEM)
1166 		vmem = kmem_arena;
1167 	else if (flags & UMA_SLAB_KERNEL)
1168 		vmem = kernel_arena;
1169 	else
1170 		panic("UMA: page_free used with invalid flags %x", flags);
1171 
1172 	kmem_free(vmem, (vm_offset_t)mem, size);
1173 }
1174 
1175 /*
1176  * Zero fill initializer
1177  *
1178  * Arguments/Returns follow uma_init specifications
1179  */
1180 static int
1181 zero_init(void *mem, int size, int flags)
1182 {
1183 	bzero(mem, size);
1184 	return (0);
1185 }
1186 
1187 /*
1188  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1189  *
1190  * Arguments
1191  *	keg  The zone we should initialize
1192  *
1193  * Returns
1194  *	Nothing
1195  */
1196 static void
1197 keg_small_init(uma_keg_t keg)
1198 {
1199 	u_int rsize;
1200 	u_int memused;
1201 	u_int wastedspace;
1202 	u_int shsize;
1203 	u_int slabsize;
1204 
1205 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1206 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1207 
1208 		slabsize = sizeof(struct pcpu);
1209 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1210 		    PAGE_SIZE);
1211 	} else {
1212 		slabsize = UMA_SLAB_SIZE;
1213 		keg->uk_ppera = 1;
1214 	}
1215 
1216 	/*
1217 	 * Calculate the size of each allocation (rsize) according to
1218 	 * alignment.  If the requested size is smaller than we have
1219 	 * allocation bits for we round it up.
1220 	 */
1221 	rsize = keg->uk_size;
1222 	if (rsize < slabsize / SLAB_SETSIZE)
1223 		rsize = slabsize / SLAB_SETSIZE;
1224 	if (rsize & keg->uk_align)
1225 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1226 	keg->uk_rsize = rsize;
1227 
1228 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1229 	    keg->uk_rsize < sizeof(struct pcpu),
1230 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1231 
1232 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1233 		shsize = 0;
1234 	else
1235 		shsize = sizeof(struct uma_slab);
1236 
1237 	keg->uk_ipers = (slabsize - shsize) / rsize;
1238 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1239 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1240 
1241 	memused = keg->uk_ipers * rsize + shsize;
1242 	wastedspace = slabsize - memused;
1243 
1244 	/*
1245 	 * We can't do OFFPAGE if we're internal or if we've been
1246 	 * asked to not go to the VM for buckets.  If we do this we
1247 	 * may end up going to the VM  for slabs which we do not
1248 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1249 	 * of UMA_ZONE_VM, which clearly forbids it.
1250 	 */
1251 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1252 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1253 		return;
1254 
1255 	/*
1256 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1257 	 * this if it permits more items per-slab.
1258 	 *
1259 	 * XXX We could try growing slabsize to limit max waste as well.
1260 	 * Historically this was not done because the VM could not
1261 	 * efficiently handle contiguous allocations.
1262 	 */
1263 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1264 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1265 		keg->uk_ipers = slabsize / keg->uk_rsize;
1266 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1267 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1268 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1269 		    "keg: %s(%p), calculated wastedspace = %d, "
1270 		    "maximum wasted space allowed = %d, "
1271 		    "calculated ipers = %d, "
1272 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1273 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1274 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1275 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1276 	}
1277 
1278 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1279 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1280 		keg->uk_flags |= UMA_ZONE_HASH;
1281 }
1282 
1283 /*
1284  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
1285  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1286  * more complicated.
1287  *
1288  * Arguments
1289  *	keg  The keg we should initialize
1290  *
1291  * Returns
1292  *	Nothing
1293  */
1294 static void
1295 keg_large_init(uma_keg_t keg)
1296 {
1297 	u_int shsize;
1298 
1299 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1300 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1301 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1302 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1303 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1304 
1305 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1306 	keg->uk_ipers = 1;
1307 	keg->uk_rsize = keg->uk_size;
1308 
1309 	/* We can't do OFFPAGE if we're internal, bail out here. */
1310 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1311 		return;
1312 
1313 	/* Check whether we have enough space to not do OFFPAGE. */
1314 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1315 		shsize = sizeof(struct uma_slab);
1316 		if (shsize & UMA_ALIGN_PTR)
1317 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1318 			    (UMA_ALIGN_PTR + 1);
1319 
1320 		if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
1321 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1322 	}
1323 
1324 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1325 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1326 		keg->uk_flags |= UMA_ZONE_HASH;
1327 }
1328 
1329 static void
1330 keg_cachespread_init(uma_keg_t keg)
1331 {
1332 	int alignsize;
1333 	int trailer;
1334 	int pages;
1335 	int rsize;
1336 
1337 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1338 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1339 
1340 	alignsize = keg->uk_align + 1;
1341 	rsize = keg->uk_size;
1342 	/*
1343 	 * We want one item to start on every align boundary in a page.  To
1344 	 * do this we will span pages.  We will also extend the item by the
1345 	 * size of align if it is an even multiple of align.  Otherwise, it
1346 	 * would fall on the same boundary every time.
1347 	 */
1348 	if (rsize & keg->uk_align)
1349 		rsize = (rsize & ~keg->uk_align) + alignsize;
1350 	if ((rsize & alignsize) == 0)
1351 		rsize += alignsize;
1352 	trailer = rsize - keg->uk_size;
1353 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1354 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1355 	keg->uk_rsize = rsize;
1356 	keg->uk_ppera = pages;
1357 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1358 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1359 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1360 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1361 	    keg->uk_ipers));
1362 }
1363 
1364 /*
1365  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1366  * the keg onto the global keg list.
1367  *
1368  * Arguments/Returns follow uma_ctor specifications
1369  *	udata  Actually uma_kctor_args
1370  */
1371 static int
1372 keg_ctor(void *mem, int size, void *udata, int flags)
1373 {
1374 	struct uma_kctor_args *arg = udata;
1375 	uma_keg_t keg = mem;
1376 	uma_zone_t zone;
1377 
1378 	bzero(keg, size);
1379 	keg->uk_size = arg->size;
1380 	keg->uk_init = arg->uminit;
1381 	keg->uk_fini = arg->fini;
1382 	keg->uk_align = arg->align;
1383 	keg->uk_free = 0;
1384 	keg->uk_reserve = 0;
1385 	keg->uk_pages = 0;
1386 	keg->uk_flags = arg->flags;
1387 	keg->uk_slabzone = NULL;
1388 
1389 	/*
1390 	 * The master zone is passed to us at keg-creation time.
1391 	 */
1392 	zone = arg->zone;
1393 	keg->uk_name = zone->uz_name;
1394 
1395 	if (arg->flags & UMA_ZONE_VM)
1396 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1397 
1398 	if (arg->flags & UMA_ZONE_ZINIT)
1399 		keg->uk_init = zero_init;
1400 
1401 	if (arg->flags & UMA_ZONE_MALLOC)
1402 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1403 
1404 	if (arg->flags & UMA_ZONE_PCPU)
1405 #ifdef SMP
1406 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1407 #else
1408 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1409 #endif
1410 
1411 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1412 		keg_cachespread_init(keg);
1413 	} else {
1414 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1415 			keg_large_init(keg);
1416 		else
1417 			keg_small_init(keg);
1418 	}
1419 
1420 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1421 		keg->uk_slabzone = slabzone;
1422 
1423 	/*
1424 	 * If we haven't booted yet we need allocations to go through the
1425 	 * startup cache until the vm is ready.
1426 	 */
1427 	if (booted < UMA_STARTUP2)
1428 		keg->uk_allocf = startup_alloc;
1429 #ifdef UMA_MD_SMALL_ALLOC
1430 	else if (keg->uk_ppera == 1)
1431 		keg->uk_allocf = uma_small_alloc;
1432 #endif
1433 	else
1434 		keg->uk_allocf = page_alloc;
1435 #ifdef UMA_MD_SMALL_ALLOC
1436 	if (keg->uk_ppera == 1)
1437 		keg->uk_freef = uma_small_free;
1438 	else
1439 #endif
1440 		keg->uk_freef = page_free;
1441 
1442 	/*
1443 	 * Initialize keg's lock
1444 	 */
1445 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1446 
1447 	/*
1448 	 * If we're putting the slab header in the actual page we need to
1449 	 * figure out where in each page it goes.  This calculates a right
1450 	 * justified offset into the memory on an ALIGN_PTR boundary.
1451 	 */
1452 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1453 		u_int totsize;
1454 
1455 		/* Size of the slab struct and free list */
1456 		totsize = sizeof(struct uma_slab);
1457 
1458 		if (totsize & UMA_ALIGN_PTR)
1459 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1460 			    (UMA_ALIGN_PTR + 1);
1461 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1462 
1463 		/*
1464 		 * The only way the following is possible is if with our
1465 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1466 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1467 		 * mathematically possible for all cases, so we make
1468 		 * sure here anyway.
1469 		 */
1470 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1471 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1472 			printf("zone %s ipers %d rsize %d size %d\n",
1473 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1474 			    keg->uk_size);
1475 			panic("UMA slab won't fit.");
1476 		}
1477 	}
1478 
1479 	if (keg->uk_flags & UMA_ZONE_HASH)
1480 		hash_alloc(&keg->uk_hash);
1481 
1482 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1483 	    keg, zone->uz_name, zone,
1484 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1485 	    keg->uk_free);
1486 
1487 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1488 
1489 	rw_wlock(&uma_rwlock);
1490 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1491 	rw_wunlock(&uma_rwlock);
1492 	return (0);
1493 }
1494 
1495 /*
1496  * Zone header ctor.  This initializes all fields, locks, etc.
1497  *
1498  * Arguments/Returns follow uma_ctor specifications
1499  *	udata  Actually uma_zctor_args
1500  */
1501 static int
1502 zone_ctor(void *mem, int size, void *udata, int flags)
1503 {
1504 	struct uma_zctor_args *arg = udata;
1505 	uma_zone_t zone = mem;
1506 	uma_zone_t z;
1507 	uma_keg_t keg;
1508 
1509 	bzero(zone, size);
1510 	zone->uz_name = arg->name;
1511 	zone->uz_ctor = arg->ctor;
1512 	zone->uz_dtor = arg->dtor;
1513 	zone->uz_slab = zone_fetch_slab;
1514 	zone->uz_init = NULL;
1515 	zone->uz_fini = NULL;
1516 	zone->uz_allocs = 0;
1517 	zone->uz_frees = 0;
1518 	zone->uz_fails = 0;
1519 	zone->uz_sleeps = 0;
1520 	zone->uz_count = 0;
1521 	zone->uz_count_min = 0;
1522 	zone->uz_flags = 0;
1523 	zone->uz_warning = NULL;
1524 	timevalclear(&zone->uz_ratecheck);
1525 	keg = arg->keg;
1526 
1527 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1528 
1529 	/*
1530 	 * This is a pure cache zone, no kegs.
1531 	 */
1532 	if (arg->import) {
1533 		if (arg->flags & UMA_ZONE_VM)
1534 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1535 		zone->uz_flags = arg->flags;
1536 		zone->uz_size = arg->size;
1537 		zone->uz_import = arg->import;
1538 		zone->uz_release = arg->release;
1539 		zone->uz_arg = arg->arg;
1540 		zone->uz_lockptr = &zone->uz_lock;
1541 		rw_wlock(&uma_rwlock);
1542 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1543 		rw_wunlock(&uma_rwlock);
1544 		goto out;
1545 	}
1546 
1547 	/*
1548 	 * Use the regular zone/keg/slab allocator.
1549 	 */
1550 	zone->uz_import = (uma_import)zone_import;
1551 	zone->uz_release = (uma_release)zone_release;
1552 	zone->uz_arg = zone;
1553 
1554 	if (arg->flags & UMA_ZONE_SECONDARY) {
1555 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1556 		zone->uz_init = arg->uminit;
1557 		zone->uz_fini = arg->fini;
1558 		zone->uz_lockptr = &keg->uk_lock;
1559 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1560 		rw_wlock(&uma_rwlock);
1561 		ZONE_LOCK(zone);
1562 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1563 			if (LIST_NEXT(z, uz_link) == NULL) {
1564 				LIST_INSERT_AFTER(z, zone, uz_link);
1565 				break;
1566 			}
1567 		}
1568 		ZONE_UNLOCK(zone);
1569 		rw_wunlock(&uma_rwlock);
1570 	} else if (keg == NULL) {
1571 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1572 		    arg->align, arg->flags)) == NULL)
1573 			return (ENOMEM);
1574 	} else {
1575 		struct uma_kctor_args karg;
1576 		int error;
1577 
1578 		/* We should only be here from uma_startup() */
1579 		karg.size = arg->size;
1580 		karg.uminit = arg->uminit;
1581 		karg.fini = arg->fini;
1582 		karg.align = arg->align;
1583 		karg.flags = arg->flags;
1584 		karg.zone = zone;
1585 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1586 		    flags);
1587 		if (error)
1588 			return (error);
1589 	}
1590 
1591 	/*
1592 	 * Link in the first keg.
1593 	 */
1594 	zone->uz_klink.kl_keg = keg;
1595 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1596 	zone->uz_lockptr = &keg->uk_lock;
1597 	zone->uz_size = keg->uk_size;
1598 	zone->uz_flags |= (keg->uk_flags &
1599 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1600 
1601 	/*
1602 	 * Some internal zones don't have room allocated for the per cpu
1603 	 * caches.  If we're internal, bail out here.
1604 	 */
1605 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1606 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1607 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1608 		return (0);
1609 	}
1610 
1611 out:
1612 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1613 		zone->uz_count = bucket_select(zone->uz_size);
1614 	else
1615 		zone->uz_count = BUCKET_MAX;
1616 	zone->uz_count_min = zone->uz_count;
1617 
1618 	return (0);
1619 }
1620 
1621 /*
1622  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1623  * table and removes the keg from the global list.
1624  *
1625  * Arguments/Returns follow uma_dtor specifications
1626  *	udata  unused
1627  */
1628 static void
1629 keg_dtor(void *arg, int size, void *udata)
1630 {
1631 	uma_keg_t keg;
1632 
1633 	keg = (uma_keg_t)arg;
1634 	KEG_LOCK(keg);
1635 	if (keg->uk_free != 0) {
1636 		printf("Freed UMA keg (%s) was not empty (%d items). "
1637 		    " Lost %d pages of memory.\n",
1638 		    keg->uk_name ? keg->uk_name : "",
1639 		    keg->uk_free, keg->uk_pages);
1640 	}
1641 	KEG_UNLOCK(keg);
1642 
1643 	hash_free(&keg->uk_hash);
1644 
1645 	KEG_LOCK_FINI(keg);
1646 }
1647 
1648 /*
1649  * Zone header dtor.
1650  *
1651  * Arguments/Returns follow uma_dtor specifications
1652  *	udata  unused
1653  */
1654 static void
1655 zone_dtor(void *arg, int size, void *udata)
1656 {
1657 	uma_klink_t klink;
1658 	uma_zone_t zone;
1659 	uma_keg_t keg;
1660 
1661 	zone = (uma_zone_t)arg;
1662 	keg = zone_first_keg(zone);
1663 
1664 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1665 		cache_drain(zone);
1666 
1667 	rw_wlock(&uma_rwlock);
1668 	LIST_REMOVE(zone, uz_link);
1669 	rw_wunlock(&uma_rwlock);
1670 	/*
1671 	 * XXX there are some races here where
1672 	 * the zone can be drained but zone lock
1673 	 * released and then refilled before we
1674 	 * remove it... we dont care for now
1675 	 */
1676 	zone_drain_wait(zone, M_WAITOK);
1677 	/*
1678 	 * Unlink all of our kegs.
1679 	 */
1680 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1681 		klink->kl_keg = NULL;
1682 		LIST_REMOVE(klink, kl_link);
1683 		if (klink == &zone->uz_klink)
1684 			continue;
1685 		free(klink, M_TEMP);
1686 	}
1687 	/*
1688 	 * We only destroy kegs from non secondary zones.
1689 	 */
1690 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1691 		rw_wlock(&uma_rwlock);
1692 		LIST_REMOVE(keg, uk_link);
1693 		rw_wunlock(&uma_rwlock);
1694 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1695 	}
1696 	ZONE_LOCK_FINI(zone);
1697 }
1698 
1699 /*
1700  * Traverses every zone in the system and calls a callback
1701  *
1702  * Arguments:
1703  *	zfunc  A pointer to a function which accepts a zone
1704  *		as an argument.
1705  *
1706  * Returns:
1707  *	Nothing
1708  */
1709 static void
1710 zone_foreach(void (*zfunc)(uma_zone_t))
1711 {
1712 	uma_keg_t keg;
1713 	uma_zone_t zone;
1714 
1715 	rw_rlock(&uma_rwlock);
1716 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1717 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1718 			zfunc(zone);
1719 	}
1720 	rw_runlock(&uma_rwlock);
1721 }
1722 
1723 /* Public functions */
1724 /* See uma.h */
1725 void
1726 uma_startup(void *mem, int npages)
1727 {
1728 	struct uma_zctor_args args;
1729 
1730 	rw_init(&uma_rwlock, "UMA lock");
1731 
1732 	/* "manually" create the initial zone */
1733 	memset(&args, 0, sizeof(args));
1734 	args.name = "UMA Kegs";
1735 	args.size = sizeof(struct uma_keg);
1736 	args.ctor = keg_ctor;
1737 	args.dtor = keg_dtor;
1738 	args.uminit = zero_init;
1739 	args.fini = NULL;
1740 	args.keg = &masterkeg;
1741 	args.align = 32 - 1;
1742 	args.flags = UMA_ZFLAG_INTERNAL;
1743 	/* The initial zone has no Per cpu queues so it's smaller */
1744 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1745 
1746 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1747 	bootmem = mem;
1748 	boot_pages = npages;
1749 
1750 	args.name = "UMA Zones";
1751 	args.size = sizeof(struct uma_zone) +
1752 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1753 	args.ctor = zone_ctor;
1754 	args.dtor = zone_dtor;
1755 	args.uminit = zero_init;
1756 	args.fini = NULL;
1757 	args.keg = NULL;
1758 	args.align = 32 - 1;
1759 	args.flags = UMA_ZFLAG_INTERNAL;
1760 	/* The initial zone has no Per cpu queues so it's smaller */
1761 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1762 
1763 	/* Now make a zone for slab headers */
1764 	slabzone = uma_zcreate("UMA Slabs",
1765 				sizeof(struct uma_slab),
1766 				NULL, NULL, NULL, NULL,
1767 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1768 
1769 	hashzone = uma_zcreate("UMA Hash",
1770 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1771 	    NULL, NULL, NULL, NULL,
1772 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1773 
1774 	bucket_init();
1775 
1776 	booted = UMA_STARTUP;
1777 }
1778 
1779 /* see uma.h */
1780 void
1781 uma_startup2(void)
1782 {
1783 	booted = UMA_STARTUP2;
1784 	bucket_enable();
1785 	sx_init(&uma_drain_lock, "umadrain");
1786 }
1787 
1788 /*
1789  * Initialize our callout handle
1790  *
1791  */
1792 
1793 static void
1794 uma_startup3(void)
1795 {
1796 
1797 	callout_init(&uma_callout, 1);
1798 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1799 }
1800 
1801 static uma_keg_t
1802 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1803 		int align, uint32_t flags)
1804 {
1805 	struct uma_kctor_args args;
1806 
1807 	args.size = size;
1808 	args.uminit = uminit;
1809 	args.fini = fini;
1810 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1811 	args.flags = flags;
1812 	args.zone = zone;
1813 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1814 }
1815 
1816 /* See uma.h */
1817 void
1818 uma_set_align(int align)
1819 {
1820 
1821 	if (align != UMA_ALIGN_CACHE)
1822 		uma_align_cache = align;
1823 }
1824 
1825 /* See uma.h */
1826 uma_zone_t
1827 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1828 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
1829 
1830 {
1831 	struct uma_zctor_args args;
1832 	uma_zone_t res;
1833 	bool locked;
1834 
1835 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
1836 	    align, name));
1837 
1838 	/* This stuff is essential for the zone ctor */
1839 	memset(&args, 0, sizeof(args));
1840 	args.name = name;
1841 	args.size = size;
1842 	args.ctor = ctor;
1843 	args.dtor = dtor;
1844 	args.uminit = uminit;
1845 	args.fini = fini;
1846 #ifdef  INVARIANTS
1847 	/*
1848 	 * If a zone is being created with an empty constructor and
1849 	 * destructor, pass UMA constructor/destructor which checks for
1850 	 * memory use after free.
1851 	 */
1852 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
1853 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1854 		args.ctor = trash_ctor;
1855 		args.dtor = trash_dtor;
1856 		args.uminit = trash_init;
1857 		args.fini = trash_fini;
1858 	}
1859 #endif
1860 	args.align = align;
1861 	args.flags = flags;
1862 	args.keg = NULL;
1863 
1864 	if (booted < UMA_STARTUP2) {
1865 		locked = false;
1866 	} else {
1867 		sx_slock(&uma_drain_lock);
1868 		locked = true;
1869 	}
1870 	res = zone_alloc_item(zones, &args, M_WAITOK);
1871 	if (locked)
1872 		sx_sunlock(&uma_drain_lock);
1873 	return (res);
1874 }
1875 
1876 /* See uma.h */
1877 uma_zone_t
1878 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1879 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1880 {
1881 	struct uma_zctor_args args;
1882 	uma_keg_t keg;
1883 	uma_zone_t res;
1884 	bool locked;
1885 
1886 	keg = zone_first_keg(master);
1887 	memset(&args, 0, sizeof(args));
1888 	args.name = name;
1889 	args.size = keg->uk_size;
1890 	args.ctor = ctor;
1891 	args.dtor = dtor;
1892 	args.uminit = zinit;
1893 	args.fini = zfini;
1894 	args.align = keg->uk_align;
1895 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1896 	args.keg = keg;
1897 
1898 	if (booted < UMA_STARTUP2) {
1899 		locked = false;
1900 	} else {
1901 		sx_slock(&uma_drain_lock);
1902 		locked = true;
1903 	}
1904 	/* XXX Attaches only one keg of potentially many. */
1905 	res = zone_alloc_item(zones, &args, M_WAITOK);
1906 	if (locked)
1907 		sx_sunlock(&uma_drain_lock);
1908 	return (res);
1909 }
1910 
1911 /* See uma.h */
1912 uma_zone_t
1913 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1914 		    uma_init zinit, uma_fini zfini, uma_import zimport,
1915 		    uma_release zrelease, void *arg, int flags)
1916 {
1917 	struct uma_zctor_args args;
1918 
1919 	memset(&args, 0, sizeof(args));
1920 	args.name = name;
1921 	args.size = size;
1922 	args.ctor = ctor;
1923 	args.dtor = dtor;
1924 	args.uminit = zinit;
1925 	args.fini = zfini;
1926 	args.import = zimport;
1927 	args.release = zrelease;
1928 	args.arg = arg;
1929 	args.align = 0;
1930 	args.flags = flags;
1931 
1932 	return (zone_alloc_item(zones, &args, M_WAITOK));
1933 }
1934 
1935 static void
1936 zone_lock_pair(uma_zone_t a, uma_zone_t b)
1937 {
1938 	if (a < b) {
1939 		ZONE_LOCK(a);
1940 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1941 	} else {
1942 		ZONE_LOCK(b);
1943 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1944 	}
1945 }
1946 
1947 static void
1948 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1949 {
1950 
1951 	ZONE_UNLOCK(a);
1952 	ZONE_UNLOCK(b);
1953 }
1954 
1955 int
1956 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1957 {
1958 	uma_klink_t klink;
1959 	uma_klink_t kl;
1960 	int error;
1961 
1962 	error = 0;
1963 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1964 
1965 	zone_lock_pair(zone, master);
1966 	/*
1967 	 * zone must use vtoslab() to resolve objects and must already be
1968 	 * a secondary.
1969 	 */
1970 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1971 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1972 		error = EINVAL;
1973 		goto out;
1974 	}
1975 	/*
1976 	 * The new master must also use vtoslab().
1977 	 */
1978 	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1979 		error = EINVAL;
1980 		goto out;
1981 	}
1982 
1983 	/*
1984 	 * The underlying object must be the same size.  rsize
1985 	 * may be different.
1986 	 */
1987 	if (master->uz_size != zone->uz_size) {
1988 		error = E2BIG;
1989 		goto out;
1990 	}
1991 	/*
1992 	 * Put it at the end of the list.
1993 	 */
1994 	klink->kl_keg = zone_first_keg(master);
1995 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
1996 		if (LIST_NEXT(kl, kl_link) == NULL) {
1997 			LIST_INSERT_AFTER(kl, klink, kl_link);
1998 			break;
1999 		}
2000 	}
2001 	klink = NULL;
2002 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2003 	zone->uz_slab = zone_fetch_slab_multi;
2004 
2005 out:
2006 	zone_unlock_pair(zone, master);
2007 	if (klink != NULL)
2008 		free(klink, M_TEMP);
2009 
2010 	return (error);
2011 }
2012 
2013 
2014 /* See uma.h */
2015 void
2016 uma_zdestroy(uma_zone_t zone)
2017 {
2018 
2019 	sx_slock(&uma_drain_lock);
2020 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2021 	sx_sunlock(&uma_drain_lock);
2022 }
2023 
2024 /* See uma.h */
2025 void *
2026 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2027 {
2028 	void *item;
2029 	uma_cache_t cache;
2030 	uma_bucket_t bucket;
2031 	int lockfail;
2032 	int cpu;
2033 
2034 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2035 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2036 
2037 	/* This is the fast path allocation */
2038 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2039 	    curthread, zone->uz_name, zone, flags);
2040 
2041 	if (flags & M_WAITOK) {
2042 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2043 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2044 	}
2045 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2046 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2047 
2048 #ifdef DEBUG_MEMGUARD
2049 	if (memguard_cmp_zone(zone)) {
2050 		item = memguard_alloc(zone->uz_size, flags);
2051 		if (item != NULL) {
2052 			if (zone->uz_init != NULL &&
2053 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2054 				return (NULL);
2055 			if (zone->uz_ctor != NULL &&
2056 			    zone->uz_ctor(item, zone->uz_size, udata,
2057 			    flags) != 0) {
2058 			    	zone->uz_fini(item, zone->uz_size);
2059 				return (NULL);
2060 			}
2061 			return (item);
2062 		}
2063 		/* This is unfortunate but should not be fatal. */
2064 	}
2065 #endif
2066 	/*
2067 	 * If possible, allocate from the per-CPU cache.  There are two
2068 	 * requirements for safe access to the per-CPU cache: (1) the thread
2069 	 * accessing the cache must not be preempted or yield during access,
2070 	 * and (2) the thread must not migrate CPUs without switching which
2071 	 * cache it accesses.  We rely on a critical section to prevent
2072 	 * preemption and migration.  We release the critical section in
2073 	 * order to acquire the zone mutex if we are unable to allocate from
2074 	 * the current cache; when we re-acquire the critical section, we
2075 	 * must detect and handle migration if it has occurred.
2076 	 */
2077 	critical_enter();
2078 	cpu = curcpu;
2079 	cache = &zone->uz_cpu[cpu];
2080 
2081 zalloc_start:
2082 	bucket = cache->uc_allocbucket;
2083 	if (bucket != NULL && bucket->ub_cnt > 0) {
2084 		bucket->ub_cnt--;
2085 		item = bucket->ub_bucket[bucket->ub_cnt];
2086 #ifdef INVARIANTS
2087 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2088 #endif
2089 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2090 		cache->uc_allocs++;
2091 		critical_exit();
2092 		if (zone->uz_ctor != NULL &&
2093 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2094 			atomic_add_long(&zone->uz_fails, 1);
2095 			zone_free_item(zone, item, udata, SKIP_DTOR);
2096 			return (NULL);
2097 		}
2098 #ifdef INVARIANTS
2099 		uma_dbg_alloc(zone, NULL, item);
2100 #endif
2101 		if (flags & M_ZERO)
2102 			uma_zero_item(item, zone);
2103 		return (item);
2104 	}
2105 
2106 	/*
2107 	 * We have run out of items in our alloc bucket.
2108 	 * See if we can switch with our free bucket.
2109 	 */
2110 	bucket = cache->uc_freebucket;
2111 	if (bucket != NULL && bucket->ub_cnt > 0) {
2112 		CTR2(KTR_UMA,
2113 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
2114 		    zone->uz_name, zone);
2115 		cache->uc_freebucket = cache->uc_allocbucket;
2116 		cache->uc_allocbucket = bucket;
2117 		goto zalloc_start;
2118 	}
2119 
2120 	/*
2121 	 * Discard any empty allocation bucket while we hold no locks.
2122 	 */
2123 	bucket = cache->uc_allocbucket;
2124 	cache->uc_allocbucket = NULL;
2125 	critical_exit();
2126 	if (bucket != NULL)
2127 		bucket_free(zone, bucket, udata);
2128 
2129 	/* Short-circuit for zones without buckets and low memory. */
2130 	if (zone->uz_count == 0 || bucketdisable)
2131 		goto zalloc_item;
2132 
2133 	/*
2134 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
2135 	 * we must go back to the zone.  This requires the zone lock, so we
2136 	 * must drop the critical section, then re-acquire it when we go back
2137 	 * to the cache.  Since the critical section is released, we may be
2138 	 * preempted or migrate.  As such, make sure not to maintain any
2139 	 * thread-local state specific to the cache from prior to releasing
2140 	 * the critical section.
2141 	 */
2142 	lockfail = 0;
2143 	if (ZONE_TRYLOCK(zone) == 0) {
2144 		/* Record contention to size the buckets. */
2145 		ZONE_LOCK(zone);
2146 		lockfail = 1;
2147 	}
2148 	critical_enter();
2149 	cpu = curcpu;
2150 	cache = &zone->uz_cpu[cpu];
2151 
2152 	/*
2153 	 * Since we have locked the zone we may as well send back our stats.
2154 	 */
2155 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2156 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2157 	cache->uc_allocs = 0;
2158 	cache->uc_frees = 0;
2159 
2160 	/* See if we lost the race to fill the cache. */
2161 	if (cache->uc_allocbucket != NULL) {
2162 		ZONE_UNLOCK(zone);
2163 		goto zalloc_start;
2164 	}
2165 
2166 	/*
2167 	 * Check the zone's cache of buckets.
2168 	 */
2169 	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2170 		KASSERT(bucket->ub_cnt != 0,
2171 		    ("uma_zalloc_arg: Returning an empty bucket."));
2172 
2173 		LIST_REMOVE(bucket, ub_link);
2174 		cache->uc_allocbucket = bucket;
2175 		ZONE_UNLOCK(zone);
2176 		goto zalloc_start;
2177 	}
2178 	/* We are no longer associated with this CPU. */
2179 	critical_exit();
2180 
2181 	/*
2182 	 * We bump the uz count when the cache size is insufficient to
2183 	 * handle the working set.
2184 	 */
2185 	if (lockfail && zone->uz_count < BUCKET_MAX)
2186 		zone->uz_count++;
2187 	ZONE_UNLOCK(zone);
2188 
2189 	/*
2190 	 * Now lets just fill a bucket and put it on the free list.  If that
2191 	 * works we'll restart the allocation from the beginning and it
2192 	 * will use the just filled bucket.
2193 	 */
2194 	bucket = zone_alloc_bucket(zone, udata, flags);
2195 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2196 	    zone->uz_name, zone, bucket);
2197 	if (bucket != NULL) {
2198 		ZONE_LOCK(zone);
2199 		critical_enter();
2200 		cpu = curcpu;
2201 		cache = &zone->uz_cpu[cpu];
2202 		/*
2203 		 * See if we lost the race or were migrated.  Cache the
2204 		 * initialized bucket to make this less likely or claim
2205 		 * the memory directly.
2206 		 */
2207 		if (cache->uc_allocbucket == NULL)
2208 			cache->uc_allocbucket = bucket;
2209 		else
2210 			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2211 		ZONE_UNLOCK(zone);
2212 		goto zalloc_start;
2213 	}
2214 
2215 	/*
2216 	 * We may not be able to get a bucket so return an actual item.
2217 	 */
2218 zalloc_item:
2219 	item = zone_alloc_item(zone, udata, flags);
2220 
2221 	return (item);
2222 }
2223 
2224 static uma_slab_t
2225 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2226 {
2227 	uma_slab_t slab;
2228 	int reserve;
2229 
2230 	mtx_assert(&keg->uk_lock, MA_OWNED);
2231 	slab = NULL;
2232 	reserve = 0;
2233 	if ((flags & M_USE_RESERVE) == 0)
2234 		reserve = keg->uk_reserve;
2235 
2236 	for (;;) {
2237 		/*
2238 		 * Find a slab with some space.  Prefer slabs that are partially
2239 		 * used over those that are totally full.  This helps to reduce
2240 		 * fragmentation.
2241 		 */
2242 		if (keg->uk_free > reserve) {
2243 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2244 				slab = LIST_FIRST(&keg->uk_part_slab);
2245 			} else {
2246 				slab = LIST_FIRST(&keg->uk_free_slab);
2247 				LIST_REMOVE(slab, us_link);
2248 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2249 				    us_link);
2250 			}
2251 			MPASS(slab->us_keg == keg);
2252 			return (slab);
2253 		}
2254 
2255 		/*
2256 		 * M_NOVM means don't ask at all!
2257 		 */
2258 		if (flags & M_NOVM)
2259 			break;
2260 
2261 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2262 			keg->uk_flags |= UMA_ZFLAG_FULL;
2263 			/*
2264 			 * If this is not a multi-zone, set the FULL bit.
2265 			 * Otherwise slab_multi() takes care of it.
2266 			 */
2267 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2268 				zone->uz_flags |= UMA_ZFLAG_FULL;
2269 				zone_log_warning(zone);
2270 				zone_maxaction(zone);
2271 			}
2272 			if (flags & M_NOWAIT)
2273 				break;
2274 			zone->uz_sleeps++;
2275 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2276 			continue;
2277 		}
2278 		slab = keg_alloc_slab(keg, zone, flags);
2279 		/*
2280 		 * If we got a slab here it's safe to mark it partially used
2281 		 * and return.  We assume that the caller is going to remove
2282 		 * at least one item.
2283 		 */
2284 		if (slab) {
2285 			MPASS(slab->us_keg == keg);
2286 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2287 			return (slab);
2288 		}
2289 		/*
2290 		 * We might not have been able to get a slab but another cpu
2291 		 * could have while we were unlocked.  Check again before we
2292 		 * fail.
2293 		 */
2294 		flags |= M_NOVM;
2295 	}
2296 	return (slab);
2297 }
2298 
2299 static uma_slab_t
2300 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2301 {
2302 	uma_slab_t slab;
2303 
2304 	if (keg == NULL) {
2305 		keg = zone_first_keg(zone);
2306 		KEG_LOCK(keg);
2307 	}
2308 
2309 	for (;;) {
2310 		slab = keg_fetch_slab(keg, zone, flags);
2311 		if (slab)
2312 			return (slab);
2313 		if (flags & (M_NOWAIT | M_NOVM))
2314 			break;
2315 	}
2316 	KEG_UNLOCK(keg);
2317 	return (NULL);
2318 }
2319 
2320 /*
2321  * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2322  * with the keg locked.  On NULL no lock is held.
2323  *
2324  * The last pointer is used to seed the search.  It is not required.
2325  */
2326 static uma_slab_t
2327 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2328 {
2329 	uma_klink_t klink;
2330 	uma_slab_t slab;
2331 	uma_keg_t keg;
2332 	int flags;
2333 	int empty;
2334 	int full;
2335 
2336 	/*
2337 	 * Don't wait on the first pass.  This will skip limit tests
2338 	 * as well.  We don't want to block if we can find a provider
2339 	 * without blocking.
2340 	 */
2341 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2342 	/*
2343 	 * Use the last slab allocated as a hint for where to start
2344 	 * the search.
2345 	 */
2346 	if (last != NULL) {
2347 		slab = keg_fetch_slab(last, zone, flags);
2348 		if (slab)
2349 			return (slab);
2350 		KEG_UNLOCK(last);
2351 	}
2352 	/*
2353 	 * Loop until we have a slab incase of transient failures
2354 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2355 	 * required but we've done it for so long now.
2356 	 */
2357 	for (;;) {
2358 		empty = 0;
2359 		full = 0;
2360 		/*
2361 		 * Search the available kegs for slabs.  Be careful to hold the
2362 		 * correct lock while calling into the keg layer.
2363 		 */
2364 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2365 			keg = klink->kl_keg;
2366 			KEG_LOCK(keg);
2367 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2368 				slab = keg_fetch_slab(keg, zone, flags);
2369 				if (slab)
2370 					return (slab);
2371 			}
2372 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2373 				full++;
2374 			else
2375 				empty++;
2376 			KEG_UNLOCK(keg);
2377 		}
2378 		if (rflags & (M_NOWAIT | M_NOVM))
2379 			break;
2380 		flags = rflags;
2381 		/*
2382 		 * All kegs are full.  XXX We can't atomically check all kegs
2383 		 * and sleep so just sleep for a short period and retry.
2384 		 */
2385 		if (full && !empty) {
2386 			ZONE_LOCK(zone);
2387 			zone->uz_flags |= UMA_ZFLAG_FULL;
2388 			zone->uz_sleeps++;
2389 			zone_log_warning(zone);
2390 			zone_maxaction(zone);
2391 			msleep(zone, zone->uz_lockptr, PVM,
2392 			    "zonelimit", hz/100);
2393 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2394 			ZONE_UNLOCK(zone);
2395 			continue;
2396 		}
2397 	}
2398 	return (NULL);
2399 }
2400 
2401 static void *
2402 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2403 {
2404 	void *item;
2405 	uint8_t freei;
2406 
2407 	MPASS(keg == slab->us_keg);
2408 	mtx_assert(&keg->uk_lock, MA_OWNED);
2409 
2410 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2411 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2412 	item = slab->us_data + (keg->uk_rsize * freei);
2413 	slab->us_freecount--;
2414 	keg->uk_free--;
2415 
2416 	/* Move this slab to the full list */
2417 	if (slab->us_freecount == 0) {
2418 		LIST_REMOVE(slab, us_link);
2419 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2420 	}
2421 
2422 	return (item);
2423 }
2424 
2425 static int
2426 zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2427 {
2428 	uma_slab_t slab;
2429 	uma_keg_t keg;
2430 	int i;
2431 
2432 	slab = NULL;
2433 	keg = NULL;
2434 	/* Try to keep the buckets totally full */
2435 	for (i = 0; i < max; ) {
2436 		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2437 			break;
2438 		keg = slab->us_keg;
2439 		while (slab->us_freecount && i < max) {
2440 			bucket[i++] = slab_alloc_item(keg, slab);
2441 			if (keg->uk_free <= keg->uk_reserve)
2442 				break;
2443 		}
2444 		/* Don't grab more than one slab at a time. */
2445 		flags &= ~M_WAITOK;
2446 		flags |= M_NOWAIT;
2447 	}
2448 	if (slab != NULL)
2449 		KEG_UNLOCK(keg);
2450 
2451 	return i;
2452 }
2453 
2454 static uma_bucket_t
2455 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2456 {
2457 	uma_bucket_t bucket;
2458 	int max;
2459 
2460 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2461 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2462 	if (bucket == NULL)
2463 		return (NULL);
2464 
2465 	max = MIN(bucket->ub_entries, zone->uz_count);
2466 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2467 	    max, flags);
2468 
2469 	/*
2470 	 * Initialize the memory if necessary.
2471 	 */
2472 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2473 		int i;
2474 
2475 		for (i = 0; i < bucket->ub_cnt; i++)
2476 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2477 			    flags) != 0)
2478 				break;
2479 		/*
2480 		 * If we couldn't initialize the whole bucket, put the
2481 		 * rest back onto the freelist.
2482 		 */
2483 		if (i != bucket->ub_cnt) {
2484 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2485 			    bucket->ub_cnt - i);
2486 #ifdef INVARIANTS
2487 			bzero(&bucket->ub_bucket[i],
2488 			    sizeof(void *) * (bucket->ub_cnt - i));
2489 #endif
2490 			bucket->ub_cnt = i;
2491 		}
2492 	}
2493 
2494 	if (bucket->ub_cnt == 0) {
2495 		bucket_free(zone, bucket, udata);
2496 		atomic_add_long(&zone->uz_fails, 1);
2497 		return (NULL);
2498 	}
2499 
2500 	return (bucket);
2501 }
2502 
2503 /*
2504  * Allocates a single item from a zone.
2505  *
2506  * Arguments
2507  *	zone   The zone to alloc for.
2508  *	udata  The data to be passed to the constructor.
2509  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2510  *
2511  * Returns
2512  *	NULL if there is no memory and M_NOWAIT is set
2513  *	An item if successful
2514  */
2515 
2516 static void *
2517 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2518 {
2519 	void *item;
2520 
2521 	item = NULL;
2522 
2523 	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2524 		goto fail;
2525 	atomic_add_long(&zone->uz_allocs, 1);
2526 
2527 	/*
2528 	 * We have to call both the zone's init (not the keg's init)
2529 	 * and the zone's ctor.  This is because the item is going from
2530 	 * a keg slab directly to the user, and the user is expecting it
2531 	 * to be both zone-init'd as well as zone-ctor'd.
2532 	 */
2533 	if (zone->uz_init != NULL) {
2534 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2535 			zone_free_item(zone, item, udata, SKIP_FINI);
2536 			goto fail;
2537 		}
2538 	}
2539 	if (zone->uz_ctor != NULL) {
2540 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2541 			zone_free_item(zone, item, udata, SKIP_DTOR);
2542 			goto fail;
2543 		}
2544 	}
2545 #ifdef INVARIANTS
2546 	uma_dbg_alloc(zone, NULL, item);
2547 #endif
2548 	if (flags & M_ZERO)
2549 		uma_zero_item(item, zone);
2550 
2551 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
2552 	    zone->uz_name, zone);
2553 
2554 	return (item);
2555 
2556 fail:
2557 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
2558 	    zone->uz_name, zone);
2559 	atomic_add_long(&zone->uz_fails, 1);
2560 	return (NULL);
2561 }
2562 
2563 /* See uma.h */
2564 void
2565 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2566 {
2567 	uma_cache_t cache;
2568 	uma_bucket_t bucket;
2569 	int lockfail;
2570 	int cpu;
2571 
2572 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2573 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2574 
2575 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2576 	    zone->uz_name);
2577 
2578 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2579 	    ("uma_zfree_arg: called with spinlock or critical section held"));
2580 
2581         /* uma_zfree(..., NULL) does nothing, to match free(9). */
2582         if (item == NULL)
2583                 return;
2584 #ifdef DEBUG_MEMGUARD
2585 	if (is_memguard_addr(item)) {
2586 		if (zone->uz_dtor != NULL)
2587 			zone->uz_dtor(item, zone->uz_size, udata);
2588 		if (zone->uz_fini != NULL)
2589 			zone->uz_fini(item, zone->uz_size);
2590 		memguard_free(item);
2591 		return;
2592 	}
2593 #endif
2594 #ifdef INVARIANTS
2595 	if (zone->uz_flags & UMA_ZONE_MALLOC)
2596 		uma_dbg_free(zone, udata, item);
2597 	else
2598 		uma_dbg_free(zone, NULL, item);
2599 #endif
2600 	if (zone->uz_dtor != NULL)
2601 		zone->uz_dtor(item, zone->uz_size, udata);
2602 
2603 	/*
2604 	 * The race here is acceptable.  If we miss it we'll just have to wait
2605 	 * a little longer for the limits to be reset.
2606 	 */
2607 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2608 		goto zfree_item;
2609 
2610 	/*
2611 	 * If possible, free to the per-CPU cache.  There are two
2612 	 * requirements for safe access to the per-CPU cache: (1) the thread
2613 	 * accessing the cache must not be preempted or yield during access,
2614 	 * and (2) the thread must not migrate CPUs without switching which
2615 	 * cache it accesses.  We rely on a critical section to prevent
2616 	 * preemption and migration.  We release the critical section in
2617 	 * order to acquire the zone mutex if we are unable to free to the
2618 	 * current cache; when we re-acquire the critical section, we must
2619 	 * detect and handle migration if it has occurred.
2620 	 */
2621 zfree_restart:
2622 	critical_enter();
2623 	cpu = curcpu;
2624 	cache = &zone->uz_cpu[cpu];
2625 
2626 zfree_start:
2627 	/*
2628 	 * Try to free into the allocbucket first to give LIFO ordering
2629 	 * for cache-hot datastructures.  Spill over into the freebucket
2630 	 * if necessary.  Alloc will swap them if one runs dry.
2631 	 */
2632 	bucket = cache->uc_allocbucket;
2633 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2634 		bucket = cache->uc_freebucket;
2635 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2636 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2637 		    ("uma_zfree: Freeing to non free bucket index."));
2638 		bucket->ub_bucket[bucket->ub_cnt] = item;
2639 		bucket->ub_cnt++;
2640 		cache->uc_frees++;
2641 		critical_exit();
2642 		return;
2643 	}
2644 
2645 	/*
2646 	 * We must go back the zone, which requires acquiring the zone lock,
2647 	 * which in turn means we must release and re-acquire the critical
2648 	 * section.  Since the critical section is released, we may be
2649 	 * preempted or migrate.  As such, make sure not to maintain any
2650 	 * thread-local state specific to the cache from prior to releasing
2651 	 * the critical section.
2652 	 */
2653 	critical_exit();
2654 	if (zone->uz_count == 0 || bucketdisable)
2655 		goto zfree_item;
2656 
2657 	lockfail = 0;
2658 	if (ZONE_TRYLOCK(zone) == 0) {
2659 		/* Record contention to size the buckets. */
2660 		ZONE_LOCK(zone);
2661 		lockfail = 1;
2662 	}
2663 	critical_enter();
2664 	cpu = curcpu;
2665 	cache = &zone->uz_cpu[cpu];
2666 
2667 	/*
2668 	 * Since we have locked the zone we may as well send back our stats.
2669 	 */
2670 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2671 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2672 	cache->uc_allocs = 0;
2673 	cache->uc_frees = 0;
2674 
2675 	bucket = cache->uc_freebucket;
2676 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2677 		ZONE_UNLOCK(zone);
2678 		goto zfree_start;
2679 	}
2680 	cache->uc_freebucket = NULL;
2681 	/* We are no longer associated with this CPU. */
2682 	critical_exit();
2683 
2684 	/* Can we throw this on the zone full list? */
2685 	if (bucket != NULL) {
2686 		CTR3(KTR_UMA,
2687 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
2688 		    zone->uz_name, zone, bucket);
2689 		/* ub_cnt is pointing to the last free item */
2690 		KASSERT(bucket->ub_cnt != 0,
2691 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2692 		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2693 	}
2694 
2695 	/*
2696 	 * We bump the uz count when the cache size is insufficient to
2697 	 * handle the working set.
2698 	 */
2699 	if (lockfail && zone->uz_count < BUCKET_MAX)
2700 		zone->uz_count++;
2701 	ZONE_UNLOCK(zone);
2702 
2703 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
2704 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
2705 	    zone->uz_name, zone, bucket);
2706 	if (bucket) {
2707 		critical_enter();
2708 		cpu = curcpu;
2709 		cache = &zone->uz_cpu[cpu];
2710 		if (cache->uc_freebucket == NULL) {
2711 			cache->uc_freebucket = bucket;
2712 			goto zfree_start;
2713 		}
2714 		/*
2715 		 * We lost the race, start over.  We have to drop our
2716 		 * critical section to free the bucket.
2717 		 */
2718 		critical_exit();
2719 		bucket_free(zone, bucket, udata);
2720 		goto zfree_restart;
2721 	}
2722 
2723 	/*
2724 	 * If nothing else caught this, we'll just do an internal free.
2725 	 */
2726 zfree_item:
2727 	zone_free_item(zone, item, udata, SKIP_DTOR);
2728 
2729 	return;
2730 }
2731 
2732 static void
2733 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2734 {
2735 	uint8_t freei;
2736 
2737 	mtx_assert(&keg->uk_lock, MA_OWNED);
2738 	MPASS(keg == slab->us_keg);
2739 
2740 	/* Do we need to remove from any lists? */
2741 	if (slab->us_freecount+1 == keg->uk_ipers) {
2742 		LIST_REMOVE(slab, us_link);
2743 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2744 	} else if (slab->us_freecount == 0) {
2745 		LIST_REMOVE(slab, us_link);
2746 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2747 	}
2748 
2749 	/* Slab management. */
2750 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2751 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2752 	slab->us_freecount++;
2753 
2754 	/* Keg statistics. */
2755 	keg->uk_free++;
2756 }
2757 
2758 static void
2759 zone_release(uma_zone_t zone, void **bucket, int cnt)
2760 {
2761 	void *item;
2762 	uma_slab_t slab;
2763 	uma_keg_t keg;
2764 	uint8_t *mem;
2765 	int clearfull;
2766 	int i;
2767 
2768 	clearfull = 0;
2769 	keg = zone_first_keg(zone);
2770 	KEG_LOCK(keg);
2771 	for (i = 0; i < cnt; i++) {
2772 		item = bucket[i];
2773 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2774 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2775 			if (zone->uz_flags & UMA_ZONE_HASH) {
2776 				slab = hash_sfind(&keg->uk_hash, mem);
2777 			} else {
2778 				mem += keg->uk_pgoff;
2779 				slab = (uma_slab_t)mem;
2780 			}
2781 		} else {
2782 			slab = vtoslab((vm_offset_t)item);
2783 			if (slab->us_keg != keg) {
2784 				KEG_UNLOCK(keg);
2785 				keg = slab->us_keg;
2786 				KEG_LOCK(keg);
2787 			}
2788 		}
2789 		slab_free_item(keg, slab, item);
2790 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2791 			if (keg->uk_pages < keg->uk_maxpages) {
2792 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2793 				clearfull = 1;
2794 			}
2795 
2796 			/*
2797 			 * We can handle one more allocation. Since we're
2798 			 * clearing ZFLAG_FULL, wake up all procs blocked
2799 			 * on pages. This should be uncommon, so keeping this
2800 			 * simple for now (rather than adding count of blocked
2801 			 * threads etc).
2802 			 */
2803 			wakeup(keg);
2804 		}
2805 	}
2806 	KEG_UNLOCK(keg);
2807 	if (clearfull) {
2808 		ZONE_LOCK(zone);
2809 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2810 		wakeup(zone);
2811 		ZONE_UNLOCK(zone);
2812 	}
2813 
2814 }
2815 
2816 /*
2817  * Frees a single item to any zone.
2818  *
2819  * Arguments:
2820  *	zone   The zone to free to
2821  *	item   The item we're freeing
2822  *	udata  User supplied data for the dtor
2823  *	skip   Skip dtors and finis
2824  */
2825 static void
2826 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2827 {
2828 
2829 #ifdef INVARIANTS
2830 	if (skip == SKIP_NONE) {
2831 		if (zone->uz_flags & UMA_ZONE_MALLOC)
2832 			uma_dbg_free(zone, udata, item);
2833 		else
2834 			uma_dbg_free(zone, NULL, item);
2835 	}
2836 #endif
2837 	if (skip < SKIP_DTOR && zone->uz_dtor)
2838 		zone->uz_dtor(item, zone->uz_size, udata);
2839 
2840 	if (skip < SKIP_FINI && zone->uz_fini)
2841 		zone->uz_fini(item, zone->uz_size);
2842 
2843 	atomic_add_long(&zone->uz_frees, 1);
2844 	zone->uz_release(zone->uz_arg, &item, 1);
2845 }
2846 
2847 /* See uma.h */
2848 int
2849 uma_zone_set_max(uma_zone_t zone, int nitems)
2850 {
2851 	uma_keg_t keg;
2852 
2853 	keg = zone_first_keg(zone);
2854 	if (keg == NULL)
2855 		return (0);
2856 	KEG_LOCK(keg);
2857 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2858 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2859 		keg->uk_maxpages += keg->uk_ppera;
2860 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2861 	KEG_UNLOCK(keg);
2862 
2863 	return (nitems);
2864 }
2865 
2866 /* See uma.h */
2867 int
2868 uma_zone_get_max(uma_zone_t zone)
2869 {
2870 	int nitems;
2871 	uma_keg_t keg;
2872 
2873 	keg = zone_first_keg(zone);
2874 	if (keg == NULL)
2875 		return (0);
2876 	KEG_LOCK(keg);
2877 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2878 	KEG_UNLOCK(keg);
2879 
2880 	return (nitems);
2881 }
2882 
2883 /* See uma.h */
2884 void
2885 uma_zone_set_warning(uma_zone_t zone, const char *warning)
2886 {
2887 
2888 	ZONE_LOCK(zone);
2889 	zone->uz_warning = warning;
2890 	ZONE_UNLOCK(zone);
2891 }
2892 
2893 /* See uma.h */
2894 void
2895 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
2896 {
2897 
2898 	ZONE_LOCK(zone);
2899 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
2900 	ZONE_UNLOCK(zone);
2901 }
2902 
2903 /* See uma.h */
2904 int
2905 uma_zone_get_cur(uma_zone_t zone)
2906 {
2907 	int64_t nitems;
2908 	u_int i;
2909 
2910 	ZONE_LOCK(zone);
2911 	nitems = zone->uz_allocs - zone->uz_frees;
2912 	CPU_FOREACH(i) {
2913 		/*
2914 		 * See the comment in sysctl_vm_zone_stats() regarding the
2915 		 * safety of accessing the per-cpu caches. With the zone lock
2916 		 * held, it is safe, but can potentially result in stale data.
2917 		 */
2918 		nitems += zone->uz_cpu[i].uc_allocs -
2919 		    zone->uz_cpu[i].uc_frees;
2920 	}
2921 	ZONE_UNLOCK(zone);
2922 
2923 	return (nitems < 0 ? 0 : nitems);
2924 }
2925 
2926 /* See uma.h */
2927 void
2928 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2929 {
2930 	uma_keg_t keg;
2931 
2932 	keg = zone_first_keg(zone);
2933 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2934 	KEG_LOCK(keg);
2935 	KASSERT(keg->uk_pages == 0,
2936 	    ("uma_zone_set_init on non-empty keg"));
2937 	keg->uk_init = uminit;
2938 	KEG_UNLOCK(keg);
2939 }
2940 
2941 /* See uma.h */
2942 void
2943 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2944 {
2945 	uma_keg_t keg;
2946 
2947 	keg = zone_first_keg(zone);
2948 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
2949 	KEG_LOCK(keg);
2950 	KASSERT(keg->uk_pages == 0,
2951 	    ("uma_zone_set_fini on non-empty keg"));
2952 	keg->uk_fini = fini;
2953 	KEG_UNLOCK(keg);
2954 }
2955 
2956 /* See uma.h */
2957 void
2958 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2959 {
2960 
2961 	ZONE_LOCK(zone);
2962 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2963 	    ("uma_zone_set_zinit on non-empty keg"));
2964 	zone->uz_init = zinit;
2965 	ZONE_UNLOCK(zone);
2966 }
2967 
2968 /* See uma.h */
2969 void
2970 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2971 {
2972 
2973 	ZONE_LOCK(zone);
2974 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2975 	    ("uma_zone_set_zfini on non-empty keg"));
2976 	zone->uz_fini = zfini;
2977 	ZONE_UNLOCK(zone);
2978 }
2979 
2980 /* See uma.h */
2981 /* XXX uk_freef is not actually used with the zone locked */
2982 void
2983 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2984 {
2985 	uma_keg_t keg;
2986 
2987 	keg = zone_first_keg(zone);
2988 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
2989 	KEG_LOCK(keg);
2990 	keg->uk_freef = freef;
2991 	KEG_UNLOCK(keg);
2992 }
2993 
2994 /* See uma.h */
2995 /* XXX uk_allocf is not actually used with the zone locked */
2996 void
2997 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2998 {
2999 	uma_keg_t keg;
3000 
3001 	keg = zone_first_keg(zone);
3002 	KEG_LOCK(keg);
3003 	keg->uk_allocf = allocf;
3004 	KEG_UNLOCK(keg);
3005 }
3006 
3007 /* See uma.h */
3008 void
3009 uma_zone_reserve(uma_zone_t zone, int items)
3010 {
3011 	uma_keg_t keg;
3012 
3013 	keg = zone_first_keg(zone);
3014 	if (keg == NULL)
3015 		return;
3016 	KEG_LOCK(keg);
3017 	keg->uk_reserve = items;
3018 	KEG_UNLOCK(keg);
3019 
3020 	return;
3021 }
3022 
3023 /* See uma.h */
3024 int
3025 uma_zone_reserve_kva(uma_zone_t zone, int count)
3026 {
3027 	uma_keg_t keg;
3028 	vm_offset_t kva;
3029 	u_int pages;
3030 
3031 	keg = zone_first_keg(zone);
3032 	if (keg == NULL)
3033 		return (0);
3034 	pages = count / keg->uk_ipers;
3035 
3036 	if (pages * keg->uk_ipers < count)
3037 		pages++;
3038 	pages *= keg->uk_ppera;
3039 
3040 #ifdef UMA_MD_SMALL_ALLOC
3041 	if (keg->uk_ppera > 1) {
3042 #else
3043 	if (1) {
3044 #endif
3045 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3046 		if (kva == 0)
3047 			return (0);
3048 	} else
3049 		kva = 0;
3050 	KEG_LOCK(keg);
3051 	keg->uk_kva = kva;
3052 	keg->uk_offset = 0;
3053 	keg->uk_maxpages = pages;
3054 #ifdef UMA_MD_SMALL_ALLOC
3055 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3056 #else
3057 	keg->uk_allocf = noobj_alloc;
3058 #endif
3059 	keg->uk_flags |= UMA_ZONE_NOFREE;
3060 	KEG_UNLOCK(keg);
3061 
3062 	return (1);
3063 }
3064 
3065 /* See uma.h */
3066 void
3067 uma_prealloc(uma_zone_t zone, int items)
3068 {
3069 	int slabs;
3070 	uma_slab_t slab;
3071 	uma_keg_t keg;
3072 
3073 	keg = zone_first_keg(zone);
3074 	if (keg == NULL)
3075 		return;
3076 	KEG_LOCK(keg);
3077 	slabs = items / keg->uk_ipers;
3078 	if (slabs * keg->uk_ipers < items)
3079 		slabs++;
3080 	while (slabs > 0) {
3081 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3082 		if (slab == NULL)
3083 			break;
3084 		MPASS(slab->us_keg == keg);
3085 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3086 		slabs--;
3087 	}
3088 	KEG_UNLOCK(keg);
3089 }
3090 
3091 /* See uma.h */
3092 static void
3093 uma_reclaim_locked(bool kmem_danger)
3094 {
3095 
3096 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3097 	sx_assert(&uma_drain_lock, SA_XLOCKED);
3098 	bucket_enable();
3099 	zone_foreach(zone_drain);
3100 	if (vm_page_count_min() || kmem_danger) {
3101 		cache_drain_safe(NULL);
3102 		zone_foreach(zone_drain);
3103 	}
3104 	/*
3105 	 * Some slabs may have been freed but this zone will be visited early
3106 	 * we visit again so that we can free pages that are empty once other
3107 	 * zones are drained.  We have to do the same for buckets.
3108 	 */
3109 	zone_drain(slabzone);
3110 	bucket_zone_drain();
3111 }
3112 
3113 void
3114 uma_reclaim(void)
3115 {
3116 
3117 	sx_xlock(&uma_drain_lock);
3118 	uma_reclaim_locked(false);
3119 	sx_xunlock(&uma_drain_lock);
3120 }
3121 
3122 static int uma_reclaim_needed;
3123 
3124 void
3125 uma_reclaim_wakeup(void)
3126 {
3127 
3128 	uma_reclaim_needed = 1;
3129 	wakeup(&uma_reclaim_needed);
3130 }
3131 
3132 void
3133 uma_reclaim_worker(void *arg __unused)
3134 {
3135 
3136 	sx_xlock(&uma_drain_lock);
3137 	for (;;) {
3138 		sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
3139 		    "umarcl", 0);
3140 		if (uma_reclaim_needed) {
3141 			uma_reclaim_needed = 0;
3142 			sx_xunlock(&uma_drain_lock);
3143 			EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3144 			sx_xlock(&uma_drain_lock);
3145 			uma_reclaim_locked(true);
3146 		}
3147 	}
3148 }
3149 
3150 /* See uma.h */
3151 int
3152 uma_zone_exhausted(uma_zone_t zone)
3153 {
3154 	int full;
3155 
3156 	ZONE_LOCK(zone);
3157 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3158 	ZONE_UNLOCK(zone);
3159 	return (full);
3160 }
3161 
3162 int
3163 uma_zone_exhausted_nolock(uma_zone_t zone)
3164 {
3165 	return (zone->uz_flags & UMA_ZFLAG_FULL);
3166 }
3167 
3168 void *
3169 uma_large_malloc(vm_size_t size, int wait)
3170 {
3171 	void *mem;
3172 	uma_slab_t slab;
3173 	uint8_t flags;
3174 
3175 	slab = zone_alloc_item(slabzone, NULL, wait);
3176 	if (slab == NULL)
3177 		return (NULL);
3178 	mem = page_alloc(NULL, size, &flags, wait);
3179 	if (mem) {
3180 		vsetslab((vm_offset_t)mem, slab);
3181 		slab->us_data = mem;
3182 		slab->us_flags = flags | UMA_SLAB_MALLOC;
3183 		slab->us_size = size;
3184 	} else {
3185 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3186 	}
3187 
3188 	return (mem);
3189 }
3190 
3191 void
3192 uma_large_free(uma_slab_t slab)
3193 {
3194 
3195 	page_free(slab->us_data, slab->us_size, slab->us_flags);
3196 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3197 }
3198 
3199 static void
3200 uma_zero_item(void *item, uma_zone_t zone)
3201 {
3202 	int i;
3203 
3204 	if (zone->uz_flags & UMA_ZONE_PCPU) {
3205 		CPU_FOREACH(i)
3206 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3207 	} else
3208 		bzero(item, zone->uz_size);
3209 }
3210 
3211 void
3212 uma_print_stats(void)
3213 {
3214 	zone_foreach(uma_print_zone);
3215 }
3216 
3217 static void
3218 slab_print(uma_slab_t slab)
3219 {
3220 	printf("slab: keg %p, data %p, freecount %d\n",
3221 		slab->us_keg, slab->us_data, slab->us_freecount);
3222 }
3223 
3224 static void
3225 cache_print(uma_cache_t cache)
3226 {
3227 	printf("alloc: %p(%d), free: %p(%d)\n",
3228 		cache->uc_allocbucket,
3229 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3230 		cache->uc_freebucket,
3231 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3232 }
3233 
3234 static void
3235 uma_print_keg(uma_keg_t keg)
3236 {
3237 	uma_slab_t slab;
3238 
3239 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3240 	    "out %d free %d limit %d\n",
3241 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3242 	    keg->uk_ipers, keg->uk_ppera,
3243 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3244 	    keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3245 	printf("Part slabs:\n");
3246 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3247 		slab_print(slab);
3248 	printf("Free slabs:\n");
3249 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3250 		slab_print(slab);
3251 	printf("Full slabs:\n");
3252 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3253 		slab_print(slab);
3254 }
3255 
3256 void
3257 uma_print_zone(uma_zone_t zone)
3258 {
3259 	uma_cache_t cache;
3260 	uma_klink_t kl;
3261 	int i;
3262 
3263 	printf("zone: %s(%p) size %d flags %#x\n",
3264 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3265 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3266 		uma_print_keg(kl->kl_keg);
3267 	CPU_FOREACH(i) {
3268 		cache = &zone->uz_cpu[i];
3269 		printf("CPU %d Cache:\n", i);
3270 		cache_print(cache);
3271 	}
3272 }
3273 
3274 #ifdef DDB
3275 /*
3276  * Generate statistics across both the zone and its per-cpu cache's.  Return
3277  * desired statistics if the pointer is non-NULL for that statistic.
3278  *
3279  * Note: does not update the zone statistics, as it can't safely clear the
3280  * per-CPU cache statistic.
3281  *
3282  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3283  * safe from off-CPU; we should modify the caches to track this information
3284  * directly so that we don't have to.
3285  */
3286 static void
3287 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3288     uint64_t *freesp, uint64_t *sleepsp)
3289 {
3290 	uma_cache_t cache;
3291 	uint64_t allocs, frees, sleeps;
3292 	int cachefree, cpu;
3293 
3294 	allocs = frees = sleeps = 0;
3295 	cachefree = 0;
3296 	CPU_FOREACH(cpu) {
3297 		cache = &z->uz_cpu[cpu];
3298 		if (cache->uc_allocbucket != NULL)
3299 			cachefree += cache->uc_allocbucket->ub_cnt;
3300 		if (cache->uc_freebucket != NULL)
3301 			cachefree += cache->uc_freebucket->ub_cnt;
3302 		allocs += cache->uc_allocs;
3303 		frees += cache->uc_frees;
3304 	}
3305 	allocs += z->uz_allocs;
3306 	frees += z->uz_frees;
3307 	sleeps += z->uz_sleeps;
3308 	if (cachefreep != NULL)
3309 		*cachefreep = cachefree;
3310 	if (allocsp != NULL)
3311 		*allocsp = allocs;
3312 	if (freesp != NULL)
3313 		*freesp = frees;
3314 	if (sleepsp != NULL)
3315 		*sleepsp = sleeps;
3316 }
3317 #endif /* DDB */
3318 
3319 static int
3320 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3321 {
3322 	uma_keg_t kz;
3323 	uma_zone_t z;
3324 	int count;
3325 
3326 	count = 0;
3327 	rw_rlock(&uma_rwlock);
3328 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3329 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3330 			count++;
3331 	}
3332 	rw_runlock(&uma_rwlock);
3333 	return (sysctl_handle_int(oidp, &count, 0, req));
3334 }
3335 
3336 static int
3337 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3338 {
3339 	struct uma_stream_header ush;
3340 	struct uma_type_header uth;
3341 	struct uma_percpu_stat ups;
3342 	uma_bucket_t bucket;
3343 	struct sbuf sbuf;
3344 	uma_cache_t cache;
3345 	uma_klink_t kl;
3346 	uma_keg_t kz;
3347 	uma_zone_t z;
3348 	uma_keg_t k;
3349 	int count, error, i;
3350 
3351 	error = sysctl_wire_old_buffer(req, 0);
3352 	if (error != 0)
3353 		return (error);
3354 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3355 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3356 
3357 	count = 0;
3358 	rw_rlock(&uma_rwlock);
3359 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3360 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3361 			count++;
3362 	}
3363 
3364 	/*
3365 	 * Insert stream header.
3366 	 */
3367 	bzero(&ush, sizeof(ush));
3368 	ush.ush_version = UMA_STREAM_VERSION;
3369 	ush.ush_maxcpus = (mp_maxid + 1);
3370 	ush.ush_count = count;
3371 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3372 
3373 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3374 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3375 			bzero(&uth, sizeof(uth));
3376 			ZONE_LOCK(z);
3377 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3378 			uth.uth_align = kz->uk_align;
3379 			uth.uth_size = kz->uk_size;
3380 			uth.uth_rsize = kz->uk_rsize;
3381 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3382 				k = kl->kl_keg;
3383 				uth.uth_maxpages += k->uk_maxpages;
3384 				uth.uth_pages += k->uk_pages;
3385 				uth.uth_keg_free += k->uk_free;
3386 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3387 				    * k->uk_ipers;
3388 			}
3389 
3390 			/*
3391 			 * A zone is secondary is it is not the first entry
3392 			 * on the keg's zone list.
3393 			 */
3394 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3395 			    (LIST_FIRST(&kz->uk_zones) != z))
3396 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3397 
3398 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3399 				uth.uth_zone_free += bucket->ub_cnt;
3400 			uth.uth_allocs = z->uz_allocs;
3401 			uth.uth_frees = z->uz_frees;
3402 			uth.uth_fails = z->uz_fails;
3403 			uth.uth_sleeps = z->uz_sleeps;
3404 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3405 			/*
3406 			 * While it is not normally safe to access the cache
3407 			 * bucket pointers while not on the CPU that owns the
3408 			 * cache, we only allow the pointers to be exchanged
3409 			 * without the zone lock held, not invalidated, so
3410 			 * accept the possible race associated with bucket
3411 			 * exchange during monitoring.
3412 			 */
3413 			for (i = 0; i < (mp_maxid + 1); i++) {
3414 				bzero(&ups, sizeof(ups));
3415 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3416 					goto skip;
3417 				if (CPU_ABSENT(i))
3418 					goto skip;
3419 				cache = &z->uz_cpu[i];
3420 				if (cache->uc_allocbucket != NULL)
3421 					ups.ups_cache_free +=
3422 					    cache->uc_allocbucket->ub_cnt;
3423 				if (cache->uc_freebucket != NULL)
3424 					ups.ups_cache_free +=
3425 					    cache->uc_freebucket->ub_cnt;
3426 				ups.ups_allocs = cache->uc_allocs;
3427 				ups.ups_frees = cache->uc_frees;
3428 skip:
3429 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3430 			}
3431 			ZONE_UNLOCK(z);
3432 		}
3433 	}
3434 	rw_runlock(&uma_rwlock);
3435 	error = sbuf_finish(&sbuf);
3436 	sbuf_delete(&sbuf);
3437 	return (error);
3438 }
3439 
3440 int
3441 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
3442 {
3443 	uma_zone_t zone = *(uma_zone_t *)arg1;
3444 	int error, max;
3445 
3446 	max = uma_zone_get_max(zone);
3447 	error = sysctl_handle_int(oidp, &max, 0, req);
3448 	if (error || !req->newptr)
3449 		return (error);
3450 
3451 	uma_zone_set_max(zone, max);
3452 
3453 	return (0);
3454 }
3455 
3456 int
3457 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
3458 {
3459 	uma_zone_t zone = *(uma_zone_t *)arg1;
3460 	int cur;
3461 
3462 	cur = uma_zone_get_cur(zone);
3463 	return (sysctl_handle_int(oidp, &cur, 0, req));
3464 }
3465 
3466 #ifdef INVARIANTS
3467 static uma_slab_t
3468 uma_dbg_getslab(uma_zone_t zone, void *item)
3469 {
3470 	uma_slab_t slab;
3471 	uma_keg_t keg;
3472 	uint8_t *mem;
3473 
3474 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3475 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
3476 		slab = vtoslab((vm_offset_t)mem);
3477 	} else {
3478 		/*
3479 		 * It is safe to return the slab here even though the
3480 		 * zone is unlocked because the item's allocation state
3481 		 * essentially holds a reference.
3482 		 */
3483 		ZONE_LOCK(zone);
3484 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
3485 		if (keg->uk_flags & UMA_ZONE_HASH)
3486 			slab = hash_sfind(&keg->uk_hash, mem);
3487 		else
3488 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
3489 		ZONE_UNLOCK(zone);
3490 	}
3491 
3492 	return (slab);
3493 }
3494 
3495 /*
3496  * Set up the slab's freei data such that uma_dbg_free can function.
3497  *
3498  */
3499 static void
3500 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
3501 {
3502 	uma_keg_t keg;
3503 	int freei;
3504 
3505 	if (zone_first_keg(zone) == NULL)
3506 		return;
3507 	if (slab == NULL) {
3508 		slab = uma_dbg_getslab(zone, item);
3509 		if (slab == NULL)
3510 			panic("uma: item %p did not belong to zone %s\n",
3511 			    item, zone->uz_name);
3512 	}
3513 	keg = slab->us_keg;
3514 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3515 
3516 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3517 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
3518 		    item, zone, zone->uz_name, slab, freei);
3519 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3520 
3521 	return;
3522 }
3523 
3524 /*
3525  * Verifies freed addresses.  Checks for alignment, valid slab membership
3526  * and duplicate frees.
3527  *
3528  */
3529 static void
3530 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
3531 {
3532 	uma_keg_t keg;
3533 	int freei;
3534 
3535 	if (zone_first_keg(zone) == NULL)
3536 		return;
3537 	if (slab == NULL) {
3538 		slab = uma_dbg_getslab(zone, item);
3539 		if (slab == NULL)
3540 			panic("uma: Freed item %p did not belong to zone %s\n",
3541 			    item, zone->uz_name);
3542 	}
3543 	keg = slab->us_keg;
3544 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3545 
3546 	if (freei >= keg->uk_ipers)
3547 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
3548 		    item, zone, zone->uz_name, slab, freei);
3549 
3550 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
3551 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
3552 		    item, zone, zone->uz_name, slab, freei);
3553 
3554 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3555 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
3556 		    item, zone, zone->uz_name, slab, freei);
3557 
3558 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3559 }
3560 #endif /* INVARIANTS */
3561 
3562 #ifdef DDB
3563 DB_SHOW_COMMAND(uma, db_show_uma)
3564 {
3565 	uint64_t allocs, frees, sleeps;
3566 	uma_bucket_t bucket;
3567 	uma_keg_t kz;
3568 	uma_zone_t z;
3569 	int cachefree;
3570 
3571 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
3572 	    "Free", "Requests", "Sleeps", "Bucket");
3573 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3574 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3575 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3576 				allocs = z->uz_allocs;
3577 				frees = z->uz_frees;
3578 				sleeps = z->uz_sleeps;
3579 				cachefree = 0;
3580 			} else
3581 				uma_zone_sumstat(z, &cachefree, &allocs,
3582 				    &frees, &sleeps);
3583 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3584 			    (LIST_FIRST(&kz->uk_zones) != z)))
3585 				cachefree += kz->uk_free;
3586 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3587 				cachefree += bucket->ub_cnt;
3588 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
3589 			    z->uz_name, (uintmax_t)kz->uk_size,
3590 			    (intmax_t)(allocs - frees), cachefree,
3591 			    (uintmax_t)allocs, sleeps, z->uz_count);
3592 			if (db_pager_quit)
3593 				return;
3594 		}
3595 	}
3596 }
3597 
3598 DB_SHOW_COMMAND(umacache, db_show_umacache)
3599 {
3600 	uint64_t allocs, frees;
3601 	uma_bucket_t bucket;
3602 	uma_zone_t z;
3603 	int cachefree;
3604 
3605 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3606 	    "Requests", "Bucket");
3607 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
3608 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3609 		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3610 			cachefree += bucket->ub_cnt;
3611 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
3612 		    z->uz_name, (uintmax_t)z->uz_size,
3613 		    (intmax_t)(allocs - frees), cachefree,
3614 		    (uintmax_t)allocs, z->uz_count);
3615 		if (db_pager_quit)
3616 			return;
3617 	}
3618 }
3619 #endif	/* DDB */
3620