/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>

int	vmem_startup_count(void);

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
    (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
    (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t 		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)
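
/*
 * For example, with two's-complement arithmetic and power-of-two
 * alignments, -(align) is a mask of the high-order bits, so:
 *
 *	VMEM_ALIGNUP(0x123, 0x100) == -(-0x123 & -0x100) == 0x200
 *
 * and VMEM_CROSS_P(0x1f00, 0x2010, 0x1000) is true, since the two
 * addresses differ in bits above the 0x1000 boundary.
 */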

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
    (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
    (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
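
/*
 * With VMEM_OPTORDER == 5 (VMEM_OPTVALUE == 32), for example:
 *
 *	SIZE2ORDER(1) == 0 and SIZE2ORDER(32) == 31 (exact orders),
 *	SIZE2ORDER(33) == 31 and SIZE2ORDER(64) == 32 (log orders),
 *	ORDER2SIZE(31) == 32 and ORDER2SIZE(32) == 64.
 *
 * That is, each size up to VMEM_OPTVALUE quanta gets its own order and
 * larger sizes fall into power-of-two buckets.
 */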

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from kernel arena to
	 * dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}
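
/*
 * The expected caller pattern is therefore (sketch):
 *
 *	VMEM_LOCK(vm);
 *	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
 *		goto fail;			(reserve not met)
 *	bt = bt_alloc(vm);			(cannot fail now; may be
 *						 repeated BT_MAXALLOC times)
 */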

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list in which any block is large enough
 * for the requested size.  Otherwise, return the first list which may
 * contain blocks large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
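
/*
 * For example, a request of 48 quanta maps to order 31 ([32, 63]):
 * M_BESTFIT starts scanning at freelist[31], whose blocks may be as
 * small as 32 quanta and must be size-checked, while M_FIRSTFIT rounds
 * up to freelist[32] ([64, 127]), where any block found is guaranteed
 * to be large enough.
 */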

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 *
 * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
 * failure, so UMA can't be used to cache a resource with value 0.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));

	qc = arg;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
	}
	return (i);
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}
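
/*
 * For a hypothetical arena "foo" with a 4KB quantum and a qcache_max
 * of 64KB, the loop above creates sixteen UMA caches "foo-4096",
 * "foo-8192", ..., "foo-65536", one per multiple of the quantum.
 * vmem_alloc() can then satisfy requests up to vm_qcache_max from a
 * per-CPU cache instead of walking the freelists under the arena lock.
 */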

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			vm_wait_domain(domain);
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}

/*
 * How many pages do we need to startup_alloc?
 */
int
vmem_startup_count(void)
{

	return (howmany(BT_MAXALLOC,
	    UMA_SLAB_SPACE / sizeof(struct vmem_btag)));
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so that they can ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
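
/*
 * For example, an arena tracking 1000 busy tags wants a hash table of
 * 1 << flsl(1000) == 1024 buckets: a 512-bucket table is grown
 * (desired >= current * 2), but a table is not shrunk to 1024 buckets
 * until it has reached at least 4096 (desired * 4 <= current).
 */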

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Destroy the per-cpu quantum caches, draining them in the process.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure we get a span that meets the alignment we double it
	 * and add the size to the tail.  This slightly overestimates.
	 */
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}
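
/*
 * For example, importing for an 8KB allocation with 16KB alignment
 * into an arena with a 4KB quantum and a 64KB import quantum requests
 * (16KB * 2) + 8KB = 40KB from the parent, rounded up to 64KB; any
 * span of that length contains a 16KB-aligned 8KB block, wherever the
 * parent happens to place the span.
 */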

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}
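
/*
 * Worked example: a free tag covering [0x10200, 0x121ff] can satisfy
 * size 0x400 with align 0x1000 and phase 0x100.  The first candidate
 * is VMEM_ALIGNUP(0x10200 - 0x100, 0x1000) + 0x100 == 0x11100, which
 * lies within the tag and leaves room for 0x400 bytes, so *addrp is
 * set to 0x11100 (and 0x11100 & 0xfff == 0x100, the requested phase).
 */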

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
}
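
/*
 * For example, clipping a free tag covering [0x1000, 0x3fff] to start
 * 0x2000 and size 0x1000 leaves three tags in the segment list:
 *
 *	[0x1000, 0x1fff] free	(btprev, the trimmed head)
 *	[0x2000, 0x2fff] busy	(btnew, the allocation)
 *	[0x3000, 0x3fff] free	(bt, the remainder)
 */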

/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
     vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: initialize an arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}
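
/*
 * A minimal usage sketch with hypothetical values (not compiled in):
 */
#if 0
static void
vmem_example(void)
{
	vmem_t *arena;
	vmem_addr_t addr;

	/* A page-granular arena managing [1MB, 2MB), no quantum cache. */
	arena = vmem_create("example", 1024 * 1024, 1024 * 1024, PAGE_SIZE,
	    0, M_WAITOK);
	if (vmem_alloc(arena, 8 * PAGE_SIZE, M_BESTFIT | M_WAITOK,
	    &addr) == 0)
		vmem_free(arena, addr, 8 * PAGE_SIZE);
	vmem_destroy(arena);
}
#endif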

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		/*
		 * Resource 0 cannot be cached, so avoid a blocking allocation
		 * in qc_import() and give the vmem_xalloc() call below a chance
		 * to return 0.
		 */
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
		    (flags & ~M_WAITOK) | M_NOWAIT);
		if (__predict_true(*addrp != 0))
			return (0);
	}

	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp));
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * Choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, align, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}
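
/*
 * For example, to allocate 64KB aligned to 64KB, not crossing a 1MB
 * boundary and restricted to [16MB, 4GB), failing rather than sleeping
 * (sketch; assumes the arena's quantum divides 64KB):
 *
 *	error = vmem_xalloc(arena, 64 * 1024, 64 * 1024, 0, 1024 * 1024,
 *	    16 * 1024 * 1024, 0xffffffffUL, M_BESTFIT | M_NOWAIT, &addr);
 */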

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;

	MPASS(size > 0);

	if (size <= vm->vm_qcache_max &&
	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}

/*
 * vmem_add: add a static span to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: return information about the arena's size.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}

DB_SHOW_COMMAND(vmemdump, vmemdump)
{

	if (!have_addr) {
		db_printf("usage: show vmemdump <addr>\n");
		return;
	}

	vmem_dump((const vmem_t *)addr, db_printf);
}

DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_dump(vm, db_printf);
}

DB_SHOW_COMMAND(vmem, vmem_summ)
{
	const vmem_t *vm = (const void *)addr;
	const bt_t *bt;
	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
	int ord;

	if (!have_addr) {
		db_printf("usage: show vmem <addr>\n");
		return;
	}

	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
	db_printf("\tsize:\t%zu\n", vm->vm_size);
	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);

	memset(&ft, 0, sizeof(ft));
	memset(&ut, 0, sizeof(ut));
	memset(&fs, 0, sizeof(fs));
	memset(&us, 0, sizeof(us));
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
		if (bt->bt_type == BT_TYPE_BUSY) {
			ut[ord]++;
			us[ord] += bt->bt_size;
		} else if (bt->bt_type == BT_TYPE_FREE) {
			ft[ord]++;
			fs[ord] += bt->bt_size;
		}
	}
	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
		if (ut[ord] == 0 && ft[ord] == 0)
			continue;
		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
		    ut[ord], us[ord], ft[ord], fs[ord]);
	}
}

DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_summ((db_expr_t)vm, TRUE, count, modif);
}
#endif /* defined(DDB) */

#define vmem_printf printf

#if defined(DIAGNOSTIC)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	MPASS(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */