1 /*	$NetBSD: subr_vmem.c,v 1.109 2023/04/09 09:18:09 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * reference:
31  * -	Magazines and Vmem: Extending the Slab Allocator
32  *	to Many CPUs and Arbitrary Resources
33  *	http://www.usenix.org/event/usenix01/bonwick.html
34  *
35  * locking & the boundary tag pool:
36  * - 	A pool(9) is used for vmem boundary tags
37  * - 	During a pool get call the global vmem_btag_refill_lock is taken
38  *	to serialize access to the allocation reserve, but no other
39  *	vmem arena locks are taken.
40  * -	During pool_put calls no vmem mutexes are locked.
41  * - 	pool_drain doesn't hold the pool's mutex while releasing memory to
42  * 	its backing allocator, so it cannot interfere with any vmem mutexes.
43  * -	The boundary tag pool is forced to put page headers into pool pages
44  *  	(PR_PHINPAGE) and not off-page, to avoid pool recursion.
45  *  	(due to sizeof(bt_t) it should be the case anyway)
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.109 2023/04/09 09:18:09 riastradh Exp $");
50 
51 #if defined(_KERNEL) && defined(_KERNEL_OPT)
52 #include "opt_ddb.h"
53 #endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */
54 
55 #include <sys/param.h>
56 #include <sys/hash.h>
57 #include <sys/queue.h>
58 #include <sys/bitops.h>
59 
60 #if defined(_KERNEL)
61 #include <sys/systm.h>
62 #include <sys/kernel.h>	/* hz */
63 #include <sys/callout.h>
64 #include <sys/kmem.h>
65 #include <sys/pool.h>
66 #include <sys/vmem.h>
67 #include <sys/vmem_impl.h>
68 #include <sys/workqueue.h>
69 #include <sys/atomic.h>
70 #include <uvm/uvm.h>
71 #include <uvm/uvm_extern.h>
72 #include <uvm/uvm_km.h>
73 #include <uvm/uvm_page.h>
74 #include <uvm/uvm_pdaemon.h>
75 #else /* defined(_KERNEL) */
76 #include <stdio.h>
77 #include <errno.h>
78 #include <assert.h>
79 #include <stdlib.h>
80 #include <string.h>
81 #include "../sys/vmem.h"
82 #include "../sys/vmem_impl.h"
83 #endif /* defined(_KERNEL) */
84 
85 
86 #if defined(_KERNEL)
87 #include <sys/evcnt.h>
88 #define VMEM_EVCNT_DEFINE(name) \
89 struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
90     "vmem", #name); \
91 EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
92 #define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
93 #define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--
94 
95 VMEM_EVCNT_DEFINE(static_bt_count)
96 VMEM_EVCNT_DEFINE(static_bt_inuse)
97 
98 #define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
99 #define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
100 #define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
101 #define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
102 
103 #else /* defined(_KERNEL) */
104 
105 #define VMEM_EVCNT_INCR(ev)	/* nothing */
106 #define VMEM_EVCNT_DECR(ev)	/* nothing */
107 
108 #define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
109 #define	VMEM_CONDVAR_DESTROY(vm)	/* nothing */
110 #define	VMEM_CONDVAR_WAIT(vm)		/* nothing */
111 #define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */
112 
113 #define	UNITTEST
114 #define	KASSERT(a)		assert(a)
115 #define	mutex_init(a, b, c)	/* nothing */
116 #define	mutex_destroy(a)	/* nothing */
117 #define	mutex_enter(a)		/* nothing */
118 #define	mutex_tryenter(a)	true
119 #define	mutex_exit(a)		/* nothing */
120 #define	mutex_owned(a)		true	/* pretend the lock is held */
121 #define	ASSERT_SLEEPABLE()	/* nothing */
122 #define	panic(...)		do { printf(__VA_ARGS__); abort(); } while (0)
123 #endif /* defined(_KERNEL) */
124 
125 #if defined(VMEM_SANITY)
126 static void vmem_check(vmem_t *);
127 #else /* defined(VMEM_SANITY) */
128 #define vmem_check(vm)	/* nothing */
129 #endif /* defined(VMEM_SANITY) */
130 
131 #define	VMEM_HASHSIZE_MIN	1	/* XXX */
132 #define	VMEM_HASHSIZE_MAX	65536	/* XXX */
133 #define	VMEM_HASHSIZE_INIT	1
134 
135 #define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)
136 
137 #if defined(_KERNEL)
138 static bool vmem_bootstrapped = false;
139 static kmutex_t vmem_list_lock;
140 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
141 #endif /* defined(_KERNEL) */
142 
143 /* ---- misc */
144 
145 #define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
146 #define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
147 #define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
148 #define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
149 #define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
150 #define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))
151 
152 #define	VMEM_ALIGNUP(addr, align) \
153 	(-(-(addr) & -(align)))
154 
155 #define	VMEM_CROSS_P(addr1, addr2, boundary) \
156 	((((addr1) ^ (addr2)) & -(boundary)) != 0)
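
/*
 * worked examples (editor's illustration): assuming two's complement
 * arithmetic and power-of-2 values for align/boundary,
 *
 *	VMEM_ALIGNUP(5, 4) == -(-5 & -4) == -(-8) == 8
 *	VMEM_ALIGNUP(8, 4) == -(-8 & -4) == -(-8) == 8	(already aligned)
 *
 * and VMEM_CROSS_P(a1, a2, b) is true iff a1 and a2 lie in different
 * b-aligned blocks:
 *
 *	VMEM_CROSS_P(6, 9, 8):  (6 ^ 9) & -8 == 8, crosses a boundary
 *	VMEM_CROSS_P(8, 15, 8): (8 ^ 15) & -8 == 0, stays in one block
 */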
157 
158 #define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
159 #define	SIZE2ORDER(size)	((int)ilog2(size))
160 
161 #if !defined(_KERNEL)
162 #define	xmalloc(sz, flags)	malloc(sz)
163 #define	xfree(p, sz)		free(p)
164 #define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
165 #define	bt_free(vm, bt)		free(bt)
166 #else /* defined(_KERNEL) */
167 
168 #define	xmalloc(sz, flags) \
169     kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
170 #define	xfree(p, sz)		kmem_free(p, sz)
171 
172 /*
173  * BT_RESERVE calculation:
174  * we allocate memory for boundary tags with vmem; therefore we have
175  * to keep a reserve of bts used to allocate memory for bts.
176  * This reserve is 4 for each arena involved in allocating vmem's memory.
177  * BT_MAXFREE: don't cache excessive counts of bts in arenas
178  */
179 #define STATIC_BT_COUNT 200
180 #define BT_MINRESERVE 4
181 #define BT_MAXFREE 64
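
/*
 * example (editor's illustration): bt_refill_locked() below tops an
 * arena up until it holds more than BT_MINRESERVE (4) free tags, and
 * bt_alloc() dips into that reserve only for VM_POPULATING requests,
 * i.e. allocations made while refilling the boundary tag pool itself.
 */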
182 
183 static struct vmem_btag static_bts[STATIC_BT_COUNT];
184 static int static_bt_count = STATIC_BT_COUNT;
185 
186 static struct vmem kmem_va_meta_arena_store;
187 vmem_t *kmem_va_meta_arena;
188 static struct vmem kmem_meta_arena_store;
189 vmem_t *kmem_meta_arena = NULL;
190 
191 static kmutex_t vmem_btag_refill_lock;
192 static kmutex_t vmem_btag_lock;
193 static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
194 static size_t vmem_btag_freelist_count = 0;
195 static struct pool vmem_btag_pool;
196 
197 static void vmem_xfree_bt(vmem_t *, bt_t *);
198 
199 static void
200 vmem_kick_pdaemon(void)
201 {
202 #if defined(_KERNEL)
203 	uvm_kick_pdaemon();
204 #endif
205 }
206 
207 /* ---- boundary tag */
208 
209 static int bt_refill(vmem_t *vm);
210 static int bt_refill_locked(vmem_t *vm);
211 
212 static void *
213 pool_page_alloc_vmem_meta(struct pool *pp, int flags)
214 {
215 	const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP;
216 	vmem_addr_t va;
217 	int ret;
218 
219 	ret = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
220 	    (vflags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va);
221 
222 	return ret ? NULL : (void *)va;
223 }
224 
225 static void
226 pool_page_free_vmem_meta(struct pool *pp, void *v)
227 {
228 
229 	vmem_free(kmem_meta_arena, (vmem_addr_t)v, pp->pr_alloc->pa_pagesz);
230 }
231 
232 /* allocator for vmem-pool metadata */
233 struct pool_allocator pool_allocator_vmem_meta = {
234 	.pa_alloc = pool_page_alloc_vmem_meta,
235 	.pa_free = pool_page_free_vmem_meta,
236 	.pa_pagesz = 0
237 };
238 
239 static int
240 bt_refill_locked(vmem_t *vm)
241 {
242 	bt_t *bt;
243 
244 	VMEM_ASSERT_LOCKED(vm);
245 
246 	if (vm->vm_nfreetags > BT_MINRESERVE) {
247 		return 0;
248 	}
249 
250 	mutex_enter(&vmem_btag_lock);
251 	while (!LIST_EMPTY(&vmem_btag_freelist) &&
252 	    vm->vm_nfreetags <= BT_MINRESERVE) {
253 		bt = LIST_FIRST(&vmem_btag_freelist);
254 		LIST_REMOVE(bt, bt_freelist);
255 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
256 		vm->vm_nfreetags++;
257 		vmem_btag_freelist_count--;
258 		VMEM_EVCNT_INCR(static_bt_inuse);
259 	}
260 	mutex_exit(&vmem_btag_lock);
261 
262 	while (vm->vm_nfreetags <= BT_MINRESERVE) {
263 		VMEM_UNLOCK(vm);
264 		mutex_enter(&vmem_btag_refill_lock);
265 		bt = pool_get(&vmem_btag_pool, PR_NOWAIT);
266 		mutex_exit(&vmem_btag_refill_lock);
267 		VMEM_LOCK(vm);
268 		if (bt == NULL)
269 			break;
270 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
271 		vm->vm_nfreetags++;
272 	}
273 
274 	if (vm->vm_nfreetags <= BT_MINRESERVE) {
275 		return ENOMEM;
276 	}
277 
278 	if (kmem_meta_arena != NULL) {
279 		VMEM_UNLOCK(vm);
280 		(void)bt_refill(kmem_arena);
281 		(void)bt_refill(kmem_va_meta_arena);
282 		(void)bt_refill(kmem_meta_arena);
283 		VMEM_LOCK(vm);
284 	}
285 
286 	return 0;
287 }
288 
289 static int
290 bt_refill(vmem_t *vm)
291 {
292 	int rv;
293 
294 	VMEM_LOCK(vm);
295 	rv = bt_refill_locked(vm);
296 	VMEM_UNLOCK(vm);
297 	return rv;
298 }
299 
300 static bt_t *
301 bt_alloc(vmem_t *vm, vm_flag_t flags)
302 {
303 	bt_t *bt;
304 
305 	VMEM_ASSERT_LOCKED(vm);
306 
307 	while (vm->vm_nfreetags <= BT_MINRESERVE && (flags & VM_POPULATING) == 0) {
308 		if (bt_refill_locked(vm)) {
309 			if ((flags & VM_NOSLEEP) != 0) {
310 				return NULL;
311 			}
312 
313 			/*
314 			 * It would be nice to wait for something specific here
315 			 * but there are multiple ways that a retry could
316 			 * succeed and we can't wait for multiple things
317 			 * simultaneously.  So we'll just sleep for an arbitrary
318 			 * short period of time and retry regardless.
319 			 * This should be a very rare case.
320 			 */
321 
322 			vmem_kick_pdaemon();
323 			kpause("btalloc", false, 1, &vm->vm_lock);
324 		}
325 	}
326 	bt = LIST_FIRST(&vm->vm_freetags);
327 	LIST_REMOVE(bt, bt_freelist);
328 	vm->vm_nfreetags--;
329 
330 	return bt;
331 }
332 
333 static void
334 bt_free(vmem_t *vm, bt_t *bt)
335 {
336 
337 	VMEM_ASSERT_LOCKED(vm);
338 
339 	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
340 	vm->vm_nfreetags++;
341 }
342 
343 static void
344 bt_freetrim(vmem_t *vm, int freelimit)
345 {
346 	bt_t *t;
347 	LIST_HEAD(, vmem_btag) tofree;
348 
349 	VMEM_ASSERT_LOCKED(vm);
350 
351 	LIST_INIT(&tofree);
352 
353 	while (vm->vm_nfreetags > freelimit) {
354 		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
355 		LIST_REMOVE(bt, bt_freelist);
356 		vm->vm_nfreetags--;
357 		if (bt >= static_bts
358 		    && bt < &static_bts[STATIC_BT_COUNT]) {
359 			mutex_enter(&vmem_btag_lock);
360 			LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
361 			vmem_btag_freelist_count++;
362 			mutex_exit(&vmem_btag_lock);
363 			VMEM_EVCNT_DECR(static_bt_inuse);
364 		} else {
365 			LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
366 		}
367 	}
368 
369 	VMEM_UNLOCK(vm);
370 	while (!LIST_EMPTY(&tofree)) {
371 		t = LIST_FIRST(&tofree);
372 		LIST_REMOVE(t, bt_freelist);
373 		pool_put(&vmem_btag_pool, t);
374 	}
375 }
376 #endif	/* defined(_KERNEL) */
377 
378 /*
379  * freelist[0] ... [1, 1]
380  * freelist[1] ... [2, 3]
381  * freelist[2] ... [4, 7]
382  * freelist[3] ... [8, 15]
383  *  :
384  * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
385  *  :
386  */
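
/*
 * worked example (editor's illustration): a free segment of 12 quanta
 * has SIZE2ORDER(12) == 3, so bt_insfree() places it on freelist[3],
 * which holds sizes in [8, 15]; every block on freelist[4] and above
 * is at least 16 quanta.
 */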
387 
388 static struct vmem_freelist *
389 bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
390 {
391 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
392 	const int idx = SIZE2ORDER(qsize);
393 
394 	KASSERT(size != 0);
395 	KASSERT(qsize != 0);
396 	KASSERT((size & vm->vm_quantum_mask) == 0);
397 	KASSERT(idx >= 0);
398 	KASSERT(idx < VMEM_MAXORDER);
399 
400 	return &vm->vm_freelist[idx];
401 }
402 
403 /*
404  * bt_freehead_toalloc: return the freelist for the given size and allocation
405  * strategy.
406  *
407  * for VM_INSTANTFIT, return the list on which any block is guaranteed to be
408  * large enough for the requested size.  otherwise, return the first list
409  * which can contain blocks large enough for the requested size.
410  */
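
/*
 * example (editor's illustration): for a request of 12 quanta,
 * SIZE2ORDER(12) == 3.  VM_BESTFIT starts at freelist[3], whose
 * [8, 15]-quanta blocks must be size-checked individually, while
 * VM_INSTANTFIT bumps the index to 4, where the first block found
 * (at least 16 quanta) is guaranteed to fit.  for an exact power of
 * two such as 8 quanta the index is not bumped.
 */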
411 
412 static struct vmem_freelist *
413 bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
414 {
415 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
416 	int idx = SIZE2ORDER(qsize);
417 
418 	KASSERT(size != 0);
419 	KASSERT(qsize != 0);
420 	KASSERT((size & vm->vm_quantum_mask) == 0);
421 
422 	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
423 		idx++;
424 		/* check too large request? */
425 	}
426 	KASSERT(idx >= 0);
427 	KASSERT(idx < VMEM_MAXORDER);
428 
429 	return &vm->vm_freelist[idx];
430 }
431 
432 /* ---- boundary tag hash */
433 
434 static struct vmem_hashlist *
435 bt_hashhead(vmem_t *vm, vmem_addr_t addr)
436 {
437 	struct vmem_hashlist *list;
438 	unsigned int hash;
439 
440 	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
441 	list = &vm->vm_hashlist[hash & vm->vm_hashmask];
442 
443 	return list;
444 }
445 
446 static bt_t *
447 bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
448 {
449 	struct vmem_hashlist *list;
450 	bt_t *bt;
451 
452 	list = bt_hashhead(vm, addr);
453 	LIST_FOREACH(bt, list, bt_hashlist) {
454 		if (bt->bt_start == addr) {
455 			break;
456 		}
457 	}
458 
459 	return bt;
460 }
461 
462 static void
463 bt_rembusy(vmem_t *vm, bt_t *bt)
464 {
465 
466 	KASSERT(vm->vm_nbusytag > 0);
467 	vm->vm_inuse -= bt->bt_size;
468 	vm->vm_nbusytag--;
469 	LIST_REMOVE(bt, bt_hashlist);
470 }
471 
472 static void
473 bt_insbusy(vmem_t *vm, bt_t *bt)
474 {
475 	struct vmem_hashlist *list;
476 
477 	KASSERT(bt->bt_type == BT_TYPE_BUSY);
478 
479 	list = bt_hashhead(vm, bt->bt_start);
480 	LIST_INSERT_HEAD(list, bt, bt_hashlist);
481 	if (++vm->vm_nbusytag > vm->vm_maxbusytag) {
482 		vm->vm_maxbusytag = vm->vm_nbusytag;
483 	}
484 	vm->vm_inuse += bt->bt_size;
485 }
486 
487 /* ---- boundary tag list */
488 
489 static void
490 bt_remseg(vmem_t *vm, bt_t *bt)
491 {
492 
493 	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
494 }
495 
496 static void
497 bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
498 {
499 
500 	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
501 }
502 
503 static void
504 bt_insseg_tail(vmem_t *vm, bt_t *bt)
505 {
506 
507 	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
508 }
509 
510 static void
511 bt_remfree(vmem_t *vm, bt_t *bt)
512 {
513 
514 	KASSERT(bt->bt_type == BT_TYPE_FREE);
515 
516 	LIST_REMOVE(bt, bt_freelist);
517 }
518 
519 static void
520 bt_insfree(vmem_t *vm, bt_t *bt)
521 {
522 	struct vmem_freelist *list;
523 
524 	list = bt_freehead_tofree(vm, bt->bt_size);
525 	LIST_INSERT_HEAD(list, bt, bt_freelist);
526 }
527 
528 /* ---- vmem internal functions */
529 
530 #if defined(QCACHE)
531 static inline vm_flag_t
532 prf_to_vmf(int prflags)
533 {
534 	vm_flag_t vmflags;
535 
536 	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
537 	if ((prflags & PR_WAITOK) != 0) {
538 		vmflags = VM_SLEEP;
539 	} else {
540 		vmflags = VM_NOSLEEP;
541 	}
542 	return vmflags;
543 }
544 
545 static inline int
546 vmf_to_prf(vm_flag_t vmflags)
547 {
548 	int prflags;
549 
550 	if ((vmflags & VM_SLEEP) != 0) {
551 		prflags = PR_WAITOK;
552 	} else {
553 		prflags = PR_NOWAIT;
554 	}
555 	return prflags;
556 }
557 
558 static size_t
559 qc_poolpage_size(size_t qcache_max)
560 {
561 	int i;
562 
563 	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
564 		/* nothing */
565 	}
566 	return ORDER2SIZE(i);
567 }
568 
569 static void *
570 qc_poolpage_alloc(struct pool *pool, int prflags)
571 {
572 	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
573 	vmem_t *vm = qc->qc_vmem;
574 	vmem_addr_t addr;
575 
576 	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
577 	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
578 		return NULL;
579 	return (void *)addr;
580 }
581 
582 static void
583 qc_poolpage_free(struct pool *pool, void *addr)
584 {
585 	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
586 	vmem_t *vm = qc->qc_vmem;
587 
588 	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
589 }
590 
591 static void
592 qc_init(vmem_t *vm, size_t qcache_max, int ipl)
593 {
594 	qcache_t *prevqc;
595 	struct pool_allocator *pa;
596 	int qcache_idx_max;
597 	int i;
598 
599 	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
600 	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
601 		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
602 	}
603 	vm->vm_qcache_max = qcache_max;
604 	pa = &vm->vm_qcache_allocator;
605 	memset(pa, 0, sizeof(*pa));
606 	pa->pa_alloc = qc_poolpage_alloc;
607 	pa->pa_free = qc_poolpage_free;
608 	pa->pa_pagesz = qc_poolpage_size(qcache_max);
609 
610 	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
611 	prevqc = NULL;
612 	for (i = qcache_idx_max; i > 0; i--) {
613 		qcache_t *qc = &vm->vm_qcache_store[i - 1];
614 		size_t size = i << vm->vm_quantum_shift;
615 		pool_cache_t pc;
616 
617 		qc->qc_vmem = vm;
618 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
619 		    vm->vm_name, size);
620 
621 		pc = pool_cache_init(size,
622 		    ORDER2SIZE(vm->vm_quantum_shift), 0,
623 		    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
624 		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
625 
626 		KASSERT(pc);
627 
628 		qc->qc_cache = pc;
629 		KASSERT(qc->qc_cache != NULL);	/* XXX */
630 		if (prevqc != NULL &&
631 		    qc->qc_cache->pc_pool.pr_itemsperpage ==
632 		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
633 			pool_cache_destroy(qc->qc_cache);
634 			vm->vm_qcache[i - 1] = prevqc;
635 			continue;
636 		}
637 		qc->qc_cache->pc_pool.pr_qcache = qc;
638 		vm->vm_qcache[i - 1] = qc;
639 		prevqc = qc;
640 	}
641 }
642 
643 static void
644 qc_destroy(vmem_t *vm)
645 {
646 	const qcache_t *prevqc;
647 	int i;
648 	int qcache_idx_max;
649 
650 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
651 	prevqc = NULL;
652 	for (i = 0; i < qcache_idx_max; i++) {
653 		qcache_t *qc = vm->vm_qcache[i];
654 
655 		if (prevqc == qc) {
656 			continue;
657 		}
658 		pool_cache_destroy(qc->qc_cache);
659 		prevqc = qc;
660 	}
661 }
662 #endif
663 
664 #if defined(_KERNEL)
665 static void
666 vmem_bootstrap(void)
667 {
668 
669 	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
670 	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
671 	mutex_init(&vmem_btag_refill_lock, MUTEX_DEFAULT, IPL_VM);
672 
673 	while (static_bt_count-- > 0) {
674 		bt_t *bt = &static_bts[static_bt_count];
675 		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
676 		VMEM_EVCNT_INCR(static_bt_count);
677 		vmem_btag_freelist_count++;
678 	}
679 	vmem_bootstrapped = true;
680 }
681 
682 void
683 vmem_subsystem_init(vmem_t *vm)
684 {
685 
686 	kmem_va_meta_arena = vmem_init(&kmem_va_meta_arena_store, "vmem-va",
687 	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, vm,
688 	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
689 	    IPL_VM);
690 
691 	kmem_meta_arena = vmem_init(&kmem_meta_arena_store, "vmem-meta",
692 	    0, 0, PAGE_SIZE,
693 	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
694 	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
695 
696 	pool_init(&vmem_btag_pool, sizeof(bt_t), coherency_unit, 0,
697 	    PR_PHINPAGE, "vmembt", &pool_allocator_vmem_meta, IPL_VM);
698 }
699 #endif /* defined(_KERNEL) */
700 
701 static int
702 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
703     int spanbttype)
704 {
705 	bt_t *btspan;
706 	bt_t *btfree;
707 
708 	VMEM_ASSERT_LOCKED(vm);
709 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
710 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
711 	KASSERT(spanbttype == BT_TYPE_SPAN ||
712 	    spanbttype == BT_TYPE_SPAN_STATIC);
713 
714 	btspan = bt_alloc(vm, flags);
715 	if (btspan == NULL) {
716 		return ENOMEM;
717 	}
718 	btfree = bt_alloc(vm, flags);
719 	if (btfree == NULL) {
720 		bt_free(vm, btspan);
721 		return ENOMEM;
722 	}
723 
724 	btspan->bt_type = spanbttype;
725 	btspan->bt_start = addr;
726 	btspan->bt_size = size;
727 
728 	btfree->bt_type = BT_TYPE_FREE;
729 	btfree->bt_start = addr;
730 	btfree->bt_size = size;
731 
732 	bt_insseg_tail(vm, btspan);
733 	bt_insseg(vm, btfree, btspan);
734 	bt_insfree(vm, btfree);
735 	vm->vm_size += size;
736 
737 	return 0;
738 }
739 
740 static void
741 vmem_destroy1(vmem_t *vm)
742 {
743 
744 #if defined(QCACHE)
745 	qc_destroy(vm);
746 #endif /* defined(QCACHE) */
747 	VMEM_LOCK(vm);
748 
749 	for (int i = 0; i < vm->vm_hashsize; i++) {
750 		bt_t *bt;
751 
752 		while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
753 			KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
754 			LIST_REMOVE(bt, bt_hashlist);
755 			bt_free(vm, bt);
756 		}
757 	}
758 
759 	/* bt_freetrim() drops the lock. */
760 	bt_freetrim(vm, 0);
761 	if (vm->vm_hashlist != &vm->vm_hash0) {
762 		xfree(vm->vm_hashlist,
763 		    sizeof(struct vmem_hashlist) * vm->vm_hashsize);
764 	}
765 
766 	VMEM_CONDVAR_DESTROY(vm);
767 	VMEM_LOCK_DESTROY(vm);
768 	xfree(vm, sizeof(*vm));
769 }
770 
771 static int
772 vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
773 {
774 	vmem_addr_t addr;
775 	int rc;
776 
777 	VMEM_ASSERT_LOCKED(vm);
778 
779 	if (vm->vm_importfn == NULL) {
780 		return EINVAL;
781 	}
782 
783 	if (vm->vm_flags & VM_LARGEIMPORT) {
784 		size *= 16;
785 	}
786 
787 	VMEM_UNLOCK(vm);
788 	if (vm->vm_flags & VM_XIMPORT) {
789 		rc = __FPTRCAST(vmem_ximport_t *, vm->vm_importfn)(vm->vm_arg,
790 		    size, &size, flags, &addr);
791 	} else {
792 		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
793 	}
794 	VMEM_LOCK(vm);
795 
796 	if (rc) {
797 		return ENOMEM;
798 	}
799 
800 	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
801 		VMEM_UNLOCK(vm);
802 		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
803 		VMEM_LOCK(vm);
804 		return ENOMEM;
805 	}
806 
807 	return 0;
808 }
809 
810 static int
811 vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
812 {
813 	bt_t *bt;
814 	int i;
815 	struct vmem_hashlist *newhashlist;
816 	struct vmem_hashlist *oldhashlist;
817 	size_t oldhashsize;
818 
819 	KASSERT(newhashsize > 0);
820 
821 	/* Round hash size up to the next power of 2 (an exact power is doubled). */
822 	newhashsize = 1 << (ilog2(newhashsize) + 1);
823 
824 	newhashlist =
825 	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
826 	if (newhashlist == NULL) {
827 		return ENOMEM;
828 	}
829 	for (i = 0; i < newhashsize; i++) {
830 		LIST_INIT(&newhashlist[i]);
831 	}
832 
833 	VMEM_LOCK(vm);
834 	/* Decay back to a small hash slowly. */
835 	if (vm->vm_maxbusytag >= 2) {
836 		vm->vm_maxbusytag = vm->vm_maxbusytag / 2 - 1;
837 		if (vm->vm_nbusytag > vm->vm_maxbusytag) {
838 			vm->vm_maxbusytag = vm->vm_nbusytag;
839 		}
840 	} else {
841 		vm->vm_maxbusytag = vm->vm_nbusytag;
842 	}
843 	oldhashlist = vm->vm_hashlist;
844 	oldhashsize = vm->vm_hashsize;
845 	vm->vm_hashlist = newhashlist;
846 	vm->vm_hashsize = newhashsize;
847 	vm->vm_hashmask = newhashsize - 1;
848 	if (oldhashlist == NULL) {
849 		VMEM_UNLOCK(vm);
850 		return 0;
851 	}
852 	for (i = 0; i < oldhashsize; i++) {
853 		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
854 			bt_rembusy(vm, bt); /* XXX */
855 			bt_insbusy(vm, bt);
856 		}
857 	}
858 	VMEM_UNLOCK(vm);
859 
860 	if (oldhashlist != &vm->vm_hash0) {
861 		xfree(oldhashlist,
862 		    sizeof(struct vmem_hashlist) * oldhashsize);
863 	}
864 
865 	return 0;
866 }
867 
868 /*
869  * vmem_fit: check if a bt can satisfy the given restrictions.
870  *
871  * it's the caller's responsibility to ensure the region is big enough
872  * before calling us.
873  */
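
/*
 * worked example (editor's illustration): for a free segment spanning
 * [1000, 2000), with align = 256, phase = 16, minaddr = 1100:
 *
 *	start = MAX(1000, 1100)                   = 1100
 *	start = VMEM_ALIGNUP(1100 - 16, 256) + 16 = 1280 + 16 = 1296
 *
 * 1296 % 256 == 16 == phase, so the chosen address honours the
 * align/phase restriction while remaining inside the segment.
 */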
874 
875 static int
876 vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
877     vmem_size_t phase, vmem_size_t nocross,
878     vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
879 {
880 	vmem_addr_t start;
881 	vmem_addr_t end;
882 
883 	KASSERT(size > 0);
884 	KASSERT(bt->bt_size >= size); /* caller's responsibility */
885 
886 	/*
887 	 * XXX assumption: vmem_addr_t and vmem_size_t are
888 	 * unsigned integers of the same size.
889 	 */
890 
891 	start = bt->bt_start;
892 	if (start < minaddr) {
893 		start = minaddr;
894 	}
895 	end = BT_END(bt);
896 	if (end > maxaddr) {
897 		end = maxaddr;
898 	}
899 	if (start > end) {
900 		return ENOMEM;
901 	}
902 
903 	start = VMEM_ALIGNUP(start - phase, align) + phase;
904 	if (start < bt->bt_start) {
905 		start += align;
906 	}
907 	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
908 		KASSERT(align < nocross);
909 		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
910 	}
911 	if (start <= end && end - start >= size - 1) {
912 		KASSERT((start & (align - 1)) == phase);
913 		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
914 		KASSERT(minaddr <= start);
915 		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
916 		KASSERT(bt->bt_start <= start);
917 		KASSERT(BT_END(bt) - start >= size - 1);
918 		*addrp = start;
919 		return 0;
920 	}
921 	return ENOMEM;
922 }
923 
924 /* ---- vmem API */
925 
926 /*
927  * vmem_init: creates a vmem arena.
928  */
929 
930 vmem_t *
931 vmem_init(vmem_t *vm, const char *name,
932     vmem_addr_t base, vmem_size_t size, vmem_size_t quantum,
933     vmem_import_t *importfn, vmem_release_t *releasefn,
934     vmem_t *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
935 {
936 	int i;
937 
938 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
939 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
940 	KASSERT(quantum > 0);
941 
942 #if defined(_KERNEL)
943 	/* XXX: SMP, we get called early... */
944 	if (!vmem_bootstrapped) {
945 		vmem_bootstrap();
946 	}
947 #endif /* defined(_KERNEL) */
948 
949 	if (vm == NULL) {
950 		vm = xmalloc(sizeof(*vm), flags);
951 	}
952 	if (vm == NULL) {
953 		return NULL;
954 	}
955 
956 	VMEM_CONDVAR_INIT(vm, "vmem");
957 	VMEM_LOCK_INIT(vm, ipl);
958 	vm->vm_flags = flags;
959 	vm->vm_nfreetags = 0;
960 	LIST_INIT(&vm->vm_freetags);
961 	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
962 	vm->vm_quantum_mask = quantum - 1;
963 	vm->vm_quantum_shift = SIZE2ORDER(quantum);
964 	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
965 	vm->vm_importfn = importfn;
966 	vm->vm_releasefn = releasefn;
967 	vm->vm_arg = arg;
968 	vm->vm_nbusytag = 0;
969 	vm->vm_maxbusytag = 0;
970 	vm->vm_size = 0;
971 	vm->vm_inuse = 0;
972 #if defined(QCACHE)
973 	qc_init(vm, qcache_max, ipl);
974 #endif /* defined(QCACHE) */
975 
976 	TAILQ_INIT(&vm->vm_seglist);
977 	for (i = 0; i < VMEM_MAXORDER; i++) {
978 		LIST_INIT(&vm->vm_freelist[i]);
979 	}
980 	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
981 	vm->vm_hashsize = 1;
982 	vm->vm_hashmask = vm->vm_hashsize - 1;
983 	vm->vm_hashlist = &vm->vm_hash0;
984 
985 	if (size != 0) {
986 		if (vmem_add(vm, base, size, flags) != 0) {
987 			vmem_destroy1(vm);
988 			return NULL;
989 		}
990 	}
991 
992 #if defined(_KERNEL)
993 	if (flags & VM_BOOTSTRAP) {
994 		bt_refill(vm);
995 	}
996 
997 	mutex_enter(&vmem_list_lock);
998 	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
999 	mutex_exit(&vmem_list_lock);
1000 #endif /* defined(_KERNEL) */
1001 
1002 	return vm;
1003 }
1004 
1005 
1006 
1007 /*
1008  * vmem_create: create an arena.
1009  *
1010  * => must not be called from interrupt context.
1011  */
1012 
1013 vmem_t *
1014 vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
1015     vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
1016     vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
1017 {
1018 
1019 	KASSERT((flags & (VM_XIMPORT)) == 0);
1020 
1021 	return vmem_init(NULL, name, base, size, quantum,
1022 	    importfn, releasefn, source, qcache_max, flags, ipl);
1023 }
1024 
1025 /*
1026  * vmem_xcreate: create an arena with an alternative import function.
1027  *
1028  * => must not be called from interrupt context.
1029  */
1030 
1031 vmem_t *
1032 vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
1033     vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
1034     vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
1035 {
1036 
1037 	KASSERT((flags & (VM_XIMPORT)) == 0);
1038 
1039 	return vmem_init(NULL, name, base, size, quantum,
1040 	    __FPTRCAST(vmem_import_t *, importfn), releasefn, source,
1041 	    qcache_max, flags | VM_XIMPORT, ipl);
1042 }
1043 
1044 void
1045 vmem_destroy(vmem_t *vm)
1046 {
1047 
1048 #if defined(_KERNEL)
1049 	mutex_enter(&vmem_list_lock);
1050 	LIST_REMOVE(vm, vm_alllist);
1051 	mutex_exit(&vmem_list_lock);
1052 #endif /* defined(_KERNEL) */
1053 
1054 	vmem_destroy1(vm);
1055 }
1056 
1057 vmem_size_t
1058 vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1059 {
1060 
1061 	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1062 }
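
/*
 * example (editor's illustration): with a quantum of 16 (mask 0xf),
 * vmem_roundup_size(vm, 20) == (20 + 15) & ~15 == 32; sizes already
 * a multiple of the quantum are returned unchanged.
 */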
1063 
1064 /*
1065  * vmem_alloc: allocate resource from the arena.
1066  */
1067 
1068 int
1069 vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
1070 {
1071 	const vm_flag_t strat __diagused = flags & VM_FITMASK;
1072 	int error;
1073 
1074 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
1075 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
1076 
1077 	KASSERT(size > 0);
1078 	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
1079 	if ((flags & VM_SLEEP) != 0) {
1080 		ASSERT_SLEEPABLE();
1081 	}
1082 
1083 #if defined(QCACHE)
1084 	if (size <= vm->vm_qcache_max) {
1085 		void *p;
1086 		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
1087 		qcache_t *qc = vm->vm_qcache[qidx - 1];
1088 
1089 		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
1090 		if (addrp != NULL)
1091 			*addrp = (vmem_addr_t)p;
1092 		error = (p == NULL) ? ENOMEM : 0;
1093 		goto out;
1094 	}
1095 #endif /* defined(QCACHE) */
1096 
1097 	error = vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1098 	    flags, addrp);
1099 out:
1100 	KASSERTMSG(error || addrp == NULL ||
1101 	    (*addrp & vm->vm_quantum_mask) == 0,
1102 	    "vmem %s mask=0x%jx addr=0x%jx",
1103 	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
1104 	KASSERT(error == 0 || (flags & VM_SLEEP) == 0);
1105 	return error;
1106 }
1107 
1108 int
1109 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1110     const vmem_size_t phase, const vmem_size_t nocross,
1111     const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
1112     vmem_addr_t *addrp)
1113 {
1114 	struct vmem_freelist *list;
1115 	struct vmem_freelist *first;
1116 	struct vmem_freelist *end;
1117 	bt_t *bt;
1118 	bt_t *btnew;
1119 	bt_t *btnew2;
1120 	const vmem_size_t size = vmem_roundup_size(vm, size0);
1121 	vm_flag_t strat = flags & VM_FITMASK;
1122 	vmem_addr_t start;
1123 	int rc;
1124 
1125 	KASSERT(size0 > 0);
1126 	KASSERT(size > 0);
1127 	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
1128 	if ((flags & VM_SLEEP) != 0) {
1129 		ASSERT_SLEEPABLE();
1130 	}
1131 	KASSERT((align & vm->vm_quantum_mask) == 0);
1132 	KASSERT((align & (align - 1)) == 0);
1133 	KASSERT((phase & vm->vm_quantum_mask) == 0);
1134 	KASSERT((nocross & vm->vm_quantum_mask) == 0);
1135 	KASSERT((nocross & (nocross - 1)) == 0);
1136 	KASSERT(align == 0 || phase < align);
1137 	KASSERT(phase == 0 || phase < align);
1138 	KASSERT(nocross == 0 || nocross >= size);
1139 	KASSERT(minaddr <= maxaddr);
1140 	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1141 
1142 	if (align == 0) {
1143 		align = vm->vm_quantum_mask + 1;
1144 	}
1145 
1146 	/*
1147 	 * allocate both boundary tags up front, before choosing a free block.
1148 	 */
1149 	VMEM_LOCK(vm);
1150 	btnew = bt_alloc(vm, flags);
1151 	if (btnew == NULL) {
1152 		VMEM_UNLOCK(vm);
1153 		return ENOMEM;
1154 	}
1155 	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
1156 	if (btnew2 == NULL) {
1157 		bt_free(vm, btnew);
1158 		VMEM_UNLOCK(vm);
1159 		return ENOMEM;
1160 	}
1161 
1162 	/*
1163 	 * choose a free block from which we allocate.
1164 	 */
1165 retry_strat:
1166 	first = bt_freehead_toalloc(vm, size, strat);
1167 	end = &vm->vm_freelist[VMEM_MAXORDER];
1168 retry:
1169 	bt = NULL;
1170 	vmem_check(vm);
1171 	if (strat == VM_INSTANTFIT) {
1172 		/*
1173 		 * just choose the first block which satisfies our restrictions.
1174 		 *
1175 		 * note that we don't need to check the size of the blocks
1176 		 * because any blocks found on these lists should be larger than
1177 		 * the given size.
1178 		 */
1179 		for (list = first; list < end; list++) {
1180 			bt = LIST_FIRST(list);
1181 			if (bt != NULL) {
1182 				rc = vmem_fit(bt, size, align, phase,
1183 				    nocross, minaddr, maxaddr, &start);
1184 				if (rc == 0) {
1185 					goto gotit;
1186 				}
1187 				/*
1188 				 * don't bother to follow the bt_freelist link
1189 				 * here.  the list can be very long and we are
1190 				 * told to run fast.  blocks from the later free
1191 				 * lists are larger and have better chances to
1192 				 * satisfy our restrictions.
1193 				 */
1194 			}
1195 		}
1196 	} else { /* VM_BESTFIT */
1197 		/*
1198 		 * we assume that, for space efficiency, it's better to
1199 		 * allocate from a smaller block.  thus we will start searching
1200 		 * from a lower-order list than VM_INSTANTFIT does.
1201 		 * however, don't bother to find the smallest block in a free
1202 		 * list because the list can be very long.  we can revisit it
1203 		 * if/when it turns out to be a problem.
1204 		 *
1205 		 * note that the 'first' list can contain blocks smaller than
1206 		 * the requested size.  thus we need to check bt_size.
1207 		 */
1208 		for (list = first; list < end; list++) {
1209 			LIST_FOREACH(bt, list, bt_freelist) {
1210 				if (bt->bt_size >= size) {
1211 					rc = vmem_fit(bt, size, align, phase,
1212 					    nocross, minaddr, maxaddr, &start);
1213 					if (rc == 0) {
1214 						goto gotit;
1215 					}
1216 				}
1217 			}
1218 		}
1219 	}
1220 #if 1
1221 	if (strat == VM_INSTANTFIT) {
1222 		strat = VM_BESTFIT;
1223 		goto retry_strat;
1224 	}
1225 #endif
1226 	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {
1227 
1228 		/*
1229 		 * XXX should try to import a region large enough to
1230 		 * satisfy restrictions?
1231 		 */
1232 
1233 		goto fail;
1234 	}
1235 	/* XXX eeek, minaddr & maxaddr not respected */
1236 	if (vmem_import(vm, size, flags) == 0) {
1237 		goto retry;
1238 	}
1239 	/* XXX */
1240 
1241 	if ((flags & VM_SLEEP) != 0) {
1242 		vmem_kick_pdaemon();
1243 		VMEM_CONDVAR_WAIT(vm);
1244 		goto retry;
1245 	}
1246 fail:
1247 	bt_free(vm, btnew);
1248 	bt_free(vm, btnew2);
1249 	VMEM_UNLOCK(vm);
1250 	return ENOMEM;
1251 
1252 gotit:
1253 	KASSERT(bt->bt_type == BT_TYPE_FREE);
1254 	KASSERT(bt->bt_size >= size);
1255 	bt_remfree(vm, bt);
1256 	vmem_check(vm);
1257 	if (bt->bt_start != start) {
1258 		btnew2->bt_type = BT_TYPE_FREE;
1259 		btnew2->bt_start = bt->bt_start;
1260 		btnew2->bt_size = start - bt->bt_start;
1261 		bt->bt_start = start;
1262 		bt->bt_size -= btnew2->bt_size;
1263 		bt_insfree(vm, btnew2);
1264 		bt_insseg(vm, btnew2, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
1265 		btnew2 = NULL;
1266 		vmem_check(vm);
1267 	}
1268 	KASSERT(bt->bt_start == start);
1269 	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
1270 		/* split */
1271 		btnew->bt_type = BT_TYPE_BUSY;
1272 		btnew->bt_start = bt->bt_start;
1273 		btnew->bt_size = size;
1274 		bt->bt_start = bt->bt_start + size;
1275 		bt->bt_size -= size;
1276 		bt_insfree(vm, bt);
1277 		bt_insseg(vm, btnew, TAILQ_PREV(bt, vmem_seglist, bt_seglist));
1278 		bt_insbusy(vm, btnew);
1279 		vmem_check(vm);
1280 	} else {
1281 		bt->bt_type = BT_TYPE_BUSY;
1282 		bt_insbusy(vm, bt);
1283 		vmem_check(vm);
1284 		bt_free(vm, btnew);
1285 		btnew = bt;
1286 	}
1287 	if (btnew2 != NULL) {
1288 		bt_free(vm, btnew2);
1289 	}
1290 	KASSERT(btnew->bt_size >= size);
1291 	btnew->bt_type = BT_TYPE_BUSY;
1292 	if (addrp != NULL)
1293 		*addrp = btnew->bt_start;
1294 	VMEM_UNLOCK(vm);
1295 	KASSERTMSG(addrp == NULL ||
1296 	    (*addrp & vm->vm_quantum_mask) == 0,
1297 	    "vmem %s mask=0x%jx addr=0x%jx",
1298 	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)*addrp);
1299 	return 0;
1300 }
1301 
1302 /*
1303  * vmem_free: free the resource to the arena.
1304  */
1305 
1306 void
1307 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1308 {
1309 
1310 	KASSERT(size > 0);
1311 	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
1312 	    "vmem %s mask=0x%jx addr=0x%jx",
1313 	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);
1314 
1315 #if defined(QCACHE)
1316 	if (size <= vm->vm_qcache_max) {
1317 		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
1318 		qcache_t *qc = vm->vm_qcache[qidx - 1];
1319 
1320 		pool_cache_put(qc->qc_cache, (void *)addr);
1321 		return;
1322 	}
1323 #endif /* defined(QCACHE) */
1324 
1325 	vmem_xfree(vm, addr, size);
1326 }
1327 
1328 void
1329 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1330 {
1331 	bt_t *bt;
1332 
1333 	KASSERT(size > 0);
1334 	KASSERTMSG((addr & vm->vm_quantum_mask) == 0,
1335 	    "vmem %s mask=0x%jx addr=0x%jx",
1336 	    vm->vm_name, (uintmax_t)vm->vm_quantum_mask, (uintmax_t)addr);
1337 
1338 	VMEM_LOCK(vm);
1339 
1340 	bt = bt_lookupbusy(vm, addr);
1341 	KASSERTMSG(bt != NULL, "vmem %s addr 0x%jx size 0x%jx",
1342 	    vm->vm_name, (uintmax_t)addr, (uintmax_t)size);
1343 	KASSERT(bt->bt_start == addr);
1344 	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
1345 	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1346 
1347 	/* vmem_xfree_bt() drops the lock. */
1348 	vmem_xfree_bt(vm, bt);
1349 }
1350 
1351 void
1352 vmem_xfreeall(vmem_t *vm)
1353 {
1354 	bt_t *bt;
1355 
1356 	/* This can't be used if the arena has a quantum cache. */
1357 	KASSERT(vm->vm_qcache_max == 0);
1358 
1359 	for (;;) {
1360 		VMEM_LOCK(vm);
1361 		TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1362 			if (bt->bt_type == BT_TYPE_BUSY)
1363 				break;
1364 		}
1365 		if (bt != NULL) {
1366 			/* vmem_xfree_bt() drops the lock. */
1367 			vmem_xfree_bt(vm, bt);
1368 		} else {
1369 			VMEM_UNLOCK(vm);
1370 			return;
1371 		}
1372 	}
1373 }
1374 
1375 static void
1376 vmem_xfree_bt(vmem_t *vm, bt_t *bt)
1377 {
1378 	bt_t *t;
1379 
1380 	VMEM_ASSERT_LOCKED(vm);
1381 
1382 	KASSERT(bt->bt_type == BT_TYPE_BUSY);
1383 	bt_rembusy(vm, bt);
1384 	bt->bt_type = BT_TYPE_FREE;
1385 
1386 	/* coalesce */
1387 	t = TAILQ_NEXT(bt, bt_seglist);
1388 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1389 		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
1390 		bt_remfree(vm, t);
1391 		bt_remseg(vm, t);
1392 		bt->bt_size += t->bt_size;
1393 		bt_free(vm, t);
1394 	}
1395 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1396 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1397 		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
1398 		bt_remfree(vm, t);
1399 		bt_remseg(vm, t);
1400 		bt->bt_size += t->bt_size;
1401 		bt->bt_start = t->bt_start;
1402 		bt_free(vm, t);
1403 	}
1404 
1405 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1406 	KASSERT(t != NULL);
1407 	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
1408 	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
1409 	    t->bt_size == bt->bt_size) {
1410 		vmem_addr_t spanaddr;
1411 		vmem_size_t spansize;
1412 
1413 		KASSERT(t->bt_start == bt->bt_start);
1414 		spanaddr = bt->bt_start;
1415 		spansize = bt->bt_size;
1416 		bt_remseg(vm, bt);
1417 		bt_free(vm, bt);
1418 		bt_remseg(vm, t);
1419 		bt_free(vm, t);
1420 		vm->vm_size -= spansize;
1421 		VMEM_CONDVAR_BROADCAST(vm);
1422 		/* bt_freetrim() drops the lock. */
1423 		bt_freetrim(vm, BT_MAXFREE);
1424 		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
1425 	} else {
1426 		bt_insfree(vm, bt);
1427 		VMEM_CONDVAR_BROADCAST(vm);
1428 		/* bt_freetrim() drops the lock. */
1429 		bt_freetrim(vm, BT_MAXFREE);
1430 	}
1431 }
1432 
1433 /*
1434  * vmem_add:
1435  *
1436  * => caller must ensure appropriate spl,
1437  *    if the arena can be accessed from interrupt context.
1438  */
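
/*
 * usage sketch (editor's illustration): vmem_add(vm, 0x1000, 0x1000,
 * VM_SLEEP) donates the range [0x1000, 0x2000) to the arena as a
 * static span; the unittest main() below exercises this further.
 */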
1439 
1440 int
1441 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
1442 {
1443 	int rv;
1444 
1445 	VMEM_LOCK(vm);
1446 	rv = vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
1447 	VMEM_UNLOCK(vm);
1448 
1449 	return rv;
1450 }
1451 
1452 /*
1453  * vmem_size: information about an arena's size
1454  *
1455  * => return free/allocated size in arena
1456  */
1457 vmem_size_t
1458 vmem_size(vmem_t *vm, int typemask)
1459 {
1460 
1461 	switch (typemask) {
1462 	case VMEM_ALLOC:
1463 		return vm->vm_inuse;
1464 	case VMEM_FREE:
1465 		return vm->vm_size - vm->vm_inuse;
1466 	case VMEM_FREE|VMEM_ALLOC:
1467 		return vm->vm_size;
1468 	default:
1469 		panic("vmem_size");
1470 	}
1471 }
1472 
1473 /* ---- rehash */
1474 
1475 #if defined(_KERNEL)
1476 static struct callout vmem_rehash_ch;
1477 static int vmem_rehash_interval;
1478 static struct workqueue *vmem_rehash_wq;
1479 static struct work vmem_rehash_wk;
1480 
1481 static void
1482 vmem_rehash_all(struct work *wk, void *dummy)
1483 {
1484 	vmem_t *vm;
1485 
1486 	KASSERT(wk == &vmem_rehash_wk);
1487 	mutex_enter(&vmem_list_lock);
1488 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1489 		size_t desired;
1490 		size_t current;
1491 
1492 		desired = atomic_load_relaxed(&vm->vm_maxbusytag);
1493 		current = atomic_load_relaxed(&vm->vm_hashsize);
1494 
1495 		if (desired > VMEM_HASHSIZE_MAX) {
1496 			desired = VMEM_HASHSIZE_MAX;
1497 		} else if (desired < VMEM_HASHSIZE_MIN) {
1498 			desired = VMEM_HASHSIZE_MIN;
1499 		}
1500 		if (desired > current * 2 || desired * 2 < current) {
1501 			vmem_rehash(vm, desired, VM_NOSLEEP);
1502 		}
1503 	}
1504 	mutex_exit(&vmem_list_lock);
1505 
1506 	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1507 }
1508 
1509 static void
1510 vmem_rehash_all_kick(void *dummy)
1511 {
1512 
1513 	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
1514 }
1515 
1516 void
1517 vmem_rehash_start(void)
1518 {
1519 	int error;
1520 
1521 	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
1522 	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
1523 	if (error) {
1524 		panic("%s: workqueue_create %d\n", __func__, error);
1525 	}
1526 	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
1527 	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);
1528 
1529 	vmem_rehash_interval = hz * 10;
1530 	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
1531 }
1532 #endif /* defined(_KERNEL) */
1533 
1534 /* ---- debug */
1535 
1536 #if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)
1537 
1538 static void bt_dump(const bt_t *, void (*)(const char *, ...)
1539     __printflike(1, 2));
1540 
1541 static const char *
1542 bt_type_string(int type)
1543 {
1544 	static const char * const table[] = {
1545 		[BT_TYPE_BUSY] = "busy",
1546 		[BT_TYPE_FREE] = "free",
1547 		[BT_TYPE_SPAN] = "span",
1548 		[BT_TYPE_SPAN_STATIC] = "static span",
1549 	};
1550 
1551 	if (type >= __arraycount(table)) {
1552 		return "BOGUS";
1553 	}
1554 	return table[type];
1555 }
1556 
1557 static void
1558 bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
1559 {
1560 
1561 	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
1562 	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
1563 	    bt->bt_type, bt_type_string(bt->bt_type));
1564 }
1565 
1566 static void
1567 vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...) __printflike(1, 2))
1568 {
1569 	const bt_t *bt;
1570 	int i;
1571 
1572 	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1573 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1574 		bt_dump(bt, pr);
1575 	}
1576 
1577 	for (i = 0; i < VMEM_MAXORDER; i++) {
1578 		const struct vmem_freelist *fl = &vm->vm_freelist[i];
1579 
1580 		if (LIST_EMPTY(fl)) {
1581 			continue;
1582 		}
1583 
1584 		(*pr)("freelist[%d]\n", i);
1585 		LIST_FOREACH(bt, fl, bt_freelist) {
1586 			bt_dump(bt, pr);
1587 		}
1588 	}
1589 }
1590 
1591 #endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */
1592 
1593 #if defined(DDB)
1594 static bt_t *
1595 vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
1596 {
1597 	bt_t *bt;
1598 
1599 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1600 		if (BT_ISSPAN_P(bt)) {
1601 			continue;
1602 		}
1603 		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1604 			return bt;
1605 		}
1606 	}
1607 
1608 	return NULL;
1609 }
1610 
1611 void
1612 vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1613 {
1614 	vmem_t *vm;
1615 
1616 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1617 		bt_t *bt;
1618 
1619 		bt = vmem_whatis_lookup(vm, addr);
1620 		if (bt == NULL) {
1621 			continue;
1622 		}
1623 		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1624 		    (void *)addr, (void *)bt->bt_start,
1625 		    (size_t)(addr - bt->bt_start), vm->vm_name,
1626 		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1627 	}
1628 }
1629 
1630 void
1631 vmem_printall(const char *modif, void (*pr)(const char *, ...))
1632 {
1633 	const vmem_t *vm;
1634 
1635 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1636 		vmem_dump(vm, pr);
1637 	}
1638 }
1639 
1640 void
1641 vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
1642 {
1643 	const vmem_t *vm = (const void *)addr;
1644 
1645 	vmem_dump(vm, pr);
1646 }
1647 #endif /* defined(DDB) */
1648 
1649 #if defined(_KERNEL)
1650 #define vmem_printf printf
1651 #else
1652 #include <stdio.h>
1653 #include <stdarg.h>
1654 
1655 static void
1656 vmem_printf(const char *fmt, ...)
1657 {
1658 	va_list ap;
1659 	va_start(ap, fmt);
1660 	vprintf(fmt, ap);
1661 	va_end(ap);
1662 }
1663 #endif
1664 
1665 #if defined(VMEM_SANITY)
1666 
1667 static bool
1668 vmem_check_sanity(vmem_t *vm)
1669 {
1670 	const bt_t *bt, *bt2;
1671 
1672 	KASSERT(vm != NULL);
1673 
1674 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1675 		if (bt->bt_start > BT_END(bt)) {
1676 			printf("corrupted tag\n");
1677 			bt_dump(bt, vmem_printf);
1678 			return false;
1679 		}
1680 	}
1681 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1682 		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1683 			if (bt == bt2) {
1684 				continue;
1685 			}
1686 			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1687 				continue;
1688 			}
1689 			if (bt->bt_start <= BT_END(bt2) &&
1690 			    bt2->bt_start <= BT_END(bt)) {
1691 				printf("overlapping tags\n");
1692 				bt_dump(bt, vmem_printf);
1693 				bt_dump(bt2, vmem_printf);
1694 				return false;
1695 			}
1696 		}
1697 	}
1698 
1699 	return true;
1700 }
1701 
1702 static void
1703 vmem_check(vmem_t *vm)
1704 {
1705 
1706 	if (!vmem_check_sanity(vm)) {
1707 		panic("insanity vmem %p", vm);
1708 	}
1709 }
1710 
1711 #endif /* defined(VMEM_SANITY) */
1712 
1713 #if defined(UNITTEST)
1714 int
1715 main(void)
1716 {
1717 	int rc;
1718 	vmem_t *vm;
1719 	vmem_addr_t p;
1720 	struct reg {
1721 		vmem_addr_t p;
1722 		vmem_size_t sz;
1723 		bool x;
1724 	} *reg = NULL;
1725 	int nreg = 0;
1726 	int nalloc = 0;
1727 	int nfree = 0;
1728 	vmem_size_t total = 0;
1729 #if 1
1730 	vm_flag_t strat = VM_INSTANTFIT;
1731 #else
1732 	vm_flag_t strat = VM_BESTFIT;
1733 #endif
1734 
1735 	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
1736 #ifdef _KERNEL
1737 	    IPL_NONE
1738 #else
1739 	    0
1740 #endif
1741 	    );
1742 	if (vm == NULL) {
1743 		printf("vmem_create\n");
1744 		exit(EXIT_FAILURE);
1745 	}
1746 	vmem_dump(vm, vmem_printf);
1747 
1748 	rc = vmem_add(vm, 0, 50, VM_SLEEP);
1749 	assert(rc == 0);
1750 	rc = vmem_add(vm, 100, 200, VM_SLEEP);
1751 	assert(rc == 0);
1752 	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
1753 	assert(rc == 0);
1754 	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
1755 	assert(rc == 0);
1756 	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
1757 	assert(rc == 0);
1758 	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
1759 	assert(rc == 0);
1760 	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
1761 	assert(rc == 0);
1762 	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
1763 	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1764 	assert(rc != 0);
1765 	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
1766 	assert(rc == 0 && p == 0);
1767 	vmem_xfree(vm, p, 50);
1768 	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
1769 	assert(rc == 0 && p == 0);
1770 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1771 	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
1772 	assert(rc != 0);
1773 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1774 	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
1775 	assert(rc != 0);
1776 	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
1777 	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
1778 	assert(rc == 0);
1779 	vmem_dump(vm, vmem_printf);
1780 	for (;;) {
1781 		struct reg *r;
1782 		int t = rand() % 100;
1783 
1784 		if (t > 45) {
1785 			/* alloc */
1786 			vmem_size_t sz = rand() % 500 + 1;
1787 			bool x;
1788 			vmem_size_t align, phase, nocross;
1789 			vmem_addr_t minaddr, maxaddr;
1790 
1791 			if (t > 70) {
1792 				x = true;
1793 				/* XXX */
1794 				align = 1 << (rand() % 15);
1795 				phase = rand() % 65536;
1796 				nocross = 1 << (rand() % 15);
1797 				if (align <= phase) {
1798 					phase = 0;
1799 				}
1800 				if (VMEM_CROSS_P(phase, phase + sz - 1,
1801 				    nocross)) {
1802 					nocross = 0;
1803 				}
1804 				do {
1805 					minaddr = rand() % 50000;
1806 					maxaddr = rand() % 70000;
1807 				} while (minaddr > maxaddr);
1808 				printf("=== xalloc %" PRIu64
1809 				    " align=%" PRIu64 ", phase=%" PRIu64
1810 				    ", nocross=%" PRIu64 ", min=%" PRIu64
1811 				    ", max=%" PRIu64 "\n",
1812 				    (uint64_t)sz,
1813 				    (uint64_t)align,
1814 				    (uint64_t)phase,
1815 				    (uint64_t)nocross,
1816 				    (uint64_t)minaddr,
1817 				    (uint64_t)maxaddr);
1818 				rc = vmem_xalloc(vm, sz, align, phase, nocross,
1819 				    minaddr, maxaddr, strat|VM_SLEEP, &p);
1820 			} else {
1821 				x = false;
1822 				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
1823 				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
1824 			}
1825 			printf("-> %" PRIu64 "\n", (uint64_t)p);
1826 			vmem_dump(vm, vmem_printf);
1827 			if (rc != 0) {
1828 				if (x) {
1829 					continue;
1830 				}
1831 				break;
1832 			}
1833 			nreg++;
1834 			reg = realloc(reg, sizeof(*reg) * nreg);
1835 			r = &reg[nreg - 1];
1836 			r->p = p;
1837 			r->sz = sz;
1838 			r->x = x;
1839 			total += sz;
1840 			nalloc++;
1841 		} else if (nreg != 0) {
1842 			/* free */
1843 			r = &reg[rand() % nreg];
1844 			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
1845 			    (uint64_t)r->p, (uint64_t)r->sz);
1846 			if (r->x) {
1847 				vmem_xfree(vm, r->p, r->sz);
1848 			} else {
1849 				vmem_free(vm, r->p, r->sz);
1850 			}
1851 			total -= r->sz;
1852 			vmem_dump(vm, vmem_printf);
1853 			*r = reg[nreg - 1];
1854 			nreg--;
1855 			nfree++;
1856 		}
1857 		printf("total=%" PRIu64 "\n", (uint64_t)total);
1858 	}
1859 	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
1860 	    (uint64_t)total, nalloc, nfree);
1861 	exit(EXIT_SUCCESS);
1862 }
1863 #endif /* defined(UNITTEST) */
1864