1 /*
2  * DMALLOC.C	- Dillon's malloc
3  *
4  * Copyright (c) 2011 The DragonFly Project. All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com>.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * This module implements a modified slab allocator drop-in replacement for
38  * the libc malloc().  The slab algorithm has been adjusted to support dynamic
39  * sizing of slabs, which effectively allows slabs to be used for allocations
40  * of any size.  Because of this we have neither a small-block allocator nor a
41  * big-block allocator, and the code paths are simplified to the point where
42  * allocation, caching, and freeing are screaming fast.
43  *
44  * There is very little interaction between threads.  A global depot accessed
45  * via atomic cmpxchg instructions (only! no spinlocks!) is used as a
46  * catch-all and to deal with thread exits and such.
47  *
48  * To support dynamic slab sizing, available user virtual memory is broken
49  * down into ~1024 regions.  Each region has a fixed slab size whose value is
50  * set when the region is opened up for use.  The free() path simply applies
51  * a mask based on the region to the pointer to acquire the base of the
52  * governing slab structure.
53  *
54  * Regions[NREGIONS]	(1024)
55  *
56  * Slab management and locking is done on a per-zone basis.
57  *
58  *	Alloc Size	Chunking        Number of zones
59  *	0-127		8		16
60  *	128-255		16		8
61  *	256-511		32		8
62  *	512-1023	64		8
63  *	1024-2047	128		8
64  *	2048-4095	256		8
65  *	4096-8191	512		8
66  *	8192-16383	1024		8
67  *	16384-32767	2048		8
68  *	32768-65535	4096		8
69  *	... continues unlimited ...	8 zones
70  *
71  *	For a 2^63 memory space each doubling >= 64K is broken down into
72  *	8 chunking zones (CHUNKFACTOR), so we support 88 + (48 * 8) = 472 zones.
73  *
74  *			   API FEATURES AND SIDE EFFECTS
75  *
76  *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
77  *	Above that, power-of-2 sized allocations are page-aligned.  Non-
78  *	power-of-2 sized allocations are aligned the same as the chunk
79  *	size for their zone.
80  *    + malloc(0) returns a special non-NULL value
81  *    + ability to allocate arbitrarily large chunks of memory
82  *    + realloc will reuse the passed pointer if possible, within the
83  *	limitations of the zone chunking.
84  *
85  *				FUTURE FEATURES
86  *
87  *    + [better] garbage collection
88  *    + better initial sizing.
89  *
90  * TUNING
91  *
92  * The value of the environment variable MALLOC_OPTIONS is a character string
93  * containing various flags to tune nmalloc.  Upper case letters enable
94  * or increase a feature, lower case letters disable or decrease it.
95  *
96  * U		Enable UTRACE for all operations, observable with ktrace.
97  *		Disabled by default.
98  *
99  * Z		Zero out allocations, otherwise allocations (except for
100  *		calloc) will contain garbage.
101  *		Disabled by default.
102  *
103  * H		Pass a hint with madvise() about unused pages.
104  *		Disabled by default.
105  *		Not currently implemented.
106  *
107  * F		Disable local per-thread caching.
108  *		Disabled by default.
109  *
110  * C		Increase (decrease) how much excess cache to retain.
111  *		Set to 4 by default.
112  */
113 
114 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o dmalloc.so dmalloc.c */
115 
116 #ifndef STANDALONE_DEBUG
117 #include "libc_private.h"
118 #endif
119 
120 #include <sys/param.h>
121 #include <sys/types.h>
122 #include <sys/mman.h>
123 #include <sys/queue.h>
124 #include <sys/uio.h>
125 #include <sys/ktrace.h>
126 #include <stdio.h>
127 #include <stdint.h>
128 #include <stdlib.h>
129 #include <stdarg.h>
130 #include <stddef.h>
131 #include <unistd.h>
132 #include <string.h>
133 #include <fcntl.h>
134 #include <errno.h>
135 #include <pthread.h>
136 #include <limits.h>
137 
138 #include <machine/atomic.h>
139 #include <machine/cpufunc.h>
140 
141 #ifdef STANDALONE_DEBUG
142 void _nmalloc_thr_init(void);
143 #else
144 #include "spinlock.h"
145 #include "un-namespace.h"
146 #endif
147 
148 #ifndef MAP_SIZEALIGN
149 #define MAP_SIZEALIGN	0
150 #endif
151 
152 #if SSIZE_MAX == 0x7FFFFFFF
153 #define ADDRBITS	32
154 #define UVM_BITS	32	/* worst case */
155 #else
156 #define ADDRBITS	64
157 #define UVM_BITS	48	/* worst case XXX */
158 #endif
159 
160 #if LONG_MAX == 0x7FFFFFFF
161 #define LONG_BITS	32
162 #define LONG_BITS_SHIFT	5
163 #else
164 #define LONG_BITS	64
165 #define LONG_BITS_SHIFT	6
166 #endif
167 
168 #define LOCKEDPTR	((void *)(intptr_t)-1)
169 
170 /*
171  * Regions[]
172  */
173 #define NREGIONS_BITS	10
174 #define NREGIONS	(1 << NREGIONS_BITS)
175 #define NREGIONS_MASK	(NREGIONS - 1)
176 #define NREGIONS_SHIFT	(UVM_BITS - NREGIONS_BITS)
177 #define NREGIONS_SIZE	(1LU << NREGIONS_SHIFT)
178 
179 typedef struct region *region_t;
180 typedef struct slglobaldata *slglobaldata_t;
181 typedef struct slab *slab_t;
182 
183 struct region {
184 	uintptr_t	mask;
185 	slab_t		slab;	/* conditional out of band slab */
186 };
187 
188 static struct region Regions[NREGIONS];
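
/*
 * Illustration only (not original code, not compiled): a minimal sketch of
 * the pointer-to-slab lookup described in the header comment.  It mirrors
 * the logic used later by memfree() and memrealloc(): the region index is
 * taken from the high bits of the pointer, and the region's mask (set when
 * the region was opened) recovers the base of the governing slab unless an
 * out-of-band slab header was recorded in region->slab.
 */
#if 0
static slab_t
ptr_to_slab_sketch(void *ptr)
{
	region_t region;
	slab_t slab;

	region = &Regions[((uintptr_t)ptr >> NREGIONS_SHIFT) & NREGIONS_MASK];
	if ((slab = region->slab) == NULL)		/* out-of-band slab? */
		slab = (void *)((uintptr_t)ptr & region->mask);
	return (slab);
}
#endif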
189 
190 /*
191  * Number of chunking zones available
192  */
193 #define CHUNKFACTOR	8
194 #if ADDRBITS == 32
195 #define NZONES		(16 + 9 * CHUNKFACTOR + 16 * CHUNKFACTOR)
196 #else
197 #define NZONES		(16 + 9 * CHUNKFACTOR + 48 * CHUNKFACTOR)
198 #endif
199 
200 static int MaxChunks[NZONES];
201 
202 #define NDEPOTS		8		/* must be power of 2 */
203 
204 /*
205  * Maximum number of chunks per slab, governed by the allocation bitmap in
206  * each slab.  The maximum is reduced for large chunk sizes.
207  */
208 #define MAXCHUNKS	(LONG_BITS * LONG_BITS)
209 #define MAXCHUNKS_BITS	(LONG_BITS_SHIFT * 2)	/* log2(MAXCHUNKS) */
210 #define LITSLABSIZE	(32 * 1024)
211 #define NOMSLABSIZE	(2 * 1024 * 1024)
212 #define BIGSLABSIZE	(128 * 1024 * 1024)
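
/*
 * For reference (assuming a 64-bit system, LONG_BITS == 64): MAXCHUNKS is
 * 64 * 64 = 4096 chunks per slab, the smallest slab size computed by
 * slaballoc() is 8 * MAXCHUNKS = 32KB (LITSLABSIZE), the nominal target is
 * 2MB (NOMSLABSIZE), and slabs are not normally allowed to exceed 128MB
 * (BIGSLABSIZE) unless a single allocation requires it.
 */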
213 
214 #define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
215 
216 TAILQ_HEAD(slab_list, slab);
217 
218 /*
219  * A slab structure
220  */
221 struct slab {
222 	struct slab	*next;		/* slabs with available space */
223 	TAILQ_ENTRY(slab) entry;
224 	int32_t		magic;		/* magic number for sanity check */
225 	u_int		navail;		/* number of free elements available */
226 	u_int		nmax;
227 	u_int		free_bit;	/* free hint bitno */
228 	u_int		free_index;	/* free hint index */
229 	u_long		bitmap[LONG_BITS]; /* free chunks */
230 	size_t		slab_size;	/* size of entire slab */
231 	size_t		chunk_size;	/* chunk size for validation */
232 	int		zone_index;
233 	enum { UNKNOWN, AVAIL, EMPTY, FULL } state;
234 	int		flags;
235 	region_t	region;		/* related region */
236 	char		*chunks;	/* chunk base */
237 	slglobaldata_t	slgd;
238 };
239 
240 /*
241  * per-thread data
242  */
243 struct slglobaldata {
244 	struct zoneinfo {
245 		slab_t	avail_base;
246 		slab_t	empty_base;
247 		int	best_region;
248 		int	empty_count;
249 	} zone[NZONES];
250 	struct slab_list full_zones;		/* via entry */
251 	int		masked;
252 	int		biggest_index;
253 	size_t		nslabs;
254 	slglobaldata_t	sldepot;
255 };
256 
257 #define SLAB_ZEROD		0x0001
258 
259 /*
260  * Misc constants.  MIN_CHUNK_SIZE is the smallest chunk granularity
261  * supported; it matches the 8-byte chunking that zoneindex() uses for
262  * the smallest zones.
263  */
264 #define MIN_CHUNK_SIZE		8		/* in bytes */
265 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
266 
267 #define SAFLAG_ZERO	0x00000001
268 
269 /*
270  * The WEIRD_ADDR is used as known text to copy into free objects to
271  * try to create deterministic failure cases if the data is accessed after
272  * free.
273  *
274  * WARNING: A limited number of spinlocks are available, BIGXSIZE should
275  *	    not be larger than 64.
276  */
277 #define WEIRD_ADDR      0xdeadc0de
278 #define MAX_COPY        sizeof(weirdary)
279 
280 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
281 
282 /*
283  * Thread control
284  */
285 
286 #define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
287 				_mpanic("assertion: %s in %s",	\
288 				#exp, __func__);		\
289 			    } while (0)
290 
291 /* With this attribute set, accessing this variable does not require a
292  * function call (TLS indirection) even when the code is compiled -fPIC */
293 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")));
294 
295 static __thread struct slglobaldata slglobal TLS_ATTRIBUTE;
296 static pthread_key_t thread_malloc_key;
297 static pthread_once_t thread_malloc_once = PTHREAD_ONCE_INIT;
298 static struct slglobaldata sldepots[NDEPOTS];
299 
300 static int opt_madvise = 0;
301 static int opt_free = 0;
302 static int opt_cache = 4;
303 static int opt_utrace = 0;
304 static int g_malloc_flags = 0;
305 static int malloc_panic;
306 
307 static const int32_t weirdary[16] = {
308 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
309 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
310 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
311 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
312 };
313 
314 static void *memalloc(size_t size, int flags);
315 static void *memrealloc(void *ptr, size_t size);
316 static void memfree(void *ptr, int);
317 static slab_t slaballoc(int zi, size_t chunking, size_t chunk_size);
318 static void slabfree(slab_t slab);
319 static void slabterm(slglobaldata_t slgd, slab_t slab);
320 static void *_vmem_alloc(int ri, size_t slab_size);
321 static void _vmem_free(void *ptr, size_t slab_size);
322 static void _mpanic(const char *ctl, ...) __printflike(1, 2);
323 #ifndef STANDALONE_DEBUG
324 static void malloc_init(void) __constructor(0);
325 #else
326 static void malloc_init(void) __constructor(101);
327 #endif
328 
329 struct nmalloc_utrace {
330 	void *p;
331 	size_t s;
332 	void *r;
333 };
334 
335 #define UTRACE(a, b, c)						\
336 	if (opt_utrace) {					\
337 		struct nmalloc_utrace ut = {			\
338 			.p = (a),				\
339 			.s = (b),				\
340 			.r = (c)				\
341 		};						\
342 		utrace(&ut, sizeof(ut));			\
343 	}
344 
345 #ifdef INVARIANTS
346 /*
347  * If enabled, any memory allocated without SAFLAG_ZERO is initialized to -1.
348  */
349 static int  use_malloc_pattern;
350 #endif
351 
352 static void
353 malloc_init(void)
354 {
355 	const char *p = NULL;
356 
357 	Regions[0].mask = -1; /* disallow activity in lowest region */
358 
359 	if (issetugid() == 0)
360 		p = getenv("MALLOC_OPTIONS");
361 
362 	for (; p != NULL && *p != '\0'; p++) {
363 		switch(*p) {
364 		case 'u':
365 			opt_utrace = 0;
366 			break;
367 		case 'U':
368 			opt_utrace = 1;
369 			break;
370 		case 'h':
371 			opt_madvise = 0;
372 			break;
373 		case 'H':
374 			opt_madvise = 1;
375 			break;
376 		case 'c':
377 			if (opt_cache > 0)
378 				--opt_cache;
379 			break;
380 		case 'C':
381 			++opt_cache;
382 			break;
383 		case 'f':
384 			opt_free = 0;
385 			break;
386 		case 'F':
387 			opt_free = 1;
388 			break;
389 		case 'z':
390 			g_malloc_flags = 0;
391 			break;
392 		case 'Z':
393 			g_malloc_flags = SAFLAG_ZERO;
394 			break;
395 		default:
396 			break;
397 		}
398 	}
399 
400 	UTRACE((void *) -1, 0, NULL);
401 	_nmalloc_thr_init();
402 }
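
/*
 * Example only (not original code): the loop above scans the option string
 * left to right, so with MALLOC_OPTIONS="CCZU" opt_cache is bumped from 4
 * to 6, allocation zeroing (Z) is enabled, and utrace records (U) are
 * emitted.  The string is read from the environment at constructor time,
 * so it must be set before the program starts, e.g. by a wrapper that
 * exports it and then execs the real program (names below are placeholders).
 */
#if 0
static void
malloc_options_example(char **argv)
{
	setenv("MALLOC_OPTIONS", "CCZU", 1);	/* hypothetical wrapper */
	execv("/usr/local/bin/someprog", argv);	/* placeholder path */
}
#endif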
403 
404 /*
405  * We have to install a handler for nmalloc thread teardowns when
406  * the thread is created.  We cannot delay this because destructors in
407  * sophisticated userland programs can call malloc() for the first time
408  * during their thread exit.
409  *
410  * This routine is called directly from pthreads.
411  */
412 static void _nmalloc_thr_init_once(void);
413 static void _nmalloc_thr_destructor(void *thrp);
414 
415 void
416 _nmalloc_thr_init(void)
417 {
418 	static int did_init;
419 	static int SLGI;
420 	int slgi;
421 
422 	slgi = SLGI++;
423 	cpu_ccfence();
424 	TAILQ_INIT(&slglobal.full_zones);
425 	slglobal.sldepot = &sldepots[slgi & (NDEPOTS - 1)];
426 
427 	slglobal.masked = 1;
428 	if (did_init == 0) {
429 		did_init = 1;
430 		pthread_once(&thread_malloc_once, _nmalloc_thr_init_once);
431 	}
432 	pthread_setspecific(thread_malloc_key, &slglobal);
433 	slglobal.masked = 0;
434 }
435 
436 /*
437  * Called just once
438  */
439 static void
440 _nmalloc_thr_init_once(void)
441 {
442 	int error;
443 
444 	error = pthread_key_create(&thread_malloc_key, _nmalloc_thr_destructor);
445 	if (error)
446 		abort();
447 }
448 
449 /*
450  * Called for each thread undergoing exit
451  *
452  * Move all of the thread's slabs into a depot.
453  */
454 static void
455 _nmalloc_thr_destructor(void *thrp)
456 {
457 	slglobaldata_t slgd = thrp;
458 	slab_t slab;
459 	int i;
460 
461 	slgd->masked = 1;
462 
463 	for (i = 0; i <= slgd->biggest_index; i++) {
464 		while ((slab = slgd->zone[i].empty_base) != NULL) {
465 			slgd->zone[i].empty_base = slab->next;
466 			slabterm(slgd, slab);
467 		}
468 
469 		while ((slab = slgd->zone[i].avail_base) != NULL) {
470 			slgd->zone[i].avail_base = slab->next;
471 			slabterm(slgd, slab);
472 		}
473 
474 		while ((slab = TAILQ_FIRST(&slgd->full_zones)) != NULL) {
475 			TAILQ_REMOVE(&slgd->full_zones, slab, entry);
476 			slabterm(slgd, slab);
477 		}
478 	}
479 }
480 
481 /*
482  * Calculate the zone index for the allocation request size and set the
483  * allocation request size to that particular zone's chunk size.
484  */
485 static __inline int
486 zoneindex(size_t *bytes, size_t *chunking)
487 {
488 	size_t n = (size_t)*bytes;
489 	size_t x;
490 	size_t c;
491 	int i;
492 
493 	if (n < 128) {
494 		*bytes = n = (n + 7) & ~7;
495 		*chunking = 8;
496 		return(n / 8);			/* 8 byte chunks, 16 zones */
497 	}
498 	if (n < 4096) {
499 		x = 256;
500 		c = x / (CHUNKFACTOR * 2);
501 		i = 16;
502 	} else {
503 		x = 8192;
504 		c = x / (CHUNKFACTOR * 2);
505 		i = 16 + CHUNKFACTOR * 5;  /* 256->512,1024,2048,4096,8192 */
506 	}
507 	while (n >= x) {
508 		x <<= 1;
509 		c <<= 1;
510 		i += CHUNKFACTOR;
511 		if (x == 0)
512 			_mpanic("slaballoc: byte value too high");
513 	}
514 	*bytes = n = (n + c - 1) & ~(c - 1);
515 	*chunking = c;
516 	return (i + n / c - CHUNKFACTOR);
517 #if 0
518 	*bytes = n = (n + c - 1) & ~(c - 1);
519 	*chunking = c;
520 	return (n / c + i);
521 
522 	if (n < 256) {
523 		*bytes = n = (n + 15) & ~15;
524 		*chunking = 16;
525 		return(n / (CHUNKINGLO*2) + CHUNKINGLO*1 - 1);
526 	}
527 	if (n < 8192) {
528 		if (n < 512) {
529 			*bytes = n = (n + 31) & ~31;
530 			*chunking = 32;
531 			return(n / (CHUNKINGLO*4) + CHUNKINGLO*2 - 1);
532 		}
533 		if (n < 1024) {
534 			*bytes = n = (n + 63) & ~63;
535 			*chunking = 64;
536 			return(n / (CHUNKINGLO*8) + CHUNKINGLO*3 - 1);
537 		}
538 		if (n < 2048) {
539 			*bytes = n = (n + 127) & ~127;
540 			*chunking = 128;
541 			return(n / (CHUNKINGLO*16) + CHUNKINGLO*4 - 1);
542 		}
543 		if (n < 4096) {
544 			*bytes = n = (n + 255) & ~255;
545 			*chunking = 256;
546 			return(n / (CHUNKINGLO*32) + CHUNKINGLO*5 - 1);
547 		}
548 		*bytes = n = (n + 511) & ~511;
549 		*chunking = 512;
550 		return(n / (CHUNKINGLO*64) + CHUNKINGLO*6 - 1);
551 	}
552 	if (n < 16384) {
553 		*bytes = n = (n + 1023) & ~1023;
554 		*chunking = 1024;
555 		return(n / (CHUNKINGLO*128) + CHUNKINGLO*7 - 1);
556 	}
557 	if (n < 32768) {				/* 16384-32767 */
558 		*bytes = n = (n + 2047) & ~2047;
559 		*chunking = 2048;
560 		return(n / (CHUNKINGLO*256) + CHUNKINGLO*8 - 1);
561 	}
562 	if (n < 65536) {
563 		*bytes = n = (n + 4095) & ~4095;	/* 32768-65535 */
564 		*chunking = 4096;
565 		return(n / (CHUNKINGLO*512) + CHUNKINGLO*9 - 1);
566 	}
567 
568 	x = 131072;
569 	c = 8192;
570 	i = CHUNKINGLO*10 - 1;
571 
572 	while (n >= x) {
573 		x <<= 1;
574 		c <<= 1;
575 		i += CHUNKINGHI;
576 		if (x == 0)
577 			_mpanic("slaballoc: byte value too high");
578 	}
579 	*bytes = n = (n + c - 1) & ~(c - 1);
580 	*chunking = c;
581 	return (n / c + i);
582 #endif
583 }
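
/*
 * Worked example (illustration only, not original code): a request for
 * 300 bytes falls in the 256-511 band, so zoneindex() rounds it up to the
 * 32-byte chunking (320 bytes) and returns zone 26 when CHUNKFACTOR is 8.
 */
#if 0
static void
zoneindex_example(void)
{
	size_t bytes = 300;
	size_t chunking;
	int zi;

	zi = zoneindex(&bytes, &chunking);
	MASSERT(bytes == 320 && chunking == 32 && zi == 26);
}
#endif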
584 
585 /*
586  * malloc() - call internal slab allocator
587  */
588 void *
589 malloc(size_t size)
590 {
591 	void *ptr;
592 
593 	ptr = memalloc(size, 0);
594 	if (ptr == NULL)
595 		errno = ENOMEM;
596 	else
597 		UTRACE(0, size, ptr);
598 	return(ptr);
599 }
600 
601 /*
602  * calloc() - call internal slab allocator
603  */
604 void *
605 calloc(size_t number, size_t size)
606 {
607 	void *ptr;
608 
609 	ptr = memalloc(number * size, SAFLAG_ZERO);
610 	if (ptr == NULL)
611 		errno = ENOMEM;
612 	else
613 		UTRACE(0, number * size, ptr);
614 	return(ptr);
615 }
616 
617 /*
618  * realloc() (SLAB ALLOCATOR)
619  *
620  * We do not attempt to optimize this routine beyond reusing the same
621  * pointer if the new size fits within the chunking of the old pointer's
622  * zone.
623  */
624 void *
625 realloc(void *ptr, size_t size)
626 {
627 	void *ret;
628 
629 	if (ptr == NULL)
630 		ret = memalloc(size, 0);
631 	else
632 		ret = memrealloc(ptr, size);
633 	if (ret == NULL)
634 		errno = ENOMEM;
635 	else
636 		UTRACE(ptr, size, ret);
637 	return(ret);
638 }
639 
640 /*
641  * posix_memalign()
642  *
643  * Allocate (size) bytes with an alignment of (alignment), where (alignment)
644  * is a power of 2 >= sizeof(void *).
645  *
646  * The slab allocator will allocate on power-of-2 boundaries up to at least
647  * PAGE_SIZE.  Otherwise we use the zoneindex mechanic to find a zone
648  * matching the requirements.
649  */
650 int
651 posix_memalign(void **memptr, size_t alignment, size_t size)
652 {
653 	size_t chunking;
654 	int zi;
655 
656 	/*
657 	 * OpenGroup spec issue 6 checks
658 	 */
659 	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
660 		*memptr = NULL;
661 		return(EINVAL);
662 	}
663 	if (alignment < sizeof(void *)) {
664 		*memptr = NULL;
665 		return(EINVAL);
666 	}
667 
668 	/*
669 	 * XXX for now just find the nearest power of 2 >= size and also
670 	 * >= alignment and allocate that.
671 	 */
672 	while (alignment < size) {
673 		alignment <<= 1;
674 		if (alignment == 0)
675 			_mpanic("posix_memalign: byte value too high");
676 	}
677 	*memptr = memalloc(alignment, 0);
678 	return(*memptr ? 0 : ENOMEM);
679 }
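
/*
 * Usage sketch (illustration only, not original code): with the power-of-2
 * strategy above, a request for 1000 bytes aligned to 64 is grown to a
 * 1024-byte power-of-2 allocation, which also satisfies the 64-byte
 * alignment requirement.
 */
#if 0
static void
posix_memalign_example(void)
{
	void *p;

	if (posix_memalign(&p, 64, 1000) == 0) {
		MASSERT(((uintptr_t)p & 63) == 0);
		free(p);
	}
}
#endif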
680 
681 /*
682  * free() (SLAB ALLOCATOR) - do the obvious
683  */
684 void
685 free(void *ptr)
686 {
687 	if (ptr) {
688 		UTRACE(ptr, 0, 0);
689 		memfree(ptr, 0);
690 	}
691 }
692 
693 /*
694  * memalloc()	(SLAB ALLOCATOR)
695  *
696  *	Allocate memory via the slab allocator.
697  */
698 static void *
699 memalloc(size_t size, int flags)
700 {
701 	slglobaldata_t slgd;
702 	struct zoneinfo *zinfo;
703 	slab_t slab;
704 	size_t chunking;
705 	int bmi;
706 	int bno;
707 	u_long *bmp;
708 	int zi;
709 #ifdef INVARIANTS
710 	int i;
711 #endif
712 	size_t off;
713 	char *obj;
714 
715 	/*
716 	 * If 0 bytes are requested we still have to return a unique pointer,
717 	 * so allocate at least one byte.
718 	 */
719 	if (size == 0)
720 		size = 1;
721 
722 	/* Capture global flags */
723 	flags |= g_malloc_flags;
724 
725 	/* Compute allocation zone; zoneindex will panic on excessive sizes */
726 	zi = zoneindex(&size, &chunking);
727 	MASSERT(zi < NZONES);
728 	if (size == 0)
729 		return(NULL);
730 
731 	/*
732 	 * Locate a slab with available space.  If no slabs are available,
733 	 * back off to the empty list and, if we still come up dry, allocate
734 	 * a new slab (which will try the depot first).
735 	 */
736 retry:
737 	slgd = &slglobal;
738 	zinfo = &slgd->zone[zi];
739 	if ((slab = zinfo->avail_base) == NULL) {
740 		if ((slab = zinfo->empty_base) == NULL) {
741 			/*
742 			 * Still dry
743 			 */
744 			slab = slaballoc(zi, chunking, size);
745 			if (slab == NULL)
746 				return(NULL);
747 			slab->next = zinfo->avail_base;
748 			zinfo->avail_base = slab;
749 			slab->state = AVAIL;
750 			if (slgd->biggest_index < zi)
751 				slgd->biggest_index = zi;
752 			++slgd->nslabs;
753 		} else {
754 			/*
755 			 * Pulled from empty list
756 			 */
757 			zinfo->empty_base = slab->next;
758 			slab->next = zinfo->avail_base;
759 			zinfo->avail_base = slab;
760 			slab->state = AVAIL;
761 			--zinfo->empty_count;
762 		}
763 	}
764 
765 	/*
766 	 * Allocate a chunk out of the slab.  HOT PATH
767 	 *
768 	 * Only the thread owning the slab can allocate out of it.
769 	 *
770 	 * NOTE: The last bit in the bitmap is always marked allocated so
771 	 *	 we cannot overflow here.
772 	 */
773 	bno = slab->free_bit;
774 	bmi = slab->free_index;
775 	bmp = &slab->bitmap[bmi];
776 	if (*bmp & (1LU << bno)) {
777 		atomic_clear_long(bmp, 1LU << bno);
778 		obj = slab->chunks + ((bmi << LONG_BITS_SHIFT) + bno) * size;
779 		slab->free_bit = (bno + 1) & (LONG_BITS - 1);
780 		atomic_add_int(&slab->navail, -1);
781 		if (flags & SAFLAG_ZERO)
782 			bzero(obj, size);
783 		return (obj);
784 	}
785 
786 	/*
787 	 * Allocate a chunk out of a slab.  COLD PATH
788 	 */
789 	if (slab->navail == 0) {
790 		zinfo->avail_base = slab->next;
791 		slab->state = FULL;
792 		TAILQ_INSERT_TAIL(&slgd->full_zones, slab, entry);
793 		goto retry;
794 	}
795 
796 	while (bmi < LONG_BITS) {
797 		bmp = &slab->bitmap[bmi];
798 		if (*bmp) {
799 			bno = bsflong(*bmp);
800 			atomic_clear_long(bmp, 1LU << bno);
801 			obj = slab->chunks + ((bmi << LONG_BITS_SHIFT) + bno) *
802 					     size;
803 			slab->free_index = bmi;
804 			slab->free_bit = (bno + 1) & (LONG_BITS - 1);
805 			atomic_add_int(&slab->navail, -1);
806 			if (flags & SAFLAG_ZERO)
807 				bzero(obj, size);
808 			return (obj);
809 		}
810 		++bmi;
811 	}
812 	bmi = 0;
813 	while (bmi < LONG_BITS) {
814 		bmp = &slab->bitmap[bmi];
815 		if (*bmp) {
816 			bno = bsflong(*bmp);
817 			atomic_clear_long(bmp, 1LU << bno);
818 			obj = slab->chunks + ((bmi << LONG_BITS_SHIFT) + bno) *
819 					     size;
820 			slab->free_index = bmi;
821 			slab->free_bit = (bno + 1) & (LONG_BITS - 1);
822 			atomic_add_int(&slab->navail, -1);
823 			if (flags & SAFLAG_ZERO)
824 				bzero(obj, size);
825 			return (obj);
826 		}
827 		++bmi;
828 	}
829 	_mpanic("slaballoc: corrupted zone: navail %d", slab->navail);
830 	/* not reached */
831 	return NULL;
832 }
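
/*
 * Worked example of the bitmap math above (illustration only): on a 64-bit
 * system LONG_BITS_SHIFT is 6, so bitmap word bmi and bit bno select chunk
 * number (bmi * 64 + bno).  With bmi == 2, bno == 5 and a 320-byte chunk
 * size, the returned object lies 133 * 320 bytes past slab->chunks.
 */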
833 
834 /*
835  * Reallocate memory within the chunk
836  */
837 static void *
838 memrealloc(void *ptr, size_t nsize)
839 {
840 	region_t region;
841 	slglobaldata_t slgd;
842 	slab_t slab;
843 	size_t osize;
844 	char *obj;
845 	int flags;
846 
847 	/*
848 	 * If 0 bytes are requested we still have to return a unique pointer,
849 	 * so allocate at least one byte.
850 	 */
851 	if (nsize == 0)
852 		nsize = 1;
853 
854 	/* Capture global flags */
855 	flags = g_malloc_flags;
856 
857 	/*
858 	 * Locate the zone by looking up the dynamic slab size mask based
859 	 * on the memory region the allocation resides in.
860 	 */
861 	region = &Regions[((uintptr_t)ptr >> NREGIONS_SHIFT) & NREGIONS_MASK];
862 	if ((slab = region->slab) == NULL)
863 		slab = (void *)((uintptr_t)ptr & region->mask);
864 	MASSERT(slab->magic == ZALLOC_SLAB_MAGIC);
865 	osize = slab->chunk_size;
866 	if (nsize <= osize) {
867 		if (osize < 32 || nsize >= osize / 2) {
868 			obj = ptr;
869 			if ((flags & SAFLAG_ZERO) && nsize < osize)
870 				bzero(obj + nsize, osize - nsize);
871 			return(obj);
872 		}
873 	}
874 
875 	/*
876 	 * Otherwise resize the object
877 	 */
878 	obj = memalloc(nsize, 0);
879 	if (obj) {
880 		if (nsize > osize)
881 			nsize = osize;
882 		bcopy(ptr, obj, nsize);
883 		memfree(ptr, 0);
884 	}
885 	return (obj);
886 }
887 
888 /*
889  * free (SLAB ALLOCATOR)
890  *
891  * Free a memory block previously allocated by malloc.
892  *
893  * MPSAFE
894  */
895 static void
896 memfree(void *ptr, int flags)
897 {
898 	region_t region;
899 	slglobaldata_t slgd;
900 	slab_t slab;
901 	slab_t stmp;
902 	slab_t *slabp;
903 	char *obj;
904 	int bmi;
905 	int bno;
906 	u_long *bmp;
907 
908 	/*
909 	 * Locate the zone by looking up the dynamic slab size mask based
910 	 * on the memory region the allocation resides in.
911 	 *
912 	 * WARNING!  The slab may be owned by another thread!
913 	 */
914 	region = &Regions[((uintptr_t)ptr >> NREGIONS_SHIFT) & NREGIONS_MASK];
915 	if ((slab = region->slab) == NULL)
916 		slab = (void *)((uintptr_t)ptr & region->mask);
917 	MASSERT(slab != NULL);
918 	MASSERT(slab->magic == ZALLOC_SLAB_MAGIC);
919 
920 #ifdef INVARIANTS
921 	/*
922 	 * Put weird data into the memory to detect modifications after
923 	 * freeing, illegal pointer use after freeing (we should fault on
924 	 * the odd address), and so forth.
925 	 */
926 	if (slab->chunk_size < sizeof(weirdary))
927 		bcopy(weirdary, ptr, slab->chunk_size);
928 	else
929 		bcopy(weirdary, ptr, sizeof(weirdary));
930 #endif
931 
932 	bno = ((uintptr_t)ptr - (uintptr_t)slab->chunks) / slab->chunk_size;
933 	bmi = bno >> LONG_BITS_SHIFT;
934 	bno &= (LONG_BITS - 1);
935 	bmp = &slab->bitmap[bmi];
936 
937 	MASSERT(bmi >= 0 && bmi < slab->nmax);
938 	MASSERT((*bmp & (1LU << bno)) == 0);
939 	atomic_set_long(bmp, 1LU << bno);
940 	atomic_add_int(&slab->navail, 1);
941 
942 	/*
943 	 * We can only do the following if we own the slab
944 	 */
945 	slgd = &slglobal;
946 	if (slab->slgd == slgd) {
947 		struct zoneinfo *zinfo;
948 
949 		if (slab->free_index > bmi) {
950 			slab->free_index = bmi;
951 			slab->free_bit = bno;
952 		} else if (slab->free_index == bmi &&
953 			   slab->free_bit > bno) {
954 			slab->free_bit = bno;
955 		}
956 		zinfo = &slgd->zone[slab->zone_index];
957 
958 		/*
959 		 * Freeing an object from a full slab will move it to the
960 		 * available list.  If the available list already has a
961 		 * slab we terminate the full slab instead, moving it to
962 		 * the depot.
963 		 */
964 		if (slab->state == FULL) {
965 			TAILQ_REMOVE(&slgd->full_zones, slab, entry);
966 			if (zinfo->avail_base == NULL) {
967 				slab->state = AVAIL;
968 				stmp = zinfo->avail_base;
969 				slab->next = stmp;
970 				zinfo->avail_base = slab;
971 			} else {
972 				slabterm(slgd, slab);
973 				goto done;
974 			}
975 		}
976 
977 		/*
978 		 * If the slab becomes completely empty dispose of it in
979 		 * some manner.  By default each thread caches up to 4
980 		 * empty slabs.  Only small slabs are cached.
981 		 */
982 		if (slab->navail == slab->nmax && slab->state == AVAIL) {
983 			/*
984 			 * Remove slab from available queue
985 			 */
986 			slabp = &zinfo->avail_base;
987 			while ((stmp = *slabp) != slab)
988 				slabp = &stmp->next;
989 			*slabp = slab->next;
990 
991 			if (opt_free || opt_cache == 0) {
992 				/*
993 				 * If local caching is disabled cache the
994 				 * slab in the depot (or free it).
995 				 */
996 				slabterm(slgd, slab);
997 			} else if (slab->slab_size > BIGSLABSIZE) {
998 				/*
999 				 * We do not try to retain large slabs
1000 				 * in per-thread caches.
1001 				 */
1002 				slabterm(slgd, slab);
1003 			} else if (zinfo->empty_count > opt_cache) {
1004 				/*
1005 				 * We have too many slabs cached, but
1006 				 * instead of freeing this one free
1007 				 * an empty slab that's been idle longer.
1008 				 *
1009 				 * (empty_count does not change)
1010 				 */
1011 				stmp = zinfo->empty_base;
1012 				slab->state = EMPTY;
1013 				slab->next = stmp->next;
1014 				zinfo->empty_base = slab;
1015 				slabterm(slgd, stmp);
1016 			} else {
1017 				/*
1018 				 * Cache the empty slab in our thread local
1019 				 * empty list.
1020 				 */
1021 				++zinfo->empty_count;
1022 				slab->state = EMPTY;
1023 				slab->next = zinfo->empty_base;
1024 				zinfo->empty_base = slab;
1025 			}
1026 		}
1027 	}
1028 done:
1029 	;
1030 }
1031 
1032 /*
1033  * Allocate a new slab holding objects of size chunk_size.
1034  */
1035 static slab_t
1036 slaballoc(int zi, size_t chunking, size_t chunk_size)
1037 {
1038 	slglobaldata_t slgd;
1039 	slglobaldata_t sldepot;
1040 	struct zoneinfo *zinfo;
1041 	region_t region;
1042 	void *save;
1043 	slab_t slab;
1044 	slab_t stmp;
1045 	size_t slab_desire;
1046 	size_t slab_size;
1047 	size_t region_mask;
1048 	uintptr_t chunk_offset;
1049 	ssize_t maxchunks;
1050 	ssize_t tmpchunks;
1051 	int ispower2;
1052 	int power;
1053 	int ri;
1054 	int rx;
1055 	int nswath;
1056 	int j;
1057 
1058 	/*
1059 	 * First look in the depot.  Any given zone in the depot may be
1060 	 * locked by being set to -1.  We have to do this instead of simply
1061 	 * removing the entire chain because removing the entire chain can
1062 	 * cause racing threads to allocate local slabs for large objects,
1063 	 * resulting in a large VSZ.
1064 	 */
1065 	slgd = &slglobal;
1066 	sldepot = slgd->sldepot;
1067 	zinfo = &sldepot->zone[zi];
1068 
1069 	while ((slab = zinfo->avail_base) != NULL) {
1070 		if ((void *)slab == LOCKEDPTR) {
1071 			cpu_pause();
1072 			continue;
1073 		}
1074 		if (atomic_cmpset_ptr(&zinfo->avail_base, slab, LOCKEDPTR)) {
1075 			MASSERT(slab->slgd == NULL);
1076 			slab->slgd = slgd;
1077 			zinfo->avail_base = slab->next;
1078 			return(slab);
1079 		}
1080 	}
1081 
1082 	/*
1083 	 * Nothing in the depot, allocate a new slab by locating or assigning
1084 	 * a region and then using the system virtual memory allocator.
1085 	 */
1086 	slab = NULL;
1087 
1088 	/*
1089 	 * Calculate the start of the data chunks relative to the start
1090 	 * of the slab.
1091 	 */
1092 	if ((chunk_size ^ (chunk_size - 1)) == (chunk_size << 1) - 1) {
1093 		ispower2 = 1;
1094 		chunk_offset = (sizeof(*slab) + (chunk_size - 1)) &
1095 			       ~(chunk_size - 1);
1096 	} else {
1097 		ispower2 = 0;
1098 		chunk_offset = sizeof(*slab) + chunking - 1;
1099 		chunk_offset -= chunk_offset % chunking;
1100 	}
1101 
1102 	/*
1103 	 * Calculate a reasonable number of chunks for the slab.
1104 	 *
1105 	 * Once initialized the MaxChunks[] array can only ever be
1106 	 * reinitialized to the same value.
1107 	 */
1108 	maxchunks = MaxChunks[zi];
1109 	if (maxchunks == 0) {
1110 		/*
1111 		 * First calculate how many chunks would fit in 1/1024
1112 		 * available memory.  This is around 2MB on a 32 bit
1113 		 * system and 128G on a 64-bit (48-bits available) system.
1114 		 */
1115 		maxchunks = (ssize_t)(NREGIONS_SIZE - chunk_offset) /
1116 			    (ssize_t)chunk_size;
1117 		if (maxchunks <= 0)
1118 			maxchunks = 1;
1119 
1120 		/*
1121 		 * A slab cannot handle more than MAXCHUNKS chunks, but
1122 		 * limit us to approximately MAXCHUNKS / 2 here because
1123 		 * we may have to expand maxchunks when we calculate the
1124 		 * actual power-of-2 slab.
1125 		 */
1126 		if (maxchunks > MAXCHUNKS / 2)
1127 			maxchunks = MAXCHUNKS / 2;
1128 
1129 		/*
1130 		 * Try to limit the slabs to BIGSLABSIZE (~128MB).  Larger
1131 		 * slabs will be created if the allocation does not fit.
1132 		 */
1133 		if (chunk_offset + chunk_size * maxchunks > BIGSLABSIZE) {
1134 			tmpchunks = (ssize_t)(BIGSLABSIZE - chunk_offset) /
1135 				    (ssize_t)chunk_size;
1136 			if (tmpchunks <= 0)
1137 				tmpchunks = 1;
1138 			if (maxchunks > tmpchunks)
1139 				maxchunks = tmpchunks;
1140 		}
1141 
1142 		/*
1143 		 * If the slab calculates to greater than 2MB see if we
1144 		 * can cut it down to ~2MB.  This controls VSZ but has
1145 		 * no effect on run-time size or performance.
1146 		 *
1147 		 * This is very important in case you core dump and also
1148 		 * important to reduce unnecessary region allocations.
1149 		 */
1150 		if (chunk_offset + chunk_size * maxchunks > NOMSLABSIZE) {
1151 			tmpchunks = (ssize_t)(NOMSLABSIZE - chunk_offset) /
1152 				    (ssize_t)chunk_size;
1153 			if (tmpchunks < 1)
1154 				tmpchunks = 1;
1155 			if (maxchunks > tmpchunks)
1156 				maxchunks = tmpchunks;
1157 		}
1158 
1159 		/*
1160 		 * If the slab calculates to greater than 128K see if we
1161 		 * can cut it down to ~128K while still maintaining a
1162 		 * reasonably large number of chunks in each slab.  This
1163 		 * controls VSZ but has no effect on run-time size or
1164 		 * performance.
1165 		 *
1166 		 * This is very important in case you core dump and also
1167 		 * important to reduce unnecessary region allocations.
1168 		 */
1169 		if (chunk_offset + chunk_size * maxchunks > LITSLABSIZE) {
1170 			tmpchunks = (ssize_t)(LITSLABSIZE - chunk_offset) /
1171 				    (ssize_t)chunk_size;
1172 			if (tmpchunks < 32)
1173 				tmpchunks = 32;
1174 			if (maxchunks > tmpchunks)
1175 				maxchunks = tmpchunks;
1176 		}
1177 
1178 		MaxChunks[zi] = maxchunks;
1179 	}
1180 	MASSERT(maxchunks > 0 && maxchunks <= MAXCHUNKS);
1181 
1182 	/*
1183 	 * Calculate the actual slab size.  maxchunks will be recalculated
1184 	 * a little later.
1185 	 */
1186 	slab_desire = chunk_offset + chunk_size * maxchunks;
1187 	slab_size = 8 * MAXCHUNKS;
1188 	power = 3 + MAXCHUNKS_BITS;
1189 	while (slab_size < slab_desire) {
1190 		slab_size <<= 1;
1191 		++power;
1192 	}
1193 
1194 	/*
1195 	 * Do a quick recalculation based on the actual slab size but not
1196 	 * yet dealing with whether the slab header is in-band or out-of-band.
1197 	 * The purpose here is to see if we can reasonably reduce slab_size
1198 	 * to a power of 4 to allow more chunk sizes to use the same slab
1199 	 * size.
1200 	 */
1201 	if ((power & 1) && slab_size > 32768) {
1202 		maxchunks = (slab_size - chunk_offset) / chunk_size;
1203 		if (maxchunks >= MAXCHUNKS / 8) {
1204 			slab_size >>= 1;
1205 			--power;
1206 		}
1207 	}
1208 	if ((power & 2) && slab_size > 32768 * 4) {
1209 		maxchunks = (slab_size - chunk_offset) / chunk_size;
1210 		if (maxchunks >= MAXCHUNKS / 4) {
1211 			slab_size >>= 2;
1212 			power -= 2;
1213 		}
1214 	}
1215 	/*
1216 	 * This case occurs when the slab_size is larger than 1/1024 available
1217 	 * UVM.
1218 	 */
1219 	nswath = slab_size / NREGIONS_SIZE;
1220 	if (nswath > NREGIONS)
1221 		return (NULL);
1222 
1223 
1224 	/*
1225 	 * Try to allocate from our current best region for this zi
1226 	 */
1227 	region_mask = ~(slab_size - 1);
1228 	ri = slgd->zone[zi].best_region;
1229 	if (Regions[ri].mask == region_mask) {
1230 		if ((slab = _vmem_alloc(ri, slab_size)) != NULL)
1231 			goto found;
1232 	}
1233 
1234 	/*
1235 	 * Try to find an existing region to allocate from.  The normal
1236 	 * case will be for allocations that are less than 1/1024 available
1237 	 * UVM, which fit into a single Regions[] entry.
1238 	 */
1239 	while (slab_size <= NREGIONS_SIZE) {
1240 		rx = -1;
1241 		for (ri = 0; ri < NREGIONS; ++ri) {
1242 			if (rx < 0 && Regions[ri].mask == 0)
1243 				rx = ri;
1244 			if (Regions[ri].mask == region_mask) {
1245 				slab = _vmem_alloc(ri, slab_size);
1246 				if (slab) {
1247 					slgd->zone[zi].best_region = ri;
1248 					goto found;
1249 				}
1250 			}
1251 		}
1252 
1253 		if (rx < 0)
1254 			return(NULL);
1255 
1256 		/*
1257 		 * This can fail, retry either way
1258 		 */
1259 		atomic_cmpset_ptr((void **)&Regions[rx].mask,
1260 				  NULL,
1261 				  (void *)region_mask);
1262 	}
1263 
1264 	for (;;) {
1265 		rx = -1;
1266 		for (ri = 0; ri < NREGIONS; ri += nswath) {
1267 			if (Regions[ri].mask == region_mask) {
1268 				slab = _vmem_alloc(ri, slab_size);
1269 				if (slab) {
1270 					slgd->zone[zi].best_region = ri;
1271 					goto found;
1272 				}
1273 			}
1274 			if (rx < 0) {
1275 				for (j = nswath - 1; j >= 0; --j) {
1276 					if (Regions[ri+j].mask != 0)
1277 						break;
1278 				}
1279 				if (j < 0)
1280 					rx = ri;
1281 			}
1282 		}
1283 
1284 		/*
1285 		 * We found a candidate, try to allocate it backwards so
1286 		 * another thread racing a slaballoc() does not see the
1287 		 * mask in the base index position until we are done.
1288 		 *
1289 		 * We can safely zero-out any partial allocations because
1290 		 * the mask is only accessed from the base index.  Any other
1291 		 * threads racing us will fail prior to us clearing the mask.
1292 		 */
1293 		if (rx < 0)
1294 			return(NULL);
1295 		for (j = nswath - 1; j >= 0; --j) {
1296 			if (!atomic_cmpset_ptr((void **)&Regions[rx+j].mask,
1297 					       NULL, (void *)region_mask)) {
1298 				while (++j < nswath)
1299 					Regions[rx+j].mask = 0;
1300 				break;
1301 			}
1302 		}
1303 		/* retry */
1304 	}
1305 
1306 	/*
1307 	 * Fill in the new slab in region ri.  If the slab_size completely
1308 	 * fills one or more region slots we move the slab structure out of
1309 	 * band which should optimize the chunking (particularly for a power
1310 	 * of 2).
1311 	 */
1312 found:
1313 	region = &Regions[ri];
1314 	MASSERT(region->slab == NULL);
1315 	if (slab_size >= NREGIONS_SIZE) {
1316 		save = slab;
1317 		slab = memalloc(sizeof(*slab), 0);
1318 		bzero(slab, sizeof(*slab));
1319 		slab->chunks = save;
1320 		for (j = 0; j < nswath; ++j)
1321 			region[j].slab = slab;
1322 		chunk_offset = 0;
1323 	} else {
1324 		bzero(slab, sizeof(*slab));
1325 		slab->chunks = (char *)slab + chunk_offset;
1326 	}
1327 
1328 	/*
1329 	 * Calculate the start of the chunks memory and recalculate the
1330 	 * actual number of chunks the slab can hold.
1331 	 */
1332 	maxchunks = (slab_size - chunk_offset) / chunk_size;
1333 	if (maxchunks > MAXCHUNKS)
1334 		maxchunks = MAXCHUNKS;
1335 
1336 	/*
1337 	 * And fill in the rest
1338 	 */
1339 	slab->magic = ZALLOC_SLAB_MAGIC;
1340 	slab->navail = maxchunks;
1341 	slab->nmax = maxchunks;
1342 	slab->slab_size = slab_size;
1343 	slab->chunk_size = chunk_size;
1344 	slab->zone_index = zi;
1345 	slab->slgd = slgd;
1346 	slab->state = UNKNOWN;
1347 	slab->region = region;
1348 
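	/*
	 * Populate the availability bitmap: whole words of ULONG_MAX for
	 * complete groups of LONG_BITS chunks and a partial low-order mask
	 * for the tail.  For example (illustration only), with LONG_BITS
	 * == 64 and maxchunks == 100, bitmap[0] becomes ULONG_MAX and
	 * bitmap[1] has only its low 36 bits set.
	 */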
1349 	for (ri = 0; ri < maxchunks; ri += LONG_BITS) {
1350 		if (ri + LONG_BITS <= maxchunks)
1351 			slab->bitmap[ri >> LONG_BITS_SHIFT] = ULONG_MAX;
1352 		else
1353 			slab->bitmap[ri >> LONG_BITS_SHIFT] =
1354 						(1LU << (maxchunks - ri)) - 1;
1355 	}
1356 	return (slab);
1357 }
1358 
1359 /*
1360  * Free a slab.
1361  */
1362 static void
1363 slabfree(slab_t slab)
1364 {
1365 	int nswath;
1366 	int j;
1367 
1368 	if (slab->region->slab == slab) {
1369 		/*
1370 		 * Out-of-band slab.
1371 		 */
1372 		nswath = slab->slab_size / NREGIONS_SIZE;
1373 		for (j = 0; j < nswath; ++j)
1374 			slab->region[j].slab = NULL;
1375 		slab->magic = 0;
1376 		_vmem_free(slab->chunks, slab->slab_size);
1377 		memfree(slab, 0);
1378 	} else {
1379 		/*
1380 		 * In-band slab.
1381 		 */
1382 		slab->magic = 0;
1383 		_vmem_free(slab, slab->slab_size);
1384 	}
1385 }
1386 
1387 /*
1388  * Terminate a slab's use in the current thread.  The slab may still have
1389  * outstanding allocations and thus not be deallocatable.
1390  */
1391 static void
1392 slabterm(slglobaldata_t slgd, slab_t slab)
1393 {
1394 	slglobaldata_t sldepot = slgd->sldepot;
1395 	struct zoneinfo *zinfo;
1396 	slab_t dnext;
1397 	int zi = slab->zone_index;
1398 
1399 	slab->slgd = NULL;
1400 	--slgd->nslabs;
1401 	zinfo = &sldepot->zone[zi];
1402 
1403 	/*
1404 	 * If the slab can be freed and the depot is either locked or not
1405 	 * empty, then free the slab.
1406 	 */
1407 	if (slab->navail == slab->nmax && zinfo->avail_base) {
1408 		slab->state = UNKNOWN;
1409 		slabfree(slab);
1410 		return;
1411 	}
1412 	slab->state = AVAIL;
1413 
1414 	/*
1415 	 * Link the slab into the depot
1416 	 */
1417 	for (;;) {
1418 		dnext = zinfo->avail_base;
1419 		cpu_ccfence();
1420 		if ((void *)dnext == LOCKEDPTR) {
1421 			cpu_pause();
1422 			continue;
1423 		}
1424 		slab->next = dnext;
1425 		if (atomic_cmpset_ptr(&zinfo->avail_base, dnext, slab))
1426 			break;
1427 	}
1428 }
1429 
1430 /*
1431  * _vmem_alloc()
1432  *
1433  *	Directly map memory with mmap() inside region (ri), aligned to
1434  *	(slab_size).
1435  *
1436  *	slab_size must be a multiple of PAGE_SIZE.
1437  *
1438  *	slab_size must be a power of 2, as it also serves as the alignment.
1439  */
1440 static void *
1441 _vmem_alloc(int ri, size_t slab_size)
1442 {
1443 	char *baddr = (void *)((uintptr_t)ri << NREGIONS_SHIFT);
1444 	char *eaddr;
1445 	char *addr;
1446 	char *save;
1447 	uintptr_t excess;
1448 
1449 	if (slab_size < NREGIONS_SIZE)
1450 		eaddr = baddr + NREGIONS_SIZE;
1451 	else
1452 		eaddr = baddr + slab_size;
1453 
1454 	/*
1455 	 * This usually just works but might not.
1456 	 */
1457 	addr = mmap(baddr, slab_size, PROT_READ|PROT_WRITE,
1458 		    MAP_PRIVATE | MAP_ANON | MAP_SIZEALIGN, -1, 0);
1459 	if (addr == MAP_FAILED) {
1460 		return (NULL);
1461 	}
1462 	if (addr < baddr || addr + slab_size > eaddr) {
1463 		munmap(addr, slab_size);
1464 		return (NULL);
1465 	}
1466 
1467 	/*
1468 	 * Check alignment.  The misaligned offset is also the excess
1469 	 * amount.  If misaligned unmap the excess so we have a chance of
1470 	 * mapping at the next alignment point and recursively try again.
1471 	 *
1472 	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
1473 	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
1474 	 *   xxxxxxxxx				final excess calculation
1475 	 *   ^ returned address
1476 	 */
1477 	excess = (uintptr_t)addr & (slab_size - 1);
1478 	while (excess) {
1479 		excess = slab_size - excess;
1480 		save = addr;
1481 
1482 		munmap(save + excess, slab_size - excess);
1483 		addr = _vmem_alloc(ri, slab_size);
1484 		munmap(save, excess);
1485 		if (addr == NULL)
1486 			return (NULL);
1487 		if (addr < baddr || addr + slab_size > eaddr) {
1488 			munmap(addr, slab_size);
1489 			return (NULL);
1490 		}
1491 		excess = (uintptr_t)addr & (slab_size - 1);
1492 	}
1493 	return (addr);
1494 }
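
/*
 * Worked example of the alignment handling above (numbers are illustrative
 * only): with slab_size = 128K and mmap() returning an address 32K past a
 * 128K boundary, the initial excess is 32K and is flipped to 96K, the
 * distance to the next boundary.  The final 32K of the mapping (beyond that
 * boundary) is unmapped, the recursive call hopefully re-maps an aligned
 * slab starting at the freed boundary, and the leading 96K is then unmapped.
 */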
1495 
1496 /*
1497  * _vmem_free()
1498  *
1499  *	Free a chunk of memory allocated with _vmem_alloc()
1500  */
1501 static void
1502 _vmem_free(void *ptr, size_t size)
1503 {
1504 	munmap(ptr, size);
1505 }
1506 
1507 /*
1508  * Panic on fatal conditions
1509  */
1510 static void
1511 _mpanic(const char *ctl, ...)
1512 {
1513 	va_list va;
1514 
1515 	if (malloc_panic == 0) {
1516 		malloc_panic = 1;
1517 		va_start(va, ctl);
1518 		vfprintf(stderr, ctl, va);
1519 		fprintf(stderr, "\n");
1520 		fflush(stderr);
1521 		va_end(va);
1522 	}
1523 	abort();
1524 }
1525