1 /*
2  * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
3  *
4  * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com> and by
8  * Venkatesh Srinivas <me@endeavour.zapto.org>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
38  */
39 /*
40  * This module implements a slab allocator drop-in replacement for the
41  * libc malloc().
42  *
43  * A slab allocator reserves a ZONE for each chunk size, then lays the
44  * chunks out in an array within the zone.  Allocation and deallocation
45  * is nearly instantaneous, and overhead losses are limited to a fixed
46  * worst-case amount.
47  *
48  * The slab allocator does not have to pre-initialize the list of
49  * free chunks for each zone, and the underlying VM will not be
50  * touched at all beyond the zone header until an actual allocation
51  * needs it.
52  *
53  * Slab management and locking is done on a per-zone basis.
54  *
55  *	Alloc Size	Chunking        Number of zones
56  *	0-127		8		16
57  *	128-255		16		8
58  *	256-511		32		8
59  *	512-1023	64		8
60  *	1024-2047	128		8
61  *	2048-4095	256		8
62  *	4096-8191	512		8
63  *	8192-16383	1024		8
64  *	16384-32767	2048		8
65  *
66  *	Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
67  *	is used to locate them for free().  One- and two-page allocations use
68  *	the zone mechanic to avoid excessive mmap()/munmap() calls.
69  *
70  *			   API FEATURES AND SIDE EFFECTS
71  *
72  *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73  *	Above that, power-of-2 sized allocations are page-aligned.  Non
74  *	power-of-2 sized allocations are aligned the same as the chunk
75  *	size for their zone.
76  *    + malloc(0) returns a special non-NULL value
77  *    + ability to allocate arbitrarily large chunks of memory
78  *    + realloc will reuse the passed pointer if possible, within the
79  *	limitations of the zone chunking.
80  *
81  * Multithreaded enhancements for small allocations introduced August 2010.
82  * These are in the spirit of 'libumem'. See:
83  *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84  *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85  * 	USENIX Technical Conference. USENIX Association.
86  *
87  * Oversized allocations employ the BIGCACHE mechanic whereby large
88  * allocations may be handed significantly larger buffers, allowing them
89  * to avoid mmap/munmap operations even through significant realloc()s.
90  * The excess space is only trimmed if too many large allocations have been
91  * given this treatment.
92  *
93  * TUNING
94  *
95  * The value of the environment variable MALLOC_OPTIONS is a character string
96  * containing various flags to tune nmalloc.
97  *
98  * 'U'   / ['u']	Generate / do not generate utrace entries for ktrace(1)
99  *			This will generate utrace events for all malloc,
100  *			realloc, and free calls. There are tools (mtrplay) to
101  *			replay an allocation pattern or to graph heap structure
102  *			(mtrgraph) which can interpret these logs.
103  * 'Z'   / ['z']	Zero out / do not zero all allocations.
104  *			Each new byte of memory allocated by malloc, realloc, or
105  *			reallocf will be initialized to 0. This is intended for
106  *			debugging and will affect performance negatively.
107  * 'H'	/  ['h']	Pass / do not pass a hint (madvise) to the kernel
108  *			about pages unused by the allocation functions.
109  */
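
/*
 * Illustrative usage sketch (not part of the allocator).  Assuming a POSIX
 * shell and the ktrace(1)/kdump(1) tools referred to above, the tuning
 * flags might be exercised along these lines:
 *
 *	$ env MALLOC_OPTIONS=ZU ktrace -t u ./prog	# zero allocations and
 *							# emit utrace records
 *	$ kdump -t u					# inspect the records
 *
 * The option string is scanned left to right, so a later lowercase letter
 * cancels an earlier uppercase one (e.g. MALLOC_OPTIONS=Zz leaves zeroing
 * disabled).
 */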
110 
111 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
112 
113 #include "libc_private.h"
114 
115 #include <sys/param.h>
116 #include <sys/types.h>
117 #include <sys/mman.h>
118 #include <sys/queue.h>
119 #include <sys/uio.h>
120 #include <sys/ktrace.h>
121 #include <stdio.h>
122 #include <stdint.h>
123 #include <stdlib.h>
124 #include <stdarg.h>
125 #include <stddef.h>
126 #include <unistd.h>
127 #include <string.h>
128 #include <fcntl.h>
129 #include <errno.h>
130 #include <pthread.h>
131 #include <machine/atomic.h>
132 
133 #include "spinlock.h"
134 #include "un-namespace.h"
135 
136 
137 /*
138  * Linked list of large allocations
139  */
140 typedef struct bigalloc {
141 	struct bigalloc *next;	/* hash link */
142 	void	*base;		/* base pointer */
143 	u_long	active;		/* bytes active */
144 	u_long	bytes;		/* bytes allocated */
145 } *bigalloc_t;
146 
147 /*
148  * Note that any allocations which are exact multiples of PAGE_SIZE, or
149  * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
150  */
151 #define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
152 #define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
153 #define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
154 #define ZALLOC_ZONE_SIZE	(64 * 1024)
155 #define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
156 #define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */
157 
158 #if ZALLOC_ZONE_LIMIT == 16384
159 #define NZONES			72
160 #elif ZALLOC_ZONE_LIMIT == 32768
161 #define NZONES			80
162 #else
163 #error "I couldn't figure out NZONES"
164 #endif
165 
166 /*
167  * Chunk structure for free elements
168  */
169 typedef struct slchunk {
170 	struct slchunk *c_Next;
171 } *slchunk_t;
172 
173 /*
174  * The IN-BAND zone header is placed at the beginning of each zone.
175  */
176 struct slglobaldata;
177 
178 typedef struct slzone {
179 	int32_t		z_Magic;	/* magic number for sanity check */
180 	int		z_NFree;	/* total free chunks / ualloc space */
181 	struct slzone *z_Next;		/* ZoneAry[] link if z_NFree non-zero */
182 	int		z_NMax;		/* maximum free chunks */
183 	char		*z_BasePtr;	/* pointer to start of chunk array */
184 	int		z_UIndex;	/* current initial allocation index */
185 	int		z_UEndIndex;	/* last (first) allocation index */
186 	int		z_ChunkSize;	/* chunk size for validation */
187 	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
188 	int		z_ZoneIndex;
189 	int		z_Flags;
190 	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
191 #if defined(INVARIANTS)
192 	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
193 #endif
194 } *slzone_t;
195 
196 typedef struct slglobaldata {
197 	spinlock_t	Spinlock;
198 	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
199 	int		JunkIndex;
200 } *slglobaldata_t;
201 
202 #define SLZF_UNOTZEROD		0x0001
203 
204 #define FASTSLABREALLOC		0x02
205 
206 /*
207  * Misc constants.  Note that allocations that are exact multiples of
208  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
209  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
210  */
211 #define MIN_CHUNK_SIZE		8		/* in bytes */
212 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
213 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
214 
215 /*
216  * The WEIRD_ADDR is used as known text to copy into free objects to
217  * try to create deterministic failure cases if the data is accessed after
218  * free.
219  *
220  * WARNING: A limited number of spinlocks are available, BIGXSIZE should
221  *	    not be larger than 64.
222  */
223 #define WEIRD_ADDR      0xdeadc0de
224 #define MAX_COPY        sizeof(weirdary)
225 #define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)
226 
227 #define BIGHSHIFT	10			/* bigalloc hash table */
228 #define BIGHSIZE	(1 << BIGHSHIFT)
229 #define BIGHMASK	(BIGHSIZE - 1)
230 #define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
231 #define BIGXMASK	(BIGXSIZE - 1)
232 
233 /*
234  * BIGCACHE caches oversized allocations.  Note that a linear search is
235  * performed, so do not make the cache too large.
236  *
237  * BIGCACHE will garbage-collect excess space when the excess exceeds the
238  * specified value.  A relatively large number should be used here because
239  * garbage collection is expensive.
240  */
241 #define BIGCACHE	16
242 #define BIGCACHE_MASK	(BIGCACHE - 1)
243 #define BIGCACHE_LIMIT	(1024 * 1024)		/* size limit */
244 #define BIGCACHE_EXCESS	(16 * 1024 * 1024)	/* garbage collect */
245 
246 #define SAFLAG_ZERO	0x0001
247 #define SAFLAG_PASSIVE	0x0002
248 
249 /*
250  * Thread control
251  */
252 
253 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
254 
255 #define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
256 				_mpanic("assertion: %s in %s",	\
257 				#exp, __func__);		\
258 			    } while (0)
259 
260 /*
261  * Magazines
262  */
263 
264 #define M_MAX_ROUNDS	64
265 #define M_ZONE_ROUNDS	64
266 #define M_LOW_ROUNDS	32
267 #define M_INIT_ROUNDS	8
268 #define M_BURST_FACTOR  8
269 #define M_BURST_NSCALE	2
270 
271 #define M_BURST		0x0001
272 #define M_BURST_EARLY	0x0002
273 
274 struct magazine {
275 	SLIST_ENTRY(magazine) nextmagazine;
276 
277 	int		flags;
278 	int 		capacity;	/* Max rounds in this magazine */
279 	int 		rounds;		/* Current number of free rounds */
280 	int		burst_factor;	/* Number of blocks to prefill with */
281 	int 		low_factor;	/* Free till low_factor from full mag */
282 	void		*objects[M_MAX_ROUNDS];
283 };
284 
285 SLIST_HEAD(magazinelist, magazine);
286 
287 static spinlock_t zone_mag_lock;
288 static struct magazine zone_magazine = {
289 	.flags = M_BURST | M_BURST_EARLY,
290 	.capacity = M_ZONE_ROUNDS,
291 	.rounds = 0,
292 	.burst_factor = M_BURST_FACTOR,
293 	.low_factor = M_LOW_ROUNDS
294 };
295 
296 #define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
297 #define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
298 #define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
299 #define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)
300 
301 /*
302  * Each thread has a pair of magazines per size-class (NZONES).
303  * The loaded magazine supports immediate allocations; the previous
304  * magazine will either be full or empty and can be swapped in as needed.
305  */
306 typedef struct magazine_pair {
307 	struct magazine	*loaded;
308 	struct magazine	*prev;
309 } magazine_pair;
310 
311 /* A depot is a collection of magazines for a single zone. */
312 typedef struct magazine_depot {
313 	struct magazinelist full;
314 	struct magazinelist empty;
315 	spinlock_t	lock;
316 } magazine_depot;
317 
318 typedef struct thr_mags {
319 	magazine_pair	mags[NZONES];
320 	struct magazine	*newmag;
321 	int		init;
322 } thr_mags;
323 
324 /*
325  * With this attribute set, no function call is required to access this
326  * variable when the code is compiled with -fPIC. Empty for libc_rtld
327  * (like __thread).
328  */
329 #ifdef __LIBC_RTLD
330 #define TLS_ATTRIBUTE
331 #else
332 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")))
333 #endif
334 
335 static int mtmagazine_free_live;
336 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
337 static pthread_key_t thread_mags_key;
338 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
339 static magazine_depot depots[NZONES];
340 
341 /*
342  * Fixed globals (not per-cpu)
343  */
344 static const int ZoneSize = ZALLOC_ZONE_SIZE;
345 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
346 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
347 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
348 
349 static int opt_madvise = 0;
350 static int opt_utrace = 0;
351 static int g_malloc_flags = 0;
352 static struct slglobaldata	SLGlobalData;
353 static bigalloc_t bigalloc_array[BIGHSIZE];
354 static spinlock_t bigspin_array[BIGXSIZE];
355 static volatile void *bigcache_array[BIGCACHE];		/* atomic swap */
356 static volatile size_t bigcache_size_array[BIGCACHE];	/* SMP races ok */
357 static volatile int bigcache_index;			/* SMP races ok */
358 static int malloc_panic;
359 static int malloc_dummy_pointer;
360 static size_t excess_alloc;				/* excess big allocs */
361 
362 static const int32_t weirdary[16] = {
363 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
364 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
365 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
366 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
367 };
368 
369 static void *_slaballoc(size_t size, int flags);
370 static void *_slabrealloc(void *ptr, size_t size);
371 static void _slabfree(void *ptr, int, bigalloc_t *);
372 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
373 static void _vmem_free(void *ptr, size_t bytes);
374 static void *magazine_alloc(struct magazine *, int *);
375 static int magazine_free(struct magazine *, void *);
376 static void *mtmagazine_alloc(int zi);
377 static int mtmagazine_free(int zi, void *);
378 static void mtmagazine_init(void);
379 static void mtmagazine_destructor(void *);
380 static slzone_t zone_alloc(int flags);
381 static void zone_free(void *z);
382 static void _mpanic(const char *ctl, ...) __printflike(1, 2);
383 static void malloc_init(void) __constructor(101);
384 #if defined(INVARIANTS)
385 static void chunk_mark_allocated(slzone_t z, void *chunk);
386 static void chunk_mark_free(slzone_t z, void *chunk);
387 #endif
388 
389 struct nmalloc_utrace {
390 	void *p;
391 	size_t s;
392 	void *r;
393 };
394 
395 #define UTRACE(a, b, c)						\
396 	if (opt_utrace) {					\
397 		struct nmalloc_utrace ut = {			\
398 			.p = (a),				\
399 			.s = (b),				\
400 			.r = (c)				\
401 		};						\
402 		utrace(&ut, sizeof(ut));			\
403 	}
404 
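/*
 * For reference, the call sites below encode the records as follows:
 * malloc() logs (NULL, size, ptr), free() logs (ptr, 0, NULL), realloc()
 * logs (ptr, size, ret) and malloc_init() logs a (-1, 0, NULL) marker.
 * A minimal decoder for a captured record might look like this
 * (hypothetical helper, not used by the allocator itself):
 *
 *	static const char *
 *	utrace_kind(const struct nmalloc_utrace *ut)
 *	{
 *		if (ut->p == (void *)-1)
 *			return("init marker");
 *		if (ut->p == NULL)
 *			return("malloc/calloc");
 *		if (ut->s == 0 && ut->r == NULL)
 *			return("free");
 *		return("realloc");
 *	}
 */
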
405 #ifdef INVARIANTS
406 /*
407  * If enabled, any memory allocated without SAFLAG_ZERO is initialized to -1.
408  */
409 static int  use_malloc_pattern;
410 #endif
411 
412 static void
413 malloc_init(void)
414 {
415 	const char *p = NULL;
416 
417 	if (issetugid() == 0)
418 		p = getenv("MALLOC_OPTIONS");
419 
420 	for (; p != NULL && *p != '\0'; p++) {
421 		switch(*p) {
422 		case 'u':	opt_utrace = 0; break;
423 		case 'U':	opt_utrace = 1; break;
424 		case 'h':	opt_madvise = 0; break;
425 		case 'H':	opt_madvise = 1; break;
426 		case 'z':	g_malloc_flags = 0; break;
427 		case 'Z': 	g_malloc_flags = SAFLAG_ZERO; break;
428 		default:
429 			break;
430 		}
431 	}
432 
433 	UTRACE((void *) -1, 0, NULL);
434 }
435 
436 /*
437  * We have to install a handler for nmalloc thread teardowns when
438  * the thread is created.  We cannot delay this because destructors in
439  * sophisticated userland programs can call malloc() for the first time
440  * during their thread exit.
441  *
442  * This routine is called directly from pthreads.
443  */
444 void
445 _nmalloc_thr_init(void)
446 {
447 	thr_mags *tp;
448 
449 	/*
450 	 * Disallow mtmagazine operations until the mtmagazine is
451 	 * initialized.
452 	 */
453 	tp = &thread_mags;
454 	tp->init = -1;
455 
456 	if (mtmagazine_free_live == 0) {
457 		mtmagazine_free_live = 1;
458 		pthread_once(&thread_mags_once, mtmagazine_init);
459 	}
460 	pthread_setspecific(thread_mags_key, tp);
461 	tp->init = 1;
462 }
463 
464 /*
465  * Thread locks.
466  */
467 static __inline void
468 slgd_lock(slglobaldata_t slgd)
469 {
470 	if (__isthreaded)
471 		_SPINLOCK(&slgd->Spinlock);
472 }
473 
474 static __inline void
475 slgd_unlock(slglobaldata_t slgd)
476 {
477 	if (__isthreaded)
478 		_SPINUNLOCK(&slgd->Spinlock);
479 }
480 
481 static __inline void
482 depot_lock(magazine_depot *dp)
483 {
484 	if (__isthreaded)
485 		_SPINLOCK(&dp->lock);
486 }
487 
488 static __inline void
489 depot_unlock(magazine_depot *dp)
490 {
491 	if (__isthreaded)
492 		_SPINUNLOCK(&dp->lock);
493 }
494 
495 static __inline void
496 zone_magazine_lock(void)
497 {
498 	if (__isthreaded)
499 		_SPINLOCK(&zone_mag_lock);
500 }
501 
502 static __inline void
503 zone_magazine_unlock(void)
504 {
505 	if (__isthreaded)
506 		_SPINUNLOCK(&zone_mag_lock);
507 }
508 
509 static __inline void
510 swap_mags(magazine_pair *mp)
511 {
512 	struct magazine *tmp;
513 	tmp = mp->loaded;
514 	mp->loaded = mp->prev;
515 	mp->prev = tmp;
516 }
517 
518 /*
519  * bigalloc hashing and locking support.
520  *
521  * Return an unmasked hash code for the passed pointer.
522  */
523 static __inline int
524 _bigalloc_hash(void *ptr)
525 {
526 	int hv;
527 
528 	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
529 	      ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
530 
531 	return(hv);
532 }
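
/*
 * Illustrative sketch of how a pointer maps to its hash chain and lock
 * (this is exactly what the lock helpers below do):
 *
 *	int hv = _bigalloc_hash(ptr);
 *	bigalloc_t *chain = &bigalloc_array[hv & BIGHMASK];
 *	spinlock_t *lock  = &bigspin_array[hv & BIGXMASK];
 */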
533 
534 /*
535  * Lock the hash chain and return a pointer to its base for the specified
536  * address.
537  */
538 static __inline bigalloc_t *
539 bigalloc_lock(void *ptr)
540 {
541 	int hv = _bigalloc_hash(ptr);
542 	bigalloc_t *bigp;
543 
544 	bigp = &bigalloc_array[hv & BIGHMASK];
545 	if (__isthreaded)
546 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
547 	return(bigp);
548 }
549 
550 /*
551  * Lock the hash chain and return a pointer to its base for the specified
552  * address.
553  *
554  * BUT, if the hash chain is empty, just return NULL and do not bother
555  * to lock anything.
556  */
557 static __inline bigalloc_t *
558 bigalloc_check_and_lock(void *ptr)
559 {
560 	int hv = _bigalloc_hash(ptr);
561 	bigalloc_t *bigp;
562 
563 	bigp = &bigalloc_array[hv & BIGHMASK];
564 	if (*bigp == NULL)
565 		return(NULL);
566 	if (__isthreaded) {
567 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
568 	}
569 	return(bigp);
570 }
571 
572 static __inline void
573 bigalloc_unlock(void *ptr)
574 {
575 	int hv;
576 
577 	if (__isthreaded) {
578 		hv = _bigalloc_hash(ptr);
579 		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
580 	}
581 }
582 
583 /*
584  * Find a bigcache entry that might work for the allocation.  SMP races are
585  * ok here except for the swap (that is, it is ok if bigcache_size_array[i]
586  * is wrong or if a NULL or too-small big is returned).
587  *
588  * Generally speaking it is ok to find a large entry even if the bytes
589  * requested are relatively small (but still oversized), because we really
590  * don't know *what* the application is going to do with the buffer.
591  */
592 static __inline
593 bigalloc_t
594 bigcache_find_alloc(size_t bytes)
595 {
596 	bigalloc_t big = NULL;
597 	size_t test;
598 	int i;
599 
600 	for (i = 0; i < BIGCACHE; ++i) {
601 		test = bigcache_size_array[i];
602 		if (bytes <= test) {
603 			bigcache_size_array[i] = 0;
604 			big = atomic_swap_ptr(&bigcache_array[i], NULL);
605 			break;
606 		}
607 	}
608 	return big;
609 }
610 
611 /*
612  * Free a bigcache entry, possibly returning one that the caller really must
613  * free.  This is used to cache recent oversized memory blocks.  Only
614  * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
615  * to collect the biggest ones we can that are under the limit.
616  */
617 static __inline
618 bigalloc_t
619 bigcache_find_free(bigalloc_t big)
620 {
621 	int i;
622 	int j;
623 	int b;
624 
625 	b = ++bigcache_index;
626 	for (i = 0; i < BIGCACHE; ++i) {
627 		j = (b + i) & BIGCACHE_MASK;
628 		if (bigcache_size_array[j] < big->bytes) {
629 			bigcache_size_array[j] = big->bytes;
630 			big = atomic_swap_ptr(&bigcache_array[j], big);
631 			break;
632 		}
633 	}
634 	return big;
635 }
636 
637 static __inline
638 void
639 handle_excess_big(void)
640 {
641 	int i;
642 	bigalloc_t big;
643 	bigalloc_t *bigp;
644 
645 	if (excess_alloc <= BIGCACHE_EXCESS)
646 		return;
647 
648 	for (i = 0; i < BIGHSIZE; ++i) {
649 		bigp = &bigalloc_array[i];
650 		if (*bigp == NULL)
651 			continue;
652 		if (__isthreaded)
653 			_SPINLOCK(&bigspin_array[i & BIGXMASK]);
654 		for (big = *bigp; big; big = big->next) {
655 			if (big->active < big->bytes) {
656 				MASSERT((big->active & PAGE_MASK) == 0);
657 				MASSERT((big->bytes & PAGE_MASK) == 0);
658 				munmap((char *)big->base + big->active,
659 				       big->bytes - big->active);
660 				atomic_add_long(&excess_alloc,
661 						big->active - big->bytes);
662 				big->bytes = big->active;
663 			}
664 		}
665 		if (__isthreaded)
666 			_SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
667 	}
668 }
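
/*
 * Illustrative BIGCACHE lifecycle (a sketch of how the routines above and
 * _slaballoc()/_slabfree() below interact; the sizes are hypothetical and
 * approximate):
 *
 *	p = malloc(8 * 1024 * 1024);	// mmap'd, tracked as a bigalloc
 *	free(p);			// 8MB > BIGCACHE_LIMIT (1MB), so
 *					//    the pages are simply unmapped
 *	q = malloc(768 * 1024);		// <= BIGCACHE_LIMIT, so on free()...
 *	free(q);			// ...the block may be stashed in
 *					//    bigcache_array[] for reuse
 *	r = malloc(512 * 1024);		// may receive the cached ~768KB block;
 *					//    the surplus is counted in
 *					//    excess_alloc and trimmed later by
 *					//    handle_excess_big() if it adds up
 */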
669 
670 /*
671  * Calculate the zone index for the allocation request size and set the
672  * allocation request size to that particular zone's chunk size.
673  */
674 static __inline int
675 zoneindex(size_t *bytes, size_t *chunking)
676 {
677 	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */
678 
679 	/*
680 	 * This used to be 8-byte chunks and 16 zones for n < 128.
681 	 * However some instructions may require 16-byte alignment
682 	 * (aka SIMD) and programs might not request an aligned size
683 	 * (aka GCC-7), so change this as follows:
684 	 *
685 	 * 0-15 bytes	8-byte alignment in two zones	(0-1)
686 	 * 16-127 bytes	16-byte alignment in eight zones (3-10)
687 	 * zone index 2 and 11-15 are currently unused.
688 	 */
689 	if (n < 16) {
690 		*bytes = n = (n + 7) & ~7;
691 		*chunking = 8;
692 		return(n / 8 - 1);		/* 8 byte chunks, 2 zones */
693 		/* zones 0,1, zone 2 is unused */
694 	}
695 	if (n < 128) {
696 		*bytes = n = (n + 15) & ~15;
697 		*chunking = 16;
698 		return(n / 16 + 2);		/* 16 byte chunks, 8 zones */
699 		/* zones 3-10, zones 11-15 unused */
700 	}
701 	if (n < 256) {
702 		*bytes = n = (n + 15) & ~15;
703 		*chunking = 16;
704 		return(n / 16 + 7);
705 	}
706 	if (n < 8192) {
707 		if (n < 512) {
708 			*bytes = n = (n + 31) & ~31;
709 			*chunking = 32;
710 			return(n / 32 + 15);
711 		}
712 		if (n < 1024) {
713 			*bytes = n = (n + 63) & ~63;
714 			*chunking = 64;
715 			return(n / 64 + 23);
716 		}
717 		if (n < 2048) {
718 			*bytes = n = (n + 127) & ~127;
719 			*chunking = 128;
720 			return(n / 128 + 31);
721 		}
722 		if (n < 4096) {
723 			*bytes = n = (n + 255) & ~255;
724 			*chunking = 256;
725 			return(n / 256 + 39);
726 		}
727 		*bytes = n = (n + 511) & ~511;
728 		*chunking = 512;
729 		return(n / 512 + 47);
730 	}
731 #if ZALLOC_ZONE_LIMIT > 8192
732 	if (n < 16384) {
733 		*bytes = n = (n + 1023) & ~1023;
734 		*chunking = 1024;
735 		return(n / 1024 + 55);
736 	}
737 #endif
738 #if ZALLOC_ZONE_LIMIT > 16384
739 	if (n < 32768) {
740 		*bytes = n = (n + 2047) & ~2047;
741 		*chunking = 2048;
742 		return(n / 2048 + 63);
743 	}
744 #endif
745 	_mpanic("Unexpected byte count %zu", n);
746 	return(0);
747 }
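
/*
 * Worked examples (illustrative): a 100-byte request is rounded up to 112
 * bytes with 16-byte chunking and lands in zone 9 (112 / 16 + 2); a
 * 3000-byte request is rounded up to 3072 bytes with 256-byte chunking and
 * lands in zone 51 (3072 / 256 + 39).  The rounded size is written back
 * through *bytes, so the caller allocates the full chunk size.
 */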
748 
749 /*
750  * malloc() - call internal slab allocator
751  */
752 void *
753 malloc(size_t size)
754 {
755 	void *ptr;
756 
757 	ptr = _slaballoc(size, 0);
758 	if (ptr == NULL)
759 		errno = ENOMEM;
760 	else
761 		UTRACE(0, size, ptr);
762 	return(ptr);
763 }
764 
765 #define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
766 
767 /*
768  * calloc() - call internal slab allocator
769  */
770 void *
771 calloc(size_t number, size_t size)
772 {
773 	void *ptr;
774 
775 	if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
776 	     number > 0 && SIZE_MAX / number < size) {
777 		errno = ENOMEM;
778 		return(NULL);
779 	}
780 
781 	ptr = _slaballoc(number * size, SAFLAG_ZERO);
782 	if (ptr == NULL)
783 		errno = ENOMEM;
784 	else
785 		UTRACE(0, number * size, ptr);
786 	return(ptr);
787 }
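
/*
 * A note on the overflow check above (illustrative): MUL_NO_OVERFLOW is
 * 2 raised to half the bit width of size_t, i.e. 2^32 on LP64.  If both
 * operands are below that bound their product cannot overflow a size_t,
 * so the relatively expensive division is skipped.  Otherwise
 * SIZE_MAX / number < size detects the overflow.  For example:
 *
 *	calloc((size_t)1 << 33, (size_t)1 << 33);	// 2^66 overflows;
 *							// fails with ENOMEM
 *	calloc(1000, 1000);				// both < 2^32; no
 *							// division performed
 */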
788 
789 /*
790  * realloc() (SLAB ALLOCATOR)
791  *
792  * We do not attempt to optimize this routine beyond reusing the same
793  * pointer if the new size fits within the chunking of the old pointer's
794  * zone.
795  */
796 void *
797 realloc(void *ptr, size_t size)
798 {
799 	void *ret;
800 	ret = _slabrealloc(ptr, size);
801 	if (ret == NULL)
802 		errno = ENOMEM;
803 	else
804 		UTRACE(ptr, size, ret);
805 	return(ret);
806 }
807 
808 /*
809  * posix_memalign()
810  *
811  * Allocate (size) bytes with an alignment of (alignment), where (alignment)
812  * is a power of 2 >= sizeof(void *).
813  *
814  * The slab allocator will allocate on power-of-2 boundaries up to
815  * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
816  * zone matching the requirements, and _vmem_alloc() otherwise.
817  */
818 int
819 posix_memalign(void **memptr, size_t alignment, size_t size)
820 {
821 	bigalloc_t *bigp;
822 	bigalloc_t big;
823 	size_t chunking;
824 	int zi __unused;
825 
826 	/*
827 	 * OpenGroup spec issue 6 checks
828 	 */
829 	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
830 		*memptr = NULL;
831 		return(EINVAL);
832 	}
833 	if (alignment < sizeof(void *)) {
834 		*memptr = NULL;
835 		return(EINVAL);
836 	}
837 
838 	/*
839 	 * Our zone mechanism guarantees same-sized alignment for any
840 	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
841 	 * we can just call _slaballoc() and be done.  We round size up
842 	 * to the nearest alignment boundary to improve our odds of
843 	 * it becoming a power-of-2 if it wasn't before.
844 	 */
845 	if (size <= alignment)
846 		size = alignment;
847 	else
848 		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
849 	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
850 		*memptr = _slaballoc(size, 0);
851 		return(*memptr ? 0 : ENOMEM);
852 	}
853 
854 	/*
855 	 * Otherwise locate a zone with a chunking that matches
856 	 * the requested alignment, within reason.   Consider two cases:
857 	 *
858 	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
859 	 *     we find will be the best fit because the chunking will be
860 	 *     greater or equal to the alignment.
861 	 *
862 	 * (2) A 513 allocation on a 256-byte alignment.  In this case
863 	 *     the first zoneindex we find will be for 576 byte allocations
864 	 *     with a chunking of 64, which is not sufficient.  To fix this
865 	 *     we simply find the nearest power-of-2 >= size and use the
866 	 *     same side-effect of _slaballoc() which guarantees
867 	 *     same-alignment on a power-of-2 allocation.
868 	 */
869 	if (size < PAGE_SIZE) {
870 		zi = zoneindex(&size, &chunking);
871 		if (chunking >= alignment) {
872 			*memptr = _slaballoc(size, 0);
873 			return(*memptr ? 0 : ENOMEM);
874 		}
875 		if (size >= 1024)
876 			alignment = 1024;
877 		if (size >= 16384)
878 			alignment = 16384;
879 		while (alignment < size)
880 			alignment <<= 1;
881 		*memptr = _slaballoc(alignment, 0);
882 		return(*memptr ? 0 : ENOMEM);
883 	}
884 
885 	/*
886 	 * If the slab allocator cannot handle it use vmem_alloc().
887 	 *
888 	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
889 	 */
890 	if (alignment < PAGE_SIZE)
891 		alignment = PAGE_SIZE;
892 	if (size < alignment)
893 		size = alignment;
894 	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
895 	*memptr = _vmem_alloc(size, alignment, 0);
896 	if (*memptr == NULL)
897 		return(ENOMEM);
898 
899 	big = _slaballoc(sizeof(struct bigalloc), 0);
900 	if (big == NULL) {
901 		_vmem_free(*memptr, size);
902 		*memptr = NULL;
903 		return(ENOMEM);
904 	}
905 	bigp = bigalloc_lock(*memptr);
906 	big->base = *memptr;
907 	big->active = size;
908 	big->bytes = size;		/* no excess */
909 	big->next = *bigp;
910 	*bigp = big;
911 	bigalloc_unlock(*memptr);
912 
913 	return(0);
914 }
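
/*
 * Illustrative usage (standard POSIX interface, not specific to this
 * implementation):
 *
 *	void *p;
 *	int error;
 *
 *	error = posix_memalign(&p, 64, 1000);	// 64 is a power of 2 and
 *						// >= sizeof(void *)
 *	if (error == 0) {
 *		// ... use p, which is 64-byte aligned ...
 *		free(p);
 *	}
 *
 * The result is reported via the return value (0, EINVAL or ENOMEM)
 * rather than through errno.
 */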
915 
916 /*
917  * free() (SLAB ALLOCATOR) - do the obvious
918  */
919 void
920 free(void *ptr)
921 {
922 	UTRACE(ptr, 0, 0);
923 	_slabfree(ptr, 0, NULL);
924 }
925 
926 /*
927  * _slaballoc()	(SLAB ALLOCATOR)
928  *
929  *	Allocate memory via the slab allocator.  If the request is too large,
930  *	or if it is page-aligned beyond a certain size, we fall back to the
931  *	mmap-based bigalloc code.
932  */
933 static void *
934 _slaballoc(size_t size, int flags)
935 {
936 	slzone_t z;
937 	slchunk_t chunk;
938 	slglobaldata_t slgd;
939 	size_t chunking;
940 	int zi;
941 #ifdef INVARIANTS
942 	int i;
943 #endif
944 	int off;
945 	void *obj;
946 
947 	/*
948 	 * Handle the degenerate size == 0 case.  Yes, this does happen.
949 	 * Return a special pointer.  This is to maintain compatibility with
950 	 * the original malloc implementation.  Certain devices, such as the
951 	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
952 	 * also realloc() later on.  Joy.
953 	 */
954 	if (size == 0)
955 		return(ZERO_LENGTH_PTR);
956 
957 	/* Capture global flags */
958 	flags |= g_malloc_flags;
959 
960 	/*
961 	 * Handle large allocations directly.  There should not be very many
962 	 * of these so performance is not a big issue.
963 	 *
964 	 * The backend allocator is pretty nasty on an SMP system.   Use the
965 	 * slab allocator for one and two page-sized chunks even though we
966 	 * lose some efficiency.
967 	 */
968 	if (size >= ZoneLimit ||
969 	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
970 		bigalloc_t big;
971 		bigalloc_t *bigp;
972 
973 		/*
974 		 * Page-align and cache-color in case of virtually indexed
975 		 * physically tagged L1 caches (aka SandyBridge).  No sweat
976 		 * otherwise, so just do it.
977 		 *
978 		 * (don't count as excess).
979 		 */
980 		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
981 		if ((size & (PAGE_SIZE * 2 - 1)) == 0)
982 			size += PAGE_SIZE;
983 
984 		/*
985 		 * Try to reuse a cached big block to avoid mmap'ing.  If it
986 		 * turns out not to fit our requirements we throw it away
987 		 * and allocate normally.
988 		 */
989 		big = NULL;
990 		if (size <= BIGCACHE_LIMIT) {
991 			big = bigcache_find_alloc(size);
992 			if (big && big->bytes < size) {
993 				_slabfree(big->base, FASTSLABREALLOC, &big);
994 				big = NULL;
995 			}
996 		}
997 		if (big) {
998 			chunk = big->base;
999 			if (flags & SAFLAG_ZERO)
1000 				bzero(chunk, size);
1001 		} else {
1002 			chunk = _vmem_alloc(size, PAGE_SIZE, flags);
1003 			if (chunk == NULL)
1004 				return(NULL);
1005 
1006 			big = _slaballoc(sizeof(struct bigalloc), 0);
1007 			if (big == NULL) {
1008 				_vmem_free(chunk, size);
1009 				return(NULL);
1010 			}
1011 			big->base = chunk;
1012 			big->bytes = size;
1013 		}
1014 		big->active = size;
1015 
1016 		bigp = bigalloc_lock(chunk);
1017 		if (big->active < big->bytes) {
1018 			atomic_add_long(&excess_alloc,
1019 					big->bytes - big->active);
1020 		}
1021 		big->next = *bigp;
1022 		*bigp = big;
1023 		bigalloc_unlock(chunk);
1024 		handle_excess_big();
1025 
1026 		return(chunk);
1027 	}
1028 
1029 	/* Compute allocation zone; zoneindex will panic on excessive sizes */
1030 	zi = zoneindex(&size, &chunking);
1031 	MASSERT(zi < NZONES);
1032 
1033 	obj = mtmagazine_alloc(zi);
1034 	if (obj != NULL) {
1035 		if (flags & SAFLAG_ZERO)
1036 			bzero(obj, size);
1037 		return (obj);
1038 	}
1039 
1040 	slgd = &SLGlobalData;
1041 	slgd_lock(slgd);
1042 
1043 	/*
1044 	 * Attempt to allocate out of an existing zone.  If all zones are
1045 	 * exhausted pull one off the free list or allocate a new one.
1046 	 */
1047 	if ((z = slgd->ZoneAry[zi]) == NULL) {
1048 		z = zone_alloc(flags);
1049 		if (z == NULL)
1050 			goto fail;
1051 
1052 		/*
1053 		 * How big is the base structure?
1054 		 */
1055 #if defined(INVARIANTS)
1056 		/*
1056 		 * Make room for z_Bitmap.  An exact calculation is
1057 		 * somewhat more complicated, so we simply over-estimate
1058 		 * the space required.
1060 		 */
1061 		off = offsetof(struct slzone,
1062 				z_Bitmap[(ZoneSize / size + 31) / 32]);
1063 		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
1064 #else
1065 		off = sizeof(struct slzone);
1066 #endif
1067 
1068 		/*
1069 		 * Align the storage in the zone based on the chunking.
1070 		 *
1071 		 * Guarantee power-of-2 alignment for power-of-2-sized
1072 		 * chunks.  Otherwise align based on the chunking size
1073 		 * (typically 8 or 16 bytes for small allocations).
1074 		 *
1075 		 * NOTE: Allocations >= ZoneLimit are governed by the
1076 		 * bigalloc code and typically only guarantee page-alignment.
1077 		 *
1078 		 * Set initial conditions for UIndex near the zone header
1079 		 * to reduce unnecessary page faults, vs semi-randomization
1080 		 * to improve L1 cache saturation.
1081 		 */
1082 		if ((size | (size - 1)) + 1 == (size << 1))
1083 			off = roundup2(off, size);
1084 		else
1085 			off = roundup2(off, chunking);
1086 		z->z_Magic = ZALLOC_SLAB_MAGIC;
1087 		z->z_ZoneIndex = zi;
1088 		z->z_NMax = (ZoneSize - off) / size;
1089 		z->z_NFree = z->z_NMax;
1090 		z->z_BasePtr = (char *)z + off;
1091 		z->z_UIndex = z->z_UEndIndex = 0;
1092 		z->z_ChunkSize = size;
1093 		z->z_FirstFreePg = ZonePageCount;
1094 		z->z_Next = slgd->ZoneAry[zi];
1095 		slgd->ZoneAry[zi] = z;
1096 		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1097 			flags &= ~SAFLAG_ZERO;	/* already zero'd */
1098 			flags |= SAFLAG_PASSIVE;
1099 		}
1100 
1101 		/*
1102 		 * Slide the base index for initial allocations out of the
1103 		 * next zone we create so we do not over-weight the lower
1104 		 * part of the cpu memory caches.
1105 		 */
1106 		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
1107 					& (ZALLOC_MAX_ZONE_SIZE - 1);
1108 	}
1109 
1110 	/*
1111 	 * Ok, we have a zone from which at least one chunk is available.
1112 	 *
1113 	 * Remove us from the ZoneAry[] when we become empty
1114 	 */
1115 	MASSERT(z->z_NFree > 0);
1116 
1117 	if (--z->z_NFree == 0) {
1118 		slgd->ZoneAry[zi] = z->z_Next;
1119 		z->z_Next = NULL;
1120 	}
1121 
1122 	/*
1123 	 * Locate a chunk in a free page.  This attempts to localize
1124 	 * reallocations into earlier pages without us having to sort
1125 	 * the chunk list.  A chunk may still overlap a page boundary.
1126 	 */
1127 	while (z->z_FirstFreePg < ZonePageCount) {
1128 		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
1129 #ifdef DIAGNOSTIC
1130 			/*
1131 			 * Diagnostic: c_Next is not total garbage.
1132 			 */
1133 			MASSERT(chunk->c_Next == NULL ||
1134 			    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
1135 			    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
1136 #endif
1137 #ifdef INVARIANTS
1138 			chunk_mark_allocated(z, chunk);
1139 #endif
1140 			MASSERT((uintptr_t)chunk & ZoneMask);
1141 			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
1142 			goto done;
1143 		}
1144 		++z->z_FirstFreePg;
1145 	}
1146 
1147 	/*
1148 	 * No chunks are available but NFree said we had some memory,
1149 	 * so it must be available in the never-before-used-memory
1150 	 * area governed by UIndex.  The consequences are very
1151 	 * serious if our zone got corrupted so we use an explicit
1152 	 * panic rather than a KASSERT.
1153 	 */
1154 	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
1155 
1156 	if (++z->z_UIndex == z->z_NMax)
1157 		z->z_UIndex = 0;
1158 	if (z->z_UIndex == z->z_UEndIndex) {
1159 		if (z->z_NFree != 0)
1160 			_mpanic("slaballoc: corrupted zone");
1161 	}
1162 
1163 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1164 		flags &= ~SAFLAG_ZERO;
1165 		flags |= SAFLAG_PASSIVE;
1166 	}
1167 #if defined(INVARIANTS)
1168 	chunk_mark_allocated(z, chunk);
1169 #endif
1170 
1171 done:
1172 	slgd_unlock(slgd);
1173 	if (flags & SAFLAG_ZERO) {
1174 		bzero(chunk, size);
1175 #ifdef INVARIANTS
1176 	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
1177 		if (use_malloc_pattern) {
1178 			for (i = 0; i < size; i += sizeof(int)) {
1179 				*(int *)((char *)chunk + i) = -1;
1180 			}
1181 		}
1182 		/* avoid accidental double-free check */
1183 		chunk->c_Next = (void *)-1;
1184 #endif
1185 	}
1186 	return(chunk);
1187 fail:
1188 	slgd_unlock(slgd);
1189 	return(NULL);
1190 }
1191 
1192 /*
1193  * Reallocate memory within the chunk
1194  */
1195 static void *
1196 _slabrealloc(void *ptr, size_t size)
1197 {
1198 	bigalloc_t *bigp;
1199 	void *nptr;
1200 	slzone_t z;
1201 	size_t chunking;
1202 
1203 	if (ptr == NULL || ptr == ZERO_LENGTH_PTR) {
1204 		return(_slaballoc(size, 0));
1205 	}
1206 
1207 	if (size == 0) {
1208 		free(ptr);
1209 		return(ZERO_LENGTH_PTR);
1210 	}
1211 
1212 	/*
1213 	 * Handle oversized allocations.
1214 	 */
1215 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1216 		bigalloc_t big;
1217 		size_t bigbytes;
1218 
1219 		while ((big = *bigp) != NULL) {
1220 			if (big->base == ptr) {
1221 				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1222 				bigbytes = big->bytes;
1223 
1224 				/*
1225 				 * If it already fits determine if it makes
1226 				 * sense to shrink/reallocate.  Try to optimize
1227 				 * programs which stupidly make incremental
1228 				 * reallocations larger or smaller by scaling
1229 				 * the allocation.  Also deal with potential
1230 				 * coloring.
1231 				 */
1232 				if (size >= (bigbytes >> 1) &&
1233 				    size <= bigbytes) {
1234 					if (big->active != size) {
1235 						atomic_add_long(&excess_alloc,
1236 								big->active -
1237 								size);
1238 					}
1239 					big->active = size;
1240 					bigalloc_unlock(ptr);
1241 					return(ptr);
1242 				}
1243 
1244 				/*
1245 				 * For large reallocations, allocate more space
1246 				 * than we need to try to avoid excessive
1247 				 * reallocations later on.
1248 				 */
1249 				chunking = size + (size >> 3);
1250 				chunking = (chunking + PAGE_MASK) &
1251 					   ~(size_t)PAGE_MASK;
1252 
1253 				/*
1254 				 * Try to allocate adjacently in case the
1255 				 * program is idiotically realloc()ing a
1256 				 * huge memory block just slightly bigger.
1257 				 * (llvm's llc tends to do this a lot).
1258 				 *
1259 				 * (MAP_TRYFIXED forces mmap to fail if there
1260 				 *  is already something at the address).
1261 				 */
1262 				if (chunking > bigbytes) {
1263 					char *addr;
1264 					int errno_save = errno;
1265 
1266 					addr = mmap((char *)ptr + bigbytes,
1267 						    chunking - bigbytes,
1268 						    PROT_READ|PROT_WRITE,
1269 						    MAP_PRIVATE|MAP_ANON|
1270 						    MAP_TRYFIXED,
1271 						    -1, 0);
1272 					errno = errno_save;
1273 					if (addr == (char *)ptr + bigbytes) {
1274 						atomic_add_long(&excess_alloc,
1275 								big->active -
1276 								big->bytes +
1277 								chunking -
1278 								size);
1279 						big->bytes = chunking;
1280 						big->active = size;
1281 						bigalloc_unlock(ptr);
1282 
1283 						return(ptr);
1284 					}
1285 					MASSERT((void *)addr == MAP_FAILED);
1286 				}
1287 
1288 				/*
1289 				 * Failed, unlink big and allocate fresh.
1290 				 * (note that we have to leave (big) intact
1291 				 * in case the slaballoc fails).
1292 				 */
1293 				*bigp = big->next;
1294 				bigalloc_unlock(ptr);
1295 				if ((nptr = _slaballoc(size, 0)) == NULL) {
1296 					/* Relink block */
1297 					bigp = bigalloc_lock(ptr);
1298 					big->next = *bigp;
1299 					*bigp = big;
1300 					bigalloc_unlock(ptr);
1301 					return(NULL);
1302 				}
1303 				if (size > bigbytes)
1304 					size = bigbytes;
1305 				bcopy(ptr, nptr, size);
1306 				atomic_add_long(&excess_alloc, big->active -
1307 							       big->bytes);
1308 				_slabfree(ptr, FASTSLABREALLOC, &big);
1309 
1310 				return(nptr);
1311 			}
1312 			bigp = &big->next;
1313 		}
1314 		bigalloc_unlock(ptr);
1315 		handle_excess_big();
1316 	}
1317 
1318 	/*
1319 	 * Get the original allocation's zone.  If the new request winds
1320 	 * up using the same chunk size we do not have to do anything.
1321 	 *
1322 	 * NOTE: We don't have to lock the globaldata here, the fields we
1323 	 * access here will not change at least as long as we have control
1324 	 * over the allocation.
1325 	 */
1326 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1327 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1328 
1329 	/*
1330 	 * Use zoneindex() to chunk-align the new size, as long as the
1331 	 * new size is not too large.
1332 	 */
1333 	if (size < ZoneLimit) {
1334 		zoneindex(&size, &chunking);
1335 		if (z->z_ChunkSize == size) {
1336 			return(ptr);
1337 		}
1338 	}
1339 
1340 	/*
1341 	 * Allocate memory for the new request size and copy as appropriate.
1342 	 */
1343 	if ((nptr = _slaballoc(size, 0)) != NULL) {
1344 		if (size > z->z_ChunkSize)
1345 			size = z->z_ChunkSize;
1346 		bcopy(ptr, nptr, size);
1347 		_slabfree(ptr, 0, NULL);
1348 	}
1349 
1350 	return(nptr);
1351 }
1352 
1353 /*
1354  * free (SLAB ALLOCATOR)
1355  *
1356  * Free a memory block previously allocated by malloc.  Note that we do not
1357  * attempt to update ks_loosememuse as MP races could prevent us from
1358  * checking memory limits in malloc.
1359  *
1360  * flags:
1361  *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
1362  *				unlinked.
1363  *
1364  * MPSAFE
1365  */
1366 static void
1367 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1368 {
1369 	slzone_t z;
1370 	slchunk_t chunk;
1371 	bigalloc_t big;
1372 	bigalloc_t *bigp;
1373 	slglobaldata_t slgd;
1374 	size_t size;
1375 	int zi;
1376 	int pgno;
1377 
1378 	/* Fast realloc path for big allocations */
1379 	if (flags & FASTSLABREALLOC) {
1380 		big = *rbigp;
1381 		goto fastslabrealloc;
1382 	}
1383 
1384 	/*
1385 	 * Handle NULL frees and special 0-byte allocations
1386 	 */
1387 	if (ptr == NULL)
1388 		return;
1389 	if (ptr == ZERO_LENGTH_PTR)
1390 		return;
1391 
1392 	/*
1393 	 * Handle oversized allocations.
1394 	 */
1395 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1396 		while ((big = *bigp) != NULL) {
1397 			if (big->base == ptr) {
1398 				*bigp = big->next;
1399 				atomic_add_long(&excess_alloc, big->active -
1400 							       big->bytes);
1401 				bigalloc_unlock(ptr);
1402 
1403 				/*
1404 				 * Try to stash the block we are freeing,
1405 				 * potentially receiving another block in
1406 				 * return which must be freed.
1407 				 */
1408 fastslabrealloc:
1409 				if (big->bytes <= BIGCACHE_LIMIT) {
1410 					big = bigcache_find_free(big);
1411 					if (big == NULL)
1412 						return;
1413 				}
1414 				ptr = big->base;	/* reload */
1415 				size = big->bytes;
1416 				_slabfree(big, 0, NULL);
1417 #ifdef INVARIANTS
1418 				MASSERT(sizeof(weirdary) <= size);
1419 				bcopy(weirdary, ptr, sizeof(weirdary));
1420 #endif
1421 				_vmem_free(ptr, size);
1422 				return;
1423 			}
1424 			bigp = &big->next;
1425 		}
1426 		bigalloc_unlock(ptr);
1427 		handle_excess_big();
1428 	}
1429 
1430 	/*
1431 	 * Zone case.  Figure out the zone based on the fact that it is
1432 	 * ZoneSize aligned.
1433 	 */
1434 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1435 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1436 
1437 	size = z->z_ChunkSize;
1438 	zi = z->z_ZoneIndex;
1439 
1440 	if (g_malloc_flags & SAFLAG_ZERO)
1441 		bzero(ptr, size);
1442 
1443 	if (mtmagazine_free(zi, ptr) == 0)
1444 		return;
1445 
1446 	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1447 	chunk = ptr;
1448 	slgd = &SLGlobalData;
1449 	slgd_lock(slgd);
1450 
1451 #ifdef INVARIANTS
1452 	/*
1453 	 * Attempt to detect a double-free.  To reduce overhead we only check
1454 	 * if there appears to be a link pointer at the base of the data.
1455 	 */
1456 	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1457 		slchunk_t scan;
1458 
1459 		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1460 			if (scan == chunk)
1461 				_mpanic("Double free at %p", chunk);
1462 		}
1463 	}
1464 	chunk_mark_free(z, chunk);
1465 #endif
1466 
1467 	/*
1468 	 * Put weird data into the memory to detect modifications after
1469 	 * freeing, illegal pointer use after freeing (we should fault on
1470 	 * the odd address), and so forth.
1471 	 */
1472 #ifdef INVARIANTS
1473 	if (z->z_ChunkSize < sizeof(weirdary))
1474 		bcopy(weirdary, chunk, z->z_ChunkSize);
1475 	else
1476 		bcopy(weirdary, chunk, sizeof(weirdary));
1477 #endif
1478 
1479 	/*
1480 	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1481 	 * z_FirstFreePg.
1482 	 */
1483 	chunk->c_Next = z->z_PageAry[pgno];
1484 	z->z_PageAry[pgno] = chunk;
1485 	if (z->z_FirstFreePg > pgno)
1486 		z->z_FirstFreePg = pgno;
1487 
1488 	/*
1489 	 * Bump the number of free chunks.  If it becomes non-zero the zone
1490 	 * must be added back onto the appropriate list.
1491 	 */
1492 	if (z->z_NFree++ == 0) {
1493 		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1494 		slgd->ZoneAry[z->z_ZoneIndex] = z;
1495 	}
1496 
1497 	/*
1498 	 * If the zone becomes totally free then release it.
1499 	 */
1500 	if (z->z_NFree == z->z_NMax) {
1501 		slzone_t *pz;
1502 
1503 		pz = &slgd->ZoneAry[z->z_ZoneIndex];
1504 		while (z != *pz)
1505 			pz = &(*pz)->z_Next;
1506 		*pz = z->z_Next;
1507 		z->z_Magic = -1;
1508 		z->z_Next = NULL;
1509 		zone_free(z);
1510 		/* slgd lock released */
1511 		return;
1512 	}
1513 	slgd_unlock(slgd);
1514 }
1515 
1516 #if defined(INVARIANTS)
1517 /*
1518  * Helper routines for sanity checks
1519  */
1520 static
1521 void
1522 chunk_mark_allocated(slzone_t z, void *chunk)
1523 {
1524 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1525 	__uint32_t *bitptr;
1526 
1527 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1528 	bitptr = &z->z_Bitmap[bitdex >> 5];
1529 	bitdex &= 31;
1530 	MASSERT((*bitptr & (1 << bitdex)) == 0);
1531 	*bitptr |= 1 << bitdex;
1532 }
1533 
1534 static
1535 void
1536 chunk_mark_free(slzone_t z, void *chunk)
1537 {
1538 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1539 	__uint32_t *bitptr;
1540 
1541 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1542 	bitptr = &z->z_Bitmap[bitdex >> 5];
1543 	bitdex &= 31;
1544 	MASSERT((*bitptr & (1 << bitdex)) != 0);
1545 	*bitptr &= ~(1 << bitdex);
1546 }
1547 
1548 #endif
1549 
1550 /*
1551  * Allocate and return an object from the magazine.  NULL is returned
1552  * and *burst is adjusted if the magazine is empty.
1553  */
1554 static __inline void *
1555 magazine_alloc(struct magazine *mp, int *burst)
1556 {
1557 	void *obj;
1558 
1559 	if (mp == NULL)
1560 		return(NULL);
1561 	if (MAGAZINE_NOTEMPTY(mp)) {
1562 		obj = mp->objects[--mp->rounds];
1563 		return(obj);
1564 	}
1565 
1566 	/*
1567 	 * Return burst factor to caller along with NULL
1568 	 */
1569 	if ((mp->flags & M_BURST) && (burst != NULL)) {
1570 		*burst = mp->burst_factor;
1571 	}
1572 	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1573 	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1574 	    (burst != NULL)) {
1575 		mp->burst_factor -= M_BURST_NSCALE;
1576 		if (mp->burst_factor <= 1) {
1577 			mp->burst_factor = 1;
1578 			mp->flags &= ~(M_BURST);
1579 			mp->flags &= ~(M_BURST_EARLY);
1580 		}
1581 	}
1582 	return (NULL);
1583 }
1584 
1585 static __inline int
1586 magazine_free(struct magazine *mp, void *p)
1587 {
1588 	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1589 		mp->objects[mp->rounds++] = p;
1590 		return 0;
1591 	}
1592 
1593 	return -1;
1594 }
1595 
1596 static void *
1597 mtmagazine_alloc(int zi)
1598 {
1599 	thr_mags *tp;
1600 	struct magazine *mp, *emptymag;
1601 	magazine_depot *d;
1602 	void *obj;
1603 
1604 	/*
1605 	 * Do not try to access per-thread magazines while the mtmagazine
1606 	 * is being initialized or destroyed.
1607 	 */
1608 	tp = &thread_mags;
1609 	if (tp->init < 0)
1610 		return(NULL);
1611 
1612 	/*
1613 	 * Primary per-thread allocation loop
1614 	 */
1615 	for (;;) {
1616 		/*
1617 		 * If the loaded magazine has rounds, allocate and return
1618 		 */
1619 		mp = tp->mags[zi].loaded;
1620 		obj = magazine_alloc(mp, NULL);
1621 		if (obj)
1622 			break;
1623 
1624 		/*
1625 		 * If the prev magazine is full, swap with the loaded
1626 		 * magazine and retry.
1627 		 */
1628 		mp = tp->mags[zi].prev;
1629 		if (mp && MAGAZINE_FULL(mp)) {
1630 			MASSERT(mp->rounds != 0);
1631 			swap_mags(&tp->mags[zi]);	/* prev now empty */
1632 			continue;
1633 		}
1634 
1635 		/*
1636 		 * Try to get a full magazine from the depot.  Cycle
1637 		 * through depot(full)->loaded->prev->depot(empty).
1638 		 * Retry if a full magazine was available from the depot.
1639 		 *
1640 		 * Return NULL (caller will fall through) if no magazines
1641 		 * can be found anywhere.
1642 		 */
1643 		d = &depots[zi];
1644 		depot_lock(d);
1645 		emptymag = tp->mags[zi].prev;
1646 		if (emptymag)
1647 			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
1648 		tp->mags[zi].prev = tp->mags[zi].loaded;
1649 		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
1650 		tp->mags[zi].loaded = mp;
1651 		if (mp) {
1652 			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1653 			MASSERT(MAGAZINE_NOTEMPTY(mp));
1654 			depot_unlock(d);
1655 			continue;
1656 		}
1657 		depot_unlock(d);
1658 		break;
1659 	}
1660 
1661 	return (obj);
1662 }
1663 
1664 static int
1665 mtmagazine_free(int zi, void *ptr)
1666 {
1667 	thr_mags *tp;
1668 	struct magazine *mp, *loadedmag;
1669 	magazine_depot *d;
1670 	int rc = -1;
1671 
1672 	/*
1673 	 * Do not try to access per-thread magazines while the mtmagazine
1674 	 * is being initialized or destroyed.
1675 	 */
1676 	tp = &thread_mags;
1677 	if (tp->init < 0)
1678 		return(-1);
1679 
1680 	/*
1681 	 * Primary per-thread freeing loop
1682 	 */
1683 	for (;;) {
1684 		/*
1685 		 * Make sure a new magazine is available in case we have
1686 		 * to use it.  Staging the newmag allows us to avoid
1687 		 * some locking/reentrancy complexity.
1688 		 *
1689 		 * Temporarily disable the per-thread caches for this
1690 		 * allocation to avoid reentrancy and/or to avoid a
1691 		 * stack overflow if the [zi] happens to be the same that
1692 		 * would be used to allocate the new magazine.
1693 		 */
1694 		if (tp->newmag == NULL) {
1695 			tp->init = -1;
1696 			tp->newmag = _slaballoc(sizeof(struct magazine),
1697 						SAFLAG_ZERO);
1698 			tp->init = 1;
1699 			if (tp->newmag == NULL) {
1700 				rc = -1;
1701 				break;
1702 			}
1703 		}
1704 
1705 		/*
1706 		 * If the loaded magazine has space, free directly to it
1707 		 */
1708 		rc = magazine_free(tp->mags[zi].loaded, ptr);
1709 		if (rc == 0)
1710 			break;
1711 
1712 		/*
1713 		 * If the prev magazine is empty, swap with the loaded
1714 		 * magazine and retry.
1715 		 */
1716 		mp = tp->mags[zi].prev;
1717 		if (mp && MAGAZINE_EMPTY(mp)) {
1718 			MASSERT(mp->rounds == 0);
1719 			swap_mags(&tp->mags[zi]);	/* prev now full */
1720 			continue;
1721 		}
1722 
1723 		/*
1724 		 * Try to get an empty magazine from the depot.  Cycle
1725 		 * through depot(empty)->loaded->prev->depot(full).
1726 		 * Retry if an empty magazine was available from the depot.
1727 		 */
1728 		d = &depots[zi];
1729 		depot_lock(d);
1730 
1731 		if ((loadedmag = tp->mags[zi].prev) != NULL)
1732 			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
1733 		tp->mags[zi].prev = tp->mags[zi].loaded;
1734 		mp = SLIST_FIRST(&d->empty);
1735 		if (mp) {
1736 			tp->mags[zi].loaded = mp;
1737 			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1738 			MASSERT(MAGAZINE_NOTFULL(mp));
1739 		} else {
1740 			mp = tp->newmag;
1741 			tp->newmag = NULL;
1742 			mp->capacity = M_MAX_ROUNDS;
1743 			mp->rounds = 0;
1744 			mp->flags = 0;
1745 			tp->mags[zi].loaded = mp;
1746 		}
1747 		depot_unlock(d);
1748 	}
1749 
1750 	return rc;
1751 }
1752 
1753 static void
1754 mtmagazine_init(void)
1755 {
1756 	int error;
1757 
1758 	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1759 	if (error)
1760 		abort();
1761 }
1762 
1763 /*
1764  * This function is only used by the thread exit destructor
1765  */
1766 static void
1767 mtmagazine_drain(struct magazine *mp)
1768 {
1769 	void *obj;
1770 
1771 	while (MAGAZINE_NOTEMPTY(mp)) {
1772 		obj = magazine_alloc(mp, NULL);
1773 		_slabfree(obj, 0, NULL);
1774 	}
1775 }
1776 
1777 /*
1778  * mtmagazine_destructor()
1779  *
1780  * When a thread exits, we reclaim all its resources; all its magazines are
1781  * drained and the structures are freed.
1782  *
1783  * WARNING!  The destructor can be called multiple times if the larger user
1784  *	     program has its own destructors which run after ours and
1785  *	     which allocate or free memory.
1786  */
1787 static void
1788 mtmagazine_destructor(void *thrp)
1789 {
1790 	thr_mags *tp = thrp;
1791 	struct magazine *mp;
1792 	int i;
1793 
1794 	/*
1795 	 * Prevent further use of mtmagazines while we are destructing
1796 	 * them, as well as for any destructors which are run after us
1797 	 * prior to the thread actually being destroyed.
1798 	 */
1799 	tp->init = -1;
1800 
1801 	for (i = 0; i < NZONES; i++) {
1802 		mp = tp->mags[i].loaded;
1803 		tp->mags[i].loaded = NULL;
1804 		if (mp) {
1805 			if (MAGAZINE_NOTEMPTY(mp))
1806 				mtmagazine_drain(mp);
1807 			_slabfree(mp, 0, NULL);
1808 		}
1809 
1810 		mp = tp->mags[i].prev;
1811 		tp->mags[i].prev = NULL;
1812 		if (mp) {
1813 			if (MAGAZINE_NOTEMPTY(mp))
1814 				mtmagazine_drain(mp);
1815 			_slabfree(mp, 0, NULL);
1816 		}
1817 	}
1818 
1819 	if (tp->newmag) {
1820 		mp = tp->newmag;
1821 		tp->newmag = NULL;
1822 		_slabfree(mp, 0, NULL);
1823 	}
1824 }
1825 
1826 /*
1827  * zone_alloc()
1828  *
1829  * Attempt to allocate a zone from the zone magazine; the zone magazine has
1830  * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1831  */
1832 static slzone_t
1833 zone_alloc(int flags)
1834 {
1835 	slglobaldata_t slgd = &SLGlobalData;
1836 	int burst = 1;
1837 	int i, j;
1838 	slzone_t z;
1839 
1840 	zone_magazine_lock();
1841 	slgd_unlock(slgd);
1842 
1843 	z = magazine_alloc(&zone_magazine, &burst);
1844 	if (z == NULL && burst == 1) {
1845 		zone_magazine_unlock();
1846 		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1847 	} else if (z == NULL) {
1848 		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1849 		if (z) {
1850 			for (i = 1; i < burst; i++) {
1851 				j = magazine_free(&zone_magazine,
1852 						  (char *) z + (ZoneSize * i));
1853 				MASSERT(j == 0);
1854 			}
1855 		}
1856 		zone_magazine_unlock();
1857 	} else {
1858 		z->z_Flags |= SLZF_UNOTZEROD;
1859 		zone_magazine_unlock();
1860 	}
1861 	slgd_lock(slgd);
1862 	return z;
1863 }
1864 
1865 /*
1866  * zone_free()
1867  *
1868  * Release a zone and unlock the slgd lock.
1869  */
1870 static void
1871 zone_free(void *z)
1872 {
1873 	slglobaldata_t slgd = &SLGlobalData;
1874 	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1875 	int i, j;
1876 
1877 	zone_magazine_lock();
1878 	slgd_unlock(slgd);
1879 
1880 	bzero(z, sizeof(struct slzone));
1881 
1882 	if (opt_madvise)
1883 		madvise(z, ZoneSize, MADV_FREE);
1884 
1885 	i = magazine_free(&zone_magazine, z);
1886 
1887 	/*
1888 	 * If we failed to free, collect excess magazines; release the zone
1889 	 * magazine lock, and then free to the system via _vmem_free. Re-enable
1890 	 * BURST mode for the magazine.
1891 	 */
1892 	if (i == -1) {
1893 		j = zone_magazine.rounds - zone_magazine.low_factor;
1894 		for (i = 0; i < j; i++) {
1895 			excess[i] = magazine_alloc(&zone_magazine, NULL);
1896 			MASSERT(excess[i] !=  NULL);
1897 		}
1898 
1899 		zone_magazine_unlock();
1900 
1901 		for (i = 0; i < j; i++)
1902 			_vmem_free(excess[i], ZoneSize);
1903 
1904 		_vmem_free(z, ZoneSize);
1905 	} else {
1906 		zone_magazine_unlock();
1907 	}
1908 }
1909 
1910 /*
1911  * _vmem_alloc()
1912  *
1913  *	Directly map memory in PAGE_SIZE'd chunks with the specified
1914  *	alignment.
1915  *
1916  *	Alignment must be a multiple of PAGE_SIZE.
1917  *
1918  *	Size must be >= alignment.
1919  */
1920 static void *
1921 _vmem_alloc(size_t size, size_t align, int flags)
1922 {
1923 	char *addr;
1924 	char *save;
1925 	size_t excess;
1926 
1927 	/*
1928 	 * Map anonymous private memory.
1929 	 */
1930 	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1931 		    MAP_PRIVATE|MAP_ANON, -1, 0);
1932 	if (addr == MAP_FAILED)
1933 		return(NULL);
1934 
1935 	/*
1936 	 * Check alignment.  The misaligned offset is also the excess
1937 	 * amount.  If misaligned, unmap the excess so we have a chance of
1938 	 * mapping at the next alignment point and recursively try again.
1939 	 *
1940 	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
1941 	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
1942 	 *   xxxxxxxxx				final excess calculation
1943 	 *   ^ returned address
1944 	 */
1945 	excess = (uintptr_t)addr & (align - 1);
1946 
1947 	if (excess) {
1948 		excess = align - excess;
1949 		save = addr;
1950 
1951 		munmap(save + excess, size - excess);
1952 		addr = _vmem_alloc(size, align, flags);
1953 		munmap(save, excess);
1954 	}
1955 	return((void *)addr);
1956 }
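
/*
 * Worked example of the re-alignment above (illustrative, assuming
 * align == ZoneSize == 64KB): if mmap() returns B + 0x1000, where B is
 * 64KB-aligned, then excess becomes 0xF000.  The tail of the mapping
 * starting at the next alignment boundary (B + 0x10000) is unmapped, the
 * allocation is retried recursively in the hope that the kernel places it
 * on that boundary, and finally the leading misaligned fragment
 * [B + 0x1000, B + 0x10000) is unmapped as well.
 */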
1957 
1958 /*
1959  * _vmem_free()
1960  *
1961  *	Free a chunk of memory allocated with _vmem_alloc()
1962  */
1963 static void
1964 _vmem_free(void *ptr, size_t size)
1965 {
1966 	munmap(ptr, size);
1967 }
1968 
1969 /*
1970  * Panic on fatal conditions
1971  */
1972 static void
1973 _mpanic(const char *ctl, ...)
1974 {
1975 	va_list va;
1976 
1977 	if (malloc_panic == 0) {
1978 		malloc_panic = 1;
1979 		va_start(va, ctl);
1980 		vfprintf(stderr, ctl, va);
1981 		fprintf(stderr, "\n");
1982 		fflush(stderr);
1983 		va_end(va);
1984 	}
1985 	abort();
1986 }
1987