xref: /dragonfly/lib/libc/stdlib/nmalloc.c (revision cfd1aba3)
1 /*
2  * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
3  *
4  * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com> and by
8  * Venkatesh Srinivas <me@endeavour.zapto.org>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
38  */
39 /*
40  * This module implements a slab allocator drop-in replacement for the
41  * libc malloc().
42  *
43  * A slab allocator reserves a ZONE for each chunk size, then lays the
44  * chunks out in an array within the zone.  Allocation and deallocation
45  * is nearly instantaneous, and overhead losses are limited to a fixed
46  * worst-case amount.
47  *
48  * The slab allocator does not have to pre-initialize the list of
49  * free chunks for each zone, and the underlying VM will not be
50  * touched at all beyond the zone header until an actual allocation
51  * needs it.
52  *
53  * Slab management and locking is done on a per-zone basis.
54  *
55  *	Alloc Size	Chunking        Number of zones
56  *	0-127		8		16
57  *	128-255		16		8
58  *	256-511		32		8
59  *	512-1023	64		8
60  *	1024-2047	128		8
61  *	2048-4095	256		8
62  *	4096-8191	512		8
63  *	8192-16383	1024		8
64  *	16384-32767	2048		8
65  *
66  *	Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
67  *	is used to locate for free.  One and Two-page allocations use the
68  *	zone mechanic to avoid excessive mmap()/munmap() calls.
69  *
70  *			   API FEATURES AND SIDE EFFECTS
71  *
72  *    + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73  *	Above that power-of-2 sized allocations are page-aligned.  Non
74  *	power-of-2 sized allocations are aligned the same as the chunk
75  *	size for their zone.
76  *    + malloc(0) returns a special non-NULL value
77  *    + ability to allocate arbitrarily large chunks of memory
78  *    + realloc will reuse the passed pointer if possible, within the
79  *	limitations of the zone chunking.
80  *
81  * Multithreaded enhancements for small allocations introduced August 2010.
82  * These are in the spirit of 'libumem'. See:
83  *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84  *	slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85  * 	USENIX Technical Conference. USENIX Association.
86  *
87  * TUNING
88  *
89  * The value of the environment variable MALLOC_OPTIONS is a character string
90  * containing various flags to tune nmalloc.
91  *
92  * 'U'   / ['u']	Generate / do not generate utrace entries for ktrace(1)
93  *			This will generate utrace events for all malloc,
94  *			realloc, and free calls. There are tools (mtrplay) to
95  *			replay and allocation pattern or to graph heap structure
96  *			(mtrgraph) which can interpret these logs.
97  * 'Z'   / ['z']	Zero out / do not zero all allocations.
98  *			Each new byte of memory allocated by malloc, realloc, or
99  *			reallocf will be initialized to 0. This is intended for
100  *			debugging and will affect performance negatively.
101  * 'H'	/  ['h']	Pass a hint to the kernel about pages unused by the
102  *			allocation functions.
103  */
104 
105 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
106 
107 #include "libc_private.h"
108 
109 #include <sys/param.h>
110 #include <sys/types.h>
111 #include <sys/mman.h>
112 #include <sys/queue.h>
113 #include <sys/uio.h>
114 #include <sys/ktrace.h>
115 #include <stdio.h>
116 #include <stdint.h>
117 #include <stdlib.h>
118 #include <stdarg.h>
119 #include <stddef.h>
120 #include <unistd.h>
121 #include <string.h>
122 #include <fcntl.h>
123 #include <errno.h>
124 #include <pthread.h>
125 
126 #include "spinlock.h"
127 #include "un-namespace.h"
128 
129 /*
130  * Linked list of large allocations
131  */
132 typedef struct bigalloc {
133 	struct bigalloc *next;	/* hash link */
134 	void	*base;		/* base pointer */
135 	u_long	bytes;		/* bytes allocated */
136 } *bigalloc_t;
137 
138 /*
139  * Note that any allocations which are exact multiples of PAGE_SIZE, or
140  * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
141  */
142 #define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
143 #define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
144 #define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
145 #define ZALLOC_ZONE_SIZE	(64 * 1024)
146 #define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
147 #define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */
148 
149 #if ZALLOC_ZONE_LIMIT == 16384
150 #define NZONES			72
151 #elif ZALLOC_ZONE_LIMIT == 32768
152 #define NZONES			80
153 #else
154 #error "I couldn't figure out NZONES"
155 #endif
156 
157 /*
158  * Chunk structure for free elements
159  */
160 typedef struct slchunk {
161 	struct slchunk *c_Next;
162 } *slchunk_t;
163 
164 /*
165  * The IN-BAND zone header is placed at the beginning of each zone.
166  */
167 struct slglobaldata;
168 
169 typedef struct slzone {
170 	int32_t		z_Magic;	/* magic number for sanity check */
171 	int		z_NFree;	/* total free chunks / ualloc space */
172 	struct slzone *z_Next;		/* ZoneAry[] link if z_NFree non-zero */
173 	int		z_NMax;		/* maximum free chunks */
174 	char		*z_BasePtr;	/* pointer to start of chunk array */
175 	int		z_UIndex;	/* current initial allocation index */
176 	int		z_UEndIndex;	/* last (first) allocation index */
177 	int		z_ChunkSize;	/* chunk size for validation */
178 	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
179 	int		z_ZoneIndex;
180 	int		z_Flags;
181 	struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
182 #if defined(INVARIANTS)
183 	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
184 #endif
185 } *slzone_t;
186 
187 typedef struct slglobaldata {
188 	spinlock_t	Spinlock;
189 	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
190 	int		JunkIndex;
191 } *slglobaldata_t;
192 
193 #define SLZF_UNOTZEROD		0x0001
194 
195 #define FASTSLABREALLOC		0x02
196 
197 /*
198  * Misc constants.  Note that allocations that are exact multiples of
199  * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
200  * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
201  */
202 #define MIN_CHUNK_SIZE		8		/* in bytes */
203 #define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
204 #define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
205 
206 /*
207  * The WEIRD_ADDR is used as known text to copy into free objects to
208  * try to create deterministic failure cases if the data is accessed after
209  * free.
210  *
211  * WARNING: A limited number of spinlocks are available, BIGXSIZE should
212  *	    not be larger then 64.
213  */
214 #define WEIRD_ADDR      0xdeadc0de
215 #define MAX_COPY        sizeof(weirdary)
216 #define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)
217 
218 #define BIGHSHIFT	10			/* bigalloc hash table */
219 #define BIGHSIZE	(1 << BIGHSHIFT)
220 #define BIGHMASK	(BIGHSIZE - 1)
221 #define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
222 #define BIGXMASK	(BIGXSIZE - 1)
223 
224 #define SAFLAG_ZERO	0x0001
225 #define SAFLAG_PASSIVE	0x0002
226 
227 /*
228  * Thread control
229  */
230 
231 #define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
232 
233 #define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
234 				_mpanic("assertion: %s in %s",	\
235 				#exp, __func__);		\
236 			    } while (0)
237 
238 /*
239  * Magazines
240  */
241 
242 #define M_MAX_ROUNDS	64
243 #define M_ZONE_ROUNDS	64
244 #define M_LOW_ROUNDS	32
245 #define M_INIT_ROUNDS	8
246 #define M_BURST_FACTOR  8
247 #define M_BURST_NSCALE	2
248 
249 #define M_BURST		0x0001
250 #define M_BURST_EARLY	0x0002
251 
252 struct magazine {
253 	SLIST_ENTRY(magazine) nextmagazine;
254 
255 	int		flags;
256 	int 		capacity;	/* Max rounds in this magazine */
257 	int 		rounds;		/* Current number of free rounds */
258 	int		burst_factor;	/* Number of blocks to prefill with */
259 	int 		low_factor;	/* Free till low_factor from full mag */
260 	void		*objects[M_MAX_ROUNDS];
261 };
262 
263 SLIST_HEAD(magazinelist, magazine);
264 
265 static spinlock_t zone_mag_lock;
266 static struct magazine zone_magazine = {
267 	.flags = M_BURST | M_BURST_EARLY,
268 	.capacity = M_ZONE_ROUNDS,
269 	.rounds = 0,
270 	.burst_factor = M_BURST_FACTOR,
271 	.low_factor = M_LOW_ROUNDS
272 };
273 
274 #define MAGAZINE_FULL(mp)	(mp->rounds == mp->capacity)
275 #define MAGAZINE_NOTFULL(mp)	(mp->rounds < mp->capacity)
276 #define MAGAZINE_EMPTY(mp)	(mp->rounds == 0)
277 #define MAGAZINE_NOTEMPTY(mp)	(mp->rounds != 0)
278 
279 /* Each thread will have a pair of magazines per size-class (NZONES)
280  * The loaded magazine will support immediate allocations, the previous
281  * magazine will either be full or empty and can be swapped at need */
282 typedef struct magazine_pair {
283 	struct magazine	*loaded;
284 	struct magazine	*prev;
285 } magazine_pair;
286 
287 /* A depot is a collection of magazines for a single zone. */
288 typedef struct magazine_depot {
289 	struct magazinelist full;
290 	struct magazinelist empty;
291 	spinlock_t	lock;
292 } magazine_depot;
293 
294 typedef struct thr_mags {
295 	magazine_pair	mags[NZONES];
296 	struct magazine	*newmag;
297 	int		init;
298 } thr_mags;
299 
300 /*
301  * With this attribute set, do not require a function call for accessing
302  * this variable when the code is compiled -fPIC. Empty for libc_rtld
303  * (like __thread).
304  */
305 #ifdef __LIBC_RTLD
306 #define TLS_ATTRIBUTE
307 #else
308 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")))
309 #endif
310 
311 static int mtmagazine_free_live;
312 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
313 static pthread_key_t thread_mags_key;
314 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
315 static magazine_depot depots[NZONES];
316 
317 /*
318  * Fixed globals (not per-cpu)
319  */
320 static const int ZoneSize = ZALLOC_ZONE_SIZE;
321 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
322 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
323 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
324 
325 static int opt_madvise = 0;
326 static int opt_utrace = 0;
327 static int g_malloc_flags = 0;
328 static struct slglobaldata	SLGlobalData;
329 static bigalloc_t bigalloc_array[BIGHSIZE];
330 static spinlock_t bigspin_array[BIGXSIZE];
331 static int malloc_panic;
332 static int malloc_dummy_pointer;
333 
334 static const int32_t weirdary[16] = {
335 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
336 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
337 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
338 	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
339 };
340 
341 static void *_slaballoc(size_t size, int flags);
342 static void *_slabrealloc(void *ptr, size_t size);
343 static void _slabfree(void *ptr, int, bigalloc_t *);
344 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
345 static void _vmem_free(void *ptr, size_t bytes);
346 static void *magazine_alloc(struct magazine *, int *);
347 static int magazine_free(struct magazine *, void *);
348 static void *mtmagazine_alloc(int zi);
349 static int mtmagazine_free(int zi, void *);
350 static void mtmagazine_init(void);
351 static void mtmagazine_destructor(void *);
352 static slzone_t zone_alloc(int flags);
353 static void zone_free(void *z);
354 static void _mpanic(const char *ctl, ...) __printflike(1, 2);
355 static void malloc_init(void) __constructor(101);
356 #if defined(INVARIANTS)
357 static void chunk_mark_allocated(slzone_t z, void *chunk);
358 static void chunk_mark_free(slzone_t z, void *chunk);
359 #endif
360 
361 struct nmalloc_utrace {
362 	void *p;
363 	size_t s;
364 	void *r;
365 };
366 
367 #define UTRACE(a, b, c)						\
368 	if (opt_utrace) {					\
369 		struct nmalloc_utrace ut = {			\
370 			.p = (a),				\
371 			.s = (b),				\
372 			.r = (c)				\
373 		};						\
374 		utrace(&ut, sizeof(ut));			\
375 	}
376 
377 #ifdef INVARIANTS
378 /*
379  * If enabled any memory allocated without M_ZERO is initialized to -1.
380  */
381 static int  use_malloc_pattern;
382 #endif
383 
384 static void
385 malloc_init(void)
386 {
387 	const char *p = NULL;
388 
389 	if (issetugid() == 0)
390 		p = getenv("MALLOC_OPTIONS");
391 
392 	for (; p != NULL && *p != '\0'; p++) {
393 		switch(*p) {
394 		case 'u':	opt_utrace = 0; break;
395 		case 'U':	opt_utrace = 1; break;
396 		case 'h':	opt_madvise = 0; break;
397 		case 'H':	opt_madvise = 1; break;
398 		case 'z':	g_malloc_flags = 0; break;
399 		case 'Z': 	g_malloc_flags = SAFLAG_ZERO; break;
400 		default:
401 			break;
402 		}
403 	}
404 
405 	UTRACE((void *) -1, 0, NULL);
406 }
407 
408 /*
409  * We have to install a handler for nmalloc thread teardowns when
410  * the thread is created.  We cannot delay this because destructors in
411  * sophisticated userland programs can call malloc() for the first time
412  * during their thread exit.
413  *
414  * This routine is called directly from pthreads.
415  */
416 void
417 _nmalloc_thr_init(void)
418 {
419 	thr_mags *tp;
420 
421 	/*
422 	 * Disallow mtmagazine operations until the mtmagazine is
423 	 * initialized.
424 	 */
425 	tp = &thread_mags;
426 	tp->init = -1;
427 
428 	if (mtmagazine_free_live == 0) {
429 		mtmagazine_free_live = 1;
430 		pthread_once(&thread_mags_once, mtmagazine_init);
431 	}
432 	pthread_setspecific(thread_mags_key, tp);
433 	tp->init = 1;
434 }
435 
436 /*
437  * Thread locks.
438  */
439 static __inline void
440 slgd_lock(slglobaldata_t slgd)
441 {
442 	if (__isthreaded)
443 		_SPINLOCK(&slgd->Spinlock);
444 }
445 
446 static __inline void
447 slgd_unlock(slglobaldata_t slgd)
448 {
449 	if (__isthreaded)
450 		_SPINUNLOCK(&slgd->Spinlock);
451 }
452 
453 static __inline void
454 depot_lock(magazine_depot *dp)
455 {
456 	if (__isthreaded)
457 		_SPINLOCK(&dp->lock);
458 }
459 
460 static __inline void
461 depot_unlock(magazine_depot *dp)
462 {
463 	if (__isthreaded)
464 		_SPINUNLOCK(&dp->lock);
465 }
466 
467 static __inline void
468 zone_magazine_lock(void)
469 {
470 	if (__isthreaded)
471 		_SPINLOCK(&zone_mag_lock);
472 }
473 
474 static __inline void
475 zone_magazine_unlock(void)
476 {
477 	if (__isthreaded)
478 		_SPINUNLOCK(&zone_mag_lock);
479 }
480 
481 static __inline void
482 swap_mags(magazine_pair *mp)
483 {
484 	struct magazine *tmp;
485 	tmp = mp->loaded;
486 	mp->loaded = mp->prev;
487 	mp->prev = tmp;
488 }
489 
490 /*
491  * bigalloc hashing and locking support.
492  *
493  * Return an unmasked hash code for the passed pointer.
494  */
495 static __inline int
496 _bigalloc_hash(void *ptr)
497 {
498 	int hv;
499 
500 	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
501 	      ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
502 
503 	return(hv);
504 }
505 
506 /*
507  * Lock the hash chain and return a pointer to its base for the specified
508  * address.
509  */
510 static __inline bigalloc_t *
511 bigalloc_lock(void *ptr)
512 {
513 	int hv = _bigalloc_hash(ptr);
514 	bigalloc_t *bigp;
515 
516 	bigp = &bigalloc_array[hv & BIGHMASK];
517 	if (__isthreaded)
518 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
519 	return(bigp);
520 }
521 
522 /*
523  * Lock the hash chain and return a pointer to its base for the specified
524  * address.
525  *
526  * BUT, if the hash chain is empty, just return NULL and do not bother
527  * to lock anything.
528  */
529 static __inline bigalloc_t *
530 bigalloc_check_and_lock(void *ptr)
531 {
532 	int hv = _bigalloc_hash(ptr);
533 	bigalloc_t *bigp;
534 
535 	bigp = &bigalloc_array[hv & BIGHMASK];
536 	if (*bigp == NULL)
537 		return(NULL);
538 	if (__isthreaded) {
539 		_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
540 	}
541 	return(bigp);
542 }
543 
544 static __inline void
545 bigalloc_unlock(void *ptr)
546 {
547 	int hv;
548 
549 	if (__isthreaded) {
550 		hv = _bigalloc_hash(ptr);
551 		_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
552 	}
553 }
554 
555 /*
556  * Calculate the zone index for the allocation request size and set the
557  * allocation request size to that particular zone's chunk size.
558  */
559 static __inline int
560 zoneindex(size_t *bytes, size_t *chunking)
561 {
562 	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */
563 	if (n < 128) {
564 		*bytes = n = (n + 7) & ~7;
565 		*chunking = 8;
566 		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
567 	}
568 	if (n < 256) {
569 		*bytes = n = (n + 15) & ~15;
570 		*chunking = 16;
571 		return(n / 16 + 7);
572 	}
573 	if (n < 8192) {
574 		if (n < 512) {
575 			*bytes = n = (n + 31) & ~31;
576 			*chunking = 32;
577 			return(n / 32 + 15);
578 		}
579 		if (n < 1024) {
580 			*bytes = n = (n + 63) & ~63;
581 			*chunking = 64;
582 			return(n / 64 + 23);
583 		}
584 		if (n < 2048) {
585 			*bytes = n = (n + 127) & ~127;
586 			*chunking = 128;
587 			return(n / 128 + 31);
588 		}
589 		if (n < 4096) {
590 			*bytes = n = (n + 255) & ~255;
591 			*chunking = 256;
592 			return(n / 256 + 39);
593 		}
594 		*bytes = n = (n + 511) & ~511;
595 		*chunking = 512;
596 		return(n / 512 + 47);
597 	}
598 #if ZALLOC_ZONE_LIMIT > 8192
599 	if (n < 16384) {
600 		*bytes = n = (n + 1023) & ~1023;
601 		*chunking = 1024;
602 		return(n / 1024 + 55);
603 	}
604 #endif
605 #if ZALLOC_ZONE_LIMIT > 16384
606 	if (n < 32768) {
607 		*bytes = n = (n + 2047) & ~2047;
608 		*chunking = 2048;
609 		return(n / 2048 + 63);
610 	}
611 #endif
612 	_mpanic("Unexpected byte count %zu", n);
613 	return(0);
614 }
615 
616 /*
617  * malloc() - call internal slab allocator
618  */
619 void *
620 malloc(size_t size)
621 {
622 	void *ptr;
623 
624 	ptr = _slaballoc(size, 0);
625 	if (ptr == NULL)
626 		errno = ENOMEM;
627 	else
628 		UTRACE(0, size, ptr);
629 	return(ptr);
630 }
631 
632 /*
633  * calloc() - call internal slab allocator
634  */
635 void *
636 calloc(size_t number, size_t size)
637 {
638 	void *ptr;
639 
640 	ptr = _slaballoc(number * size, SAFLAG_ZERO);
641 	if (ptr == NULL)
642 		errno = ENOMEM;
643 	else
644 		UTRACE(0, number * size, ptr);
645 	return(ptr);
646 }
647 
648 /*
649  * realloc() (SLAB ALLOCATOR)
650  *
651  * We do not attempt to optimize this routine beyond reusing the same
652  * pointer if the new size fits within the chunking of the old pointer's
653  * zone.
654  */
655 void *
656 realloc(void *ptr, size_t size)
657 {
658 	void *ret;
659 	ret = _slabrealloc(ptr, size);
660 	if (ret == NULL)
661 		errno = ENOMEM;
662 	else
663 		UTRACE(ptr, size, ret);
664 	return(ret);
665 }
666 
667 /*
668  * posix_memalign()
669  *
670  * Allocate (size) bytes with a alignment of (alignment), where (alignment)
671  * is a power of 2 >= sizeof(void *).
672  *
673  * The slab allocator will allocate on power-of-2 boundaries up to
674  * at least PAGE_SIZE.  We use the zoneindex mechanic to find a
675  * zone matching the requirements, and _vmem_alloc() otherwise.
676  */
677 int
678 posix_memalign(void **memptr, size_t alignment, size_t size)
679 {
680 	bigalloc_t *bigp;
681 	bigalloc_t big;
682 	size_t chunking;
683 	int zi;
684 
685 	/*
686 	 * OpenGroup spec issue 6 checks
687 	 */
688 	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
689 		*memptr = NULL;
690 		return(EINVAL);
691 	}
692 	if (alignment < sizeof(void *)) {
693 		*memptr = NULL;
694 		return(EINVAL);
695 	}
696 
697 	/*
698 	 * Our zone mechanism guarantees same-sized alignment for any
699 	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
700 	 * we can just call _slaballoc() and be done.  We round size up
701 	 * to the nearest alignment boundary to improve our odds of
702 	 * it becoming a power-of-2 if it wasn't before.
703 	 */
704 	if (size <= alignment)
705 		size = alignment;
706 	else
707 		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
708 	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
709 		*memptr = _slaballoc(size, 0);
710 		return(*memptr ? 0 : ENOMEM);
711 	}
712 
713 	/*
714 	 * Otherwise locate a zone with a chunking that matches
715 	 * the requested alignment, within reason.   Consider two cases:
716 	 *
717 	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
718 	 *     we find will be the best fit because the chunking will be
719 	 *     greater or equal to the alignment.
720 	 *
721 	 * (2) A 513 allocation on a 256-byte alignment.  In this case
722 	 *     the first zoneindex we find will be for 576 byte allocations
723 	 *     with a chunking of 64, which is not sufficient.  To fix this
724 	 *     we simply find the nearest power-of-2 >= size and use the
725 	 *     same side-effect of _slaballoc() which guarantees
726 	 *     same-alignment on a power-of-2 allocation.
727 	 */
728 	if (size < PAGE_SIZE) {
729 		zi = zoneindex(&size, &chunking);
730 		if (chunking >= alignment) {
731 			*memptr = _slaballoc(size, 0);
732 			return(*memptr ? 0 : ENOMEM);
733 		}
734 		if (size >= 1024)
735 			alignment = 1024;
736 		if (size >= 16384)
737 			alignment = 16384;
738 		while (alignment < size)
739 			alignment <<= 1;
740 		*memptr = _slaballoc(alignment, 0);
741 		return(*memptr ? 0 : ENOMEM);
742 	}
743 
744 	/*
745 	 * If the slab allocator cannot handle it use vmem_alloc().
746 	 *
747 	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
748 	 */
749 	if (alignment < PAGE_SIZE)
750 		alignment = PAGE_SIZE;
751 	if (size < alignment)
752 		size = alignment;
753 	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
754 	*memptr = _vmem_alloc(size, alignment, 0);
755 	if (*memptr == NULL)
756 		return(ENOMEM);
757 
758 	big = _slaballoc(sizeof(struct bigalloc), 0);
759 	if (big == NULL) {
760 		_vmem_free(*memptr, size);
761 		*memptr = NULL;
762 		return(ENOMEM);
763 	}
764 	bigp = bigalloc_lock(*memptr);
765 	big->base = *memptr;
766 	big->bytes = size;
767 	big->next = *bigp;
768 	*bigp = big;
769 	bigalloc_unlock(*memptr);
770 
771 	return(0);
772 }
773 
774 /*
775  * free() (SLAB ALLOCATOR) - do the obvious
776  */
777 void
778 free(void *ptr)
779 {
780 	UTRACE(ptr, 0, 0);
781 	_slabfree(ptr, 0, NULL);
782 }
783 
784 /*
785  * _slaballoc()	(SLAB ALLOCATOR)
786  *
787  *	Allocate memory via the slab allocator.  If the request is too large,
788  *	or if it page-aligned beyond a certain size, we fall back to the
789  *	KMEM subsystem
790  */
791 static void *
792 _slaballoc(size_t size, int flags)
793 {
794 	slzone_t z;
795 	slchunk_t chunk;
796 	slglobaldata_t slgd;
797 	size_t chunking;
798 	int zi;
799 #ifdef INVARIANTS
800 	int i;
801 #endif
802 	int off;
803 	void *obj;
804 
805 	/*
806 	 * Handle the degenerate size == 0 case.  Yes, this does happen.
807 	 * Return a special pointer.  This is to maintain compatibility with
808 	 * the original malloc implementation.  Certain devices, such as the
809 	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
810 	 * also realloc() later on.  Joy.
811 	 */
812 	if (size == 0)
813 		return(ZERO_LENGTH_PTR);
814 
815 	/* Capture global flags */
816 	flags |= g_malloc_flags;
817 
818 	/*
819 	 * Handle large allocations directly.  There should not be very many
820 	 * of these so performance is not a big issue.
821 	 *
822 	 * The backend allocator is pretty nasty on a SMP system.   Use the
823 	 * slab allocator for one and two page-sized chunks even though we
824 	 * lose some efficiency.
825 	 */
826 	if (size >= ZoneLimit ||
827 	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
828 		bigalloc_t big;
829 		bigalloc_t *bigp;
830 
831 		/*
832 		 * Page-align and cache-color in case of virtually indexed
833 		 * physically tagged L1 caches (aka SandyBridge).  No sweat
834 		 * otherwise, so just do it.
835 		 */
836 		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
837 		if ((size & 8191) == 0)
838 			size += 4096;
839 
840 		chunk = _vmem_alloc(size, PAGE_SIZE, flags);
841 		if (chunk == NULL)
842 			return(NULL);
843 
844 		big = _slaballoc(sizeof(struct bigalloc), 0);
845 		if (big == NULL) {
846 			_vmem_free(chunk, size);
847 			return(NULL);
848 		}
849 		bigp = bigalloc_lock(chunk);
850 		big->base = chunk;
851 		big->bytes = size;
852 		big->next = *bigp;
853 		*bigp = big;
854 		bigalloc_unlock(chunk);
855 
856 		return(chunk);
857 	}
858 
859 	/* Compute allocation zone; zoneindex will panic on excessive sizes */
860 	zi = zoneindex(&size, &chunking);
861 	MASSERT(zi < NZONES);
862 
863 	obj = mtmagazine_alloc(zi);
864 	if (obj != NULL) {
865 		if (flags & SAFLAG_ZERO)
866 			bzero(obj, size);
867 		return (obj);
868 	}
869 
870 	slgd = &SLGlobalData;
871 	slgd_lock(slgd);
872 
873 	/*
874 	 * Attempt to allocate out of an existing zone.  If all zones are
875 	 * exhausted pull one off the free list or allocate a new one.
876 	 */
877 	if ((z = slgd->ZoneAry[zi]) == NULL) {
878 		z = zone_alloc(flags);
879 		if (z == NULL)
880 			goto fail;
881 
882 		/*
883 		 * How big is the base structure?
884 		 */
885 #if defined(INVARIANTS)
886 		/*
887 		 * Make room for z_Bitmap.  An exact calculation is
888 		 * somewhat more complicated so don't make an exact
889 		 * calculation.
890 		 */
891 		off = offsetof(struct slzone,
892 				z_Bitmap[(ZoneSize / size + 31) / 32]);
893 		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
894 #else
895 		off = sizeof(struct slzone);
896 #endif
897 
898 		/*
899 		 * Align the storage in the zone based on the chunking.
900 		 *
901 		 * Guarantee power-of-2 alignment for power-of-2-sized
902 		 * chunks.  Otherwise align based on the chunking size
903 		 * (typically 8 or 16 bytes for small allocations).
904 		 *
905 		 * NOTE: Allocations >= ZoneLimit are governed by the
906 		 * bigalloc code and typically only guarantee page-alignment.
907 		 *
908 		 * Set initial conditions for UIndex near the zone header
909 		 * to reduce unecessary page faults, vs semi-randomization
910 		 * to improve L1 cache saturation.
911 		 */
912 		if ((size | (size - 1)) + 1 == (size << 1))
913 			off = (off + size - 1) & ~(size - 1);
914 		else
915 			off = (off + chunking - 1) & ~(chunking - 1);
916 		z->z_Magic = ZALLOC_SLAB_MAGIC;
917 		z->z_ZoneIndex = zi;
918 		z->z_NMax = (ZoneSize - off) / size;
919 		z->z_NFree = z->z_NMax;
920 		z->z_BasePtr = (char *)z + off;
921 		z->z_UIndex = z->z_UEndIndex = 0;
922 		z->z_ChunkSize = size;
923 		z->z_FirstFreePg = ZonePageCount;
924 		z->z_Next = slgd->ZoneAry[zi];
925 		slgd->ZoneAry[zi] = z;
926 		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
927 			flags &= ~SAFLAG_ZERO;	/* already zero'd */
928 			flags |= SAFLAG_PASSIVE;
929 		}
930 
931 		/*
932 		 * Slide the base index for initial allocations out of the
933 		 * next zone we create so we do not over-weight the lower
934 		 * part of the cpu memory caches.
935 		 */
936 		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
937 					& (ZALLOC_MAX_ZONE_SIZE - 1);
938 	}
939 
940 	/*
941 	 * Ok, we have a zone from which at least one chunk is available.
942 	 *
943 	 * Remove us from the ZoneAry[] when we become empty
944 	 */
945 	MASSERT(z->z_NFree > 0);
946 
947 	if (--z->z_NFree == 0) {
948 		slgd->ZoneAry[zi] = z->z_Next;
949 		z->z_Next = NULL;
950 	}
951 
952 	/*
953 	 * Locate a chunk in a free page.  This attempts to localize
954 	 * reallocations into earlier pages without us having to sort
955 	 * the chunk list.  A chunk may still overlap a page boundary.
956 	 */
957 	while (z->z_FirstFreePg < ZonePageCount) {
958 		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
959 #ifdef DIAGNOSTIC
960 			/*
961 			 * Diagnostic: c_Next is not total garbage.
962 			 */
963 			MASSERT(chunk->c_Next == NULL ||
964 			    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
965 			    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
966 #endif
967 #ifdef INVARIANTS
968 			chunk_mark_allocated(z, chunk);
969 #endif
970 			MASSERT((uintptr_t)chunk & ZoneMask);
971 			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
972 			goto done;
973 		}
974 		++z->z_FirstFreePg;
975 	}
976 
977 	/*
978 	 * No chunks are available but NFree said we had some memory,
979 	 * so it must be available in the never-before-used-memory
980 	 * area governed by UIndex.  The consequences are very
981 	 * serious if our zone got corrupted so we use an explicit
982 	 * panic rather then a KASSERT.
983 	 */
984 	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
985 
986 	if (++z->z_UIndex == z->z_NMax)
987 		z->z_UIndex = 0;
988 	if (z->z_UIndex == z->z_UEndIndex) {
989 		if (z->z_NFree != 0)
990 			_mpanic("slaballoc: corrupted zone");
991 	}
992 
993 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
994 		flags &= ~SAFLAG_ZERO;
995 		flags |= SAFLAG_PASSIVE;
996 	}
997 #if defined(INVARIANTS)
998 	chunk_mark_allocated(z, chunk);
999 #endif
1000 
1001 done:
1002 	slgd_unlock(slgd);
1003 	if (flags & SAFLAG_ZERO) {
1004 		bzero(chunk, size);
1005 #ifdef INVARIANTS
1006 	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
1007 		if (use_malloc_pattern) {
1008 			for (i = 0; i < size; i += sizeof(int)) {
1009 				*(int *)((char *)chunk + i) = -1;
1010 			}
1011 		}
1012 		/* avoid accidental double-free check */
1013 		chunk->c_Next = (void *)-1;
1014 #endif
1015 	}
1016 	return(chunk);
1017 fail:
1018 	slgd_unlock(slgd);
1019 	return(NULL);
1020 }
1021 
1022 /*
1023  * Reallocate memory within the chunk
1024  */
1025 static void *
1026 _slabrealloc(void *ptr, size_t size)
1027 {
1028 	bigalloc_t *bigp;
1029 	void *nptr;
1030 	slzone_t z;
1031 	size_t chunking;
1032 
1033 	if (ptr == NULL || ptr == ZERO_LENGTH_PTR) {
1034 		return(_slaballoc(size, 0));
1035 	}
1036 
1037 	if (size == 0) {
1038 		free(ptr);
1039 		return(ZERO_LENGTH_PTR);
1040 	}
1041 
1042 	/*
1043 	 * Handle oversized allocations.
1044 	 */
1045 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1046 		bigalloc_t big;
1047 		size_t bigbytes;
1048 
1049 		while ((big = *bigp) != NULL) {
1050 			if (big->base == ptr) {
1051 				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1052 				bigbytes = big->bytes;
1053 				if (bigbytes == size) {
1054 					bigalloc_unlock(ptr);
1055 					return(ptr);
1056 				}
1057 				*bigp = big->next;
1058 				bigalloc_unlock(ptr);
1059 				if ((nptr = _slaballoc(size, 0)) == NULL) {
1060 					/* Relink block */
1061 					bigp = bigalloc_lock(ptr);
1062 					big->next = *bigp;
1063 					*bigp = big;
1064 					bigalloc_unlock(ptr);
1065 					return(NULL);
1066 				}
1067 				if (size > bigbytes)
1068 					size = bigbytes;
1069 				bcopy(ptr, nptr, size);
1070 				_slabfree(ptr, FASTSLABREALLOC, &big);
1071 				return(nptr);
1072 			}
1073 			bigp = &big->next;
1074 		}
1075 		bigalloc_unlock(ptr);
1076 	}
1077 
1078 	/*
1079 	 * Get the original allocation's zone.  If the new request winds
1080 	 * up using the same chunk size we do not have to do anything.
1081 	 *
1082 	 * NOTE: We don't have to lock the globaldata here, the fields we
1083 	 * access here will not change at least as long as we have control
1084 	 * over the allocation.
1085 	 */
1086 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1087 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1088 
1089 	/*
1090 	 * Use zoneindex() to chunk-align the new size, as long as the
1091 	 * new size is not too large.
1092 	 */
1093 	if (size < ZoneLimit) {
1094 		zoneindex(&size, &chunking);
1095 		if (z->z_ChunkSize == size) {
1096 			return(ptr);
1097 		}
1098 	}
1099 
1100 	/*
1101 	 * Allocate memory for the new request size and copy as appropriate.
1102 	 */
1103 	if ((nptr = _slaballoc(size, 0)) != NULL) {
1104 		if (size > z->z_ChunkSize)
1105 			size = z->z_ChunkSize;
1106 		bcopy(ptr, nptr, size);
1107 		_slabfree(ptr, 0, NULL);
1108 	}
1109 
1110 	return(nptr);
1111 }
1112 
1113 /*
1114  * free (SLAB ALLOCATOR)
1115  *
1116  * Free a memory block previously allocated by malloc.  Note that we do not
1117  * attempt to uplodate ks_loosememuse as MP races could prevent us from
1118  * checking memory limits in malloc.
1119  *
1120  * flags:
1121  *	FASTSLABREALLOC		Fast call from realloc, *rbigp already
1122  *				unlinked.
1123  *
1124  * MPSAFE
1125  */
1126 static void
1127 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1128 {
1129 	slzone_t z;
1130 	slchunk_t chunk;
1131 	bigalloc_t big;
1132 	bigalloc_t *bigp;
1133 	slglobaldata_t slgd;
1134 	size_t size;
1135 	int zi;
1136 	int pgno;
1137 
1138 	/* Fast realloc path for big allocations */
1139 	if (flags & FASTSLABREALLOC) {
1140 		big = *rbigp;
1141 		goto fastslabrealloc;
1142 	}
1143 
1144 	/*
1145 	 * Handle NULL frees and special 0-byte allocations
1146 	 */
1147 	if (ptr == NULL)
1148 		return;
1149 	if (ptr == ZERO_LENGTH_PTR)
1150 		return;
1151 
1152 	/*
1153 	 * Handle oversized allocations.
1154 	 */
1155 	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1156 		while ((big = *bigp) != NULL) {
1157 			if (big->base == ptr) {
1158 				*bigp = big->next;
1159 				bigalloc_unlock(ptr);
1160 fastslabrealloc:
1161 				size = big->bytes;
1162 				_slabfree(big, 0, NULL);
1163 #ifdef INVARIANTS
1164 				MASSERT(sizeof(weirdary) <= size);
1165 				bcopy(weirdary, ptr, sizeof(weirdary));
1166 #endif
1167 				_vmem_free(ptr, size);
1168 				return;
1169 			}
1170 			bigp = &big->next;
1171 		}
1172 		bigalloc_unlock(ptr);
1173 	}
1174 
1175 	/*
1176 	 * Zone case.  Figure out the zone based on the fact that it is
1177 	 * ZoneSize aligned.
1178 	 */
1179 	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1180 	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1181 
1182 	size = z->z_ChunkSize;
1183 	zi = z->z_ZoneIndex;
1184 
1185 	if (g_malloc_flags & SAFLAG_ZERO)
1186 		bzero(ptr, size);
1187 
1188 	if (mtmagazine_free(zi, ptr) == 0)
1189 		return;
1190 
1191 	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1192 	chunk = ptr;
1193 	slgd = &SLGlobalData;
1194 	slgd_lock(slgd);
1195 
1196 #ifdef INVARIANTS
1197 	/*
1198 	 * Attempt to detect a double-free.  To reduce overhead we only check
1199 	 * if there appears to be link pointer at the base of the data.
1200 	 */
1201 	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1202 		slchunk_t scan;
1203 
1204 		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1205 			if (scan == chunk)
1206 				_mpanic("Double free at %p", chunk);
1207 		}
1208 	}
1209 	chunk_mark_free(z, chunk);
1210 #endif
1211 
1212 	/*
1213 	 * Put weird data into the memory to detect modifications after
1214 	 * freeing, illegal pointer use after freeing (we should fault on
1215 	 * the odd address), and so forth.
1216 	 */
1217 #ifdef INVARIANTS
1218 	if (z->z_ChunkSize < sizeof(weirdary))
1219 		bcopy(weirdary, chunk, z->z_ChunkSize);
1220 	else
1221 		bcopy(weirdary, chunk, sizeof(weirdary));
1222 #endif
1223 
1224 	/*
1225 	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1226 	 * z_FirstFreePg.
1227 	 */
1228 	chunk->c_Next = z->z_PageAry[pgno];
1229 	z->z_PageAry[pgno] = chunk;
1230 	if (z->z_FirstFreePg > pgno)
1231 		z->z_FirstFreePg = pgno;
1232 
1233 	/*
1234 	 * Bump the number of free chunks.  If it becomes non-zero the zone
1235 	 * must be added back onto the appropriate list.
1236 	 */
1237 	if (z->z_NFree++ == 0) {
1238 		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1239 		slgd->ZoneAry[z->z_ZoneIndex] = z;
1240 	}
1241 
1242 	/*
1243 	 * If the zone becomes totally free then release it.
1244 	 */
1245 	if (z->z_NFree == z->z_NMax) {
1246 		slzone_t *pz;
1247 
1248 		pz = &slgd->ZoneAry[z->z_ZoneIndex];
1249 		while (z != *pz)
1250 			pz = &(*pz)->z_Next;
1251 		*pz = z->z_Next;
1252 		z->z_Magic = -1;
1253 		z->z_Next = NULL;
1254 		zone_free(z);
1255 		/* slgd lock released */
1256 		return;
1257 	}
1258 	slgd_unlock(slgd);
1259 }
1260 
1261 #if defined(INVARIANTS)
1262 /*
1263  * Helper routines for sanity checks
1264  */
1265 static
1266 void
1267 chunk_mark_allocated(slzone_t z, void *chunk)
1268 {
1269 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1270 	__uint32_t *bitptr;
1271 
1272 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1273 	bitptr = &z->z_Bitmap[bitdex >> 5];
1274 	bitdex &= 31;
1275 	MASSERT((*bitptr & (1 << bitdex)) == 0);
1276 	*bitptr |= 1 << bitdex;
1277 }
1278 
1279 static
1280 void
1281 chunk_mark_free(slzone_t z, void *chunk)
1282 {
1283 	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1284 	__uint32_t *bitptr;
1285 
1286 	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1287 	bitptr = &z->z_Bitmap[bitdex >> 5];
1288 	bitdex &= 31;
1289 	MASSERT((*bitptr & (1 << bitdex)) != 0);
1290 	*bitptr &= ~(1 << bitdex);
1291 }
1292 
1293 #endif
1294 
1295 /*
1296  * Allocate and return a magazine.  NULL is returned and *burst is adjusted
1297  * if the magazine is empty.
1298  */
1299 static __inline void *
1300 magazine_alloc(struct magazine *mp, int *burst)
1301 {
1302 	void *obj;
1303 
1304 	if (mp == NULL)
1305 		return(NULL);
1306 	if (MAGAZINE_NOTEMPTY(mp)) {
1307 		obj = mp->objects[--mp->rounds];
1308 		return(obj);
1309 	}
1310 
1311 	/*
1312 	 * Return burst factor to caller along with NULL
1313 	 */
1314 	if ((mp->flags & M_BURST) && (burst != NULL)) {
1315 		*burst = mp->burst_factor;
1316 	}
1317 	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1318 	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1319 	    (burst != NULL)) {
1320 		mp->burst_factor -= M_BURST_NSCALE;
1321 		if (mp->burst_factor <= 1) {
1322 			mp->burst_factor = 1;
1323 			mp->flags &= ~(M_BURST);
1324 			mp->flags &= ~(M_BURST_EARLY);
1325 		}
1326 	}
1327 	return (NULL);
1328 }
1329 
1330 static __inline int
1331 magazine_free(struct magazine *mp, void *p)
1332 {
1333 	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1334 		mp->objects[mp->rounds++] = p;
1335 		return 0;
1336 	}
1337 
1338 	return -1;
1339 }
1340 
1341 static void *
1342 mtmagazine_alloc(int zi)
1343 {
1344 	thr_mags *tp;
1345 	struct magazine *mp, *emptymag;
1346 	magazine_depot *d;
1347 	void *obj;
1348 
1349 	/*
1350 	 * Do not try to access per-thread magazines while the mtmagazine
1351 	 * is being initialized or destroyed.
1352 	 */
1353 	tp = &thread_mags;
1354 	if (tp->init < 0)
1355 		return(NULL);
1356 
1357 	/*
1358 	 * Primary per-thread allocation loop
1359 	 */
1360 	for (;;) {
1361 		/*
1362 		 * If the loaded magazine has rounds, allocate and return
1363 		 */
1364 		mp = tp->mags[zi].loaded;
1365 		obj = magazine_alloc(mp, NULL);
1366 		if (obj)
1367 			break;
1368 
1369 		/*
1370 		 * If the prev magazine is full, swap with the loaded
1371 		 * magazine and retry.
1372 		 */
1373 		mp = tp->mags[zi].prev;
1374 		if (mp && MAGAZINE_FULL(mp)) {
1375 			MASSERT(mp->rounds != 0);
1376 			swap_mags(&tp->mags[zi]);	/* prev now empty */
1377 			continue;
1378 		}
1379 
1380 		/*
1381 		 * Try to get a full magazine from the depot.  Cycle
1382 		 * through depot(full)->loaded->prev->depot(empty).
1383 		 * Retry if a full magazine was available from the depot.
1384 		 *
1385 		 * Return NULL (caller will fall through) if no magazines
1386 		 * can be found anywhere.
1387 		 */
1388 		d = &depots[zi];
1389 		depot_lock(d);
1390 		emptymag = tp->mags[zi].prev;
1391 		if (emptymag)
1392 			SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
1393 		tp->mags[zi].prev = tp->mags[zi].loaded;
1394 		mp = SLIST_FIRST(&d->full);	/* loaded magazine */
1395 		tp->mags[zi].loaded = mp;
1396 		if (mp) {
1397 			SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1398 			MASSERT(MAGAZINE_NOTEMPTY(mp));
1399 			depot_unlock(d);
1400 			continue;
1401 		}
1402 		depot_unlock(d);
1403 		break;
1404 	}
1405 
1406 	return (obj);
1407 }
1408 
1409 static int
1410 mtmagazine_free(int zi, void *ptr)
1411 {
1412 	thr_mags *tp;
1413 	struct magazine *mp, *loadedmag;
1414 	magazine_depot *d;
1415 	int rc = -1;
1416 
1417 	/*
1418 	 * Do not try to access per-thread magazines while the mtmagazine
1419 	 * is being initialized or destroyed.
1420 	 */
1421 	tp = &thread_mags;
1422 	if (tp->init < 0)
1423 		return(-1);
1424 
1425 	/*
1426 	 * Primary per-thread freeing loop
1427 	 */
1428 	for (;;) {
1429 		/*
1430 		 * Make sure a new magazine is available in case we have
1431 		 * to use it.  Staging the newmag allows us to avoid
1432 		 * some locking/reentrancy complexity.
1433 		 *
1434 		 * Temporarily disable the per-thread caches for this
1435 		 * allocation to avoid reentrancy and/or to avoid a
1436 		 * stack overflow if the [zi] happens to be the same that
1437 		 * would be used to allocate the new magazine.
1438 		 */
1439 		if (tp->newmag == NULL) {
1440 			tp->init = -1;
1441 			tp->newmag = _slaballoc(sizeof(struct magazine),
1442 						SAFLAG_ZERO);
1443 			tp->init = 1;
1444 			if (tp->newmag == NULL) {
1445 				rc = -1;
1446 				break;
1447 			}
1448 		}
1449 
1450 		/*
1451 		 * If the loaded magazine has space, free directly to it
1452 		 */
1453 		rc = magazine_free(tp->mags[zi].loaded, ptr);
1454 		if (rc == 0)
1455 			break;
1456 
1457 		/*
1458 		 * If the prev magazine is empty, swap with the loaded
1459 		 * magazine and retry.
1460 		 */
1461 		mp = tp->mags[zi].prev;
1462 		if (mp && MAGAZINE_EMPTY(mp)) {
1463 			MASSERT(mp->rounds == 0);
1464 			swap_mags(&tp->mags[zi]);	/* prev now full */
1465 			continue;
1466 		}
1467 
1468 		/*
1469 		 * Try to get an empty magazine from the depot.  Cycle
1470 		 * through depot(empty)->loaded->prev->depot(full).
1471 		 * Retry if an empty magazine was available from the depot.
1472 		 */
1473 		d = &depots[zi];
1474 		depot_lock(d);
1475 
1476 		if ((loadedmag = tp->mags[zi].prev) != NULL)
1477 			SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
1478 		tp->mags[zi].prev = tp->mags[zi].loaded;
1479 		mp = SLIST_FIRST(&d->empty);
1480 		if (mp) {
1481 			tp->mags[zi].loaded = mp;
1482 			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1483 			MASSERT(MAGAZINE_NOTFULL(mp));
1484 		} else {
1485 			mp = tp->newmag;
1486 			tp->newmag = NULL;
1487 			mp->capacity = M_MAX_ROUNDS;
1488 			mp->rounds = 0;
1489 			mp->flags = 0;
1490 			tp->mags[zi].loaded = mp;
1491 		}
1492 		depot_unlock(d);
1493 	}
1494 
1495 	return rc;
1496 }
1497 
1498 static void
1499 mtmagazine_init(void)
1500 {
1501 	int error;
1502 
1503 	error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1504 	if (error)
1505 		abort();
1506 }
1507 
1508 /*
1509  * This function is only used by the thread exit destructor
1510  */
1511 static void
1512 mtmagazine_drain(struct magazine *mp)
1513 {
1514 	void *obj;
1515 
1516 	while (MAGAZINE_NOTEMPTY(mp)) {
1517 		obj = magazine_alloc(mp, NULL);
1518 		_slabfree(obj, 0, NULL);
1519 	}
1520 }
1521 
1522 /*
1523  * mtmagazine_destructor()
1524  *
1525  * When a thread exits, we reclaim all its resources; all its magazines are
1526  * drained and the structures are freed.
1527  *
1528  * WARNING!  The destructor can be called multiple times if the larger user
1529  *	     program has its own destructors which run after ours which
1530  *	     allocate or free memory.
1531  */
1532 static void
1533 mtmagazine_destructor(void *thrp)
1534 {
1535 	thr_mags *tp = thrp;
1536 	struct magazine *mp;
1537 	int i;
1538 
1539 	/*
1540 	 * Prevent further use of mtmagazines while we are destructing
1541 	 * them, as well as for any destructors which are run after us
1542 	 * prior to the thread actually being destroyed.
1543 	 */
1544 	tp->init = -1;
1545 
1546 	for (i = 0; i < NZONES; i++) {
1547 		mp = tp->mags[i].loaded;
1548 		tp->mags[i].loaded = NULL;
1549 		if (mp) {
1550 			if (MAGAZINE_NOTEMPTY(mp))
1551 				mtmagazine_drain(mp);
1552 			_slabfree(mp, 0, NULL);
1553 		}
1554 
1555 		mp = tp->mags[i].prev;
1556 		tp->mags[i].prev = NULL;
1557 		if (mp) {
1558 			if (MAGAZINE_NOTEMPTY(mp))
1559 				mtmagazine_drain(mp);
1560 			_slabfree(mp, 0, NULL);
1561 		}
1562 	}
1563 
1564 	if (tp->newmag) {
1565 		mp = tp->newmag;
1566 		tp->newmag = NULL;
1567 		_slabfree(mp, 0, NULL);
1568 	}
1569 }
1570 
1571 /*
1572  * zone_alloc()
1573  *
1574  * Attempt to allocate a zone from the zone magazine; the zone magazine has
1575  * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1576  */
1577 static slzone_t
1578 zone_alloc(int flags)
1579 {
1580 	slglobaldata_t slgd = &SLGlobalData;
1581 	int burst = 1;
1582 	int i, j;
1583 	slzone_t z;
1584 
1585 	zone_magazine_lock();
1586 	slgd_unlock(slgd);
1587 
1588 	z = magazine_alloc(&zone_magazine, &burst);
1589 	if (z == NULL && burst == 1) {
1590 		zone_magazine_unlock();
1591 		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1592 	} else if (z == NULL) {
1593 		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1594 		if (z) {
1595 			for (i = 1; i < burst; i++) {
1596 				j = magazine_free(&zone_magazine,
1597 						  (char *) z + (ZoneSize * i));
1598 				MASSERT(j == 0);
1599 			}
1600 		}
1601 		zone_magazine_unlock();
1602 	} else {
1603 		z->z_Flags |= SLZF_UNOTZEROD;
1604 		zone_magazine_unlock();
1605 	}
1606 	slgd_lock(slgd);
1607 	return z;
1608 }
1609 
1610 /*
1611  * zone_free()
1612  *
1613  * Release a zone and unlock the slgd lock.
1614  */
1615 static void
1616 zone_free(void *z)
1617 {
1618 	slglobaldata_t slgd = &SLGlobalData;
1619 	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1620 	int i, j;
1621 
1622 	zone_magazine_lock();
1623 	slgd_unlock(slgd);
1624 
1625 	bzero(z, sizeof(struct slzone));
1626 
1627 	if (opt_madvise)
1628 		madvise(z, ZoneSize, MADV_FREE);
1629 
1630 	i = magazine_free(&zone_magazine, z);
1631 
1632 	/*
1633 	 * If we failed to free, collect excess magazines; release the zone
1634 	 * magazine lock, and then free to the system via _vmem_free. Re-enable
1635 	 * BURST mode for the magazine.
1636 	 */
1637 	if (i == -1) {
1638 		j = zone_magazine.rounds - zone_magazine.low_factor;
1639 		for (i = 0; i < j; i++) {
1640 			excess[i] = magazine_alloc(&zone_magazine, NULL);
1641 			MASSERT(excess[i] !=  NULL);
1642 		}
1643 
1644 		zone_magazine_unlock();
1645 
1646 		for (i = 0; i < j; i++)
1647 			_vmem_free(excess[i], ZoneSize);
1648 
1649 		_vmem_free(z, ZoneSize);
1650 	} else {
1651 		zone_magazine_unlock();
1652 	}
1653 }
1654 
1655 /*
1656  * _vmem_alloc()
1657  *
1658  *	Directly map memory in PAGE_SIZE'd chunks with the specified
1659  *	alignment.
1660  *
1661  *	Alignment must be a multiple of PAGE_SIZE.
1662  *
1663  *	Size must be >= alignment.
1664  */
1665 static void *
1666 _vmem_alloc(size_t size, size_t align, int flags)
1667 {
1668 	char *addr;
1669 	char *save;
1670 	size_t excess;
1671 
1672 	/*
1673 	 * Map anonymous private memory.
1674 	 */
1675 	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1676 		    MAP_PRIVATE|MAP_ANON, -1, 0);
1677 	if (addr == MAP_FAILED)
1678 		return(NULL);
1679 
1680 	/*
1681 	 * Check alignment.  The misaligned offset is also the excess
1682 	 * amount.  If misaligned unmap the excess so we have a chance of
1683 	 * mapping at the next alignment point and recursively try again.
1684 	 *
1685 	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
1686 	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
1687 	 *   xxxxxxxxx				final excess calculation
1688 	 *   ^ returned address
1689 	 */
1690 	excess = (uintptr_t)addr & (align - 1);
1691 
1692 	if (excess) {
1693 		excess = align - excess;
1694 		save = addr;
1695 
1696 		munmap(save + excess, size - excess);
1697 		addr = _vmem_alloc(size, align, flags);
1698 		munmap(save, excess);
1699 	}
1700 	return((void *)addr);
1701 }
1702 
1703 /*
1704  * _vmem_free()
1705  *
1706  *	Free a chunk of memory allocated with _vmem_alloc()
1707  */
1708 static void
1709 _vmem_free(void *ptr, size_t size)
1710 {
1711 	munmap(ptr, size);
1712 }
1713 
1714 /*
1715  * Panic on fatal conditions
1716  */
1717 static void
1718 _mpanic(const char *ctl, ...)
1719 {
1720 	va_list va;
1721 
1722 	if (malloc_panic == 0) {
1723 		malloc_panic = 1;
1724 		va_start(va, ctl);
1725 		vfprintf(stderr, ctl, va);
1726 		fprintf(stderr, "\n");
1727 		fflush(stderr);
1728 		va_end(va);
1729 	}
1730 	abort();
1731 }
1732