xref: /freebsd/contrib/jemalloc/src/arena.c (revision d184218c)
1 #define	JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 
4 /******************************************************************************/
5 /* Data. */
6 
7 ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
8 arena_bin_info_t	arena_bin_info[NBINS];
9 
10 JEMALLOC_ALIGNED(CACHELINE)
11 const uint8_t	small_size2bin[] = {
12 #define	S2B_8(i)	i,
13 #define	S2B_16(i)	S2B_8(i) S2B_8(i)
14 #define	S2B_32(i)	S2B_16(i) S2B_16(i)
15 #define	S2B_64(i)	S2B_32(i) S2B_32(i)
16 #define	S2B_128(i)	S2B_64(i) S2B_64(i)
17 #define	S2B_256(i)	S2B_128(i) S2B_128(i)
18 #define	S2B_512(i)	S2B_256(i) S2B_256(i)
19 #define	S2B_1024(i)	S2B_512(i) S2B_512(i)
20 #define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
21 #define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
22 #define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
23 #define	SIZE_CLASS(bin, delta, size)					\
24 	S2B_##delta(bin)
25 	SIZE_CLASSES
26 #undef S2B_8
27 #undef S2B_16
28 #undef S2B_32
29 #undef S2B_64
30 #undef S2B_128
31 #undef S2B_256
32 #undef S2B_512
33 #undef S2B_1024
34 #undef S2B_2048
35 #undef S2B_4096
36 #undef S2B_8192
37 #undef SIZE_CLASS
38 };
39 
40 /******************************************************************************/
41 /* Function prototypes for non-inline static functions. */
42 
43 static void	arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
44     size_t pageind, size_t npages, bool maybe_adjac_pred,
45     bool maybe_adjac_succ);
46 static void	arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
47     size_t pageind, size_t npages, bool maybe_adjac_pred,
48     bool maybe_adjac_succ);
49 static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
50     bool large, size_t binind, bool zero);
51 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
52 static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
53 static arena_run_t	*arena_run_alloc_helper(arena_t *arena, size_t size,
54     bool large, size_t binind, bool zero);
55 static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
56     size_t binind, bool zero);
57 static arena_chunk_t	*chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
58     arena_chunk_t *chunk, void *arg);
59 static void	arena_purge(arena_t *arena, bool all);
60 static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
61     bool cleaned);
62 static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
63     arena_run_t *run, size_t oldsize, size_t newsize);
64 static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
65     arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
66 static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
67 static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
68 static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
69 static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
70 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
71 static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
72 static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
73     arena_bin_t *bin);
74 static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
75     arena_run_t *run, arena_bin_t *bin);
76 static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
77     arena_run_t *run, arena_bin_t *bin);
78 static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
79     void *ptr, size_t oldsize, size_t size);
80 static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
81     void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
82 static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
83     size_t extra, bool zero);
84 static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
85     size_t min_run_size);
86 static void	bin_info_init(void);
87 
88 /******************************************************************************/
89 
90 static inline int
91 arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
92 {
93 	uintptr_t a_mapelm = (uintptr_t)a;
94 	uintptr_t b_mapelm = (uintptr_t)b;
95 
96 	assert(a != NULL);
97 	assert(b != NULL);
98 
99 	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
100 }
101 
102 /* Generate red-black tree functions. */
103 rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
104     u.rb_link, arena_run_comp)
105 
106 static inline int
107 arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
108 {
109 	int ret;
110 	size_t a_size = a->bits & ~PAGE_MASK;
111 	size_t b_size = b->bits & ~PAGE_MASK;
112 
113 	ret = (a_size > b_size) - (a_size < b_size);
114 	if (ret == 0) {
115 		uintptr_t a_mapelm, b_mapelm;
116 
117 		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
118 			a_mapelm = (uintptr_t)a;
119 		else {
120 			/*
121 			 * Treat keys as though they are lower than anything
122 			 * else.
123 			 */
124 			a_mapelm = 0;
125 		}
126 		b_mapelm = (uintptr_t)b;
127 
128 		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
129 	}
130 
131 	return (ret);
132 }
133 
134 /* Generate red-black tree functions. */
135 rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
136     u.rb_link, arena_avail_comp)
137 
138 static inline int
139 arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
140 {
141 
142 	assert(a != NULL);
143 	assert(b != NULL);
144 
145 	/*
146 	 * Short-circuit for self comparison.  The following comparison code
147 	 * would come to the same result, but at the cost of executing the slow
148 	 * path.
149 	 */
150 	if (a == b)
151 		return (0);
152 
153 	/*
154 	 * Order such that chunks with higher fragmentation are "less than"
155 	 * those with lower fragmentation -- purging order is from "least" to
156 	 * "greatest".  Fragmentation is measured as:
157 	 *
158 	 *     mean current avail run size
159 	 *   --------------------------------
160 	 *   mean defragmented avail run size
161 	 *
162 	 *            navail
163 	 *         -----------
164 	 *         nruns_avail           nruns_avail-nruns_adjac
165 	 * = ========================= = -----------------------
166 	 *            navail                  nruns_avail
167 	 *    -----------------------
168 	 *    nruns_avail-nruns_adjac
169 	 *
170 	 * The following code multiplies away the denominator prior to
171 	 * comparison, in order to avoid division.
172 	 *
173 	 */
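	/*
	 * Illustrative values for the cross-multiplied comparison: with
	 * a->nruns_avail == 4, a->nruns_adjac == 2 (ratio 2/4) and
	 * b->nruns_avail == 5, b->nruns_adjac == 1 (ratio 4/5),
	 * a_val == 2*5 == 10 and b_val == 4*4 == 16, so a_val < b_val,
	 * just as 2/4 < 4/5, and the comparator returns 1.
	 */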
174 	{
175 		size_t a_val = (a->nruns_avail - a->nruns_adjac) *
176 		    b->nruns_avail;
177 		size_t b_val = (b->nruns_avail - b->nruns_adjac) *
178 		    a->nruns_avail;
179 
180 		if (a_val < b_val)
181 			return (1);
182 		if (a_val > b_val)
183 			return (-1);
184 	}
185 	/*
186 	 * Break ties by chunk address.  For fragmented chunks, report lower
187 	 * addresses as "lower", so that fragmentation reduction happens first
188 	 * at lower addresses.  However, use the opposite ordering for
189 	 * unfragmented chunks, in order to increase the chances of
190 	 * re-allocating dirty runs.
191 	 */
192 	{
193 		uintptr_t a_chunk = (uintptr_t)a;
194 		uintptr_t b_chunk = (uintptr_t)b;
195 		int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
196 		if (a->nruns_adjac == 0) {
197 			assert(b->nruns_adjac == 0);
198 			ret = -ret;
199 		}
200 		return (ret);
201 	}
202 }
203 
204 /* Generate red-black tree functions. */
205 rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
206     dirty_link, arena_chunk_dirty_comp)
207 
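/*
 * The following adjacency predicates report whether the unallocated run that
 * starts at pageind (spanning npages for the successor case) abuts another
 * unallocated run.  Coalescing merges neighboring unallocated runs that share
 * a dirty state, so an unallocated neighbor necessarily differs in dirty
 * state (the asserts below check this); nruns_adjac tracks such adjacencies,
 * which are the ones purging can eliminate.
 */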
208 static inline bool
209 arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
210 {
211 	bool ret;
212 
213 	if (pageind-1 < map_bias)
214 		ret = false;
215 	else {
216 		ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
217 		assert(ret == false || arena_mapbits_dirty_get(chunk,
218 		    pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
219 	}
220 	return (ret);
221 }
222 
223 static inline bool
224 arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
225 {
226 	bool ret;
227 
228 	if (pageind+npages == chunk_npages)
229 		ret = false;
230 	else {
231 		assert(pageind+npages < chunk_npages);
232 		ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
233 		assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
234 		    != arena_mapbits_dirty_get(chunk, pageind+npages));
235 	}
236 	return (ret);
237 }
238 
239 static inline bool
240 arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
241 {
242 
243 	return (arena_avail_adjac_pred(chunk, pageind) ||
244 	    arena_avail_adjac_succ(chunk, pageind, npages));
245 }
246 
247 static void
248 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
249     size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
250 {
251 
252 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
253 	    LG_PAGE));
254 
255 	/*
256 	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
257 	 * removed and reinserted even if the run to be inserted is clean.
258 	 */
259 	if (chunk->ndirty != 0)
260 		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
261 
262 	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
263 		chunk->nruns_adjac++;
264 	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
265 		chunk->nruns_adjac++;
266 	chunk->nruns_avail++;
267 	assert(chunk->nruns_avail > chunk->nruns_adjac);
268 
269 	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
270 		arena->ndirty += npages;
271 		chunk->ndirty += npages;
272 	}
273 	if (chunk->ndirty != 0)
274 		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
275 
276 	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
277 	    pageind));
278 }
279 
280 static void
281 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
282     size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
283 {
284 
285 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
286 	    LG_PAGE));
287 
288 	/*
289 	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
290 	 * removed and reinserted even if the run to be removed is clean.
291 	 */
292 	if (chunk->ndirty != 0)
293 		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
294 
295 	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
296 		chunk->nruns_adjac--;
297 	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
298 		chunk->nruns_adjac--;
299 	chunk->nruns_avail--;
300 	assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
301 	    == 0 && chunk->nruns_adjac == 0));
302 
303 	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
304 		arena->ndirty -= npages;
305 		chunk->ndirty -= npages;
306 	}
307 	if (chunk->ndirty != 0)
308 		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
309 
310 	arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
311 	    pageind));
312 }
313 
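/*
 * Allocate one region from a small run: bitmap_sfu() finds and sets the
 * first unset bit in the run's allocation bitmap, and the region address is
 * computed from reg0_offset plus regind * reg_interval.
 */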
314 static inline void *
315 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
316 {
317 	void *ret;
318 	unsigned regind;
319 	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
320 	    (uintptr_t)bin_info->bitmap_offset);
321 
322 	assert(run->nfree > 0);
323 	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
324 
325 	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
326 	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
327 	    (uintptr_t)(bin_info->reg_interval * regind));
328 	run->nfree--;
329 	if (regind == run->nextind)
330 		run->nextind++;
331 	assert(regind < run->nextind);
332 	return (ret);
333 }
334 
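/*
 * Return a region to its small run: recover the bin's metadata from the
 * chunk map, convert ptr back to a region index, clear that bit in the run's
 * bitmap, and bump the free-region count.  The asserts catch interior and
 * unallocated pointers.
 */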
335 static inline void
336 arena_run_reg_dalloc(arena_run_t *run, void *ptr)
337 {
338 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
339 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
340 	size_t mapbits = arena_mapbits_get(chunk, pageind);
341 	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
342 	arena_bin_info_t *bin_info = &arena_bin_info[binind];
343 	unsigned regind = arena_run_regind(run, bin_info, ptr);
344 	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
345 	    (uintptr_t)bin_info->bitmap_offset);
346 
347 	assert(run->nfree < bin_info->nregs);
348 	/* Freeing an interior pointer can cause assertion failure. */
349 	assert(((uintptr_t)ptr - ((uintptr_t)run +
350 	    (uintptr_t)bin_info->reg0_offset)) %
351 	    (uintptr_t)bin_info->reg_interval == 0);
352 	assert((uintptr_t)ptr >= (uintptr_t)run +
353 	    (uintptr_t)bin_info->reg0_offset);
354 	/* Freeing an unallocated pointer can cause assertion failure. */
355 	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
356 
357 	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
358 	run->nfree++;
359 }
360 
361 static inline void
362 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
363 {
364 
365 	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
366 	    LG_PAGE)), (npages << LG_PAGE));
367 	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
368 	    (npages << LG_PAGE));
369 	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
370 	    LG_PAGE)), (npages << LG_PAGE));
371 }
372 
373 static inline void
374 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
375 {
376 	size_t i;
377 	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
378 
379 	VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
380 	    LG_PAGE)), PAGE);
381 	for (i = 0; i < PAGE / sizeof(size_t); i++)
382 		assert(p[i] == 0);
383 	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
384 	    LG_PAGE)), PAGE);
385 }
386 
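/*
 * Split the unallocated run that begins at run, allocating its first size
 * bytes: remove the run from runs_avail, account for the newly active pages,
 * re-insert any trailing remainder as a smaller unallocated run, and rewrite
 * the chunk map for the allocated pages.  Zeroing is handled here only for
 * large runs; the small case asserts zero == false.
 */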
387 static void
388 arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
389     size_t binind, bool zero)
390 {
391 	arena_chunk_t *chunk;
392 	size_t run_ind, total_pages, need_pages, rem_pages, i;
393 	size_t flag_dirty;
394 
395 	assert((large && binind == BININD_INVALID) || (large == false && binind
396 	    != BININD_INVALID));
397 
398 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
399 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
400 	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
401 	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
402 	    LG_PAGE;
403 	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
404 	    flag_dirty);
405 	need_pages = (size >> LG_PAGE);
406 	assert(need_pages > 0);
407 	assert(need_pages <= total_pages);
408 	rem_pages = total_pages - need_pages;
409 
410 	arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
411 	if (config_stats) {
412 		/*
413 		 * Update stats_cactive if nactive is crossing a chunk
414 		 * multiple.
415 		 */
416 		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
417 		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
418 		    LG_PAGE);
419 		if (cactive_diff != 0)
420 			stats_cactive_add(cactive_diff);
421 	}
422 	arena->nactive += need_pages;
423 
424 	/* Keep track of trailing unused pages for later use. */
425 	if (rem_pages > 0) {
426 		if (flag_dirty != 0) {
427 			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
428 			    (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
429 			arena_mapbits_unallocated_set(chunk,
430 			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
431 			    CHUNK_MAP_DIRTY);
432 		} else {
433 			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
434 			    (rem_pages << LG_PAGE),
435 			    arena_mapbits_unzeroed_get(chunk,
436 			    run_ind+need_pages));
437 			arena_mapbits_unallocated_set(chunk,
438 			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
439 			    arena_mapbits_unzeroed_get(chunk,
440 			    run_ind+total_pages-1));
441 		}
442 		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
443 		    false, true);
444 	}
445 
446 	/*
447 	 * Update the page map separately for large vs. small runs, since it is
448 	 * possible to avoid iteration for large mallocs.
449 	 */
450 	if (large) {
451 		if (zero) {
452 			if (flag_dirty == 0) {
453 				/*
454 				 * The run is clean, so some pages may be
455 				 * zeroed (i.e. never before touched).
456 				 */
457 				for (i = 0; i < need_pages; i++) {
458 					if (arena_mapbits_unzeroed_get(chunk,
459 					    run_ind+i) != 0) {
460 						arena_run_zero(chunk, run_ind+i,
461 						    1);
462 					} else if (config_debug) {
463 						arena_run_page_validate_zeroed(
464 						    chunk, run_ind+i);
465 					}
466 				}
467 			} else {
468 				/*
469 				 * The run is dirty, so all pages must be
470 				 * zeroed.
471 				 */
472 				arena_run_zero(chunk, run_ind, need_pages);
473 			}
474 		}
475 
476 		/*
477 		 * Set the last element first, in case the run only contains one
478 		 * page (i.e. both statements set the same element).
479 		 */
480 		arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
481 		    flag_dirty);
482 		arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
483 	} else {
484 		assert(zero == false);
485 		/*
486 		 * Propagate the dirty and unzeroed flags to the allocated
487 		 * small run, so that arena_dalloc_bin_run() has the ability to
488 		 * conditionally trim clean pages.
489 		 */
490 		arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
491 		/*
492 		 * The first page will always be dirtied during small run
493 		 * initialization, so a validation failure here would not
494 		 * actually cause an observable failure.
495 		 */
496 		if (config_debug && flag_dirty == 0 &&
497 		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
498 			arena_run_page_validate_zeroed(chunk, run_ind);
499 		for (i = 1; i < need_pages - 1; i++) {
500 			arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
501 			if (config_debug && flag_dirty == 0 &&
502 			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
503 				arena_run_page_validate_zeroed(chunk,
504 				    run_ind+i);
505 			}
506 		}
507 		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
508 		    need_pages-1, binind, flag_dirty);
509 		if (config_debug && flag_dirty == 0 &&
510 		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
511 		    0) {
512 			arena_run_page_validate_zeroed(chunk,
513 			    run_ind+need_pages-1);
514 		}
515 	}
516 }
517 
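/*
 * Obtain a chunk for this arena, preferring the cached spare.  When a new
 * chunk must be mapped, arena->lock is dropped around chunk_alloc(), and the
 * page map is then initialized to describe a single maximal unallocated run,
 * marked unzeroed unless chunk_alloc() returned zeroed memory.
 */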
518 static arena_chunk_t *
519 arena_chunk_alloc(arena_t *arena)
520 {
521 	arena_chunk_t *chunk;
522 	size_t i;
523 
524 	if (arena->spare != NULL) {
525 		chunk = arena->spare;
526 		arena->spare = NULL;
527 
528 		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
529 		assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
530 		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
531 		    arena_maxclass);
532 		assert(arena_mapbits_unallocated_size_get(chunk,
533 		    chunk_npages-1) == arena_maxclass);
534 		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
535 		    arena_mapbits_dirty_get(chunk, chunk_npages-1));
536 	} else {
537 		bool zero;
538 		size_t unzeroed;
539 
540 		zero = false;
541 		malloc_mutex_unlock(&arena->lock);
542 		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
543 		    false, &zero, arena->dss_prec);
544 		malloc_mutex_lock(&arena->lock);
545 		if (chunk == NULL)
546 			return (NULL);
547 		if (config_stats)
548 			arena->stats.mapped += chunksize;
549 
550 		chunk->arena = arena;
551 
552 		/*
553 		 * Claim that no pages are in use, since the header is merely
554 		 * overhead.
555 		 */
556 		chunk->ndirty = 0;
557 
558 		chunk->nruns_avail = 0;
559 		chunk->nruns_adjac = 0;
560 
561 		/*
562 		 * Initialize the map to contain one maximal free untouched run.
563 		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
564 		 * chunk.
565 		 */
566 		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
567 		arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
568 		    unzeroed);
569 		/*
570 		 * There is no need to initialize the internal page map entries
571 		 * unless the chunk is not zeroed.
572 		 */
573 		if (zero == false) {
574 			for (i = map_bias+1; i < chunk_npages-1; i++)
575 				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
576 		} else if (config_debug) {
577 			for (i = map_bias+1; i < chunk_npages-1; i++) {
578 				assert(arena_mapbits_unzeroed_get(chunk, i) ==
579 				    unzeroed);
580 			}
581 		}
582 		arena_mapbits_unallocated_set(chunk, chunk_npages-1,
583 		    arena_maxclass, unzeroed);
584 	}
585 
586 	/* Insert the run into the runs_avail tree. */
587 	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
588 	    false, false);
589 
590 	return (chunk);
591 }
592 
593 static void
594 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
595 {
596 	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
597 	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
598 	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
599 	    arena_maxclass);
600 	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
601 	    arena_maxclass);
602 	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
603 	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
604 
605 	/*
606 	 * Remove run from the runs_avail tree, so that the arena does not use
607 	 * it.
608 	 */
609 	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
610 	    false, false);
611 
612 	if (arena->spare != NULL) {
613 		arena_chunk_t *spare = arena->spare;
614 
615 		arena->spare = chunk;
616 		malloc_mutex_unlock(&arena->lock);
617 		chunk_dealloc((void *)spare, chunksize, true);
618 		malloc_mutex_lock(&arena->lock);
619 		if (config_stats)
620 			arena->stats.mapped -= chunksize;
621 	} else
622 		arena->spare = chunk;
623 }
624 
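/*
 * Best-fit search of runs_avail: find the smallest unallocated run of at
 * least size bytes (lowest address among equal sizes), split it, and return
 * it; return NULL if no such run exists.
 */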
625 static arena_run_t *
626 arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
627     bool zero)
628 {
629 	arena_run_t *run;
630 	arena_chunk_map_t *mapelm, key;
631 
632 	key.bits = size | CHUNK_MAP_KEY;
633 	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
634 	if (mapelm != NULL) {
635 		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
636 		size_t pageind = (((uintptr_t)mapelm -
637 		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
638 		    + map_bias;
639 
640 		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
641 		    LG_PAGE));
642 		arena_run_split(arena, run, size, large, binind, zero);
643 		return (run);
644 	}
645 
646 	return (NULL);
647 }
648 
649 static arena_run_t *
650 arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
651     bool zero)
652 {
653 	arena_chunk_t *chunk;
654 	arena_run_t *run;
655 
656 	assert(size <= arena_maxclass);
657 	assert((size & PAGE_MASK) == 0);
658 	assert((large && binind == BININD_INVALID) || (large == false && binind
659 	    != BININD_INVALID));
660 
661 	/* Search the arena's chunks for the lowest best fit. */
662 	run = arena_run_alloc_helper(arena, size, large, binind, zero);
663 	if (run != NULL)
664 		return (run);
665 
666 	/*
667 	 * No usable runs.  Create a new chunk from which to allocate the run.
668 	 */
669 	chunk = arena_chunk_alloc(arena);
670 	if (chunk != NULL) {
671 		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
672 		arena_run_split(arena, run, size, large, binind, zero);
673 		return (run);
674 	}
675 
676 	/*
677 	 * arena_chunk_alloc() failed, but another thread may have made
678 	 * sufficient memory available while this one dropped arena->lock in
679 	 * arena_chunk_alloc(), so search one more time.
680 	 */
681 	return (arena_run_alloc_helper(arena, size, large, binind, zero));
682 }
683 
684 static inline void
685 arena_maybe_purge(arena_t *arena)
686 {
687 	size_t npurgeable, threshold;
688 
689 	/* Don't purge if the option is disabled. */
690 	if (opt_lg_dirty_mult < 0)
691 		return;
692 	/* Don't purge if all dirty pages are already being purged. */
693 	if (arena->ndirty <= arena->npurgatory)
694 		return;
695 	npurgeable = arena->ndirty - arena->npurgatory;
696 	threshold = (arena->nactive >> opt_lg_dirty_mult);
697 	/*
698 	 * Don't purge unless the number of purgeable pages exceeds the
699 	 * threshold.
700 	 */
701 	if (npurgeable <= threshold)
702 		return;
703 
704 	arena_purge(arena, false);
705 }
706 
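/*
 * Purge dirty pages within chunk.  Candidate dirty runs (all of them, or
 * only those adjacent to clean runs when all is false) are temporarily
 * allocated so that they cannot change while arena->lock is dropped around
 * pages_purge(), then handed back to arena_run_dalloc() as cleaned runs.
 * Returns the number of pages purged.
 */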
707 static inline size_t
708 arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
709 {
710 	size_t npurged;
711 	ql_head(arena_chunk_map_t) mapelms;
712 	arena_chunk_map_t *mapelm;
713 	size_t pageind, npages;
714 	size_t nmadvise;
715 
716 	ql_new(&mapelms);
717 
718 	/*
719 	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
720 	 * run is reinserted into runs_avail, and 2) so that it cannot be
721 	 * completely discarded by another thread while arena->lock is dropped
722 	 * by this thread.  Note that the arena_run_dalloc() call will
723 	 * implicitly deallocate the chunk, so no explicit action is required
724 	 * in this function to deallocate the chunk.
725 	 *
726 	 * Note that once a chunk contains dirty pages, it cannot again contain
727 	 * a single run unless 1) it is a dirty run, or 2) this function purges
728 	 * dirty pages and causes the transition to a single clean run.  Thus
729 	 * (chunk == arena->spare) is possible, but it is not possible for
730 	 * this function to be called on the spare unless it contains a dirty
731 	 * run.
732 	 */
733 	if (chunk == arena->spare) {
734 		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
735 		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
736 
737 		arena_chunk_alloc(arena);
738 	}
739 
740 	if (config_stats)
741 		arena->stats.purged += chunk->ndirty;
742 
743 	/*
744 	 * Operate on all dirty runs if there is no clean/dirty run
745 	 * fragmentation.
746 	 */
747 	if (chunk->nruns_adjac == 0)
748 		all = true;
749 
750 	/*
751 	 * Temporarily allocate free dirty runs within chunk.  If all is false,
752 	 * only operate on dirty runs that are fragments; otherwise operate on
753 	 * all dirty runs.
754 	 */
755 	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
756 		mapelm = arena_mapp_get(chunk, pageind);
757 		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
758 			size_t run_size =
759 			    arena_mapbits_unallocated_size_get(chunk, pageind);
760 
761 			npages = run_size >> LG_PAGE;
762 			assert(pageind + npages <= chunk_npages);
763 			assert(arena_mapbits_dirty_get(chunk, pageind) ==
764 			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
765 
766 			if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
767 			    (all || arena_avail_adjac(chunk, pageind,
768 			    npages))) {
769 				arena_run_t *run = (arena_run_t *)((uintptr_t)
770 				    chunk + (uintptr_t)(pageind << LG_PAGE));
771 
772 				arena_run_split(arena, run, run_size, true,
773 				    BININD_INVALID, false);
774 				/* Append to list for later processing. */
775 				ql_elm_new(mapelm, u.ql_link);
776 				ql_tail_insert(&mapelms, mapelm, u.ql_link);
777 			}
778 		} else {
779 			/* Skip run. */
780 			if (arena_mapbits_large_get(chunk, pageind) != 0) {
781 				npages = arena_mapbits_large_size_get(chunk,
782 				    pageind) >> LG_PAGE;
783 			} else {
784 				size_t binind;
785 				arena_bin_info_t *bin_info;
786 				arena_run_t *run = (arena_run_t *)((uintptr_t)
787 				    chunk + (uintptr_t)(pageind << LG_PAGE));
788 
789 				assert(arena_mapbits_small_runind_get(chunk,
790 				    pageind) == 0);
791 				binind = arena_bin_index(arena, run->bin);
792 				bin_info = &arena_bin_info[binind];
793 				npages = bin_info->run_size >> LG_PAGE;
794 			}
795 		}
796 	}
797 	assert(pageind == chunk_npages);
798 	assert(chunk->ndirty == 0 || all == false);
799 	assert(chunk->nruns_adjac == 0);
800 
801 	malloc_mutex_unlock(&arena->lock);
802 	if (config_stats)
803 		nmadvise = 0;
804 	npurged = 0;
805 	ql_foreach(mapelm, &mapelms, u.ql_link) {
806 		bool unzeroed;
807 		size_t flag_unzeroed, i;
808 
809 		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
810 		    sizeof(arena_chunk_map_t)) + map_bias;
811 		npages = arena_mapbits_large_size_get(chunk, pageind) >>
812 		    LG_PAGE;
813 		assert(pageind + npages <= chunk_npages);
814 		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
815 		    LG_PAGE)), (npages << LG_PAGE));
816 		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
817 		/*
818 		 * Set the unzeroed flag for all pages, now that pages_purge()
819 		 * has returned whether the pages were zeroed as a side effect
820 		 * of purging.  This chunk map modification is safe even though
821 		 * the arena mutex isn't currently owned by this thread,
822 		 * because the run is marked as allocated, thus protecting it
823 		 * from being modified by any other thread.  As long as these
824 		 * writes don't perturb the first and last elements'
825 		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
826 		 */
827 		for (i = 0; i < npages; i++) {
828 			arena_mapbits_unzeroed_set(chunk, pageind+i,
829 			    flag_unzeroed);
830 		}
831 		npurged += npages;
832 		if (config_stats)
833 			nmadvise++;
834 	}
835 	malloc_mutex_lock(&arena->lock);
836 	if (config_stats)
837 		arena->stats.nmadvise += nmadvise;
838 
839 	/* Deallocate runs. */
840 	for (mapelm = ql_first(&mapelms); mapelm != NULL;
841 	    mapelm = ql_first(&mapelms)) {
842 		arena_run_t *run;
843 
844 		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
845 		    sizeof(arena_chunk_map_t)) + map_bias;
846 		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
847 		    LG_PAGE));
848 		ql_remove(&mapelms, mapelm, u.ql_link);
849 		arena_run_dalloc(arena, run, false, true);
850 	}
851 
852 	return (npurged);
853 }
854 
855 static arena_chunk_t *
856 chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
857 {
858 	size_t *ndirty = (size_t *)arg;
859 
860 	assert(chunk->ndirty != 0);
861 	*ndirty += chunk->ndirty;
862 	return (NULL);
863 }
864 
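/*
 * Purge enough dirty pages to bring the arena back under the dirty/active
 * ratio implied by opt_lg_dirty_mult, or all dirty pages when all is true.
 * arena->npurgatory counts pages that this and other threads have already
 * committed to purging, which keeps concurrent callers from all chasing the
 * same excess.
 */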
865 static void
866 arena_purge(arena_t *arena, bool all)
867 {
868 	arena_chunk_t *chunk;
869 	size_t npurgatory;
870 	if (config_debug) {
871 		size_t ndirty = 0;
872 
873 		arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
874 		    chunks_dirty_iter_cb, (void *)&ndirty);
875 		assert(ndirty == arena->ndirty);
876 	}
877 	assert(arena->ndirty > arena->npurgatory || all);
878 	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
879 	    arena->npurgatory) || all);
880 
881 	if (config_stats)
882 		arena->stats.npurge++;
883 
884 	/*
885 	 * Compute the minimum number of pages that this thread should try to
886 	 * purge, and add the result to arena->npurgatory.  This will keep
887 	 * multiple threads from racing to reduce ndirty below the threshold.
888 	 */
889 	{
890 		size_t npurgeable = arena->ndirty - arena->npurgatory;
891 
892 		if (all == false) {
893 			size_t threshold = (arena->nactive >>
894 			    opt_lg_dirty_mult);
895 
896 			npurgatory = npurgeable - threshold;
897 		} else
898 			npurgatory = npurgeable;
899 	}
900 	arena->npurgatory += npurgatory;
901 
902 	while (npurgatory > 0) {
903 		size_t npurgeable, npurged, nunpurged;
904 
905 		/* Get next chunk with dirty pages. */
906 		chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
907 		if (chunk == NULL) {
908 			/*
909 			 * This thread was unable to purge as many pages as
910 			 * originally intended, due to races with other threads
911 			 * that either did some of the purging work, or re-used
912 			 * dirty pages.
913 			 */
914 			arena->npurgatory -= npurgatory;
915 			return;
916 		}
917 		npurgeable = chunk->ndirty;
918 		assert(npurgeable != 0);
919 
920 		if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
921 			/*
922 			 * This thread will purge all the dirty pages in chunk,
923 			 * so set npurgatory to reflect this thread's intent to
924 			 * purge the pages.  This tends to reduce the chances
925 			 * of the following scenario:
926 			 *
927 			 * 1) This thread sets arena->npurgatory such that
928 			 *    (arena->ndirty - arena->npurgatory) is at the
929 			 *    threshold.
930 			 * 2) This thread drops arena->lock.
931 			 * 3) Another thread causes one or more pages to be
932 			 *    dirtied, and immediately determines that it must
933 			 *    purge dirty pages.
934 			 *
935 			 * If this scenario *does* play out, that's okay,
936 			 * because all of the purging work being done really
937 			 * needs to happen.
938 			 */
939 			arena->npurgatory += npurgeable - npurgatory;
940 			npurgatory = npurgeable;
941 		}
942 
943 		/*
944 		 * Keep track of how many pages are purgeable, versus how many
945 		 * actually get purged, and adjust counters accordingly.
946 		 */
947 		arena->npurgatory -= npurgeable;
948 		npurgatory -= npurgeable;
949 		npurged = arena_chunk_purge(arena, chunk, all);
950 		nunpurged = npurgeable - npurged;
951 		arena->npurgatory += nunpurged;
952 		npurgatory += nunpurged;
953 	}
954 }
955 
956 void
957 arena_purge_all(arena_t *arena)
958 {
959 
960 	malloc_mutex_lock(&arena->lock);
961 	arena_purge(arena, true);
962 	malloc_mutex_unlock(&arena->lock);
963 }
964 
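/*
 * Return a run to the unallocated state: update the active page count,
 * rewrite the chunk map, coalesce with unallocated neighbors that share the
 * resulting dirty state, re-insert the result into runs_avail, and release
 * the chunk (or make it the spare) if it becomes completely unused.  Dirty
 * deallocation may trigger purging via arena_maybe_purge().
 */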
965 static void
966 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
967 {
968 	arena_chunk_t *chunk;
969 	size_t size, run_ind, run_pages, flag_dirty;
970 
971 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
972 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
973 	assert(run_ind >= map_bias);
974 	assert(run_ind < chunk_npages);
975 	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
976 		size = arena_mapbits_large_size_get(chunk, run_ind);
977 		assert(size == PAGE ||
978 		    arena_mapbits_large_size_get(chunk,
979 		    run_ind+(size>>LG_PAGE)-1) == 0);
980 	} else {
981 		size_t binind = arena_bin_index(arena, run->bin);
982 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
983 		size = bin_info->run_size;
984 	}
985 	run_pages = (size >> LG_PAGE);
986 	if (config_stats) {
987 		/*
988 		 * Update stats_cactive if nactive is crossing a chunk
989 		 * multiple.
990 		 */
991 		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
992 		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
993 		if (cactive_diff != 0)
994 			stats_cactive_sub(cactive_diff);
995 	}
996 	arena->nactive -= run_pages;
997 
998 	/*
999 	 * The run is dirty if the caller claims to have dirtied it, as well as
1000 	 * if it was already dirty before being allocated and the caller
1001 	 * doesn't claim to have cleaned it.
1002 	 */
1003 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1004 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
1005 	if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
1006 		dirty = true;
1007 	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
1008 
1009 	/* Mark pages as unallocated in the chunk map. */
1010 	if (dirty) {
1011 		arena_mapbits_unallocated_set(chunk, run_ind, size,
1012 		    CHUNK_MAP_DIRTY);
1013 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1014 		    CHUNK_MAP_DIRTY);
1015 	} else {
1016 		arena_mapbits_unallocated_set(chunk, run_ind, size,
1017 		    arena_mapbits_unzeroed_get(chunk, run_ind));
1018 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
1019 		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
1020 	}
1021 
1022 	/* Try to coalesce forward. */
1023 	if (run_ind + run_pages < chunk_npages &&
1024 	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
1025 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
1026 		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1027 		    run_ind+run_pages);
1028 		size_t nrun_pages = nrun_size >> LG_PAGE;
1029 
1030 		/*
1031 		 * Remove successor from runs_avail; the coalesced run is
1032 		 * inserted later.
1033 		 */
1034 		assert(arena_mapbits_unallocated_size_get(chunk,
1035 		    run_ind+run_pages+nrun_pages-1) == nrun_size);
1036 		assert(arena_mapbits_dirty_get(chunk,
1037 		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
1038 		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
1039 		    false, true);
1040 
1041 		size += nrun_size;
1042 		run_pages += nrun_pages;
1043 
1044 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1045 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1046 		    size);
1047 	}
1048 
1049 	/* Try to coalesce backward. */
1050 	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
1051 	    == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
1052 		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1053 		    run_ind-1);
1054 		size_t prun_pages = prun_size >> LG_PAGE;
1055 
1056 		run_ind -= prun_pages;
1057 
1058 		/*
1059 		 * Remove predecessor from runs_avail; the coalesced run is
1060 		 * inserted later.
1061 		 */
1062 		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1063 		    prun_size);
1064 		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
1065 		arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
1066 		    false);
1067 
1068 		size += prun_size;
1069 		run_pages += prun_pages;
1070 
1071 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1072 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1073 		    size);
1074 	}
1075 
1076 	/* Insert into runs_avail, now that coalescing is complete. */
1077 	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1078 	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
1079 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1080 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
1081 	arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
1082 
1083 	/* Deallocate chunk if it is now completely unused. */
1084 	if (size == arena_maxclass) {
1085 		assert(run_ind == map_bias);
1086 		assert(run_pages == (arena_maxclass >> LG_PAGE));
1087 		arena_chunk_dealloc(arena, chunk);
1088 	}
1089 
1090 	/*
1091 	 * It is okay to do dirty page processing here even if the chunk was
1092 	 * deallocated above, since in that case it is the spare.  Waiting
1093 	 * until after possible chunk deallocation to do dirty processing
1094 	 * allows for an old spare to be fully deallocated, thus decreasing the
1095 	 * chances of spuriously crossing the dirty page purging threshold.
1096 	 */
1097 	if (dirty)
1098 		arena_maybe_purge(arena);
1099 }
1100 
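/*
 * arena_run_trim_{head,tail}() shrink a large run in place by re-labeling
 * the unwanted leading or trailing pages as an independent large run and
 * passing that piece to arena_run_dalloc().
 */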
1101 static void
1102 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1103     size_t oldsize, size_t newsize)
1104 {
1105 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1106 	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
1107 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
1108 
1109 	assert(oldsize > newsize);
1110 
1111 	/*
1112 	 * Update the chunk map so that arena_run_dalloc() can treat the
1113 	 * leading run as separately allocated.  Set the last element of each
1114 	 * run first, in case of single-page runs.
1115 	 */
1116 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
1117 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1118 	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
1119 
1120 	if (config_debug) {
1121 		UNUSED size_t tail_npages = newsize >> LG_PAGE;
1122 		assert(arena_mapbits_large_size_get(chunk,
1123 		    pageind+head_npages+tail_npages-1) == 0);
1124 		assert(arena_mapbits_dirty_get(chunk,
1125 		    pageind+head_npages+tail_npages-1) == flag_dirty);
1126 	}
1127 	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
1128 	    flag_dirty);
1129 
1130 	arena_run_dalloc(arena, run, false, false);
1131 }
1132 
1133 static void
1134 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1135     size_t oldsize, size_t newsize, bool dirty)
1136 {
1137 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1138 	size_t head_npages = newsize >> LG_PAGE;
1139 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
1140 
1141 	assert(oldsize > newsize);
1142 
1143 	/*
1144 	 * Update the chunk map so that arena_run_dalloc() can treat the
1145 	 * trailing run as separately allocated.  Set the last element of each
1146 	 * run first, in case of single-page runs.
1147 	 */
1148 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
1149 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
1150 	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
1151 
1152 	if (config_debug) {
1153 		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
1154 		assert(arena_mapbits_large_size_get(chunk,
1155 		    pageind+head_npages+tail_npages-1) == 0);
1156 		assert(arena_mapbits_dirty_get(chunk,
1157 		    pageind+head_npages+tail_npages-1) == flag_dirty);
1158 	}
1159 	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
1160 	    flag_dirty);
1161 
1162 	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
1163 	    dirty, false);
1164 }
1165 
1166 static arena_run_t *
1167 arena_bin_runs_first(arena_bin_t *bin)
1168 {
1169 	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
1170 	if (mapelm != NULL) {
1171 		arena_chunk_t *chunk;
1172 		size_t pageind;
1173 		arena_run_t *run;
1174 
1175 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1176 		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1177 		    sizeof(arena_chunk_map_t))) + map_bias;
1178 		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1179 		    arena_mapbits_small_runind_get(chunk, pageind)) <<
1180 		    LG_PAGE));
1181 		return (run);
1182 	}
1183 
1184 	return (NULL);
1185 }
1186 
1187 static void
1188 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1189 {
1190 	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
1191 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1192 	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1193 
1194 	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
1195 
1196 	arena_run_tree_insert(&bin->runs, mapelm);
1197 }
1198 
1199 static void
1200 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1201 {
1202 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1203 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1204 	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1205 
1206 	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1207 
1208 	arena_run_tree_remove(&bin->runs, mapelm);
1209 }
1210 
1211 static arena_run_t *
1212 arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1213 {
1214 	arena_run_t *run = arena_bin_runs_first(bin);
1215 	if (run != NULL) {
1216 		arena_bin_runs_remove(bin, run);
1217 		if (config_stats)
1218 			bin->stats.reruns++;
1219 	}
1220 	return (run);
1221 }
1222 
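/*
 * Find or create a run with free regions for bin.  bin->lock is dropped
 * while a new run is allocated under arena->lock, so the caller must cope
 * with bin->runcur having been updated in the meantime (see
 * arena_bin_malloc_hard()).
 */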
1223 static arena_run_t *
1224 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1225 {
1226 	arena_run_t *run;
1227 	size_t binind;
1228 	arena_bin_info_t *bin_info;
1229 
1230 	/* Look for a usable run. */
1231 	run = arena_bin_nonfull_run_tryget(bin);
1232 	if (run != NULL)
1233 		return (run);
1234 	/* No existing runs have any space available. */
1235 
1236 	binind = arena_bin_index(arena, bin);
1237 	bin_info = &arena_bin_info[binind];
1238 
1239 	/* Allocate a new run. */
1240 	malloc_mutex_unlock(&bin->lock);
1241 	/******************************/
1242 	malloc_mutex_lock(&arena->lock);
1243 	run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
1244 	if (run != NULL) {
1245 		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
1246 		    (uintptr_t)bin_info->bitmap_offset);
1247 
1248 		/* Initialize run internals. */
1249 		VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
1250 		    bin_info->redzone_size);
1251 		run->bin = bin;
1252 		run->nextind = 0;
1253 		run->nfree = bin_info->nregs;
1254 		bitmap_init(bitmap, &bin_info->bitmap_info);
1255 	}
1256 	malloc_mutex_unlock(&arena->lock);
1257 	/********************************/
1258 	malloc_mutex_lock(&bin->lock);
1259 	if (run != NULL) {
1260 		if (config_stats) {
1261 			bin->stats.nruns++;
1262 			bin->stats.curruns++;
1263 		}
1264 		return (run);
1265 	}
1266 
1267 	/*
1268 	 * arena_run_alloc() failed, but another thread may have made
1269 	 * sufficient memory available while this one dropped bin->lock above,
1270 	 * so search one more time.
1271 	 */
1272 	run = arena_bin_nonfull_run_tryget(bin);
1273 	if (run != NULL)
1274 		return (run);
1275 
1276 	return (NULL);
1277 }
1278 
1279 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1280 static void *
1281 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1282 {
1283 	void *ret;
1284 	size_t binind;
1285 	arena_bin_info_t *bin_info;
1286 	arena_run_t *run;
1287 
1288 	binind = arena_bin_index(arena, bin);
1289 	bin_info = &arena_bin_info[binind];
1290 	bin->runcur = NULL;
1291 	run = arena_bin_nonfull_run_get(arena, bin);
1292 	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1293 		/*
1294 		 * Another thread updated runcur while this one ran without the
1295 		 * bin lock in arena_bin_nonfull_run_get().
1296 		 */
1297 		assert(bin->runcur->nfree > 0);
1298 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
1299 		if (run != NULL) {
1300 			arena_chunk_t *chunk;
1301 
1302 			/*
1303 			 * arena_run_alloc() may have allocated run, or it may
1304 			 * have pulled run from the bin's run tree.  Therefore
1305 			 * it is unsafe to make any assumptions about how run
1306 			 * has previously been used, and arena_bin_lower_run()
1307 			 * must be called, as if a region were just deallocated
1308 			 * from the run.
1309 			 */
1310 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1311 			if (run->nfree == bin_info->nregs)
1312 				arena_dalloc_bin_run(arena, chunk, run, bin);
1313 			else
1314 				arena_bin_lower_run(arena, chunk, run, bin);
1315 		}
1316 		return (ret);
1317 	}
1318 
1319 	if (run == NULL)
1320 		return (NULL);
1321 
1322 	bin->runcur = run;
1323 
1324 	assert(bin->runcur->nfree > 0);
1325 
1326 	return (arena_run_reg_alloc(bin->runcur, bin_info));
1327 }
1328 
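/*
 * Fill a thread cache bin with up to ncached_max >> lg_fill_div regions,
 * inserting them so that low regions are used first, and fold the tcache
 * bin's request count into the arena bin's statistics.
 */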
1329 void
1330 arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1331     uint64_t prof_accumbytes)
1332 {
1333 	unsigned i, nfill;
1334 	arena_bin_t *bin;
1335 	arena_run_t *run;
1336 	void *ptr;
1337 
1338 	assert(tbin->ncached == 0);
1339 
1340 	if (config_prof)
1341 		arena_prof_accum(arena, prof_accumbytes);
1342 	bin = &arena->bins[binind];
1343 	malloc_mutex_lock(&bin->lock);
1344 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1345 	    tbin->lg_fill_div); i < nfill; i++) {
1346 		if ((run = bin->runcur) != NULL && run->nfree > 0)
1347 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1348 		else
1349 			ptr = arena_bin_malloc_hard(arena, bin);
1350 		if (ptr == NULL)
1351 			break;
1352 		if (config_fill && opt_junk) {
1353 			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1354 			    true);
1355 		}
1356 		/* Insert such that low regions get used first. */
1357 		tbin->avail[nfill - 1 - i] = ptr;
1358 	}
1359 	if (config_stats) {
1360 		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1361 		bin->stats.nmalloc += i;
1362 		bin->stats.nrequests += tbin->tstats.nrequests;
1363 		bin->stats.nfills++;
1364 		tbin->tstats.nrequests = 0;
1365 	}
1366 	malloc_mutex_unlock(&bin->lock);
1367 	tbin->ncached = i;
1368 }
1369 
1370 void
1371 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1372 {
1373 
1374 	if (zero) {
1375 		size_t redzone_size = bin_info->redzone_size;
1376 		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1377 		    redzone_size);
1378 		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1379 		    redzone_size);
1380 	} else {
1381 		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1382 		    bin_info->reg_interval);
1383 	}
1384 }
1385 
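/*
 * Validate and junk a freed small region: verify that the redzone bytes on
 * both sides still hold 0xa5, report (and optionally abort on) corruption,
 * then fill the region and its redzones with 0x5a.
 */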
1386 void
1387 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1388 {
1389 	size_t size = bin_info->reg_size;
1390 	size_t redzone_size = bin_info->redzone_size;
1391 	size_t i;
1392 	bool error = false;
1393 
1394 	for (i = 1; i <= redzone_size; i++) {
1395 		unsigned byte;
1396 		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
1397 			error = true;
1398 			malloc_printf("<jemalloc>: Corrupt redzone "
1399 			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
1400 			    (i == 1) ? "" : "s", ptr, size, byte);
1401 		}
1402 	}
1403 	for (i = 0; i < redzone_size; i++) {
1404 		unsigned byte;
1405 		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
1406 			error = true;
1407 			malloc_printf("<jemalloc>: Corrupt redzone "
1408 			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
1409 			    i, (i == 1) ? "" : "s", ptr, size, byte);
1410 		}
1411 	}
1412 	if (opt_abort && error)
1413 		abort();
1414 
1415 	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1416 	    bin_info->reg_interval);
1417 }
1418 
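/*
 * Allocate a small region of the size class that covers size, preferring
 * bin->runcur and falling back to arena_bin_malloc_hard().  Junk filling and
 * zeroing are applied according to the build configuration and runtime
 * options.
 */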
1419 void *
1420 arena_malloc_small(arena_t *arena, size_t size, bool zero)
1421 {
1422 	void *ret;
1423 	arena_bin_t *bin;
1424 	arena_run_t *run;
1425 	size_t binind;
1426 
1427 	binind = SMALL_SIZE2BIN(size);
1428 	assert(binind < NBINS);
1429 	bin = &arena->bins[binind];
1430 	size = arena_bin_info[binind].reg_size;
1431 
1432 	malloc_mutex_lock(&bin->lock);
1433 	if ((run = bin->runcur) != NULL && run->nfree > 0)
1434 		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1435 	else
1436 		ret = arena_bin_malloc_hard(arena, bin);
1437 
1438 	if (ret == NULL) {
1439 		malloc_mutex_unlock(&bin->lock);
1440 		return (NULL);
1441 	}
1442 
1443 	if (config_stats) {
1444 		bin->stats.allocated += size;
1445 		bin->stats.nmalloc++;
1446 		bin->stats.nrequests++;
1447 	}
1448 	malloc_mutex_unlock(&bin->lock);
1449 	if (config_prof && isthreaded == false)
1450 		arena_prof_accum(arena, size);
1451 
1452 	if (zero == false) {
1453 		if (config_fill) {
1454 			if (opt_junk) {
1455 				arena_alloc_junk_small(ret,
1456 				    &arena_bin_info[binind], false);
1457 			} else if (opt_zero)
1458 				memset(ret, 0, size);
1459 		}
1460 	} else {
1461 		if (config_fill && opt_junk) {
1462 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
1463 			    true);
1464 		}
1465 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1466 		memset(ret, 0, size);
1467 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1468 	}
1469 
1470 	return (ret);
1471 }
1472 
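/*
 * Allocate a large run of PAGE_CEILING(size) bytes directly from the arena,
 * updating the large-allocation statistics and applying junk/zero fill per
 * the runtime options.
 */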
1473 void *
1474 arena_malloc_large(arena_t *arena, size_t size, bool zero)
1475 {
1476 	void *ret;
1477 
1478 	/* Large allocation. */
1479 	size = PAGE_CEILING(size);
1480 	malloc_mutex_lock(&arena->lock);
1481 	ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
1482 	if (ret == NULL) {
1483 		malloc_mutex_unlock(&arena->lock);
1484 		return (NULL);
1485 	}
1486 	if (config_stats) {
1487 		arena->stats.nmalloc_large++;
1488 		arena->stats.nrequests_large++;
1489 		arena->stats.allocated_large += size;
1490 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1491 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1492 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1493 	}
1494 	if (config_prof)
1495 		arena_prof_accum_locked(arena, size);
1496 	malloc_mutex_unlock(&arena->lock);
1497 
1498 	if (zero == false) {
1499 		if (config_fill) {
1500 			if (opt_junk)
1501 				memset(ret, 0xa5, size);
1502 			else if (opt_zero)
1503 				memset(ret, 0, size);
1504 		}
1505 	}
1506 
1507 	return (ret);
1508 }
1509 
1510 /* Only handles large allocations that require more than page alignment. */
1511 void *
1512 arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
1513 {
1514 	void *ret;
1515 	size_t alloc_size, leadsize, trailsize;
1516 	arena_run_t *run;
1517 	arena_chunk_t *chunk;
1518 
1519 	assert((size & PAGE_MASK) == 0);
1520 
1521 	alignment = PAGE_CEILING(alignment);
1522 	alloc_size = size + alignment - PAGE;
1523 
1524 	malloc_mutex_lock(&arena->lock);
1525 	run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
1526 	if (run == NULL) {
1527 		malloc_mutex_unlock(&arena->lock);
1528 		return (NULL);
1529 	}
1530 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1531 
1532 	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1533 	    (uintptr_t)run;
1534 	assert(alloc_size >= leadsize + size);
1535 	trailsize = alloc_size - leadsize - size;
1536 	ret = (void *)((uintptr_t)run + leadsize);
1537 	if (leadsize != 0) {
1538 		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1539 		    leadsize);
1540 	}
1541 	if (trailsize != 0) {
1542 		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1543 		    false);
1544 	}
1545 
1546 	if (config_stats) {
1547 		arena->stats.nmalloc_large++;
1548 		arena->stats.nrequests_large++;
1549 		arena->stats.allocated_large += size;
1550 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1551 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1552 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1553 	}
1554 	malloc_mutex_unlock(&arena->lock);
1555 
1556 	if (config_fill && zero == false) {
1557 		if (opt_junk)
1558 			memset(ret, 0xa5, size);
1559 		else if (opt_zero)
1560 			memset(ret, 0, size);
1561 	}
1562 	return (ret);
1563 }
1564 
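/*
 * Called when profiling promotes a small request to a page-sized large
 * allocation: record the original small size class in the chunk map so that
 * isalloc(ptr, true) reports the small class size rather than PAGE.
 */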
1565 void
1566 arena_prof_promoted(const void *ptr, size_t size)
1567 {
1568 	arena_chunk_t *chunk;
1569 	size_t pageind, binind;
1570 
1571 	cassert(config_prof);
1572 	assert(ptr != NULL);
1573 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
1574 	assert(isalloc(ptr, false) == PAGE);
1575 	assert(isalloc(ptr, true) == PAGE);
1576 	assert(size <= SMALL_MAXCLASS);
1577 
1578 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1579 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1580 	binind = SMALL_SIZE2BIN(size);
1581 	assert(binind < NBINS);
1582 	arena_mapbits_large_binind_set(chunk, pageind, binind);
1583 
1584 	assert(isalloc(ptr, false) == PAGE);
1585 	assert(isalloc(ptr, true) == size);
1586 }
1587 
1588 static void
1589 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1590     arena_bin_t *bin)
1591 {
1592 
1593 	/* Dissociate run from bin. */
1594 	if (run == bin->runcur)
1595 		bin->runcur = NULL;
1596 	else {
1597 		size_t binind = arena_bin_index(chunk->arena, bin);
1598 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1599 
1600 		if (bin_info->nregs != 1) {
1601 			/*
1602 			 * This block's conditional is necessary because if the
1603 			 * run only contains one region, then it never gets
1604 			 * inserted into the non-full runs tree.
1605 			 */
1606 			arena_bin_runs_remove(bin, run);
1607 		}
1608 	}
1609 }
1610 
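/*
 * Release a completely free small run.  If the run was clean and its tail
 * pages were never used, that clean tail is trimmed off first so it is not
 * marked dirty when the remainder of the run is deallocated.
 */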
1611 static void
1612 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1613     arena_bin_t *bin)
1614 {
1615 	size_t binind;
1616 	arena_bin_info_t *bin_info;
1617 	size_t npages, run_ind, past;
1618 
1619 	assert(run != bin->runcur);
1620 	assert(arena_run_tree_search(&bin->runs,
1621 	    arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1622 	    == NULL);
1623 
1624 	binind = arena_bin_index(chunk->arena, run->bin);
1625 	bin_info = &arena_bin_info[binind];
1626 
1627 	malloc_mutex_unlock(&bin->lock);
1628 	/******************************/
1629 	npages = bin_info->run_size >> LG_PAGE;
1630 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1631 	past = (size_t)(PAGE_CEILING((uintptr_t)run +
1632 	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1633 	    bin_info->reg_interval - bin_info->redzone_size) -
1634 	    (uintptr_t)chunk) >> LG_PAGE);
1635 	malloc_mutex_lock(&arena->lock);
1636 
1637 	/*
1638 	 * If the run was originally clean, and some pages were never touched,
1639 	 * trim the clean pages before deallocating the dirty portion of the
1640 	 * run.
1641 	 */
1642 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1643 	    arena_mapbits_dirty_get(chunk, run_ind+npages-1));
1644 	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1645 	    npages) {
1646 		/* Trim clean pages.  Convert to large run beforehand. */
1647 		assert(npages > 0);
1648 		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
1649 		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
1650 		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1651 		    ((past - run_ind) << LG_PAGE), false);
1652 		/* npages = past - run_ind; */
1653 	}
1654 	arena_run_dalloc(arena, run, true, false);
1655 	malloc_mutex_unlock(&arena->lock);
1656 	/****************************/
1657 	malloc_mutex_lock(&bin->lock);
1658 	if (config_stats)
1659 		bin->stats.curruns--;
1660 }
1661 
1662 static void
1663 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1664     arena_bin_t *bin)
1665 {
1666 
1667 	/*
1668 	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1669 	 * non-full run.  It is okay to NULL runcur out rather than proactively
1670 	 * keeping it pointing at the lowest non-full run.
1671 	 */
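	/*
	 * Note that a NULL runcur compares as the lowest possible address, so
	 * in that case the branch below is skipped and the run is simply
	 * inserted into the non-full runs tree.
	 */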
1672 	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
1673 		/* Switch runcur. */
1674 		if (bin->runcur->nfree > 0)
1675 			arena_bin_runs_insert(bin, bin->runcur);
1676 		bin->runcur = run;
1677 		if (config_stats)
1678 			bin->stats.reruns++;
1679 	} else
1680 		arena_bin_runs_insert(bin, run);
1681 }
1682 
1683 void
1684 arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1685     arena_chunk_map_t *mapelm)
1686 {
1687 	size_t pageind;
1688 	arena_run_t *run;
1689 	arena_bin_t *bin;
1690 	arena_bin_info_t *bin_info;
1691 	size_t size, binind;
1692 
1693 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1694 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1695 	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1696 	bin = run->bin;
1697 	binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
1698 	bin_info = &arena_bin_info[binind];
1699 	if (config_fill || config_stats)
1700 		size = bin_info->reg_size;
1701 
1702 	if (config_fill && opt_junk)
1703 		arena_dalloc_junk_small(ptr, bin_info);
1704 
1705 	arena_run_reg_dalloc(run, ptr);
1706 	if (run->nfree == bin_info->nregs) {
1707 		arena_dissociate_bin_run(chunk, run, bin);
1708 		arena_dalloc_bin_run(arena, chunk, run, bin);
1709 	} else if (run->nfree == 1 && run != bin->runcur)
1710 		arena_bin_lower_run(arena, chunk, run, bin);
1711 
1712 	if (config_stats) {
1713 		bin->stats.allocated -= size;
1714 		bin->stats.ndalloc++;
1715 	}
1716 }
1717 
1718 void
1719 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1720     size_t pageind, arena_chunk_map_t *mapelm)
1721 {
1722 	arena_run_t *run;
1723 	arena_bin_t *bin;
1724 
1725 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1726 	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1727 	bin = run->bin;
1728 	malloc_mutex_lock(&bin->lock);
1729 	arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1730 	malloc_mutex_unlock(&bin->lock);
1731 }
1732 
1733 void
1734 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1735     size_t pageind)
1736 {
1737 	arena_chunk_map_t *mapelm;
1738 
1739 	if (config_debug) {
1740 		/* arena_ptr_small_binind_get() does extra sanity checking. */
1741 		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1742 		    pageind)) != BININD_INVALID);
1743 	}
1744 	mapelm = arena_mapp_get(chunk, pageind);
1745 	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1746 }
1747 
1748 void
1749 arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1750 {
1751 
1752 	if (config_fill || config_stats) {
1753 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1754 		size_t size = arena_mapbits_large_size_get(chunk, pageind);
1755 
1756 		if (config_fill && opt_junk)
1757 			memset(ptr, 0x5a, size);
1758 		if (config_stats) {
1759 			arena->stats.ndalloc_large++;
1760 			arena->stats.allocated_large -= size;
1761 			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1762 			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1763 		}
1764 	}
1765 
1766 	arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
1767 }
1768 
1769 void
1770 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1771 {
1772 
1773 	malloc_mutex_lock(&arena->lock);
1774 	arena_dalloc_large_locked(arena, chunk, ptr);
1775 	malloc_mutex_unlock(&arena->lock);
1776 }
1777 
1778 static void
1779 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1780     size_t oldsize, size_t size)
1781 {
1782 
1783 	assert(size < oldsize);
1784 
1785 	/*
1786 	 * Shrink the run, and make trailing pages available for other
1787 	 * allocations.
1788 	 */
1789 	malloc_mutex_lock(&arena->lock);
1790 	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1791 	    true);
1792 	if (config_stats) {
1793 		arena->stats.ndalloc_large++;
1794 		arena->stats.allocated_large -= oldsize;
1795 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1796 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1797 
1798 		arena->stats.nmalloc_large++;
1799 		arena->stats.nrequests_large++;
1800 		arena->stats.allocated_large += size;
1801 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1802 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1803 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1804 	}
1805 	malloc_mutex_unlock(&arena->lock);
1806 }
1807 
1808 static bool
1809 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1810     size_t oldsize, size_t size, size_t extra, bool zero)
1811 {
1812 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1813 	size_t npages = oldsize >> LG_PAGE;
1814 	size_t followsize;
1815 
1816 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
1817 
1818 	/* Try to extend the run. */
1819 	assert(size + extra > oldsize);
1820 	malloc_mutex_lock(&arena->lock);
1821 	if (pageind + npages < chunk_npages &&
1822 	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
1823 	    (followsize = arena_mapbits_unallocated_size_get(chunk,
1824 	    pageind+npages)) >= size - oldsize) {
1825 		/*
1826 		 * The next run is available and sufficiently large.  Split the
1827 		 * following run, then merge the first part with the existing
1828 		 * allocation.
1829 		 */
1830 		size_t flag_dirty;
1831 		size_t splitsize = (oldsize + followsize <= size + extra)
1832 		    ? followsize : size + extra - oldsize;
1833 		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
1834 		    ((pageind+npages) << LG_PAGE)), splitsize, true,
1835 		    BININD_INVALID, zero);
1836 
1837 		size = oldsize + splitsize;
1838 		npages = size >> LG_PAGE;
1839 
1840 		/*
1841 		 * Mark the extended run as dirty if either portion of the run
1842 		 * was dirty before allocation.  This is rather pedantic,
1843 		 * because there's not actually any sequence of events that
1844 		 * could cause the resulting run to be passed to
1845 		 * arena_run_dalloc() with the dirty argument set to false
1846 		 * (which is when dirty flag consistency would really matter).
1847 		 */
1848 		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
1849 		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
1850 		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
1851 		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
1852 
1853 		if (config_stats) {
1854 			arena->stats.ndalloc_large++;
1855 			arena->stats.allocated_large -= oldsize;
1856 			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1857 			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1858 
1859 			arena->stats.nmalloc_large++;
1860 			arena->stats.nrequests_large++;
1861 			arena->stats.allocated_large += size;
1862 			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1863 			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1864 			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1865 		}
1866 		malloc_mutex_unlock(&arena->lock);
1867 		return (false);
1868 	}
1869 	malloc_mutex_unlock(&arena->lock);
1870 
1871 	return (true);
1872 }
1873 
1874 /*
1875  * Try to resize a large allocation, in order to avoid copying.  This always
1876  * fails when growing an object if the run that follows it is already in use.
1877  */
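/*
 * Return value convention (mirrors arena_ralloc_large_grow()): false means
 * the allocation was resized in place; true means the caller must fall back
 * to allocate/copy/free.
 */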
1878 static bool
1879 arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
1880     bool zero)
1881 {
1882 	size_t psize;
1883 
1884 	psize = PAGE_CEILING(size + extra);
1885 	if (psize == oldsize) {
1886 		/* Same size class. */
1887 		if (config_fill && opt_junk && size < oldsize) {
1888 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
1889 			    size);
1890 		}
1891 		return (false);
1892 	} else {
1893 		arena_chunk_t *chunk;
1894 		arena_t *arena;
1895 
1896 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1897 		arena = chunk->arena;
1898 
1899 		if (psize < oldsize) {
1900 			/* Fill before shrinking in order to avoid a race. */
1901 			if (config_fill && opt_junk) {
1902 				memset((void *)((uintptr_t)ptr + size), 0x5a,
1903 				    oldsize - size);
1904 			}
1905 			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
1906 			    psize);
1907 			return (false);
1908 		} else {
1909 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
1910 			    oldsize, PAGE_CEILING(size),
1911 			    psize - PAGE_CEILING(size), zero);
1912 			if (config_fill && ret == false && zero == false &&
1913 			    opt_zero) {
1914 				memset((void *)((uintptr_t)ptr + oldsize), 0,
1915 				    size - oldsize);
1916 			}
1917 			return (ret);
1918 		}
1919 	}
1920 }
1921 
1922 void *
1923 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
1924     bool zero)
1925 {
1926 
1927 	/*
1928 	 * Avoid moving the allocation if the size class can be left the same.
1929 	 */
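	/*
	 * Two in-place cases are handled below: a small allocation stays put
	 * if size + extra still maps to its current bin, or if this is a
	 * shrink for which size + extra still covers oldsize; a large
	 * allocation is resized in place via arena_ralloc_large() when
	 * size + extra exceeds SMALL_MAXCLASS.
	 */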
1930 	if (oldsize <= arena_maxclass) {
1931 		if (oldsize <= SMALL_MAXCLASS) {
1932 			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
1933 			    == oldsize);
1934 			if ((size + extra <= SMALL_MAXCLASS &&
1935 			    SMALL_SIZE2BIN(size + extra) ==
1936 			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
1937 			    size + extra >= oldsize)) {
1938 				if (config_fill && opt_junk && size < oldsize) {
1939 					memset((void *)((uintptr_t)ptr + size),
1940 					    0x5a, oldsize - size);
1941 				}
1942 				return (ptr);
1943 			}
1944 		} else {
1945 			assert(size <= arena_maxclass);
1946 			if (size + extra > SMALL_MAXCLASS) {
1947 				if (arena_ralloc_large(ptr, oldsize, size,
1948 				    extra, zero) == false)
1949 					return (ptr);
1950 			}
1951 		}
1952 	}
1953 
1954 	/* Reallocation would require a move. */
1955 	return (NULL);
1956 }
1957 
1958 void *
1959 arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
1960     size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
1961     bool try_tcache_dalloc)
1962 {
1963 	void *ret;
1964 	size_t copysize;
1965 
1966 	/* Try to avoid moving the allocation. */
1967 	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
1968 	if (ret != NULL)
1969 		return (ret);
1970 
1971 	/*
1972 	 * size and oldsize are different enough that we need to move the
1973 	 * object.  In that case, fall back to allocating new space and
1974 	 * copying.
1975 	 */
1976 	if (alignment != 0) {
1977 		size_t usize = sa2u(size + extra, alignment);
1978 		if (usize == 0)
1979 			return (NULL);
1980 		ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
1981 	} else
1982 		ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
1983 
1984 	if (ret == NULL) {
1985 		if (extra == 0)
1986 			return (NULL);
1987 		/* Try again, this time without extra. */
1988 		if (alignment != 0) {
1989 			size_t usize = sa2u(size, alignment);
1990 			if (usize == 0)
1991 				return (NULL);
1992 			ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
1993 			    arena);
1994 		} else
1995 			ret = arena_malloc(arena, size, zero, try_tcache_alloc);
1996 
1997 		if (ret == NULL)
1998 			return (NULL);
1999 	}
2000 
2001 	/* Junk/zero-filling was already done by ipallocx()/arena_malloc(). */
2002 
2003 	/*
2004 	 * Copy at most size bytes (not size+extra), since the caller has no
2005 	 * expectation that the extra bytes will be reliably preserved.
2006 	 */
2007 	copysize = (size < oldsize) ? size : oldsize;
2008 	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
2009 	memcpy(ret, ptr, copysize);
2010 	iqallocx(ptr, try_tcache_dalloc);
2011 	return (ret);
2012 }
2013 
2014 dss_prec_t
2015 arena_dss_prec_get(arena_t *arena)
2016 {
2017 	dss_prec_t ret;
2018 
2019 	malloc_mutex_lock(&arena->lock);
2020 	ret = arena->dss_prec;
2021 	malloc_mutex_unlock(&arena->lock);
2022 	return (ret);
2023 }
2024 
2025 void
2026 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2027 {
2028 
2029 	malloc_mutex_lock(&arena->lock);
2030 	arena->dss_prec = dss_prec;
2031 	malloc_mutex_unlock(&arena->lock);
2032 }
2033 
2034 void
2035 arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2036     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
2037     malloc_large_stats_t *lstats)
2038 {
2039 	unsigned i;
2040 
2041 	malloc_mutex_lock(&arena->lock);
2042 	*dss = dss_prec_names[arena->dss_prec];
2043 	*nactive += arena->nactive;
2044 	*ndirty += arena->ndirty;
2045 
2046 	astats->mapped += arena->stats.mapped;
2047 	astats->npurge += arena->stats.npurge;
2048 	astats->nmadvise += arena->stats.nmadvise;
2049 	astats->purged += arena->stats.purged;
2050 	astats->allocated_large += arena->stats.allocated_large;
2051 	astats->nmalloc_large += arena->stats.nmalloc_large;
2052 	astats->ndalloc_large += arena->stats.ndalloc_large;
2053 	astats->nrequests_large += arena->stats.nrequests_large;
2054 
2055 	for (i = 0; i < nlclasses; i++) {
2056 		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2057 		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2058 		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2059 		lstats[i].curruns += arena->stats.lstats[i].curruns;
2060 	}
2061 	malloc_mutex_unlock(&arena->lock);
2062 
2063 	for (i = 0; i < NBINS; i++) {
2064 		arena_bin_t *bin = &arena->bins[i];
2065 
2066 		malloc_mutex_lock(&bin->lock);
2067 		bstats[i].allocated += bin->stats.allocated;
2068 		bstats[i].nmalloc += bin->stats.nmalloc;
2069 		bstats[i].ndalloc += bin->stats.ndalloc;
2070 		bstats[i].nrequests += bin->stats.nrequests;
2071 		if (config_tcache) {
2072 			bstats[i].nfills += bin->stats.nfills;
2073 			bstats[i].nflushes += bin->stats.nflushes;
2074 		}
2075 		bstats[i].nruns += bin->stats.nruns;
2076 		bstats[i].reruns += bin->stats.reruns;
2077 		bstats[i].curruns += bin->stats.curruns;
2078 		malloc_mutex_unlock(&bin->lock);
2079 	}
2080 }
2081 
2082 bool
2083 arena_new(arena_t *arena, unsigned ind)
2084 {
2085 	unsigned i;
2086 	arena_bin_t *bin;
2087 
2088 	arena->ind = ind;
2089 	arena->nthreads = 0;
2090 
2091 	if (malloc_mutex_init(&arena->lock))
2092 		return (true);
2093 
2094 	if (config_stats) {
2095 		memset(&arena->stats, 0, sizeof(arena_stats_t));
2096 		arena->stats.lstats =
2097 		    (malloc_large_stats_t *)base_alloc(nlclasses *
2098 		    sizeof(malloc_large_stats_t));
2099 		if (arena->stats.lstats == NULL)
2100 			return (true);
2101 		memset(arena->stats.lstats, 0, nlclasses *
2102 		    sizeof(malloc_large_stats_t));
2103 		if (config_tcache)
2104 			ql_new(&arena->tcache_ql);
2105 	}
2106 
2107 	if (config_prof)
2108 		arena->prof_accumbytes = 0;
2109 
2110 	arena->dss_prec = chunk_dss_prec_get();
2111 
2112 	/* Initialize chunks. */
2113 	arena_chunk_dirty_new(&arena->chunks_dirty);
2114 	arena->spare = NULL;
2115 
2116 	arena->nactive = 0;
2117 	arena->ndirty = 0;
2118 	arena->npurgatory = 0;
2119 
2120 	arena_avail_tree_new(&arena->runs_avail);
2121 
2122 	/* Initialize bins. */
2123 	for (i = 0; i < NBINS; i++) {
2124 		bin = &arena->bins[i];
2125 		if (malloc_mutex_init(&bin->lock))
2126 			return (true);
2127 		bin->runcur = NULL;
2128 		arena_run_tree_new(&bin->runs);
2129 		if (config_stats)
2130 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
2131 	}
2132 
2133 	return (false);
2134 }
2135 
2136 /*
2137  * Calculate bin_info->run_size such that it meets the following constraints:
2138  *
2139  *   *) bin_info->run_size >= min_run_size
2140  *   *) bin_info->run_size <= arena_maxclass
2141  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
2142  *   *) bin_info->nregs <= RUN_MAXREGS
2143  *
2144  * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2145  * calculated here, since these settings are all interdependent.
2146  */
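/*
 * Worked example with hypothetical numbers (the real header size and overhead
 * constants depend on the build): for a one-page (4096-byte) run, a 192-byte
 * reg_interval, no padding, and a 64-byte header, the calculation settles on
 * nregs == (4096 - 64) / 192 == 21, leaving 4096 - 21*192 == 64 bytes of
 * header/waste overhead, i.e. 64/4096 of the run.  If that ratio exceeded the
 * RUN_MAX_OVRHD-based bound, the expansion loop would retry with a larger
 * run_size.
 */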
2147 static size_t
2148 bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2149 {
2150 	size_t pad_size;
2151 	size_t try_run_size, good_run_size;
2152 	uint32_t try_nregs, good_nregs;
2153 	uint32_t try_hdr_size, good_hdr_size;
2154 	uint32_t try_bitmap_offset, good_bitmap_offset;
2155 	uint32_t try_ctx0_offset, good_ctx0_offset;
2156 	uint32_t try_redzone0_offset, good_redzone0_offset;
2157 
2158 	assert(min_run_size >= PAGE);
2159 	assert(min_run_size <= arena_maxclass);
2160 
2161 	/*
2162 	 * Determine redzone size based on minimum alignment and minimum
2163 	 * redzone size.  Add padding to the end of the run if it is needed to
2164 	 * align the regions.  The padding allows each redzone to be half the
2165 	 * minimum alignment; without the padding, each redzone would have to
2166 	 * be twice as large in order to maintain alignment.
2167 	 */
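	/*
	 * Illustrative case (assuming REDZONE_MINSIZE is smaller than 64):
	 * for reg_size == 64 the minimum alignment is 64 bytes, so each
	 * redzone becomes 64/2 == 32 bytes, pad_size is 32, and reg_interval
	 * below works out to 64 + 2*32 == 128.
	 */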
2168 	if (config_fill && opt_redzone) {
2169 		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2170 		if (align_min <= REDZONE_MINSIZE) {
2171 			bin_info->redzone_size = REDZONE_MINSIZE;
2172 			pad_size = 0;
2173 		} else {
2174 			bin_info->redzone_size = align_min >> 1;
2175 			pad_size = bin_info->redzone_size;
2176 		}
2177 	} else {
2178 		bin_info->redzone_size = 0;
2179 		pad_size = 0;
2180 	}
2181 	bin_info->reg_interval = bin_info->reg_size +
2182 	    (bin_info->redzone_size << 1);
2183 
2184 	/*
2185 	 * Calculate known-valid settings before entering the run_size
2186 	 * expansion loop, so that the first part of the loop always copies
2187 	 * valid settings.
2188 	 *
2189 	 * The do..while loop iteratively reduces the number of regions until
2190 	 * the run header and the regions no longer overlap.  A closed formula
2191 	 * would be quite messy, since there is an interdependency between the
2192 	 * header's mask length and the number of regions.
2193 	 */
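	/*
	 * The do..while below terminates once the header (run struct, bitmap,
	 * and optional prof_ctx pointers) fits in the space left over after
	 * the regions, i.e. once try_hdr_size <= try_run_size -
	 * try_nregs * reg_interval - pad_size.
	 */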
2194 	try_run_size = min_run_size;
2195 	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2196 	    bin_info->reg_interval)
2197 	    + 1; /* Counteract try_nregs-- in loop. */
2198 	if (try_nregs > RUN_MAXREGS) {
2199 		try_nregs = RUN_MAXREGS
2200 		    + 1; /* Counteract try_nregs-- in loop. */
2201 	}
2202 	do {
2203 		try_nregs--;
2204 		try_hdr_size = sizeof(arena_run_t);
2205 		/* Pad to a long boundary. */
2206 		try_hdr_size = LONG_CEILING(try_hdr_size);
2207 		try_bitmap_offset = try_hdr_size;
2208 		/* Add space for bitmap. */
2209 		try_hdr_size += bitmap_size(try_nregs);
2210 		if (config_prof && opt_prof && prof_promote == false) {
2211 			/* Pad to a quantum boundary. */
2212 			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2213 			try_ctx0_offset = try_hdr_size;
2214 			/* Add space for one (prof_ctx_t *) per region. */
2215 			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2216 		} else
2217 			try_ctx0_offset = 0;
2218 		try_redzone0_offset = try_run_size - (try_nregs *
2219 		    bin_info->reg_interval) - pad_size;
2220 	} while (try_hdr_size > try_redzone0_offset);
2221 
2222 	/* run_size expansion loop. */
2223 	do {
2224 		/*
2225 		 * Copy valid settings before trying more aggressive settings.
2226 		 */
2227 		good_run_size = try_run_size;
2228 		good_nregs = try_nregs;
2229 		good_hdr_size = try_hdr_size;
2230 		good_bitmap_offset = try_bitmap_offset;
2231 		good_ctx0_offset = try_ctx0_offset;
2232 		good_redzone0_offset = try_redzone0_offset;
2233 
2234 		/* Try more aggressive settings. */
2235 		try_run_size += PAGE;
2236 		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2237 		    bin_info->reg_interval)
2238 		    + 1; /* Counteract try_nregs-- in loop. */
2239 		if (try_nregs > RUN_MAXREGS) {
2240 			try_nregs = RUN_MAXREGS
2241 			    + 1; /* Counteract try_nregs-- in loop. */
2242 		}
2243 		do {
2244 			try_nregs--;
2245 			try_hdr_size = sizeof(arena_run_t);
2246 			/* Pad to a long boundary. */
2247 			try_hdr_size = LONG_CEILING(try_hdr_size);
2248 			try_bitmap_offset = try_hdr_size;
2249 			/* Add space for bitmap. */
2250 			try_hdr_size += bitmap_size(try_nregs);
2251 			if (config_prof && opt_prof && prof_promote == false) {
2252 				/* Pad to a quantum boundary. */
2253 				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2254 				try_ctx0_offset = try_hdr_size;
2255 				/*
2256 				 * Add space for one (prof_ctx_t *) per region.
2257 				 */
2258 				try_hdr_size += try_nregs *
2259 				    sizeof(prof_ctx_t *);
2260 			}
2261 			try_redzone0_offset = try_run_size - (try_nregs *
2262 			    bin_info->reg_interval) - pad_size;
2263 		} while (try_hdr_size > try_redzone0_offset);
2264 	} while (try_run_size <= arena_maxclass
2266 	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2267 	    RUN_MAX_OVRHD_RELAX
2268 	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
2269 	    && try_nregs < RUN_MAXREGS);
2270 
2271 	assert(good_hdr_size <= good_redzone0_offset);
2272 
2273 	/* Copy final settings. */
2274 	bin_info->run_size = good_run_size;
2275 	bin_info->nregs = good_nregs;
2276 	bin_info->bitmap_offset = good_bitmap_offset;
2277 	bin_info->ctx0_offset = good_ctx0_offset;
2278 	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2279 
2280 	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2281 	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
2282 
2283 	return (good_run_size);
2284 }
2285 
2286 static void
2287 bin_info_init(void)
2288 {
2289 	arena_bin_info_t *bin_info;
2290 	size_t prev_run_size = PAGE;
2291 
2292 #define	SIZE_CLASS(bin, delta, size)					\
2293 	bin_info = &arena_bin_info[bin];				\
2294 	bin_info->reg_size = size;					\
2295 	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2296 	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2297 	SIZE_CLASSES
2298 #undef SIZE_CLASS
2299 }
2300 
2301 void
2302 arena_boot(void)
2303 {
2304 	size_t header_size;
2305 	unsigned i;
2306 
2307 	/*
2308 	 * Compute the header size such that it is large enough to contain the
2309 	 * page map.  The page map is biased to omit entries for the header
2310 	 * itself, so some iteration is necessary to compute the map bias.
2311 	 *
2312 	 * 1) Compute safe header_size and map_bias values that include enough
2313 	 *    space for an unbiased page map.
2314 	 * 2) Refine map_bias based on (1) to omit the header pages in the page
2315 	 *    map.  The resulting map_bias may be one too small.
2316 	 * 3) Refine map_bias based on (2).  The result will be >= the result
2317 	 *    from (2), and will always be correct.
2318 	 */
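	/*
	 * Hypothetical numbers (real values depend on the configuration):
	 * with 4 KiB pages, chunk_npages == 256, 32-byte map entries, and a
	 * 128-byte fixed header, pass 1 computes 128 + 32*256 == 8320 bytes
	 * -> map_bias == 3; pass 2 recomputes 128 + 32*(256-3) == 8224 bytes
	 * -> map_bias == 3 again; pass 3 confirms the fixed point.
	 */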
2319 	map_bias = 0;
2320 	for (i = 0; i < 3; i++) {
2321 		header_size = offsetof(arena_chunk_t, map) +
2322 		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2323 		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2324 		    != 0);
2325 	}
2326 	assert(map_bias > 0);
2327 
2328 	arena_maxclass = chunksize - (map_bias << LG_PAGE);
2329 
2330 	bin_info_init();
2331 }
2332 
2333 void
2334 arena_prefork(arena_t *arena)
2335 {
2336 	unsigned i;
2337 
2338 	malloc_mutex_prefork(&arena->lock);
2339 	for (i = 0; i < NBINS; i++)
2340 		malloc_mutex_prefork(&arena->bins[i].lock);
2341 }
2342 
2343 void
2344 arena_postfork_parent(arena_t *arena)
2345 {
2346 	unsigned i;
2347 
2348 	for (i = 0; i < NBINS; i++)
2349 		malloc_mutex_postfork_parent(&arena->bins[i].lock);
2350 	malloc_mutex_postfork_parent(&arena->lock);
2351 }
2352 
2353 void
2354 arena_postfork_child(arena_t *arena)
2355 {
2356 	unsigned i;
2357 
2358 	for (i = 0; i < NBINS; i++)
2359 		malloc_mutex_postfork_child(&arena->bins[i].lock);
2360 	malloc_mutex_postfork_child(&arena->lock);
2361 }
2362