1 #define JEMALLOC_EXTENT_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/extent_dss.h"
7 #include "jemalloc/internal/extent_mmap.h"
8 #include "jemalloc/internal/ph.h"
9 #include "jemalloc/internal/rtree.h"
10 #include "jemalloc/internal/mutex.h"
11 #include "jemalloc/internal/mutex_pool.h"
12
13 /******************************************************************************/
14 /* Data. */
15
16 rtree_t extents_rtree;
17 /* Keyed by the address of the extent_t being protected. */
18 mutex_pool_t extent_mutex_pool;
19
20 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
21
22 static const bitmap_info_t extents_bitmap_info =
23 BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
24
25 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26 size_t size, size_t alignment, bool *zero, bool *commit,
27 unsigned arena_ind);
28 static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29 size_t size, bool committed, unsigned arena_ind);
30 static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31 size_t size, bool committed, unsigned arena_ind);
32 static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33 size_t size, size_t offset, size_t length, unsigned arena_ind);
34 static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36 size_t length, bool growing_retained);
37 static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39 #ifdef PAGES_CAN_PURGE_LAZY
40 static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41 size_t size, size_t offset, size_t length, unsigned arena_ind);
42 #endif
43 static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45 size_t length, bool growing_retained);
46 #ifdef PAGES_CAN_PURGE_FORCED
47 static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
49 #endif
50 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52 size_t length, bool growing_retained);
53 #ifdef JEMALLOC_MAPS_COALESCE
54 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
55 size_t size, size_t size_a, size_t size_b, bool committed,
56 unsigned arena_ind);
57 #endif
58 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
59 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
60 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
61 bool growing_retained);
62 #ifdef JEMALLOC_MAPS_COALESCE
63 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
64 size_t size_a, void *addr_b, size_t size_b, bool committed,
65 unsigned arena_ind);
66 #endif
67 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
68 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
69 bool growing_retained);
70
71 const extent_hooks_t extent_hooks_default = {
72 extent_alloc_default,
73 extent_dalloc_default,
74 extent_destroy_default,
75 extent_commit_default,
76 extent_decommit_default
77 #ifdef PAGES_CAN_PURGE_LAZY
78 ,
79 extent_purge_lazy_default
80 #else
81 ,
82 NULL
83 #endif
84 #ifdef PAGES_CAN_PURGE_FORCED
85 ,
86 extent_purge_forced_default
87 #else
88 ,
89 NULL
90 #endif
91 #ifdef JEMALLOC_MAPS_COALESCE
92 ,
93 extent_split_default,
94 extent_merge_default
95 #endif
96 };
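/*
 * Note: when JEMALLOC_MAPS_COALESCE is not defined, the split and merge
 * members are simply omitted from the initializer above, so they are
 * zero-initialized (i.e. NULL) per C aggregate-initialization rules.
 */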
97
98 /* Used exclusively for gdump triggering. */
99 static atomic_zu_t curpages;
100 static atomic_zu_t highpages;
101
102 /******************************************************************************/
103 /*
104 * Function prototypes for static functions that are referenced prior to
105 * definition.
106 */
107
108 static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109 static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
110 extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
111 size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
112 bool *zero, bool *commit, bool growing_retained);
113 static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
114 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
115 extent_t *extent, bool *coalesced, bool growing_retained);
116 static void extent_record(tsdn_t *tsdn, arena_t *arena,
117 extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
118 bool growing_retained);
119
120 /******************************************************************************/
121
122 #define ATTR_NONE /* does nothing */
123
124 ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
125 extent_esnead_comp)
126
127 #undef ATTR_NONE
128
129 typedef enum {
130 lock_result_success,
131 lock_result_failure,
132 lock_result_no_extent
133 } lock_result_t;
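/*
 * Outcomes of trying to lock the extent mapped at an rtree leaf element:
 * the extent was locked and returned (success), the leaf's extent mapping
 * changed before the lock was acquired so the caller should retry
 * (failure), or there is no lockable extent at that address (no_extent).
 */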
134
135 static lock_result_t
136 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
137 extent_t **result, bool inactive_only) {
138 extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
139 elm, true);
140
141 /* Slab implies active extents and should be skipped. */
142 if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
143 &extents_rtree, elm, true))) {
144 return lock_result_no_extent;
145 }
146
147 /*
148 * It's possible that the extent changed out from under us, and with it
149 * the leaf->extent mapping. We have to recheck while holding the lock.
150 */
151 extent_lock(tsdn, extent1);
152 extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
153 &extents_rtree, elm, true);
154
155 if (extent1 == extent2) {
156 *result = extent1;
157 return lock_result_success;
158 } else {
159 extent_unlock(tsdn, extent1);
160 return lock_result_failure;
161 }
162 }
163
164 /*
165 * Returns a pool-locked extent_t * if there's one associated with the given
166 * address, and NULL otherwise.
167 */
168 static extent_t *
169 extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
170 bool inactive_only) {
171 extent_t *ret = NULL;
172 rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
173 rtree_ctx, (uintptr_t)addr, false, false);
174 if (elm == NULL) {
175 return NULL;
176 }
177 lock_result_t lock_result;
178 do {
179 lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
180 inactive_only);
181 } while (lock_result == lock_result_failure);
182 return ret;
183 }
184
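/*
 * Allocate an extent_t structure: reuse one from the arena's extent_avail
 * list if possible, otherwise fall back to base_alloc_extent().
 */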
185 extent_t *
186 extent_alloc(tsdn_t *tsdn, arena_t *arena) {
187 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
188 extent_t *extent = extent_avail_first(&arena->extent_avail);
189 if (extent == NULL) {
190 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
191 return base_alloc_extent(tsdn, arena->base);
192 }
193 extent_avail_remove(&arena->extent_avail, extent);
194 atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
195 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
196 return extent;
197 }
198
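/* Return an unused extent_t structure to the arena's extent_avail list. */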
199 void
200 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
201 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
202 extent_avail_insert(&arena->extent_avail, extent);
203 atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
204 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
205 }
206
207 extent_hooks_t *
208 extent_hooks_get(arena_t *arena) {
209 return base_extent_hooks_get(arena->base);
210 }
211
212 extent_hooks_t *
213 extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
214 background_thread_info_t *info;
215 if (have_background_thread) {
216 info = arena_background_thread_info_get(arena);
217 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
218 }
219 extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
220 if (have_background_thread) {
221 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
222 }
223
224 return ret;
225 }
226
227 static void
228 extent_hooks_assure_initialized(arena_t *arena,
229 extent_hooks_t **r_extent_hooks) {
230 if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
231 *r_extent_hooks = extent_hooks_get(arena);
232 }
233 }
234
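/*
 * Size quantization maps extent sizes onto page-size classes (adjusted for
 * sz_large_pad): the floor variant rounds down and the ceil variant rounds
 * up, so that extents can be bucketed into per-class heaps and looked up
 * conservatively.
 */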
235 #ifndef JEMALLOC_JET
236 static
237 #endif
238 size_t
239 extent_size_quantize_floor(size_t size) {
240 size_t ret;
241 pszind_t pind;
242
243 assert(size > 0);
244 assert((size & PAGE_MASK) == 0);
245
246 pind = sz_psz2ind(size - sz_large_pad + 1);
247 if (pind == 0) {
248 /*
249 * Avoid underflow. This short-circuit would also do the right
250 * thing for all sizes in the range for which there are
251 * PAGE-spaced size classes, but it's simplest to just handle
252 * the one case that would cause erroneous results.
253 */
254 return size;
255 }
256 ret = sz_pind2sz(pind - 1) + sz_large_pad;
257 assert(ret <= size);
258 return ret;
259 }
260
261 #ifndef JEMALLOC_JET
262 static
263 #endif
264 size_t
265 extent_size_quantize_ceil(size_t size) {
266 size_t ret;
267
268 assert(size > 0);
269 assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
270 assert((size & PAGE_MASK) == 0);
271
272 ret = extent_size_quantize_floor(size);
273 if (ret < size) {
274 /*
275 * Skip a quantization that may have an adequately large extent,
276 * because under-sized extents may be mixed in. This only
277 * happens when an unusual size is requested, i.e. for aligned
278 * allocation, and is just one of several places where linear
279 * search would potentially find sufficiently aligned available
280 * memory somewhere lower.
281 */
282 ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
283 sz_large_pad;
284 }
285 return ret;
286 }
287
288 /* Generate pairing heap functions. */
289 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
290
291 bool
292 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
293 bool delay_coalesce) {
294 if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
295 malloc_mutex_rank_exclusive)) {
296 return true;
297 }
298 for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
299 extent_heap_new(&extents->heaps[i]);
300 }
301 bitmap_init(extents->bitmap, &extents_bitmap_info, true);
302 extent_list_init(&extents->lru);
303 atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
304 extents->state = state;
305 extents->delay_coalesce = delay_coalesce;
306 return false;
307 }
308
309 extent_state_t
310 extents_state_get(const extents_t *extents) {
311 return extents->state;
312 }
313
314 size_t
315 extents_npages_get(extents_t *extents) {
316 return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
317 }
318
319 size_t
320 extents_nextents_get(extents_t *extents, pszind_t pind) {
321 return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
322 }
323
324 size_t
325 extents_nbytes_get(extents_t *extents, pszind_t pind) {
326 return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
327 }
328
329 static void
330 extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
331 size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
332 atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
333 cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
334 atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
335 }
336
337 static void
338 extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
339 size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
340 atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
341 cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
342 atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
343 }
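/*
 * The nextents/nbytes counters are only updated by the locked insert and
 * remove paths below (which assert ownership of extents->mtx), so plain
 * relaxed load/store pairs suffice here.
 */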
344
345 static void
346 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
347 malloc_mutex_assert_owner(tsdn, &extents->mtx);
348 assert(extent_state_get(extent) == extents->state);
349
350 size_t size = extent_size_get(extent);
351 size_t psz = extent_size_quantize_floor(size);
352 pszind_t pind = sz_psz2ind(psz);
353 if (extent_heap_empty(&extents->heaps[pind])) {
354 bitmap_unset(extents->bitmap, &extents_bitmap_info,
355 (size_t)pind);
356 }
357 extent_heap_insert(&extents->heaps[pind], extent);
358
359 if (config_stats) {
360 extents_stats_add(extents, pind, size);
361 }
362
363 extent_list_append(&extents->lru, extent);
364 size_t npages = size >> LG_PAGE;
365 /*
366 * All modifications to npages hold the mutex (as asserted above), so we
367 * don't need an atomic fetch-add; we can get by with a load followed by
368 * a store.
369 */
370 size_t cur_extents_npages =
371 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
372 atomic_store_zu(&extents->npages, cur_extents_npages + npages,
373 ATOMIC_RELAXED);
374 }
375
376 static void
377 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
378 malloc_mutex_assert_owner(tsdn, &extents->mtx);
379 assert(extent_state_get(extent) == extents->state);
380
381 size_t size = extent_size_get(extent);
382 size_t psz = extent_size_quantize_floor(size);
383 pszind_t pind = sz_psz2ind(psz);
384 extent_heap_remove(&extents->heaps[pind], extent);
385
386 if (config_stats) {
387 extents_stats_sub(extents, pind, size);
388 }
389
390 if (extent_heap_empty(&extents->heaps[pind])) {
391 bitmap_set(extents->bitmap, &extents_bitmap_info,
392 (size_t)pind);
393 }
394 extent_list_remove(&extents->lru, extent);
395 size_t npages = size >> LG_PAGE;
396 /*
397 * As in extents_insert_locked, we hold extents->mtx and so don't need
398 * atomic operations for updating extents->npages.
399 */
400 size_t cur_extents_npages =
401 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
402 assert(cur_extents_npages >= npages);
403 atomic_store_zu(&extents->npages,
404 cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
405 }
406
407 /*
408 * Find an extent with size [min_size, max_size) to satisfy the alignment
409 * requirement. For each size, try only the first extent in the heap.
410 */
411 static extent_t *
412 extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
413 size_t alignment) {
414 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
415 pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
416
417 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
418 &extents_bitmap_info, (size_t)pind); i < pind_max; i =
419 (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
420 (size_t)i+1)) {
421 assert(i < SC_NPSIZES);
422 assert(!extent_heap_empty(&extents->heaps[i]));
423 extent_t *extent = extent_heap_first(&extents->heaps[i]);
424 uintptr_t base = (uintptr_t)extent_base_get(extent);
425 size_t candidate_size = extent_size_get(extent);
426 assert(candidate_size >= min_size);
427
428 uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
429 PAGE_CEILING(alignment));
430 if (base > next_align || base + candidate_size <= next_align) {
431 /* Overflow, or the extent does not extend past the next aligned address. */
432 continue;
433 }
434
435 size_t leadsize = next_align - base;
436 if (candidate_size - leadsize >= min_size) {
437 return extent;
438 }
439 }
440
441 return NULL;
442 }
443
444 /* Do any-best-fit extent selection, i.e. select any extent that best fits. */
445 static extent_t *
446 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
447 size_t size) {
448 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
449 pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
450 (size_t)pind);
451 if (i < SC_NPSIZES + 1) {
452 /*
453 * In order to reduce fragmentation, avoid reusing and splitting
454 * large extents for much smaller sizes.
455 */
456 if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
457 return NULL;
458 }
459 assert(!extent_heap_empty(&extents->heaps[i]));
460 extent_t *extent = extent_heap_first(&extents->heaps[i]);
461 assert(extent_size_get(extent) >= size);
462 return extent;
463 }
464
465 return NULL;
466 }
467
468 /*
469 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
470 * large enough.
471 */
472 static extent_t *
473 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
474 size_t size) {
475 extent_t *ret = NULL;
476
477 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
478 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
479 &extents_bitmap_info, (size_t)pind);
480 i < SC_NPSIZES + 1;
481 i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
482 (size_t)i+1)) {
483 assert(!extent_heap_empty(&extents->heaps[i]));
484 extent_t *extent = extent_heap_first(&extents->heaps[i]);
485 assert(extent_size_get(extent) >= size);
486 if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
487 ret = extent;
488 }
489 if (i == SC_NPSIZES) {
490 break;
491 }
492 assert(i < SC_NPSIZES);
493 }
494
495 return ret;
496 }
497
498 /*
499 * Do {best,first}-fit extent selection, where the selection policy choice is
500 * based on extents->delay_coalesce. Best-fit selection requires less
501 * searching, but its layout policy is less stable and may cause higher virtual
502 * memory fragmentation as a side effect.
503 */
504 static extent_t *
505 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
506 size_t esize, size_t alignment) {
507 malloc_mutex_assert_owner(tsdn, &extents->mtx);
508
509 size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
510 /* Beware size_t wrap-around. */
511 if (max_size < esize) {
512 return NULL;
513 }
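/*
 * Illustrative example: with esize == 3 * PAGE and alignment == 2 * PAGE,
 * max_size == 4 * PAGE; any page-aligned extent of that size necessarily
 * contains a 2-page-aligned range of at least esize bytes.
 */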
514
515 extent_t *extent = extents->delay_coalesce ?
516 extents_best_fit_locked(tsdn, arena, extents, max_size) :
517 extents_first_fit_locked(tsdn, arena, extents, max_size);
518
519 if (alignment > PAGE && extent == NULL) {
520 /*
521 * max_size guarantees the alignment requirement but is rather
522 * pessimistic. Next we try to satisfy the aligned allocation
523 * with sizes in [esize, max_size).
524 */
525 extent = extents_fit_alignment(extents, esize, max_size,
526 alignment);
527 }
528
529 return extent;
530 }
531
532 static bool
533 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
534 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
535 extent_t *extent) {
536 extent_state_set(extent, extent_state_active);
537 bool coalesced;
538 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
539 extents, extent, &coalesced, false);
540 extent_state_set(extent, extents_state_get(extents));
541
542 if (!coalesced) {
543 return true;
544 }
545 extents_insert_locked(tsdn, extents, extent);
546 return false;
547 }
548
549 extent_t *
550 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
551 extents_t *extents, void *new_addr, size_t size, size_t pad,
552 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
553 assert(size + pad != 0);
554 assert(alignment != 0);
555 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
556 WITNESS_RANK_CORE, 0);
557
558 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
559 new_addr, size, pad, alignment, slab, szind, zero, commit, false);
560 assert(extent == NULL || extent_dumpable_get(extent));
561 return extent;
562 }
563
564 void
565 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
566 extents_t *extents, extent_t *extent) {
567 assert(extent_base_get(extent) != NULL);
568 assert(extent_size_get(extent) != 0);
569 assert(extent_dumpable_get(extent));
570 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
571 WITNESS_RANK_CORE, 0);
572
573 extent_addr_set(extent, extent_base_get(extent));
574 extent_zeroed_set(extent, false);
575
576 extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
577 }
578
579 extent_t *
580 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
581 extents_t *extents, size_t npages_min) {
582 rtree_ctx_t rtree_ctx_fallback;
583 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
584
585 malloc_mutex_lock(tsdn, &extents->mtx);
586
587 /*
588 * Get the LRU coalesced extent, if any. If coalescing was delayed,
589 * the loop will iterate until the LRU extent is fully coalesced.
590 */
591 extent_t *extent;
592 while (true) {
593 /* Get the LRU extent, if any. */
594 extent = extent_list_first(&extents->lru);
595 if (extent == NULL) {
596 goto label_return;
597 }
598 /* Check the eviction limit. */
599 size_t extents_npages = atomic_load_zu(&extents->npages,
600 ATOMIC_RELAXED);
601 if (extents_npages <= npages_min) {
602 extent = NULL;
603 goto label_return;
604 }
605 extents_remove_locked(tsdn, extents, extent);
606 if (!extents->delay_coalesce) {
607 break;
608 }
609 /* Try to coalesce. */
610 if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
611 rtree_ctx, extents, extent)) {
612 break;
613 }
614 /*
615 * The LRU extent was just coalesced and the result placed in
616 * the LRU at its neighbor's position. Start over.
617 */
618 }
619
620 /*
621 * Either mark the extent active or deregister it to protect against
622 * concurrent operations.
623 */
624 switch (extents_state_get(extents)) {
625 case extent_state_active:
626 not_reached();
627 case extent_state_dirty:
628 case extent_state_muzzy:
629 extent_state_set(extent, extent_state_active);
630 break;
631 case extent_state_retained:
632 extent_deregister(tsdn, extent);
633 break;
634 default:
635 not_reached();
636 }
637
638 label_return:
639 malloc_mutex_unlock(tsdn, &extents->mtx);
640 return extent;
641 }
642
643 static void
644 extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
645 extents_t *extents, extent_t *extent, bool growing_retained) {
646 /*
647 * Leak extent after making sure its pages have already been purged, so
648 * that this is only a virtual memory leak.
649 */
650 if (extents_state_get(extents) == extent_state_dirty) {
651 if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
652 extent, 0, extent_size_get(extent), growing_retained)) {
653 extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
654 extent, 0, extent_size_get(extent),
655 growing_retained);
656 }
657 }
658 extent_dalloc(tsdn, arena, extent);
659 }
660
661 void
662 extents_prefork(tsdn_t *tsdn, extents_t *extents) {
663 malloc_mutex_prefork(tsdn, &extents->mtx);
664 }
665
666 void
667 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
668 malloc_mutex_postfork_parent(tsdn, &extents->mtx);
669 }
670
671 void
672 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
673 malloc_mutex_postfork_child(tsdn, &extents->mtx);
674 }
675
676 static void
677 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
678 extent_t *extent) {
679 assert(extent_arena_get(extent) == arena);
680 assert(extent_state_get(extent) == extent_state_active);
681
682 extent_state_set(extent, extents_state_get(extents));
683 extents_insert_locked(tsdn, extents, extent);
684 }
685
686 static void
687 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
688 extent_t *extent) {
689 malloc_mutex_lock(tsdn, &extents->mtx);
690 extent_deactivate_locked(tsdn, arena, extents, extent);
691 malloc_mutex_unlock(tsdn, &extents->mtx);
692 }
693
694 static void
695 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
696 extent_t *extent) {
697 assert(extent_arena_get(extent) == arena);
698 assert(extent_state_get(extent) == extents_state_get(extents));
699
700 extents_remove_locked(tsdn, extents, extent);
701 extent_state_set(extent, extent_state_active);
702 }
703
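/*
 * Look up the rtree leaf elements covering the extent's first and last
 * pages. When !dependent, the lookups may fail, in which case true is
 * returned.
 */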
704 static bool
705 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
706 const extent_t *extent, bool dependent, bool init_missing,
707 rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
708 *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
709 (uintptr_t)extent_base_get(extent), dependent, init_missing);
710 if (!dependent && *r_elm_a == NULL) {
711 return true;
712 }
713 assert(*r_elm_a != NULL);
714
715 *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
716 (uintptr_t)extent_last_get(extent), dependent, init_missing);
717 if (!dependent && *r_elm_b == NULL) {
718 return true;
719 }
720 assert(*r_elm_b != NULL);
721
722 return false;
723 }
724
725 static void
726 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
727 rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
728 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
729 if (elm_b != NULL) {
730 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
731 slab);
732 }
733 }
734
735 static void
736 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
737 szind_t szind) {
738 assert(extent_slab_get(extent));
739
740 /* Register interior. */
741 for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
742 rtree_write(tsdn, &extents_rtree, rtree_ctx,
743 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
744 LG_PAGE), extent, szind, true);
745 }
746 }
747
748 static void
749 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
750 cassert(config_prof);
751 /* prof_gdump() requirement. */
752 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
753 WITNESS_RANK_CORE, 0);
754
755 if (opt_prof && extent_state_get(extent) == extent_state_active) {
756 size_t nadd = extent_size_get(extent) >> LG_PAGE;
757 size_t cur = atomic_fetch_add_zu(&curpages, nadd,
758 ATOMIC_RELAXED) + nadd;
759 size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
760 while (cur > high && !atomic_compare_exchange_weak_zu(
761 &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
762 /*
763 * Don't refresh cur, because it may have decreased
764 * since this thread lost the highpages update race.
765 * Note that high is updated in case of CAS failure.
766 */
767 }
768 if (cur > high && prof_gdump_get_unlocked()) {
769 prof_gdump(tsdn);
770 }
771 }
772 }
773
774 static void
775 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
776 cassert(config_prof);
777
778 if (opt_prof && extent_state_get(extent) == extent_state_active) {
779 size_t nsub = extent_size_get(extent) >> LG_PAGE;
780 assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
781 atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
782 }
783 }
784
785 static bool
786 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
787 rtree_ctx_t rtree_ctx_fallback;
788 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
789 rtree_leaf_elm_t *elm_a, *elm_b;
790
791 /*
792 * We need to hold the lock to protect against a concurrent coalesce
793 * operation that sees us in a partial state.
794 */
795 extent_lock(tsdn, extent);
796
797 if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
798 &elm_a, &elm_b)) {
/* Drop the pool lock on the error path; the extent was locked above. */
extent_unlock(tsdn, extent);
799 return true;
800 }
801
802 szind_t szind = extent_szind_get_maybe_invalid(extent);
803 bool slab = extent_slab_get(extent);
804 extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
805 if (slab) {
806 extent_interior_register(tsdn, rtree_ctx, extent, szind);
807 }
808
809 extent_unlock(tsdn, extent);
810
811 if (config_prof && gdump_add) {
812 extent_gdump_add(tsdn, extent);
813 }
814
815 return false;
816 }
817
818 static bool
819 extent_register(tsdn_t *tsdn, extent_t *extent) {
820 return extent_register_impl(tsdn, extent, true);
821 }
822
823 static bool
824 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
825 return extent_register_impl(tsdn, extent, false);
826 }
827
828 static void
829 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
830 bool err = extent_register(tsdn, extent);
831 assert(!err);
832 }
833
834 /*
835 * Removes all pointers to the given extent from the global rtree indices for
836 * its interior. This is relevant for slab extents, for which we need to do
837 * metadata lookups at places other than the head of the extent. We deregister
838 * on the interior, then, when an extent moves from being an active slab to an
839 * inactive state.
840 */
841 static void
842 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
843 extent_t *extent) {
844 size_t i;
845
846 assert(extent_slab_get(extent));
847
848 for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
849 rtree_clear(tsdn, &extents_rtree, rtree_ctx,
850 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
851 LG_PAGE));
852 }
853 }
854
855 /*
856 * Removes all pointers to the given extent from the global rtree.
857 */
858 static void
859 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
860 rtree_ctx_t rtree_ctx_fallback;
861 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
862 rtree_leaf_elm_t *elm_a, *elm_b;
863 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
864 &elm_a, &elm_b);
865
866 extent_lock(tsdn, extent);
867
868 extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
869 if (extent_slab_get(extent)) {
870 extent_interior_deregister(tsdn, rtree_ctx, extent);
871 extent_slab_set(extent, false);
872 }
873
874 extent_unlock(tsdn, extent);
875
876 if (config_prof && gdump) {
877 extent_gdump_sub(tsdn, extent);
878 }
879 }
880
881 static void
882 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
883 extent_deregister_impl(tsdn, extent, true);
884 }
885
886 static void
887 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
888 extent_deregister_impl(tsdn, extent, false);
889 }
890
891 /*
892 * Tries to find and remove an extent from extents that can be used for the
893 * given allocation request.
894 */
895 static extent_t *
896 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
897 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
898 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
899 bool growing_retained) {
900 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
901 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
902 assert(alignment > 0);
903 if (config_debug && new_addr != NULL) {
904 /*
905 * Non-NULL new_addr has two use cases:
906 *
907 * 1) Recycle a known-extant extent, e.g. during purging.
908 * 2) Perform in-place expanding reallocation.
909 *
910 * Regardless of use case, new_addr must either refer to a
911 * non-existing extent, or to the base of an extant extent,
912 * since only active slabs support interior lookups (which of
913 * course cannot be recycled).
914 */
915 assert(PAGE_ADDR2BASE(new_addr) == new_addr);
916 assert(pad == 0);
917 assert(alignment <= PAGE);
918 }
919
920 size_t esize = size + pad;
921 malloc_mutex_lock(tsdn, &extents->mtx);
922 extent_hooks_assure_initialized(arena, r_extent_hooks);
923 extent_t *extent;
924 if (new_addr != NULL) {
925 extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
926 false);
927 if (extent != NULL) {
928 /*
929 * We might null-out extent to report an error, but we
930 * still need to unlock the associated mutex after.
931 */
932 extent_t *unlock_extent = extent;
933 assert(extent_base_get(extent) == new_addr);
934 if (extent_arena_get(extent) != arena ||
935 extent_size_get(extent) < esize ||
936 extent_state_get(extent) !=
937 extents_state_get(extents)) {
938 extent = NULL;
939 }
940 extent_unlock(tsdn, unlock_extent);
941 }
942 } else {
943 extent = extents_fit_locked(tsdn, arena, extents, esize,
944 alignment);
945 }
946 if (extent == NULL) {
947 malloc_mutex_unlock(tsdn, &extents->mtx);
948 return NULL;
949 }
950
951 extent_activate_locked(tsdn, arena, extents, extent);
952 malloc_mutex_unlock(tsdn, &extents->mtx);
953
954 return extent;
955 }
956
957 /*
958 * Given an allocation request and an extent guaranteed to be able to satisfy
959 * it, this splits off lead and trail extents, leaving extent pointing to an
960 * extent satisfying the allocation.
961 * This function doesn't put lead or trail into any extents_t; it's the caller's
962 * job to ensure that they can be reused.
963 */
964 typedef enum {
965 /*
966 * Split successfully. lead, extent, and trail, are modified to extents
967 * describing the ranges before, in, and after the given allocation.
968 */
969 extent_split_interior_ok,
970 /*
971 * The extent can't satisfy the given allocation request. None of the
972 * input extent_t *s are touched.
973 */
974 extent_split_interior_cant_alloc,
975 /*
976 * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
977 * and salvage what's still salvageable (if *to_salvage is non-NULL).
978 * None of lead, extent, or trail are valid.
979 */
980 extent_split_interior_error
981 } extent_split_interior_result_t;
982
983 static extent_split_interior_result_t
984 extent_split_interior(tsdn_t *tsdn, arena_t *arena,
985 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
986 /* The result of splitting, in case of success. */
987 extent_t **extent, extent_t **lead, extent_t **trail,
988 /* The mess to clean up, in case of error. */
989 extent_t **to_leak, extent_t **to_salvage,
990 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
991 szind_t szind, bool growing_retained) {
992 size_t esize = size + pad;
993 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
994 PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
995 assert(new_addr == NULL || leadsize == 0);
996 if (extent_size_get(*extent) < leadsize + esize) {
997 return extent_split_interior_cant_alloc;
998 }
999 size_t trailsize = extent_size_get(*extent) - leadsize - esize;
1000
1001 *lead = NULL;
1002 *trail = NULL;
1003 *to_leak = NULL;
1004 *to_salvage = NULL;
1005
1006 /* Split the lead. */
1007 if (leadsize != 0) {
1008 *lead = *extent;
1009 *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
1010 *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
1011 slab, growing_retained);
1012 if (*extent == NULL) {
1013 *to_leak = *lead;
1014 *lead = NULL;
1015 return extent_split_interior_error;
1016 }
1017 }
1018
1019 /* Split the trail. */
1020 if (trailsize != 0) {
1021 *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
1022 esize, szind, slab, trailsize, SC_NSIZES, false,
1023 growing_retained);
1024 if (*trail == NULL) {
1025 *to_leak = *extent;
1026 *to_salvage = *lead;
1027 *lead = NULL;
1028 *extent = NULL;
1029 return extent_split_interior_error;
1030 }
1031 }
1032
1033 if (leadsize == 0 && trailsize == 0) {
1034 /*
1035 * Splitting causes szind to be set as a side effect, but no
1036 * splitting occurred.
1037 */
1038 extent_szind_set(*extent, szind);
1039 if (szind != SC_NSIZES) {
1040 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
1041 (uintptr_t)extent_addr_get(*extent), szind, slab);
1042 if (slab && extent_size_get(*extent) > PAGE) {
1043 rtree_szind_slab_update(tsdn, &extents_rtree,
1044 rtree_ctx,
1045 (uintptr_t)extent_past_get(*extent) -
1046 (uintptr_t)PAGE, szind, slab);
1047 }
1048 }
1049 }
1050
1051 return extent_split_interior_ok;
1052 }
1053
1054 /*
1055 * This fulfills the indicated allocation request out of the given extent (which
1056 * the caller should have ensured was big enough). If there's any unused space
1057 * before or after the resulting allocation, that space is given its own extent
1058 * and put back into extents.
1059 */
1060 static extent_t *
1061 extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1062 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1063 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1064 szind_t szind, extent_t *extent, bool growing_retained) {
1065 extent_t *lead;
1066 extent_t *trail;
1067 extent_t *to_leak;
1068 extent_t *to_salvage;
1069
1070 extent_split_interior_result_t result = extent_split_interior(
1071 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1072 &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1073 growing_retained);
1074
1075 if (result == extent_split_interior_ok) {
1076 if (lead != NULL) {
1077 extent_deactivate(tsdn, arena, extents, lead);
1078 }
1079 if (trail != NULL) {
1080 extent_deactivate(tsdn, arena, extents, trail);
1081 }
1082 return extent;
1083 } else {
1084 /*
1085 * We should have picked an extent that was large enough to
1086 * fulfill our allocation request.
1087 */
1088 assert(result == extent_split_interior_error);
1089 if (to_salvage != NULL) {
1090 extent_deregister(tsdn, to_salvage);
1091 }
1092 if (to_leak != NULL) {
1093 void *leak = extent_base_get(to_leak);
1094 extent_deregister_no_gdump_sub(tsdn, to_leak);
1095 extents_leak(tsdn, arena, r_extent_hooks, extents,
1096 to_leak, growing_retained);
1097 assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
1098 false) == NULL);
1099 }
1100 return NULL;
1101 }
1102 unreachable();
1103 }
1104
1105 static bool
1106 extent_need_manual_zero(arena_t *arena) {
1107 /*
1108 * Need to manually zero the extent on repopulating if either: 1) non-
1109 * default extent hooks are installed (in which case the purge semantics
1110 * may change); or 2) transparent huge pages are enabled.
1111 */
1112 return (!arena_has_default_hooks(arena) ||
1113 (opt_thp == thp_mode_always));
1114 }
1115
1116 /*
1117 * Tries to satisfy the given allocation request by reusing one of the extents
1118 * in the given extents_t.
1119 */
1120 static extent_t *
1121 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1122 extents_t *extents, void *new_addr, size_t size, size_t pad,
1123 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1124 bool growing_retained) {
1125 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1126 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1127 assert(new_addr == NULL || !slab);
1128 assert(pad == 0 || !slab);
1129 assert(!*zero || !slab);
1130
1131 rtree_ctx_t rtree_ctx_fallback;
1132 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1133
1134 extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1135 rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1136 growing_retained);
1137 if (extent == NULL) {
1138 return NULL;
1139 }
1140
1141 extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1142 extents, new_addr, size, pad, alignment, slab, szind, extent,
1143 growing_retained);
1144 if (extent == NULL) {
1145 return NULL;
1146 }
1147
1148 if (*commit && !extent_committed_get(extent)) {
1149 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1150 0, extent_size_get(extent), growing_retained)) {
1151 extent_record(tsdn, arena, r_extent_hooks, extents,
1152 extent, growing_retained);
1153 return NULL;
1154 }
1155 if (!extent_need_manual_zero(arena)) {
1156 extent_zeroed_set(extent, true);
1157 }
1158 }
1159
1160 if (extent_committed_get(extent)) {
1161 *commit = true;
1162 }
1163 if (extent_zeroed_get(extent)) {
1164 *zero = true;
1165 }
1166
1167 if (pad != 0) {
1168 extent_addr_randomize(tsdn, extent, alignment);
1169 }
1170 assert(extent_state_get(extent) == extent_state_active);
1171 if (slab) {
1172 extent_slab_set(extent, slab);
1173 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1174 }
1175
1176 if (*zero) {
1177 void *addr = extent_base_get(extent);
1178 if (!extent_zeroed_get(extent)) {
1179 size_t size = extent_size_get(extent);
1180 if (extent_need_manual_zero(arena) ||
1181 pages_purge_forced(addr, size)) {
1182 memset(addr, 0, size);
1183 }
1184 } else if (config_debug) {
1185 size_t *p = (size_t *)(uintptr_t)addr;
1186 /* Check the first page only. */
1187 for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
1188 assert(p[i] == 0);
1189 }
1190 }
1191 }
1192 return extent;
1193 }
1194
1195 /*
1196 * If the caller specifies (!*zero), it is still possible to receive zeroed
1197 * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
1198 * advantage of this to avoid demanding zeroed extents, but taking advantage of
1199 * them if they are returned.
1200 */
1201 static void *
1202 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1203 size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1204 void *ret;
1205
1206 assert(size != 0);
1207 assert(alignment != 0);
1208
1209 /* "primary" dss. */
1210 if (have_dss && dss_prec == dss_prec_primary && (ret =
1211 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1212 commit)) != NULL) {
1213 return ret;
1214 }
1215 /* mmap. */
1216 if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1217 != NULL) {
1218 return ret;
1219 }
1220 /* "secondary" dss. */
1221 if (have_dss && dss_prec == dss_prec_secondary && (ret =
1222 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1223 commit)) != NULL) {
1224 return ret;
1225 }
1226
1227 /* All strategies for allocation failed. */
1228 return NULL;
1229 }
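/*
 * Illustrative caller sketch (hypothetical; parameter values are for
 * exposition only):
 *
 *   bool zero = false, commit = true;
 *   void *addr = extent_alloc_core(tsdn, arena, NULL, size, PAGE,
 *       &zero, &commit, dss_prec_secondary);
 *
 * A NULL result means all allocation strategies failed; otherwise *zero and
 * *commit reflect the state of the returned pages.
 */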
1230
1231 static void *
1232 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1233 size_t size, size_t alignment, bool *zero, bool *commit) {
1234 void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1235 commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1236 ATOMIC_RELAXED));
1237 if (have_madvise_huge && ret) {
1238 pages_set_thp_state(ret, size);
1239 }
1240 return ret;
1241 }
1242
1243 static void *
1244 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1245 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1246 tsdn_t *tsdn;
1247 arena_t *arena;
1248
1249 tsdn = tsdn_fetch();
1250 arena = arena_get(tsdn, arena_ind, false);
1251 /*
1252 * The arena we're allocating on behalf of must have been initialized
1253 * already.
1254 */
1255 assert(arena != NULL);
1256
1257 return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1258 alignment, zero, commit);
1259 }
1260
1261 static void
1262 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1263 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1264 if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1265 /*
1266 * The only legitimate case of customized extent hooks for a0 is
1267 * hooks with no allocation activities. One such example is to
1268 * place metadata on pre-allocated resources such as huge pages.
1269 * In that case, rely on reentrancy_level checks to catch
1270 * infinite recursions.
1271 */
1272 pre_reentrancy(tsd, NULL);
1273 } else {
1274 pre_reentrancy(tsd, arena);
1275 }
1276 }
1277
1278 static void
1279 extent_hook_post_reentrancy(tsdn_t *tsdn) {
1280 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1281 post_reentrancy(tsd);
1282 }
1283
1284 /*
1285 * If virtual memory is retained, create increasingly larger extents from which
1286 * to split requested extents in order to limit the total number of disjoint
1287 * virtual memory ranges retained by each arena.
1288 */
1289 static extent_t *
1290 extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1291 extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1292 bool slab, szind_t szind, bool *zero, bool *commit) {
1293 malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1294 assert(pad == 0 || !slab);
1295 assert(!*zero || !slab);
1296
1297 size_t esize = size + pad;
1298 size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1299 /* Beware size_t wrap-around. */
1300 if (alloc_size_min < esize) {
1301 goto label_err;
1302 }
1303 /*
1304 * Find the next extent size in the series that would be large enough to
1305 * satisfy this request.
1306 */
1307 pszind_t egn_skip = 0;
1308 size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1309 while (alloc_size < alloc_size_min) {
1310 egn_skip++;
1311 if (arena->extent_grow_next + egn_skip >=
1312 sz_psz2ind(SC_LARGE_MAXCLASS)) {
1313 /* Outside legal range. */
1314 goto label_err;
1315 }
1316 alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1317 }
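/*
 * At this point alloc_size is the first size class at index
 * extent_grow_next + egn_skip that is large enough for the request; since
 * extent_grow_next is advanced on success (below), retained mappings grow
 * along the psz size-class series across calls.
 */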
1318
1319 extent_t *extent = extent_alloc(tsdn, arena);
1320 if (extent == NULL) {
1321 goto label_err;
1322 }
1323 bool zeroed = false;
1324 bool committed = false;
1325
1326 void *ptr;
1327 if (*r_extent_hooks == &extent_hooks_default) {
1328 ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1329 alloc_size, PAGE, &zeroed, &committed);
1330 } else {
1331 extent_hook_pre_reentrancy(tsdn, arena);
1332 ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1333 alloc_size, PAGE, &zeroed, &committed,
1334 arena_ind_get(arena));
1335 extent_hook_post_reentrancy(tsdn);
1336 }
1337
1338 extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
1339 arena_extent_sn_next(arena), extent_state_active, zeroed,
1340 committed, true);
1341 if (ptr == NULL) {
1342 extent_dalloc(tsdn, arena, extent);
1343 goto label_err;
1344 }
1345
1346 if (extent_register_no_gdump_add(tsdn, extent)) {
1347 extents_leak(tsdn, arena, r_extent_hooks,
1348 &arena->extents_retained, extent, true);
1349 goto label_err;
1350 }
1351
1352 if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1353 *zero = true;
1354 }
1355 if (extent_committed_get(extent)) {
1356 *commit = true;
1357 }
1358
1359 rtree_ctx_t rtree_ctx_fallback;
1360 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1361
1362 extent_t *lead;
1363 extent_t *trail;
1364 extent_t *to_leak;
1365 extent_t *to_salvage;
1366 extent_split_interior_result_t result = extent_split_interior(
1367 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1368 &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1369 true);
1370
1371 if (result == extent_split_interior_ok) {
1372 if (lead != NULL) {
1373 extent_record(tsdn, arena, r_extent_hooks,
1374 &arena->extents_retained, lead, true);
1375 }
1376 if (trail != NULL) {
1377 extent_record(tsdn, arena, r_extent_hooks,
1378 &arena->extents_retained, trail, true);
1379 }
1380 } else {
1381 /*
1382 * We should have allocated a sufficiently large extent; the
1383 * cant_alloc case should not occur.
1384 */
1385 assert(result == extent_split_interior_error);
1386 if (to_salvage != NULL) {
1387 if (config_prof) {
1388 extent_gdump_add(tsdn, to_salvage);
1389 }
1390 extent_record(tsdn, arena, r_extent_hooks,
1391 &arena->extents_retained, to_salvage, true);
1392 }
1393 if (to_leak != NULL) {
1394 extent_deregister_no_gdump_sub(tsdn, to_leak);
1395 extents_leak(tsdn, arena, r_extent_hooks,
1396 &arena->extents_retained, to_leak, true);
1397 }
1398 goto label_err;
1399 }
1400
1401 if (*commit && !extent_committed_get(extent)) {
1402 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1403 extent_size_get(extent), true)) {
1404 extent_record(tsdn, arena, r_extent_hooks,
1405 &arena->extents_retained, extent, true);
1406 goto label_err;
1407 }
1408 if (!extent_need_manual_zero(arena)) {
1409 extent_zeroed_set(extent, true);
1410 }
1411 }
1412
1413 /*
1414 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1415 * range.
1416 */
1417 if (arena->extent_grow_next + egn_skip + 1 <=
1418 arena->retain_grow_limit) {
1419 arena->extent_grow_next += egn_skip + 1;
1420 } else {
1421 arena->extent_grow_next = arena->retain_grow_limit;
1422 }
1423 /* All opportunities for failure are past. */
1424 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1425
1426 if (config_prof) {
1427 /* Adjust gdump stats now that extent is final size. */
1428 extent_gdump_add(tsdn, extent);
1429 }
1430 if (pad != 0) {
1431 extent_addr_randomize(tsdn, extent, alignment);
1432 }
1433 if (slab) {
1434 rtree_ctx_t rtree_ctx_fallback;
1435 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1436 &rtree_ctx_fallback);
1437
1438 extent_slab_set(extent, true);
1439 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1440 }
1441 if (*zero && !extent_zeroed_get(extent)) {
1442 void *addr = extent_base_get(extent);
1443 size_t size = extent_size_get(extent);
1444 if (extent_need_manual_zero(arena) ||
1445 pages_purge_forced(addr, size)) {
1446 memset(addr, 0, size);
1447 }
1448 }
1449
1450 return extent;
1451 label_err:
1452 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1453 return NULL;
1454 }
1455
1456 static extent_t *
1457 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1458 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1459 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1460 assert(size != 0);
1461 assert(alignment != 0);
1462
1463 malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1464
1465 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1466 &arena->extents_retained, new_addr, size, pad, alignment, slab,
1467 szind, zero, commit, true);
1468 if (extent != NULL) {
1469 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1470 if (config_prof) {
1471 extent_gdump_add(tsdn, extent);
1472 }
1473 } else if (opt_retain && new_addr == NULL) {
1474 extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1475 pad, alignment, slab, szind, zero, commit);
1476 /* extent_grow_retained() always releases extent_grow_mtx. */
1477 } else {
1478 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1479 }
1480 malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1481
1482 return extent;
1483 }
1484
1485 static extent_t *
1486 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1487 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1488 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1489 size_t esize = size + pad;
1490 extent_t *extent = extent_alloc(tsdn, arena);
1491 if (extent == NULL) {
1492 return NULL;
1493 }
1494 void *addr;
1495 if (*r_extent_hooks == &extent_hooks_default) {
1496 /* Call directly to propagate tsdn. */
1497 addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1498 alignment, zero, commit);
1499 } else {
1500 extent_hook_pre_reentrancy(tsdn, arena);
1501 addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1502 esize, alignment, zero, commit, arena_ind_get(arena));
1503 extent_hook_post_reentrancy(tsdn);
1504 }
1505 if (addr == NULL) {
1506 extent_dalloc(tsdn, arena, extent);
1507 return NULL;
1508 }
1509 extent_init(extent, arena, addr, esize, slab, szind,
1510 arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1511 true);
1512 if (pad != 0) {
1513 extent_addr_randomize(tsdn, extent, alignment);
1514 }
1515 if (extent_register(tsdn, extent)) {
1516 extents_leak(tsdn, arena, r_extent_hooks,
1517 &arena->extents_retained, extent, false);
1518 return NULL;
1519 }
1520
1521 return extent;
1522 }
1523
1524 extent_t *
1525 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1526 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1527 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1528 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1529 WITNESS_RANK_CORE, 0);
1530
1531 extent_hooks_assure_initialized(arena, r_extent_hooks);
1532
1533 extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1534 new_addr, size, pad, alignment, slab, szind, zero, commit);
1535 if (extent == NULL) {
1536 if (opt_retain && new_addr != NULL) {
1537 			/*
1538 			 * When retain is enabled and new_addr is set, do not
1539 			 * attempt extent_alloc_wrapper_hard: the mmap it does
1540 			 * is very unlikely to succeed at new_addr (unless
1541 			 * new_addr happens to be at the end of existing mappings).
1542 			 */
1543 return NULL;
1544 }
1545 extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1546 new_addr, size, pad, alignment, slab, szind, zero, commit);
1547 }
1548
1549 assert(extent == NULL || extent_dumpable_get(extent));
1550 return extent;
1551 }
1552
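/*
 * Coalescing is possible only when both extents belong to the same arena, the
 * candidate neighbor is in the state managed by 'extents', and the two sides
 * have the same commitment status.
 */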
1553 static bool
1554 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1555 const extent_t *outer) {
1556 assert(extent_arena_get(inner) == arena);
1557 if (extent_arena_get(outer) != arena) {
1558 return false;
1559 }
1560
1561 assert(extent_state_get(inner) == extent_state_active);
1562 if (extent_state_get(outer) != extents->state) {
1563 return false;
1564 }
1565
1566 if (extent_committed_get(inner) != extent_committed_get(outer)) {
1567 return false;
1568 }
1569
1570 return true;
1571 }
1572
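/*
 * Merge 'inner' (active) with 'outer' (resident in 'extents').  The outer
 * extent is activated and extents->mtx is dropped around the merge so that the
 * extent hooks can run without the container lock held; if the merge fails,
 * the outer extent is deactivated back into the container.
 */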
1573 static bool
1574 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1575 extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1576 bool growing_retained) {
1577 assert(extent_can_coalesce(arena, extents, inner, outer));
1578
1579 extent_activate_locked(tsdn, arena, extents, outer);
1580
1581 malloc_mutex_unlock(tsdn, &extents->mtx);
1582 bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1583 forward ? inner : outer, forward ? outer : inner, growing_retained);
1584 malloc_mutex_lock(tsdn, &extents->mtx);
1585
1586 if (err) {
1587 extent_deactivate_locked(tsdn, arena, extents, outer);
1588 }
1589
1590 return err;
1591 }
1592
1593 static extent_t *
1594 extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
1595 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1596 extent_t *extent, bool *coalesced, bool growing_retained,
1597 bool inactive_only) {
1598 /*
1599 * We avoid checking / locking inactive neighbors for large size
1600 	 * classes, since they are eagerly coalesced on deallocation, which can
1601 	 * cause lock contention.
1602 */
1603 /*
1604 * Continue attempting to coalesce until failure, to protect against
1605 * races with other threads that are thwarted by this one.
1606 */
1607 bool again;
1608 do {
1609 again = false;
1610
1611 /* Try to coalesce forward. */
1612 extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1613 extent_past_get(extent), inactive_only);
1614 if (next != NULL) {
1615 /*
1616 * extents->mtx only protects against races for
1617 * like-state extents, so call extent_can_coalesce()
1618 * before releasing next's pool lock.
1619 */
1620 bool can_coalesce = extent_can_coalesce(arena, extents,
1621 extent, next);
1622
1623 extent_unlock(tsdn, next);
1624
1625 if (can_coalesce && !extent_coalesce(tsdn, arena,
1626 r_extent_hooks, extents, extent, next, true,
1627 growing_retained)) {
1628 if (extents->delay_coalesce) {
1629 /* Do minimal coalescing. */
1630 *coalesced = true;
1631 return extent;
1632 }
1633 again = true;
1634 }
1635 }
1636
1637 /* Try to coalesce backward. */
1638 extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1639 extent_before_get(extent), inactive_only);
1640 if (prev != NULL) {
1641 bool can_coalesce = extent_can_coalesce(arena, extents,
1642 extent, prev);
1643 extent_unlock(tsdn, prev);
1644
1645 if (can_coalesce && !extent_coalesce(tsdn, arena,
1646 r_extent_hooks, extents, extent, prev, false,
1647 growing_retained)) {
1648 extent = prev;
1649 if (extents->delay_coalesce) {
1650 /* Do minimal coalescing. */
1651 *coalesced = true;
1652 return extent;
1653 }
1654 again = true;
1655 }
1656 }
1657 } while (again);
1658
1659 if (extents->delay_coalesce) {
1660 *coalesced = false;
1661 }
1662 return extent;
1663 }
1664
1665 static extent_t *
1666 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1667 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1668 extent_t *extent, bool *coalesced, bool growing_retained) {
1669 return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
1670 extents, extent, coalesced, growing_retained, false);
1671 }
1672
1673 static extent_t *
1674 extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
1675 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1676 extent_t *extent, bool *coalesced, bool growing_retained) {
1677 return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
1678 extents, extent, coalesced, growing_retained, true);
1679 }
1680
1681 /*
1682  * Handles the metadata-management portion of putting an unused extent into
1683  * the given extents_t (coalescing, deregistering slab interiors, heap operations).
1684 */
1685 static void
1686 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1687 extents_t *extents, extent_t *extent, bool growing_retained) {
1688 rtree_ctx_t rtree_ctx_fallback;
1689 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1690
1691 assert((extents_state_get(extents) != extent_state_dirty &&
1692 extents_state_get(extents) != extent_state_muzzy) ||
1693 !extent_zeroed_get(extent));
1694
1695 malloc_mutex_lock(tsdn, &extents->mtx);
1696 extent_hooks_assure_initialized(arena, r_extent_hooks);
1697
1698 extent_szind_set(extent, SC_NSIZES);
1699 if (extent_slab_get(extent)) {
1700 extent_interior_deregister(tsdn, rtree_ctx, extent);
1701 extent_slab_set(extent, false);
1702 }
1703
1704 assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1705 (uintptr_t)extent_base_get(extent), true) == extent);
1706
1707 if (!extents->delay_coalesce) {
1708 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1709 rtree_ctx, extents, extent, NULL, growing_retained);
1710 } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
1711 assert(extents == &arena->extents_dirty);
1712 /* Always coalesce large extents eagerly. */
1713 bool coalesced;
1714 do {
1715 assert(extent_state_get(extent) == extent_state_active);
1716 extent = extent_try_coalesce_large(tsdn, arena,
1717 r_extent_hooks, rtree_ctx, extents, extent,
1718 &coalesced, growing_retained);
1719 } while (coalesced);
1720 if (extent_size_get(extent) >= oversize_threshold) {
1721 /* Shortcut to purge the oversize extent eagerly. */
1722 malloc_mutex_unlock(tsdn, &extents->mtx);
1723 arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
1724 return;
1725 }
1726 }
1727 extent_deactivate_locked(tsdn, arena, extents, extent);
1728
1729 malloc_mutex_unlock(tsdn, &extents->mtx);
1730 }
1731
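/*
 * Dispose of an extent describing unused address space (e.g. alignment excess
 * produced while mapping): register it so that the normal deallocation path
 * can operate on it, then hand it to extent_dalloc_wrapper().
 */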
1732 void
1733 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1734 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1735
1736 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1737 WITNESS_RANK_CORE, 0);
1738
1739 if (extent_register(tsdn, extent)) {
1740 extents_leak(tsdn, arena, &extent_hooks,
1741 &arena->extents_retained, extent, false);
1742 return;
1743 }
1744 extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1745 }
1746
1747 static bool
1748 extent_may_dalloc(void) {
1749 /* With retain enabled, the default dalloc always fails. */
1750 return !opt_retain;
1751 }
1752
1753 static bool
1754 extent_dalloc_default_impl(void *addr, size_t size) {
1755 if (!have_dss || !extent_in_dss(addr)) {
1756 return extent_dalloc_mmap(addr, size);
1757 }
1758 return true;
1759 }
1760
1761 static bool
1762 extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1763 bool committed, unsigned arena_ind) {
1764 return extent_dalloc_default_impl(addr, size);
1765 }
1766
1767 static bool
1768 extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1769 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1770 bool err;
1771
1772 assert(extent_base_get(extent) != NULL);
1773 assert(extent_size_get(extent) != 0);
1774 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1775 WITNESS_RANK_CORE, 0);
1776
1777 extent_addr_set(extent, extent_base_get(extent));
1778
1779 extent_hooks_assure_initialized(arena, r_extent_hooks);
1780 /* Try to deallocate. */
1781 if (*r_extent_hooks == &extent_hooks_default) {
1782 /* Call directly to propagate tsdn. */
1783 err = extent_dalloc_default_impl(extent_base_get(extent),
1784 extent_size_get(extent));
1785 } else {
1786 extent_hook_pre_reentrancy(tsdn, arena);
1787 err = ((*r_extent_hooks)->dalloc == NULL ||
1788 (*r_extent_hooks)->dalloc(*r_extent_hooks,
1789 extent_base_get(extent), extent_size_get(extent),
1790 extent_committed_get(extent), arena_ind_get(arena)));
1791 extent_hook_post_reentrancy(tsdn);
1792 }
1793
1794 if (!err) {
1795 extent_dalloc(tsdn, arena, extent);
1796 }
1797
1798 return err;
1799 }
1800
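/*
 * Return an extent's pages to the system as far as possible, then retain the
 * extent.  The fallback order is: deallocate outright, else decommit, else
 * forced purge, else lazy purge; the zeroed flag is set according to which
 * step succeeded, and the extent is finally recorded into extents_retained.
 */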
1801 void
1802 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1803 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1804 assert(extent_dumpable_get(extent));
1805 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1806 WITNESS_RANK_CORE, 0);
1807
1808 	/* Avoid calling the default extent_dalloc unless we have to. */
1809 if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
1810 /*
1811 * Deregister first to avoid a race with other allocating
1812 * threads, and reregister if deallocation fails.
1813 */
1814 extent_deregister(tsdn, extent);
1815 if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
1816 extent)) {
1817 return;
1818 }
1819 extent_reregister(tsdn, extent);
1820 }
1821
1822 if (*r_extent_hooks != &extent_hooks_default) {
1823 extent_hook_pre_reentrancy(tsdn, arena);
1824 }
1825 /* Try to decommit; purge if that fails. */
1826 bool zeroed;
1827 if (!extent_committed_get(extent)) {
1828 zeroed = true;
1829 } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1830 0, extent_size_get(extent))) {
1831 zeroed = true;
1832 } else if ((*r_extent_hooks)->purge_forced != NULL &&
1833 !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
1834 extent_base_get(extent), extent_size_get(extent), 0,
1835 extent_size_get(extent), arena_ind_get(arena))) {
1836 zeroed = true;
1837 } else if (extent_state_get(extent) == extent_state_muzzy ||
1838 ((*r_extent_hooks)->purge_lazy != NULL &&
1839 !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1840 extent_base_get(extent), extent_size_get(extent), 0,
1841 extent_size_get(extent), arena_ind_get(arena)))) {
1842 zeroed = false;
1843 } else {
1844 zeroed = false;
1845 }
1846 if (*r_extent_hooks != &extent_hooks_default) {
1847 extent_hook_post_reentrancy(tsdn);
1848 }
1849 extent_zeroed_set(extent, zeroed);
1850
1851 if (config_prof) {
1852 extent_gdump_sub(tsdn, extent);
1853 }
1854
1855 extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
1856 extent, false);
1857 }
1858
1859 static void
1860 extent_destroy_default_impl(void *addr, size_t size) {
1861 if (!have_dss || !extent_in_dss(addr)) {
1862 pages_unmap(addr, size);
1863 }
1864 }
1865
1866 static void
1867 extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1868 bool committed, unsigned arena_ind) {
1869 extent_destroy_default_impl(addr, size);
1870 }
1871
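/*
 * Permanently discard an extent (used when an arena is being destroyed):
 * deregister it, release the pages via the destroy hook (or unmap them
 * directly for the default hooks), and free the extent_t itself.
 */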
1872 void
1873 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1874 extent_hooks_t **r_extent_hooks, extent_t *extent) {
1875 assert(extent_base_get(extent) != NULL);
1876 assert(extent_size_get(extent) != 0);
1877 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1878 WITNESS_RANK_CORE, 0);
1879
1880 /* Deregister first to avoid a race with other allocating threads. */
1881 extent_deregister(tsdn, extent);
1882
1883 extent_addr_set(extent, extent_base_get(extent));
1884
1885 extent_hooks_assure_initialized(arena, r_extent_hooks);
1886 /* Try to destroy; silently fail otherwise. */
1887 if (*r_extent_hooks == &extent_hooks_default) {
1888 /* Call directly to propagate tsdn. */
1889 extent_destroy_default_impl(extent_base_get(extent),
1890 extent_size_get(extent));
1891 } else if ((*r_extent_hooks)->destroy != NULL) {
1892 extent_hook_pre_reentrancy(tsdn, arena);
1893 (*r_extent_hooks)->destroy(*r_extent_hooks,
1894 extent_base_get(extent), extent_size_get(extent),
1895 extent_committed_get(extent), arena_ind_get(arena));
1896 extent_hook_post_reentrancy(tsdn);
1897 }
1898
1899 extent_dalloc(tsdn, arena, extent);
1900 }
1901
1902 static bool
1903 extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1904 size_t offset, size_t length, unsigned arena_ind) {
1905 return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1906 length);
1907 }
1908
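/*
 * Commit [offset, offset+length) within the extent.  Returns true on error
 * (including a missing commit hook); the extent's committed flag is updated
 * to reflect a successful commit.
 */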
1909 static bool
1910 extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1911 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1912 size_t length, bool growing_retained) {
1913 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1914 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1915
1916 extent_hooks_assure_initialized(arena, r_extent_hooks);
1917 if (*r_extent_hooks != &extent_hooks_default) {
1918 extent_hook_pre_reentrancy(tsdn, arena);
1919 }
1920 bool err = ((*r_extent_hooks)->commit == NULL ||
1921 (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1922 extent_size_get(extent), offset, length, arena_ind_get(arena)));
1923 if (*r_extent_hooks != &extent_hooks_default) {
1924 extent_hook_post_reentrancy(tsdn);
1925 }
1926 extent_committed_set(extent, extent_committed_get(extent) || !err);
1927 return err;
1928 }
1929
1930 bool
1931 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1932 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1933 size_t length) {
1934 return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1935 length, false);
1936 }
1937
1938 static bool
1939 extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1940 size_t offset, size_t length, unsigned arena_ind) {
1941 return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1942 length);
1943 }
1944
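/*
 * Decommit [offset, offset+length) within the extent.  Returns true on error
 * (including a missing decommit hook); on success the extent is marked
 * uncommitted.
 */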
1945 bool
1946 extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
1947 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1948 size_t length) {
1949 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1950 WITNESS_RANK_CORE, 0);
1951
1952 extent_hooks_assure_initialized(arena, r_extent_hooks);
1953
1954 if (*r_extent_hooks != &extent_hooks_default) {
1955 extent_hook_pre_reentrancy(tsdn, arena);
1956 }
1957 bool err = ((*r_extent_hooks)->decommit == NULL ||
1958 (*r_extent_hooks)->decommit(*r_extent_hooks,
1959 extent_base_get(extent), extent_size_get(extent), offset, length,
1960 arena_ind_get(arena)));
1961 if (*r_extent_hooks != &extent_hooks_default) {
1962 extent_hook_post_reentrancy(tsdn);
1963 }
1964 extent_committed_set(extent, extent_committed_get(extent) && err);
1965 return err;
1966 }
1967
1968 #ifdef PAGES_CAN_PURGE_LAZY
1969 static bool
1970 extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1971 size_t offset, size_t length, unsigned arena_ind) {
1972 assert(addr != NULL);
1973 assert((offset & PAGE_MASK) == 0);
1974 assert(length != 0);
1975 assert((length & PAGE_MASK) == 0);
1976
1977 return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
1978 length);
1979 }
1980 #endif
1981
1982 static bool
1983 extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
1984 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1985 size_t length, bool growing_retained) {
1986 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1987 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1988
1989 extent_hooks_assure_initialized(arena, r_extent_hooks);
1990
1991 if ((*r_extent_hooks)->purge_lazy == NULL) {
1992 return true;
1993 }
1994 if (*r_extent_hooks != &extent_hooks_default) {
1995 extent_hook_pre_reentrancy(tsdn, arena);
1996 }
1997 bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1998 extent_base_get(extent), extent_size_get(extent), offset, length,
1999 arena_ind_get(arena));
2000 if (*r_extent_hooks != &extent_hooks_default) {
2001 extent_hook_post_reentrancy(tsdn);
2002 }
2003
2004 return err;
2005 }
2006
2007 bool
2008 extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
2009 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2010 size_t length) {
2011 return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
2012 offset, length, false);
2013 }
2014
2015 #ifdef PAGES_CAN_PURGE_FORCED
2016 static bool
2017 extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
2018 size_t size, size_t offset, size_t length, unsigned arena_ind) {
2019 assert(addr != NULL);
2020 assert((offset & PAGE_MASK) == 0);
2021 assert(length != 0);
2022 assert((length & PAGE_MASK) == 0);
2023
2024 return pages_purge_forced((void *)((uintptr_t)addr +
2025 (uintptr_t)offset), length);
2026 }
2027 #endif
2028
2029 static bool
2030 extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
2031 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2032 size_t length, bool growing_retained) {
2033 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2034 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2035
2036 extent_hooks_assure_initialized(arena, r_extent_hooks);
2037
2038 if ((*r_extent_hooks)->purge_forced == NULL) {
2039 return true;
2040 }
2041 if (*r_extent_hooks != &extent_hooks_default) {
2042 extent_hook_pre_reentrancy(tsdn, arena);
2043 }
2044 bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
2045 extent_base_get(extent), extent_size_get(extent), offset, length,
2046 arena_ind_get(arena));
2047 if (*r_extent_hooks != &extent_hooks_default) {
2048 extent_hook_post_reentrancy(tsdn);
2049 }
2050 return err;
2051 }
2052
2053 bool
2054 extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
2055 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2056 size_t length) {
2057 return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
2058 offset, length, false);
2059 }
2060
2061 #ifdef JEMALLOC_MAPS_COALESCE
2062 static bool
2063 extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
2064 size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
2065 return !maps_coalesce;
2066 }
2067 #endif
2068
2069 /*
2070 * Accepts the extent to split, and the characteristics of each side of the
2071 * split. The 'a' parameters go with the 'lead' of the resulting pair of
2072 * extents (the lower addressed portion of the split), and the 'b' parameters go
2073  * with the trail (the higher addressed portion).  On success, 'extent' becomes
2074  * the lead and the new trail is returned; on error, NULL is returned.
2075 */
2076 static extent_t *
2077 extent_split_impl(tsdn_t *tsdn, arena_t *arena,
2078 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2079 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
2080 bool growing_retained) {
2081 assert(extent_size_get(extent) == size_a + size_b);
2082 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2083 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2084
2085 extent_hooks_assure_initialized(arena, r_extent_hooks);
2086
2087 if ((*r_extent_hooks)->split == NULL) {
2088 return NULL;
2089 }
2090
2091 extent_t *trail = extent_alloc(tsdn, arena);
2092 if (trail == NULL) {
2093 goto label_error_a;
2094 }
2095
2096 extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
2097 size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
2098 extent_state_get(extent), extent_zeroed_get(extent),
2099 extent_committed_get(extent), extent_dumpable_get(extent));
2100
2101 rtree_ctx_t rtree_ctx_fallback;
2102 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2103 rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
2104 {
2105 extent_t lead;
2106
2107 extent_init(&lead, arena, extent_addr_get(extent), size_a,
2108 slab_a, szind_a, extent_sn_get(extent),
2109 extent_state_get(extent), extent_zeroed_get(extent),
2110 extent_committed_get(extent), extent_dumpable_get(extent));
2111
2112 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
2113 true, &lead_elm_a, &lead_elm_b);
2114 }
2115 rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
2116 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
2117 &trail_elm_a, &trail_elm_b);
2118
2119 if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
2120 || trail_elm_b == NULL) {
2121 goto label_error_b;
2122 }
2123
2124 extent_lock2(tsdn, extent, trail);
2125
2126 if (*r_extent_hooks != &extent_hooks_default) {
2127 extent_hook_pre_reentrancy(tsdn, arena);
2128 }
2129 bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
2130 size_a + size_b, size_a, size_b, extent_committed_get(extent),
2131 arena_ind_get(arena));
2132 if (*r_extent_hooks != &extent_hooks_default) {
2133 extent_hook_post_reentrancy(tsdn);
2134 }
2135 if (err) {
2136 goto label_error_c;
2137 }
2138
2139 extent_size_set(extent, size_a);
2140 extent_szind_set(extent, szind_a);
2141
2142 extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2143 szind_a, slab_a);
2144 extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
2145 szind_b, slab_b);
2146
2147 extent_unlock2(tsdn, extent, trail);
2148
2149 return trail;
2150 label_error_c:
2151 extent_unlock2(tsdn, extent, trail);
2152 label_error_b:
2153 extent_dalloc(tsdn, arena, trail);
2154 label_error_a:
2155 return NULL;
2156 }
2157
2158 extent_t *
2159 extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2160 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2161 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2162 return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2163 szind_a, slab_a, size_b, szind_b, slab_b, false);
2164 }
2165
2166 static bool
2167 extent_merge_default_impl(void *addr_a, void *addr_b) {
2168 if (!maps_coalesce) {
2169 return true;
2170 }
2171 if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
2172 return true;
2173 }
2174
2175 return false;
2176 }
2177
2178 #ifdef JEMALLOC_MAPS_COALESCE
2179 static bool
2180 extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
2181 void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
2182 return extent_merge_default_impl(addr_a, addr_b);
2183 }
2184 #endif
2185
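/*
 * Merge extents 'a' and 'b' ('b' immediately follows 'a') into a single
 * extent.  The merge hook (or the default implementation) is consulted first;
 * on success the rtree mappings are rewritten while both extents are locked,
 * 'a' absorbs 'b' (keeping the smaller serial number and the conjunction of
 * the zeroed flags), and 'b' is released.  Returns true on error.
 */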
2186 static bool
2187 extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
2188 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
2189 bool growing_retained) {
2190 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2191 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2192
2193 extent_hooks_assure_initialized(arena, r_extent_hooks);
2194
2195 if ((*r_extent_hooks)->merge == NULL) {
2196 return true;
2197 }
2198
2199 bool err;
2200 if (*r_extent_hooks == &extent_hooks_default) {
2201 /* Call directly to propagate tsdn. */
2202 err = extent_merge_default_impl(extent_base_get(a),
2203 extent_base_get(b));
2204 } else {
2205 extent_hook_pre_reentrancy(tsdn, arena);
2206 err = (*r_extent_hooks)->merge(*r_extent_hooks,
2207 extent_base_get(a), extent_size_get(a), extent_base_get(b),
2208 extent_size_get(b), extent_committed_get(a),
2209 arena_ind_get(arena));
2210 extent_hook_post_reentrancy(tsdn);
2211 }
2212
2213 if (err) {
2214 return true;
2215 }
2216
2217 /*
2218 * The rtree writes must happen while all the relevant elements are
2219 * owned, so the following code uses decomposed helper functions rather
2220 * than extent_{,de}register() to do things in the right order.
2221 */
2222 rtree_ctx_t rtree_ctx_fallback;
2223 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2224 rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
2225 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
2226 &a_elm_b);
2227 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
2228 &b_elm_b);
2229
2230 extent_lock2(tsdn, a, b);
2231
2232 if (a_elm_b != NULL) {
2233 rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
2234 SC_NSIZES, false);
2235 }
2236 if (b_elm_b != NULL) {
2237 rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
2238 SC_NSIZES, false);
2239 } else {
2240 b_elm_b = b_elm_a;
2241 }
2242
2243 extent_size_set(a, extent_size_get(a) + extent_size_get(b));
2244 extent_szind_set(a, SC_NSIZES);
2245 extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
2246 extent_sn_get(a) : extent_sn_get(b));
2247 extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
2248
2249 extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
2250 false);
2251
2252 extent_unlock2(tsdn, a, b);
2253
2254 extent_dalloc(tsdn, extent_arena_get(b), b);
2255
2256 return false;
2257 }
2258
2259 bool
2260 extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2261 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2262 return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
2263 }
2264
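/*
 * One-time bootstrap: initialize the global extents rtree and the extent mutex
 * pool, and boot the dss allocator when it is available.  Returns true on
 * error.
 */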
2265 bool
2266 extent_boot(void) {
2267 if (rtree_new(&extents_rtree, true)) {
2268 return true;
2269 }
2270
2271 if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
2272 WITNESS_RANK_EXTENT_POOL)) {
2273 return true;
2274 }
2275
2276 if (have_dss) {
2277 extent_dss_boot();
2278 }
2279
2280 return false;
2281 }
2282