1 #define JEMALLOC_EXTENT_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/extent_dss.h"
7 #include "jemalloc/internal/extent_mmap.h"
8 #include "jemalloc/internal/ph.h"
9 #include "jemalloc/internal/rtree.h"
10 #include "jemalloc/internal/mutex.h"
11 #include "jemalloc/internal/mutex_pool.h"
12
13 /******************************************************************************/
14 /* Data. */
15
16 rtree_t extents_rtree;
17 /* Keyed by the address of the extent_t being protected. */
18 mutex_pool_t extent_mutex_pool;
19
20 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
21
22 static const bitmap_info_t extents_bitmap_info =
23 BITMAP_INFO_INITIALIZER(NPSIZES+1);
24
25 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26 size_t size, size_t alignment, bool *zero, bool *commit,
27 unsigned arena_ind);
28 static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29 size_t size, bool committed, unsigned arena_ind);
30 static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31 size_t size, bool committed, unsigned arena_ind);
32 static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33 size_t size, size_t offset, size_t length, unsigned arena_ind);
34 static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36 size_t length, bool growing_retained);
37 static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39 #ifdef PAGES_CAN_PURGE_LAZY
40 static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41 size_t size, size_t offset, size_t length, unsigned arena_ind);
42 #endif
43 static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45 size_t length, bool growing_retained);
46 #ifdef PAGES_CAN_PURGE_FORCED
47 static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
49 #endif
50 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52 size_t length, bool growing_retained);
53 #ifdef JEMALLOC_MAPS_COALESCE
54 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
55 size_t size, size_t size_a, size_t size_b, bool committed,
56 unsigned arena_ind);
57 #endif
58 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
59 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
60 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
61 bool growing_retained);
62 #ifdef JEMALLOC_MAPS_COALESCE
63 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
64 size_t size_a, void *addr_b, size_t size_b, bool committed,
65 unsigned arena_ind);
66 #endif
67 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
68 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
69 bool growing_retained);
70
71 const extent_hooks_t extent_hooks_default = {
72 extent_alloc_default,
73 extent_dalloc_default,
74 extent_destroy_default,
75 extent_commit_default,
76 extent_decommit_default
77 #ifdef PAGES_CAN_PURGE_LAZY
78 ,
79 extent_purge_lazy_default
80 #else
81 ,
82 NULL
83 #endif
84 #ifdef PAGES_CAN_PURGE_FORCED
85 ,
86 extent_purge_forced_default
87 #else
88 ,
89 NULL
90 #endif
91 #ifdef JEMALLOC_MAPS_COALESCE
92 ,
93 extent_split_default,
94 extent_merge_default
95 #endif
96 };
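/*
 * Illustrative sketch (an assumption-laden example, not part of this file's
 * logic): an application can install its own hook table through the
 * "arena.<i>.extent_hooks" mallctl.  The my_* functions below are
 * hypothetical user-provided implementations matching the signatures
 * declared above.
 *
 *	static extent_hooks_t my_hooks = {
 *		my_alloc, my_dalloc, my_destroy, my_commit, my_decommit,
 *		my_purge_lazy, my_purge_forced, my_split, my_merge
 *	};
 *	extent_hooks_t *new_hooks = &my_hooks;
 *	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
 *	    sizeof(extent_hooks_t *));
 */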
97
98 /* Used exclusively for gdump triggering. */
99 static atomic_zu_t curpages;
100 static atomic_zu_t highpages;
101
102 /******************************************************************************/
103 /*
104 * Function prototypes for static functions that are referenced prior to
105 * definition.
106 */
107
108 static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109 static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
110 extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
111 size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
112 bool *zero, bool *commit, bool growing_retained);
113 static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
114 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
115 extent_t *extent, bool *coalesced, bool growing_retained);
116 static void extent_record(tsdn_t *tsdn, arena_t *arena,
117 extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
118 bool growing_retained);
119
120 /******************************************************************************/
121
122 ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
123 extent_esnead_comp)
124
125 typedef enum {
126 lock_result_success,
127 lock_result_failure,
128 lock_result_no_extent
129 } lock_result_t;
130
131 static lock_result_t
132 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
133 extent_t **result) {
134 extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
135 elm, true);
136
137 if (extent1 == NULL) {
138 return lock_result_no_extent;
139 }
140 /*
141 * It's possible that the extent changed out from under us, and with it
142 * the leaf->extent mapping. We have to recheck while holding the lock.
143 */
144 extent_lock(tsdn, extent1);
145 extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
146 &extents_rtree, elm, true);
147
148 if (extent1 == extent2) {
149 *result = extent1;
150 return lock_result_success;
151 } else {
152 extent_unlock(tsdn, extent1);
153 return lock_result_failure;
154 }
155 }
156
157 /*
158 * Returns a pool-locked extent_t * if there's one associated with the given
159 * address, and NULL otherwise.
160 */
161 static extent_t *
162 extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
163 extent_t *ret = NULL;
164 rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
165 rtree_ctx, (uintptr_t)addr, false, false);
166 if (elm == NULL) {
167 return NULL;
168 }
169 lock_result_t lock_result;
170 do {
171 lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
172 } while (lock_result == lock_result_failure);
173 return ret;
174 }
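/*
 * The retry loop above spins only while the leaf->extent mapping keeps
 * changing between the unlocked read and the locked re-read; once the
 * mapping is stable, the lookup either returns the locked extent or NULL
 * (lock_result_no_extent).
 */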
175
176 extent_t *
177 extent_alloc(tsdn_t *tsdn, arena_t *arena) {
178 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
179 extent_t *extent = extent_avail_first(&arena->extent_avail);
180 if (extent == NULL) {
181 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
182 return base_alloc_extent(tsdn, arena->base);
183 }
184 extent_avail_remove(&arena->extent_avail, extent);
185 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
186 return extent;
187 }
188
189 void
190 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
191 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
192 extent_avail_insert(&arena->extent_avail, extent);
193 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
194 }
195
196 extent_hooks_t *
197 extent_hooks_get(arena_t *arena) {
198 return base_extent_hooks_get(arena->base);
199 }
200
201 extent_hooks_t *
202 extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
203 background_thread_info_t *info;
204 if (have_background_thread) {
205 info = arena_background_thread_info_get(arena);
206 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
207 }
208 extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
209 if (have_background_thread) {
210 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
211 }
212
213 return ret;
214 }
215
216 static void
217 extent_hooks_assure_initialized(arena_t *arena,
218 extent_hooks_t **r_extent_hooks) {
219 if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
220 *r_extent_hooks = extent_hooks_get(arena);
221 }
222 }
223
224 #ifndef JEMALLOC_JET
225 static
226 #endif
227 size_t
228 extent_size_quantize_floor(size_t size) {
229 size_t ret;
230 pszind_t pind;
231
232 assert(size > 0);
233 assert((size & PAGE_MASK) == 0);
234
235 pind = sz_psz2ind(size - sz_large_pad + 1);
236 if (pind == 0) {
237 /*
238 * Avoid underflow. This short-circuit would also do the right
239 * thing for all sizes in the range for which there are
240 * PAGE-spaced size classes, but it's simplest to just handle
241 * the one case that would cause erroneous results.
242 */
243 return size;
244 }
245 ret = sz_pind2sz(pind - 1) + sz_large_pad;
246 assert(ret <= size);
247 return ret;
248 }
249
250 #ifndef JEMALLOC_JET
251 static
252 #endif
253 size_t
254 extent_size_quantize_ceil(size_t size) {
255 size_t ret;
256
257 assert(size > 0);
258 assert(size - sz_large_pad <= LARGE_MAXCLASS);
259 assert((size & PAGE_MASK) == 0);
260
261 ret = extent_size_quantize_floor(size);
262 if (ret < size) {
263 /*
264 * Skip a quantization that may have an adequately large extent,
265 * because under-sized extents may be mixed in. This only
266 * happens when an unusual size is requested, i.e. for aligned
267 * allocation, and is just one of several places where linear
268 * search would potentially find sufficiently aligned available
269 * memory somewhere lower.
270 */
271 ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
272 sz_large_pad;
273 }
274 return ret;
275 }
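/*
 * Worked example, as a sketch assuming 4 KiB pages, sz_large_pad == 0, and
 * the default spacing of four page size classes per doubling: 36 KiB is not
 * itself a size class, so extent_size_quantize_floor(36 KiB) == 32 KiB and
 * extent_size_quantize_ceil(36 KiB) == 40 KiB.  Sizes that already are size
 * classes quantize to themselves in both directions.
 */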
276
277 /* Generate pairing heap functions. */
278 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
279
280 bool
281 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
282 bool delay_coalesce) {
283 if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
284 malloc_mutex_rank_exclusive)) {
285 return true;
286 }
287 for (unsigned i = 0; i < NPSIZES+1; i++) {
288 extent_heap_new(&extents->heaps[i]);
289 }
290 bitmap_init(extents->bitmap, &extents_bitmap_info, true);
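/*
 * Bitmap convention: a set bit means the corresponding heap is empty, so
 * initializing with fill=true marks every heap empty.
 * extents_insert_locked()/extents_remove_locked() clear/set bits as heaps
 * become non-empty/empty, and bitmap_ffu() scans for the first cleared bit,
 * i.e. the first non-empty heap.
 */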
291 extent_list_init(&extents->lru);
292 atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
293 extents->state = state;
294 extents->delay_coalesce = delay_coalesce;
295 return false;
296 }
297
298 extent_state_t
299 extents_state_get(const extents_t *extents) {
300 return extents->state;
301 }
302
303 size_t
304 extents_npages_get(extents_t *extents) {
305 return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
306 }
307
308 static void
309 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
310 malloc_mutex_assert_owner(tsdn, &extents->mtx);
311 assert(extent_state_get(extent) == extents->state);
312
313 size_t size = extent_size_get(extent);
314 size_t psz = extent_size_quantize_floor(size);
315 pszind_t pind = sz_psz2ind(psz);
316 if (extent_heap_empty(&extents->heaps[pind])) {
317 bitmap_unset(extents->bitmap, &extents_bitmap_info,
318 (size_t)pind);
319 }
320 extent_heap_insert(&extents->heaps[pind], extent);
321 extent_list_append(&extents->lru, extent);
322 size_t npages = size >> LG_PAGE;
323 /*
324 * All modifications to npages hold the mutex (as asserted above), so we
325 * don't need an atomic fetch-add; we can get by with a load followed by
326 * a store.
327 */
328 size_t cur_extents_npages =
329 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
330 atomic_store_zu(&extents->npages, cur_extents_npages + npages,
331 ATOMIC_RELAXED);
332 }
333
334 static void
335 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
336 malloc_mutex_assert_owner(tsdn, &extents->mtx);
337 assert(extent_state_get(extent) == extents->state);
338
339 size_t size = extent_size_get(extent);
340 size_t psz = extent_size_quantize_floor(size);
341 pszind_t pind = sz_psz2ind(psz);
342 extent_heap_remove(&extents->heaps[pind], extent);
343 if (extent_heap_empty(&extents->heaps[pind])) {
344 bitmap_set(extents->bitmap, &extents_bitmap_info,
345 (size_t)pind);
346 }
347 extent_list_remove(&extents->lru, extent);
348 size_t npages = size >> LG_PAGE;
349 /*
350 * As in extents_insert_locked, we hold extents->mtx and so don't need
351 * atomic operations for updating extents->npages.
352 */
353 size_t cur_extents_npages =
354 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
355 assert(cur_extents_npages >= npages);
356 atomic_store_zu(&extents->npages,
357 cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
358 }
359
360 /*
361 * Find an extent with size [min_size, max_size) to satisfy the alignment
362 * requirement. For each size, try only the first extent in the heap.
363 */
364 static extent_t *
365 extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
366 size_t alignment) {
367 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
368 pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
369
370 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
371 &extents_bitmap_info, (size_t)pind); i < pind_max; i =
372 (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
373 (size_t)i+1)) {
374 assert(i < NPSIZES);
375 assert(!extent_heap_empty(&extents->heaps[i]));
376 extent_t *extent = extent_heap_first(&extents->heaps[i]);
377 uintptr_t base = (uintptr_t)extent_base_get(extent);
378 size_t candidate_size = extent_size_get(extent);
379 assert(candidate_size >= min_size);
380
381 uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
382 PAGE_CEILING(alignment));
383 if (base > next_align || base + candidate_size <= next_align) {
384 /* Overflow or not crossing the next alignment. */
385 continue;
386 }
387
388 size_t leadsize = next_align - base;
389 if (candidate_size - leadsize >= min_size) {
390 return extent;
391 }
392 }
393
394 return NULL;
395 }
396
397 /* Do any-best-fit extent selection, i.e. select any extent that best fits. */
398 static extent_t *
399 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
400 size_t size) {
401 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
402 pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
403 (size_t)pind);
404 if (i < NPSIZES+1) {
405 /*
406 * In order to reduce fragmentation, avoid reusing and splitting
407 * large extents for much smaller sizes.
408 */
409 if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
410 return NULL;
411 }
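/*
 * For example, if opt_lg_extent_max_active_fit is 6 (assumed here to be the
 * default), a best-fit candidate whose size class is more than 64 times the
 * requested size is rejected above rather than split.
 */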
412 assert(!extent_heap_empty(&extents->heaps[i]));
413 extent_t *extent = extent_heap_first(&extents->heaps[i]);
414 assert(extent_size_get(extent) >= size);
415 return extent;
416 }
417
418 return NULL;
419 }
420
421 /*
422 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
423 * large enough.
424 */
425 static extent_t *
426 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
427 size_t size) {
428 extent_t *ret = NULL;
429
430 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
431 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
432 &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
433 (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
434 (size_t)i+1)) {
435 assert(!extent_heap_empty(&extents->heaps[i]));
436 extent_t *extent = extent_heap_first(&extents->heaps[i]);
437 assert(extent_size_get(extent) >= size);
438 if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
439 ret = extent;
440 }
441 if (i == NPSIZES) {
442 break;
443 }
444 assert(i < NPSIZES);
445 }
446
447 return ret;
448 }
449
450 /*
451 * Do {best,first}-fit extent selection, where the selection policy choice is
452 * based on extents->delay_coalesce. Best-fit selection requires less
453 * searching, but its layout policy is less stable and may cause higher virtual
454 * memory fragmentation as a side effect.
455 */
456 static extent_t *
457 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
458 size_t esize, size_t alignment) {
459 malloc_mutex_assert_owner(tsdn, &extents->mtx);
460
461 size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
462 /* Beware size_t wrap-around. */
463 if (max_size < esize) {
464 return NULL;
465 }
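/*
 * Why max_size suffices: extents are page-aligned, so the gap between an
 * extent's base and the next PAGE_CEILING(alignment) boundary is at most
 * PAGE_CEILING(alignment) - PAGE bytes.  Any extent of at least
 * esize + PAGE_CEILING(alignment) - PAGE bytes therefore contains an
 * alignment-aligned subrange of esize bytes.
 */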
466
467 extent_t *extent = extents->delay_coalesce ?
468 extents_best_fit_locked(tsdn, arena, extents, max_size) :
469 extents_first_fit_locked(tsdn, arena, extents, max_size);
470
471 if (alignment > PAGE && extent == NULL) {
472 /*
473 * max_size guarantees the alignment requirement but is rather
474 * pessimistic. Next we try to satisfy the aligned allocation
475 * with sizes in [esize, max_size).
476 */
477 extent = extents_fit_alignment(extents, esize, max_size,
478 alignment);
479 }
480
481 return extent;
482 }
483
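/*
 * Returns true if no coalescing was possible, in which case the extent stays
 * removed from extents and the caller keeps ownership of it.  Returns false
 * if coalescing succeeded, in which case the (possibly merged) extent has
 * been re-inserted into extents and the caller should start over.
 */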
484 static bool
485 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
486 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
487 extent_t *extent) {
488 extent_state_set(extent, extent_state_active);
489 bool coalesced;
490 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
491 extents, extent, &coalesced, false);
492 extent_state_set(extent, extents_state_get(extents));
493
494 if (!coalesced) {
495 return true;
496 }
497 extents_insert_locked(tsdn, extents, extent);
498 return false;
499 }
500
501 extent_t *
502 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
503 extents_t *extents, void *new_addr, size_t size, size_t pad,
504 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
505 assert(size + pad != 0);
506 assert(alignment != 0);
507 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
508 WITNESS_RANK_CORE, 0);
509
510 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
511 new_addr, size, pad, alignment, slab, szind, zero, commit, false);
512 assert(extent == NULL || extent_dumpable_get(extent));
513 return extent;
514 }
515
516 void
517 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
518 extents_t *extents, extent_t *extent) {
519 assert(extent_base_get(extent) != NULL);
520 assert(extent_size_get(extent) != 0);
521 assert(extent_dumpable_get(extent));
522 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
523 WITNESS_RANK_CORE, 0);
524
525 extent_addr_set(extent, extent_base_get(extent));
526 extent_zeroed_set(extent, false);
527
528 extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
529 }
530
531 extent_t *
532 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
533 extents_t *extents, size_t npages_min) {
534 rtree_ctx_t rtree_ctx_fallback;
535 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
536
537 malloc_mutex_lock(tsdn, &extents->mtx);
538
539 /*
540 * Get the LRU coalesced extent, if any. If coalescing was delayed,
541 * the loop will iterate until the LRU extent is fully coalesced.
542 */
543 extent_t *extent;
544 while (true) {
545 /* Get the LRU extent, if any. */
546 extent = extent_list_first(&extents->lru);
547 if (extent == NULL) {
548 goto label_return;
549 }
550 /* Check the eviction limit. */
551 size_t extents_npages = atomic_load_zu(&extents->npages,
552 ATOMIC_RELAXED);
553 if (extents_npages <= npages_min) {
554 extent = NULL;
555 goto label_return;
556 }
557 extents_remove_locked(tsdn, extents, extent);
558 if (!extents->delay_coalesce) {
559 break;
560 }
561 /* Try to coalesce. */
562 if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
563 rtree_ctx, extents, extent)) {
564 break;
565 }
566 /*
567 * The LRU extent was just coalesced and the result placed in
568 * the LRU at its neighbor's position. Start over.
569 */
570 }
571
572 /*
573 * Either mark the extent active or deregister it to protect against
574 * concurrent operations.
575 */
576 switch (extents_state_get(extents)) {
577 case extent_state_active:
578 not_reached();
579 case extent_state_dirty:
580 case extent_state_muzzy:
581 extent_state_set(extent, extent_state_active);
582 break;
583 case extent_state_retained:
584 extent_deregister(tsdn, extent);
585 break;
586 default:
587 not_reached();
588 }
589
590 label_return:
591 malloc_mutex_unlock(tsdn, &extents->mtx);
592 return extent;
593 }
594
595 static void
596 extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
597 extents_t *extents, extent_t *extent, bool growing_retained) {
598 /*
599 * Leak extent after making sure its pages have already been purged, so
600 * that this is only a virtual memory leak.
601 */
602 if (extents_state_get(extents) == extent_state_dirty) {
603 if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
604 extent, 0, extent_size_get(extent), growing_retained)) {
605 extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
606 extent, 0, extent_size_get(extent),
607 growing_retained);
608 }
609 }
610 extent_dalloc(tsdn, arena, extent);
611 }
612
613 void
614 extents_prefork(tsdn_t *tsdn, extents_t *extents) {
615 malloc_mutex_prefork(tsdn, &extents->mtx);
616 }
617
618 void
619 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
620 malloc_mutex_postfork_parent(tsdn, &extents->mtx);
621 }
622
623 void
624 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
625 malloc_mutex_postfork_child(tsdn, &extents->mtx);
626 }
627
628 static void
629 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
630 extent_t *extent) {
631 assert(extent_arena_get(extent) == arena);
632 assert(extent_state_get(extent) == extent_state_active);
633
634 extent_state_set(extent, extents_state_get(extents));
635 extents_insert_locked(tsdn, extents, extent);
636 }
637
638 static void
639 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
640 extent_t *extent) {
641 malloc_mutex_lock(tsdn, &extents->mtx);
642 extent_deactivate_locked(tsdn, arena, extents, extent);
643 malloc_mutex_unlock(tsdn, &extents->mtx);
644 }
645
646 static void
647 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
648 extent_t *extent) {
649 assert(extent_arena_get(extent) == arena);
650 assert(extent_state_get(extent) == extents_state_get(extents));
651
652 extents_remove_locked(tsdn, extents, extent);
653 extent_state_set(extent, extent_state_active);
654 }
655
656 static bool
657 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
658 const extent_t *extent, bool dependent, bool init_missing,
659 rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
660 *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
661 (uintptr_t)extent_base_get(extent), dependent, init_missing);
662 if (!dependent && *r_elm_a == NULL) {
663 return true;
664 }
665 assert(*r_elm_a != NULL);
666
667 *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
668 (uintptr_t)extent_last_get(extent), dependent, init_missing);
669 if (!dependent && *r_elm_b == NULL) {
670 return true;
671 }
672 assert(*r_elm_b != NULL);
673
674 return false;
675 }
676
677 static void
678 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
679 rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
680 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
681 if (elm_b != NULL) {
682 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
683 slab);
684 }
685 }
686
687 static void
688 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
689 szind_t szind) {
690 assert(extent_slab_get(extent));
691
692 /* Register interior. */
693 for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
694 rtree_write(tsdn, &extents_rtree, rtree_ctx,
695 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
696 LG_PAGE), extent, szind, true);
697 }
698 }
699
700 static void
701 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
702 cassert(config_prof);
703 /* prof_gdump() requirement. */
704 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
705 WITNESS_RANK_CORE, 0);
706
707 if (opt_prof && extent_state_get(extent) == extent_state_active) {
708 size_t nadd = extent_size_get(extent) >> LG_PAGE;
709 size_t cur = atomic_fetch_add_zu(&curpages, nadd,
710 ATOMIC_RELAXED) + nadd;
711 size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
712 while (cur > high && !atomic_compare_exchange_weak_zu(
713 &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
714 /*
715 * Don't refresh cur, because it may have decreased
716 * since this thread lost the highpages update race.
717 * Note that high is updated in case of CAS failure.
718 */
719 }
720 if (cur > high && prof_gdump_get_unlocked()) {
721 prof_gdump(tsdn);
722 }
723 }
724 }
725
726 static void
727 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
728 cassert(config_prof);
729
730 if (opt_prof && extent_state_get(extent) == extent_state_active) {
731 size_t nsub = extent_size_get(extent) >> LG_PAGE;
732 assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
733 atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
734 }
735 }
736
737 static bool
738 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
739 rtree_ctx_t rtree_ctx_fallback;
740 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
741 rtree_leaf_elm_t *elm_a, *elm_b;
742
743 /*
744 * We need to hold the lock to protect against a concurrent coalesce
745 * operation that sees us in a partial state.
746 */
747 extent_lock(tsdn, extent);
748
749 if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
750 &elm_a, &elm_b)) {
751 return true;
752 }
753
754 szind_t szind = extent_szind_get_maybe_invalid(extent);
755 bool slab = extent_slab_get(extent);
756 extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
757 if (slab) {
758 extent_interior_register(tsdn, rtree_ctx, extent, szind);
759 }
760
761 extent_unlock(tsdn, extent);
762
763 if (config_prof && gdump_add) {
764 extent_gdump_add(tsdn, extent);
765 }
766
767 return false;
768 }
769
770 static bool
771 extent_register(tsdn_t *tsdn, extent_t *extent) {
772 return extent_register_impl(tsdn, extent, true);
773 }
774
775 static bool
776 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
777 return extent_register_impl(tsdn, extent, false);
778 }
779
780 static void
781 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
782 bool err = extent_register(tsdn, extent);
783 assert(!err);
784 }
785
786 /*
787 * Removes all pointers to the given extent from the global rtree indices for
788 * its interior. This is relevant for slab extents, for which we need to do
789 * metadata lookups at places other than the head of the extent. We deregister
790 * on the interior, then, when an extent moves from being an active slab to an
791 * inactive state.
792 */
793 static void
794 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
795 extent_t *extent) {
796 size_t i;
797
798 assert(extent_slab_get(extent));
799
800 for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
801 rtree_clear(tsdn, &extents_rtree, rtree_ctx,
802 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
803 LG_PAGE));
804 }
805 }
806
807 /*
808 * Removes all pointers to the given extent from the global rtree.
809 */
810 static void
811 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
812 rtree_ctx_t rtree_ctx_fallback;
813 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
814 rtree_leaf_elm_t *elm_a, *elm_b;
815 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
816 &elm_a, &elm_b);
817
818 extent_lock(tsdn, extent);
819
820 extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
821 if (extent_slab_get(extent)) {
822 extent_interior_deregister(tsdn, rtree_ctx, extent);
823 extent_slab_set(extent, false);
824 }
825
826 extent_unlock(tsdn, extent);
827
828 if (config_prof && gdump) {
829 extent_gdump_sub(tsdn, extent);
830 }
831 }
832
833 static void
834 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
835 extent_deregister_impl(tsdn, extent, true);
836 }
837
838 static void
839 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
840 extent_deregister_impl(tsdn, extent, false);
841 }
842
843 /*
844 * Tries to find and remove an extent from extents that can be used for the
845 * given allocation request.
846 */
847 static extent_t *
848 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
849 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
850 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
851 bool growing_retained) {
852 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
853 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
854 assert(alignment > 0);
855 if (config_debug && new_addr != NULL) {
856 /*
857 * Non-NULL new_addr has two use cases:
858 *
859 * 1) Recycle a known-extant extent, e.g. during purging.
860 * 2) Perform in-place expanding reallocation.
861 *
862 * Regardless of use case, new_addr must either refer to a
863 * non-existing extent, or to the base of an extant extent,
864 * since only active slabs support interior lookups (which of
865 * course cannot be recycled).
866 */
867 assert(PAGE_ADDR2BASE(new_addr) == new_addr);
868 assert(pad == 0);
869 assert(alignment <= PAGE);
870 }
871
872 size_t esize = size + pad;
873 malloc_mutex_lock(tsdn, &extents->mtx);
874 extent_hooks_assure_initialized(arena, r_extent_hooks);
875 extent_t *extent;
876 if (new_addr != NULL) {
877 extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
878 if (extent != NULL) {
879 /*
880 * We might null-out extent to report an error, but we
881 * still need to unlock the associated mutex after.
882 */
883 extent_t *unlock_extent = extent;
884 assert(extent_base_get(extent) == new_addr);
885 if (extent_arena_get(extent) != arena ||
886 extent_size_get(extent) < esize ||
887 extent_state_get(extent) !=
888 extents_state_get(extents)) {
889 extent = NULL;
890 }
891 extent_unlock(tsdn, unlock_extent);
892 }
893 } else {
894 extent = extents_fit_locked(tsdn, arena, extents, esize,
895 alignment);
896 }
897 if (extent == NULL) {
898 malloc_mutex_unlock(tsdn, &extents->mtx);
899 return NULL;
900 }
901
902 extent_activate_locked(tsdn, arena, extents, extent);
903 malloc_mutex_unlock(tsdn, &extents->mtx);
904
905 return extent;
906 }
907
908 /*
909 * Given an allocation request and an extent guaranteed to be able to satisfy
910 * it, this splits off lead and trail extents, leaving extent pointing to an
911 * extent satisfying the allocation.
912 * This function doesn't put lead or trail into any extents_t; it's the caller's
913 * job to ensure that they can be reused.
914 */
915 typedef enum {
916 /*
917 * Split successfully. lead, extent, and trail are modified to extents
918 * describing the ranges before, in, and after the given allocation.
919 */
920 extent_split_interior_ok,
921 /*
922 * The extent can't satisfy the given allocation request. None of the
923 * input extent_t *s are touched.
924 */
925 extent_split_interior_cant_alloc,
926 /*
927 * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
928 * and salvage what's still salvageable (if *to_salvage is non-NULL).
929 * None of lead, extent, or trail are valid.
930 */
931 extent_split_interior_error
932 } extent_split_interior_result_t;
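/*
 * Sketch of the layout produced by extent_split_interior() on success (lead
 * and/or trail may be absent when the corresponding size is zero):
 *
 *	original extent: [ *lead | *extent (size + pad) | *trail ]
 *	                   ^        ^
 *	                   |        +-- first address at or above the original
 *	                   |            base that is PAGE_CEILING(alignment)
 *	                   |            aligned
 *	                   +-- leadsize bytes of alignment padding, if any
 */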
933
934 static extent_split_interior_result_t
935 extent_split_interior(tsdn_t *tsdn, arena_t *arena,
936 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
937 /* The result of splitting, in case of success. */
938 extent_t **extent, extent_t **lead, extent_t **trail,
939 /* The mess to clean up, in case of error. */
940 extent_t **to_leak, extent_t **to_salvage,
941 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
942 szind_t szind, bool growing_retained) {
943 size_t esize = size + pad;
944 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
945 PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
946 assert(new_addr == NULL || leadsize == 0);
947 if (extent_size_get(*extent) < leadsize + esize) {
948 return extent_split_interior_cant_alloc;
949 }
950 size_t trailsize = extent_size_get(*extent) - leadsize - esize;
951
952 *lead = NULL;
953 *trail = NULL;
954 *to_leak = NULL;
955 *to_salvage = NULL;
956
957 /* Split the lead. */
958 if (leadsize != 0) {
959 *lead = *extent;
960 *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
961 *lead, leadsize, NSIZES, false, esize + trailsize, szind,
962 slab, growing_retained);
963 if (*extent == NULL) {
964 *to_leak = *lead;
965 *lead = NULL;
966 return extent_split_interior_error;
967 }
968 }
969
970 /* Split the trail. */
971 if (trailsize != 0) {
972 *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
973 esize, szind, slab, trailsize, NSIZES, false,
974 growing_retained);
975 if (*trail == NULL) {
976 *to_leak = *extent;
977 *to_salvage = *lead;
978 *lead = NULL;
979 *extent = NULL;
980 return extent_split_interior_error;
981 }
982 }
983
984 if (leadsize == 0 && trailsize == 0) {
985 /*
986 * Splitting causes szind to be set as a side effect, but no
987 * splitting occurred.
988 */
989 extent_szind_set(*extent, szind);
990 if (szind != NSIZES) {
991 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
992 (uintptr_t)extent_addr_get(*extent), szind, slab);
993 if (slab && extent_size_get(*extent) > PAGE) {
994 rtree_szind_slab_update(tsdn, &extents_rtree,
995 rtree_ctx,
996 (uintptr_t)extent_past_get(*extent) -
997 (uintptr_t)PAGE, szind, slab);
998 }
999 }
1000 }
1001
1002 return extent_split_interior_ok;
1003 }
1004
1005 /*
1006 * This fulfills the indicated allocation request out of the given extent (which
1007 * the caller should have ensured was big enough). If there's any unused space
1008 * before or after the resulting allocation, that space is given its own extent
1009 * and put back into extents.
1010 */
1011 static extent_t *
1012 extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1013 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1014 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1015 szind_t szind, extent_t *extent, bool growing_retained) {
1016 extent_t *lead;
1017 extent_t *trail;
1018 extent_t *to_leak;
1019 extent_t *to_salvage;
1020
1021 extent_split_interior_result_t result = extent_split_interior(
1022 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1023 &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1024 growing_retained);
1025
1026 if (result == extent_split_interior_ok) {
1027 if (lead != NULL) {
1028 extent_deactivate(tsdn, arena, extents, lead);
1029 }
1030 if (trail != NULL) {
1031 extent_deactivate(tsdn, arena, extents, trail);
1032 }
1033 return extent;
1034 } else {
1035 /*
1036 * We should have picked an extent that was large enough to
1037 * fulfill our allocation request.
1038 */
1039 assert(result == extent_split_interior_error);
1040 if (to_salvage != NULL) {
1041 extent_deregister(tsdn, to_salvage);
1042 }
1043 if (to_leak != NULL) {
1044 void *leak = extent_base_get(to_leak);
1045 extent_deregister_no_gdump_sub(tsdn, to_leak);
1046 extents_leak(tsdn, arena, r_extent_hooks, extents,
1047 to_leak, growing_retained);
1048 assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
1049 == NULL);
1050 }
1051 return NULL;
1052 }
1053 unreachable();
1054 }
1055
1056 /*
1057 * Tries to satisfy the given allocation request by reusing one of the extents
1058 * in the given extents_t.
1059 */
1060 static extent_t *
1061 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1062 extents_t *extents, void *new_addr, size_t size, size_t pad,
1063 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1064 bool growing_retained) {
1065 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1066 WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1067 assert(new_addr == NULL || !slab);
1068 assert(pad == 0 || !slab);
1069 assert(!*zero || !slab);
1070
1071 rtree_ctx_t rtree_ctx_fallback;
1072 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1073
1074 extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1075 rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1076 growing_retained);
1077 if (extent == NULL) {
1078 return NULL;
1079 }
1080
1081 extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1082 extents, new_addr, size, pad, alignment, slab, szind, extent,
1083 growing_retained);
1084 if (extent == NULL) {
1085 return NULL;
1086 }
1087
1088 if (*commit && !extent_committed_get(extent)) {
1089 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1090 0, extent_size_get(extent), growing_retained)) {
1091 extent_record(tsdn, arena, r_extent_hooks, extents,
1092 extent, growing_retained);
1093 return NULL;
1094 }
1095 extent_zeroed_set(extent, true);
1096 }
1097
1098 if (extent_committed_get(extent)) {
1099 *commit = true;
1100 }
1101 if (extent_zeroed_get(extent)) {
1102 *zero = true;
1103 }
1104
1105 if (pad != 0) {
1106 extent_addr_randomize(tsdn, extent, alignment);
1107 }
1108 assert(extent_state_get(extent) == extent_state_active);
1109 if (slab) {
1110 extent_slab_set(extent, slab);
1111 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1112 }
1113
1114 if (*zero) {
1115 void *addr = extent_base_get(extent);
1116 size_t size = extent_size_get(extent);
1117 if (!extent_zeroed_get(extent)) {
1118 if (pages_purge_forced(addr, size)) {
1119 memset(addr, 0, size);
1120 }
1121 } else if (config_debug) {
1122 size_t *p = (size_t *)(uintptr_t)addr;
1123 for (size_t i = 0; i < size / sizeof(size_t); i++) {
1124 assert(p[i] == 0);
1125 }
1126 }
1127 }
1128 return extent;
1129 }
1130
1131 /*
1132 * If the caller specifies (!*zero), it is still possible to receive zeroed
1133 * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
1134 * advantage of this to avoid demanding zeroed extents, but taking advantage of
1135 * them if they are returned.
1136 */
1137 static void *
1138 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1139 size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1140 void *ret;
1141
1142 assert(size != 0);
1143 assert(alignment != 0);
1144
1145 /* "primary" dss. */
1146 if (have_dss && dss_prec == dss_prec_primary && (ret =
1147 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1148 commit)) != NULL) {
1149 return ret;
1150 }
1151 /* mmap. */
1152 if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1153 != NULL) {
1154 return ret;
1155 }
1156 /* "secondary" dss. */
1157 if (have_dss && dss_prec == dss_prec_secondary && (ret =
1158 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1159 commit)) != NULL) {
1160 return ret;
1161 }
1162
1163 /* All strategies for allocation failed. */
1164 return NULL;
1165 }
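/*
 * Minimal caller sketch for the *zero protocol described above (purely
 * illustrative; the size and dss_prec_disabled arguments are assumptions):
 * the caller passes zero = false so that zeroing is not demanded, then
 * inspects zero afterwards to learn whether the returned memory happens to
 * be zeroed and any clearing work of its own can be skipped.
 *
 *	bool zero = false, commit = true;
 *	void *ptr = extent_alloc_core(tsdn, arena, NULL, size, PAGE, &zero,
 *	    &commit, dss_prec_disabled);
 *	if (ptr != NULL && zero) {
 *		// The memory is known to be zero-filled; skip clearing it.
 *	}
 */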
1166
1167 static void *
1168 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1169 size_t size, size_t alignment, bool *zero, bool *commit) {
1170 void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1171 commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1172 ATOMIC_RELAXED));
1173 if (have_madvise_huge && ret) {
1174 pages_set_thp_state(ret, size);
1175 }
1176 return ret;
1177 }
1178
1179 static void *
1180 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1181 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1182 tsdn_t *tsdn;
1183 arena_t *arena;
1184
1185 tsdn = tsdn_fetch();
1186 arena = arena_get(tsdn, arena_ind, false);
1187 /*
1188 * The arena we're allocating on behalf of must have been initialized
1189 * already.
1190 */
1191 assert(arena != NULL);
1192
1193 return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1194 alignment, zero, commit);
1195 }
1196
1197 static void
1198 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1199 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1200 if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1201 /*
1202 * The only legitimate case of customized extent hooks for a0 is
1203 * hooks with no allocation activities. One such example is to
1204 * place metadata on pre-allocated resources such as huge pages.
1205 * In that case, rely on reentrancy_level checks to catch
1206 * infinite recursions.
1207 */
1208 pre_reentrancy(tsd, NULL);
1209 } else {
1210 pre_reentrancy(tsd, arena);
1211 }
1212 }
1213
1214 static void
1215 extent_hook_post_reentrancy(tsdn_t *tsdn) {
1216 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1217 post_reentrancy(tsd);
1218 }
1219
1220 /*
1221 * If virtual memory is retained, create increasingly larger extents from which
1222 * to split requested extents in order to limit the total number of disjoint
1223 * virtual memory ranges retained by each arena.
1224 */
1225 static extent_t *
1226 extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1227 extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1228 bool slab, szind_t szind, bool *zero, bool *commit) {
1229 malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1230 assert(pad == 0 || !slab);
1231 assert(!*zero || !slab);
1232
1233 size_t esize = size + pad;
1234 size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1235 /* Beware size_t wrap-around. */
1236 if (alloc_size_min < esize) {
1237 goto label_err;
1238 }
1239 /*
1240 * Find the next extent size in the series that would be large enough to
1241 * satisfy this request.
1242 */
1243 pszind_t egn_skip = 0;
1244 size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1245 while (alloc_size < alloc_size_min) {
1246 egn_skip++;
1247 if (arena->extent_grow_next + egn_skip == NPSIZES) {
1248 /* Outside legal range. */
1249 goto label_err;
1250 }
1251 assert(arena->extent_grow_next + egn_skip < NPSIZES);
1252 alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1253 }
1254
1255 extent_t *extent = extent_alloc(tsdn, arena);
1256 if (extent == NULL) {
1257 goto label_err;
1258 }
1259 bool zeroed = false;
1260 bool committed = false;
1261
1262 void *ptr;
1263 if (*r_extent_hooks == &extent_hooks_default) {
1264 ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1265 alloc_size, PAGE, &zeroed, &committed);
1266 } else {
1267 extent_hook_pre_reentrancy(tsdn, arena);
1268 ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1269 alloc_size, PAGE, &zeroed, &committed,
1270 arena_ind_get(arena));
1271 extent_hook_post_reentrancy(tsdn);
1272 }
1273
1274 extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
1275 arena_extent_sn_next(arena), extent_state_active, zeroed,
1276 committed, true);
1277 if (ptr == NULL) {
1278 extent_dalloc(tsdn, arena, extent);
1279 goto label_err;
1280 }
1281
1282 if (extent_register_no_gdump_add(tsdn, extent)) {
1283 extents_leak(tsdn, arena, r_extent_hooks,
1284 &arena->extents_retained, extent, true);
1285 goto label_err;
1286 }
1287
1288 if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1289 *zero = true;
1290 }
1291 if (extent_committed_get(extent)) {
1292 *commit = true;
1293 }
1294
1295 rtree_ctx_t rtree_ctx_fallback;
1296 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1297
1298 extent_t *lead;
1299 extent_t *trail;
1300 extent_t *to_leak;
1301 extent_t *to_salvage;
1302 extent_split_interior_result_t result = extent_split_interior(
1303 tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1304 &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1305 true);
1306
1307 if (result == extent_split_interior_ok) {
1308 if (lead != NULL) {
1309 extent_record(tsdn, arena, r_extent_hooks,
1310 &arena->extents_retained, lead, true);
1311 }
1312 if (trail != NULL) {
1313 extent_record(tsdn, arena, r_extent_hooks,
1314 &arena->extents_retained, trail, true);
1315 }
1316 } else {
1317 /*
1318 * We should have allocated a sufficiently large extent; the
1319 * cant_alloc case should not occur.
1320 */
1321 assert(result == extent_split_interior_error);
1322 if (to_salvage != NULL) {
1323 if (config_prof) {
1324 extent_gdump_add(tsdn, to_salvage);
1325 }
1326 extent_record(tsdn, arena, r_extent_hooks,
1327 &arena->extents_retained, to_salvage, true);
1328 }
1329 if (to_leak != NULL) {
1330 extent_deregister_no_gdump_sub(tsdn, to_leak);
1331 extents_leak(tsdn, arena, r_extent_hooks,
1332 &arena->extents_retained, to_leak, true);
1333 }
1334 goto label_err;
1335 }
1336
1337 if (*commit && !extent_committed_get(extent)) {
1338 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1339 extent_size_get(extent), true)) {
1340 extent_record(tsdn, arena, r_extent_hooks,
1341 &arena->extents_retained, extent, true);
1342 goto label_err;
1343 }
1344 extent_zeroed_set(extent, true);
1345 }
1346
1347 /*
1348 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1349 * range.
1350 */
1351 if (arena->extent_grow_next + egn_skip + 1 <=
1352 arena->retain_grow_limit) {
1353 arena->extent_grow_next += egn_skip + 1;
1354 } else {
1355 arena->extent_grow_next = arena->retain_grow_limit;
1356 }
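/*
 * Advancing extent_grow_next one class past the class just used makes
 * successive retained grows request geometrically increasing sizes (roughly
 * four size classes per doubling with the default class layout), which is
 * what bounds the number of disjoint retained mappings per arena, subject to
 * retain_grow_limit.
 */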
1357 /* All opportunities for failure are past. */
1358 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1359
1360 if (config_prof) {
1361 /* Adjust gdump stats now that extent is final size. */
1362 extent_gdump_add(tsdn, extent);
1363 }
1364 if (pad != 0) {
1365 extent_addr_randomize(tsdn, extent, alignment);
1366 }
1367 if (slab) {
1368 rtree_ctx_t rtree_ctx_fallback;
1369 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1370 &rtree_ctx_fallback);
1371
1372 extent_slab_set(extent, true);
1373 extent_interior_register(tsdn, rtree_ctx, extent, szind);
1374 }
1375 if (*zero && !extent_zeroed_get(extent)) {
1376 void *addr = extent_base_get(extent);
1377 size_t size = extent_size_get(extent);
1378 if (pages_purge_forced(addr, size)) {
1379 memset(addr, 0, size);
1380 }
1381 }
1382
1383 return extent;
1384 label_err:
1385 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1386 return NULL;
1387 }
1388
1389 static extent_t *
1390 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1391 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1392 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1393 assert(size != 0);
1394 assert(alignment != 0);
1395
1396 malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1397
1398 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1399 &arena->extents_retained, new_addr, size, pad, alignment, slab,
1400 szind, zero, commit, true);
1401 if (extent != NULL) {
1402 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1403 if (config_prof) {
1404 extent_gdump_add(tsdn, extent);
1405 }
1406 } else if (opt_retain && new_addr == NULL) {
1407 extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1408 pad, alignment, slab, szind, zero, commit);
1409 /* extent_grow_retained() always releases extent_grow_mtx. */
1410 } else {
1411 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1412 }
1413 malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1414
1415 return extent;
1416 }
1417
1418 static extent_t *
1419 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1420 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1421 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1422 size_t esize = size + pad;
1423 extent_t *extent = extent_alloc(tsdn, arena);
1424 if (extent == NULL) {
1425 return NULL;
1426 }
1427 void *addr;
1428 if (*r_extent_hooks == &extent_hooks_default) {
1429 /* Call directly to propagate tsdn. */
1430 addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1431 alignment, zero, commit);
1432 } else {
1433 extent_hook_pre_reentrancy(tsdn, arena);
1434 addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1435 esize, alignment, zero, commit, arena_ind_get(arena));
1436 extent_hook_post_reentrancy(tsdn);
1437 }
1438 if (addr == NULL) {
1439 extent_dalloc(tsdn, arena, extent);
1440 return NULL;
1441 }
1442 extent_init(extent, arena, addr, esize, slab, szind,
1443 arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1444 true);
1445 if (pad != 0) {
1446 extent_addr_randomize(tsdn, extent, alignment);
1447 }
1448 if (extent_register(tsdn, extent)) {
1449 extents_leak(tsdn, arena, r_extent_hooks,
1450 &arena->extents_retained, extent, false);
1451 return NULL;
1452 }
1453
1454 return extent;
1455 }
1456
1457 extent_t *
1458 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1459 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1460 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1461 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1462 WITNESS_RANK_CORE, 0);
1463
1464 extent_hooks_assure_initialized(arena, r_extent_hooks);
1465
1466 extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1467 new_addr, size, pad, alignment, slab, szind, zero, commit);
1468 if (extent == NULL) {
1469 if (opt_retain && new_addr != NULL) {
1470 /*
1471 * When retain is enabled and new_addr is set, we do not
1472 * attempt extent_alloc_wrapper_hard, which does an mmap
1473 * that is very unlikely to succeed (unless new_addr happens
1474 * to be at the end).
1475 */
1476 return NULL;
1477 }
1478 extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1479 new_addr, size, pad, alignment, slab, szind, zero, commit);
1480 }
1481
1482 assert(extent == NULL || extent_dumpable_get(extent));
1483 return extent;
1484 }
1485
1486 static bool
1487 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1488 const extent_t *outer) {
1489 assert(extent_arena_get(inner) == arena);
1490 if (extent_arena_get(outer) != arena) {
1491 return false;
1492 }
1493
1494 assert(extent_state_get(inner) == extent_state_active);
1495 if (extent_state_get(outer) != extents->state) {
1496 return false;
1497 }
1498
1499 if (extent_committed_get(inner) != extent_committed_get(outer)) {
1500 return false;
1501 }
1502
1503 return true;
1504 }
1505
1506 static bool
1507 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1508 extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1509 bool growing_retained) {
1510 assert(extent_can_coalesce(arena, extents, inner, outer));
1511
1512 extent_activate_locked(tsdn, arena, extents, outer);
1513
1514 malloc_mutex_unlock(tsdn, &extents->mtx);
1515 bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1516 forward ? inner : outer, forward ? outer : inner, growing_retained);
1517 malloc_mutex_lock(tsdn, &extents->mtx);
1518
1519 if (err) {
1520 extent_deactivate_locked(tsdn, arena, extents, outer);
1521 }
1522
1523 return err;
1524 }
1525
1526 static extent_t *
1527 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1528 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1529 extent_t *extent, bool *coalesced, bool growing_retained) {
1530 /*
1531 * Continue attempting to coalesce until failure, to protect against
1532 * races with other threads that are thwarted by this one.
1533 */
1534 bool again;
1535 do {
1536 again = false;
1537
1538 /* Try to coalesce forward. */
1539 extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1540 extent_past_get(extent));
1541 if (next != NULL) {
1542 /*
1543 * extents->mtx only protects against races for
1544 * like-state extents, so call extent_can_coalesce()
1545 * before releasing next's pool lock.
1546 */
1547 bool can_coalesce = extent_can_coalesce(arena, extents,
1548 extent, next);
1549
1550 extent_unlock(tsdn, next);
1551
1552 if (can_coalesce && !extent_coalesce(tsdn, arena,
1553 r_extent_hooks, extents, extent, next, true,
1554 growing_retained)) {
1555 if (extents->delay_coalesce) {
1556 /* Do minimal coalescing. */
1557 *coalesced = true;
1558 return extent;
1559 }
1560 again = true;
1561 }
1562 }
1563
1564 /* Try to coalesce backward. */
1565 extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1566 extent_before_get(extent));
1567 if (prev != NULL) {
1568 bool can_coalesce = extent_can_coalesce(arena, extents,
1569 extent, prev);
1570 extent_unlock(tsdn, prev);
1571
1572 if (can_coalesce && !extent_coalesce(tsdn, arena,
1573 r_extent_hooks, extents, extent, prev, false,
1574 growing_retained)) {
1575 extent = prev;
1576 if (extents->delay_coalesce) {
1577 /* Do minimal coalescing. */
1578 *coalesced = true;
1579 return extent;
1580 }
1581 again = true;
1582 }
1583 }
1584 } while (again);
1585
1586 if (extents->delay_coalesce) {
1587 *coalesced = false;
1588 }
1589 return extent;
1590 }
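
/*
 * Illustration (a hypothetical, self-contained sketch, not jemalloc code):
 * extent_try_coalesce() above merges a freed extent with the extents that are
 * immediately adjacent in the address space, looking forward from its end and
 * backward from its start, and retries because concurrent threads may race
 * with it.  The single-threaded sketch below applies the same
 * forward/backward adjacency test to a sorted array of free ranges; the names
 * range_t and coalesce_with_neighbors() are made up for the example.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t base;
	size_t size;
} range_t;

/* Merge ranges[i] with its address-adjacent neighbors in a sorted array of
 * free ranges; returns the new number of ranges. */
static size_t
coalesce_with_neighbors(range_t *ranges, size_t nranges, size_t i) {
	/* Forward: the next range begins exactly where this one ends. */
	if (i + 1 < nranges &&
	    ranges[i].base + ranges[i].size == ranges[i + 1].base) {
		ranges[i].size += ranges[i + 1].size;
		for (size_t j = i + 1; j + 1 < nranges; j++) {
			ranges[j] = ranges[j + 1];
		}
		nranges--;
	}
	/* Backward: the previous range ends exactly where this one begins. */
	if (i > 0 &&
	    ranges[i - 1].base + ranges[i - 1].size == ranges[i].base) {
		ranges[i - 1].size += ranges[i].size;
		for (size_t j = i; j + 1 < nranges; j++) {
			ranges[j] = ranges[j + 1];
		}
		nranges--;
	}
	return nranges;
}
#endif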

/*
 * Does the metadata management portion of putting an unused extent into the
 * given extents_t (coalescing, deregistering slab interiors, and the heap
 * operations).
 */
static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	assert((extents_state_get(extents) != extent_state_dirty &&
	    extents_state_get(extents) != extent_state_muzzy) ||
	    !extent_zeroed_get(extent));

	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_szind_set(extent, NSIZES);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), true) == extent);

	if (!extents->delay_coalesce) {
		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent, NULL, growing_retained);
	} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
		/* Always coalesce large extents eagerly. */
		bool coalesced;
		size_t prev_size;
		do {
			prev_size = extent_size_get(extent);
			assert(extent_state_get(extent) == extent_state_active);
			extent = extent_try_coalesce(tsdn, arena,
			    r_extent_hooks, rtree_ctx, extents, extent,
			    &coalesced, growing_retained);
		} while (coalesced &&
		    extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
	}
	extent_deactivate_locked(tsdn, arena, extents, extent);

	malloc_mutex_unlock(tsdn, &extents->mtx);
}

void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, &extent_hooks,
		    &arena->extents_retained, extent, false);
		return;
	}
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

static bool
extent_dalloc_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}
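
/*
 * Example (hypothetical, not part of jemalloc): a custom dalloc hook may opt
 * out of unmapping by reporting failure.  extent_dalloc_wrapper() below then
 * reregisters the extent, decommits or purges it, and records it in the
 * arena's retained extents for later reuse.  The name retaining_dalloc() is
 * made up for the example.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Never unmap: returning true tells jemalloc the deallocation did not
 * happen, so the mapping is kept and the extent is retained. */
static bool
retaining_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	(void)extent_hooks; (void)addr; (void)size; (void)committed;
	(void)arena_ind;
	return true;
}
#endif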

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return extent_dalloc_default_impl(addr, size);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to deallocate. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
		extent_hook_post_reentrancy(tsdn);
	}

	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/*
	 * Deregister first to avoid a race with other allocating threads, and
	 * reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
		return;
	}

	extent_reregister(tsdn, extent);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		zeroed = false;
	} else {
		zeroed = false;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}
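
/*
 * Example (hypothetical, Linux-flavored, not part of jemalloc): a forced
 * purge hook for private anonymous mappings.  MADV_DONTNEED discards the
 * pages and guarantees that they read back as zero on next access, so
 * returning false (success) lets a caller like extent_dalloc_wrapper() treat
 * the range as zeroed.  The name example_purge_forced() is made up for the
 * example.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

static bool
example_purge_forced(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	(void)extent_hooks; (void)size; (void)arena_ind;
	/* madvise() returns 0 on success; the hook returns false on success. */
	return madvise((void *)((uintptr_t)addr + offset), length,
	    MADV_DONTNEED) != 0;
}
#endif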

static void
extent_destroy_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	extent_destroy_default_impl(addr, size);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Deregister first to avoid a race with other allocating threads. */
	extent_deregister(tsdn, extent);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to destroy; silently fail otherwise. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		extent_destroy_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else if ((*r_extent_hooks)->destroy != NULL) {
		extent_hook_pre_reentrancy(tsdn, arena);
		(*r_extent_hooks)->destroy(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_dalloc(tsdn, arena, extent);
}

static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
	    length, false);
}

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}
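
/*
 * Example (hypothetical, Linux-flavored, not part of jemalloc): one possible
 * commit/decommit hook pair for anonymous mappings.  Commit makes the range
 * accessible with mprotect(); decommit remaps it PROT_NONE so the kernel can
 * reclaim the backing pages and a later commit observes zeroed memory.  The
 * default hooks above use jemalloc's internal pages_commit()/pages_decommit()
 * instead, whose exact behavior is platform dependent.  The names
 * example_commit()/example_decommit() are made up for the example.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

static bool
example_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	(void)extent_hooks; (void)size; (void)arena_ind;
	/* Make the page range readable and writable again. */
	return mprotect((void *)((uintptr_t)addr + offset), length,
	    PROT_READ | PROT_WRITE) != 0;
}

static bool
example_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	(void)extent_hooks; (void)size; (void)arena_ind;
	/* Replace the range with fresh inaccessible pages. */
	void *p = mmap((void *)((uintptr_t)addr + offset), length, PROT_NONE,
	    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	return p == MAP_FAILED;
}
#endif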

#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif

static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_lazy == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}

	return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_forced == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return !maps_coalesce;
}
#endif

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters
 * go with the trail (the higher addressed portion).  This makes 'extent' the
 * lead, and returns the trail (except in case of error).
 */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_dumpable_get(extent));

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_dumpable_get(extent));

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

	extent_lock2(tsdn, extent, trail);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->split(*r_extent_hooks,
	    extent_base_get(extent), size_a + size_b, size_a, size_b,
	    extent_committed_get(extent), arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	if (err) {
		goto label_error_c;
	}

	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}
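
/*
 * Example (hypothetical, not part of jemalloc): for ordinary anonymous
 * mmap-backed extents there is no OS-level work to do on a split;
 * extent_split_impl() above performs all of the rtree and extent bookkeeping
 * itself, so a hook only has to approve the operation.  This is only
 * appropriate when the two halves can later be committed, purged, and
 * unmapped independently.  The name example_split() is made up for the
 * example.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static bool
example_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	(void)extent_hooks; (void)addr; (void)committed; (void)arena_ind;
	/* Return false (success) whenever the requested sizes are coherent. */
	return size_a + size_b != size;
}
#endif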

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
}

static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce) {
		return true;
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	return extent_merge_default_impl(addr_a, addr_b);
}
#endif

static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	extent_lock2(tsdn, a, b);

	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    NSIZES, false);
	} else {
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}
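
/*
 * Example (hypothetical, not part of jemalloc): a merge hook that accepts a
 * merge only when the two extents are virtually contiguous, returning true
 * (failure) otherwise so the extents stay separate.  In practice jemalloc
 * only asks to merge address-adjacent extents, so the check mainly
 * illustrates the hook's contract.  The name example_merge() is made up for
 * the example.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static bool
example_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	(void)extent_hooks; (void)size_b; (void)committed; (void)arena_ind;
	/* Succeed (false) only if addr_b begins exactly where a ends. */
	return (uintptr_t)addr_a + size_a != (uintptr_t)addr_b;
}
#endif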

bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}