#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"

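/*
 * Return true iff the arena is still using the built-in default extent hooks
 * (i.e. no custom extent hooks have been installed).
 */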
JEMALLOC_ALWAYS_INLINE bool
arena_has_default_hooks(arena_t *arena) {
	return (extent_hooks_get(arena) == &extent_hooks_default);
}

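/*
 * Choose the arena to serve an allocation of the given size.  An explicitly
 * requested arena is always honored; otherwise oversized requests may be
 * redirected to the dedicated huge arena (see below).
 */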
JEMALLOC_ALWAYS_INLINE arena_t *
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
	if (arena != NULL) {
		return arena;
	}

	/*
	 * For huge allocations, use the dedicated huge arena if both are true:
	 * 1) the caller is using automatic arena selection (i.e. arena ==
	 * NULL), and 2) the thread is not assigned to a manual arena.
	 */
	if (unlikely(size >= oversize_threshold)) {
		arena_t *tsd_arena = tsd_arena_get(tsd);
		if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
			return arena_choose_huge(tsd);
		}
	}

	return arena_choose(tsd, NULL);
}

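/*
 * Read the prof tctx associated with ptr.  Only non-slab (large) extents
 * carry a real tctx; slab-backed small allocations are never sampled and
 * share the constant (prof_tctx_t *)(uintptr_t)1U.
 */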
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check; resolved at compile time at each inlined call site. */
	if (alloc_ctx == NULL) {
		const extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			return large_prof_tctx_get(tsdn, extent);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
		}
	}
	return (prof_tctx_t *)(uintptr_t)1U;
}

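/*
 * Associate tctx with ptr.  As above, only non-slab (large) extents have
 * per-extent tctx storage; this is a no-op for slab-backed allocations.
 */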
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check; resolved at compile time at each inlined call site. */
	if (alloc_ctx == NULL) {
		extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			large_prof_tctx_set(tsdn, extent, tctx);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
		}
	}
}

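/*
 * Reset ptr's tctx to the "not sampled" state.  Sampled allocations are
 * always backed by large extents, hence the assertion below.
 */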
static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	assert(!extent_slab_get(extent));

	large_prof_tctx_reset(tsdn, extent);
}

JEMALLOC_ALWAYS_INLINE nstime_t
arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
    alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	/*
	 * Unlike arena_prof_tctx_{get, set}, we only call this once we're
	 * sure we have a sampled allocation.
	 */
	assert(!extent_slab_get(extent));
	return large_prof_alloc_time_get(extent);
}

JEMALLOC_ALWAYS_INLINE void
arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
    nstime_t t) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	assert(!extent_slab_get(extent));
	large_prof_alloc_time_set(extent, t);
}

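/*
 * Advance arena's decay ticker by nticks; if the ticker fires, run a decay
 * pass over the arena's dirty and muzzy extents.
 */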
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn))) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
	if (unlikely(decay_ticker == NULL)) {
		return;
	}
	if (unlikely(ticker_ticks(decay_ticker, nticks))) {
		arena_decay(tsdn, arena, false, false);
	}
}

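/*
 * Single-tick convenience wrapper.  Neither decay mutex may be held on entry,
 * since a triggered decay pass will acquire them.
 */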
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);

	arena_decay_ticks(tsdn, arena, 1);
}

/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE void
arena_decay_extent(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	size_t extent_size = extent_size_get(extent);
	extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent);
	if (config_stats) {
		/* Update stats accordingly. */
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &arena->decay_dirty.stats->nmadvise, 1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    extent_size);
		arena_stats_unlock(tsdn, &arena->stats);
	}
}

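/*
 * Allocation fast path: serve cacheable sizes from the thread cache when one
 * is available; everything else (no tcache, or size > tcache_maxclass) falls
 * through to arena_malloc_hard().
 */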
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);

	if (likely(tcache != NULL)) {
		if (likely(size <= SC_SMALL_MAXCLASS)) {
			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		if (likely(size <= tcache_maxclass)) {
			return tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return arena_malloc_hard(tsdn, arena, size, ind, zero);
}

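/* Return the arena that owns ptr. */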
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
	return extent_arena_get(iealloc(tsdn, ptr));
}

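/*
 * Return the usable size of ptr, which must point to a live allocation; see
 * arena_vsalloc() for the variant that tolerates arbitrary pointers.
 */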
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	assert(szind != SC_NSIZES);

	return sz_index2size(szind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
	/*
	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
	 * function has two extra costs relative to isalloc():
	 * - The rtree calls cannot claim to be dependent lookups, which induces
	 *   rtree lookup load dependencies.
	 * - The lookup may fail, so there is an extra branch to check for
	 *   failure.
	 */

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent;
	szind_t szind;
	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, false, &extent, &szind)) {
		return 0;
	}

	if (extent == NULL) {
		return 0;
	}
	assert(extent_state_get(extent) == extent_state_active);
	/* Only slab members should be looked up via interior pointers. */
	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));

	assert(szind != SC_NSIZES);

	return sz_index2size(szind);
}

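/*
 * Free a large allocation without a tcache.  A small size class here can only
 * be a sampled allocation that was promoted to a large extent, so it takes
 * the promoted-deallocation path.
 */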
static inline void
arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
	if (config_prof && unlikely(szind < SC_NBINS)) {
		arena_dalloc_promoted(tsdn, ptr, NULL, true);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind;
	bool slab;
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &slab);

	if (config_debug) {
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < SC_NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		arena_dalloc_large_no_tcache(tsdn, ptr, szind);
	}
}

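/*
 * Free a large allocation, returning cacheable size classes (szind < nhbins)
 * to the tcache; sampled promoted allocations go through
 * arena_dalloc_promoted(), and anything beyond the tcache limit is freed
 * directly.
 */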
JEMALLOC_ALWAYS_INLINE void
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
    bool slow_path) {
	if (szind < nhbins) {
		if (config_prof && unlikely(szind < SC_NBINS)) {
			arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
		} else {
			tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
			    slow_path);
		}
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

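/*
 * Deallocation fast path: determine szind/slab from the caller-provided
 * alloc_ctx if there is one, otherwise from the rtree, then dispatch to the
 * small (slab) or large path.
 */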
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (unlikely(tcache == NULL)) {
		arena_dalloc_no_tcache(tsdn, ptr);
		return;
	}

	szind_t szind;
	bool slab;
	rtree_ctx_t *rtree_ctx;
	if (alloc_ctx != NULL) {
		szind = alloc_ctx->szind;
		slab = alloc_ctx->slab;
		assert(szind != SC_NSIZES);
	} else {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
	}

	if (config_debug) {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < SC_NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
	}
}

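/*
 * Sized deallocation without a tcache.  When profiling is enabled the passed
 * size cannot be trusted to derive szind (sampled allocations are promoted),
 * so szind/slab are read back from the rtree instead.
 */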
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	szind_t szind;
	bool slab;
	if (!config_prof || !opt_prof) {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < SC_NBINS);
	}

	if ((config_prof && opt_prof) || config_debug) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);

		assert(szind == sz_size2index(size));
		assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));

		if (config_debug) {
			extent_t *extent = rtree_extent_read(tsdn,
			    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
			assert(szind == extent_szind_get(extent));
			assert(slab == extent_slab_get(extent));
		}
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		arena_dalloc_large_no_tcache(tsdn, ptr, szind);
	}
}

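/*
 * Sized deallocation with a tcache.  As above, with profiling active the
 * szind/slab come from alloc_ctx (or an rtree lookup when none is supplied)
 * rather than from the passed size.
 */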
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	if (unlikely(tcache == NULL)) {
		arena_sdalloc_no_tcache(tsdn, ptr, size);
		return;
	}

	szind_t szind;
	bool slab;
	alloc_ctx_t local_ctx;
	if (config_prof && opt_prof) {
		if (alloc_ctx == NULL) {
			/* Uncommon case and should be a static check. */
			rtree_ctx_t rtree_ctx_fallback;
			rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
			    &rtree_ctx_fallback);
			rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)ptr, true, &local_ctx.szind,
			    &local_ctx.slab);
			assert(local_ctx.szind == sz_size2index(size));
			alloc_ctx = &local_ctx;
		}
		slab = alloc_ctx->slab;
		szind = alloc_ctx->szind;
	} else {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < SC_NBINS);
	}

	if (config_debug) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
		extent_t *extent = rtree_extent_read(tsdn,
		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
	}
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */