#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"

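/* Return true if the arena is using the default extent hooks. */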
JEMALLOC_ALWAYS_INLINE bool
arena_has_default_hooks(arena_t *arena) {
	return (extent_hooks_get(arena) == &extent_hooks_default);
}

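/*
 * Choose an arena for an allocation of the given size; oversized automatic
 * allocations may be diverted to the dedicated huge arena (see below).
 */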
JEMALLOC_ALWAYS_INLINE arena_t *
arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
	if (arena != NULL) {
		return arena;
	}

	/*
	 * For huge allocations, use the dedicated huge arena if both are true:
	 * 1) the allocation is using automatic arena selection (i.e. arena ==
	 * NULL), and 2) the thread is not assigned to a manual arena.
	 */
	if (unlikely(size >= oversize_threshold)) {
		arena_t *tsd_arena = tsd_arena_get(tsd);
		if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
			return arena_choose_huge(tsd);
		}
	}

	return arena_choose(tsd, NULL);
}

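/*
 * Return the profiling tctx for ptr; slab-backed (small) allocations always
 * map to the constant non-sampled sentinel, and only large extents carry a
 * real tctx.
 */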
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check; the NULL test is resolvable at each call site. */
	if (alloc_ctx == NULL) {
		const extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			return large_prof_tctx_get(tsdn, extent);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
		}
	}
	return (prof_tctx_t *)(uintptr_t)1U;
}

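/* Set the profiling tctx for ptr; only large (non-slab) extents store one. */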
JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	/* Static check; the NULL test is resolvable at each call site. */
	if (alloc_ctx == NULL) {
		extent_t *extent = iealloc(tsdn, ptr);
		if (unlikely(!extent_slab_get(extent))) {
			large_prof_tctx_set(tsdn, extent, tctx);
		}
	} else {
		if (unlikely(!alloc_ctx->slab)) {
			large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
		}
	}
}

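/* Reset the profiling tctx of a sampled (large) allocation. */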
static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	assert(!extent_slab_get(extent));

	large_prof_tctx_reset(tsdn, extent);
}

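/* Return the allocation timestamp recorded for a sampled allocation. */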
JEMALLOC_ALWAYS_INLINE nstime_t
arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
    alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	/*
	 * Unlike arena_prof_tctx_{get,set}, we only call this once we're sure
	 * we have a sampled allocation.
	 */
	assert(!extent_slab_get(extent));
	return large_prof_alloc_time_get(extent);
}

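/* Record the allocation timestamp for a sampled allocation. */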
JEMALLOC_ALWAYS_INLINE void
arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
    nstime_t t) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_t *extent = iealloc(tsdn, ptr);
	assert(!extent_slab_get(extent));
	large_prof_alloc_time_set(extent, t);
}

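/*
 * Advance this thread's decay ticker for arena by nticks, running decay if
 * the ticker fires.
 */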
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
	tsd_t *tsd;
	ticker_t *decay_ticker;

	if (unlikely(tsdn_null(tsdn))) {
		return;
	}
	tsd = tsdn_tsd(tsdn);
	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
	if (unlikely(decay_ticker == NULL)) {
		return;
	}
	if (unlikely(ticker_ticks(decay_ticker, nticks))) {
		arena_decay(tsdn, arena, false, false);
	}
}

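/* Register a single decay tick. */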
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);

	arena_decay_ticks(tsdn, arena, 1);
}

/* Purge a single extent to retained / unmapped directly. */
JEMALLOC_ALWAYS_INLINE void
arena_decay_extent(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	size_t extent_size = extent_size_get(extent);
	extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, extent);
	if (config_stats) {
		/* Update stats accordingly. */
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &arena->decay_dirty.stats->nmadvise, 1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    extent_size);
		arena_stats_unlock(tsdn, &arena->stats);
	}
}

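/*
 * Allocation fast path: serve small and cacheable large requests from the
 * tcache when one is available, otherwise fall back to arena_malloc_hard().
 */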
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);

	if (likely(tcache != NULL)) {
		if (likely(size <= SC_SMALL_MAXCLASS)) {
			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		if (likely(size <= tcache_maxclass)) {
			return tcache_alloc_large(tsdn_tsd(tsdn), arena,
			    tcache, size, ind, zero, slow_path);
		}
		/* (size > tcache_maxclass) case falls through. */
		assert(size > tcache_maxclass);
	}

	return arena_malloc_hard(tsdn, arena, size, ind, zero);
}

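/* Return the arena that owns ptr. */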
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
	return extent_arena_get(iealloc(tsdn, ptr));
}

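/* Return the usable size of ptr, which must be a live allocation. */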
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	assert(szind != SC_NSIZES);

	return sz_index2size(szind);
}

JEMALLOC_ALWAYS_INLINE size_t
arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
	/*
	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
	 * function has two extra costs relative to isalloc():
	 * - The rtree calls cannot claim to be dependent lookups, which induces
	 *   rtree lookup load dependencies.
	 * - The lookup may fail, so there is an extra branch to check for
	 *   failure.
	 */

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent;
	szind_t szind;
	if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, false, &extent, &szind)) {
		return 0;
	}

	if (extent == NULL) {
		return 0;
	}
	assert(extent_state_get(extent) == extent_state_active);
	/* Only slab members should be looked up via interior pointers. */
	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));

	assert(szind != SC_NSIZES);

	return sz_index2size(szind);
}

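/* Deallocation path used when no tcache is available. */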
static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
	assert(ptr != NULL);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	szind_t szind;
	bool slab;
	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    true, &szind, &slab);

	if (config_debug) {
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < SC_NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

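/*
 * Deallocation fast path: route small and tcache-able large objects through
 * the tcache (promoted sampled objects take a dedicated path); anything
 * larger goes straight to large_dalloc().
 */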
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);

	if (unlikely(tcache == NULL)) {
		arena_dalloc_no_tcache(tsdn, ptr);
		return;
	}

	szind_t szind;
	bool slab;
	rtree_ctx_t *rtree_ctx;
	if (alloc_ctx != NULL) {
		szind = alloc_ctx->szind;
		slab = alloc_ctx->slab;
		assert(szind != SC_NSIZES);
	} else {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
	}

	if (config_debug) {
		rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(szind < SC_NSIZES);
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < SC_NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
				    szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}

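/* Sized deallocation path used when no tcache is available. */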
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	szind_t szind;
	bool slab;
	if (!config_prof || !opt_prof) {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < SC_NBINS);
	}

	if ((config_prof && opt_prof) || config_debug) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);

		assert(szind == sz_size2index(size));
		assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));

		if (config_debug) {
			extent_t *extent = rtree_extent_read(tsdn,
			    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
			assert(szind == extent_szind_get(extent));
			assert(slab == extent_slab_get(extent));
		}
	}

	if (likely(slab)) {
		/* Small allocation. */
		arena_dalloc_small(tsdn, ptr);
	} else {
		extent_t *extent = iealloc(tsdn, ptr);
		large_dalloc(tsdn, extent);
	}
}

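/*
 * Sized deallocation fast path: like arena_dalloc(), but the size class is
 * derived from the caller-provided size whenever profiling cannot have
 * promoted the object.
 */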
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
    alloc_ctx_t *alloc_ctx, bool slow_path) {
	assert(!tsdn_null(tsdn) || tcache == NULL);
	assert(ptr != NULL);
	assert(size <= SC_LARGE_MAXCLASS);

	if (unlikely(tcache == NULL)) {
		arena_sdalloc_no_tcache(tsdn, ptr, size);
		return;
	}

	szind_t szind;
	bool slab;
	alloc_ctx_t local_ctx;
	if (config_prof && opt_prof) {
		if (alloc_ctx == NULL) {
			/* Uncommon case and should be a static check. */
			rtree_ctx_t rtree_ctx_fallback;
			rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
			    &rtree_ctx_fallback);
			rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)ptr, true, &local_ctx.szind,
			    &local_ctx.slab);
			assert(local_ctx.szind == sz_size2index(size));
			alloc_ctx = &local_ctx;
		}
		slab = alloc_ctx->slab;
		szind = alloc_ctx->szind;
	} else {
		/*
		 * There is no risk of being confused by a promoted sampled
		 * object, so base szind and slab on the given size.
		 */
		szind = sz_size2index(size);
		slab = (szind < SC_NBINS);
	}

	if (config_debug) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
		rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &szind, &slab);
		extent_t *extent = rtree_extent_read(tsdn,
		    &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
		assert(szind == extent_szind_get(extent));
		assert(slab == extent_slab_get(extent));
	}

	if (likely(slab)) {
		/* Small allocation. */
		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
		    slow_path);
	} else {
		if (szind < nhbins) {
			if (config_prof && unlikely(szind < SC_NBINS)) {
				arena_dalloc_promoted(tsdn, ptr, tcache,
				    slow_path);
			} else {
				tcache_dalloc_large(tsdn_tsd(tsdn),
				    tcache, ptr, szind, slow_path);
			}
		} else {
			extent_t *extent = iealloc(tsdn, ptr);
			large_dalloc(tsdn, extent);
		}
	}
}

#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */