xref: /freebsd/contrib/jemalloc/src/tcache.c (revision a0ee8cc6)
#define	JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

size_t			nhbins;
size_t			tcache_maxclass;

tcaches_t		*tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned		tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t	*tcaches_avail;

/******************************************************************************/

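/* Usable-size query used by tcache code; simply defers to the arena. */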
size_t
tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

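/*
 * Incremental GC: flush part of the bin selected by next_gc_bin and tune its
 * fill count based on the low water mark observed since the last event, then
 * advance to the next bin in round-robin order and reset the event counter.
 */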
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
	szind_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X.  Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X.  Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

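/*
 * Refill a small bin from the arena after a cache miss, then retry the
 * allocation from the (possibly still empty) bin.
 */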
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind)
{
	void *ret;

	arena_tcache_fill_small(arena, tbin, binind, config_prof ?
	    tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

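/*
 * Flush all but the last rem cached small objects back to their owning arena
 * bins.  Each pass of the outer loop locks the bin that owns the first
 * remaining object and frees every object belonging to that bin's arena;
 * objects owned by other arenas are deferred to later passes.
 */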
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *bin_arena = extent_node_arena_get(&chunk->node);
		arena_bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(arena, tcache->prof_accumbytes))
				prof_idump();
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && bin_arena == arena) {
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (extent_node_arena_get(&chunk->node) == bin_arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_bits_t *bitselm =
				    arena_bitselm_get(chunk, pageind);
				arena_dalloc_bin_junked_locked(bin_arena, chunk,
				    ptr, bitselm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

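/*
 * Flush all but the last rem cached large objects.  Same deferral scheme as
 * tcache_bin_flush_small(), except that the owning arena's lock (rather than
 * a bin lock) is taken, since large objects are not managed by arena bins.
 */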
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *locked_arena = extent_node_arena_get(&chunk->node);
		UNUSED bool idump;

		if (config_prof)
			idump = false;
		malloc_mutex_lock(&locked_arena->lock);
		if ((config_prof || config_stats) && locked_arena == arena) {
			if (config_prof) {
				idump = arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (extent_node_arena_get(&chunk->node) ==
			    locked_arena) {
				arena_dalloc_large_junked_locked(locked_arena,
				    chunk, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&locked_arena->lock);
		if (config_prof && idump)
			prof_idump();
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

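/*
 * Stats-only bookkeeping: each arena keeps a list of associated tcaches so
 * that cached statistics can be merged when a tcache is destroyed or moved to
 * another arena.
 */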
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
}

void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{

	tcache_arena_dissociate(tcache, oldarena);
	tcache_arena_associate(tcache, newarena);
}

void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		tcache_stats_merge(tcache, arena);
		malloc_mutex_unlock(&arena->lock);
	}
}

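/*
 * Slow path for obtaining the current thread's tcache: honor the per-thread
 * enabled flag, then create a tcache bound to the thread's chosen arena.
 */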
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
	arena_t *arena;

	if (!tcache_enabled_get()) {
		if (tsd_nominal(tsd))
			tcache_enabled_set(false); /* Memoize. */
		return (NULL);
	}
	arena = arena_choose(tsd, NULL);
	if (unlikely(arena == NULL))
		return (NULL);
	return (tcache_create(tsd, arena));
}

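/*
 * Allocate and initialize a tcache as a single cacheline-aligned object: the
 * tcache_t header and tbins array are followed by the per-bin pointer stacks,
 * with each bin's avail pointer aimed at its slice of the stack space.
 */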
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sa2u(size, CACHELINE);

	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	return (tcache);
}

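/*
 * Full teardown: flush every bin, merge remaining per-bin and profiling
 * counters into the thread's arena, and free the tcache itself.
 */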
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
	arena_t *arena;
	unsigned i;

	arena = arena_choose(tsd, NULL);
	tcache_arena_dissociate(tcache, arena);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(arena, tcache->prof_accumbytes))
		prof_idump();

	idalloctm(tsd, tcache, false, true);
}

void
tcache_cleanup(tsd_t *tsd)
{
	tcache_t *tcache;

	if (!config_tcache)
		return;

	if ((tcache = tsd_tcache_get(tsd)) != NULL) {
		tcache_destroy(tsd, tcache);
		tsd_tcache_set(tsd, NULL);
	}
}

void
tcache_enabled_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

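/*
 * Explicitly managed tcaches: the global tcaches array maps small integer
 * indices (returned in *r_ind) to caches created here, with destroyed slots
 * recycled through the tcaches_avail free list.
 */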
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
	tcache_t *tcache;
	tcaches_t *elm;

	if (tcaches == NULL) {
		tcaches = base_alloc(sizeof(tcache_t *) *
		    (MALLOCX_TCACHE_MAX+1));
		if (tcaches == NULL)
			return (true);
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
		return (true);
	tcache = tcache_create(tsd, a0get());
	if (tcache == NULL)
		return (true);

	if (tcaches_avail != NULL) {
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = elm - tcaches;
	} else {
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}

	return (false);
}

static void
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
{

	if (elm->tcache == NULL)
		return;
	tcache_destroy(tsd, elm->tcache);
	elm->tcache = NULL;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind)
{

	tcaches_elm_flush(tsd, &tcaches[ind]);
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	tcaches_elm_flush(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
}

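/*
 * One-time initialization: compute tcache_maxclass and nhbins from
 * opt_lg_tcache_max, then size each bin's cache.  Small bins are sized
 * relative to the number of regions per run, clamped to
 * [TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX]; larger bins get
 * TCACHE_NSLOTS_LARGE slots each.
 */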
bool
tcache_boot(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > large_maxclass)
		tcache_maxclass = large_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = size2index(tcache_maxclass) + 1;

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((arena_bin_info[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}