#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

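/*
 * Accessors for the per-thread "tcache enabled" flag.  tcache_enabled_set()
 * lazily initializes the thread's tcache data on an off->on transition and
 * tears it down on an on->off transition, committing the new state only after
 * those calls (which inspect the current state) have run.
 */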
static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}

static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}

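/*
 * Ticks the tcache's GC ticker on each cache event; once the ticker fires,
 * tcache_event_hard() performs the slow-path garbage-collection work.
 * Compiles to a no-op when TCACHE_GC_INCR is 0.
 */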
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}

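/*
 * Fast-path pop from a tcache bin.  Cached pointers are kept on a stack
 * addressed as *(tbin->avail - tbin->ncached), so the most recently cached
 * object is returned first.  On a miss, low_water is set to -1 and
 * *tcache_success to false; callers must test tcache_success rather than the
 * returned pointer (see the comment in the body).
 */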
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
	void *ret;

	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		*tcache_success = false;
		return NULL;
	}
	/*
	 * tcache_success (instead of ret) should be checked upon the return of
	 * this function.  We avoid checking (ret == NULL) because there is
	 * never a null stored on the avail stack (which is unknown to the
	 * compiler), and eagerly checking ret would cause pipeline stall
	 * (waiting for the cacheline).
	 */
	*tcache_success = true;
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;

	if (unlikely((low_water_t)tbin->ncached < tbin->low_water)) {
		tbin->low_water = tbin->ncached;
	}

	return ret;
}

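/*
 * Small allocation via the tcache.  Tries the bin fast path first; on a miss
 * it falls back to tcache_alloc_small_hard(), which refills the bin from the
 * chosen arena.  usize is only computed when profiling, junk/zero fill, or
 * zeroing requires it; the config_* conditions are compile-time constants, so
 * unused branches are eliminated.
 */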
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	tbin = tcache_small_bin_get(tcache, binind);
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    tbin, binind, &tcache_hard_success);
		if (!tcache_hard_success) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats) {
		tbin->tstats.nrequests++;
	}
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return ret;
}

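/*
 * Large allocation via the tcache.  On a bin miss the object is allocated
 * directly from the arena with large_malloc() instead of refilling the bin,
 * since speculatively creating large objects would be expensive (see the
 * comment in the miss path).  Fill and accounting mirror tcache_alloc_small().
 */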
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;

	assert(binind >= NBINS && binind < nhbins);
	tbin = tcache_large_bin_get(tcache, binind);
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand. */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero)) {
					memset(ret, 0, usize);
				}
			}
		} else {
			memset(ret, 0, usize);
		}

		if (config_stats) {
			tbin->tstats.nrequests++;
		}
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
	return ret;
}

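/*
 * Return a small object to its tcache bin.  If the bin is full, it is first
 * flushed down to half capacity via tcache_bin_flush_small(), then the
 * pointer is pushed onto the avail stack.
 */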
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
	}

	tbin = tcache_small_bin_get(tcache, binind);
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

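/*
 * Return a large object to its tcache bin.  Same flush-on-full policy as
 * tcache_dalloc_small(), except that junk filling goes through
 * large_dalloc_junk().
 */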
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, sz_index2size(binind));
	}

	tbin = tcache_large_bin_get(tcache, binind);
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

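/*
 * Fetch the explicitly managed tcache at index ind from the global tcaches
 * array, creating it lazily on first use.
 */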
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */