#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}

static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last. Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < SC_NBINS);
	bin = tcache_small_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    bin, binind, &tcache_hard_success);
		if (tcache_hard_success == false) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required. The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret, &bin_infos[binind],
				    false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind], true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats) {
		bin->tstats.nrequests++;
	}
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return ret;
}
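
/*
 * Usage sketch (illustrative only, assuming the usual fast-path caller
 * conventions; the local variable names below are hypothetical): callers map
 * the requested size to a small size-class index and pass it as binind.
 * arena may be NULL, in which case arena_choose() selects one on the
 * cache-miss path; zero requests zeroed memory and slow_path enables the
 * fill/junk checks.
 *
 *	szind_t binind = sz_size2index(size);
 *	assert(binind < SC_NBINS);
 *	void *p = tcache_alloc_small(tsd, NULL, tcache, size, binind,
 *	    false, true);
 */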

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;

	assert(binind >= SC_NBINS && binind < nhbins);
	bin = tcache_large_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand. */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero)) {
					memset(ret, 0, usize);
				}
			}
		} else {
			memset(ret, 0, usize);
		}

		if (config_stats) {
			bin->tstats.nrequests++;
		}
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
	return ret;
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
	}

	bin = tcache_small_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
	if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) {
		tcache_bin_flush_small(tsd, tcache, bin, binind,
		    (bin_info->ncached_max >> 1));
		bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr);
		assert(ret);
	}

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;
	cache_bin_info_t *bin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, sz_index2size(binind));
	}

	bin = tcache_large_bin_get(tcache, binind);
	bin_info = &tcache_bin_info[binind];
	if (unlikely(bin->ncached == bin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, bin, binind,
		    (bin_info->ncached_max >> 1), tcache);
	}
	assert(bin->ncached < bin_info->ncached_max);
	bin->ncached++;
	*(bin->avail - bin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
		abort();
	} else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */