#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"

/*
 * Per-arena decay state: tracks the schedule by which unused dirty/muzzy
 * pages are purged (or transitioned to the next extent state) over time.
 */
struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of interval and per
	 * epoch jitter which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging in
	 * lockstep.
	 */
	nstime_t		deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];

	/*
	 * Pointer to associated stats.  These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	arena_stats_decay_t	*stats;
	/* Peak number of pages in associated extents.  Used for debug only. */
	uint64_t		ceil_npages;
};

/* Top-level per-arena state: threads, stats, extents, decay, and bins. */
struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		nthreads[2];

	/* Next bin shard for binding new threads.  Synchronization: atomic. */
	atomic_u_t		binshard_next;

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t			*last_thd;

	/* Synchronization: internal. */
	arena_stats_t		stats;

	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;

	/* Synchronization: internal. */
	prof_accum_t		prof_accum;
	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		offset_state;

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		extent_sn_next;

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Collections of extents that were previously allocated.  These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain).  This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit is the max allowed size ind to expand (unless the
	 * required size is greater).  Default is no limit, and controlled
	 * through mallctl only.
	 *
	 * Synchronization: extent_grow_mtx
	 */
	pszind_t		extent_grow_next;
	pszind_t		retain_grow_limit;
	malloc_mutex_t		extent_grow_mtx;

	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t		extent_avail;
	atomic_zu_t		extent_avail_cnt;
	malloc_mutex_t		extent_avail_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	bins_t			bins[SC_NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t			szind;
	bool			slab;
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */