#ifndef JEMALLOC_INTERNAL_SC_H
#define JEMALLOC_INTERNAL_SC_H

#include "jemalloc/internal/jemalloc_internal_types.h"

/*
 * Size class computations:
 *
 * These are a little tricky; we'll first start by describing how things
 * generally work, and then describe some of the details.
 *
 * Ignore the first few size classes for a moment. We can then split all the
 * remaining size classes into groups. The size classes in a group are spaced
 * such that they cover allocation request sizes in a power-of-2 range. The
 * power of two is called the base of the group, and the size classes in it
 * satisfy allocations in the half-open range (base, base * 2]. There are
 * SC_NGROUP size classes in each group, equally spaced in the range, so that
 * each one covers allocations for base / SC_NGROUP possible allocation sizes.
 * We call that value (base / SC_NGROUP) the delta of the group. Each size class
 * is delta larger than the one before it (including the initial size class in a
 * group, which is delta larger than base, the largest size class in the
 * previous group).
 * To make the math all work out nicely, we require that SC_NGROUP is a power of
 * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
 * lg_base and lg_delta. For each of these groups then, we have that
 * lg_delta == lg_base - SC_LG_NGROUP.
 * The size classes in a group with a given lg_base and lg_delta (which, recall,
 * can be computed from lg_base for these groups) are therefore:
 *   base + 1 * delta
 *     which covers allocations in (base, base + 1 * delta]
 *   base + 2 * delta
 *     which covers allocations in (base + 1 * delta, base + 2 * delta].
 *   base + 3 * delta
 *     which covers allocations in (base + 2 * delta, base + 3 * delta].
 *   ...
 *   base + SC_NGROUP * delta ( == 2 * base)
 *     which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
 * (Note that currently SC_NGROUP is always 4, so the "..." is empty in
 * practice.)
 * Note that the last size class in the group is the next power of two (after
 * base), so that we've set up the induction correctly for the next group's
 * selection of delta.
 *
 * Now, let's start considering the first few size classes. Two extra constants
 * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
 * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
 * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
 * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
 * highest required alignment of a platform. For allocation sizes smaller than
 * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
 * platforms with types with alignment larger than their size). To allow such
 * allocations (without wasting space unnecessarily), we introduce tiny size
 * classes; one per power of two, up until we hit the quantum size. There are
 * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
 *
 * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the start
 * of a group in the sense we described above (covering a power of two range)
 * since, if we divided into it to pick a value of delta, we'd get a delta
 * smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which is
 * against the rules.
 *
 * The first base we can divide by SC_NGROUP while still being at least
 * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
 * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
 * classes are:
 *   1 * (1 << LG_QUANTUM)
 *   2 * (1 << LG_QUANTUM)
 *   3 * (1 << LG_QUANTUM)
 *   ... (although, as above, this "..." is empty in practice)
 *   SC_NGROUP * (1 << LG_QUANTUM).
 *
 * There are SC_NGROUP of these size classes, so we can regard it as a sort of
 * pseudo-group, even though it spans multiple powers of 2, is divided
 * differently, and both starts and ends on a power of 2 (as opposed to just
 * ending). SC_NGROUP is itself a power of two, so the first group after the
 * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
 * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
 * sizes without violating our LG_QUANTUM requirements, so we can safely set
 * lg_delta = lg_base - SC_LG_NGROUP (== LG_QUANTUM).
 *
 * So, in order, the size classes are:
 *
 * Tiny size classes:
 * - Count: LG_QUANTUM - SC_LG_TINY_MIN.
 * - Sizes:
 *     1 << SC_LG_TINY_MIN
 *     1 << (SC_LG_TINY_MIN + 1)
 *     1 << (SC_LG_TINY_MIN + 2)
 *     ...
 *     1 << (LG_QUANTUM - 1)
 *
 * Initial pseudo-group:
 * - Count: SC_NGROUP
 * - Sizes:
 *     1 * (1 << LG_QUANTUM)
 *     2 * (1 << LG_QUANTUM)
 *     3 * (1 << LG_QUANTUM)
 *     ...
 *     SC_NGROUP * (1 << LG_QUANTUM)
 *
 * Regular group 0:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 * Regular group 1:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 * ...
 *
 * Regular group N:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 *
 * Representation of metadata:
 * To make the math easy, we'll mostly work in lg quantities. We record lg_base,
 * lg_delta, and ndelta (i.e. number of deltas above the base) on a
 * per-size-class basis, and maintain the invariant that, across all size
 * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
 *
 * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
 * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
 *
 * For the initial tiny size classes (if any), lg_base is lg(size class size).
 * lg_delta is lg_base for the first size class, and lg_base - 1 for all
 * subsequent ones. ndelta is always 0.
 *
 * For the pseudo-group, if there are no tiny size classes, then we set
 * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
 * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
 * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
 * indeed get a power of two that way). If there *are* tiny size classes, then
 * the first size class needs to have lg_delta relative to the largest tiny size
 * class. We therefore set lg_base == LG_QUANTUM - 1,
 * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
 * pseudo-group the same.
 *
 *
 * Other terminology:
 * "Small" size classes mean those that are allocated out of bins, which is the
 * same as those that are slab allocated.
 * "Large" size classes are those that are not small. The cutoff for counting as
 * large is page size * group size.
 */

/*
 * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
 */
#define SC_LG_NGROUP 2
#define SC_LG_TINY_MIN 3

#if SC_LG_TINY_MIN == 0
/* The div module doesn't support division by 1, which this would require. */
#error "Unsupported LG_TINY_MIN"
#endif

/*
 * The definitions below are all determined by the above settings and system
 * characteristics.
 */
#define SC_NGROUP (1ULL << SC_LG_NGROUP)
#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
#define SC_NPSEUDO SC_NGROUP
#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
/*
 * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
 * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
 * size class shorter than the others).
 * We could probably save some space in arenas by capping this at LG_VADDR size.
 */
#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
#define SC_NREGULAR (SC_NGROUP * \
    (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)

/* The number of size classes that are a multiple of the page size. */
#define SC_NPSIZES (						\
    /* Start with all the size classes. */			\
    SC_NSIZES							\
    /* Subtract out those groups with too small a base. */	\
    - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP	\
    /* And the pseudo-group. */					\
    - SC_NPSEUDO						\
    /* And the tiny group. */					\
    - SC_NTINY							\
    /* Groups where ndelta*delta is not a multiple of the page size. */	\
    - (2 * (SC_NGROUP)))

/*
 * We declare a size class is binnable if size < page size * group. Or, in other
 * words, lg(size) < lg(page size) + lg(group size).
 */
#define SC_NBINS (						\
    /* Sub-regular size classes. */				\
    SC_NTINY + SC_NPSEUDO					\
    /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */	\
    + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE)	\
    /* Last SC of the last group hits the bound exactly; exclude it. */	\
    - 1)

/*
 * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
 * cannot support more than 256 small size classes.
 */
#if (SC_NBINS > 256)
#  error "Too many small size classes"
#endif

/* The largest size class in the lookup table. */
#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)

/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))

/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
    + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)

/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)

/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))

/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)

/* Metadata for a single size class. */
typedef struct sc_s sc_t;
struct sc_s {
	/* Size class index, or -1 if not a valid size class. */
	int index;
	/* Lg group base size (no deltas added). */
	int lg_base;
	/* Lg delta to previous size class. */
	int lg_delta;
	/* Delta multiplier.  size == 1<<lg_base + ndelta<<lg_delta */
	int ndelta;
	/*
	 * True if the size class is a multiple of the page size, false
	 * otherwise.
	 */
	bool psz;
	/*
	 * True if the size class is a small, bin, size class. False otherwise.
	 */
	bool bin;
	/* The slab page count if a small bin size class, 0 otherwise. */
	int pgs;
	/* Same as lg_delta if a lookup table size class, 0 otherwise. */
	int lg_delta_lookup;
};

/* The full table of size class metadata, plus summary counts and bounds. */
typedef struct sc_data_s sc_data_t;
struct sc_data_s {
	/* Number of tiny size classes. */
	unsigned ntiny;
	/* Number of bins supported by the lookup table. */
	int nlbins;
	/* Number of small size class bins. */
	int nbins;
	/* Number of size classes. */
	int nsizes;
	/* Number of bits required to store NSIZES. */
	int lg_ceil_nsizes;
	/* Number of size classes that are a multiple of (1U << LG_PAGE). */
	unsigned npsizes;
	/* Lg of maximum tiny size class (or -1, if none). */
	int lg_tiny_maxclass;
	/* Maximum size class included in lookup table. */
	size_t lookup_maxclass;
	/* Maximum small size class. */
	size_t small_maxclass;
	/* Lg of minimum large size class. */
	int lg_large_minclass;
	/* The minimum large size class. */
	size_t large_minclass;
	/* Maximum (large) size class. */
	size_t large_maxclass;
	/* True if the sc_data_t has been initialized (for debugging only). */
	bool initialized;

	/* Per-size-class metadata, one entry per size class. */
	sc_t sc[SC_NSIZES];
};

/* Fills *data with the size class metadata derived from the constants above. */
void sc_data_init(sc_data_t *data);
/*
 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
 * Otherwise, does its best to accommodate the request.
 */
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
    int pgs);
/* One-time boot-strapping of size class state; see sc.c for details. */
void sc_boot(sc_data_t *data);

#endif /* JEMALLOC_INTERNAL_SC_H */