1 #ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
2 #define JEMALLOC_INTERNAL_EXTENT_INLINES_H
3 
4 #include "jemalloc/internal/mutex.h"
5 #include "jemalloc/internal/mutex_pool.h"
6 #include "jemalloc/internal/pages.h"
7 #include "jemalloc/internal/prng.h"
8 #include "jemalloc/internal/ql.h"
9 #include "jemalloc/internal/sz.h"
10 
11 static inline void
12 extent_lock(tsdn_t *tsdn, extent_t *extent) {
13 	assert(extent != NULL);
14 	mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
15 }
16 
17 static inline void
18 extent_unlock(tsdn_t *tsdn, extent_t *extent) {
19 	assert(extent != NULL);
20 	mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
21 }
22 
23 static inline void
24 extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
25 	assert(extent1 != NULL && extent2 != NULL);
26 	mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
27 	    (uintptr_t)extent2);
28 }
29 
30 static inline void
31 extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
32 	assert(extent1 != NULL && extent2 != NULL);
33 	mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
34 	    (uintptr_t)extent2);
35 }
36 
static inline arena_t *
extent_arena_get(const extent_t *extent) {
	/* Extract the owning arena's index from the packed e_bits field. */
	unsigned arena_ind = (unsigned)((extent->e_bits &
	    EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
	/*
	 * The following check is omitted because we should never actually read
	 * a NULL arena pointer.
	 */
	if (false && arena_ind >= MALLOCX_ARENA_LIMIT) {
		return NULL;
	}
	assert(arena_ind < MALLOCX_ARENA_LIMIT);
	/* Acquire load pairs with the release store that publishes the arena. */
	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
}
51 
52 static inline szind_t
53 extent_szind_get_maybe_invalid(const extent_t *extent) {
54 	szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
55 	    EXTENT_BITS_SZIND_SHIFT);
56 	assert(szind <= NSIZES);
57 	return szind;
58 }
59 
60 static inline szind_t
61 extent_szind_get(const extent_t *extent) {
62 	szind_t szind = extent_szind_get_maybe_invalid(extent);
63 	assert(szind < NSIZES); /* Never call when "invalid". */
64 	return szind;
65 }
66 
67 static inline size_t
68 extent_usize_get(const extent_t *extent) {
69 	return sz_index2size(extent_szind_get(extent));
70 }
71 
72 static inline size_t
73 extent_sn_get(const extent_t *extent) {
74 	return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
75 	    EXTENT_BITS_SN_SHIFT);
76 }
77 
78 static inline extent_state_t
79 extent_state_get(const extent_t *extent) {
80 	return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
81 	    EXTENT_BITS_STATE_SHIFT);
82 }
83 
84 static inline bool
85 extent_zeroed_get(const extent_t *extent) {
86 	return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
87 	    EXTENT_BITS_ZEROED_SHIFT);
88 }
89 
90 static inline bool
91 extent_committed_get(const extent_t *extent) {
92 	return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
93 	    EXTENT_BITS_COMMITTED_SHIFT);
94 }
95 
96 static inline bool
97 extent_dumpable_get(const extent_t *extent) {
98 	return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
99 	    EXTENT_BITS_DUMPABLE_SHIFT);
100 }
101 
102 static inline bool
103 extent_slab_get(const extent_t *extent) {
104 	return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
105 	    EXTENT_BITS_SLAB_SHIFT);
106 }
107 
108 static inline unsigned
109 extent_nfree_get(const extent_t *extent) {
110 	assert(extent_slab_get(extent));
111 	return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
112 	    EXTENT_BITS_NFREE_SHIFT);
113 }
114 
115 static inline void *
116 extent_base_get(const extent_t *extent) {
117 	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
118 	    !extent_slab_get(extent));
119 	return PAGE_ADDR2BASE(extent->e_addr);
120 }
121 
122 static inline void *
123 extent_addr_get(const extent_t *extent) {
124 	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
125 	    !extent_slab_get(extent));
126 	return extent->e_addr;
127 }
128 
129 static inline size_t
130 extent_size_get(const extent_t *extent) {
131 	return (extent->e_size_esn & EXTENT_SIZE_MASK);
132 }
133 
134 static inline size_t
135 extent_esn_get(const extent_t *extent) {
136 	return (extent->e_size_esn & EXTENT_ESN_MASK);
137 }
138 
static inline size_t
extent_bsize_get(const extent_t *extent) {
	/* Raw bsize field, set for base-initialized extents (extent_binit). */
	return extent->e_bsize;
}
143 
144 static inline void *
145 extent_before_get(const extent_t *extent) {
146 	return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
147 }
148 
149 static inline void *
150 extent_last_get(const extent_t *extent) {
151 	return (void *)((uintptr_t)extent_base_get(extent) +
152 	    extent_size_get(extent) - PAGE);
153 }
154 
155 static inline void *
156 extent_past_get(const extent_t *extent) {
157 	return (void *)((uintptr_t)extent_base_get(extent) +
158 	    extent_size_get(extent));
159 }
160 
static inline arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
	/* Per-slab metadata; only valid when the slab bit is set. */
	assert(extent_slab_get(extent));
	return &extent->e_slab_data;
}
166 
static inline const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
	/* Const variant of extent_slab_data_get(). */
	assert(extent_slab_get(extent));
	return &extent->e_slab_data;
}
172 
static inline prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
	/* Acquire load pairs with the release store in extent_prof_tctx_set(). */
	return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
	    ATOMIC_ACQUIRE);
}
178 
179 static inline void
180 extent_arena_set(extent_t *extent, arena_t *arena) {
181 	unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
182 	    MALLOCX_ARENA_BITS) - 1);
183 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
184 	    ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
185 }
186 
static inline void
extent_addr_set(extent_t *extent, void *addr) {
	/* Set the extent's base address field directly. */
	extent->e_addr = addr;
}
191 
/*
 * Randomize the extent's sub-page address offset while preserving the
 * requested alignment; a no-op when alignment >= PAGE.
 */
static inline void
extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) {
	assert(extent_base_get(extent) == extent_addr_get(extent));

	if (alignment < PAGE) {
		unsigned lg_range = LG_PAGE -
		    lg_floor(CACHELINE_CEILING(alignment));
		size_t r;
		if (!tsdn_null(tsdn)) {
			/* Use the thread's PRNG state when a tsd is available. */
			tsd_t *tsd = tsdn_tsd(tsdn);
			r = (size_t)prng_lg_range_u64(
			    tsd_offset_statep_get(tsd), lg_range);
		} else {
			/* Otherwise fall back to the owning arena's state. */
			r = prng_lg_range_zu(
			    &extent_arena_get(extent)->offset_state,
			    lg_range, true);
		}
		/* Scale r so the offset remains a multiple of the alignment. */
		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
		    lg_range);
		extent->e_addr = (void *)((uintptr_t)extent->e_addr +
		    random_offset);
		assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
		    extent->e_addr);
	}
}
217 
218 static inline void
219 extent_size_set(extent_t *extent, size_t size) {
220 	assert((size & ~EXTENT_SIZE_MASK) == 0);
221 	extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
222 }
223 
224 static inline void
225 extent_esn_set(extent_t *extent, size_t esn) {
226 	extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
227 	    EXTENT_ESN_MASK);
228 }
229 
static inline void
extent_bsize_set(extent_t *extent, size_t bsize) {
	/* Set the raw bsize field (used by base-initialized extents). */
	extent->e_bsize = bsize;
}
234 
235 static inline void
236 extent_szind_set(extent_t *extent, szind_t szind) {
237 	assert(szind <= NSIZES); /* NSIZES means "invalid". */
238 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
239 	    ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
240 }
241 
242 static inline void
243 extent_nfree_set(extent_t *extent, unsigned nfree) {
244 	assert(extent_slab_get(extent));
245 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
246 	    ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
247 }
248 
249 static inline void
250 extent_nfree_inc(extent_t *extent) {
251 	assert(extent_slab_get(extent));
252 	extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
253 }
254 
255 static inline void
256 extent_nfree_dec(extent_t *extent) {
257 	assert(extent_slab_get(extent));
258 	extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
259 }
260 
261 static inline void
262 extent_sn_set(extent_t *extent, size_t sn) {
263 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
264 	    ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
265 }
266 
267 static inline void
268 extent_state_set(extent_t *extent, extent_state_t state) {
269 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
270 	    ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
271 }
272 
273 static inline void
274 extent_zeroed_set(extent_t *extent, bool zeroed) {
275 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
276 	    ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
277 }
278 
279 static inline void
280 extent_committed_set(extent_t *extent, bool committed) {
281 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
282 	    ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
283 }
284 
285 static inline void
286 extent_dumpable_set(extent_t *extent, bool dumpable) {
287 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
288 	    ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
289 }
290 
291 static inline void
292 extent_slab_set(extent_t *extent, bool slab) {
293 	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
294 	    ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
295 }
296 
static inline void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
	/* Release store pairs with the acquire load in extent_prof_tctx_get(). */
	atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
}
301 
/*
 * Fully initialize an extent's metadata.  Slab extents must be page-aligned.
 * The prof tctx is cleared only when profiling support is compiled in.
 */
static inline void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
    bool committed, bool dumpable) {
	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	extent_arena_set(extent, arena);
	extent_addr_set(extent, addr);
	extent_size_set(extent, size);
	extent_slab_set(extent, slab);
	extent_szind_set(extent, szind);
	extent_sn_set(extent, sn);
	extent_state_set(extent, state);
	extent_zeroed_set(extent, zeroed);
	extent_committed_set(extent, committed);
	extent_dumpable_set(extent, dumpable);
	/* Make the extent linkable into extent lists. */
	ql_elm_new(extent, ql_link);
	if (config_prof) {
		extent_prof_tctx_set(extent, NULL);
	}
}
323 
/*
 * Initialize a base-allocated extent: no owning arena, bsize instead of the
 * packed size, and an invalid (NSIZES) size index.
 */
static inline void
extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
	extent_arena_set(extent, NULL);
	extent_addr_set(extent, addr);
	extent_bsize_set(extent, bsize);
	extent_slab_set(extent, false);
	extent_szind_set(extent, NSIZES);
	extent_sn_set(extent, sn);
	extent_state_set(extent, extent_state_active);
	extent_zeroed_set(extent, true);
	extent_committed_set(extent, true);
	extent_dumpable_set(extent, true);
}
337 
static inline void
extent_list_init(extent_list_t *list) {
	/* Initialize an empty extent list. */
	ql_new(list);
}
342 
static inline extent_t *
extent_list_first(const extent_list_t *list) {
	/* Head of the list, or NULL if empty. */
	return ql_first(list);
}
347 
static inline extent_t *
extent_list_last(const extent_list_t *list) {
	/* Tail of the list, or NULL if empty. */
	return ql_last(list, ql_link);
}
352 
static inline void
extent_list_append(extent_list_t *list, extent_t *extent) {
	/* Link the extent at the tail of the list. */
	ql_tail_insert(list, extent, ql_link);
}
357 
static inline void
extent_list_prepend(extent_list_t *list, extent_t *extent) {
	/* Link the extent at the head of the list. */
	ql_head_insert(list, extent, ql_link);
}
362 
static inline void
extent_list_replace(extent_list_t *list, extent_t *to_remove,
    extent_t *to_insert) {
	/*
	 * Insert to_insert immediately after to_remove before unlinking, so
	 * to_insert takes over to_remove's position in the list.  The order
	 * of these two operations is essential.
	 */
	ql_after_insert(to_remove, to_insert, ql_link);
	ql_remove(list, to_remove, ql_link);
}
369 
static inline void
extent_list_remove(extent_list_t *list, extent_t *extent) {
	/* Unlink the extent from the list. */
	ql_remove(list, extent, ql_link);
}
374 
375 static inline int
376 extent_sn_comp(const extent_t *a, const extent_t *b) {
377 	size_t a_sn = extent_sn_get(a);
378 	size_t b_sn = extent_sn_get(b);
379 
380 	return (a_sn > b_sn) - (a_sn < b_sn);
381 }
382 
383 static inline int
384 extent_esn_comp(const extent_t *a, const extent_t *b) {
385 	size_t a_esn = extent_esn_get(a);
386 	size_t b_esn = extent_esn_get(b);
387 
388 	return (a_esn > b_esn) - (a_esn < b_esn);
389 }
390 
391 static inline int
392 extent_ad_comp(const extent_t *a, const extent_t *b) {
393 	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
394 	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
395 
396 	return (a_addr > b_addr) - (a_addr < b_addr);
397 }
398 
399 static inline int
400 extent_ead_comp(const extent_t *a, const extent_t *b) {
401 	uintptr_t a_eaddr = (uintptr_t)a;
402 	uintptr_t b_eaddr = (uintptr_t)b;
403 
404 	return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
405 }
406 
407 static inline int
408 extent_snad_comp(const extent_t *a, const extent_t *b) {
409 	int ret;
410 
411 	ret = extent_sn_comp(a, b);
412 	if (ret != 0) {
413 		return ret;
414 	}
415 
416 	ret = extent_ad_comp(a, b);
417 	return ret;
418 }
419 
420 static inline int
421 extent_esnead_comp(const extent_t *a, const extent_t *b) {
422 	int ret;
423 
424 	ret = extent_esn_comp(a, b);
425 	if (ret != 0) {
426 		return ret;
427 	}
428 
429 	ret = extent_ead_comp(a, b);
430 	return ret;
431 }
432 
433 #endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
434