#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"

typedef enum {
	extent_state_active   = 0,
	extent_state_dirty    = 1,
	extent_state_muzzy    = 2,
	extent_state_retained = 3
} extent_state_t;

/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * z: zeroed
	 * t: state
	 * i: szind
	 * f: nfree
	 * n: sn
	 *
	 * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up via
	 *       iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly,
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates the usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to sz_large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * sn: Serial number (potentially non-unique).
	 *
	 *     Serial numbers may wrap around if !opt_retain, but as long as
	 *     comparison functions fall back on address comparison for equal
	 *     serial numbers, stable (if imperfect) ordering is maintained.
	 *
	 *     Serial numbers may not be unique even in the absence of
	 *     wrap-around, e.g. when splitting an extent and assigning the same
	 *     serial number to both resulting adjacent extents.
	 */
	uint64_t		e_bits;
#define EXTENT_BITS_ARENA_SHIFT		0
#define EXTENT_BITS_ARENA_MASK \
    (((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_SHIFT		MALLOCX_ARENA_BITS
#define EXTENT_BITS_SLAB_MASK \
    ((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_SHIFT	(MALLOCX_ARENA_BITS + 1)
#define EXTENT_BITS_COMMITTED_MASK \
    ((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_ZEROED_SHIFT	(MALLOCX_ARENA_BITS + 2)
#define EXTENT_BITS_ZEROED_MASK \
    ((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_STATE_SHIFT		(MALLOCX_ARENA_BITS + 3)
#define EXTENT_BITS_STATE_MASK \
    ((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_SHIFT		(MALLOCX_ARENA_BITS + 5)
#define EXTENT_BITS_SZIND_MASK \
    (((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_SHIFT \
    (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES)
#define EXTENT_BITS_NFREE_MASK \
    ((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_SN_SHIFT \
    (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1))
#define EXTENT_BITS_SN_MASK		(UINT64_MAX << EXTENT_BITS_SN_SHIFT)
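
	/*
	 * Illustrative sketch (an assumption, not part of this header): each
	 * packed field is read and written with the usual mask/shift pattern.
	 * The names example_szind_get/example_szind_set are hypothetical; the
	 * actual accessors live with the other extent inline functions.
	 *
	 *   static inline szind_t
	 *   example_szind_get(const extent_t *extent) {
	 *           // Mask out the szind bits, then shift down to value range.
	 *           return (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK)
	 *               >> EXTENT_BITS_SZIND_SHIFT);
	 *   }
	 *
	 *   static inline void
	 *   example_szind_set(extent_t *extent, szind_t szind) {
	 *           // Clear the old szind bits, then OR in the new value.
	 *           extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK)
	 *               | ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
	 *   }
	 */
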
	/* Pointer to the extent that this structure is responsible for. */
	void			*e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different from the serial number for the extent at
		 * e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t			e_size_esn;
	#define EXTENT_SIZE_MASK	((size_t)~(PAGE-1))
	#define EXTENT_ESN_MASK		((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t			e_bsize;
	};
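
	/*
	 * Illustrative sketch (an assumption, not part of this header): since
	 * non-base extent sizes are multiples of PAGE, the low lg(PAGE) bits
	 * of e_size_esn are free to hold the esn.  Hypothetical accessors:
	 *
	 *   static inline size_t
	 *   example_size_get(const extent_t *extent) {
	 *           // High bits: the PAGE-aligned extent size.
	 *           return (extent->e_size_esn & EXTENT_SIZE_MASK);
	 *   }
	 *
	 *   static inline size_t
	 *   example_esn_get(const extent_t *extent) {
	 *           // Low lg(PAGE) bits: the extent structure's serial number.
	 *           return (extent->e_size_esn & EXTENT_ESN_MASK);
	 *   }
	 */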

	union {
		/*
		 * List linkage, used by a variety of lists:
		 * - arena_bin_t's slabs_full
		 * - extents_t's LRU
		 * - stashed dirty extents
		 * - arena's large allocations
		 */
		ql_elm(extent_t)	ql_link;
		/* Red-black tree linkage, used by arena's extent_avail. */
		rb_node(extent_t)	rb_link;
	};

	/* Linkage for per-size-class sn/address-ordered heaps. */
	phn(extent_t)		ph_link;

	union {
		/* Small region slab metadata. */
		arena_slab_data_t	e_slab_data;

		/*
		 * Profile counters, used for large objects.  Points to a
		 * prof_tctx_t.
		 */
		atomic_p_t		e_prof_tctx;
	};
};
typedef ql_head(extent_t) extent_list_t;
typedef rb_tree(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
	malloc_mutex_t		mtx;

	/*
	 * Quantized per-size-class heaps of extents.
	 *
	 * Synchronization: mtx.
	 */
	extent_heap_t		heaps[NPSIZES+1];

	/*
	 * Bitmap whose set bits correspond to non-empty heaps.
	 *
	 * Synchronization: mtx.
	 */
	bitmap_t		bitmap[BITMAP_GROUPS(NPSIZES+1)];

	/*
	 * LRU of all extents in heaps.
	 *
	 * Synchronization: mtx.
	 */
	extent_list_t		lru;

	/*
	 * Page sum for all extents in heaps.
	 *
	 * The synchronization here is a little tricky.  Modifications to
	 * npages must be made while holding mtx, but reads need not be
	 * (though a reader who sees npages without holding the mutex can't
	 * assume anything about the rest of the extents_t's state).
	 */
	atomic_zu_t		npages;
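
	/*
	 * Illustrative sketch (an assumption, not part of this header): a
	 * writer updates npages with relaxed atomics while holding mtx, and a
	 * racy reader may load it without the lock.  tsdn, extents, and the
	 * npages delta below are hypothetical stand-ins:
	 *
	 *   malloc_mutex_lock(tsdn, &extents->mtx);
	 *   atomic_store_zu(&extents->npages,
	 *       atomic_load_zu(&extents->npages, ATOMIC_RELAXED) + npages,
	 *       ATOMIC_RELAXED);
	 *   malloc_mutex_unlock(tsdn, &extents->mtx);
	 *
	 *   // Lock-free reader; sees a valid npages, nothing more.
	 *   size_t cur = atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	 */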

	/* All stored extents must be in the same state. */
	extent_state_t		state;

	/*
	 * If true, delay coalescing until eviction; otherwise coalesce during
	 * deallocation.
	 */
	bool			delay_coalesce;
};

#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */