/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2018
 *
 * Non-moving garbage collector and allocator
 *
 * ---------------------------------------------------------------------------*/

#pragma once

#if !defined(CMINUSMINUS)

#include <string.h>
#include "HeapAlloc.h"
#include "NonMovingMark.h"

#include "BeginPrivate.h"

// Segments
#define NONMOVING_SEGMENT_BITS 15   // 2^15 = 32kByte
// Mask to find base of segment
#define NONMOVING_SEGMENT_MASK ((1 << NONMOVING_SEGMENT_BITS) - 1)
// In bytes
#define NONMOVING_SEGMENT_SIZE (1 << NONMOVING_SEGMENT_BITS)
// In words
#define NONMOVING_SEGMENT_SIZE_W ((1 << NONMOVING_SEGMENT_BITS) / SIZEOF_VOID_P)
// In blocks
#define NONMOVING_SEGMENT_BLOCKS (NONMOVING_SEGMENT_SIZE / BLOCK_SIZE)
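
// For concreteness: with the default 4 kByte BLOCK_SIZE and a 64-bit word,
// a segment is 2^15 = 32768 bytes = 4096 words = 8 blocks.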

_Static_assert(NONMOVING_SEGMENT_SIZE % BLOCK_SIZE == 0,
               "non-moving segment size must be multiple of block size");

// The index of a block within a segment
typedef uint16_t nonmoving_block_idx;

// A non-moving heap segment
struct NonmovingSegment {
    struct NonmovingSegment *link;      // for linking together segments into lists
    struct NonmovingSegment *todo_link; // NULL when not in todo list
    nonmoving_block_idx next_free;      // index of the next unallocated block
    uint8_t bitmap[];                   // liveness bitmap
    // After the liveness bitmap come the data blocks. Note that we need to
    // ensure that the size of this struct (including the bitmap) is a multiple
    // of the word size since GHC assumes that all object pointers are
    // so-aligned.

    // N.B. There are also bits of information which are stored in the
    // NonmovingBlockInfo stored in the segment's block descriptor. Namely:
    //
    //  * the block size can be found in nonmovingBlockInfo(seg)->log_block_size.
    //  * the next_free snapshot can be found in
    //    nonmovingBlockInfo(seg)->next_free_snap.
    //
    // This allows us to mark a nonmoving closure without bringing the
    // NonmovingSegment header into cache.
};
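
// Schematically, a segment is laid out as follows; the data blocks begin at
// the first word-aligned address after the bitmap:
//
//   +------------------------------------+  <- segment start (32 kByte aligned)
//   | header (link, todo_link, next_free)|
//   +------------------------------------+
//   | bitmap, one byte per block         |
//   +------------------------------------+  <- rounded up to word size
//   | block 0 | block 1 | ...            |  each block is 2^log_block_size bytes
//   +------------------------------------+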

// This is how we mark the end of todo lists. Not NULL because todo_link == NULL
// means a segment is not in a list.
#define END_NONMOVING_TODO_LIST ((struct NonmovingSegment*)1)

// A non-moving allocator for a particular block size
struct NonmovingAllocator {
    struct NonmovingSegment *filled;
    struct NonmovingSegment *saved_filled;
    struct NonmovingSegment *active;
    // indexed by capability number
    struct NonmovingSegment *current[];
};

// first allocator is of size 2^NONMOVING_ALLOCA0 (in bytes)
#define NONMOVING_ALLOCA0 3

// allocators cover block sizes from 2^NONMOVING_ALLOCA0 up to (but not
// including) 2^(NONMOVING_ALLOCA0 + NONMOVING_ALLOCA_CNT) (in bytes)
#define NONMOVING_ALLOCA_CNT 12
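
// For example, allocator 0 serves 2^3 = 8-byte blocks and the last
// allocator, index 11, serves 2^14 = 16 kByte blocks; objects above that
// range live outside the segments, e.g. as large objects (see
// assert_in_nonmoving_heap below).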

// maximum number of free segments to hold on to
#define NONMOVING_MAX_FREE 16

struct NonmovingHeap {
    struct NonmovingAllocator *allocators[NONMOVING_ALLOCA_CNT];
    // free segment list. This is a cache where we keep up to
    // NONMOVING_MAX_FREE segments to avoid thrashing the block allocator.
    // Note that segments in this list are still counted towards
    // oldest_gen->n_blocks.
    struct NonmovingSegment *free;
    // how many segments are in the free segment list? accessed atomically.
    unsigned int n_free;

    // records the current length of the nonmovingAllocator.current arrays
    unsigned int n_caps;

    // The set of segments being swept in this GC. Segments are moved here from
    // the filled list during preparation and moved back to either the filled,
    // active, or free lists during sweep. Should be NULL before mark and
    // after sweep.
    struct NonmovingSegment *sweep_list;
};

extern struct NonmovingHeap nonmovingHeap;

extern memcount nonmoving_live_words;

#if defined(THREADED_RTS)
extern bool concurrent_coll_running;
#endif

void nonmovingInit(void);
void nonmovingStop(void);
void nonmovingExit(void);

// The dead_weaks and resurrected_threads lists serve two purposes:
//
//  - The weaks and threads on those lists were found to be dead during
//    preparation, but the weaks will still be finalized and the threads will
//    be scheduled again (i.e. resurrected), so we need to keep them alive in
//    the non-moving heap as well. We therefore treat them as roots and mark
//    them.
//
//  - In the non-threaded runtime we add weaks and threads found to be dead in
//    the non-moving heap to those lists so that they are finalized and
//    scheduled like any other weaks and threads. In the threaded runtime we
//    can't do this, as it would cause races between a minor collection and the
//    non-moving collection. Instead, we finalize the weaks and resurrect the
//    threads of the non-moving heap directly, but during a pause.
//
void nonmovingCollect(StgWeak **dead_weaks,
                      StgTSO **resurrected_threads);

void *nonmovingAllocate(Capability *cap, StgWord sz);
void nonmovingAddCapabilities(uint32_t new_n_caps);
void nonmovingPushFreeSegment(struct NonmovingSegment *seg);
void nonmovingClearBitmap(struct NonmovingSegment *seg);


INLINE_HEADER struct NonmovingSegmentInfo *nonmovingSegmentInfo(struct NonmovingSegment *seg) {
    return &Bdescr((StgPtr) seg)->nonmoving_segment;
}

INLINE_HEADER uint8_t nonmovingSegmentLogBlockSize(struct NonmovingSegment *seg) {
    return nonmovingSegmentInfo(seg)->log_block_size;
}

// Add a segment to the appropriate active list.
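// The push is a lock-free Treiber-stack push, shared in shape with
// nonmovingPushFilledSegment below: read the current list head, point
// seg->link at it, and retry if the CAS observes that another thread
// pushed first.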
INLINE_HEADER void nonmovingPushActiveSegment(struct NonmovingSegment *seg)
{
    struct NonmovingAllocator *alloc =
        nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
    while (true) {
        struct NonmovingSegment *current_active = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->active);
        seg->link = current_active;
        if (cas((StgVolatilePtr) &alloc->active, (StgWord) current_active, (StgWord) seg) == (StgWord) current_active) {
            break;
        }
    }
}

// Add a segment to the appropriate filled list.
INLINE_HEADER void nonmovingPushFilledSegment(struct NonmovingSegment *seg)
{
    struct NonmovingAllocator *alloc =
        nonmovingHeap.allocators[nonmovingSegmentLogBlockSize(seg) - NONMOVING_ALLOCA0];
    while (true) {
        struct NonmovingSegment *current_filled = (struct NonmovingSegment*)VOLATILE_LOAD(&alloc->filled);
        seg->link = current_filled;
        if (cas((StgVolatilePtr) &alloc->filled, (StgWord) current_filled, (StgWord) seg) == (StgWord) current_filled) {
            break;
        }
    }
}

// Assert that the pointer can be traced by the non-moving collector (e.g. in
// the mark phase). This means one of the following:
//
//  - A static object
//  - A large object
//  - An object in the non-moving heap (e.g. in one of the segments)
//
void assert_in_nonmoving_heap(StgPtr p);

// The block size of a given segment in bytes.
INLINE_HEADER unsigned int nonmovingSegmentBlockSize(struct NonmovingSegment *seg)
{
    return 1 << nonmovingSegmentLogBlockSize(seg);
}

// How many blocks does a segment with the given block size have?
INLINE_HEADER unsigned int nonmovingBlockCount(uint8_t log_block_size)
{
    unsigned int segment_data_size = NONMOVING_SEGMENT_SIZE - sizeof(struct NonmovingSegment);
    segment_data_size -= segment_data_size % SIZEOF_VOID_P;
    unsigned int blk_size = 1 << log_block_size;
    // N.B. the +1 accounts for the byte in the mark bitmap.
    return segment_data_size / (blk_size + 1);
}
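
// A worked example, assuming a typical 64-bit build where
// sizeof(struct NonmovingSegment) == 24: for 8-byte blocks
// (log_block_size == 3) we get (32768 - 24) / (8 + 1) == 3638 blocks.
// The bitmap then occupies 3638 bytes, so the data blocks start at the
// word-aligned offset 3664 and end exactly at byte 32768.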

unsigned int nonmovingBlockCountFromSize(uint8_t log_block_size);

// How many blocks does the given segment contain? Also the size of the bitmap.
INLINE_HEADER unsigned int nonmovingSegmentBlockCount(struct NonmovingSegment *seg)
{
    return nonmovingBlockCountFromSize(nonmovingSegmentLogBlockSize(seg));
}

// Get a pointer to the given block index assuming that the block size is as
// given (avoiding a potential cache miss when this information is already
// available). The log_block_size argument must be equal to
// nonmovingSegmentLogBlockSize(seg).
INLINE_HEADER void *nonmovingSegmentGetBlock_(struct NonmovingSegment *seg, uint8_t log_block_size, nonmoving_block_idx i)
{
    ASSERT(log_block_size == nonmovingSegmentLogBlockSize(seg));
    // Block size in bytes
    unsigned int blk_size = 1 << log_block_size;
    // Bitmap size in bytes
    W_ bitmap_size = nonmovingBlockCountFromSize(log_block_size) * sizeof(uint8_t);
    // Where the actual data starts (address of the first block).
    // Use ROUNDUP_BYTES_TO_WDS to align to word size. Note that
    // ROUNDUP_BYTES_TO_WDS returns its result in _words_, not in _bytes_, so
    // convert it back to bytes by multiplying by the word size.
    W_ data = ROUNDUP_BYTES_TO_WDS(((W_)seg) + sizeof(struct NonmovingSegment) + bitmap_size) * sizeof(W_);
    return (void*)(data + i*blk_size);
}
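
// Illustratively (same 64-bit assumptions as the example above): for a
// segment at address S holding 8-byte blocks, the header and the 3638-byte
// bitmap end at S + 3662, which rounds up to S + 3664, so block i lives at
// S + 3664 + 8*i.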

// Get a pointer to the given block index.
INLINE_HEADER void *nonmovingSegmentGetBlock(struct NonmovingSegment *seg, nonmoving_block_idx i)
{
    return nonmovingSegmentGetBlock_(seg, nonmovingSegmentLogBlockSize(seg), i);
}

// Get the segment which a closure resides in. Assumes that the pointer points
// into the non-moving heap.
INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment_unchecked(StgPtr p)
{
    const uintptr_t mask = ~NONMOVING_SEGMENT_MASK;
    return (struct NonmovingSegment *) (((uintptr_t) p) & mask);
}
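
// This works because segments are NONMOVING_SEGMENT_SIZE-aligned, so
// clearing the low NONMOVING_SEGMENT_BITS bits of any interior pointer
// yields the segment base. E.g. with 32 kByte segments,
// 0x42a17f08 & ~0x7fff == 0x42a10000.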

INLINE_HEADER struct NonmovingSegment *nonmovingGetSegment(StgPtr p)
{
    ASSERT(HEAP_ALLOCED_GC(p) && (Bdescr(p)->flags & BF_NONMOVING));
    return nonmovingGetSegment_unchecked(p);
}

INLINE_HEADER nonmoving_block_idx nonmovingGetBlockIdx(StgPtr p)
{
    ASSERT(HEAP_ALLOCED_GC(p) && (Bdescr(p)->flags & BF_NONMOVING));
    struct NonmovingSegment *seg = nonmovingGetSegment(p);
    ptrdiff_t blk0 = (ptrdiff_t)nonmovingSegmentGetBlock(seg, 0);
    ptrdiff_t offset = (ptrdiff_t)p - blk0;
    return (nonmoving_block_idx) (offset >> nonmovingSegmentLogBlockSize(seg));
}
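
// Note that this is the inverse of nonmovingSegmentGetBlock in the sense
// that nonmovingSegmentGetBlock(seg, nonmovingGetBlockIdx(p)) rounds p down
// to the start of the block containing it; using a shift rather than a
// division is why block sizes must be powers of two.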

// TODO: Eliminate this
extern uint8_t nonmovingMarkEpoch;
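
// Marking is epoch-based: each bitmap entry holds the value of
// nonmovingMarkEpoch at the time the block was last marked, and the epoch is
// advanced to a fresh non-zero value between major cycles. Hence a zero
// entry means "never marked", equality with the current epoch means "marked
// this cycle", and a stale non-zero value means "marked in an earlier cycle"
// (see nonmovingClosureMarkedThisCycle and nonmovingClosureMarked below).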

INLINE_HEADER void nonmovingSetMark(struct NonmovingSegment *seg, nonmoving_block_idx i)
{
    seg->bitmap[i] = nonmovingMarkEpoch;
}

INLINE_HEADER uint8_t nonmovingGetMark(struct NonmovingSegment *seg, nonmoving_block_idx i)
{
    return seg->bitmap[i];
}

INLINE_HEADER void nonmovingSetClosureMark(StgPtr p)
{
    nonmovingSetMark(nonmovingGetSegment(p), nonmovingGetBlockIdx(p));
}

// TODO: Audit the uses of these
/* Was the given closure marked this major GC cycle? */
INLINE_HEADER bool nonmovingClosureMarkedThisCycle(StgPtr p)
{
    struct NonmovingSegment *seg = nonmovingGetSegment(p);
    nonmoving_block_idx blk_idx = nonmovingGetBlockIdx(p);
    return nonmovingGetMark(seg, blk_idx) == nonmovingMarkEpoch;
}

INLINE_HEADER bool nonmovingClosureMarked(StgPtr p)
{
    struct NonmovingSegment *seg = nonmovingGetSegment(p);
    nonmoving_block_idx blk_idx = nonmovingGetBlockIdx(p);
    return nonmovingGetMark(seg, blk_idx) != 0;
}

// Can be called during a major collection to determine whether a particular
// segment is in the set of segments that will be swept this collection cycle.
INLINE_HEADER bool nonmovingSegmentBeingSwept(struct NonmovingSegment *seg)
{
    struct NonmovingSegmentInfo *seginfo = nonmovingSegmentInfo(seg);
    unsigned int n = nonmovingBlockCountFromSize(seginfo->log_block_size);
    return seginfo->next_free_snap >= n;
}
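
// Informally: only segments that were on a filled list when the mark phase
// was prepared are moved to sweep_list, and those are exactly the segments
// whose next_free snapshot points past the last block.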

// Can be called during a major collection to determine whether a particular
// closure lives in a segment that will be swept this collection cycle.
// Note that this returns true for both large and normal objects.
INLINE_HEADER bool nonmovingClosureBeingSwept(StgClosure *p)
{
    bdescr *bd = Bdescr((StgPtr) p);
    if (HEAP_ALLOCED_GC(p)) {
        if (bd->flags & BF_NONMOVING_SWEEPING) {
            return true;
        } else if (bd->flags & BF_NONMOVING) {
            struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
            return nonmovingSegmentBeingSwept(seg);
        } else {
            // outside of the nonmoving heap
            return false;
        }
    } else {
        // a static object
        return true;
    }
}

INLINE_HEADER bool isNonmovingClosure(StgClosure *p)
{
    return !HEAP_ALLOCED_GC(p) || Bdescr((P_)p)->flags & BF_NONMOVING;
}

#if defined(DEBUG)

void nonmovingPrintSegment(struct NonmovingSegment *seg);
void nonmovingPrintAllocator(struct NonmovingAllocator *alloc);
void locate_object(P_ obj);
void nonmovingPrintSweepList(void);
// Check whether the object is in one of the non-moving heap mut_lists
void check_in_mut_list(StgClosure *p);
void print_block_list(bdescr *bd);
void print_thread_list(StgTSO* tso);

#endif

#include "EndPrivate.h"

#endif // CMINUSMINUS
