/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2018
 *
 * Non-moving garbage collector and allocator: Sweep phase
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "NonMovingSweep.h"
#include "NonMoving.h"
#include "NonMovingMark.h" // for nonmovingIsAlive
#include "Capability.h"
#include "GCThread.h" // for GCUtils.h
#include "GCUtils.h"
#include "Storage.h"
#include "Trace.h"
#include "StableName.h"
#include "CNF.h" // compactFree

// On which list should a particular segment be placed?
enum SweepResult {
    SEGMENT_FREE,     // segment is empty: place on free list
    SEGMENT_PARTIAL,  // segment is partially filled: place on active list
    SEGMENT_FILLED    // segment is full: place on filled list
};

// Determine which list a marked segment should be placed on and initialize
// next_free indices as appropriate.
GNUC_ATTR_HOT static enum SweepResult
nonmovingSweepSegment(struct NonmovingSegment *seg)
{
    const nonmoving_block_idx blk_cnt = nonmovingSegmentBlockCount(seg);
    bool found_free = false;
    bool found_live = false;

    for (nonmoving_block_idx i = 0; i < blk_cnt; ++i)
    {
        if (seg->bitmap[i] == nonmovingMarkEpoch) {
            found_live = true;
        } else if (!found_free) {
            found_free = true;
            seg->next_free = i;
            nonmovingSegmentInfo(seg)->next_free_snap = i;
            Bdescr((P_)seg)->u.scan = (P_)nonmovingSegmentGetBlock(seg, i);
            seg->bitmap[i] = 0;
        } else {
            seg->bitmap[i] = 0;
        }

        if (found_free && found_live) {
            // zero the mark bits of the remaining dead objects
            for (; i < nonmovingSegmentBlockCount(seg); ++i) {
                if (seg->bitmap[i] != nonmovingMarkEpoch) {
                    seg->bitmap[i] = 0;
                }
            }
            return SEGMENT_PARTIAL;
        }
    }

    if (found_live) {
        return SEGMENT_FILLED;
    } else {
        ASSERT(seg->next_free == 0);
        ASSERT(nonmovingSegmentInfo(seg)->next_free_snap == 0);
        nonmovingClearBitmap(seg);
        return SEGMENT_FREE;
    }
}

#if defined(DEBUG)

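// Debug-only: walk the snapshot of the CAF list, stubbing out any CAF that was
// not marked this cycle and returning the live ones to debug_caf_list.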
void nonmovingGcCafs()
{
    uint32_t i = 0;
    StgIndStatic *next;

    for (StgIndStatic *caf = debug_caf_list_snapshot;
         caf != (StgIndStatic*) END_OF_CAF_LIST;
         caf = next)
    {
        next = (StgIndStatic*)caf->saved_info;

        const StgInfoTable *info = get_itbl((StgClosure*)caf);
        ASSERT(info->type == IND_STATIC);

        StgWord flag = ((StgWord) caf->static_link) & STATIC_BITS;
        if (flag != 0 && flag != static_flag) {
            debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%p", caf);
            SET_INFO((StgClosure*)caf, &stg_GCD_CAF_info); // stub it
        } else {
            // CAF is alive, move it back to the debug_caf_list
            ++i;
            debugTrace(DEBUG_gccafs, "CAF alive at 0x%p", caf);
            ACQUIRE_SM_LOCK; // debug_caf_list is global, locked by sm_mutex
            caf->saved_info = (const StgInfoTable*)debug_caf_list;
            debug_caf_list = caf;
            RELEASE_SM_LOCK;
        }
    }

    debugTrace(DEBUG_gccafs, "%d CAFs live", i);
    debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
}

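// Sanity-checking aid: zero the segment's bitmap and all of its block
// payloads.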
static void
clear_segment(struct NonmovingSegment* seg)
{
    size_t end = ((size_t)seg) + NONMOVING_SEGMENT_SIZE;
    memset(&seg->bitmap, 0, end - (size_t)&seg->bitmap);
}

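// Sanity-checking aid: zero the payload of every unmarked (dead) block so that
// stale data cannot be mistaken for live objects.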
static void
clear_segment_free_blocks(struct NonmovingSegment* seg)
{
    unsigned int block_size = nonmovingSegmentBlockSize(seg);
    for (unsigned int p_idx = 0; p_idx < nonmovingSegmentBlockCount(seg); ++p_idx) {
        // after mark, so bit not set == dead
        if (nonmovingGetMark(seg, p_idx) == 0) {
            memset(nonmovingSegmentGetBlock(seg, p_idx), 0, block_size);
        }
    }
}

#endif

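// Sweep every segment on nonmovingHeap.sweep_list, moving each onto the free,
// active, or filled list depending on how many live blocks it still contains.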
GNUC_ATTR_HOT void nonmovingSweep(void)
{
    while (nonmovingHeap.sweep_list) {
        struct NonmovingSegment *seg = nonmovingHeap.sweep_list;

        // Pushing the segment onto one of the free/active/filled lists
        // updates its link field, so advance sweep_list first.
        nonmovingHeap.sweep_list = seg->link;

        enum SweepResult ret = nonmovingSweepSegment(seg);

        switch (ret) {
        case SEGMENT_FREE:
            IF_DEBUG(sanity, clear_segment(seg));
            nonmovingPushFreeSegment(seg);
            break;
        case SEGMENT_PARTIAL:
            IF_DEBUG(sanity, clear_segment_free_blocks(seg));
            nonmovingPushActiveSegment(seg);
            break;
        case SEGMENT_FILLED:
            nonmovingPushFilledSegment(seg);
            break;
        default:
            barf("nonmovingSweep: weird sweep return: %d\n", ret);
        }
    }
}

/* Must a closure remain on the mutable list?
 *
 * A closure must remain if any of the following applies:
 *
 *  1. it contains references to a younger generation
 *  2. it's a mutable closure (e.g. mutable array or MUT_PRIM)
 */
static bool is_closure_clean(StgClosure *p)
{
    const StgInfoTable *info = get_itbl(p);

#define CLEAN(ptr) (!HEAP_ALLOCED((StgClosure*) ptr) || Bdescr((StgPtr) ptr)->gen == oldest_gen)

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        if (!CLEAN(mvar->head)) goto dirty_MVAR;
        if (!CLEAN(mvar->tail)) goto dirty_MVAR;
        if (!CLEAN(mvar->value)) goto dirty_MVAR;
        mvar->header.info = &stg_MVAR_CLEAN_info;
        return true;

dirty_MVAR:
        mvar->header.info = &stg_MVAR_DIRTY_info;
        return false;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *)p);
        if (!CLEAN(tvar->current_value)) goto dirty_TVAR;
        if (!CLEAN(tvar->first_watch_queue_entry)) goto dirty_TVAR;
        tvar->header.info = &stg_TVAR_CLEAN_info;
        return true;

dirty_TVAR:
        tvar->header.info = &stg_TVAR_DIRTY_info;
        return false;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (StgPtr q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            if (!CLEAN(*q)) return false;
        }
        return true;
    }

    case FUN:
    case FUN_1_0:                       // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_NOCAF:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case PRIM:
    {
        StgPtr end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (StgPtr q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            if (!CLEAN(*q)) return false;
        }
        return true;
    }

    case WEAK:
        return false; // TODO

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
        if (!CLEAN(((StgMutVar *)p)->var)) {
            p->header.info = &stg_MUT_VAR_DIRTY_info;
            return false;
        } else {
            p->header.info = &stg_MUT_VAR_CLEAN_info;
            return true;
        }

    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        if (!CLEAN(bq->bh)) goto dirty_BLOCKING_QUEUE;
        if (!CLEAN(bq->owner)) goto dirty_BLOCKING_QUEUE;
        if (!CLEAN(bq->queue)) goto dirty_BLOCKING_QUEUE;
        if (!CLEAN(bq->link)) goto dirty_BLOCKING_QUEUE;
        bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        return true;

dirty_BLOCKING_QUEUE:
        bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        return false;
    }

    case THUNK_SELECTOR:
        return CLEAN(((StgSelector *) p)->selectee);

    case ARR_WORDS:
        return true;

    default:
        // TODO: the rest
        return false;
    }
#undef CLEAN
}

/* N.B. This happens during the pause so we own all capabilities. */
void nonmovingSweepMutLists()
{
    for (uint32_t n = 0; n < n_capabilities; n++) {
        Capability *cap = capabilities[n];
        bdescr *old_mut_list = cap->mut_lists[oldest_gen->no];
        cap->mut_lists[oldest_gen->no] = allocBlockOnNode_lock(cap->node);
        for (bdescr *bd = old_mut_list; bd; bd = bd->link) {
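            // Each word on the old mut_list block is a pointer to a closure;
            // keep it on the new mut_list only if it is still alive and not
            // provably clean.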
            for (StgPtr p = bd->start; p < bd->free; p++) {
                StgClosure **q = (StgClosure**)p;
                if (nonmovingIsAlive(*q) && !is_closure_clean(*q)) {
                    recordMutableCap(*q, cap, oldest_gen->no);
                }
            }
        }
        freeChain_lock(old_mut_list);
    }
}

/* A variant of freeChain_lock that holds the lock for at most max_dur freed
 * blocks at a time, to ensure that we don't starve other lock users (e.g. the
 * mutator).
 */
static void freeChain_lock_max(bdescr *bd, int max_dur)
{
  ACQUIRE_SM_LOCK;
  bdescr *next_bd;
  int i = 0;
  while (bd != NULL) {
    next_bd = bd->link;
    freeGroup(bd);
    bd = next_bd;
    if (i == max_dur) {
#if defined(THREADED_RTS)
        RELEASE_SM_LOCK;
        yieldThread();
        ACQUIRE_SM_LOCK;
#endif
        i = 0;
    }
    i++;
  }
  RELEASE_SM_LOCK;
}

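// Free the large objects left on nonmoving_large_objects (i.e. those not
// marked this cycle) and reinstate the marked ones as the new list.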
void nonmovingSweepLargeObjects()
{
    freeChain_lock_max(nonmoving_large_objects, 10000);
    nonmoving_large_objects = nonmoving_marked_large_objects;
    n_nonmoving_large_blocks = n_nonmoving_marked_large_blocks;
    nonmoving_marked_large_objects = NULL;
    n_nonmoving_marked_large_blocks = 0;
}

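// As nonmovingSweepLargeObjects, but for compact normal form (CNF) objects:
// free the unmarked compacts and reinstate the marked ones.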
void nonmovingSweepCompactObjects()
{
    bdescr *next;
    ACQUIRE_SM_LOCK;
    for (bdescr *bd = nonmoving_compact_objects; bd; bd = next) {
        next = bd->link;
        compactFree(((StgCompactNFDataBlock*)bd->start)->owner);
    }
    RELEASE_SM_LOCK;
    nonmoving_compact_objects = nonmoving_marked_compact_objects;
    n_nonmoving_compact_blocks = n_nonmoving_marked_compact_blocks;
    nonmoving_marked_compact_objects = NULL;
    n_nonmoving_marked_compact_blocks = 0;
}

// Helper for nonmovingSweepStableNameTable. Essentially nonmovingIsAlive, but
// also safe to call on objects outside the set of segments being swept (e.g.
// objects in the moving heap); see
// Note [Sweeping stable names in the concurrent collector] below.
static bool is_alive(StgClosure *p)
{
    if (!HEAP_ALLOCED_GC(p)) {
        return true;
    }

    if (nonmovingClosureBeingSwept(p)) {
        return nonmovingIsAlive(p);
    } else {
        // We don't want to sweep any stable names which weren't in the
        // set of segments that we swept.
        // See Note [Sweeping stable names in the concurrent collector]
        return true;
    }
}

void nonmovingSweepStableNameTable()
{
    // See comments in gcStableTables

    /* Note [Sweeping stable names in the concurrent collector]
     * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     *
     * When collecting concurrently we need to take care to avoid freeing
     * stable names that we didn't sweep this collection cycle. For instance,
     * consider the following situation:
     *
     *  1. We take a snapshot and start collection
     *  2. A mutator allocates a new object, then makes a stable name for it
     *  3. The mutator performs a minor GC and promotes the new object to the nonmoving heap
     *  4. The GC thread gets to the sweep phase and, when traversing the stable
     *     name table, finds the new object unmarked. It then assumes that the
     *     object is dead and removes the stable name from the stable name table.
     *
     * This would be wrong: the object is alive but was simply not part of the
     * snapshot being swept. To avoid this, is_alive only consults the mark bit
     * of objects in segments that are actually being swept this cycle and
     * conservatively treats everything else as live.
     */

    // FIXME: We can't use nonmovingIsAlive here without first using isAlive:
    // a stable name can die during moving heap collection and we can't use
    // nonmovingIsAlive on those objects. Inefficient.

    stableNameLock();
    FOR_EACH_STABLE_NAME(
        p, {
            if (p->sn_obj != NULL) {
                if (!is_alive((StgClosure*)p->sn_obj)) {
                    p->sn_obj = NULL; // Just to make an assertion happy
                    freeSnEntry(p);
                } else if (p->addr != NULL) {
                    if (!is_alive((StgClosure*)p->addr)) {
                        p->addr = NULL;
                    }
                }
            }
        });
    stableNameUnlock();
}