1 /* -----------------------------------------------------------------------------
2  *
3  * (c) The GHC Team 1998-2008
4  *
5  * Generational garbage collector: scavenging functions
6  *
7  * Documentation on the architecture of the Garbage Collector can be
8  * found in the online commentary:
9  *
10  *   https://gitlab.haskell.org/ghc/ghc/wikis/commentary/rts/storage/gc
11  *
12  * ---------------------------------------------------------------------------*/
13 
14 /* ----------------------------------------------------------------------------
15    We have two main scavenge functions:
16 
17    - scavenge_block(bdescr *bd)
18    - scavenge_one(StgPtr p)
19 
   As the names and parameters suggest, the first one scavenges a whole block
   while the second one scavenges only a single object. That is not the only
   difference, however: scavenge_block scavenges all SRTs, while scavenge_one
   only scavenges the SRTs of stack frames. The reason is that scavenge_one is
   called in two cases:
25 
26    - When scavenging a mut_list
27    - When scavenging a large object
28 
29    We don't have to scavenge SRTs when scavenging a mut_list, because we only
30    scavenge mut_lists in minor GCs, and static objects are only collected in
31    major GCs.
32 
33    However, because scavenge_one is also used to scavenge large objects (which
34    are scavenged even in major GCs), we need to deal with SRTs of large
35    objects. We never allocate large FUNs and THUNKs, but we allocate large
36    STACKs (e.g. in threadStackOverflow), and stack frames can have SRTs. So
37    scavenge_one skips FUN and THUNK SRTs but scavenges stack frame SRTs.
38 
39    In summary, in a major GC:
40 
41    - scavenge_block() scavenges all SRTs
42    - scavenge_one() scavenges only stack frame SRTs
43    ------------------------------------------------------------------------- */
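
/* A minimal sketch of the SRT policy described above (purely illustrative;
 * the real logic lives in scavenge_block(), scavenge_one() and
 * scavenge_stack() below, and the case bodies here are abbreviated):
 *
 *     // scavenge_block(), major GC: FUN/THUNK SRTs are followed
 *     case FUN:    scavenge_fun_srt(info);   ... evacuate payload ...
 *     case THUNK:  scavenge_thunk_srt(info); ... evacuate payload ...
 *
 *     // scavenge_one(): the FUN/THUNK cases only evacuate payload
 *     // pointers and never touch their SRTs; SRTs are reached only
 *     // through the STACK case, because scavenge_stack() does follow
 *     // the SRTs of the stack frames it walks.
 */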
44 
45 #include "PosixSource.h"
46 #include "Rts.h"
47 
48 #include "Storage.h"
49 #include "GC.h"
50 #include "GCThread.h"
51 #include "GCUtils.h"
52 #include "Compact.h"
53 #include "MarkStack.h"
54 #include "Evac.h"
55 #include "Scav.h"
56 #include "Apply.h"
57 #include "Trace.h"
58 #include "Sanity.h"
59 #include "Capability.h"
60 #include "LdvProfile.h"
61 #include "HeapUtils.h"
62 #include "Hash.h"
63 
64 #include "sm/MarkWeak.h"
65 #include "sm/NonMoving.h" // for nonmoving_set_closure_mark_bit
66 #include "sm/NonMovingScav.h"
67 
68 #include <string.h> /* for memset */
69 
70 static void scavenge_large_bitmap (StgPtr p,
71                                    StgLargeBitmap *large_bitmap,
72                                    StgWord size );
73 
74 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
75 # define evacuate(a) evacuate1(a)
76 # define evacuate_BLACKHOLE(a) evacuate_BLACKHOLE1(a)
77 # define scavenge_loop(a) scavenge_loop1(a)
78 # define scavenge_block(a) scavenge_block1(a)
79 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
80 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
81 # define scavengeTSO(tso) scavengeTSO1(tso)
82 # define scavenge_stack(p, stack_end) scavenge_stack1(p, stack_end)
83 # define scavenge_fun_srt(info) scavenge_fun_srt1(info)
85 # define scavenge_thunk_srt(info) scavenge_thunk_srt1(info)
86 # define scavenge_mut_arr_ptrs(info) scavenge_mut_arr_ptrs1(info)
87 # define scavenge_PAP(pap) scavenge_PAP1(pap)
88 # define scavenge_AP(ap) scavenge_AP1(ap)
89 # define scavenge_compact(str) scavenge_compact1(str)
90 #endif
91 
static void do_evacuate(StgClosure **p, void *user STG_UNUSED)
93 {
94     evacuate(p);
95 }
96 
97 /* -----------------------------------------------------------------------------
98    Scavenge a TSO.
99    -------------------------------------------------------------------------- */
100 
101 void
scavengeTSO (StgTSO *tso)
103 {
104     bool saved_eager;
105 
106     debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
107 
108     // update the pointer from the InCall.
109     if (tso->bound != NULL) {
110         // NB. We can't just set tso->bound->tso = tso, because this
111         // might be an invalid copy the TSO resulting from multiple
112         // threads evacuating the TSO simultaneously (see
113         // Evac.c:copy_tag()).  Calling evacuate() on this pointer
114         // will ensure that we update it to point to the correct copy.
115         evacuate((StgClosure **)&tso->bound->tso);
116     }
117 
118     saved_eager = gct->eager_promotion;
119     gct->eager_promotion = false;
120 
121     evacuate((StgClosure **)&tso->blocked_exceptions);
122     evacuate((StgClosure **)&tso->bq);
123 
    // scavenge the current transaction record
125     evacuate((StgClosure **)&tso->trec);
126 
127     evacuate((StgClosure **)&tso->stackobj);
128 
129     evacuate((StgClosure **)&tso->_link);
130     if (   tso->why_blocked == BlockedOnMVar
131         || tso->why_blocked == BlockedOnMVarRead
132         || tso->why_blocked == BlockedOnBlackHole
133         || tso->why_blocked == BlockedOnMsgThrowTo
134         || tso->why_blocked == NotBlocked
135         ) {
136         evacuate(&tso->block_info.closure);
137     }
138 #if defined(THREADED_RTS)
139     // in the THREADED_RTS, block_info.closure must always point to a
140     // valid closure, because we assume this in throwTo().  In the
141     // non-threaded RTS it might be a FD (for
142     // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
143     else {
144         tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
145     }
146 #endif
147 
148     tso->dirty = gct->failed_to_evac;
149 
150     gct->eager_promotion = saved_eager;
151 }
152 
153 /* ----------------------------------------------------------------------------
154    Scavenging compact objects
155    ------------------------------------------------------------------------- */
156 
157 typedef struct {
158     // We must save gct when calling mapHashTable(), which is compiled
159     // without GCThread.h and so uses a different calling convention.
160     // See also GC.c:mark_root where we do a similar thing.
161     gc_thread *saved_gct;
162     HashTable *newHash;
163 } MapHashData;
164 
165 static void
evacuate_hash_entry(MapHashData *dat, StgWord key, const void *value)
167 {
168     StgClosure *p = (StgClosure*)key;
169 #if defined(THREADED_RTS)
170     gc_thread *old_gct = gct;
171 #endif
172 
173     SET_GCT(dat->saved_gct);
174     evacuate(&p);
175     insertHashTable(dat->newHash, (StgWord)p, value);
176     SET_GCT(old_gct);
177 }
178 
179 /* Here we scavenge the sharing-preservation hash-table, which may contain keys
180  * living in from-space.
181  */
182 void
scavenge_compact(StgCompactNFData *str)
184 {
185     bool saved_eager;
186     saved_eager = gct->eager_promotion;
187     gct->eager_promotion = false;
188 
189     if (str->hash) {
190         MapHashData dat;
191         dat.saved_gct = gct;
192         HashTable *newHash = allocHashTable();
193         dat.newHash = newHash;
194         mapHashTable(str->hash, (void*)&dat, (MapHashFn)evacuate_hash_entry);
195         freeHashTable(str->hash, NULL);
196         str->hash = newHash;
197     }
198 
199     debugTrace(DEBUG_compact,
200                "compact alive @%p, gen %d, %" FMT_Word " bytes",
               str, Bdescr((P_)str)->gen_no, str->totalW * sizeof(W_));
202 
203     gct->eager_promotion = saved_eager;
204     if (gct->failed_to_evac) {
205         RELEASE_STORE(&((StgClosure *)str)->header.info, &stg_COMPACT_NFDATA_DIRTY_info);
206     } else {
207         RELEASE_STORE(&((StgClosure *)str)->header.info, &stg_COMPACT_NFDATA_CLEAN_info);
208     }
209 }
210 
211 /* -----------------------------------------------------------------------------
212    Mutable arrays of pointers
213    -------------------------------------------------------------------------- */
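
/* Card-marking sketch (illustrative only; mutArrPtrsCard()/mutArrPtrsCards()
 * are the real macros, but the write-barrier line below is just an example,
 * not code from this file): each card summarises 2^MUT_ARR_PTRS_CARD_BITS
 * consecutive payload words, so dirtying the card for a write to
 * a->payload[i] amounts to roughly
 *
 *     *mutArrPtrsCard(a, i >> MUT_ARR_PTRS_CARD_BITS) = 1;
 *
 * scavenge_mut_arr_ptrs() below walks every card of the array, whereas
 * scavenge_mut_arr_ptrs_marked() revisits only the cards that are set.
 */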
214 
StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
216 {
217     W_ m;
218     bool any_failed;
219     StgPtr p, q;
220 
221     any_failed = false;
222     p = (StgPtr)&a->payload[0];
223     for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
224     {
225         q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
226         for (; p < q; p++) {
227             evacuate((StgClosure**)p);
228         }
229         if (gct->failed_to_evac) {
230             any_failed = true;
231             *mutArrPtrsCard(a,m) = 1;
232             gct->failed_to_evac = false;
233         } else {
234             *mutArrPtrsCard(a,m) = 0;
235         }
236     }
237 
238     q = (StgPtr)&a->payload[a->ptrs];
239     if (p < q) {
240         for (; p < q; p++) {
241             evacuate((StgClosure**)p);
242         }
243         if (gct->failed_to_evac) {
244             any_failed = true;
245             *mutArrPtrsCard(a,m) = 1;
246             gct->failed_to_evac = false;
247         } else {
248             *mutArrPtrsCard(a,m) = 0;
249         }
250     }
251 
252     gct->failed_to_evac = any_failed;
253     return (StgPtr)a + mut_arr_ptrs_sizeW(a);
254 }
255 
256 // scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
258 {
259     W_ m;
260     StgPtr p, q;
261     bool any_failed;
262 
263     any_failed = false;
264     for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
265     {
266         if (*mutArrPtrsCard(a,m) != 0) {
267             p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
268             q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
269                         (StgPtr)&a->payload[a->ptrs]);
270             for (; p < q; p++) {
271                 evacuate((StgClosure**)p);
272             }
273             if (gct->failed_to_evac) {
274                 any_failed = true;
275                 gct->failed_to_evac = false;
276             } else {
277                 *mutArrPtrsCard(a,m) = 0;
278             }
279         }
280     }
281 
282     gct->failed_to_evac = any_failed;
283     return (StgPtr)a + mut_arr_ptrs_sizeW(a);
284 }
285 
286 STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, StgWord size, StgWord bitmap)
288 {
289     while (size > 0) {
290         if ((bitmap & 1) == 0) {
291             evacuate((StgClosure **)p);
292         }
293         p++;
294         bitmap = bitmap >> 1;
295         size--;
296     }
297     return p;
298 }
299 
300 /* -----------------------------------------------------------------------------
301    Blocks of function args occur on the stack (at the top) and
302    in PAPs.
303    -------------------------------------------------------------------------- */
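
/* For reference, the small-bitmap convention used below: a 0 bit means
 * "this word is a pointer, evacuate it", a 1 bit means "non-pointer, skip"
 * (see scavenge_small_bitmap() above).  As a worked example, a 3-word
 * argument block whose middle word is an unboxed Int# would have size 3 and
 * bitmap 0b010, so words 0 and 2 are evacuated and word 1 is skipped.
 */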
304 
305 STATIC_INLINE StgPtr
scavenge_arg_block (const StgFunInfoTable *fun_info, StgClosure **args)
307 {
308     StgPtr p;
309     StgWord bitmap;
310     StgWord size;
311 
312     p = (StgPtr)args;
313     switch (fun_info->f.fun_type) {
314     case ARG_GEN:
315         bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
316         size = BITMAP_SIZE(fun_info->f.b.bitmap);
317         goto small_bitmap;
318     case ARG_GEN_BIG:
319         size = GET_FUN_LARGE_BITMAP(fun_info)->size;
320         scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
321         p += size;
322         break;
323     default:
324         bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
325         size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
326     small_bitmap:
327         p = scavenge_small_bitmap(p, size, bitmap);
328         break;
329     }
330     return p;
331 }
332 
333 STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
335 {
336     StgPtr p;
337     StgWord bitmap;
338     const StgFunInfoTable *fun_info;
339 
340     fun_info = get_fun_itbl(UNTAG_CONST_CLOSURE(fun));
341     ASSERT(fun_info->i.type != PAP);
342     p = (StgPtr)payload;
343 
344     switch (fun_info->f.fun_type) {
345     case ARG_GEN:
346         bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
347         goto small_bitmap;
348     case ARG_GEN_BIG:
349         scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
350         p += size;
351         break;
352     case ARG_BCO:
353         scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
354         p += size;
355         break;
356     default:
357         bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
358     small_bitmap:
359         p = scavenge_small_bitmap(p, size, bitmap);
360         break;
361     }
362     return p;
363 }
364 
365 GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
367 {
368     evacuate(&pap->fun);
369     return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
370 }
371 
372 StgPtr
scavenge_AP (StgAP *ap)
374 {
375     evacuate(&ap->fun);
376     return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
377 }
378 
379 /* -----------------------------------------------------------------------------
380    Scavenge SRTs
381    -------------------------------------------------------------------------- */
382 
383 GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
385 {
386     StgThunkInfoTable *thunk_info;
387 
388     if (!major_gc) return;
389 
390     thunk_info = itbl_to_thunk_itbl(info);
391     if (thunk_info->i.srt) {
392         StgClosure *srt = (StgClosure*)GET_SRT(thunk_info);
393         evacuate(&srt);
394     }
395 }
396 
397 GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
399 {
400     StgFunInfoTable *fun_info;
401 
402     if (!major_gc) return;
403 
404     fun_info = itbl_to_fun_itbl(info);
405     if (fun_info->i.srt) {
406         StgClosure *srt = (StgClosure*)GET_FUN_SRT(fun_info);
407         evacuate(&srt);
408     }
409 }
410 
411 /* -----------------------------------------------------------------------------
412    Scavenge a block from the given scan pointer up to bd->free.
413 
414    evac_gen_no is set by the caller to be either zero (for a step in a
415    generation < N) or G where G is the generation of the step being
416    scavenged.
417 
418    We sometimes temporarily change evac_gen_no back to zero if we're
419    scavenging a mutable object where eager promotion isn't such a good
420    idea.
421    -------------------------------------------------------------------------- */
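
/* The per-closure pattern used repeatedly below for mutable objects, in
 * rough sketch form (the field and info-table names vary by closure type):
 *
 *     gct->eager_promotion = false;            // don't eagerly promote what
 *     evacuate(&obj->mutable_field);           //   a mutable field points to
 *     gct->eager_promotion = saved_eager_promotion;
 *     if (gct->failed_to_evac) {
 *         RELEASE_STORE(&obj->header.info, &stg_..._DIRTY_info);
 *     } else {
 *         RELEASE_STORE(&obj->header.info, &stg_..._CLEAN_info);
 *     }
 */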
422 
423 static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
425 {
426   StgPtr p, q;
427   const StgInfoTable *info;
428   bool saved_eager_promotion;
429   gen_workspace *ws;
430 
431   debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
432              bd->start, bd->gen_no, bd->u.scan);
433 
434   gct->scan_bd = bd;
435   gct->evac_gen_no = bd->gen_no;
436   saved_eager_promotion = gct->eager_promotion;
437   gct->failed_to_evac = false;
438 
439   ws = &gct->gens[bd->gen->no];
440 
441   p = bd->u.scan;
442 
443   // Sanity check: See Note [Deadlock detection under nonmoving collector].
444 #if defined(DEBUG)
445   if (RtsFlags.GcFlags.useNonmoving && deadlock_detect_gc) {
446       ASSERT(bd->gen == oldest_gen);
447   }
448 #endif
449 
450 
451   // we might be evacuating into the very object that we're
452   // scavenging, so we have to check the real bd->free pointer each
453   // time around the loop.
454   while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
455 
456     ASSERT(bd->link == NULL);
457     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
458     info = get_itbl((StgClosure *)p);
459 
460     ASSERT(gct->thunk_selector_depth == 0);
461 
462     q = p;
463     switch (info->type) {
464 
465     case MVAR_CLEAN:
466     case MVAR_DIRTY:
467     {
468         StgMVar *mvar = ((StgMVar *)p);
469         gct->eager_promotion = false;
470         evacuate((StgClosure **)&mvar->head);
471         evacuate((StgClosure **)&mvar->tail);
472         evacuate((StgClosure **)&mvar->value);
473         gct->eager_promotion = saved_eager_promotion;
474 
475         if (gct->failed_to_evac) {
476             RELEASE_STORE(&mvar->header.info, &stg_MVAR_DIRTY_info);
477         } else {
478             RELEASE_STORE(&mvar->header.info, &stg_MVAR_CLEAN_info);
479         }
480         p += sizeofW(StgMVar);
481         break;
482     }
483 
484     case TVAR:
485     {
486         StgTVar *tvar = ((StgTVar *)p);
487         gct->eager_promotion = false;
488         evacuate((StgClosure **)&tvar->current_value);
489         evacuate((StgClosure **)&tvar->first_watch_queue_entry);
490         gct->eager_promotion = saved_eager_promotion;
491 
492         if (gct->failed_to_evac) {
493             RELEASE_STORE(&tvar->header.info, &stg_TVAR_DIRTY_info);
494         } else {
495             RELEASE_STORE(&tvar->header.info, &stg_TVAR_CLEAN_info);
496         }
497         p += sizeofW(StgTVar);
498         break;
499     }
500 
501     case FUN_2_0:
502         scavenge_fun_srt(info);
503         evacuate(&((StgClosure *)p)->payload[1]);
504         evacuate(&((StgClosure *)p)->payload[0]);
505         p += sizeofW(StgHeader) + 2;
506         break;
507 
508     case THUNK_2_0:
509         scavenge_thunk_srt(info);
510         evacuate(&((StgThunk *)p)->payload[1]);
511         evacuate(&((StgThunk *)p)->payload[0]);
512         p += sizeofW(StgThunk) + 2;
513         break;
514 
515     case CONSTR_2_0:
516         evacuate(&((StgClosure *)p)->payload[1]);
517         evacuate(&((StgClosure *)p)->payload[0]);
518         p += sizeofW(StgHeader) + 2;
519         break;
520 
521     case THUNK_1_0:
522         scavenge_thunk_srt(info);
523         evacuate(&((StgThunk *)p)->payload[0]);
524         p += sizeofW(StgThunk) + 1;
525         break;
526 
527     case FUN_1_0:
528         scavenge_fun_srt(info);
529         FALLTHROUGH;
530     case CONSTR_1_0:
531         evacuate(&((StgClosure *)p)->payload[0]);
532         p += sizeofW(StgHeader) + 1;
533         break;
534 
535     case THUNK_0_1:
536         scavenge_thunk_srt(info);
537         p += sizeofW(StgThunk) + 1;
538         break;
539 
540     case FUN_0_1:
541         scavenge_fun_srt(info);
542         FALLTHROUGH;
543     case CONSTR_0_1:
544         p += sizeofW(StgHeader) + 1;
545         break;
546 
547     case THUNK_0_2:
548         scavenge_thunk_srt(info);
549         p += sizeofW(StgThunk) + 2;
550         break;
551 
552     case FUN_0_2:
553         scavenge_fun_srt(info);
554         FALLTHROUGH;
555     case CONSTR_0_2:
556         p += sizeofW(StgHeader) + 2;
557         break;
558 
559     case THUNK_1_1:
560         scavenge_thunk_srt(info);
561         evacuate(&((StgThunk *)p)->payload[0]);
562         p += sizeofW(StgThunk) + 2;
563         break;
564 
565     case FUN_1_1:
566         scavenge_fun_srt(info);
567         FALLTHROUGH;
568     case CONSTR_1_1:
569         evacuate(&((StgClosure *)p)->payload[0]);
570         p += sizeofW(StgHeader) + 2;
571         break;
572 
573     case FUN:
574         scavenge_fun_srt(info);
575         goto gen_obj;
576 
577     case THUNK:
578     {
579         StgPtr end;
580 
581         scavenge_thunk_srt(info);
582         end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
583         for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
584             evacuate((StgClosure **)p);
585         }
586         p += info->layout.payload.nptrs;
587         break;
588     }
589 
590     gen_obj:
591     case CONSTR:
592     case CONSTR_NOCAF:
593     case WEAK:
594     case PRIM:
595     {
596         StgPtr end;
597 
598         end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
599         for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
600             evacuate((StgClosure **)p);
601         }
602         p += info->layout.payload.nptrs;
603         break;
604     }
605 
606     case BCO: {
607         StgBCO *bco = (StgBCO *)p;
608         evacuate((StgClosure **)&bco->instrs);
609         evacuate((StgClosure **)&bco->literals);
610         evacuate((StgClosure **)&bco->ptrs);
611         p += bco_sizeW(bco);
612         break;
613     }
614 
615     case BLACKHOLE:
616         evacuate(&((StgInd *)p)->indirectee);
617         p += sizeofW(StgInd);
618         break;
619 
620     case MUT_VAR_CLEAN:
621     case MUT_VAR_DIRTY:
622         gct->eager_promotion = false;
623         evacuate(&((StgMutVar *)p)->var);
624         gct->eager_promotion = saved_eager_promotion;
625 
626         if (gct->failed_to_evac) {
627             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_VAR_DIRTY_info);
628         } else {
629             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_VAR_CLEAN_info);
630         }
631         p += sizeofW(StgMutVar);
632         break;
633 
634     case BLOCKING_QUEUE:
635     {
636         StgBlockingQueue *bq = (StgBlockingQueue *)p;
637 
638         gct->eager_promotion = false;
639         evacuate(&bq->bh);
640         evacuate((StgClosure**)&bq->owner);
641         evacuate((StgClosure**)&bq->queue);
642         evacuate((StgClosure**)&bq->link);
643         gct->eager_promotion = saved_eager_promotion;
644 
645         if (gct->failed_to_evac) {
646             RELEASE_STORE(&bq->header.info, &stg_BLOCKING_QUEUE_DIRTY_info);
647         } else {
648             RELEASE_STORE(&bq->header.info, &stg_BLOCKING_QUEUE_CLEAN_info);
649         }
650         p += sizeofW(StgBlockingQueue);
651         break;
652     }
653 
654     case THUNK_SELECTOR:
655     {
656         StgSelector *s = (StgSelector *)p;
657         evacuate(&s->selectee);
658         p += THUNK_SELECTOR_sizeW();
659         break;
660     }
661 
662     // A chunk of stack saved in a heap object
663     case AP_STACK:
664     {
665         StgAP_STACK *ap = (StgAP_STACK *)p;
666 
667         evacuate(&ap->fun);
668         scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
669         p = (StgPtr)ap->payload + ap->size;
670         break;
671     }
672 
673     case PAP:
674         p = scavenge_PAP((StgPAP *)p);
675         break;
676 
677     case AP:
678         p = scavenge_AP((StgAP *)p);
679         break;
680 
681     case ARR_WORDS:
682         // nothing to follow
683         p += arr_words_sizeW((StgArrBytes *)p);
684         break;
685 
686     case MUT_ARR_PTRS_CLEAN:
687     case MUT_ARR_PTRS_DIRTY:
688     {
689         // We don't eagerly promote objects pointed to by a mutable
690         // array, but if we find the array only points to objects in
691         // the same or an older generation, we mark it "clean" and
692         // avoid traversing it during minor GCs.
693         gct->eager_promotion = false;
694 
695         p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
696 
697         if (gct->failed_to_evac) {
698             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_DIRTY_info);
699         } else {
700             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_CLEAN_info);
701         }
702 
703         gct->eager_promotion = saved_eager_promotion;
704         gct->failed_to_evac = true; // always put it on the mutable list.
705         break;
706     }
707 
708     case MUT_ARR_PTRS_FROZEN_CLEAN:
709     case MUT_ARR_PTRS_FROZEN_DIRTY:
710         // follow everything
711     {
712         p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
713 
714         if (gct->failed_to_evac) {
715             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info);
716         } else {
717             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info);
718         }
719         break;
720     }
721 
722     case SMALL_MUT_ARR_PTRS_CLEAN:
723     case SMALL_MUT_ARR_PTRS_DIRTY:
724         // follow everything
725     {
726         StgPtr next;
727 
728         // We don't eagerly promote objects pointed to by a mutable
729         // array, but if we find the array only points to objects in
730         // the same or an older generation, we mark it "clean" and
731         // avoid traversing it during minor GCs.
732         gct->eager_promotion = false;
733         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
734         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
735             evacuate((StgClosure **)p);
736         }
737         gct->eager_promotion = saved_eager_promotion;
738 
739         if (gct->failed_to_evac) {
740             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
741         } else {
742             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_SMALL_MUT_ARR_PTRS_CLEAN_info);
743         }
744 
745         gct->failed_to_evac = true; // always put it on the mutable list.
746         break;
747     }
748 
749     case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
750     case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
751         // follow everything
752     {
753         StgPtr next;
754 
755         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
756         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
757             evacuate((StgClosure **)p);
758         }
759 
760         if (gct->failed_to_evac) {
761             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info);
762         } else {
763             RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info);
764         }
765         break;
766     }
767 
768     case TSO:
769     {
770         scavengeTSO((StgTSO *)p);
771         p += sizeofW(StgTSO);
772         break;
773     }
774 
775     case STACK:
776     {
777         StgStack *stack = (StgStack*)p;
778 
779         gct->eager_promotion = false;
780 
781         scavenge_stack(stack->sp, stack->stack + stack->stack_size);
782         stack->dirty = gct->failed_to_evac;
783         p += stack_sizeW(stack);
784 
785         gct->eager_promotion = saved_eager_promotion;
786         break;
787     }
788 
789     case MUT_PRIM:
790       {
791         StgPtr end;
792 
793         gct->eager_promotion = false;
794 
795         end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
796         for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
797             evacuate((StgClosure **)p);
798         }
799         p += info->layout.payload.nptrs;
800 
801         gct->eager_promotion = saved_eager_promotion;
802         gct->failed_to_evac = true; // mutable
803         break;
804       }
805 
806     case TREC_CHUNK:
807       {
808         StgWord i;
809         StgTRecChunk *tc = ((StgTRecChunk *) p);
810         TRecEntry *e = &(tc -> entries[0]);
811         gct->eager_promotion = false;
812         evacuate((StgClosure **)&tc->prev_chunk);
813         for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
814           evacuate((StgClosure **)&e->tvar);
815           evacuate((StgClosure **)&e->expected_value);
816           evacuate((StgClosure **)&e->new_value);
817         }
818         gct->eager_promotion = saved_eager_promotion;
819         gct->failed_to_evac = true; // mutable
820         p += sizeofW(StgTRecChunk);
821         break;
822       }
823 
824     default:
825         barf("scavenge: unimplemented/strange closure type %d @ %p",
826              info->type, p);
827     }
828 
829     /*
830      * We need to record the current object on the mutable list if
831      *  (a) It is actually mutable, or
832      *  (b) It contains pointers to a younger generation.
833      * Case (b) arises if we didn't manage to promote everything that
834      * the current object points to into the current generation.
835      */
836     if (gct->failed_to_evac) {
837         gct->failed_to_evac = false;
838         if (bd->gen_no > 0) {
839             recordMutableGen_GC((StgClosure *)q, bd->gen_no);
840         }
841     }
842   }
843 
844   if (p > bd->free)  {
845       gct->copied += ws->todo_free - bd->free;
846       RELEASE_STORE(&bd->free, p);
847   }
848 
849   debugTrace(DEBUG_gc, "   scavenged %ld bytes",
850              (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
851 
852   // update stats: this is a block that has been scavenged
853   gct->scanned += bd->free - bd->u.scan;
854   bd->u.scan = bd->free;
855 
856   if (bd != ws->todo_bd) {
857       // we're not going to evac any more objects into
858       // this block, so push it now.
859       push_scanned_block(bd, ws);
860   }
861 
862   gct->scan_bd = NULL;
863 }
864 /* -----------------------------------------------------------------------------
865    Scavenge everything on the mark stack.
866 
   This is slightly different from scavenge_block():
868       - we don't walk linearly through the objects, so the scavenger
869         doesn't need to advance the pointer on to the next object.
870    -------------------------------------------------------------------------- */
871 
872 static void
scavenge_mark_stack(void)
874 {
875     StgPtr p, q;
876     const StgInfoTable *info;
877     bool saved_eager_promotion;
878 
879     gct->evac_gen_no = oldest_gen->no;
880     saved_eager_promotion = gct->eager_promotion;
881 
882     while ((p = pop_mark_stack())) {
883 
884         ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
885         info = get_itbl((StgClosure *)p);
886 
887         q = p;
888         switch (info->type) {
889 
890         case MVAR_CLEAN:
891         case MVAR_DIRTY:
892         {
893             StgMVar *mvar = ((StgMVar *)p);
894             gct->eager_promotion = false;
895             evacuate((StgClosure **)&mvar->head);
896             evacuate((StgClosure **)&mvar->tail);
897             evacuate((StgClosure **)&mvar->value);
898             gct->eager_promotion = saved_eager_promotion;
899 
900             if (gct->failed_to_evac) {
901                 RELEASE_STORE(&mvar->header.info, &stg_MVAR_DIRTY_info);
902             } else {
903                 RELEASE_STORE(&mvar->header.info, &stg_MVAR_CLEAN_info);
904             }
905             break;
906         }
907 
908         case TVAR:
909         {
910             StgTVar *tvar = ((StgTVar *)p);
911             gct->eager_promotion = false;
912             evacuate((StgClosure **)&tvar->current_value);
913             evacuate((StgClosure **)&tvar->first_watch_queue_entry);
914             gct->eager_promotion = saved_eager_promotion;
915 
916             if (gct->failed_to_evac) {
917                 RELEASE_STORE(&tvar->header.info, &stg_TVAR_DIRTY_info);
918             } else {
919                 RELEASE_STORE(&tvar->header.info, &stg_TVAR_CLEAN_info);
920             }
921             break;
922         }
923 
924         case FUN_2_0:
925             scavenge_fun_srt(info);
926             evacuate(&((StgClosure *)p)->payload[1]);
927             evacuate(&((StgClosure *)p)->payload[0]);
928             break;
929 
930         case THUNK_2_0:
931             scavenge_thunk_srt(info);
932             evacuate(&((StgThunk *)p)->payload[1]);
933             evacuate(&((StgThunk *)p)->payload[0]);
934             break;
935 
936         case CONSTR_2_0:
937             evacuate(&((StgClosure *)p)->payload[1]);
938             evacuate(&((StgClosure *)p)->payload[0]);
939             break;
940 
941         case FUN_1_0:
942         case FUN_1_1:
943             scavenge_fun_srt(info);
944             evacuate(&((StgClosure *)p)->payload[0]);
945             break;
946 
947         case THUNK_1_0:
948         case THUNK_1_1:
949             scavenge_thunk_srt(info);
950             evacuate(&((StgThunk *)p)->payload[0]);
951             break;
952 
953         case CONSTR_1_0:
954         case CONSTR_1_1:
955             evacuate(&((StgClosure *)p)->payload[0]);
956             break;
957 
958         case FUN_0_1:
959         case FUN_0_2:
960             scavenge_fun_srt(info);
961             break;
962 
963         case THUNK_0_1:
964         case THUNK_0_2:
965             scavenge_thunk_srt(info);
966             break;
967 
968         case CONSTR_0_1:
969         case CONSTR_0_2:
970             break;
971 
972         case FUN:
973             scavenge_fun_srt(info);
974             goto gen_obj;
975 
976         case THUNK:
977         {
978             StgPtr end;
979 
980             scavenge_thunk_srt(info);
981             end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
982             for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
983                 evacuate((StgClosure **)p);
984             }
985             break;
986         }
987 
988         gen_obj:
989         case CONSTR:
990         case CONSTR_NOCAF:
991         case WEAK:
992         case PRIM:
993         {
994             StgPtr end;
995 
996             end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
997             for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
998                 evacuate((StgClosure **)p);
999             }
1000             break;
1001         }
1002 
1003         case BCO: {
1004             StgBCO *bco = (StgBCO *)p;
1005             evacuate((StgClosure **)&bco->instrs);
1006             evacuate((StgClosure **)&bco->literals);
1007             evacuate((StgClosure **)&bco->ptrs);
1008             break;
1009         }
1010 
1011         case IND:
1012         case BLACKHOLE:
1013             evacuate(&((StgInd *)p)->indirectee);
1014             break;
1015 
1016         case MUT_VAR_CLEAN:
1017         case MUT_VAR_DIRTY: {
1018             gct->eager_promotion = false;
1019             evacuate(&((StgMutVar *)p)->var);
1020             gct->eager_promotion = saved_eager_promotion;
1021 
1022             if (gct->failed_to_evac) {
1023                 RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_VAR_DIRTY_info);
1024             } else {
1025                 RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_VAR_CLEAN_info);
1026             }
1027             break;
1028         }
1029 
1030         case BLOCKING_QUEUE:
1031         {
1032             StgBlockingQueue *bq = (StgBlockingQueue *)p;
1033 
1034             gct->eager_promotion = false;
1035             evacuate(&bq->bh);
1036             evacuate((StgClosure**)&bq->owner);
1037             evacuate((StgClosure**)&bq->queue);
1038             evacuate((StgClosure**)&bq->link);
1039             gct->eager_promotion = saved_eager_promotion;
1040 
1041             if (gct->failed_to_evac) {
1042                 RELEASE_STORE(&bq->header.info, &stg_BLOCKING_QUEUE_DIRTY_info);
1043             } else {
1044                 RELEASE_STORE(&bq->header.info, &stg_BLOCKING_QUEUE_CLEAN_info);
1045             }
1046             break;
1047         }
1048 
1049         case ARR_WORDS:
1050             break;
1051 
1052         case THUNK_SELECTOR:
1053         {
1054             StgSelector *s = (StgSelector *)p;
1055             evacuate(&s->selectee);
1056             break;
1057         }
1058 
1059         // A chunk of stack saved in a heap object
1060         case AP_STACK:
1061         {
1062             StgAP_STACK *ap = (StgAP_STACK *)p;
1063 
1064             evacuate(&ap->fun);
1065             scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1066             break;
1067         }
1068 
1069         case PAP:
1070             scavenge_PAP((StgPAP *)p);
1071             break;
1072 
1073         case AP:
1074             scavenge_AP((StgAP *)p);
1075             break;
1076 
1077         case MUT_ARR_PTRS_CLEAN:
1078         case MUT_ARR_PTRS_DIRTY:
1079             // follow everything
1080         {
1081             // We don't eagerly promote objects pointed to by a mutable
1082             // array, but if we find the array only points to objects in
1083             // the same or an older generation, we mark it "clean" and
1084             // avoid traversing it during minor GCs.
1085             gct->eager_promotion = false;
1086 
1087             scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1088 
1089             if (gct->failed_to_evac) {
1090                 RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_DIRTY_info);
1091             } else {
1092                 RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_CLEAN_info);
1093             }
1094 
1095             gct->eager_promotion = saved_eager_promotion;
1096             gct->failed_to_evac = true; // mutable anyhow.
1097             break;
1098         }
1099 
1100         case MUT_ARR_PTRS_FROZEN_CLEAN:
1101         case MUT_ARR_PTRS_FROZEN_DIRTY:
1102             // follow everything
1103         {
1104             StgPtr q = p;
1105 
1106             scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1107 
1108             if (gct->failed_to_evac) {
1109                 RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info);
1110             } else {
1111                 RELEASE_STORE(&((StgClosure *) q)->header.info, &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info);
1112             }
1113             break;
1114         }
1115 
1116         case SMALL_MUT_ARR_PTRS_CLEAN:
1117         case SMALL_MUT_ARR_PTRS_DIRTY:
1118             // follow everything
1119         {
1120             StgPtr next;
1121             bool saved_eager;
1122 
1123             // We don't eagerly promote objects pointed to by a mutable
1124             // array, but if we find the array only points to objects in
1125             // the same or an older generation, we mark it "clean" and
1126             // avoid traversing it during minor GCs.
1127             saved_eager = gct->eager_promotion;
1128             gct->eager_promotion = false;
1129             next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1130             for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1131                 evacuate((StgClosure **)p);
1132             }
1133             gct->eager_promotion = saved_eager;
1134 
1135             if (gct->failed_to_evac) {
1136                 RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
1137             } else {
1138                 RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_CLEAN_info);
1139             }
1140 
1141             gct->failed_to_evac = true; // mutable anyhow.
1142             break;
1143         }
1144 
1145         case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1146         case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1147             // follow everything
1148         {
1149             StgPtr next, q = p;
1150 
1151             next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1152             for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1153                 evacuate((StgClosure **)p);
1154             }
1155 
1156             if (gct->failed_to_evac) {
1157                 RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info);
1158             } else {
1159                 RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info);
1160             }
1161             break;
1162         }
1163 
1164         case TSO:
1165         {
1166             scavengeTSO((StgTSO*)p);
1167             break;
1168         }
1169 
1170         case STACK:
1171         {
1172             StgStack *stack = (StgStack*)p;
1173 
1174             gct->eager_promotion = false;
1175 
1176             scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1177             stack->dirty = gct->failed_to_evac;
1178 
1179             gct->eager_promotion = saved_eager_promotion;
1180             break;
1181         }
1182 
1183         case MUT_PRIM:
1184         {
1185             StgPtr end;
1186 
1187             gct->eager_promotion = false;
1188 
1189             end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1190             for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1191                 evacuate((StgClosure **)p);
1192             }
1193 
1194             gct->eager_promotion = saved_eager_promotion;
1195             gct->failed_to_evac = true; // mutable
1196             break;
1197         }
1198 
1199         case TREC_CHUNK:
1200           {
1201             StgWord i;
1202             StgTRecChunk *tc = ((StgTRecChunk *) p);
1203             TRecEntry *e = &(tc -> entries[0]);
1204             gct->eager_promotion = false;
1205             evacuate((StgClosure **)&tc->prev_chunk);
1206             for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1207               evacuate((StgClosure **)&e->tvar);
1208               evacuate((StgClosure **)&e->expected_value);
1209               evacuate((StgClosure **)&e->new_value);
1210             }
1211             gct->eager_promotion = saved_eager_promotion;
1212             gct->failed_to_evac = true; // mutable
1213             break;
1214           }
1215 
1216         default:
1217             barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1218                  info->type, p);
1219         }
1220 
1221         if (gct->failed_to_evac) {
1222             gct->failed_to_evac = false;
1223             if (gct->evac_gen_no) {
1224                 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1225             }
1226         }
1227     } // while (p = pop_mark_stack())
1228 }
1229 
1230 /* -----------------------------------------------------------------------------
1231    Scavenge one object.
1232 
1233    This is used for objects that are temporarily marked as mutable
1234    because they contain old-to-new generation pointers.  Only certain
1235    objects can have this property.
1236    -------------------------------------------------------------------------- */
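
/* The bool result is gct->failed_to_evac for this object: true means the
 * object still points into a younger generation, so callers such as
 * scavenge_mutable_list() keep it on the mutable list, roughly:
 *
 *     if (scavenge_one(p)) {
 *         recordMutableGen_GC((StgClosure *)p, gen_no);
 *     }
 */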
1237 
1238 static bool
scavenge_one(StgPtr p)
1240 {
1241     const StgInfoTable *info;
1242     bool no_luck;
1243     bool saved_eager_promotion;
1244 
1245     saved_eager_promotion = gct->eager_promotion;
1246 
1247     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1248     info = get_itbl((StgClosure *)p);
1249 
1250     switch (info->type) {
1251 
1252     case MVAR_CLEAN:
1253     case MVAR_DIRTY:
1254     {
1255         StgMVar *mvar = ((StgMVar *)p);
1256         gct->eager_promotion = false;
1257         evacuate((StgClosure **)&mvar->head);
1258         evacuate((StgClosure **)&mvar->tail);
1259         evacuate((StgClosure **)&mvar->value);
1260         gct->eager_promotion = saved_eager_promotion;
1261 
1262         if (gct->failed_to_evac) {
1263             RELEASE_STORE(&mvar->header.info, &stg_MVAR_DIRTY_info);
1264         } else {
1265             RELEASE_STORE(&mvar->header.info, &stg_MVAR_CLEAN_info);
1266         }
1267         break;
1268     }
1269 
1270     case TVAR:
1271     {
1272         StgTVar *tvar = ((StgTVar *)p);
1273         gct->eager_promotion = false;
1274         evacuate((StgClosure **)&tvar->current_value);
1275         evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1276         gct->eager_promotion = saved_eager_promotion;
1277 
1278         if (gct->failed_to_evac) {
1279             RELEASE_STORE(&tvar->header.info, &stg_TVAR_DIRTY_info);
1280         } else {
1281             RELEASE_STORE(&tvar->header.info, &stg_TVAR_CLEAN_info);
1282         }
1283         break;
1284     }
1285 
1286     case THUNK:
1287     case THUNK_1_0:
1288     case THUNK_0_1:
1289     case THUNK_1_1:
1290     case THUNK_0_2:
1291     case THUNK_2_0:
1292     {
1293         StgPtr q, end;
1294 
1295         end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1296         for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1297             evacuate((StgClosure **)q);
1298         }
1299         break;
1300     }
1301 
1302     case FUN:
1303     case FUN_1_0:                       // hardly worth specialising these guys
1304     case FUN_0_1:
1305     case FUN_1_1:
1306     case FUN_0_2:
1307     case FUN_2_0:
1308     case CONSTR:
1309     case CONSTR_NOCAF:
1310     case CONSTR_1_0:
1311     case CONSTR_0_1:
1312     case CONSTR_1_1:
1313     case CONSTR_0_2:
1314     case CONSTR_2_0:
1315     case PRIM:
1316     {
1317         StgPtr q, end;
1318 
1319         end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1320         for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1321             evacuate((StgClosure **)q);
1322         }
1323         break;
1324     }
1325 
1326     case WEAK:
1327         // This WEAK object will not be considered by tidyWeakList during this
1328         // collection because it is in a generation > N, but it is on the
1329         // mutable list so we must evacuate all of its pointers because some
1330         // of them may point into a younger generation.
1331         scavengeLiveWeak((StgWeak *)p);
1332         break;
1333 
1334     case MUT_VAR_CLEAN:
1335     case MUT_VAR_DIRTY: {
1336         StgPtr q = p;
1337 
1338         gct->eager_promotion = false;
1339         evacuate(&((StgMutVar *)p)->var);
1340         gct->eager_promotion = saved_eager_promotion;
1341 
1342         if (gct->failed_to_evac) {
1343             RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_MUT_VAR_DIRTY_info);
1344         } else {
1345             RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_MUT_VAR_CLEAN_info);
1346         }
1347         break;
1348     }
1349 
1350     case BLOCKING_QUEUE:
1351     {
1352         StgBlockingQueue *bq = (StgBlockingQueue *)p;
1353 
1354         gct->eager_promotion = false;
1355         evacuate(&bq->bh);
1356         evacuate((StgClosure**)&bq->owner);
1357         evacuate((StgClosure**)&bq->queue);
1358         evacuate((StgClosure**)&bq->link);
1359         gct->eager_promotion = saved_eager_promotion;
1360 
1361         if (gct->failed_to_evac) {
1362             RELEASE_STORE(&bq->header.info, &stg_BLOCKING_QUEUE_DIRTY_info);
1363         } else {
1364             RELEASE_STORE(&bq->header.info, &stg_BLOCKING_QUEUE_CLEAN_info);
1365         }
1366         break;
1367     }
1368 
1369     case THUNK_SELECTOR:
1370     {
1371         StgSelector *s = (StgSelector *)p;
1372         evacuate(&s->selectee);
1373         break;
1374     }
1375 
1376     case AP_STACK:
1377     {
1378         StgAP_STACK *ap = (StgAP_STACK *)p;
1379 
1380         evacuate(&ap->fun);
1381         scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1382         p = (StgPtr)ap->payload + ap->size;
1383         break;
1384     }
1385 
1386     case PAP:
1387         p = scavenge_PAP((StgPAP *)p);
1388         break;
1389 
1390     case AP:
1391         p = scavenge_AP((StgAP *)p);
1392         break;
1393 
1394     case ARR_WORDS:
1395         // nothing to follow
1396         break;
1397 
1398     case MUT_ARR_PTRS_CLEAN:
1399     case MUT_ARR_PTRS_DIRTY:
1400     {
1401         // We don't eagerly promote objects pointed to by a mutable
1402         // array, but if we find the array only points to objects in
1403         // the same or an older generation, we mark it "clean" and
1404         // avoid traversing it during minor GCs.
1405         gct->eager_promotion = false;
1406 
1407         scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1408 
1409         if (gct->failed_to_evac) {
1410             RELEASE_STORE(&((StgClosure *)p)->header.info, &stg_MUT_ARR_PTRS_DIRTY_info);
1411         } else {
1412             RELEASE_STORE(&((StgClosure *)p)->header.info, &stg_MUT_ARR_PTRS_CLEAN_info);
1413         }
1414 
1415         gct->eager_promotion = saved_eager_promotion;
1416         gct->failed_to_evac = true;
1417         break;
1418     }
1419 
1420     case MUT_ARR_PTRS_FROZEN_CLEAN:
1421     case MUT_ARR_PTRS_FROZEN_DIRTY:
1422     {
1423         // follow everything
1424         scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1425 
1426         if (gct->failed_to_evac) {
1427             RELEASE_STORE(&((StgClosure *)p)->header.info, &stg_MUT_ARR_PTRS_FROZEN_DIRTY_info);
1428         } else {
1429             RELEASE_STORE(&((StgClosure *)p)->header.info, &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info);
1430         }
1431         break;
1432     }
1433 
1434     case SMALL_MUT_ARR_PTRS_CLEAN:
1435     case SMALL_MUT_ARR_PTRS_DIRTY:
1436     {
1437         StgPtr next, q;
1438         bool saved_eager;
1439 
1440         // We don't eagerly promote objects pointed to by a mutable
1441         // array, but if we find the array only points to objects in
1442         // the same or an older generation, we mark it "clean" and
1443         // avoid traversing it during minor GCs.
1444         saved_eager = gct->eager_promotion;
1445         gct->eager_promotion = false;
1446         q = p;
1447         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1448         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1449             evacuate((StgClosure **)p);
1450         }
1451         gct->eager_promotion = saved_eager;
1452 
1453         if (gct->failed_to_evac) {
1454             RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_DIRTY_info);
1455         } else {
1456             RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_CLEAN_info);
1457         }
1458 
1459         gct->failed_to_evac = true;
1460         break;
1461     }
1462 
1463     case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
1464     case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
1465     {
1466         // follow everything
1467         StgPtr next, q=p;
1468 
1469         next = p + small_mut_arr_ptrs_sizeW((StgSmallMutArrPtrs*)p);
1470         for (p = (P_)((StgSmallMutArrPtrs *)p)->payload; p < next; p++) {
1471             evacuate((StgClosure **)p);
1472         }
1473 
1474         if (gct->failed_to_evac) {
1475             RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_FROZEN_DIRTY_info);
1476         } else {
1477             RELEASE_STORE(&((StgClosure *)q)->header.info, &stg_SMALL_MUT_ARR_PTRS_FROZEN_CLEAN_info);
1478         }
1479         break;
1480     }
1481 
1482     case TSO:
1483     {
1484         scavengeTSO((StgTSO*)p);
1485         break;
1486     }
1487 
1488     case STACK:
1489     {
1490         StgStack *stack = (StgStack*)p;
1491 
1492         gct->eager_promotion = false;
1493 
1494         scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1495         stack->dirty = gct->failed_to_evac;
1496 
1497         gct->eager_promotion = saved_eager_promotion;
1498         break;
1499     }
1500 
1501     case MUT_PRIM:
1502     {
1503         StgPtr end;
1504 
1505         gct->eager_promotion = false;
1506 
1507         end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1508         for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1509             evacuate((StgClosure **)p);
1510         }
1511 
1512         gct->eager_promotion = saved_eager_promotion;
1513         gct->failed_to_evac = true; // mutable
1514         break;
1515 
1516     }
1517 
1518     case TREC_CHUNK:
1519       {
1520         StgWord i;
1521         StgTRecChunk *tc = ((StgTRecChunk *) p);
1522         TRecEntry *e = &(tc -> entries[0]);
1523         gct->eager_promotion = false;
1524         evacuate((StgClosure **)&tc->prev_chunk);
1525         for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1526           evacuate((StgClosure **)&e->tvar);
1527           evacuate((StgClosure **)&e->expected_value);
1528           evacuate((StgClosure **)&e->new_value);
1529         }
1530         gct->eager_promotion = saved_eager_promotion;
1531         gct->failed_to_evac = true; // mutable
1532         break;
1533       }
1534 
1535     case IND:
1536         // IND can happen, for example, when the interpreter allocates
1537         // a gigantic AP closure (more than one block), which ends up
1538         // on the large-object list and then gets updated.  See #3424.
1539     case BLACKHOLE:
1540     case IND_STATIC:
1541         evacuate(&((StgInd *)p)->indirectee);
1542 
1543 #if 0 && defined(DEBUG)
1544       if (RtsFlags.DebugFlags.gc)
1545       /* Debugging code to print out the size of the thing we just
1546        * promoted
1547        */
1548       {
1549         StgPtr start = gen->scan;
1550         bdescr *start_bd = gen->scan_bd;
1551         StgWord size = 0;
1552         scavenge(&gen);
1553         if (start_bd != gen->scan_bd) {
1554           size += (P_)BLOCK_ROUND_UP(start) - start;
1555           start_bd = start_bd->link;
1556           while (start_bd != gen->scan_bd) {
1557             size += BLOCK_SIZE_W;
1558             start_bd = start_bd->link;
1559           }
1560           size += gen->scan -
1561             (P_)BLOCK_ROUND_DOWN(gen->scan);
1562         } else {
1563           size = gen->scan - start;
1564         }
1565         debugBelch("evac IND: %ld bytes", size * sizeof(W_));
1566       }
1567 #endif
1568       break;
1569 
1570     case COMPACT_NFDATA:
1571         scavenge_compact((StgCompactNFData*)p);
1572         break;
1573 
1574     default:
1575         barf("scavenge_one: strange object %d", (int)(info->type));
1576     }
1577 
1578     no_luck = gct->failed_to_evac;
1579     gct->failed_to_evac = false;
1580     return (no_luck);
1581 }
1582 
1583 /* -----------------------------------------------------------------------------
1584    Scavenging mutable lists.
1585 
1586    We treat the mutable list of each generation > N (i.e. all the
1587    generations older than the one being collected) as roots.  We also
1588    remove non-mutable objects from the mutable list at this point.
1589    -------------------------------------------------------------------------- */
1590 
1591 static void
scavenge_mutable_list(bdescr *bd, generation *gen)
1593 {
1594     StgPtr p, q;
1595 #if defined(DEBUG)
1596     MutListScavStats stats; // Local accumulator
1597     zeroMutListScavStats(&stats);
1598 #endif
1599 
1600     uint32_t gen_no = gen->no;
1601     gct->evac_gen_no = gen_no;
1602 
1603     for (; bd != NULL; bd = bd->link) {
1604         for (q = bd->start; q < bd->free; q++) {
1605             p = (StgPtr)*q;
1606             ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1607 
1608 #if defined(DEBUG)
1609             const StgInfoTable *pinfo;
1610             switch (get_itbl((StgClosure *)p)->type) {
1611             case MUT_VAR_CLEAN:
1612                 // can happen due to concurrent writeMutVars
1613             case MUT_VAR_DIRTY:
1614                 stats.n_MUTVAR++; break;
1615             case MUT_ARR_PTRS_CLEAN:
1616             case MUT_ARR_PTRS_DIRTY:
1617             case MUT_ARR_PTRS_FROZEN_CLEAN:
1618             case MUT_ARR_PTRS_FROZEN_DIRTY:
1619                 stats.n_MUTARR++; break;
1620             case MVAR_CLEAN:
1621                 barf("MVAR_CLEAN on mutable list");
1622             case MVAR_DIRTY:
1623                 stats.n_MVAR++; break;
1624             case TVAR:
1625                 stats.n_TVAR++; break;
1626             case TREC_CHUNK:
1627                 stats.n_TREC_CHUNK++; break;
1628             case MUT_PRIM:
1629                 pinfo = ((StgClosure*)p)->header.info;
1630                 if (pinfo == &stg_TVAR_WATCH_QUEUE_info)
1631                     stats.n_TVAR_WATCH_QUEUE++;
1632                 else if (pinfo == &stg_TREC_HEADER_info)
1633                     stats.n_TREC_HEADER++;
1634                 else
1635                     stats.n_OTHERS++;
1636                 break;
1637             default:
1638                 stats.n_OTHERS++; break;
1639             }
1640 #endif
1641 
            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS, are always
            // on the mutable list.
1648             //
1649             switch (get_itbl((StgClosure *)p)->type) {
1650             case MUT_ARR_PTRS_CLEAN:
1651             case SMALL_MUT_ARR_PTRS_CLEAN:
1652                 recordMutableGen_GC((StgClosure *)p,gen_no);
1653                 continue;
1654             case MUT_ARR_PTRS_DIRTY:
1655             {
1656                 bool saved_eager_promotion;
1657                 saved_eager_promotion = gct->eager_promotion;
1658                 gct->eager_promotion = false;
1659 
1660                 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1661 
1662                 if (gct->failed_to_evac) {
1663                     RELEASE_STORE(&((StgClosure *)p)->header.info, &stg_MUT_ARR_PTRS_DIRTY_info);
1664                 } else {
1665                     RELEASE_STORE(&((StgClosure *)p)->header.info, &stg_MUT_ARR_PTRS_CLEAN_info);
1666                 }
1667 
1668                 gct->eager_promotion = saved_eager_promotion;
1669                 gct->failed_to_evac = false;
1670                 recordMutableGen_GC((StgClosure *)p,gen_no);
1671                 continue;
1672             }
1673             default:
1674                 ;
1675             }
1676 
1677             if (RtsFlags.GcFlags.useNonmoving && major_gc && gen == oldest_gen) {
1678                 // We can't use scavenge_one here as we need to scavenge SRTs
1679                 nonmovingScavengeOne((StgClosure *)p);
1680             } else if (scavenge_one(p)) {
1681                 // didn't manage to promote everything, so put the
1682                 // object back on the list.
1683                 recordMutableGen_GC((StgClosure *)p,gen_no);
1684             }
1685         }
1686     }
1687 
1688 #if defined(DEBUG)
1689     // For lack of a better option we protect mutlist_scav_stats with oldest_gen->sync
1690     ACQUIRE_SPIN_LOCK(&oldest_gen->sync);
1691     addMutListScavStats(&stats, &mutlist_scav_stats);
1692     RELEASE_SPIN_LOCK(&oldest_gen->sync);
1693 #endif
1694 }
1695 
1696 void
1697 scavenge_capability_mut_lists (Capability *cap)
1698 {
1699     // In a major GC, only the non-moving heap's mutable list is treated as a root.
1700     if (RtsFlags.GcFlags.useNonmoving && major_gc) {
1701         uint32_t g = oldest_gen->no;
1702         scavenge_mutable_list(cap->saved_mut_lists[g], oldest_gen);
1703         freeChain_sync(cap->saved_mut_lists[g]);
1704         cap->saved_mut_lists[g] = NULL;
1705         return;
1706     }
1707 
1708     /* Mutable lists from each generation > N:
1709      * we want to *scavenge* these roots, not evacuate them: they're not
1710      * going to move in this GC.
1711      * Also do them in reverse generation order, for the usual reason:
1712      * to reduce the likelihood of spurious old->new pointers.
1713      */
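    /* Illustrative example (editor's addition): with 3 generations and N == 0
     * (a minor GC), the loop below scavenges cap->saved_mut_lists[2] first,
     * then cap->saved_mut_lists[1], freeing each chain as it goes.
     */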
1714     for (uint32_t g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1715         scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1716         freeChain_sync(cap->saved_mut_lists[g]);
1717         cap->saved_mut_lists[g] = NULL;
1718     }
1719 }
1720 
1721 /* -----------------------------------------------------------------------------
1722    Scavenging the static objects.
1723 
1724    Static objects reachable from the collected generations are linked on
1725    gct->static_objects.  We evacuate everything they point to (always into
1726    the oldest generation) and move each one onto gct->scavenged_static_objects.
1727    -------------------------------------------------------------------------- */
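/* Worked example (editor's addition, illustrative only): scavenging an
 * IND_STATIC below may evacuate its indirectee, and evacuate() can in turn
 * discover further static closures and link them onto gct->static_objects;
 * this is why the loop re-reads the list head on every iteration and only
 * stops when it sees END_OF_STATIC_OBJECT_LIST.
 */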
1728 
1729 static void
1730 scavenge_static(void)
1731 {
1732   StgClosure *flagged_p, *p;
1733   const StgInfoTable *info;
1734 
1735   debugTrace(DEBUG_gc, "scavenging static objects");
1736 
1737   /* Always evacuate straight to the oldest generation for static
1738    * objects */
1739   gct->evac_gen_no = oldest_gen->no;
1740 
1741   /* keep going until we've scavenged all the objects on the linked
1742      list... */
1743 
1744   while (1) {
1745 
1746     /* get the next static object from the list.  Remember, there might
1747      * be more stuff on this list after each evacuation...
1748      * (evacuate() can add to gct->static_objects)
1749      */
1750     flagged_p = gct->static_objects;
1751     if (flagged_p == END_OF_STATIC_OBJECT_LIST) {
1752           break;
1753     }
1754     p = UNTAG_STATIC_LIST_PTR(flagged_p);
1755 
1756     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1757     info = get_itbl(p);
1758     // make sure the info pointer is into text space
1759 
1760     /* Take this object *off* the static_objects list,
1761      * and put it on the scavenged_static_objects list.
1762      */
1763     StgClosure **link = STATIC_LINK(info,p);
1764     gct->static_objects = RELAXED_LOAD(link);
1765     RELAXED_STORE(link, gct->scavenged_static_objects);
1766     gct->scavenged_static_objects = flagged_p;
1767 
1768     switch (info -> type) {
1769 
1770     case IND_STATIC:
1771       {
1772         StgInd *ind = (StgInd *)p;
1773         evacuate(&ind->indirectee);
1774 
1775         /* might fail to evacuate it, in which case we have to pop it
1776          * back on the mutable list of the oldest generation.  We
1777          * leave it *on* the scavenged_static_objects list, though,
1778          * in case we visit this object again.
1779          */
1780         if (gct->failed_to_evac) {
1781           gct->failed_to_evac = false;
1782           recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1783         }
1784         break;
1785       }
1786 
1787     case THUNK_STATIC:
1788       scavenge_thunk_srt(info);
1789       break;
1790 
1791     case FUN_STATIC:
1792       scavenge_fun_srt(info);
1793       FALLTHROUGH;
1794 
1795       // a FUN_STATIC can also be an SRT, so it may have pointer
1796       // fields.  See Note [SRTs] in CmmBuildInfoTables, specifically
1797       // the [FUN] optimisation.
1798 
1799     case CONSTR:
1800     case CONSTR_NOCAF:
1801     case CONSTR_1_0:
1802     case CONSTR_0_1:
1803     case CONSTR_2_0:
1804     case CONSTR_1_1:
1805     case CONSTR_0_2:
1806       {
1807         StgPtr q, next;
1808 
1809         next = (P_)p->payload + info->layout.payload.ptrs;
1810         // evacuate the pointers
1811         for (q = (P_)p->payload; q < next; q++) {
1812             evacuate((StgClosure **)q);
1813         }
1814         break;
1815       }
1816 
1817     default:
1818       barf("scavenge_static: strange closure %d", (int)(info->type));
1819     }
1820 
1821     ASSERT(gct->failed_to_evac == false);
1822   }
1823 }
1824 
1825 /* -----------------------------------------------------------------------------
1826    scavenge a chunk of memory described by a bitmap
1827    -------------------------------------------------------------------------- */
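/* Editor's note (assumption about the bitmap convention, stated hedged):
 * each bit of the large bitmap is taken to describe one word of the payload,
 * a clear bit meaning "pointer, evacuate it" and a set bit meaning
 * "non-pointer, skip it"; walk_large_bitmap applies do_evacuate to the
 * pointer words accordingly.
 */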
1828 
1829 static void
1830 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, StgWord size )
1831 {
1832     walk_large_bitmap(do_evacuate, (StgClosure **) p, large_bitmap, size, NULL);
1833 }
1834 
1835 
1836 /* -----------------------------------------------------------------------------
1837    scavenge_stack walks over a section of stack and evacuates all the
1838    objects pointed to by it.  We can use the same code for walking
1839    AP_STACKs, since these are just sections of copied stack.
1840    -------------------------------------------------------------------------- */
1841 
1842 void
1843 scavenge_stack(StgPtr p, StgPtr stack_end)
1844 {
1845   const StgRetInfoTable* info;
1846   StgWord bitmap;
1847   StgWord size;
1848 
1849   /*
1850    * Each time around this loop, we are looking at a chunk of stack
1851    * that starts with an activation record.
1852    */
1853 
1854   while (p < stack_end) {
1855     info  = get_ret_itbl((StgClosure *)p);
1856 
1857     switch (info->i.type) {
1858 
1859     case UPDATE_FRAME:
1860         // Note [upd-black-hole]
1861         //
1862         // In SMP, we can get update frames that point to indirections
1863         // when two threads evaluate the same thunk.  We do attempt to
1864         // discover this situation in threadPaused(), but it's
1865         // possible that the following sequence occurs:
1866         //
1867         //        A             B
1868         //                  enter T
1869         //     enter T
1870         //     blackhole T
1871         //                  update T
1872         //     GC
1873         //
1874         // Now T is an indirection, and the update frame is already
1875         // marked on A's stack, so we won't traverse it again in
1876         // threadPaused().  We could traverse the whole stack again
1877         // before GC, but that would be too expensive.
1878         //
1879         // Scavenging this update frame as normal would be disastrous;
1880         // the indirection will be shorted out, and the updatee would
1881         // end up pointing to the value.  The update code will then
1882         // overwrite the value, instead of the BLACKHOLE it is
1883         // expecting to write to.
1884         //
1885         // One way we could try to fix this is to detect when the
1886         // BLACKHOLE has been updated by another thread, and then
1887         // replace this update frame with a special frame that just
1888         // enters the value.  But this introduces some other
1889         // complexities:
1890         //
1891         // - we must be careful to call checkBlockingQueues() in this
1892         //   special frame, because we might otherwise miss wakeups
1893         //   for threads that blocked on the original BLACKHOLE,
1894         // - we must spot this frame when we're stripping the stack in
1895         //   raiseAsync() and raiseExceptionHelper(), and arrange to call
1896         //   checkBlockingQueues() there too.
1897         //
1898         // This is hard to get right, indeed we previously got it
1899         // wrong (see #13751).  So we now take a different approach:
1900         // always copy the BLACKHOLE, even if it is actually an
1901         // indirection.  This way we keep the update frame, we're
1902         // guaranteed to still perform the update, and check for
1903         // missed wakeups even when stripping the stack in
1904         // raiseAsync() and raiseExceptionHelper().  This is also a
1905         // little more efficient, because evacuating a known BLACKHOLE
1906         // is faster than evacuating an unknown closure.
1907         //
1908         // NOTE: for the reasons above, blackholing (either lazy or
1909         // eager) is NOT optional.  See also Note [avoiding
1910         // threadPaused] in Interpreter.c.
1911         //
1912         // There are a couple of alternative solutions:
1913         // - if we see an update frame that points to an indirection,
1914         //   arrange to call checkBlockingQueues() on that thread
1915         //   after GC.
1916         // - spot a BLOCKING_QUEUE that points to a value and
1917         //   arrange to wake it up after the GC.
1918         //
1919         // These are more difficult to implement, requiring an extra
1920         // list to be maintained during GC.  They also rely on more
1921         // subtle invariants than the solution implemented here.
1922         //
1923 
1924     {
1925         StgUpdateFrame *frame = (StgUpdateFrame *)p;
1926 
1927         evacuate_BLACKHOLE(&frame->updatee);
1928         p += sizeofW(StgUpdateFrame);
1929         continue;
1930     }
1931 
1932       // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1933     case CATCH_STM_FRAME:
1934     case CATCH_RETRY_FRAME:
1935     case ATOMICALLY_FRAME:
1936     case UNDERFLOW_FRAME:
1937     case STOP_FRAME:
1938     case CATCH_FRAME:
1939     case RET_SMALL:
1940         bitmap = BITMAP_BITS(info->i.layout.bitmap);
1941         size   = BITMAP_SIZE(info->i.layout.bitmap);
1942         // NOTE: the payload starts immediately after the info-ptr, we
1943         // don't have an StgHeader in the same sense as a heap closure.
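        // Illustrative example (editor's addition): for a RET_SMALL frame
        // with size 3 and bitmap 0b010, payload words 0 and 2 have clear
        // bits and are treated as pointers to evacuate, while word 1 has a
        // set bit and is skipped as a non-pointer.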
1944         p++;
1945         p = scavenge_small_bitmap(p, size, bitmap);
1946 
1947     follow_srt:
1948         if (major_gc && info->i.srt) {
1949             StgClosure *srt = (StgClosure*)GET_SRT(info);
1950             evacuate(&srt);
1951         }
1952         continue;
1953 
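    // Editor's note (descriptive): a RET_BCO frame is laid out as
    // [RET_BCO info pointer, BCO closure, argument words...]; the BCO itself
    // carries the bitmap describing which argument words are pointers, hence
    // the BCO is evacuated first and then BCO_BITMAP/BCO_BITMAP_SIZE are
    // consulted to scavenge the rest of the frame.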
1954     case RET_BCO: {
1955         StgBCO *bco;
1956         StgWord size;
1957 
1958         p++;
1959         evacuate((StgClosure **)p);
1960         bco = (StgBCO *)*p;
1961         p++;
1962         size = BCO_BITMAP_SIZE(bco);
1963         scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1964         p += size;
1965         continue;
1966     }
1967 
1968       // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1969     case RET_BIG:
1970     {
1971         StgWord size;
1972 
1973         size = GET_LARGE_BITMAP(&info->i)->size;
1974         p++;
1975         scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1976         p += size;
1977         // and don't forget to follow the SRT
1978         goto follow_srt;
1979     }
1980 
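    // Editor's note (descriptive, details hedged): a RET_FUN frame holds the
    // function closure plus its pending arguments; scavenge_arg_block picks
    // the bitmap for those arguments from the function's info table, e.g. a
    // small bitmap for ARG_GEN, a large one for ARG_GEN_BIG, or a standard
    // argument-pattern bitmap otherwise.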
1981     case RET_FUN:
1982     {
1983         StgRetFun *ret_fun = (StgRetFun *)p;
1984         const StgFunInfoTable *fun_info;
1985 
1986         evacuate(&ret_fun->fun);
1987         fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1988         p = scavenge_arg_block(fun_info, ret_fun->payload);
1989         goto follow_srt;
1990     }
1991 
1992     default:
1993         barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1994     }
1995   }
1996 }
1997 
1998 /*-----------------------------------------------------------------------------
1999   scavenge the large object list.
2000 
2001   gct->evac_gen_no is set (below) to the generation being scavenged;
2002   similar games are played with evac_gen as in scavenge_block() - see
2003   the comment at the top of scavenge_block().  Most large objects are
2004   (repeatedly) mutable, so most of the time evac_gen will be zero.
2005   --------------------------------------------------------------------------- */
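/* Editor's note (descriptive): large objects are never copied by the GC;
 * each one lives in its own block group, so "scavenging" here just walks the
 * object in place and moves its block descriptor from todo_large_objects to
 * the generation's scavenged_large_objects (or live_compact_objects) list.
 */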
2006 
2007 static void
2008 scavenge_large (gen_workspace *ws)
2009 {
2010     bdescr *bd;
2011     StgPtr p;
2012 
2013     gct->evac_gen_no = ws->gen->no;
2014 
2015     bd = ws->todo_large_objects;
2016 
2017     for (; bd != NULL; bd = ws->todo_large_objects) {
2018 
2019         // take this object *off* the large objects list and put it on
2020         // the scavenged large objects list.  This is so that we can
2021         // treat todo_large_objects as a stack and push new objects on
2022         // the front when evacuating.
2023         ws->todo_large_objects = bd->link;
2024 
2025         ACQUIRE_SPIN_LOCK(&ws->gen->sync);
2026         if (bd->flags & BF_COMPACT) {
2027             dbl_link_onto(bd, &ws->gen->live_compact_objects);
2028             StgCompactNFData *str = ((StgCompactNFDataBlock*)bd->start)->owner;
2029             ws->gen->n_live_compact_blocks += str->totalW / BLOCK_SIZE_W;
2030             p = (StgPtr)str;
2031         } else {
2032             dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
2033             ws->gen->n_scavenged_large_blocks += bd->blocks;
2034             p = bd->start;
2035         }
2036         RELEASE_SPIN_LOCK(&ws->gen->sync);
2037 
2038         if (scavenge_one(p)) {
2039             if (ws->gen->no > 0) {
2040                 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
2041             }
2042         }
2043 
2044         // stats
2045         gct->scanned += closure_sizeW((StgClosure*)p);
2046     }
2047 }
2048 
2049 /* ----------------------------------------------------------------------------
2050    Look for work to do.
2051 
2052    We look for the oldest gen that has either a todo block that can
2053    be scanned, or a block of work on the global queue that we can
2054    scan.
2055 
2056    It is important to take work from the *oldest* generation that
2057    has work available, because that minimizes the likelihood of
2058    evacuating objects into a young generation when they should have
2059    been eagerly promoted.  This really does make a difference (the
2060    cacheprof benchmark is one that is affected).
2061 
2062    We also want to scan our own todo block, if possible, before grabbing
2063    work from the global queue; otherwise we would steal work from the
2064    global queue and starve other threads even though there is other work
2065    we could usefully be doing ourselves.
2066    ------------------------------------------------------------------------- */
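/* Editor's summary of the search order below (descriptive, not normative):
 * for each generation from oldest to youngest we try, in order, a pending
 * nonmoving segment, a partially-scanned todo block, the large-object list,
 * and then a block from the local todo queue; only if all of that fails do
 * we (in the THREADED_RTS) attempt to steal a todo block from another thread.
 */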
2067 
2068 static bool
2069 scavenge_find_work (void)
2070 {
2071     int g;
2072     gen_workspace *ws;
2073     bool did_something, did_anything;
2074     bdescr *bd;
2075 
2076     gct->scav_find_work++;
2077 
2078     did_anything = false;
2079 
2080 loop:
2081     did_something = false;
2082     for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2083         ws = &gct->gens[g];
2084 
2085         if (ws->todo_seg != END_NONMOVING_TODO_LIST) {
2086             struct NonmovingSegment *seg = ws->todo_seg;
2087             ASSERT(seg->todo_link);
2088             ws->todo_seg = seg->todo_link;
2089             seg->todo_link = NULL;
2090             scavengeNonmovingSegment(seg);
2091             did_something = true;
2092             break;
2093         }
2094 
2095         gct->scan_bd = NULL;
2096 
2097         // If we have a scan block with some work to do,
2098         // scavenge everything up to the free pointer.
2099         if (ws->todo_bd->u.scan < ws->todo_free)
2100         {
2101             scavenge_block(ws->todo_bd);
2102             did_something = true;
2103             break;
2104         }
2105 
2106         // If we have any large objects to scavenge, do them now.
2107         if (ws->todo_large_objects) {
2108             scavenge_large(ws);
2109             did_something = true;
2110             break;
2111         }
2112 
2113         if ((bd = grab_local_todo_block(ws)) != NULL) {
2114             scavenge_block(bd);
2115             did_something = true;
2116             break;
2117         }
2118     }
2119 
2120     if (did_something) {
2121         did_anything = true;
2122         goto loop;
2123     }
2124 
2125 #if defined(THREADED_RTS)
2126     if (work_stealing) {
2127         // look for work to steal
2128         for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
2129             if ((bd = steal_todo_block(g)) != NULL) {
2130                 scavenge_block(bd);
2131                 did_something = true;
2132                 break;
2133             }
2134         }
2135 
2136         if (did_something) {
2137             did_anything = true;
2138             goto loop;
2139         }
2140     }
2141 #endif
2142 
2143     // only return when there is no more work to do
2144 
2145     return did_anything;
2146 }
2147 
2148 /* ----------------------------------------------------------------------------
2149    Scavenge until we can't find anything more to scavenge.
2150    ------------------------------------------------------------------------- */
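/* Editor's note (descriptive): each pass of the loop below drains the static
 * object list (major GC only), the mark stack of a compacted generation, and
 * then whatever scavenge_find_work() can find; the loop repeats as long as
 * the mark stack or scavenge_find_work() produced work, and the static list
 * is re-checked on every pass because evacuation can add to it.
 */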
2151 
2152 void
2153 scavenge_loop(void)
2154 {
2155     bool work_to_do;
2156 
2157 loop:
2158     work_to_do = false;
2159 
2160     // scavenge static objects
2161     if (major_gc && gct->static_objects != END_OF_STATIC_OBJECT_LIST) {
2162         IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
2163         scavenge_static();
2164     }
2165 
2166     // scavenge objects in compacted generation
2167     if (mark_stack_bd != NULL && !mark_stack_empty()) {
2168         scavenge_mark_stack();
2169         work_to_do = true;
2170     }
2171 
2172     // Order is important here: we want to deal in full blocks as
2173     // much as possible, so go for global work in preference to
2174     // local work.  Only if all the global work has been exhausted
2175     // do we start scavenging the fragments of blocks in the local
2176     // workspaces.
2177     if (scavenge_find_work()) goto loop;
2178 
2179     if (work_to_do) goto loop;
2180 }
2181