/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2018
 *
 * Non-moving garbage collector and allocator: Mark phase
 *
 * ---------------------------------------------------------------------------*/
8
9 #pragma once
10
11 #include "Hash.h"
12 #include "Task.h"
13 #include "NonMoving.h"
14
15 #include "BeginPrivate.h"
16
17 #include "Hash.h"
18
/* Tag values stored in the low bits of a MarkQueueEnt's first word,
 * identifying which member of the entry's union is active. */
enum EntryType {
    NULL_ENTRY = 0,    // terminator: marks the end of the queue
    MARK_CLOSURE = 1,  // entry is a closure to be marked
    MARK_ARRAY = 2     // entry is a slice of a mutable array to be marked
};
24
/* Note [Origin references in the nonmoving collector]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * To implement indirection short-cutting and the selector optimisation the
 * collector needs to know where it found references, so it can update the
 * reference if it later turns out that it points to an indirection. For this
 * reason, each mark queue entry contains two things:
 *
 * - a pointer to the object to be marked (p), and
 *
 * - a pointer to the field where we found the reference (origin)
 *
 * Note that the origin pointer is an interior pointer: it points not to a
 * valid closure (with info table pointer) but rather to a field inside a closure.
 * Since such references can't be safely scavenged we establish the invariant
 * that the origin pointer may only point to a field of an object living in the
 * nonmoving heap, where no scavenging is needed.
 *
 */
44
45 typedef struct {
46 // Which kind of mark queue entry we have is determined by the tag bits of
47 // the first word (using the tags defined by the EntryType enum).
48 union {
49 // A null_entry indicates the end of the queue.
50 struct {
51 void *p; // must be NULL
52 } null_entry;
53 struct {
54 StgClosure *p; // the object to be marked
55 StgClosure **origin; // field where this reference was found.
56 // See Note [Origin references in the nonmoving collector]
57 } mark_closure;
58 struct {
59 const StgMutArrPtrs *array;
60 StgWord start_index; // start index is shifted to the left by 16 bits
61 } mark_array;
62 };
63 } MarkQueueEnt;
64
nonmovingMarkQueueEntryType(MarkQueueEnt * ent)65 INLINE_HEADER enum EntryType nonmovingMarkQueueEntryType(MarkQueueEnt *ent)
66 {
67 uintptr_t tag = (uintptr_t) ent->null_entry.p & TAG_MASK;
68 ASSERT(tag <= MARK_ARRAY);
69 return tag;
70 }
71
72 typedef struct {
73 // index of first *unused* queue entry
74 uint32_t head;
75
76 MarkQueueEnt entries[];
77 } MarkQueueBlock;
78
79 // How far ahead in mark queue to prefetch?
80 #define MARK_PREFETCH_QUEUE_DEPTH 5
81
82 /* The mark queue is not capable of concurrent read or write.
83 *
84 * invariants:
85 *
86 * a. top == blocks->start;
87 * b. there is always a valid MarkQueueChunk, although it may be empty
88 * (e.g. top->head == 0).
89 */
90 typedef struct MarkQueue_ {
91 // A singly link-list of blocks, each containing a MarkQueueChunk.
92 bdescr *blocks;
93
94 // Cached value of blocks->start.
95 MarkQueueBlock *top;
96
97 // Is this a mark queue or a capability-local update remembered set?
98 bool is_upd_rem_set;
99
100 #if MARK_PREFETCH_QUEUE_DEPTH > 0
101 // A ring-buffer of entries which we will mark next
102 MarkQueueEnt prefetch_queue[MARK_PREFETCH_QUEUE_DEPTH];
103 // The first free slot in prefetch_queue.
104 uint8_t prefetch_head;
105 #endif
106 } MarkQueue;
107
108 /* While it shares its representation with MarkQueue, UpdRemSet differs in
109 * behavior when pushing; namely full chunks are immediately pushed to the
110 * global update remembered set, not accumulated into a chain. We make this
111 * distinction apparent in the types.
112 */
113 typedef struct {
114 MarkQueue queue;
115 } UpdRemSet;
116
// Number of blocks to allocate for a mark queue
#define MARK_QUEUE_BLOCKS 16

// The length of MarkQueueBlock.entries
#define MARK_QUEUE_BLOCK_ENTRIES ((MARK_QUEUE_BLOCKS * BLOCK_SIZE - sizeof(MarkQueueBlock)) / sizeof(MarkQueueEnt))
122
123 extern bdescr *nonmoving_large_objects, *nonmoving_marked_large_objects,
124 *nonmoving_compact_objects, *nonmoving_marked_compact_objects;
125 extern memcount n_nonmoving_large_blocks, n_nonmoving_marked_large_blocks,
126 n_nonmoving_compact_blocks, n_nonmoving_marked_compact_blocks;
127
128 extern StgTSO *nonmoving_old_threads;
129 extern StgWeak *nonmoving_old_weak_ptr_list;
130 extern StgTSO *nonmoving_threads;
131 extern StgWeak *nonmoving_weak_ptr_list;
132
133 #if defined(DEBUG)
134 extern StgIndStatic *debug_caf_list_snapshot;
135 #endif
136
137 extern MarkQueue *current_mark_queue;
138 extern bdescr *upd_rem_set_block_list;
139
140
141 void nonmovingMarkInitUpdRemSet(void);
142
143 void init_upd_rem_set(UpdRemSet *rset);
144 void reset_upd_rem_set(UpdRemSet *rset);
145 void updateRemembSetPushClosure(Capability *cap, StgClosure *p);
146 void updateRemembSetPushThunk(Capability *cap, StgThunk *p);
147 void updateRemembSetPushTSO(Capability *cap, StgTSO *tso);
148 void updateRemembSetPushStack(Capability *cap, StgStack *stack);
149
150 #if defined(THREADED_RTS)
151 void nonmovingFlushCapUpdRemSetBlocks(Capability *cap);
152 void nonmovingBeginFlush(Task *task);
153 bool nonmovingWaitForFlush(void);
154 void nonmovingFinishFlush(Task *task);
155 #endif
156
157 void markQueueAddRoot(MarkQueue* q, StgClosure** root);
158
159 void initMarkQueue(MarkQueue *queue);
160 void freeMarkQueue(MarkQueue *queue);
161 void nonmovingMark(struct MarkQueue_ *restrict queue);
162
163 bool nonmovingTidyWeaks(struct MarkQueue_ *queue);
164 void nonmovingTidyThreads(void);
165 void nonmovingMarkDeadWeaks(struct MarkQueue_ *queue, StgWeak **dead_weak_ptr_list);
166 void nonmovingResurrectThreads(struct MarkQueue_ *queue, StgTSO **resurrected_threads);
167 bool nonmovingIsAlive(StgClosure *p);
168 void nonmovingMarkDeadWeak(struct MarkQueue_ *queue, StgWeak *w);
169 void nonmovingMarkLiveWeak(struct MarkQueue_ *queue, StgWeak *w);
170 void nonmovingAddUpdRemSetBlocks(struct MarkQueue_ *rset);
171
172 void markQueuePush(MarkQueue *q, const MarkQueueEnt *ent);
173 void markQueuePushClosureGC(MarkQueue *q, StgClosure *p);
174 void markQueuePushClosure(MarkQueue *q,
175 StgClosure *p,
176 StgClosure **origin);
177 void markQueuePushClosure_(MarkQueue *q, StgClosure *p);
178 void markQueuePushThunkSrt(MarkQueue *q, const StgInfoTable *info);
179 void markQueuePushFunSrt(MarkQueue *q, const StgInfoTable *info);
180 void markQueuePushArray(MarkQueue *q, const StgMutArrPtrs *array, StgWord start_index);
181 void updateRemembSetPushThunkEager(Capability *cap,
182 const StgThunkInfoTable *orig_info,
183 StgThunk *thunk);
184
markQueueIsEmpty(MarkQueue * q)185 INLINE_HEADER bool markQueueIsEmpty(MarkQueue *q)
186 {
187 return (q->blocks == NULL) || (q->top->head == 0 && q->blocks->link == NULL);
188 }
189
#if defined(DEBUG)

// Debug-only pretty-printers for mark queue contents.
void printMarkQueueEntry(MarkQueueEnt *ent);
void printMarkQueue(MarkQueue *q);

#endif
196
197 #include "EndPrivate.h"
198