1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-1999
4 *
5 * Weak pointers / finalizers
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #include "PosixSource.h"
10 #include "Rts.h"
11 #include "RtsAPI.h"
12
13 #include "RtsUtils.h"
14 #include "Weak.h"
15 #include "Schedule.h"
16 #include "Prelude.h"
17 #include "ThreadLabels.h"
18 #include "Trace.h"
19
// List of dead weak pointers collected by the last GC. Appended to by
// scheduleFinalizers() after each GC, and consumed incrementally (in
// chunks) by runSomeFinalizers(). Protected by finalizer_lock below.
static StgWeak *finalizer_list = NULL;

// Count of the above list. Updated with SEQ_CST_ADD in
// scheduleFinalizers()/runSomeFinalizers(); read with a relaxed load
// for the fast-path emptiness check.
static uint32_t n_finalizers = 0;
25
26 void
runCFinalizers(StgCFinalizerList * list)27 runCFinalizers(StgCFinalizerList *list)
28 {
29 StgCFinalizerList *head;
30 for (head = list;
31 (StgClosure *)head != &stg_NO_FINALIZER_closure;
32 head = (StgCFinalizerList *)head->link)
33 {
34 if (head->flag)
35 ((void (*)(void *, void *))head->fptr)(head->eptr, head->ptr);
36 else
37 ((void (*)(void *))head->fptr)(head->ptr);
38 }
39 }
40
41 void
runAllCFinalizers(StgWeak * list)42 runAllCFinalizers(StgWeak *list)
43 {
44 StgWeak *w;
45 Task *task;
46
47 task = myTask();
48 if (task != NULL) {
49 task->running_finalizers = true;
50 }
51
52 for (w = list; w; w = w->link) {
53 // We need to filter out DEAD_WEAK objects, because it's not guaranteed
54 // that the list will not have them when shutting down.
55 // They only get filtered out during GC for the generation they
56 // belong to.
57 // If there's no major GC between the time that the finalizer for the
58 // object from the oldest generation is manually called and shutdown
59 // we end up running the same finalizer twice. See #7170.
60 const StgInfoTable *winfo = ACQUIRE_LOAD(&w->header.info);
61 if (winfo != &stg_DEAD_WEAK_info) {
62 runCFinalizers((StgCFinalizerList *)w->cfinalizers);
63 }
64 }
65
66 if (task != NULL) {
67 task->running_finalizers = false;
68 }
69 }
70
71 /*
72 * scheduleFinalizers() is called on the list of weak pointers found
73 * to be dead after a garbage collection. It overwrites each object
74 * with DEAD_WEAK, and creates a new thread to run the pending finalizers.
75 *
76 * This function is called just after GC. The weak pointers on the
77 * argument list are those whose keys were found to be not reachable,
78 * however the value and finalizer fields have by now been marked live.
79 * The weak pointer object itself may not be alive - i.e. we may be
80 * looking at either an object in from-space or one in to-space. It
81 * doesn't really matter either way.
82 *
83 * Pre-condition: sched_mutex _not_ held.
84 */
85
void
scheduleFinalizers(Capability *cap, StgWeak *list)
{
    StgWeak *w;
    StgTSO *t;
    StgMutArrPtrs *arr;
    StgWord size;
    uint32_t n, i;

    // n_finalizers is not necessarily zero under non-moving collection
    // because non-moving collector does not wait for the list to be consumed
    // (by doIdleGcWork()) before appending the list with more finalizers.
    ASSERT(RtsFlags.GcFlags.useNonmoving || SEQ_CST_LOAD(&n_finalizers) == 0);

    // Append finalizer_list with the new list. TODO: Perhaps cache tail of the
    // list for faster append. NOTE: We can't append `list` here! Otherwise we
    // end up traversing already visited weaks in the loops below.
    StgWeak **tl = &finalizer_list;
    while (*tl) {
        tl = &(*tl)->link;
    }
    // Publish the new tail; runSomeFinalizers() reads this list
    // concurrently from other capabilities.
    SEQ_CST_STORE(tl, list);

    // Traverse the list and
    // * count the number of Haskell finalizers
    // * overwrite all the weak pointers with DEAD_WEAK
    n = 0;   // weaks with a Haskell finalizer (batch array size)
    i = 0;   // every weak on the list (added to n_finalizers below)
    for (w = list; w; w = w->link) {
        // Better not be a DEAD_WEAK at this stage; the garbage
        // collector removes DEAD_WEAKs from the weak pointer list.
        ASSERT(w->header.info != &stg_DEAD_WEAK_info);

        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            n++;
        }

        // Remember the length of the list, for runSomeFinalizers() below
        i++;

#if defined(PROFILING)
        // A weak pointer is inherently used, so we do not need to call
        // LDV_recordDead().
        //
        // Furthermore, when PROFILING is turned on, dead weak
        // pointers are exactly as large as weak pointers, so there is
        // no need to fill the slop, either. See stg_DEAD_WEAK_info
        // in StgMiscClosures.cmm.
#endif

        // We must overwrite the header with DEAD_WEAK, so that if
        // there's a later call to finalizeWeak# on this weak pointer,
        // we don't run the finalizer again.
        SET_HDR(w, &stg_DEAD_WEAK_info, w->header.prof.ccs);
    }

    SEQ_CST_ADD(&n_finalizers, i);

    // No Haskell finalizers to run?
    if (n == 0) return;

    debugTrace(DEBUG_weak, "weak: batching %d finalizers", n);

    // Pack the n Haskell finalizers into a frozen mutable array and
    // hand the batch to a fresh thread running runFinalizerBatch.
    size = n + mutArrPtrsCardTableSize(n);
    arr = (StgMutArrPtrs *)allocate(cap, sizeofW(StgMutArrPtrs) + size);
    TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
    // No write barrier needed here; this array is only going to referred to by this core.
    SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_CLEAN_info, CCS_SYSTEM);
    arr->ptrs = n;
    arr->size = size;

    n = 0;
    for (w = list; w; w = w->link) {
        if (w->finalizer != &stg_NO_FINALIZER_closure) {
            arr->payload[n] = w->finalizer;
            n++;
        }
    }
    // set all the cards to 1
    for (i = n; i < size; i++) {
        arr->payload[i] = (StgClosure *)(W_)(-1);
    }

    // Build the application: runFinalizerBatch n arr, and run it in a
    // new IO thread on this capability.
    t = createIOThread(cap,
                       RtsFlags.GcFlags.initialStkSize,
                       rts_apply(cap,
                           rts_apply(cap,
                               (StgClosure *)runFinalizerBatch_closure,
                               rts_mkInt(cap,n)),
                           (StgClosure *)arr)
        );

    scheduleThread(cap,t);
    labelThread(cap, t, "weak finalizer thread");
}
181
182 /* -----------------------------------------------------------------------------
183 Incrementally running C finalizers
184
185 The GC detects all the dead finalizers, but we don't want to run
186 them during the GC because that increases the time that the runtime
187 is paused.
188
189 What options are there?
190
191 1. Parallelise running the C finalizers across the GC threads
192 - doesn't solve the pause problem, just reduces it (maybe by a lot)
193
194 2. Make a Haskell thread to run the C finalizers, like we do for
195 Haskell finalizers.
196 + scheduling is handled for us
197 - no guarantee that we'll process finalizers in a timely manner
198
199 3. Run finalizers when any capability is idle.
200 + reduces pause to 0
201 - requires scheduler modifications
202 - if the runtime is busy, finalizers wait until the next GC
203
204 4. like (3), but also run finalizers incrementally between GCs.
205 - reduces the delay to run finalizers compared with (3)
206
207 For now we do (3). It would be easy to do (4) later by adding a
208 call to doIdleGCWork() in the scheduler loop, but I haven't found
209 that necessary so far.
210
211 -------------------------------------------------------------------------- */
212
// Run this many finalizers before returning from
// runSomeFinalizers(). This is so that we only tie up the capability
// for a short time, and respond quickly if new work becomes
// available.
static const int32_t finalizer_chunk = 100;

// non-zero if a thread is already in runSomeFinalizers(). This
// protects the globals finalizer_list and n_finalizers.
// Acquired with cas() and released with RELEASE_STORE.
static volatile StgWord finalizer_lock = 0;
222
223 //
224 // Run some C finalizers. Returns true if there's more work to do.
225 //
bool runSomeFinalizers(bool all)
{
    // Fast path: nothing pending. A relaxed read suffices; a stale
    // zero just means we try again on the next call.
    if (RELAXED_LOAD(&n_finalizers) == 0)
        return false;

    if (cas(&finalizer_lock, 0, 1) != 0) {
        // another capability is doing the work, it's safe to say
        // there's nothing to do, because the thread already in
        // runSomeFinalizers() will call in again.
        return false;
    }

    debugTrace(DEBUG_sched, "running C finalizers, %d remaining", n_finalizers);

    Task *task = myTask();
    if (task != NULL) {
        task->running_finalizers = true;
    }

    // Walk the list, running C finalizers; stop after finalizer_chunk
    // entries unless `all` was requested.
    StgWeak *w = finalizer_list;
    int32_t count = 0;
    while (w != NULL) {
        runCFinalizers((StgCFinalizerList *)w->cfinalizers);
        w = w->link;
        ++count;
        if (!all && count >= finalizer_chunk) break;
    }

    // Store the new head before decrementing the count, so a reader
    // that sees a non-zero count also sees the remaining list.
    RELAXED_STORE(&finalizer_list, w);
    SEQ_CST_ADD(&n_finalizers, -count);

    if (task != NULL) {
        task->running_finalizers = false;
    }

    debugTrace(DEBUG_sched, "ran %d C finalizers", count);
    bool ret = n_finalizers != 0;
    // Release the lock after computing the return value.
    RELEASE_STORE(&finalizer_lock, 0);
    return ret;
}
266