1 /*
2 * Mark-and-sweep garbage collection.
3 */
4
5 #include "duk_internal.h"
6
7 DUK_LOCAL_DECL void duk__mark_heaphdr(duk_heap *heap, duk_heaphdr *h);
8 DUK_LOCAL_DECL void duk__mark_heaphdr_nonnull(duk_heap *heap, duk_heaphdr *h);
9 DUK_LOCAL_DECL void duk__mark_tval(duk_heap *heap, duk_tval *tv);
10 DUK_LOCAL_DECL void duk__mark_tvals(duk_heap *heap, duk_tval *tv, duk_idx_t count);
11
12 /*
13 * Marking functions for heap types: mark children recursively.
14 */
15
16 DUK_LOCAL void duk__mark_hstring(duk_heap *heap, duk_hstring *h) {
17 DUK_UNREF(heap);
18 DUK_UNREF(h);
19
20 DUK_DDD(DUK_DDDPRINT("duk__mark_hstring: %p", (void *) h));
21 DUK_ASSERT(h);
22 DUK_HSTRING_ASSERT_VALID(h);
23
24 /* nothing to process */
25 }
26
27 DUK_LOCAL void duk__mark_hobject(duk_heap *heap, duk_hobject *h) {
28 duk_uint_fast32_t i;
29
30 DUK_DDD(DUK_DDDPRINT("duk__mark_hobject: %p", (void *) h));
31
32 DUK_ASSERT(h);
33 DUK_HOBJECT_ASSERT_VALID(h);
34
35 /* XXX: use advancing pointers instead of index macros -> faster and smaller? */
36
37 for (i = 0; i < (duk_uint_fast32_t) DUK_HOBJECT_GET_ENEXT(h); i++) {
38 duk_hstring *key = DUK_HOBJECT_E_GET_KEY(heap, h, i);
39 if (key == NULL) {
40 continue;
41 }
42 duk__mark_heaphdr_nonnull(heap, (duk_heaphdr *) key);
43 if (DUK_HOBJECT_E_SLOT_IS_ACCESSOR(heap, h, i)) {
44 duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->a.get);
45 duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->a.set);
46 } else {
47 duk__mark_tval(heap, &DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->v);
48 }
49 }
50
51 for (i = 0; i < (duk_uint_fast32_t) DUK_HOBJECT_GET_ASIZE(h); i++) {
52 duk__mark_tval(heap, DUK_HOBJECT_A_GET_VALUE_PTR(heap, h, i));
53 }
54
55 /* Hash part is a 'weak reference' and does not contribute. */
56
57 duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_GET_PROTOTYPE(heap, h));
58
59 /* Fast path for objects which don't have a subclass struct, or have a
60 * subclass struct but nothing that needs marking in the subclass struct.
61 */
62 if (DUK_HOBJECT_HAS_FASTREFS(h)) {
63 DUK_ASSERT(DUK_HOBJECT_ALLOWS_FASTREFS(h));
64 return;
65 }
66 DUK_ASSERT(DUK_HOBJECT_PROHIBITS_FASTREFS(h));
67
68 /* XXX: reorg, more common first */
69 if (DUK_HOBJECT_IS_COMPFUNC(h)) {
70 duk_hcompfunc *f = (duk_hcompfunc *) h;
71 duk_tval *tv, *tv_end;
72 duk_hobject **fn, **fn_end;
73
74 DUK_HCOMPFUNC_ASSERT_VALID(f);
75
76 /* 'data' is reachable through every compiled function which
77 * contains a reference.
78 */
79
80 duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HCOMPFUNC_GET_DATA(heap, f));
81 duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HCOMPFUNC_GET_LEXENV(heap, f));
82 duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HCOMPFUNC_GET_VARENV(heap, f));
83
84 if (DUK_HCOMPFUNC_GET_DATA(heap, f) != NULL) {
85 tv = DUK_HCOMPFUNC_GET_CONSTS_BASE(heap, f);
86 tv_end = DUK_HCOMPFUNC_GET_CONSTS_END(heap, f);
87 while (tv < tv_end) {
88 duk__mark_tval(heap, tv);
89 tv++;
90 }
91
92 fn = DUK_HCOMPFUNC_GET_FUNCS_BASE(heap, f);
93 fn_end = DUK_HCOMPFUNC_GET_FUNCS_END(heap, f);
94 while (fn < fn_end) {
95 duk__mark_heaphdr_nonnull(heap, (duk_heaphdr *) *fn);
96 fn++;
97 }
98 } else {
99 /* May happen in some out-of-memory corner cases. */
100 DUK_D(DUK_DPRINT("duk_hcompfunc 'data' is NULL, skipping marking"));
101 }
102 } else if (DUK_HOBJECT_IS_DECENV(h)) {
103 duk_hdecenv *e = (duk_hdecenv *) h;
104 DUK_HDECENV_ASSERT_VALID(e);
105 duk__mark_heaphdr(heap, (duk_heaphdr *) e->thread);
106 duk__mark_heaphdr(heap, (duk_heaphdr *) e->varmap);
107 } else if (DUK_HOBJECT_IS_OBJENV(h)) {
108 duk_hobjenv *e = (duk_hobjenv *) h;
109 DUK_HOBJENV_ASSERT_VALID(e);
110 duk__mark_heaphdr_nonnull(heap, (duk_heaphdr *) e->target);
111 #if defined(DUK_USE_BUFFEROBJECT_SUPPORT)
112 } else if (DUK_HOBJECT_IS_BUFOBJ(h)) {
113 duk_hbufobj *b = (duk_hbufobj *) h;
114 DUK_HBUFOBJ_ASSERT_VALID(b);
115 duk__mark_heaphdr(heap, (duk_heaphdr *) b->buf);
116 duk__mark_heaphdr(heap, (duk_heaphdr *) b->buf_prop);
117 #endif /* DUK_USE_BUFFEROBJECT_SUPPORT */
118 } else if (DUK_HOBJECT_IS_BOUNDFUNC(h)) {
119 duk_hboundfunc *f = (duk_hboundfunc *) (void *) h;
120 DUK_HBOUNDFUNC_ASSERT_VALID(f);
121 duk__mark_tval(heap, &f->target);
122 duk__mark_tval(heap, &f->this_binding);
123 duk__mark_tvals(heap, f->args, f->nargs);
124 #if defined(DUK_USE_ES6_PROXY)
125 } else if (DUK_HOBJECT_IS_PROXY(h)) {
126 duk_hproxy *p = (duk_hproxy *) h;
127 DUK_HPROXY_ASSERT_VALID(p);
128 duk__mark_heaphdr_nonnull(heap, (duk_heaphdr *) p->target);
129 duk__mark_heaphdr_nonnull(heap, (duk_heaphdr *) p->handler);
130 #endif /* DUK_USE_ES6_PROXY */
131 } else if (DUK_HOBJECT_IS_THREAD(h)) {
132 duk_hthread *t = (duk_hthread *) h;
133 duk_activation *act;
134 duk_tval *tv;
135
136 DUK_HTHREAD_ASSERT_VALID(t);
137
138 tv = t->valstack;
139 while (tv < t->valstack_top) {
140 duk__mark_tval(heap, tv);
141 tv++;
142 }
143
144 for (act = t->callstack_curr; act != NULL; act = act->parent) {
145 duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_ACT_GET_FUNC(act));
146 duk__mark_heaphdr(heap, (duk_heaphdr *) act->var_env);
147 duk__mark_heaphdr(heap, (duk_heaphdr *) act->lex_env);
148 #if defined(DUK_USE_NONSTD_FUNC_CALLER_PROPERTY)
149 duk__mark_heaphdr(heap, (duk_heaphdr *) act->prev_caller);
150 #endif
151 #if 0 /* nothing now */
152 for (cat = act->cat; cat != NULL; cat = cat->parent) {
153 }
154 #endif
155 }
156
157 duk__mark_heaphdr(heap, (duk_heaphdr *) t->resumer);
158
159 for (i = 0; i < DUK_NUM_BUILTINS; i++) {
160 duk__mark_heaphdr(heap, (duk_heaphdr *) t->builtins[i]);
161 }
162 } else {
163 /* We may come here if the object should have a FASTREFS flag
164 * but it's missing for some reason. Assert for never getting
165 * here; however, other than performance, this is harmless.
166 */
167 DUK_D(DUK_DPRINT("missing FASTREFS flag for: %!iO", h));
168 DUK_ASSERT(0);
169 }
170 }
171
172 /* Mark any duk_heaphdr type. Recursion tracking happens only here. */
173 DUK_LOCAL void duk__mark_heaphdr(duk_heap *heap, duk_heaphdr *h) {
174 DUK_DDD(DUK_DDDPRINT("duk__mark_heaphdr %p, type %ld",
175 (void *) h,
176 (h != NULL ? (long) DUK_HEAPHDR_GET_TYPE(h) : (long) -1)));
177
178 /* XXX: add non-null variant? */
179 if (h == NULL) {
180 return;
181 }
182
183 DUK_HEAPHDR_ASSERT_VALID(h);
184 DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(h) || DUK_HEAPHDR_HAS_REACHABLE(h));
185
186 #if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
187 if (!DUK_HEAPHDR_HAS_READONLY(h)) {
188 h->h_assert_refcount++; /* Comparison refcount: bump even if already reachable. */
189 }
190 #endif
191 if (DUK_HEAPHDR_HAS_REACHABLE(h)) {
192 DUK_DDD(DUK_DDDPRINT("already marked reachable, skip"));
193 return;
194 }
195 #if defined(DUK_USE_ROM_OBJECTS)
196 /* READONLY objects always have REACHABLE set, so the check above
197 * will prevent READONLY objects from being marked here.
198 */
199 DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(h));
200 #endif
201
202 DUK_HEAPHDR_SET_REACHABLE(h);
203
204 if (heap->ms_recursion_depth >= DUK_USE_MARK_AND_SWEEP_RECLIMIT) {
205 DUK_D(DUK_DPRINT("mark-and-sweep recursion limit reached, marking as temproot: %p", (void *) h));
206 DUK_HEAP_SET_MARKANDSWEEP_RECLIMIT_REACHED(heap);
207 DUK_HEAPHDR_SET_TEMPROOT(h);
208 return;
209 }
210
211 heap->ms_recursion_depth++;
212 DUK_ASSERT(heap->ms_recursion_depth != 0); /* Wrap. */
213
214 switch (DUK_HEAPHDR_GET_TYPE(h)) {
215 case DUK_HTYPE_STRING:
216 duk__mark_hstring(heap, (duk_hstring *) h);
217 break;
218 case DUK_HTYPE_OBJECT:
219 duk__mark_hobject(heap, (duk_hobject *) h);
220 break;
221 case DUK_HTYPE_BUFFER:
222 /* nothing to mark */
223 break;
224 default:
225 DUK_D(DUK_DPRINT("attempt to mark heaphdr %p with invalid htype %ld", (void *) h, (long) DUK_HEAPHDR_GET_TYPE(h)));
226 DUK_UNREACHABLE();
227 }
228
229 DUK_ASSERT(heap->ms_recursion_depth > 0);
230 heap->ms_recursion_depth--;
231 }
232
233 DUK_LOCAL void duk__mark_tval(duk_heap *heap, duk_tval *tv) {
234 DUK_DDD(DUK_DDDPRINT("duk__mark_tval %p", (void *) tv));
235 if (tv == NULL) {
236 return;
237 }
238 DUK_TVAL_ASSERT_VALID(tv);
239 if (DUK_TVAL_IS_HEAP_ALLOCATED(tv)) {
240 duk_heaphdr *h;
241 h = DUK_TVAL_GET_HEAPHDR(tv);
242 DUK_ASSERT(h != NULL);
243 duk__mark_heaphdr_nonnull(heap, h);
244 }
245 }
246
247 DUK_LOCAL void duk__mark_tvals(duk_heap *heap, duk_tval *tv, duk_idx_t count) {
248 DUK_ASSERT(count == 0 || tv != NULL);
249
250 while (count-- > 0) {
251 DUK_TVAL_ASSERT_VALID(tv);
252 if (DUK_TVAL_IS_HEAP_ALLOCATED(tv)) {
253 duk_heaphdr *h;
254 h = DUK_TVAL_GET_HEAPHDR(tv);
255 DUK_ASSERT(h != NULL);
256 duk__mark_heaphdr_nonnull(heap, h);
257 }
258 tv++;
259 }
260 }
261
262 /* Mark any duk_heaphdr type, caller guarantees a non-NULL pointer. */
263 DUK_LOCAL void duk__mark_heaphdr_nonnull(duk_heap *heap, duk_heaphdr *h) {
264 /* For now, just call the generic handler. Change when call sites
265 * are changed too.
266 */
267 duk__mark_heaphdr(heap, h);
268 }
269
270 /*
271 * Mark the heap.
272 */
273
274 DUK_LOCAL void duk__mark_roots_heap(duk_heap *heap) {
275 duk_small_uint_t i;
276
277 DUK_DD(DUK_DDPRINT("duk__mark_roots_heap: %p", (void *) heap));
278
279 duk__mark_heaphdr(heap, (duk_heaphdr *) heap->heap_thread);
280 duk__mark_heaphdr(heap, (duk_heaphdr *) heap->heap_object);
281
282 for (i = 0; i < DUK_HEAP_NUM_STRINGS; i++) {
283 duk_hstring *h = DUK_HEAP_GET_STRING(heap, i);
284 duk__mark_heaphdr(heap, (duk_heaphdr *) h);
285 }
286
287 duk__mark_tval(heap, &heap->lj.value1);
288 duk__mark_tval(heap, &heap->lj.value2);
289
290 #if defined(DUK_USE_DEBUGGER_SUPPORT)
291 for (i = 0; i < heap->dbg_breakpoint_count; i++) {
292 duk__mark_heaphdr(heap, (duk_heaphdr *) heap->dbg_breakpoints[i].filename);
293 }
294 #endif
295 }
296
297 /*
298 * Mark unreachable, finalizable objects.
299 *
300 * Such objects will be moved aside and their finalizers run later. They
301 * have to be treated as reachability roots for their properties etc to
302 * remain allocated. This marking is only done for unreachable values which
303 * would be swept later.
304 *
305 * Objects are first marked FINALIZABLE and only then marked as reachability
306 * roots; otherwise circular references might be handled inconsistently.
307 */
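/* Illustrative example of why the two-pass order matters: suppose
 * unreachable objects A and B both have finalizers and A references B.
 * If each candidate were marked as a reachability root immediately when
 * found, B could already be REACHABLE (through A) by the time the scan
 * reaches it, so B would never get its FINALIZABLE flag; whether that
 * happens would depend on heap list order. Setting FINALIZABLE for all
 * candidates first, and only then marking them as roots, makes the
 * outcome order independent.
 */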
308
309 #if defined(DUK_USE_FINALIZER_SUPPORT)
310 DUK_LOCAL void duk__mark_finalizable(duk_heap *heap) {
311 duk_heaphdr *hdr;
312 duk_size_t count_finalizable = 0;
313
314 DUK_DD(DUK_DDPRINT("duk__mark_finalizable: %p", (void *) heap));
315
316 DUK_ASSERT(heap->heap_thread != NULL);
317
318 hdr = heap->heap_allocated;
319 while (hdr != NULL) {
320 /* A finalizer is looked up from the object and up its
321 * prototype chain (which allows inherited finalizers).
322 * The finalizer is checked for using a duk_hobject flag
323 * which is kept in sync with the presence and callability
324 * of a _Finalizer hidden symbol.
325 */
326
327 if (!DUK_HEAPHDR_HAS_REACHABLE(hdr) &&
328 DUK_HEAPHDR_IS_OBJECT(hdr) &&
329 !DUK_HEAPHDR_HAS_FINALIZED(hdr) &&
330 DUK_HOBJECT_HAS_FINALIZER_FAST(heap, (duk_hobject *) hdr)) {
331 /* heaphdr:
332 * - is not reachable
333 * - is an object
334 * - is not a finalized object waiting for rescue/keep decision
335 * - has a finalizer
336 */
337
338 DUK_DD(DUK_DDPRINT("unreachable heap object will be "
339 "finalized -> mark as finalizable "
340 "and treat as a reachability root: %p",
341 (void *) hdr));
342 DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(hdr));
343 DUK_HEAPHDR_SET_FINALIZABLE(hdr);
344 count_finalizable++;
345 }
346
347 hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
348 }
349
350 if (count_finalizable == 0) {
351 return;
352 }
353
354 DUK_DD(DUK_DDPRINT("marked %ld heap objects as finalizable, now mark them reachable",
355 (long) count_finalizable));
356
357 hdr = heap->heap_allocated;
358 while (hdr != NULL) {
359 if (DUK_HEAPHDR_HAS_FINALIZABLE(hdr)) {
360 duk__mark_heaphdr_nonnull(heap, hdr);
361 }
362
363 hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
364 }
365
366 /* Caller will finish the marking process if we hit a recursion limit. */
367 }
368 #endif /* DUK_USE_FINALIZER_SUPPORT */
369
370 /*
371 * Mark objects on finalize_list.
372 */
373
374 #if defined(DUK_USE_FINALIZER_SUPPORT)
375 DUK_LOCAL void duk__mark_finalize_list(duk_heap *heap) {
376 duk_heaphdr *hdr;
377 #if defined(DUK_USE_DEBUG)
378 duk_size_t count_finalize_list = 0;
379 #endif
380
381 DUK_DD(DUK_DDPRINT("duk__mark_finalize_list: %p", (void *) heap));
382
383 hdr = heap->finalize_list;
384 while (hdr != NULL) {
385 duk__mark_heaphdr_nonnull(heap, hdr);
386 hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
387 #if defined(DUK_USE_DEBUG)
388 count_finalize_list++;
389 #endif
390 }
391
392 #if defined(DUK_USE_DEBUG)
393 if (count_finalize_list > 0) {
394 DUK_D(DUK_DPRINT("marked %ld objects on the finalize_list as reachable (previous finalizer run skipped)",
395 (long) count_finalize_list));
396 }
397 #endif
398 }
399 #endif /* DUK_USE_FINALIZER_SUPPORT */
400
401 /*
402 * Fallback marking handler if recursion limit is reached.
403 *
404 * Iterates 'temproots' until recursion limit is no longer hit. Temproots
405 * can be in heap_allocated or finalize_list; refzero_list is now always
406 * empty for mark-and-sweep. A temproot may occur in finalize_list now if
407 * there are objects on the finalize_list and user code creates a reference
408 * from an object in heap_allocated to the object in finalize_list (which is
409 * now allowed), and the marking happens to coincide with the recursion depth limit.
410 *
411 * This is a slow scan, but guarantees that we finish with a bounded C stack.
412 *
413 * Note that nodes may have been marked as temproots before this scan began,
414 * OR they may have been marked during the scan (as we process nodes
415 * recursively also during the scan). This is intended behavior.
416 */
417
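/* Flag protocol (sketch of what the code below relies on): when the
 * recursion limit is hit, duk__mark_heaphdr() leaves the node with both
 * REACHABLE and TEMPROOT set and sets the heap-wide RECLIMIT_REACHED flag.
 * Each scan pass first clears RECLIMIT_REACHED, then for every TEMPROOT
 * node clears TEMPROOT and REACHABLE and re-marks the node with a fresh
 * recursion budget; the scan repeats until a pass completes without the
 * limit being hit again.
 */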
418 #if defined(DUK_USE_DEBUG)
419 DUK_LOCAL void duk__handle_temproot(duk_heap *heap, duk_heaphdr *hdr, duk_size_t *count) {
420 #else
421 DUK_LOCAL void duk__handle_temproot(duk_heap *heap, duk_heaphdr *hdr) {
422 #endif
423 DUK_ASSERT(hdr != NULL);
424
425 if (!DUK_HEAPHDR_HAS_TEMPROOT(hdr)) {
426 DUK_DDD(DUK_DDDPRINT("not a temp root: %p", (void *) hdr));
427 return;
428 }
429
430 DUK_DDD(DUK_DDDPRINT("found a temp root: %p", (void *) hdr));
431 DUK_HEAPHDR_CLEAR_TEMPROOT(hdr);
432 DUK_HEAPHDR_CLEAR_REACHABLE(hdr); /* Done so that duk__mark_heaphdr() works correctly. */
433 #if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
434 hdr->h_assert_refcount--; /* Same node visited twice. */
435 #endif
436 duk__mark_heaphdr_nonnull(heap, hdr);
437
438 #if defined(DUK_USE_DEBUG)
439 (*count)++;
440 #endif
441 }
442
443 DUK_LOCAL void duk__mark_temproots_by_heap_scan(duk_heap *heap) {
444 duk_heaphdr *hdr;
445 #if defined(DUK_USE_DEBUG)
446 duk_size_t count;
447 #endif
448
449 DUK_DD(DUK_DDPRINT("duk__mark_temproots_by_heap_scan: %p", (void *) heap));
450
451 while (DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap)) {
452 DUK_DD(DUK_DDPRINT("recursion limit reached, doing heap scan to continue from temproots"));
453
454 #if defined(DUK_USE_DEBUG)
455 count = 0;
456 #endif
457 DUK_HEAP_CLEAR_MARKANDSWEEP_RECLIMIT_REACHED(heap);
458
459 hdr = heap->heap_allocated;
460 while (hdr) {
461 #if defined(DUK_USE_DEBUG)
462 duk__handle_temproot(heap, hdr, &count);
463 #else
464 duk__handle_temproot(heap, hdr);
465 #endif
466 hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
467 }
468
469 #if defined(DUK_USE_FINALIZER_SUPPORT)
470 hdr = heap->finalize_list;
471 while (hdr) {
472 #if defined(DUK_USE_DEBUG)
473 duk__handle_temproot(heap, hdr, &count);
474 #else
475 duk__handle_temproot(heap, hdr);
476 #endif
477 hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
478 }
479 #endif
480
481 #if defined(DUK_USE_DEBUG)
482 DUK_DD(DUK_DDPRINT("temproot mark heap scan processed %ld temp roots", (long) count));
483 #endif
484 }
485 }
486
487 /*
488 * Finalize refcounts for heap elements just about to be freed.
489 * This must be done for all objects before freeing to avoid any
490 * stale pointer dereferences.
491 *
492 * Note that this must deduce the set of objects to be freed
493 * identically to duk__sweep_heap().
494 */
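/* Concrete example of the hazard: if unreachable objects A and B reference
 * each other, freeing A first and only afterwards finalizing B's refcounts
 * would DECREF through B's now dangling pointer to A. Decreasing all
 * target refcounts before any object is freed avoids touching freed memory.
 */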
495
496 #if defined(DUK_USE_REFERENCE_COUNTING)
497 DUK_LOCAL void duk__finalize_refcounts(duk_heap *heap) {
498 duk_heaphdr *hdr;
499
500 DUK_ASSERT(heap->heap_thread != NULL);
501
502 DUK_DD(DUK_DDPRINT("duk__finalize_refcounts: heap=%p", (void *) heap));
503
504 hdr = heap->heap_allocated;
505 while (hdr) {
506 if (!DUK_HEAPHDR_HAS_REACHABLE(hdr)) {
507 /*
508 * Unreachable object about to be swept. Finalize target refcounts
509 * (objects which the unreachable object points to) without doing
510 * refzero processing. Recursive decrefs are also prevented when
511 * refzero processing is disabled.
512 *
513 * Value cannot be a finalizable object, as they have been made
514 * temporarily reachable for this round.
515 */
516
517 DUK_DDD(DUK_DDDPRINT("unreachable object, refcount finalize before sweeping: %p", (void *) hdr));
518
519 /* Finalize using heap->heap_thread; DECREF has a
520 * suppress check for mark-and-sweep which is based
521 * on heap->ms_running.
522 */
523 duk_heaphdr_refcount_finalize_norz(heap, hdr);
524 }
525
526 hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
527 }
528 }
529 #endif /* DUK_USE_REFERENCE_COUNTING */
530
531 /*
532 * Clear (reachable) flags of finalize_list.
533 *
534 * We could mostly do this in the sweep phase when we move objects from the
535 * heap into the finalize_list. However, if a finalizer run is skipped
536 * during a mark-and-sweep, the objects on the finalize_list will be marked
537 * reachable during the next mark-and-sweep. Since they're already on the
538 * finalize_list, no-one will be clearing their REACHABLE flag so we do it
539 * here. (This now overlaps with the sweep handling in a harmless way.)
540 */
541
542 #if defined(DUK_USE_FINALIZER_SUPPORT)
543 DUK_LOCAL void duk__clear_finalize_list_flags(duk_heap *heap) {
544 duk_heaphdr *hdr;
545
546 DUK_DD(DUK_DDPRINT("duk__clear_finalize_list_flags: %p", (void *) heap));
547
548 hdr = heap->finalize_list;
549 while (hdr) {
550 DUK_HEAPHDR_CLEAR_REACHABLE(hdr);
551 #if defined(DUK_USE_ASSERTIONS)
552 DUK_ASSERT(DUK_HEAPHDR_HAS_FINALIZABLE(hdr) || \
553 (heap->currently_finalizing == hdr));
554 #endif
555 /* DUK_HEAPHDR_FLAG_FINALIZED may be set. */
556 DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
557 hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
558 }
559 }
560 #endif /* DUK_USE_FINALIZER_SUPPORT */
561
562 /*
563 * Sweep stringtable.
564 */
565
566 DUK_LOCAL void duk__sweep_stringtable(duk_heap *heap, duk_size_t *out_count_keep) {
567 duk_hstring *h;
568 duk_hstring *prev;
569 duk_uint32_t i;
570 #if defined(DUK_USE_DEBUG)
571 duk_size_t count_free = 0;
572 #endif
573 duk_size_t count_keep = 0;
574
575 DUK_DD(DUK_DDPRINT("duk__sweep_stringtable: %p", (void *) heap));
576
577 #if defined(DUK_USE_STRTAB_PTRCOMP)
578 if (heap->strtable16 == NULL) {
579 #else
580 if (heap->strtable == NULL) {
581 #endif
582 goto done;
583 }
584
585 for (i = 0; i < heap->st_size; i++) {
586 #if defined(DUK_USE_STRTAB_PTRCOMP)
587 h = DUK_USE_HEAPPTR_DEC16(heap->heap_udata, heap->strtable16[i]);
588 #else
589 h = heap->strtable[i];
590 #endif
591 prev = NULL;
592 while (h != NULL) {
593 duk_hstring *next;
594 next = h->hdr.h_next;
595
596 if (DUK_HEAPHDR_HAS_REACHABLE((duk_heaphdr *) h))
597 {
598 DUK_HEAPHDR_CLEAR_REACHABLE((duk_heaphdr *) h);
599 count_keep++;
600 prev = h;
601 } else {
602 #if defined(DUK_USE_DEBUG)
603 count_free++;
604 #endif
605
606 /* For pinned strings the refcount has been
607 * bumped. We could unbump it here before
608 * freeing, but that's actually not necessary
609 * except for assertions.
610 */
611 #if 0
612 if (DUK_HSTRING_HAS_PINNED_LITERAL(h)) {
613 DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) > 0U);
614 DUK_HSTRING_DECREF_NORZ(heap->heap_thread, h);
615 DUK_HSTRING_CLEAR_PINNED_LITERAL(h);
616 }
617 #endif
618 #if defined(DUK_USE_REFERENCE_COUNTING)
619 /* Non-zero refcounts should not happen for unreachable strings,
620 * because we refcount finalize all unreachable objects which
621 * should have decreased unreachable string refcounts to zero
622 * (even for cycles). However, pinned strings have a +1 bump.
623 */
624 DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) ==
625            (DUK_HSTRING_HAS_PINNED_LITERAL(h) ? 1U : 0U));
626 #endif
627
628 /* Deal with weak references first. */
629 duk_heap_strcache_string_remove(heap, (duk_hstring *) h);
630
631 /* Remove the string from the string table. */
632 duk_heap_strtable_unlink_prev(heap, (duk_hstring *) h, (duk_hstring *) prev);
633
634 /* Free inner references (these exist e.g. when external
635 * strings are enabled) and the struct itself.
636 */
637 duk_free_hstring(heap, (duk_hstring *) h);
638
639 /* Don't update 'prev'; it should be the last string kept. */
640 }
641
642 h = next;
643 }
644 }
645
646 done:
647 #if defined(DUK_USE_DEBUG)
648 DUK_D(DUK_DPRINT("mark-and-sweep sweep stringtable: %ld freed, %ld kept",
649 (long) count_free, (long) count_keep));
650 #endif
651 *out_count_keep = count_keep;
652 }
653
654 /*
655 * Sweep heap.
656 */
657
658 DUK_LOCAL void duk__sweep_heap(duk_heap *heap, duk_small_uint_t flags, duk_size_t *out_count_keep) {
659 duk_heaphdr *prev; /* last element that was left in the heap */
660 duk_heaphdr *curr;
661 duk_heaphdr *next;
662 #if defined(DUK_USE_DEBUG)
663 duk_size_t count_free = 0;
664 duk_size_t count_finalize = 0;
665 duk_size_t count_rescue = 0;
666 #endif
667 duk_size_t count_keep = 0;
668
669 DUK_DD(DUK_DDPRINT("duk__sweep_heap: %p", (void *) heap));
670
671 prev = NULL;
672 curr = heap->heap_allocated;
673 heap->heap_allocated = NULL;
674 while (curr) {
675 /* Strings and ROM objects are never placed on the heap allocated list. */
676 DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) != DUK_HTYPE_STRING);
677 DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(curr));
678
679 next = DUK_HEAPHDR_GET_NEXT(heap, curr);
680
681 if (DUK_HEAPHDR_HAS_REACHABLE(curr)) {
682 /*
683 * Reachable object:
684 * - If FINALIZABLE -> actually unreachable (but marked
685 * artificially reachable), queue to finalize_list.
686 * - If !FINALIZABLE but FINALIZED -> rescued after
687 * finalizer execution.
688 * - Otherwise just a normal, reachable object.
689 *
690 * Objects which are kept are queued to heap_allocated
691 * tail (we're essentially filtering heap_allocated in
692 * practice).
693 */
694
695 #if defined(DUK_USE_FINALIZER_SUPPORT)
696 if (DUK_UNLIKELY(DUK_HEAPHDR_HAS_FINALIZABLE(curr))) {
697 DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));
698 DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);
699 DUK_DD(DUK_DDPRINT("sweep; reachable, finalizable --> move to finalize_list: %p", (void *) curr));
700
701 #if defined(DUK_USE_REFERENCE_COUNTING)
702 DUK_HEAPHDR_PREINC_REFCOUNT(curr); /* Bump refcount so that refzero never occurs while a finalizer call is pending. */
703 #endif
704 DUK_HEAP_INSERT_INTO_FINALIZE_LIST(heap, curr);
705 #if defined(DUK_USE_DEBUG)
706 count_finalize++;
707 #endif
708 }
709 else
710 #endif /* DUK_USE_FINALIZER_SUPPORT */
711 {
712 if (DUK_UNLIKELY(DUK_HEAPHDR_HAS_FINALIZED(curr))) {
713 DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
714 DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);
715
716 if (flags & DUK_MS_FLAG_POSTPONE_RESCUE) {
717 DUK_DD(DUK_DDPRINT("sweep; reachable, finalized, but postponing rescue decisions --> keep object (with FINALIZED set): %!iO", curr));
718 count_keep++;
719 } else {
720 DUK_DD(DUK_DDPRINT("sweep; reachable, finalized --> rescued after finalization: %p", (void *) curr));
721 #if defined(DUK_USE_FINALIZER_SUPPORT)
722 DUK_HEAPHDR_CLEAR_FINALIZED(curr);
723 #endif
724 #if defined(DUK_USE_DEBUG)
725 count_rescue++;
726 #endif
727 }
728 } else {
729 DUK_DD(DUK_DDPRINT("sweep; reachable --> keep: %!iO", curr));
730 count_keep++;
731 }
732
733 if (prev != NULL) {
734 DUK_ASSERT(heap->heap_allocated != NULL);
735 DUK_HEAPHDR_SET_NEXT(heap, prev, curr);
736 } else {
737 DUK_ASSERT(heap->heap_allocated == NULL);
738 heap->heap_allocated = curr;
739 }
740 #if defined(DUK_USE_DOUBLE_LINKED_HEAP)
741 DUK_HEAPHDR_SET_PREV(heap, curr, prev);
742 #endif
743 DUK_HEAPHDR_ASSERT_LINKS(heap, prev);
744 DUK_HEAPHDR_ASSERT_LINKS(heap, curr);
745 prev = curr;
746 }
747
748 /*
749 * Shrink check for value stacks here. We're inside
750 * ms_prevent_count protection which prevents recursive
751 * mark-and-sweep and refzero finalizers, so there are
752 * no side effects that would affect the heap lists.
753 */
754 if (DUK_HEAPHDR_IS_OBJECT(curr) && DUK_HOBJECT_IS_THREAD((duk_hobject *) curr)) {
755 duk_hthread *thr_curr = (duk_hthread *) curr;
756 DUK_DD(DUK_DDPRINT("value stack shrink check for thread: %!O", curr));
757 duk_valstack_shrink_check_nothrow(thr_curr, flags & DUK_MS_FLAG_EMERGENCY /*snug*/);
758 }
759
760 DUK_HEAPHDR_CLEAR_REACHABLE(curr);
761 /* Keep FINALIZED if set, used if rescue decisions are postponed. */
762 /* Keep FINALIZABLE for objects on finalize_list. */
763 DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(curr));
764 } else {
765 /*
766 * Unreachable object:
767 * - If FINALIZED, object was finalized but not
768 * rescued. This doesn't affect freeing.
769 * - Otherwise normal unreachable object.
770 *
771 * There's no guard preventing a FINALIZED object
772 * from being freed while finalizers execute: the
773 * artificial finalize_list reachability roots can't
774 * cause an incorrect free decision (but can cause
775 * an incorrect rescue decision).
776 */
777
778 #if defined(DUK_USE_REFERENCE_COUNTING)
779 /* Non-zero refcounts should not happen because we refcount
780 * finalize all unreachable objects which should cancel out
781 * refcounts (even for cycles).
782 */
783 DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(curr) == 0);
784 #endif
785 DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
786
787 #if defined(DUK_USE_DEBUG)
788 if (DUK_HEAPHDR_HAS_FINALIZED(curr)) {
789 DUK_DD(DUK_DDPRINT("sweep; unreachable, finalized --> finalized object not rescued: %p", (void *) curr));
790 } else {
791 DUK_DD(DUK_DDPRINT("sweep; not reachable --> free: %p", (void *) curr));
792 }
793
794 #endif
795
796 /* Note: object cannot be a finalizable unreachable object, as
797 * they have been marked temporarily reachable for this round,
798 * and are handled above.
799 */
800
801 #if defined(DUK_USE_DEBUG)
802 count_free++;
803 #endif
804
805 /* Weak refs should be handled here, but no weak refs for
806 * any non-string objects exist right now.
807 */
808
809 /* Free object and all auxiliary (non-heap) allocs. */
810 duk_heap_free_heaphdr_raw(heap, curr);
811 }
812
813 curr = next;
814 }
815
816 if (prev != NULL) {
817 DUK_HEAPHDR_SET_NEXT(heap, prev, NULL);
818 }
819 DUK_HEAPHDR_ASSERT_LINKS(heap, prev);
820
821 #if defined(DUK_USE_DEBUG)
822 DUK_D(DUK_DPRINT("mark-and-sweep sweep objects (non-string): %ld freed, %ld kept, %ld rescued, %ld queued for finalization",
823 (long) count_free, (long) count_keep, (long) count_rescue, (long) count_finalize));
824 #endif
825 *out_count_keep = count_keep;
826 }
827
828 /*
829 * Litcache helpers.
830 */
831
832 #if defined(DUK_USE_LITCACHE_SIZE)
833 DUK_LOCAL void duk__wipe_litcache(duk_heap *heap) {
834 duk_uint_t i;
835 duk_litcache_entry *e;
836
837 e = heap->litcache;
838 for (i = 0; i < DUK_USE_LITCACHE_SIZE; i++) {
839 e->addr = NULL;
840 /* e->h does not need to be invalidated: when e->addr is
841 * NULL, e->h is considered garbage.
842 */
843 e++;
844 }
845 }
846 #endif /* DUK_USE_LITCACHE_SIZE */
847
848 /*
849 * Object compaction.
850 *
851 * Compaction is assumed to never throw an error.
852 */
853
854 DUK_LOCAL int duk__protected_compact_object(duk_hthread *thr, void *udata) {
855 duk_hobject *obj;
856 /* XXX: for threads, compact stacks? */
857
858 DUK_UNREF(udata);
859 obj = duk_known_hobject(thr, -1);
860 duk_hobject_compact_props(thr, obj);
861 return 0;
862 }
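/* Compaction is not expected to throw (see comment above), but it is still
 * invoked through duk_safe_call() in duk__compact_object_list() below so
 * that an unexpected error cannot propagate out of mark-and-sweep.
 */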
863
864 #if defined(DUK_USE_DEBUG)
865 DUK_LOCAL void duk__compact_object_list(duk_heap *heap, duk_hthread *thr, duk_heaphdr *start, duk_size_t *p_count_check, duk_size_t *p_count_compact, duk_size_t *p_count_bytes_saved) {
866 #else
867 DUK_LOCAL void duk__compact_object_list(duk_heap *heap, duk_hthread *thr, duk_heaphdr *start) {
868 #endif
869 duk_heaphdr *curr;
870 #if defined(DUK_USE_DEBUG)
871 duk_size_t old_size, new_size;
872 #endif
873 duk_hobject *obj;
874
875 DUK_UNREF(heap);
876
877 curr = start;
878 while (curr) {
879 DUK_DDD(DUK_DDDPRINT("mark-and-sweep compact: %p", (void *) curr));
880
881 if (DUK_HEAPHDR_GET_TYPE(curr) != DUK_HTYPE_OBJECT) {
882 goto next;
883 }
884 obj = (duk_hobject *) curr;
885
886 #if defined(DUK_USE_DEBUG)
887 old_size = DUK_HOBJECT_P_COMPUTE_SIZE(DUK_HOBJECT_GET_ESIZE(obj),
888 DUK_HOBJECT_GET_ASIZE(obj),
889 DUK_HOBJECT_GET_HSIZE(obj));
890 #endif
891
892 DUK_DD(DUK_DDPRINT("compact object: %p", (void *) obj));
893 duk_push_hobject(thr, obj);
894 /* XXX: disable error handlers for duration of compaction? */
895 duk_safe_call(thr, duk__protected_compact_object, NULL, 1, 0);
896
897 #if defined(DUK_USE_DEBUG)
898 new_size = DUK_HOBJECT_P_COMPUTE_SIZE(DUK_HOBJECT_GET_ESIZE(obj),
899 DUK_HOBJECT_GET_ASIZE(obj),
900 DUK_HOBJECT_GET_HSIZE(obj));
901 #endif
902
903 #if defined(DUK_USE_DEBUG)
904 (*p_count_compact)++;
905 (*p_count_bytes_saved) += (duk_size_t) (old_size - new_size);
906 #endif
907
908 next:
909 curr = DUK_HEAPHDR_GET_NEXT(heap, curr);
910 #if defined(DUK_USE_DEBUG)
911 (*p_count_check)++;
912 #endif
913 }
914 }
915
916 DUK_LOCAL void duk__compact_objects(duk_heap *heap) {
917 /* XXX: which lists should participate? to be finalized? */
918 #if defined(DUK_USE_DEBUG)
919 duk_size_t count_check = 0;
920 duk_size_t count_compact = 0;
921 duk_size_t count_bytes_saved = 0;
922 #endif
923
924 DUK_DD(DUK_DDPRINT("duk__compact_objects: %p", (void *) heap));
925
926 DUK_ASSERT(heap->heap_thread != NULL);
927
928 #if defined(DUK_USE_DEBUG)
929 duk__compact_object_list(heap, heap->heap_thread, heap->heap_allocated, &count_check, &count_compact, &count_bytes_saved);
930 #if defined(DUK_USE_FINALIZER_SUPPORT)
931 duk__compact_object_list(heap, heap->heap_thread, heap->finalize_list, &count_check, &count_compact, &count_bytes_saved);
932 #endif
933 #else
934 duk__compact_object_list(heap, heap->heap_thread, heap->heap_allocated);
935 #if defined(DUK_USE_FINALIZER_SUPPORT)
936 duk__compact_object_list(heap, heap->heap_thread, heap->finalize_list);
937 #endif
938 #endif
939 #if defined(DUK_USE_REFERENCE_COUNTING)
940 DUK_ASSERT(heap->refzero_list == NULL); /* Always handled to completion inline in DECREF. */
941 #endif
942
943 #if defined(DUK_USE_DEBUG)
944 DUK_D(DUK_DPRINT("mark-and-sweep compact objects: %ld checked, %ld compaction attempts, %ld bytes saved by compaction",
945 (long) count_check, (long) count_compact, (long) count_bytes_saved));
946 #endif
947 }
948
949 /*
950 * Assertion helpers.
951 */
952
953 #if defined(DUK_USE_ASSERTIONS)
954 typedef void (*duk__gc_heaphdr_assert)(duk_heap *heap, duk_heaphdr *h);
955 typedef void (*duk__gc_hstring_assert)(duk_heap *heap, duk_hstring *h);
956
957 DUK_LOCAL void duk__assert_walk_list(duk_heap *heap, duk_heaphdr *start, duk__gc_heaphdr_assert func) {
958 duk_heaphdr *curr;
959 for (curr = start; curr != NULL; curr = DUK_HEAPHDR_GET_NEXT(heap, curr)) {
960 func(heap, curr);
961 }
962 }
963
964 DUK_LOCAL void duk__assert_walk_strtable(duk_heap *heap, duk__gc_hstring_assert func) {
965 duk_uint32_t i;
966
967 for (i = 0; i < heap->st_size; i++) {
968 duk_hstring *h;
969
970 #if defined(DUK_USE_STRTAB_PTRCOMP)
971 h = DUK_USE_HEAPPTR_DEC16(heap->heap_udata, heap->strtable16[i]);
972 #else
973 h = heap->strtable[i];
974 #endif
975 while (h != NULL) {
976 func(heap, h);
977 h = h->hdr.h_next;
978 }
979 }
980 }
981
982 DUK_LOCAL void duk__assert_heaphdr_flags_cb(duk_heap *heap, duk_heaphdr *h) {
983 DUK_UNREF(heap);
984 DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(h));
985 DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(h));
986 DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(h));
987 /* may have FINALIZED */
988 }
989 DUK_LOCAL void duk__assert_heaphdr_flags(duk_heap *heap) {
990 duk__assert_walk_list(heap, heap->heap_allocated, duk__assert_heaphdr_flags_cb);
991 #if defined(DUK_USE_REFERENCE_COUNTING)
992 DUK_ASSERT(heap->refzero_list == NULL); /* Always handled to completion inline in DECREF. */
993 #endif
994 /* XXX: Assertions for finalize_list? */
995 }
996
997 DUK_LOCAL void duk__assert_validity_cb1(duk_heap *heap, duk_heaphdr *h) {
998 DUK_UNREF(heap);
999 DUK_ASSERT(DUK_HEAPHDR_IS_OBJECT(h) || DUK_HEAPHDR_IS_BUFFER(h));
1000 duk_heaphdr_assert_valid_subclassed(h);
1001 }
1002 DUK_LOCAL void duk__assert_validity_cb2(duk_heap *heap, duk_hstring *h) {
1003 DUK_UNREF(heap);
1004 DUK_ASSERT(DUK_HEAPHDR_IS_STRING((duk_heaphdr *) h));
1005 duk_heaphdr_assert_valid_subclassed((duk_heaphdr *) h);
1006 }
1007 DUK_LOCAL void duk__assert_validity(duk_heap *heap) {
1008 duk__assert_walk_list(heap, heap->heap_allocated, duk__assert_validity_cb1);
1009 #if defined(DUK_USE_FINALIZER_SUPPORT)
1010 duk__assert_walk_list(heap, heap->finalize_list, duk__assert_validity_cb1);
1011 #endif
1012 #if defined(DUK_USE_REFERENCE_COUNTING)
1013 duk__assert_walk_list(heap, heap->refzero_list, duk__assert_validity_cb1);
1014 #endif
1015 duk__assert_walk_strtable(heap, duk__assert_validity_cb2);
1016 }
1017
1018 #if defined(DUK_USE_REFERENCE_COUNTING)
1019 DUK_LOCAL void duk__assert_valid_refcounts_cb(duk_heap *heap, duk_heaphdr *h) {
1020 /* Cannot really assert much w.r.t. refcounts now. */
1021
1022 DUK_UNREF(heap);
1023 if (DUK_HEAPHDR_GET_REFCOUNT(h) == 0 &&
1024 DUK_HEAPHDR_HAS_FINALIZED(h)) {
1025 /* An object may be in heap_allocated list with a zero
1026 * refcount if it has just been finalized and is waiting
1027 * to be collected by the next cycle.
1028 * (This doesn't currently happen however.)
1029 */
1030 } else if (DUK_HEAPHDR_GET_REFCOUNT(h) == 0) {
1031 /* An object may be in heap_allocated list with a zero
1032 * refcount also if it is a temporary object created
1033 * during debugger paused state. It will get collected
1034 * by mark-and-sweep based on its reachability status
1035 * (presumably not reachable because refcount is 0).
1036 */
1037 }
1038 DUK_ASSERT_DISABLE(DUK_HEAPHDR_GET_REFCOUNT(h) >= 0); /* Unsigned. */
1039 }
1040 DUK_LOCAL void duk__assert_valid_refcounts(duk_heap *heap) {
1041 duk__assert_walk_list(heap, heap->heap_allocated, duk__assert_valid_refcounts_cb);
1042 }
1043
1044 DUK_LOCAL void duk__clear_assert_refcounts_cb1(duk_heap *heap, duk_heaphdr *h) {
1045 DUK_UNREF(heap);
1046 h->h_assert_refcount = 0;
1047 }
1048 DUK_LOCAL void duk__clear_assert_refcounts_cb2(duk_heap *heap, duk_hstring *h) {
1049 DUK_UNREF(heap);
1050 ((duk_heaphdr *) h)->h_assert_refcount = 0;
1051 }
1052 DUK_LOCAL void duk__clear_assert_refcounts(duk_heap *heap) {
1053 duk__assert_walk_list(heap, heap->heap_allocated, duk__clear_assert_refcounts_cb1);
1054 #if defined(DUK_USE_FINALIZER_SUPPORT)
1055 duk__assert_walk_list(heap, heap->finalize_list, duk__clear_assert_refcounts_cb1);
1056 #endif
1057 #if defined(DUK_USE_REFERENCE_COUNTING)
1058 duk__assert_walk_list(heap, heap->refzero_list, duk__clear_assert_refcounts_cb1);
1059 #endif
1060 duk__assert_walk_strtable(heap, duk__clear_assert_refcounts_cb2);
1061 }
1062
1063 DUK_LOCAL void duk__check_refcount_heaphdr(duk_heaphdr *hdr) {
1064 duk_bool_t count_ok;
1065 duk_size_t expect_refc;
1066
1067 /* The refcount check only makes sense for reachable objects on
1068 * heap_allocated or string table, after the sweep phase. Prior to
1069 * sweep phase refcounts will include references that are not visible
1070 * via reachability roots.
1071 *
1072 * Because we're called after the sweep phase, all heap objects on
1073 * heap_allocated are reachable. REACHABLE flags have already been
1074 * cleared so we can't check them.
1075 */
1076
1077 /* ROM objects have intentionally incorrect refcount (1), but we won't
1078 * check them.
1079 */
1080 DUK_ASSERT(!DUK_HEAPHDR_HAS_READONLY(hdr));
1081
1082 expect_refc = hdr->h_assert_refcount;
1083 if (DUK_HEAPHDR_IS_STRING(hdr) && DUK_HSTRING_HAS_PINNED_LITERAL((duk_hstring *) hdr)) {
1084 expect_refc++;
1085 }
1086 count_ok = ((duk_size_t) DUK_HEAPHDR_GET_REFCOUNT(hdr) == expect_refc);
1087 if (!count_ok) {
1088 DUK_D(DUK_DPRINT("refcount mismatch for: %p: header=%ld counted=%ld --> %!iO",
1089 (void *) hdr, (long) DUK_HEAPHDR_GET_REFCOUNT(hdr),
1090 (long) hdr->h_assert_refcount, hdr));
1091 DUK_ASSERT(0);
1092 }
1093 }
1094
1095 DUK_LOCAL void duk__check_assert_refcounts_cb1(duk_heap *heap, duk_heaphdr *h) {
1096 DUK_UNREF(heap);
1097 duk__check_refcount_heaphdr(h);
1098 }
1099 DUK_LOCAL void duk__check_assert_refcounts_cb2(duk_heap *heap, duk_hstring *h) {
1100 DUK_UNREF(heap);
1101 duk__check_refcount_heaphdr((duk_heaphdr *) h);
1102 }
1103 DUK_LOCAL void duk__check_assert_refcounts(duk_heap *heap) {
1104 duk__assert_walk_list(heap, heap->heap_allocated, duk__check_assert_refcounts_cb1);
1105 #if defined(DUK_USE_FINALIZER_SUPPORT)
1106 duk__assert_walk_list(heap, heap->finalize_list, duk__check_assert_refcounts_cb1);
1107 #endif
1108 /* XXX: Assert anything for refzero_list? */
1109 duk__assert_walk_strtable(heap, duk__check_assert_refcounts_cb2);
1110 }
1111 #endif /* DUK_USE_REFERENCE_COUNTING */
1112
1113 #if defined(DUK_USE_LITCACHE_SIZE)
1114 DUK_LOCAL void duk__assert_litcache_nulls(duk_heap *heap) {
1115 duk_uint_t i;
1116 duk_litcache_entry *e;
1117
1118 e = heap->litcache;
1119 for (i = 0; i < DUK_USE_LITCACHE_SIZE; i++) {
1120 /* Entry addresses were NULLed before mark-and-sweep, check
1121 * that they're still NULL afterwards to ensure no pointers
1122 * were recorded through any side effects.
1123 */
1124 DUK_ASSERT(e->addr == NULL);
1125 }
1126 }
1127 #endif /* DUK_USE_LITCACHE_SIZE */
1128 #endif /* DUK_USE_ASSERTIONS */
1129
1130 /*
1131 * Stats dump.
1132 */
1133
1134 #if defined(DUK_USE_DEBUG)
1135 DUK_LOCAL void duk__dump_stats(duk_heap *heap) {
1136 DUK_D(DUK_DPRINT("stats executor: opcodes=%ld, interrupt=%ld, throw=%ld",
1137 (long) heap->stats_exec_opcodes, (long) heap->stats_exec_interrupt,
1138 (long) heap->stats_exec_throw));
1139 DUK_D(DUK_DPRINT("stats call: all=%ld, tailcall=%ld, ecmatoecma=%ld",
1140 (long) heap->stats_call_all, (long) heap->stats_call_tailcall,
1141 (long) heap->stats_call_ecmatoecma));
1142 DUK_D(DUK_DPRINT("stats safecall: all=%ld, nothrow=%ld, throw=%ld",
1143 (long) heap->stats_safecall_all, (long) heap->stats_safecall_nothrow,
1144 (long) heap->stats_safecall_throw));
1145 DUK_D(DUK_DPRINT("stats mark-and-sweep: try_count=%ld, skip_count=%ld, emergency_count=%ld",
1146 (long) heap->stats_ms_try_count, (long) heap->stats_ms_skip_count,
1147 (long) heap->stats_ms_emergency_count));
1148 DUK_D(DUK_DPRINT("stats stringtable: intern_hit=%ld, intern_miss=%ld, "
1149 "resize_check=%ld, resize_grow=%ld, resize_shrink=%ld, "
1150 "litcache_hit=%ld, litcache_miss=%ld, litcache_pin=%ld",
1151 (long) heap->stats_strtab_intern_hit, (long) heap->stats_strtab_intern_miss,
1152 (long) heap->stats_strtab_resize_check, (long) heap->stats_strtab_resize_grow,
1153 (long) heap->stats_strtab_resize_shrink, (long) heap->stats_strtab_litcache_hit,
1154 (long) heap->stats_strtab_litcache_miss, (long) heap->stats_strtab_litcache_pin));
1155 DUK_D(DUK_DPRINT("stats object: realloc_props=%ld, abandon_array=%ld",
1156 (long) heap->stats_object_realloc_props, (long) heap->stats_object_abandon_array));
1157 DUK_D(DUK_DPRINT("stats getownpropdesc: count=%ld, hit=%ld, miss=%ld",
1158 (long) heap->stats_getownpropdesc_count, (long) heap->stats_getownpropdesc_hit,
1159 (long) heap->stats_getownpropdesc_miss));
1160 DUK_D(DUK_DPRINT("stats getpropdesc: count=%ld, hit=%ld, miss=%ld",
1161 (long) heap->stats_getpropdesc_count, (long) heap->stats_getpropdesc_hit,
1162 (long) heap->stats_getpropdesc_miss));
1163 DUK_D(DUK_DPRINT("stats getprop: all=%ld, arrayidx=%ld, bufobjidx=%ld, "
1164 "bufferidx=%ld, bufferlen=%ld, stringidx=%ld, stringlen=%ld, "
1165 "proxy=%ld, arguments=%ld",
1166 (long) heap->stats_getprop_all, (long) heap->stats_getprop_arrayidx,
1167 (long) heap->stats_getprop_bufobjidx, (long) heap->stats_getprop_bufferidx,
1168 (long) heap->stats_getprop_bufferlen, (long) heap->stats_getprop_stringidx,
1169 (long) heap->stats_getprop_stringlen, (long) heap->stats_getprop_proxy,
1170 (long) heap->stats_getprop_arguments));
1171 DUK_D(DUK_DPRINT("stats putprop: all=%ld, arrayidx=%ld, bufobjidx=%ld, "
1172 "bufferidx=%ld, proxy=%ld",
1173 (long) heap->stats_putprop_all, (long) heap->stats_putprop_arrayidx,
1174 (long) heap->stats_putprop_bufobjidx, (long) heap->stats_putprop_bufferidx,
1175 (long) heap->stats_putprop_proxy));
1176 DUK_D(DUK_DPRINT("stats getvar: all=%ld",
1177 (long) heap->stats_getvar_all));
1178 DUK_D(DUK_DPRINT("stats putvar: all=%ld",
1179 (long) heap->stats_putvar_all));
1180 DUK_D(DUK_DPRINT("stats envrec: delayedcreate=%ld, create=%ld, newenv=%ld, oldenv=%ld, pushclosure=%ld",
1181 (long) heap->stats_envrec_delayedcreate,
1182 (long) heap->stats_envrec_create,
1183 (long) heap->stats_envrec_newenv,
1184 (long) heap->stats_envrec_oldenv,
1185 (long) heap->stats_envrec_pushclosure));
1186 }
1187 #endif /* DUK_USE_DEBUG */
1188
1189 /*
1190 * Main mark-and-sweep function.
1191 *
1192 * 'flags' represents the features requested by the caller. The current
1193 * heap->ms_base_flags is ORed automatically into the flags; the base flags
1194 * mask typically prevents certain mark-and-sweep operations to avoid trouble.
1195 */
1196
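/* Illustrative call sites (sketch only, not compiled here; 'heap' is
 * assumed to be a valid duk_heap pointer held by the caller). The flag
 * names are the same ones tested later in this function:
 */
#if 0
duk_heap_mark_and_sweep(heap, 0); /* voluntary GC, base flags only */
duk_heap_mark_and_sweep(heap, DUK_MS_FLAG_EMERGENCY); /* emergency GC, compaction allowed */
duk_heap_mark_and_sweep(heap, DUK_MS_FLAG_EMERGENCY | DUK_MS_FLAG_NO_OBJECT_COMPACTION);
#endif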
1197 DUK_INTERNAL void duk_heap_mark_and_sweep(duk_heap *heap, duk_small_uint_t flags) {
1198 duk_size_t count_keep_obj;
1199 duk_size_t count_keep_str;
1200 #if defined(DUK_USE_VOLUNTARY_GC)
1201 duk_size_t tmp;
1202 #endif
1203 duk_bool_t entry_creating_error;
1204
1205 DUK_STATS_INC(heap, stats_ms_try_count);
1206 #if defined(DUK_USE_DEBUG)
1207 if (flags & DUK_MS_FLAG_EMERGENCY) {
1208 DUK_STATS_INC(heap, stats_ms_emergency_count);
1209 }
1210 #endif
1211
1212 /* If debugger is paused, garbage collection is disabled by default.
1213 * This is achieved by bumping ms_prevent_count when becoming paused.
1214 */
1215 DUK_ASSERT(!DUK_HEAP_HAS_DEBUGGER_PAUSED(heap) || heap->ms_prevent_count > 0);
1216
1217 /* Prevention/recursion check as soon as possible because we may
1218 * be called a number of times when voluntary mark-and-sweep is
1219 * pending.
1220 */
1221 if (heap->ms_prevent_count != 0) {
1222 DUK_DD(DUK_DDPRINT("reject recursive mark-and-sweep"));
1223 DUK_STATS_INC(heap, stats_ms_skip_count);
1224 return;
1225 }
1226 DUK_ASSERT(heap->ms_running == 0); /* ms_prevent_count is bumped when ms_running is set */
1227
1228 /* Heap_thread is used during mark-and-sweep for refcount finalization
1229 * (it's also used for finalizer execution once mark-and-sweep is
1230 * complete). Heap allocation code ensures heap_thread is set and
1231 * properly initialized before setting ms_prevent_count to 0.
1232 */
1233 DUK_ASSERT(heap->heap_thread != NULL);
1234 DUK_ASSERT(heap->heap_thread->valstack != NULL);
1235
1236 DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) starting, requested flags: 0x%08lx, effective flags: 0x%08lx",
1237 (unsigned long) flags, (unsigned long) (flags | heap->ms_base_flags)));
1238
1239 flags |= heap->ms_base_flags;
1240 #if defined(DUK_USE_FINALIZER_SUPPORT)
1241 if (heap->finalize_list != NULL) {
1242 flags |= DUK_MS_FLAG_POSTPONE_RESCUE;
1243 }
1244 #endif
1245
1246 /*
1247 * Assertions before
1248 */
1249
1250 #if defined(DUK_USE_ASSERTIONS)
1251 DUK_ASSERT(heap->ms_prevent_count == 0);
1252 DUK_ASSERT(heap->ms_running == 0);
1253 DUK_ASSERT(!DUK_HEAP_HAS_DEBUGGER_PAUSED(heap));
1254 DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap));
1255 DUK_ASSERT(heap->ms_recursion_depth == 0);
1256 duk__assert_heaphdr_flags(heap);
1257 duk__assert_validity(heap);
1258 #if defined(DUK_USE_REFERENCE_COUNTING)
1259 /* Note: heap->refzero_free_running may be true; a refcount
1260 * finalizer may trigger a mark-and-sweep.
1261 */
1262 duk__assert_valid_refcounts(heap);
1263 #endif /* DUK_USE_REFERENCE_COUNTING */
1264 #endif /* DUK_USE_ASSERTIONS */
1265
1266 /*
1267 * Begin
1268 */
1269
1270 DUK_ASSERT(heap->ms_prevent_count == 0);
1271 DUK_ASSERT(heap->ms_running == 0);
1272 heap->ms_prevent_count = 1;
1273 heap->ms_running = 1;
1274 entry_creating_error = heap->creating_error;
1275 heap->creating_error = 0;
1276
1277 /*
1278 * Free activation/catcher freelists on every mark-and-sweep for now.
1279 * This is an initial rough draft; ideally we'd keep count of the
1280 * freelist size and free only excess entries.
1281 */
1282
1283 DUK_D(DUK_DPRINT("freeing temporary freelists"));
1284 duk_heap_free_freelists(heap);
1285
1286 /*
1287 * Mark roots, hoping that recursion limit is not normally hit.
1288 * If recursion limit is hit, run additional reachability rounds
1289 * starting from "temproots" until marking is complete.
1290 *
1291 * Marking happens in two phases: first we mark actual reachability
1292 * roots (and run "temproots" to complete the process). Then we
1293 * check which objects are unreachable and are finalizable; such
1294 * objects are marked as FINALIZABLE and marked as reachability roots
1295 * (and "temproots" is run again to complete the process).
1296 *
1297 * The heap finalize_list must also be marked as a reachability root.
1298 * There may be objects on the list from a previous round if the
1299 * previous run had finalizer skip flag.
1300 */
1301
1302 #if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
1303 duk__clear_assert_refcounts(heap);
1304 #endif
1305 #if defined(DUK_USE_LITCACHE_SIZE)
1306 duk__wipe_litcache(heap);
1307 #endif
1308 duk__mark_roots_heap(heap); /* Mark main reachability roots. */
1309 #if defined(DUK_USE_REFERENCE_COUNTING)
1310 DUK_ASSERT(heap->refzero_list == NULL); /* Always handled to completion inline in DECREF. */
1311 #endif
1312 duk__mark_temproots_by_heap_scan(heap); /* Temproots. */
1313
1314 #if defined(DUK_USE_FINALIZER_SUPPORT)
1315 duk__mark_finalizable(heap); /* Mark finalizable as reachability roots. */
1316 duk__mark_finalize_list(heap); /* Mark finalizer work list as reachability roots. */
1317 #endif
1318 duk__mark_temproots_by_heap_scan(heap); /* Temproots. */
1319
1320 /*
1321 * Sweep garbage and remove marking flags, and move objects with
1322 * finalizers to the finalizer work list.
1323 *
1324 * Objects to be swept need to get their refcounts finalized before
1325 * they are swept. In other words, their target object refcounts
1326 * need to be decreased. This has to be done before freeing any
1327 * objects to avoid decref'ing dangling pointers (which may happen
1328 * even without bugs, e.g. with reference loops).
1329 *
1330 * Because strings don't point to other heap objects, similar
1331 * finalization is not necessary for strings.
1332 */
1333
1334 /* XXX: more emergency behavior, e.g. find smaller hash sizes etc */
1335
1336 #if defined(DUK_USE_REFERENCE_COUNTING)
1337 duk__finalize_refcounts(heap);
1338 #endif
1339 duk__sweep_heap(heap, flags, &count_keep_obj);
1340 duk__sweep_stringtable(heap, &count_keep_str);
1341 #if defined(DUK_USE_ASSERTIONS) && defined(DUK_USE_REFERENCE_COUNTING)
1342 duk__check_assert_refcounts(heap);
1343 #endif
1344 #if defined(DUK_USE_REFERENCE_COUNTING)
1345 DUK_ASSERT(heap->refzero_list == NULL); /* Always handled to completion inline in DECREF. */
1346 #endif
1347 #if defined(DUK_USE_FINALIZER_SUPPORT)
1348 duk__clear_finalize_list_flags(heap);
1349 #endif
1350
1351 /*
1352 * Object compaction (emergency only).
1353 *
1354 * Object compaction is a separate step after sweeping, as there is
1355 * more free memory for it to work with. Also, currently compaction
1356 * may insert new objects into the heap allocated list and the string
1357 * table which we don't want to do during a sweep (the reachability
1358 * flags of such objects would be incorrect). The objects inserted
1359 * are currently:
1360 *
1361 * - a temporary duk_hbuffer for a new properties allocation
1362 * - if array part is abandoned, string keys are interned
1363 *
1364 * The object insertions go to the front of the list, so they do not
1365 * cause an infinite loop (they are not compacted).
1366 *
1367 * At present compaction is not allowed when mark-and-sweep runs
1368 * during error handling because it involves a duk_safe_call()
1369 * interfering with error state.
1370 */
1371
1372 if ((flags & DUK_MS_FLAG_EMERGENCY) &&
1373 !(flags & DUK_MS_FLAG_NO_OBJECT_COMPACTION)) {
1374 if (heap->lj.type != DUK_LJ_TYPE_UNKNOWN) {
1375 DUK_D(DUK_DPRINT("lj.type (%ld) not DUK_LJ_TYPE_UNKNOWN, skip object compaction", (long) heap->lj.type));
1376 } else {
1377 DUK_D(DUK_DPRINT("object compaction"));
1378 duk__compact_objects(heap);
1379 }
1380 }
1381
1382 /*
1383 * String table resize check.
1384 *
1385 * This is mainly useful in emergency GC: if the string table load
1386 * factor is really low for some reason, we can shrink the string
1387 * table to a smaller size and free some memory in the process.
1388 * Only execute in emergency GC. String table has internal flags
1389 * to protect against recursive resizing if this mark-and-sweep pass
1390 * was triggered by a string table resize.
1391 */
1392
1393 if (flags & DUK_MS_FLAG_EMERGENCY) {
1394 DUK_D(DUK_DPRINT("stringtable resize check in emergency gc"));
1395 duk_heap_strtable_force_resize(heap);
1396 }
1397
1398 /*
1399 * Finish
1400 */
1401
1402 DUK_ASSERT(heap->ms_prevent_count == 1);
1403 DUK_ASSERT(heap->ms_running == 1);
1404 heap->ms_prevent_count = 0;
1405 heap->ms_running = 0;
1406 heap->creating_error = entry_creating_error; /* for nested error handling, see GH-2278 */
1407
1408 /*
1409 * Assertions after
1410 */
1411
1412 #if defined(DUK_USE_ASSERTIONS)
1413 DUK_ASSERT(heap->ms_prevent_count == 0);
1414 DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap));
1415 DUK_ASSERT(heap->ms_recursion_depth == 0);
1416 duk__assert_heaphdr_flags(heap);
1417 duk__assert_validity(heap);
1418 #if defined(DUK_USE_REFERENCE_COUNTING)
1419 /* Note: heap->refzero_free_running may be true; a refcount
1420 * finalizer may trigger a mark-and-sweep.
1421 */
1422 duk__assert_valid_refcounts(heap);
1423 #endif /* DUK_USE_REFERENCE_COUNTING */
1424 #if defined(DUK_USE_LITCACHE_SIZE)
1425 duk__assert_litcache_nulls(heap);
1426 #endif /* DUK_USE_LITCACHE_SIZE */
1427 #endif /* DUK_USE_ASSERTIONS */
1428
1429 /*
1430 * Reset trigger counter
1431 */
1432
1433 #if defined(DUK_USE_VOLUNTARY_GC)
1434 tmp = (count_keep_obj + count_keep_str) / 256;
1435 heap->ms_trigger_counter = (duk_int_t) (
1436 (tmp * DUK_HEAP_MARK_AND_SWEEP_TRIGGER_MULT) +
1437 DUK_HEAP_MARK_AND_SWEEP_TRIGGER_ADD);
1438 DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) finished: %ld objects kept, %ld strings kept, trigger reset to %ld",
1439 (long) count_keep_obj, (long) count_keep_str, (long) heap->ms_trigger_counter));
1440 #else
1441 DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) finished: %ld objects kept, %ld strings kept, no voluntary trigger",
1442 (long) count_keep_obj, (long) count_keep_str));
1443 #endif
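/* Worked example of the voluntary trigger reset above (the multiplier and
 * addend constants are defined elsewhere; the values below are hypothetical):
 * with 5120 kept objects + strings, tmp = 5120 / 256 = 20; if the multiplier
 * were 256 and the addend 1024, ms_trigger_counter would be reset to
 * 20 * 256 + 1024 = 6144.
 */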
1444
1445 /*
1446 * Stats dump
1447 */
1448
1449 #if defined(DUK_USE_DEBUG)
1450 duk__dump_stats(heap);
1451 #endif
1452
1453 /*
1454 * Finalize objects in the finalization work list. Finalized
1455 * objects are queued back to heap_allocated with FINALIZED set.
1456 *
1457 * Since finalizers may cause arbitrary side effects, they are
1458 * prevented e.g. during string table and object property allocation
1459 * resizing using heap->pf_prevent_count. In this case the objects
1460 * remain in the finalization work list after mark-and-sweep exits
1461 * and they may be finalized on the next pass or by any DECREF checking
1462 * for finalize_list.
1463 *
1464 * As of Duktape 2.1 finalization happens outside mark-and-sweep
1465 * protection. Mark-and-sweep is allowed while the finalize_list
1466 * is being processed, but no rescue decisions are done while the
1467 * process is on-going. This avoids incorrect rescue decisions
1468 * if an object is considered reachable (and thus rescued) because
1469 * of a reference via finalize_list (which is considered a reachability
1470 * root). When finalize_list is being processed, reachable objects
1471 * with FINALIZED set will just keep their FINALIZED flag for later
1472 * mark-and-sweep processing.
1473 *
1474 * This could also be handled (a bit better) by having a more refined
1475 * notion of reachability for rescue/free decisions.
1476 *
1477 * XXX: avoid finalizer execution when doing emergency GC?
1478 */
1479
1480 #if defined(DUK_USE_FINALIZER_SUPPORT)
1481 /* Attempt to process finalize_list, pf_prevent_count check
1482 * is inside the target.
1483 */
1484 duk_heap_process_finalize_list(heap);
1485 #endif /* DUK_USE_FINALIZER_SUPPORT */
1486 }
1487