/*
 *  Reference counting implementation.
 *
 *  INCREF/DECREF, finalization and freeing of objects whose refcount reaches
 *  zero (refzero).  These operations are very performance sensitive, so
 *  various small tricks are used in an attempt to maximize speed.
 */

#include "duk_internal.h"

#if defined(DUK_USE_REFERENCE_COUNTING)

#if !defined(DUK_USE_DOUBLE_LINKED_HEAP)
#error internal error, reference counting requires a double linked heap
#endif

/*
 *  Heap object refcount finalization.
 *
 *  When an object is about to be freed, all other objects it refers to must
 *  be decref'd.  Refcount finalization does NOT free the object or its inner
 *  allocations (mark-and-sweep shares these helpers), it just manipulates
 *  the refcounts.
 *
 *  Note that any of the DECREFs may cause a refcount to drop to zero.  If so,
 *  the object won't be refzero processed inline, but will just be queued to
 *  refzero_list and processed by an earlier caller working on refzero_list,
 *  eliminating C recursion from even long refzero cascades.  If refzero
 *  finalization is triggered by mark-and-sweep, refzero conditions are ignored
 *  (objects are not even queued to refzero_list) because mark-and-sweep deals
 *  with them; refcounts are still updated so that they remain in sync with
 *  actual references.
 */
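
/* Illustrative sketch (an assumption for illustration only, not part of the
 * build): a typical NORZ call site performs any number of side effect free
 * DECREF_NORZ operations and then processes pending finalizers once at the
 * end.  Refzero'd objects without finalizers are freed inline even in the
 * NORZ path; only finalizer execution is deferred to finalize_list.
 */
#if 0
DUK_LOCAL void duk__example_norz_usage(duk_hthread *thr, duk_tval *tv, duk_hobject *h) {
	/* Refcounts drop; non-finalizable refzero'd objects are freed,
	 * finalizable ones are only queued (no arbitrary side effects yet).
	 */
	DUK_TVAL_DECREF_NORZ(thr, tv);
	DUK_HOBJECT_DECREF_NORZ(thr, h);

	/* Run any queued finalizers now that side effects are acceptable;
	 * duk_refzero_check_slow() is defined below with finalizer support.
	 */
	duk_refzero_check_slow(thr);
}
#endif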

DUK_LOCAL void duk__decref_tvals_norz(duk_hthread *thr, duk_tval *tv, duk_idx_t count) {
	DUK_ASSERT(count == 0 || tv != NULL);

	while (count-- > 0) {
		DUK_TVAL_DECREF_NORZ(thr, tv);
		tv++;
	}
}

DUK_INTERNAL void duk_hobject_refcount_finalize_norz(duk_heap *heap, duk_hobject *h) {
	duk_hthread *thr;
	duk_uint_fast32_t i;
	duk_uint_fast32_t n;
	duk_propvalue *p_val;
	duk_tval *p_tv;
	duk_hstring **p_key;
	duk_uint8_t *p_flag;
	duk_hobject *h_proto;

	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(h);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) h) == DUK_HTYPE_OBJECT);

	thr = heap->heap_thread;
	DUK_ASSERT(thr != NULL);

	p_key = DUK_HOBJECT_E_GET_KEY_BASE(heap, h);
	p_val = DUK_HOBJECT_E_GET_VALUE_BASE(heap, h);
	p_flag = DUK_HOBJECT_E_GET_FLAGS_BASE(heap, h);
	n = DUK_HOBJECT_GET_ENEXT(h);
	while (n-- > 0) {
		duk_hstring *key;

		key = p_key[n];
		if (DUK_UNLIKELY(key == NULL)) {
			continue;
		}
		DUK_HSTRING_DECREF_NORZ(thr, key);
		if (DUK_UNLIKELY(p_flag[n] & DUK_PROPDESC_FLAG_ACCESSOR)) {
			duk_hobject *h_getset;
			h_getset = p_val[n].a.get;
			DUK_ASSERT(h_getset == NULL || DUK_HEAPHDR_IS_OBJECT((duk_heaphdr *) h_getset));
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, h_getset);
			h_getset = p_val[n].a.set;
			DUK_ASSERT(h_getset == NULL || DUK_HEAPHDR_IS_OBJECT((duk_heaphdr *) h_getset));
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, h_getset);
		} else {
			duk_tval *tv_val;
			tv_val = &p_val[n].v;
			DUK_TVAL_DECREF_NORZ(thr, tv_val);
		}
	}

	p_tv = DUK_HOBJECT_A_GET_BASE(heap, h);
	n = DUK_HOBJECT_GET_ASIZE(h);
	while (n-- > 0) {
		duk_tval *tv_val;
		tv_val = p_tv + n;
		DUK_TVAL_DECREF_NORZ(thr, tv_val);
	}

	/* Hash part is a 'weak reference' and doesn't contribute to refcounts. */

	h_proto = (duk_hobject *) DUK_HOBJECT_GET_PROTOTYPE(heap, h);
	DUK_ASSERT(h_proto == NULL || DUK_HEAPHDR_IS_OBJECT((duk_heaphdr *) h_proto));
	DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, h_proto);

	/* XXX: Object subclass tests are quite awkward at present, ideally
	 * we should be able to switch-case here with a dense index (subtype
	 * number or something).  For now, fast path plain objects and arrays
	 * and bit test the rest individually.
	 */

	if (DUK_HOBJECT_HAS_FASTREFS(h)) {
		/* Plain object or array, nothing more to do.  While a
		 * duk_harray has additional fields, none of them need
		 * DECREF updates.
		 */
		DUK_ASSERT(DUK_HOBJECT_ALLOWS_FASTREFS(h));
		return;
	}
	DUK_ASSERT(DUK_HOBJECT_PROHIBITS_FASTREFS(h));

	/* Slow path: special object, start bit checks from most likely. */

	/* XXX: reorg, more common first */
	if (DUK_HOBJECT_IS_COMPFUNC(h)) {
		duk_hcompfunc *f = (duk_hcompfunc *) h;
		duk_tval *tv, *tv_end;
		duk_hobject **funcs, **funcs_end;

		DUK_HCOMPFUNC_ASSERT_VALID(f);

		if (DUK_LIKELY(DUK_HCOMPFUNC_GET_DATA(heap, f) != NULL)) {
			tv = DUK_HCOMPFUNC_GET_CONSTS_BASE(heap, f);
			tv_end = DUK_HCOMPFUNC_GET_CONSTS_END(heap, f);
			while (tv < tv_end) {
				DUK_TVAL_DECREF_NORZ(thr, tv);
				tv++;
			}

			funcs = DUK_HCOMPFUNC_GET_FUNCS_BASE(heap, f);
			funcs_end = DUK_HCOMPFUNC_GET_FUNCS_END(heap, f);
			while (funcs < funcs_end) {
				duk_hobject *h_func;
				h_func = *funcs;
				DUK_ASSERT(h_func != NULL);
				DUK_ASSERT(DUK_HEAPHDR_IS_OBJECT((duk_heaphdr *) h_func));
				DUK_HCOMPFUNC_DECREF_NORZ(thr, (duk_hcompfunc *) h_func);
				funcs++;
			}
		} else {
			/* May happen in some out-of-memory corner cases. */
			DUK_D(DUK_DPRINT("duk_hcompfunc 'data' is NULL, skipping decref"));
		}

		DUK_HEAPHDR_DECREF_ALLOWNULL(thr, (duk_heaphdr *) DUK_HCOMPFUNC_GET_LEXENV(heap, f));
		DUK_HEAPHDR_DECREF_ALLOWNULL(thr, (duk_heaphdr *) DUK_HCOMPFUNC_GET_VARENV(heap, f));
		DUK_HEAPHDR_DECREF_ALLOWNULL(thr, (duk_hbuffer *) DUK_HCOMPFUNC_GET_DATA(heap, f));
	} else if (DUK_HOBJECT_IS_DECENV(h)) {
		duk_hdecenv *e = (duk_hdecenv *) h;
		DUK_HDECENV_ASSERT_VALID(e);
		DUK_HTHREAD_DECREF_NORZ_ALLOWNULL(thr, e->thread);
		DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, e->varmap);
	} else if (DUK_HOBJECT_IS_OBJENV(h)) {
		duk_hobjenv *e = (duk_hobjenv *) h;
		DUK_HOBJENV_ASSERT_VALID(e);
		DUK_ASSERT(e->target != NULL);  /* Required for object environments. */
		DUK_HOBJECT_DECREF_NORZ(thr, e->target);
#if defined(DUK_USE_BUFFEROBJECT_SUPPORT)
	} else if (DUK_HOBJECT_IS_BUFOBJ(h)) {
		duk_hbufobj *b = (duk_hbufobj *) h;
		DUK_HBUFOBJ_ASSERT_VALID(b);
		DUK_HBUFFER_DECREF_NORZ_ALLOWNULL(thr, (duk_hbuffer *) b->buf);
		DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, (duk_hobject *) b->buf_prop);
#endif  /* DUK_USE_BUFFEROBJECT_SUPPORT */
	} else if (DUK_HOBJECT_IS_BOUNDFUNC(h)) {
		duk_hboundfunc *f = (duk_hboundfunc *) (void *) h;
		DUK_HBOUNDFUNC_ASSERT_VALID(f);
		DUK_TVAL_DECREF_NORZ(thr, &f->target);
		DUK_TVAL_DECREF_NORZ(thr, &f->this_binding);
		duk__decref_tvals_norz(thr, f->args, f->nargs);
#if defined(DUK_USE_ES6_PROXY)
	} else if (DUK_HOBJECT_IS_PROXY(h)) {
		duk_hproxy *p = (duk_hproxy *) h;
		DUK_HPROXY_ASSERT_VALID(p);
		DUK_HOBJECT_DECREF_NORZ(thr, p->target);
		DUK_HOBJECT_DECREF_NORZ(thr, p->handler);
#endif  /* DUK_USE_ES6_PROXY */
	} else if (DUK_HOBJECT_IS_THREAD(h)) {
		duk_hthread *t = (duk_hthread *) h;
		duk_activation *act;
		duk_tval *tv;

		DUK_HTHREAD_ASSERT_VALID(t);

		tv = t->valstack;
		while (tv < t->valstack_top) {
			DUK_TVAL_DECREF_NORZ(thr, tv);
			tv++;
		}

		for (act = t->callstack_curr; act != NULL; act = act->parent) {
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, (duk_hobject *) DUK_ACT_GET_FUNC(act));
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, (duk_hobject *) act->var_env);
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, (duk_hobject *) act->lex_env);
#if defined(DUK_USE_NONSTD_FUNC_CALLER_PROPERTY)
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, (duk_hobject *) act->prev_caller);
#endif
#if 0  /* nothing now */
			for (cat = act->cat; cat != NULL; cat = cat->parent) {
			}
#endif
		}

		for (i = 0; i < DUK_NUM_BUILTINS; i++) {
			DUK_HOBJECT_DECREF_NORZ_ALLOWNULL(thr, (duk_hobject *) t->builtins[i]);
		}

		DUK_HTHREAD_DECREF_NORZ_ALLOWNULL(thr, (duk_hthread *) t->resumer);
	} else {
		/* We may come here if the object should have a FASTREFS flag
		 * but it's missing for some reason.  Assert for never getting
		 * here; however, other than performance, this is harmless.
		 */
		DUK_D(DUK_DPRINT("missing FASTREFS flag for: %!iO", h));
		DUK_ASSERT(0);
	}
}

DUK_INTERNAL void duk_heaphdr_refcount_finalize_norz(duk_heap *heap, duk_heaphdr *hdr) {
	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(hdr != NULL);

	if (DUK_HEAPHDR_IS_OBJECT(hdr)) {
		duk_hobject_refcount_finalize_norz(heap, (duk_hobject *) hdr);
	}
	/* DUK_HTYPE_BUFFER: nothing to finalize */
	/* DUK_HTYPE_STRING: nothing to finalize */
}

/*
 *  Refzero processing for duk_hobject: queue a refzero'ed object to either
 *  finalize_list or refzero_list and process the relevant list(s) if
 *  necessary.
 *
 *  Refzero_list is single linked, with only 'prev' pointers set and valid.
 *  All 'next' pointers are intentionally left as garbage.  This doesn't
 *  matter because refzero_list is processed to completion before any other
 *  code (like mark-and-sweep) might walk the list.
 *
 *  In more detail:
 *
 *  - On first insert refzero_list is NULL and the new object becomes the
 *    first and only element on the list; duk__refcount_free_pending() is
 *    called and it starts processing the list from the initial element,
 *    i.e. the list tail.
 *
 *  - As each object is refcount finalized, new objects may be queued to
 *    refzero_list head.  Their 'next' pointers are left as garbage, but
 *    'prev' pointers are set correctly, with the element at refzero_list
 *    having a NULL 'prev' pointer.  The fact that refzero_list is non-NULL
 *    is used to reject (1) recursive duk__refcount_free_pending() and
 *    (2) finalize_list processing calls.
 *
 *  - When we're done with the current object, read its 'prev' pointer and
 *    free the object.  If 'prev' is NULL, we've reached the head of the list
 *    and are done: set refzero_list to NULL and process pending finalizers.
 *    Otherwise continue processing the list.
 *
 *  A refzero cascade is free of side effects because it only involves
 *  queueing more objects and freeing memory; finalizer execution is blocked
 *  in the code path queueing objects to finalize_list.  As a result the
 *  initial refzero call (which triggers duk__refcount_free_pending()) must
 *  check finalize_list so that finalizers are executed snappily.
 *
 *  If finalize_list processing starts first, refzero may occur while we're
 *  processing finalizers.  That's fine: that particular refzero cascade is
 *  handled to completion without side effects.  Once the cascade is complete,
 *  we'll run pending finalizers but notice that we're already doing that and
 *  return.
 *
 *  This could be expanded to allow incremental freeing: just bail out
 *  early and resume at a future alloc/decref/refzero.  However, if that
 *  were done, the list structure would need to be kept consistent at all
 *  times, mark-and-sweep would need to handle refzero_list, etc.
 */
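
/* Illustration (derived from the description above) of refzero_list state
 * mid-cascade; 'next' pointers are garbage throughout:
 *
 *   [initial obj] --prev--> [newer obj] --prev--> [newest obj] --prev--> NULL
 *        ^                                             ^
 *        processing starts at the tail                 heap->refzero_list
 */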

DUK_LOCAL void duk__refcount_free_pending(duk_heap *heap) {
	duk_heaphdr *curr;
#if defined(DUK_USE_DEBUG)
	duk_int_t count = 0;
#endif

	DUK_ASSERT(heap != NULL);

	curr = heap->refzero_list;
	DUK_ASSERT(curr != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_PREV(heap, curr) == NULL);  /* We're called on initial insert only. */
	/* curr->next is GARBAGE. */

	do {
		duk_heaphdr *prev;

		DUK_DDD(DUK_DDDPRINT("refzero processing %p: %!O", (void *) curr, (duk_heaphdr *) curr));

#if defined(DUK_USE_DEBUG)
		count++;
#endif

		DUK_ASSERT(curr != NULL);
		DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);  /* currently, always the case */
		/* FINALIZED may be set; don't care about flags here. */

		/* Refcount finalize 'curr'.  Refzero_list must be non-NULL
		 * here to prevent recursive entry to duk__refcount_free_pending().
		 */
		DUK_ASSERT(heap->refzero_list != NULL);
		duk_hobject_refcount_finalize_norz(heap, (duk_hobject *) curr);

		prev = DUK_HEAPHDR_GET_PREV(heap, curr);
		DUK_ASSERT((prev == NULL && heap->refzero_list == curr) || \
		           (prev != NULL && heap->refzero_list != curr));
		/* prev->next is intentionally not updated and is garbage. */

		duk_free_hobject(heap, (duk_hobject *) curr);  /* Invalidates 'curr'. */

		curr = prev;
	} while (curr != NULL);

	heap->refzero_list = NULL;

	DUK_DD(DUK_DDPRINT("refzero processed %ld objects", (long) count));
}

DUK_LOCAL DUK_INLINE void duk__refcount_refzero_hobject(duk_heap *heap, duk_hobject *obj, duk_bool_t skip_free_pending) {
	duk_heaphdr *hdr;
	duk_heaphdr *root;

	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(obj != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) obj) == DUK_HTYPE_OBJECT);

	hdr = (duk_heaphdr *) obj;

	/* Refzero'd objects must be in heap_allocated.  They can't be in
	 * finalize_list because all objects on finalize_list have an
	 * artificial +1 refcount bump.
	 */
#if defined(DUK_USE_ASSERTIONS)
	DUK_ASSERT(duk_heap_in_heap_allocated(heap, (duk_heaphdr *) obj));
#endif

	DUK_HEAP_REMOVE_FROM_HEAP_ALLOCATED(heap, hdr);

#if defined(DUK_USE_FINALIZER_SUPPORT)
	/* This finalizer check MUST BE side effect free.  It should also be
	 * as fast as possible because it's applied to every object freed.
	 */
	if (DUK_UNLIKELY(DUK_HOBJECT_HAS_FINALIZER_FAST(heap, (duk_hobject *) hdr) != 0U)) {
		/* Special case: FINALIZED may be set if mark-and-sweep queued
		 * object for finalization, the finalizer was executed (and
		 * FINALIZED set), mark-and-sweep hasn't yet processed the
		 * object again, but its refcount drops to zero.  Free without
		 * running the finalizer again.
		 */
		if (DUK_HEAPHDR_HAS_FINALIZED(hdr)) {
			DUK_D(DUK_DPRINT("refzero'd object has finalizer and FINALIZED is set -> free"));
		} else {
			/* Set FINALIZABLE flag so that all objects on finalize_list
			 * will have it set and are thus detectable based on the
			 * flag alone.
			 */
			DUK_HEAPHDR_SET_FINALIZABLE(hdr);
			DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(hdr));

#if defined(DUK_USE_REFERENCE_COUNTING)
			/* Bump refcount on finalize_list insert so that a
			 * refzero can never occur when an object is waiting
			 * for its finalizer call.  Refzero might otherwise
			 * now happen because we allow duk_push_heapptr() for
			 * objects pending finalization.
			 */
			DUK_HEAPHDR_PREINC_REFCOUNT(hdr);
#endif
			DUK_HEAP_INSERT_INTO_FINALIZE_LIST(heap, hdr);

			/* Process finalizers unless skipping is explicitly
			 * requested (NORZ) or refzero_list is being processed
			 * (avoids side effects during a refzero cascade).
			 * If refzero_list is processed, the initial refzero
			 * call will run pending finalizers when refzero_list
			 * is done.
			 */
			if (!skip_free_pending && heap->refzero_list == NULL) {
				duk_heap_process_finalize_list(heap);
			}
			return;
		}
	}
#endif  /* DUK_USE_FINALIZER_SUPPORT */

	/* No need to finalize, free object via refzero_list. */

	root = heap->refzero_list;

	DUK_HEAPHDR_SET_PREV(heap, hdr, NULL);
	/* 'next' is left as GARBAGE. */
	heap->refzero_list = hdr;

	if (root == NULL) {
		/* Object is now queued.  Refzero_list was NULL so
		 * no-one is currently processing it; do it here.
		 * With refzero processing just doing a cascade of
		 * free calls, we can process it directly even when
		 * NORZ macros are used: there are no side effects.
		 */
		duk__refcount_free_pending(heap);
		DUK_ASSERT(heap->refzero_list == NULL);

		/* Process finalizers only after the entire cascade
		 * is finished.  In most cases there's nothing to
		 * finalize, so fast path check to avoid a call.
		 */
#if defined(DUK_USE_FINALIZER_SUPPORT)
		if (!skip_free_pending && DUK_UNLIKELY(heap->finalize_list != NULL)) {
			duk_heap_process_finalize_list(heap);
		}
#endif
	} else {
		DUK_ASSERT(DUK_HEAPHDR_GET_PREV(heap, root) == NULL);
		DUK_HEAPHDR_SET_PREV(heap, root, hdr);

		/* Object is now queued.  Because refzero_list was
		 * non-NULL, it's already being processed by someone
		 * in the C call stack, so we're done.
		 */
	}
}

#if defined(DUK_USE_FINALIZER_SUPPORT)
DUK_INTERNAL DUK_ALWAYS_INLINE void duk_refzero_check_fast(duk_hthread *thr) {
	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(thr->heap != NULL);
	DUK_ASSERT(thr->heap->refzero_list == NULL);  /* Processed to completion inline. */

	if (DUK_UNLIKELY(thr->heap->finalize_list != NULL)) {
		duk_heap_process_finalize_list(thr->heap);
	}
}

DUK_INTERNAL void duk_refzero_check_slow(duk_hthread *thr) {
	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(thr->heap != NULL);
	DUK_ASSERT(thr->heap->refzero_list == NULL);  /* Processed to completion inline. */

	if (DUK_UNLIKELY(thr->heap->finalize_list != NULL)) {
		duk_heap_process_finalize_list(thr->heap);
	}
}
#endif  /* DUK_USE_FINALIZER_SUPPORT */

/*
 *  Refzero processing for duk_hstring.
 */

DUK_LOCAL DUK_INLINE void duk__refcount_refzero_hstring(duk_heap *heap, duk_hstring *str) {
	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(str != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) str) == DUK_HTYPE_STRING);

	duk_heap_strcache_string_remove(heap, str);
	duk_heap_strtable_unlink(heap, str);
	duk_free_hstring(heap, str);
}

/*
 *  Refzero processing for duk_hbuffer.
 */

DUK_LOCAL DUK_INLINE void duk__refcount_refzero_hbuffer(duk_heap *heap, duk_hbuffer *buf) {
	DUK_ASSERT(heap != NULL);
	DUK_ASSERT(heap->heap_thread != NULL);
	DUK_ASSERT(buf != NULL);
	DUK_ASSERT(DUK_HEAPHDR_GET_TYPE((duk_heaphdr *) buf) == DUK_HTYPE_BUFFER);

	DUK_HEAP_REMOVE_FROM_HEAP_ALLOCATED(heap, (duk_heaphdr *) buf);
	duk_free_hbuffer(heap, buf);
}

/*
 *  Incref and decref functions.
 *
 *  Decref may trigger immediate refzero handling, which may free and finalize
 *  an arbitrary number of objects (a "DECREF cascade").
 *
 *  Refzero handling is skipped entirely if (1) mark-and-sweep is running or
 *  (2) execution is paused in the debugger.  The objects are left in the heap,
 *  and will be freed by mark-and-sweep or eventual heap destruction.
 *
 *  This is necessary during mark-and-sweep because refcounts are also updated
 *  during the sweep phase (otherwise objects referenced by a swept object
 *  would have incorrect refcounts) which then calls here.  This could be
 *  avoided by using separate decref macros in mark-and-sweep; however,
 *  mark-and-sweep also calls finalizers which would use the ordinary decref
 *  macros anyway.
 *
 *  We can't process refzeros (= free objects) when the debugger is running
 *  as the debugger might make an object unreachable but still continue
 *  inspecting it (or even cause it to be pushed back).  So we must rely on
 *  mark-and-sweep to collect them.
 *
 *  The DUK__RZ_SUPPRESS_CHECK() condition is also used in heap destruction
 *  when running finalizers for remaining objects: the flag prevents objects
 *  from being moved around in heap linked lists while that's being done.
 *
 *  The suppress condition is important to performance.
 */
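
/* Illustrative sketch (an assumption, not part of the build) of the
 * suppressed case described above: while mark-and-sweep is running, a
 * DECREF still updates the refcount, but the refzero path bails out in
 * DUK__RZ_SUPPRESS_CHECK() so the object is neither queued nor freed here;
 * the sweep phase (or eventual heap destruction) frees it instead.
 */
#if 0
	DUK_ASSERT(thr->heap->ms_running != 0);
	DUK_HOBJECT_DECREF(thr, h);  /* Refcount may drop to zero here... */
	/* ...but refzero handling returns early, leaving the object in the
	 * heap with a zero refcount for mark-and-sweep to reclaim.
	 */
#endif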

#define DUK__RZ_SUPPRESS_ASSERT1() do { \
		DUK_ASSERT(thr != NULL); \
		DUK_ASSERT(thr->heap != NULL); \
		/* When mark-and-sweep runs, heap_thread must exist. */ \
		DUK_ASSERT(thr->heap->ms_running == 0 || thr->heap->heap_thread != NULL); \
		/* In normal operation finalizers are executed with ms_running == 0 \
		 * so we should never see ms_running == 1 and thr != heap_thread. \
		 * In heap destruction finalizers are executed with ms_running != 0 \
		 * to e.g. prevent refzero; a special value ms_running == 2 is used \
		 * in that case so it can be distinguished from the normal runtime \
		 * case, and allows a stronger assertion here (GH-2030). \
		 */ \
		DUK_ASSERT(!(thr->heap->ms_running == 1 && thr != thr->heap->heap_thread)); \
		/* We may be called when the heap is initializing and we process \
		 * refzeros normally, but mark-and-sweep and finalizers are prevented \
		 * if that's the case. \
		 */ \
		DUK_ASSERT(thr->heap->heap_initializing == 0 || thr->heap->ms_prevent_count > 0); \
		DUK_ASSERT(thr->heap->heap_initializing == 0 || thr->heap->pf_prevent_count > 0); \
	} while (0)

#if defined(DUK_USE_DEBUGGER_SUPPORT)
#define DUK__RZ_SUPPRESS_ASSERT2() do { \
		/* When debugger is paused, ms_running is set. */ \
		DUK_ASSERT(!DUK_HEAP_HAS_DEBUGGER_PAUSED(thr->heap) || thr->heap->ms_running != 0); \
	} while (0)
#define DUK__RZ_SUPPRESS_COND()  (heap->ms_running != 0)
#else
#define DUK__RZ_SUPPRESS_ASSERT2() do { } while (0)
#define DUK__RZ_SUPPRESS_COND()  (heap->ms_running != 0)
#endif  /* DUK_USE_DEBUGGER_SUPPORT */

#define DUK__RZ_SUPPRESS_CHECK() do { \
		DUK__RZ_SUPPRESS_ASSERT1(); \
		DUK__RZ_SUPPRESS_ASSERT2(); \
		if (DUK_UNLIKELY(DUK__RZ_SUPPRESS_COND())) { \
			DUK_DDD(DUK_DDDPRINT("refzero handling suppressed (not even queued) when mark-and-sweep running, object: %p", (void *) h)); \
			return; \
		} \
	} while (0)

#define DUK__RZ_STRING() do { \
		duk__refcount_refzero_hstring(heap, (duk_hstring *) h); \
	} while (0)
#define DUK__RZ_BUFFER() do { \
		duk__refcount_refzero_hbuffer(heap, (duk_hbuffer *) h); \
	} while (0)
#define DUK__RZ_OBJECT() do { \
		duk__refcount_refzero_hobject(heap, (duk_hobject *) h, skip_free_pending); \
	} while (0)

/* XXX: test the effect of inlining here vs. NOINLINE in refzero helpers */
#if defined(DUK_USE_FAST_REFCOUNT_DEFAULT)
#define DUK__RZ_INLINE DUK_ALWAYS_INLINE
#else
#define DUK__RZ_INLINE /*nop*/
#endif

DUK_LOCAL DUK__RZ_INLINE void duk__hstring_refzero_helper(duk_hthread *thr, duk_hstring *h) {
	duk_heap *heap;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(h != NULL);
	heap = thr->heap;

	DUK__RZ_SUPPRESS_CHECK();
	DUK__RZ_STRING();
}

DUK_LOCAL DUK__RZ_INLINE void duk__hbuffer_refzero_helper(duk_hthread *thr, duk_hbuffer *h) {
	duk_heap *heap;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(h != NULL);
	heap = thr->heap;

	DUK__RZ_SUPPRESS_CHECK();
	DUK__RZ_BUFFER();
}

DUK_LOCAL DUK__RZ_INLINE void duk__hobject_refzero_helper(duk_hthread *thr, duk_hobject *h, duk_bool_t skip_free_pending) {
	duk_heap *heap;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(h != NULL);
	heap = thr->heap;

	DUK__RZ_SUPPRESS_CHECK();
	DUK__RZ_OBJECT();
}

DUK_LOCAL DUK__RZ_INLINE void duk__heaphdr_refzero_helper(duk_hthread *thr, duk_heaphdr *h, duk_bool_t skip_free_pending) {
	duk_heap *heap;
	duk_small_uint_t htype;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(h != NULL);
	heap = thr->heap;

	htype = (duk_small_uint_t) DUK_HEAPHDR_GET_TYPE(h);
	DUK_DDD(DUK_DDDPRINT("ms_running=%ld, heap_thread=%p", (long) thr->heap->ms_running, (void *) thr->heap->heap_thread));
	DUK__RZ_SUPPRESS_CHECK();

	switch (htype) {
	case DUK_HTYPE_STRING:
		/* Strings have no internal references but do have "weak"
		 * references in the string cache.  Also note that strings
		 * are not on the heap_allocated list like other heap
		 * elements.
		 */

		DUK__RZ_STRING();
		break;

	case DUK_HTYPE_OBJECT:
		/* Objects have internal references.  Must finalize through
		 * the "refzero" work list.
		 */

		DUK__RZ_OBJECT();
		break;

	default:
		/* Buffers have no internal references.  However, a dynamic
		 * buffer has a separate allocation for the buffer.  This is
		 * freed by duk_heap_free_heaphdr_raw().
		 */

		DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(h) == DUK_HTYPE_BUFFER);
		DUK__RZ_BUFFER();
		break;
	}
}

DUK_INTERNAL DUK_NOINLINE void duk_heaphdr_refzero(duk_hthread *thr, duk_heaphdr *h) {
	duk__heaphdr_refzero_helper(thr, h, 0 /*skip_free_pending*/);
}

DUK_INTERNAL DUK_NOINLINE void duk_heaphdr_refzero_norz(duk_hthread *thr, duk_heaphdr *h) {
	duk__heaphdr_refzero_helper(thr, h, 1 /*skip_free_pending*/);
}

DUK_INTERNAL DUK_NOINLINE void duk_hstring_refzero(duk_hthread *thr, duk_hstring *h) {
	duk__hstring_refzero_helper(thr, h);
}

DUK_INTERNAL DUK_NOINLINE void duk_hbuffer_refzero(duk_hthread *thr, duk_hbuffer *h) {
	duk__hbuffer_refzero_helper(thr, h);
}

DUK_INTERNAL DUK_NOINLINE void duk_hobject_refzero(duk_hthread *thr, duk_hobject *h) {
	duk__hobject_refzero_helper(thr, h, 0 /*skip_free_pending*/);
}

DUK_INTERNAL DUK_NOINLINE void duk_hobject_refzero_norz(duk_hthread *thr, duk_hobject *h) {
	duk__hobject_refzero_helper(thr, h, 1 /*skip_free_pending*/);
}

#if !defined(DUK_USE_FAST_REFCOUNT_DEFAULT)
DUK_INTERNAL void duk_tval_incref(duk_tval *tv) {
	DUK_ASSERT(tv != NULL);

	if (DUK_TVAL_NEEDS_REFCOUNT_UPDATE(tv)) {
		duk_heaphdr *h = DUK_TVAL_GET_HEAPHDR(tv);
		DUK_ASSERT(h != NULL);
		DUK_ASSERT(DUK_HEAPHDR_HTYPE_VALID(h));
		DUK_ASSERT_DISABLE(h->h_refcount >= 0);
		DUK_HEAPHDR_PREINC_REFCOUNT(h);
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(h) != 0);  /* No wrapping. */
	}
}

DUK_INTERNAL void duk_tval_decref(duk_hthread *thr, duk_tval *tv) {
	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(tv != NULL);

	if (DUK_TVAL_NEEDS_REFCOUNT_UPDATE(tv)) {
		duk_heaphdr *h = DUK_TVAL_GET_HEAPHDR(tv);
		DUK_ASSERT(h != NULL);
		DUK_ASSERT(DUK_HEAPHDR_HTYPE_VALID(h));
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(h) >= 1);
#if 0
		if (DUK_HEAPHDR_PREDEC_REFCOUNT(h) != 0) {
			return;
		}
		duk_heaphdr_refzero(thr, h);
#else
		duk_heaphdr_decref(thr, h);
#endif
	}
}

DUK_INTERNAL void duk_tval_decref_norz(duk_hthread *thr, duk_tval *tv) {
	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(tv != NULL);

	if (DUK_TVAL_NEEDS_REFCOUNT_UPDATE(tv)) {
		duk_heaphdr *h = DUK_TVAL_GET_HEAPHDR(tv);
		DUK_ASSERT(h != NULL);
		DUK_ASSERT(DUK_HEAPHDR_HTYPE_VALID(h));
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(h) >= 1);
#if 0
		if (DUK_HEAPHDR_PREDEC_REFCOUNT(h) != 0) {
			return;
		}
		duk_heaphdr_refzero_norz(thr, h);
#else
		duk_heaphdr_decref_norz(thr, h);
#endif
	}
}
#endif  /* !DUK_USE_FAST_REFCOUNT_DEFAULT */

#define DUK__DECREF_ASSERTS() do { \
		DUK_ASSERT(thr != NULL); \
		DUK_ASSERT(thr->heap != NULL); \
		DUK_ASSERT(h != NULL); \
		DUK_ASSERT(DUK_HEAPHDR_HTYPE_VALID((duk_heaphdr *) h)); \
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) >= 1); \
	} while (0)
#if defined(DUK_USE_ROM_OBJECTS)
#define DUK__INCREF_SHARED() do { \
		if (DUK_HEAPHDR_HAS_READONLY((duk_heaphdr *) h)) { \
			return; \
		} \
		DUK_HEAPHDR_PREINC_REFCOUNT((duk_heaphdr *) h); \
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) != 0);  /* No wrapping. */ \
	} while (0)
#define DUK__DECREF_SHARED() do { \
		if (DUK_HEAPHDR_HAS_READONLY((duk_heaphdr *) h)) { \
			return; \
		} \
		if (DUK_HEAPHDR_PREDEC_REFCOUNT((duk_heaphdr *) h) != 0) { \
			return; \
		} \
	} while (0)
#else
#define DUK__INCREF_SHARED() do { \
		DUK_HEAPHDR_PREINC_REFCOUNT((duk_heaphdr *) h); \
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) != 0);  /* No wrapping. */ \
	} while (0)
#define DUK__DECREF_SHARED() do { \
		if (DUK_HEAPHDR_PREDEC_REFCOUNT((duk_heaphdr *) h) != 0) { \
			return; \
		} \
	} while (0)
#endif

#if !defined(DUK_USE_FAST_REFCOUNT_DEFAULT)
/* This will in practice be inlined because it's just an INC instruction,
 * and a bit test + INC when ROM objects are enabled.
 */
DUK_INTERNAL void duk_heaphdr_incref(duk_heaphdr *h) {
	DUK_ASSERT(h != NULL);
	DUK_ASSERT(DUK_HEAPHDR_HTYPE_VALID(h));
	DUK_ASSERT_DISABLE(DUK_HEAPHDR_GET_REFCOUNT(h) >= 0);

	DUK__INCREF_SHARED();
}

DUK_INTERNAL void duk_heaphdr_decref(duk_hthread *thr, duk_heaphdr *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_heaphdr_refzero(thr, h);

	/* Forced mark-and-sweep when GC torture enabled; this could happen
	 * on any DECREF (but not DECREF_NORZ).
	 */
	DUK_GC_TORTURE(thr->heap);
}

DUK_INTERNAL void duk_heaphdr_decref_norz(duk_hthread *thr, duk_heaphdr *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_heaphdr_refzero_norz(thr, h);
}
#endif  /* !DUK_USE_FAST_REFCOUNT_DEFAULT */

#if 0  /* Not needed. */
DUK_INTERNAL void duk_hstring_decref(duk_hthread *thr, duk_hstring *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_hstring_refzero(thr, h);
}
DUK_INTERNAL void duk_hstring_decref_norz(duk_hthread *thr, duk_hstring *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_hstring_refzero_norz(thr, h);
}
DUK_INTERNAL void duk_hbuffer_decref(duk_hthread *thr, duk_hbuffer *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_hbuffer_refzero(thr, h);
}
DUK_INTERNAL void duk_hbuffer_decref_norz(duk_hthread *thr, duk_hbuffer *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_hbuffer_refzero_norz(thr, h);
}
DUK_INTERNAL void duk_hobject_decref(duk_hthread *thr, duk_hobject *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_hobject_refzero(thr, h);
}
DUK_INTERNAL void duk_hobject_decref_norz(duk_hthread *thr, duk_hobject *h) {
	DUK__DECREF_ASSERTS();
	DUK__DECREF_SHARED();
	duk_hobject_refzero_norz(thr, h);
}
#endif

#else  /* DUK_USE_REFERENCE_COUNTING */

/* no refcounting */

#endif  /* DUK_USE_REFERENCE_COUNTING */