/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (C) 2007 Free Software Foundation, Inc

 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_pmark.h"

#ifndef GC_NO_FINALIZATION
# include "javaxfc.h" /* to get GC_finalize_all() as extern "C" */

/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants.                                                        */
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);

#define HASH3(addr,size,log_size) \
        ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
         & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
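
/* A minimal sketch (not part of the collector) of how this hash       */
/* works: the low 3 bits, which are zero for aligned pointers, are     */
/* shifted out, higher-order bits are XORed in for mixing, and the     */
/* result is masked down to the table size.  The address and table     */
/* size below are illustrative assumptions only.                       */
#if 0
  static void hash_demo(void)
  {
    word addr = (word)0xabcdef0;        /* hypothetical aligned address */
    signed_word log_size = 10;          /* a 1024-bucket table          */
    size_t bucket = HASH2(addr, log_size);
    /* Equivalent to ((addr >> 3) ^ (addr >> 13)) & 1023 here. */
    GC_printf("bucket = %u\n", (unsigned)bucket);
  }
#endif
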
31
32 struct hash_chain_entry {
33 word hidden_key;
34 struct hash_chain_entry * next;
35 };
36
37 struct disappearing_link {
38 struct hash_chain_entry prolog;
39 # define dl_hidden_link prolog.hidden_key
40 /* Field to be cleared. */
41 # define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
42 # define dl_set_next(x, y) \
43 (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
44 word dl_hidden_obj; /* Pointer to object base */
45 };
46
47 struct dl_hashtbl_s {
48 struct disappearing_link **head;
49 signed_word log_size;
50 word entries;
51 };
52
53 STATIC struct dl_hashtbl_s GC_dl_hashtbl = {
54 /* head */ NULL, /* log_size */ -1, /* entries */ 0 };
55 #ifndef GC_LONG_REFS_NOT_NEEDED
56 STATIC struct dl_hashtbl_s GC_ll_hashtbl = { NULL, -1, 0 };
57 #endif
58
59 struct finalizable_object {
60 struct hash_chain_entry prolog;
61 # define fo_hidden_base prolog.hidden_key
62 /* Pointer to object base. */
63 /* No longer hidden once object */
64 /* is on finalize_now queue. */
65 # define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
66 # define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
67 GC_finalization_proc fo_fn; /* Finalizer. */
68 ptr_t fo_client_data;
69 word fo_object_size; /* In bytes. */
70 finalization_mark_proc fo_mark_proc; /* Mark-through procedure */
71 };
72
73 static signed_word log_fo_table_size = -1;
74
75 STATIC struct fnlz_roots_s {
76 struct finalizable_object **fo_head;
77 /* List of objects that should be finalized now: */
78 struct finalizable_object *finalize_now;
79 } GC_fnlz_roots = { NULL, NULL };
80
81 #ifdef AO_HAVE_store
82 /* Update finalize_now atomically as GC_should_invoke_finalizers does */
83 /* not acquire the allocation lock. */
84 # define SET_FINALIZE_NOW(fo) \
85 AO_store((volatile AO_t *)&GC_fnlz_roots.finalize_now, (AO_t)(fo))
86 #else
87 # define SET_FINALIZE_NOW(fo) (void)(GC_fnlz_roots.finalize_now = (fo))
#endif /* AO_HAVE_store */

GC_API void GC_CALL GC_push_finalizer_structures(void)
{
  GC_ASSERT((word)(&GC_dl_hashtbl.head) % sizeof(word) == 0);
  GC_ASSERT((word)(&GC_fnlz_roots) % sizeof(word) == 0);
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_ASSERT((word)(&GC_ll_hashtbl.head) % sizeof(word) == 0);
    GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
# endif
  GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_fnlz_roots);
}

/* Threshold of log_size to initiate full collection before growing    */
/* a hash table.                                                       */
#ifndef GC_ON_GROW_LOG_SIZE_MIN
# define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE
#endif

/* Double the size of a hash table.  *log_size_ptr is the log of its   */
/* current size.  May be a no-op.                                      */
/* *table is a pointer to an array of hash headers.  If we succeed, we */
/* update both *table and *log_size_ptr.  Lock is held.                */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          signed_word *log_size_ptr, word *entries_ptr)
{
  word i;
  struct hash_chain_entry *p;
  signed_word log_old_size = *log_size_ptr;
  signed_word log_new_size = log_old_size + 1;
  word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
  word new_size = (word)1 << log_new_size;
  /* FIXME: Power of 2 size often gets rounded up to one more page. */
  struct hash_chain_entry **new_table;

  GC_ASSERT(I_HOLD_LOCK());
  /* Avoid growing the table if at least 25% of its entries could be   */
  /* deleted by forcing a collection.  Ignored for small tables.       */
  /* In incremental mode we skip this optimization, as we want to      */
  /* avoid triggering a full GC whenever possible.                     */
  if (log_old_size >= GC_ON_GROW_LOG_SIZE_MIN && !GC_incremental) {
    IF_CANCEL(int cancel_state;)

    DISABLE_CANCEL(cancel_state);
    (void)GC_try_to_collect_inner(GC_never_stop_func);
    RESTORE_CANCEL(cancel_state);
    /* GC_finalize might have decreased the entries count. */
    if (*entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2))
      return;
  }

  new_table = (struct hash_chain_entry **)
                GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        (size_t)new_size * sizeof(struct hash_chain_entry *),
                        NORMAL);
  if (new_table == 0) {
    if (*table == 0) {
      ABORT("Insufficient space for initial table allocation");
    } else {
      return;
    }
  }
  for (i = 0; i < old_size; i++) {
    p = (*table)[i];
    while (p != 0) {
      ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p->hidden_key);
      struct hash_chain_entry *next = p -> next;
      size_t new_hash = HASH3(real_key, new_size, log_new_size);

      p -> next = new_table[new_hash];
      GC_dirty(p);
      new_table[new_hash] = p;
      p = next;
    }
  }
  *log_size_ptr = log_new_size;
  *table = new_table;
  GC_dirty(new_table); /* entire object */
}

GC_API int GC_CALL GC_register_disappearing_link(void **link)
{
    ptr_t base;

    base = (ptr_t)GC_base(link);
    if (base == 0)
        ABORT("Bad arg to GC_register_disappearing_link");
    return GC_general_register_disappearing_link(link, base);
}
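
/* A minimal client-side sketch (illustrative only, not part of this   */
/* file): a "weak pointer" built from a disappearing link.  The        */
/* struct name and helper below are assumptions for the example.       */
#if 0
  struct weak_holder { void *referent; };

  struct weak_holder *make_weak(void *obj)
  {
    /* The holder must itself be GC-visible so the link location stays */
    /* valid; obj must be the base address of a GC heap object.        */
    struct weak_holder *h =
        (struct weak_holder *)GC_MALLOC(sizeof(struct weak_holder));
    h -> referent = obj;
    GC_general_register_disappearing_link(&h -> referent, obj);
    /* Once obj becomes unreachable, a collection clears h->referent   */
    /* to NULL, so readers must check for NULL before dereferencing.   */
    return h;
  }
#endif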

STATIC int GC_register_disappearing_link_inner(
                        struct dl_hashtbl_s *dl_hashtbl, void **link,
                        const void *obj, const char *tbl_log_name)
{
    struct disappearing_link *curr_dl;
    size_t index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if (EXPECT(GC_find_leak, FALSE)) return GC_UNIMPLEMENTED;
    LOCK();
    GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
    if (dl_hashtbl -> log_size == -1
        || dl_hashtbl -> entries > ((word)1 << dl_hashtbl -> log_size)) {
      GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
                    &dl_hashtbl -> log_size, &dl_hashtbl -> entries);
#     ifdef LINT2
        if (dl_hashtbl->log_size < 0) ABORT("log_size is negative");
#     endif
      GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                         1 << (unsigned)dl_hashtbl -> log_size);
    }
    index = HASH2(link, dl_hashtbl -> log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
        curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
        UNLOCK();
        return GC_DUPLICATE;
      }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link), NORMAL);
    if (0 == new_dl) {
      GC_oom_func oom_fn = GC_oom_fn;
      UNLOCK();
      new_dl = (struct disappearing_link *)
                (*oom_fn)(sizeof(struct disappearing_link));
      if (0 == new_dl) {
        return GC_NO_MEMORY;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate the index since the table may have grown. */
      index = HASH2(link, dl_hashtbl -> log_size);
      /* Check again that our disappearing link is not in the table. */
      for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
          curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            /* Free unused new_dl returned by GC_oom_fn() */
            GC_free((void *)new_dl);
#         endif
          return GC_DUPLICATE;
        }
      }
    }
    new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
    dl_set_next(new_dl, dl_hashtbl -> head[index]);
    GC_dirty(new_dl);
    dl_hashtbl -> head[index] = new_dl;
    dl_hashtbl -> entries++;
    GC_dirty(dl_hashtbl->head + index);
    UNLOCK();
    return GC_SUCCESS;
}

GC_API int GC_CALL GC_general_register_disappearing_link(void **link,
                                                         const void *obj)
{
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
      ABORT("Bad arg to GC_general_register_disappearing_link");
    return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj,
                                               "dl");
}

#ifdef DBG_HDRS_ALL
# define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
#else
# define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
#endif

/* Unregisters given link and returns the link entry to free. */
GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl, void **link)
{
    struct disappearing_link *curr_dl;
    struct disappearing_link *prev_dl = NULL;
    size_t index;

    GC_ASSERT(I_HOLD_LOCK());
    if (dl_hashtbl->log_size == -1)
        return NULL; /* prevent integer shift by a negative amount */

    index = HASH2(link, dl_hashtbl->log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Remove found entry from the table. */
            if (NULL == prev_dl) {
                dl_hashtbl -> head[index] = dl_next(curr_dl);
                GC_dirty(dl_hashtbl->head + index);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
                GC_dirty(prev_dl);
            }
            dl_hashtbl -> entries--;
            break;
        }
        prev_dl = curr_dl;
    }
    return curr_dl;
}

GC_API int GC_CALL GC_unregister_disappearing_link(void **link)
{
    struct disappearing_link *curr_dl;
    DCL_LOCK_STATE;

    if (((word)link & (ALIGNMENT-1)) != 0) return 0; /* Nothing to do. */

    LOCK();
    curr_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
    UNLOCK();
    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
    return 1;
}

/* Toggle-ref support. */
#ifndef GC_TOGGLE_REFS_NOT_NEEDED
  typedef union {
    /* Lowest bit is used to distinguish between choices. */
    void *strong_ref;
    GC_hidden_pointer weak_ref;
  } GCToggleRef;

  STATIC GC_toggleref_func GC_toggleref_callback = 0;
  STATIC GCToggleRef *GC_toggleref_arr = NULL;
  STATIC int GC_toggleref_array_size = 0;
  STATIC int GC_toggleref_array_capacity = 0;

  GC_INNER void GC_process_togglerefs(void)
  {
    int i;
    int new_size = 0;
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);
      }
      if (NULL == obj) {
        continue;
      }
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
        break;
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        needs_barrier = TRUE;
        break;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        break;
      default:
        ABORT("Bad toggle-ref status returned by callback");
      }
    }

    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    }
    if (needs_barrier)
      GC_dirty(GC_toggleref_arr); /* entire object */
  }

  STATIC void GC_normal_finalize_mark_proc(ptr_t);

  static void push_and_mark_object(void *p)
  {
    GC_normal_finalize_mark_proc((ptr_t)p);
    while (!GC_mark_stack_empty()) {
      MARK_FROM_MARK_STACK();
    }
    GC_set_mark_bit(p);
    if (GC_mark_state != MS_NONE) {
      while (!GC_mark_some(0)) {
        /* Empty. */
      }
    }
  }

  STATIC void GC_mark_togglerefs(void)
  {
    int i;
    if (NULL == GC_toggleref_arr)
      return;

    /* TODO: Hide GC_toggleref_arr to avoid its marking from roots. */
    GC_set_mark_bit(GC_toggleref_arr);
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      void *obj = GC_toggleref_arr[i].strong_ref;
      if (obj != NULL && ((word)obj & 1) == 0) {
        push_and_mark_object(obj);
      }
    }
  }

  STATIC void GC_clear_togglerefs(void)
  {
    int i;
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
        if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
          GC_toggleref_arr[i].weak_ref = 0;
        } else {
          /* No need to copy, BDWGC is a non-moving collector. */
        }
      }
    }
  }

  GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn)
  {
    DCL_LOCK_STATE;

    LOCK();
    GC_toggleref_callback = fn;
    UNLOCK();
  }

  GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void)
  {
    GC_toggleref_func fn;
    DCL_LOCK_STATE;

    LOCK();
    fn = GC_toggleref_callback;
    UNLOCK();
    return fn;
  }

  static GC_bool ensure_toggleref_capacity(int capacity_inc)
  {
    GC_ASSERT(capacity_inc >= 0);
    GC_ASSERT(I_HOLD_LOCK());
    if (NULL == GC_toggleref_arr) {
      GC_toggleref_array_capacity = 32; /* initial capacity */
      GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == GC_toggleref_arr)
        return FALSE;
    }
    if ((unsigned)GC_toggleref_array_size + (unsigned)capacity_inc
        >= (unsigned)GC_toggleref_array_capacity) {
      GCToggleRef *new_array;
      while ((unsigned)GC_toggleref_array_capacity
             < (unsigned)GC_toggleref_array_size + (unsigned)capacity_inc) {
        GC_toggleref_array_capacity *= 2;
        if (GC_toggleref_array_capacity < 0) /* overflow */
          return FALSE;
      }

      new_array = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == new_array)
        return FALSE;
      if (EXPECT(GC_toggleref_array_size > 0, TRUE))
        BCOPY(GC_toggleref_arr, new_array,
              GC_toggleref_array_size * sizeof(GCToggleRef));
      GC_INTERNAL_FREE(GC_toggleref_arr);
      GC_toggleref_arr = new_array;
    }
    return TRUE;
  }

  GC_API int GC_CALL GC_toggleref_add(void *obj, int is_strong_ref)
  {
    int res = GC_SUCCESS;
    DCL_LOCK_STATE;

    GC_ASSERT(NONNULL_ARG_NOT_NULL(obj));
    LOCK();
    if (GC_toggleref_callback != 0) {
      if (!ensure_toggleref_capacity(1)) {
        res = GC_NO_MEMORY;
      } else {
        GC_toggleref_arr[GC_toggleref_array_size].strong_ref =
                        is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
        if (is_strong_ref)
          GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
        GC_toggleref_array_size++;
      }
    }
    UNLOCK();
    return res;
  }
#endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
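
/* A minimal client-side sketch (illustrative only) of wiring up the   */
/* toggle-ref machinery; the callback policy here is an assumption     */
/* made for the example (real clients, e.g. language bindings, decide  */
/* strength from their own bookkeeping).                               */
#if 0
  static GC_ToggleRefStatus GC_CALLBACK my_toggleref_policy(void *obj)
  {
    /* Return GC_TOGGLE_REF_STRONG to keep obj strongly referenced,    */
    /* GC_TOGGLE_REF_WEAK to let it be collected once unreachable,     */
    /* or GC_TOGGLE_REF_DROP to forget the entry altogether.           */
    return GC_TOGGLE_REF_WEAK;
  }

  static void setup_toggleref(void *obj)
  {
    GC_set_toggleref_func(my_toggleref_policy);
    /* Register obj with an initially strong reference; each           */
    /* collection re-queries the policy via GC_process_togglerefs().   */
    (void)GC_toggleref_add(obj, 1 /* is_strong_ref */);
  }
#endif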

/* Finalizer callback support. */
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;

GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn)
{
  DCL_LOCK_STATE;

  LOCK();
  GC_object_finalized_proc = fn;
  UNLOCK();
}

GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
{
  GC_await_finalize_proc fn;
  DCL_LOCK_STATE;

  LOCK();
  fn = GC_object_finalized_proc;
  UNLOCK();
  return fn;
}

#ifndef GC_LONG_REFS_NOT_NEEDED
  GC_API int GC_CALL GC_register_long_link(void **link, const void *obj)
  {
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
      ABORT("Bad arg to GC_register_long_link");
    return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,
                                               "long dl");
  }

  GC_API int GC_CALL GC_unregister_long_link(void **link)
  {
    struct disappearing_link *curr_dl;
    DCL_LOCK_STATE;

    if (((word)link & (ALIGNMENT-1)) != 0) return 0; /* Nothing to do. */

    LOCK();
    curr_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
    UNLOCK();
    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
    return 1;
  }
#endif /* !GC_LONG_REFS_NOT_NEEDED */

#ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
  /* Moves a link.  Assumes the lock is held. */
  STATIC int GC_move_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl,
                                void **link, void **new_link)
  {
    struct disappearing_link *curr_dl, *prev_dl, *new_dl;
    size_t curr_index, new_index;
    word curr_hidden_link;
    word new_hidden_link;

    GC_ASSERT(I_HOLD_LOCK());
    if (dl_hashtbl->log_size == -1)
      return GC_NOT_FOUND; /* prevent integer shift by a negative amount */

    /* Find current link. */
    curr_index = HASH2(link, dl_hashtbl -> log_size);
    curr_hidden_link = GC_HIDE_POINTER(link);
    prev_dl = NULL;
    for (curr_dl = dl_hashtbl -> head[curr_index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl -> dl_hidden_link == curr_hidden_link)
        break;
      prev_dl = curr_dl;
    }

    if (NULL == curr_dl) {
      return GC_NOT_FOUND;
    }

    if (link == new_link) {
      return GC_SUCCESS; /* Nothing to do. */
    }

    /* Link found; now check that new_link is not already present. */
    new_index = HASH2(new_link, dl_hashtbl -> log_size);
    new_hidden_link = GC_HIDE_POINTER(new_link);
    for (new_dl = dl_hashtbl -> head[new_index]; new_dl;
         new_dl = dl_next(new_dl)) {
      if (new_dl -> dl_hidden_link == new_hidden_link) {
        /* Target already registered; bail. */
        return GC_DUPLICATE;
      }
    }

    /* Remove from old, add to new, update link. */
    if (NULL == prev_dl) {
      dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
    } else {
      dl_set_next(prev_dl, dl_next(curr_dl));
      GC_dirty(prev_dl);
    }
    curr_dl -> dl_hidden_link = new_hidden_link;
    dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
    dl_hashtbl -> head[new_index] = curr_dl;
    GC_dirty(curr_dl);
    GC_dirty(dl_hashtbl->head); /* entire object */
    return GC_SUCCESS;
  }

  GC_API int GC_CALL GC_move_disappearing_link(void **link, void **new_link)
  {
    int result;
    DCL_LOCK_STATE;

    if (((word)new_link & (ALIGNMENT-1)) != 0
        || !NONNULL_ARG_NOT_NULL(new_link))
      ABORT("Bad new_link arg to GC_move_disappearing_link");
    if (((word)link & (ALIGNMENT-1)) != 0)
      return GC_NOT_FOUND; /* Nothing to do. */

    LOCK();
    result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
    UNLOCK();
    return result;
  }

# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_API int GC_CALL GC_move_long_link(void **link, void **new_link)
    {
      int result;
      DCL_LOCK_STATE;

      if (((word)new_link & (ALIGNMENT-1)) != 0
          || !NONNULL_ARG_NOT_NULL(new_link))
        ABORT("Bad new_link arg to GC_move_long_link");
      if (((word)link & (ALIGNMENT-1)) != 0)
        return GC_NOT_FOUND; /* Nothing to do. */

      LOCK();
      result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
      UNLOCK();
      return result;
    }
# endif /* !GC_LONG_REFS_NOT_NEEDED */
#endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */

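/* A minimal client-side sketch (illustrative only): if client code    */
/* relocates the word that holds a registered weak pointer (e.g. when  */
/* resizing a non-GC table of weak slots), the registration must move  */
/* with it.  The function name is an assumption for the example, and   */
/* *new_slot is expected to already hold the same pointer value.       */
#if 0
  void relocate_weak_slot(void **old_slot, void **new_slot)
  {
    switch (GC_move_disappearing_link(old_slot, new_slot)) {
    case GC_SUCCESS:
    case GC_NOT_FOUND:  /* old_slot was never registered */
      break;
    case GC_DUPLICATE:  /* new_slot is already registered */
    default:
      ABORT("Unexpected GC_move_disappearing_link result");
    }
  }
#endif
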
/* Possible finalization_marker procedures.  Note that mark stack      */
/* overflow is handled by the caller, and is not a disaster.           */
#if defined(_MSC_VER) && defined(I386)
  GC_ATTR_NOINLINE
  /* Otherwise some optimizer bug is tickled in VC for X86 (v19, at least). */
#endif
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
{
    GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
                                    GC_mark_stack + GC_mark_stack_size);
}

/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                              */
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q;
    ptr_t scan_limit;
    ptr_t target_limit = p + hhdr -> hb_sz - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
      scan_limit = p + descr - sizeof(word);
    } else {
      scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
      word r = *(word *)q;

      if (r < (word)p || r > (word)target_limit) {
        GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);
      }
    }
}

STATIC void GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED) {}

/* GC_unreachable_finalize_mark_proc is an alias for normal marking,   */
/* but it is explicitly tested for, and triggers different             */
/* behavior.  Objects registered in this way are not finalized         */
/* if they are reachable by other finalizable objects, even if those   */
/* other objects specify no ordering.                                  */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
{
    GC_normal_finalize_mark_proc(p);
}

/* Register a finalization function.  See gc.h for details.            */
/* The last parameter is a procedure that determines                   */
/* marking for finalization ordering.  Any objects marked              */
/* by that procedure will be guaranteed to not have been               */
/* finalized when this finalizer is invoked.                           */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    struct finalizable_object * curr_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    if (EXPECT(GC_find_leak, FALSE)) return;
    LOCK();
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
      GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                    &log_fo_table_size, &GC_fo_entries);
#     ifdef LINT2
        if (log_fo_table_size < 0) ABORT("log_size is negative");
#     endif
      GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                         1 << (unsigned)log_fo_table_size);
    }
    /* In the THREADS case we hold the allocation lock. */
    for (;;) {
      struct finalizable_object *prev_fo = NULL;
      GC_oom_func oom_fn;

      index = HASH2(obj, log_fo_table_size);
      curr_fo = GC_fnlz_roots.fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *)(curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for obj. */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
            GC_dirty(prev_fo);
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            GC_dirty(curr_fo);
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
              GC_dirty(prev_fo);
            }
          }
          if (NULL == prev_fo)
            GC_dirty(GC_fnlz_roots.fo_head + index);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
            }
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(). */
        GC_ASSERT(fn != 0);
#       ifdef LINT2
          if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner");
#       endif
        break;
      }
      if (fn == 0) {
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(obj, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence its finalizer would never run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object), NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* Not enough memory.  *ocd and *ofn remain unchanged. */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate the index since the table may have grown, and     */
      /* check again that our finalizer is not in the table.           */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_dirty(new_fo);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    GC_dirty(GC_fnlz_roots.fo_head + index);
    UNLOCK();
}

GC_API void GC_CALL GC_register_finalizer(void * obj,
                                GC_finalization_proc fn, void * cd,
                                GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
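
/* A minimal client-side sketch (illustrative only): registering a     */
/* finalizer on a heap object.  The wrapper struct and cleanup action  */
/* are assumptions for the example; see gc.h for the full contract.    */
#if 0
  struct wrapper { FILE *f; };

  static void GC_CALLBACK wrapper_finalizer(void *obj, void *cd)
  {
    /* Runs (at most once) after obj becomes unreachable; cd is the    */
    /* client data word passed at registration time (unused here).     */
    FILE *f = ((struct wrapper *)obj)->f;
    if (f != NULL) fclose(f);
  }

  struct wrapper *make_wrapper(FILE *f)
  {
    struct wrapper *w = (struct wrapper *)GC_MALLOC(sizeof(struct wrapper));
    w -> f = f;
    GC_register_finalizer(w, wrapper_finalizer, NULL /* cd */,
                          NULL /* ofn */, NULL /* ocd */);
    return w;
  }
#endif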

GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
                                GC_finalization_proc fn, void * cd,
                                GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}

GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
                                GC_finalization_proc fn, void * cd,
                                GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}

static GC_bool need_unreachable_finalization = FALSE;
        /* Avoid the work if this isn't used. */

GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
                                GC_finalization_proc fn, void * cd,
                                GC_finalization_proc *ofn, void ** ocd)
{
    need_unreachable_finalization = TRUE;
    GC_ASSERT(GC_java_finalization);
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
}

#ifndef NO_DEBUGGING
  STATIC void GC_dump_finalization_links(
                                const struct dl_hashtbl_s *dl_hashtbl)
  {
    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 :
                                (size_t)1 << dl_hashtbl->log_size;
    size_t i;

    for (i = 0; i < dl_size; i++) {
      struct disappearing_link *curr_dl;

      for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj);
        ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_link);

        GC_printf("Object: %p, link: %p\n",
                  (void *)real_ptr, (void *)real_link);
      }
    }
  }

  GC_API void GC_CALL GC_dump_finalization(void)
  {
    struct finalizable_object * curr_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;
    size_t i;

    GC_printf("Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);
#   endif
    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

        GC_printf("Finalizable object: %p\n", (void *)real_ptr);
      }
    }
  }
#endif /* !NO_DEBUGGING */

#ifndef SMALL_CONFIG
  STATIC word GC_old_dl_entries = 0; /* for stats printing */
# ifndef GC_LONG_REFS_NOT_NEEDED
    STATIC word GC_old_ll_entries = 0;
# endif
#endif /* !SMALL_CONFIG */

#ifndef THREADS
  /* Global variables to minimize the level of recursion when a client */
  /* finalizer allocates memory.                                       */
  STATIC int GC_finalizer_nested = 0;
                /* Only the lowest byte is used, the rest is           */
                /* padding for proper global data alignment            */
                /* required for some compilers (like Watcom).          */
  STATIC unsigned GC_finalizer_skipped = 0;

  /* Checks and updates the level of finalizer recursion.              */
  /* Returns NULL if GC_invoke_finalizers() should not be called by    */
  /* the collector (to minimize the risk of deep finalizer recursion), */
  /* otherwise returns a pointer to GC_finalizer_nested.               */
  STATIC unsigned char *GC_check_finalizer_nested(void)
  {
    unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;
    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level.            */
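      /* E.g., at nesting level 1 every second implicitly-      */
      /* triggered invocation proceeds, at level 2 one in four, */
      /* at level 3 one in eight, and so on; the test on        */
      /* 1U << nesting_level below implements this schedule.    */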
      if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
      GC_finalizer_skipped = 0;
    }
    *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
    return (unsigned char *)&GC_finalizer_nested;
  }
#endif /* !THREADS */

GC_INLINE void GC_make_disappearing_links_disappear(
                                        struct dl_hashtbl_s* dl_hashtbl,
                                        GC_bool is_remove_dangling)
{
  size_t i;
  size_t dl_size = dl_hashtbl->log_size == -1 ? 0
                        : (size_t)1 << dl_hashtbl->log_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  for (i = 0; i < dl_size; i++) {
    struct disappearing_link *curr_dl, *next_dl;
    struct disappearing_link *prev_dl = NULL;

    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
      next_dl = dl_next(curr_dl);
      if (is_remove_dangling) {
        ptr_t real_link = (ptr_t)GC_base(GC_REVEAL_POINTER(
                                                curr_dl->dl_hidden_link));

        if (NULL == real_link || EXPECT(GC_is_marked(real_link), TRUE)) {
          prev_dl = curr_dl;
          continue;
        }
      } else {
        if (EXPECT(GC_is_marked((ptr_t)GC_REVEAL_POINTER(
                                        curr_dl->dl_hidden_obj)), TRUE)) {
          prev_dl = curr_dl;
          continue;
        }
        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;
      }

      /* Delete curr_dl entry from dl_hashtbl. */
      if (NULL == prev_dl) {
        dl_hashtbl -> head[i] = next_dl;
        needs_barrier = TRUE;
      } else {
        dl_set_next(prev_dl, next_dl);
        GC_dirty(prev_dl);
      }
      GC_clear_mark_bit(curr_dl);
      dl_hashtbl -> entries--;
    }
  }
  if (needs_barrier)
    GC_dirty(dl_hashtbl -> head); /* entire object */
}

/* Called with the allocation lock held (but the world is running).    */
/* Causes disappearing links to disappear and unreachable objects to   */
/* be enqueued for finalization.                                       */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);

    /* Mark all objects reachable via chains of 1 or more pointers     */
    /* from finalizable objects.                                       */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
    /* Enqueue for finalization all objects that are still             */
    /* unreachable.                                                    */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table. */
            next_fo = fo_next(curr_fo);
            if (NULL == prev_fo) {
              GC_fnlz_roots.fo_head[i] = next_fo;
              if (GC_object_finalized_proc) {
                GC_dirty(GC_fnlz_roots.fo_head + i);
              } else {
                needs_barrier = TRUE;
              }
            } else {
              fo_set_next(prev_fo, next_fo);
              GC_dirty(prev_fo);
            }
            GC_fo_entries--;
            if (GC_object_finalized_proc)
              GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization. */
            fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
            GC_dirty(curr_fo);
            SET_FINALIZE_NOW(curr_fo);
            /* Unhide object pointer so any future collections will    */
            /* see it.                                                 */
            curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

    if (GC_java_finalization) {
      /* Make sure we mark everything reachable from objects finalized
         using the no_order mark_proc. */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
          if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
            GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          }
          if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
            GC_set_mark_bit(real_ptr);
          }
        }
      }

      /* Now revive finalize-when-unreachable objects reachable from
         other finalizable objects. */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
#       if defined(GC_ASSERTIONS) || defined(LINT2)
          if (curr_fo != NULL && log_fo_table_size < 0)
            ABORT("log_size is negative");
#       endif
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (NULL == prev_fo) {
                SET_FINALIZE_NOW(next_fo);
              } else {
                fo_set_next(prev_fo, next_fo);
                GC_dirty(prev_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_dirty(curr_fo);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              curr_fo = prev_fo;
              needs_barrier = TRUE;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
    }
    if (needs_barrier)
      GC_dirty(GC_fnlz_roots.fo_head); /* entire object */

    /* Remove dangling disappearing links. */
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_clear_togglerefs();
#   endif
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
      GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);
#   endif

    if (GC_fail_count) {
      /* Don't prevent running finalizers if there has been an         */
      /* allocation failure recently.                                  */
#     ifdef THREADS
        GC_reset_finalizer_nested();
#     else
        GC_finalizer_nested = 0;
#     endif
    }
}

#ifndef JAVA_FINALIZATION_NOT_NEEDED

  /* Enqueue all remaining finalizers to be run. */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    struct finalizable_object * next_fo;
    int i;
    int fo_size;

    GC_ASSERT(I_HOLD_LOCK());
    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      struct finalizable_object * curr_fo = GC_fnlz_roots.fo_head[i];

      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

        GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
        GC_set_mark_bit(real_ptr);

        next_fo = fo_next(curr_fo);

        /* Add to list of objects awaiting finalization. */
        fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
        GC_dirty(curr_fo);
        SET_FINALIZE_NOW(curr_fo);

        /* Unhide object pointer so any future collections will        */
        /* see it.                                                     */
        curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
        curr_fo = next_fo;
      }
    }
    GC_fo_entries = 0; /* all entries deleted from the hash table */
  }

  /* Invoke all remaining finalizers that haven't yet been run.
   * This is needed for strict compliance with the Java standard,
   * which can make the runtime guarantee that all finalizers are run.
   * Unfortunately, the Java standard implies we have to keep running
   * finalizers until there are no more left, a potential infinite loop.
   * YUCK.
   * Note that this is even more dangerous than the usual Java
   * finalizers, in that objects reachable from static variables
   * may have been finalized when these finalizers are run.
   * Finalizers run at this point must be prepared to deal with a
   * mostly broken world.
   * This routine is externally callable, so is called without
   * the allocation lock.
   */
  GC_API void GC_CALL GC_finalize_all(void)
  {
    DCL_LOCK_STATE;

    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good  */
      /* idea when we should be notifying another thread to run them.  */
      /* But otherwise we don't have a great way to wait for them to   */
      /* run.                                                          */
      LOCK();
    }
    UNLOCK();
  }
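
  /* A minimal client-side sketch (illustrative only): draining all    */
  /* finalizers at process shutdown, accepting the "mostly broken      */
  /* world" caveat described above.  The function name is an           */
  /* assumption for the example.                                       */
#if 0
  void shutdown_runtime(void)
  {
    /* Force a collection first so recently-dropped objects are        */
    /* detected, then run every remaining finalizer.                   */
    GC_gcollect();
    GC_finalize_all();
  }
#endif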

#endif /* !JAVA_FINALIZATION_NOT_NEEDED */

/* Returns true if it is worth calling GC_invoke_finalizers.  (Useful  */
/* if finalizers can only be called from some kind of "safe state" and */
/* getting into that safe state is expensive.)                         */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
{
# ifdef AO_HAVE_load
    return AO_load((volatile AO_t *)&GC_fnlz_roots.finalize_now) != 0;
# else
    return GC_fnlz_roots.finalize_now != NULL;
# endif /* AO_HAVE_load */
}
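
/* A minimal client-side sketch (illustrative only): a runtime that    */
/* can only run finalizers at a "safe point" polls cheaply first.      */
/* GC_set_finalize_on_demand(1) is assumed to have been called, so the */
/* collector queues finalizers instead of running them itself.         */
#if 0
  void at_safe_point(void)
  {
    if (GC_should_invoke_finalizers()) {
      int n = GC_invoke_finalizers(); /* returns the count invoked */
      /* n could be logged or used to throttle the next poll. */
      (void)n;
    }
  }
#endif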

/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                           */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
    int count = 0;
    word bytes_freed_before = 0; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    while (GC_should_invoke_finalizers()) {
        struct finalizable_object * curr_fo;

#       ifdef THREADS
            LOCK();
#       endif
        if (count == 0) {
            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
        }
        curr_fo = GC_fnlz_roots.finalize_now;
#       ifdef THREADS
            if (curr_fo != NULL)
                SET_FINALIZE_NOW(fo_next(curr_fo));
            UNLOCK();
            if (curr_fo == 0) break;
#       else
            GC_fnlz_roots.finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
        /* Explicit freeing of curr_fo is probably a bad idea.  */
        /* It throws off accounting if nearly all objects are   */
        /* finalizable.  Otherwise it should not matter.        */
    }
    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0
#         if defined(THREADS) && !defined(THREAD_SANITIZER)
            /* A quick check whether some memory was freed.     */
            /* The race with GC_free() is safe to be ignored    */
            /* because we only need to know if the current      */
            /* thread has deallocated something.                */
            && bytes_freed_before != GC_bytes_freed
#         endif
       ) {
        LOCK();
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
        UNLOCK();
    }
    return count;
}

static word last_finalizer_notification = 0;

GC_INNER void GC_notify_or_invoke_finalizers(void)
{
    GC_finalizer_notifier_proc notifier_fn = 0;
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1; /* Skip first one. */
#   endif
    DCL_LOCK_STATE;

#   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
       && !defined(MAKE_BACK_GRAPH)
      /* Quick check (while unlocked) for an empty finalization queue. */
      if (!GC_should_invoke_finalizers())
        return;
#   endif
    LOCK();

    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      if (GC_gc_no > last_back_trace_gc_no) {
#       ifdef KEEP_BACK_PTRS
          long i;
          /* Stops when GC_gc_no wraps; that's OK. */
          last_back_trace_gc_no = GC_WORD_MAX;  /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it.  It probably shouldn't.                   */
              UNLOCK();
              GC_generate_random_backtrace_no_gc();
              LOCK();
          }
          last_back_trace_gc_no = GC_gc_no;
#       endif
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height) {
            GC_print_back_graph_stats();
          }
#       endif
      }
#   endif
    if (NULL == GC_fnlz_roots.finalize_now) {
      UNLOCK();
      return;
    }

    if (!GC_finalize_on_demand) {
      unsigned char *pnested = GC_check_finalizer_nested();
      UNLOCK();
      /* Skip GC_invoke_finalizers() if nested */
      if (pnested != NULL) {
        (void)GC_invoke_finalizers();
        *pnested = 0; /* Reset since no more finalizers. */
#       ifndef THREADS
          GC_ASSERT(NULL == GC_fnlz_roots.finalize_now);
#       endif   /* Otherwise GC can run concurrently and add more */
      }
      return;
    }

    /* These variables require synchronization to avoid data races. */
    if (last_finalizer_notification != GC_gc_no) {
      last_finalizer_notification = GC_gc_no;
      notifier_fn = GC_finalizer_notifier;
    }
    UNLOCK();
    if (notifier_fn != 0)
      (*notifier_fn)(); /* Invoke the notifier */
}

#ifndef SMALL_CONFIG
# ifndef GC_LONG_REFS_NOT_NEEDED
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (x)
# else
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (y)
# endif

  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));

    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
      ++ready;
    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
  }
#endif /* !SMALL_CONFIG */

#endif /* !GC_NO_FINALIZATION */