1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17 #include "private/gc_priv.h"
18
19 #ifdef ENABLE_DISCLAIM
20 # include "gc_disclaim.h"
21 #endif
22
23 #include <stdio.h>
24
GC_INNER signed_word GC_bytes_found = 0;
                        /* Number of bytes of memory reclaimed     */
                        /* minus the number of bytes originally    */
                        /* on free lists which we had to drop.     */

#if defined(PARALLEL_MARK)
  GC_INNER signed_word GC_fl_builder_count = 0;
                /* Number of threads currently building free lists without */
                /* holding GC lock.  It is not safe to collect if this is  */
                /* nonzero.  Also, together with the mark lock, it is used as */
                /* a semaphore during marker threads startup.              */
#endif /* PARALLEL_MARK */

/* We defer printing of leaked objects until we're done with the GC     */
/* cycle, since the routine for printing objects needs to run outside   */
/* the collector, e.g. without the allocation lock.                     */
#ifndef MAX_LEAKED
# define MAX_LEAKED 40
#endif
/* Table of leaked objects found during the current cycle; filled by    */
/* GC_add_leaked and drained (printed and freed) by                     */
/* GC_print_all_errors.                                                 */
STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
STATIC unsigned GC_n_leaked = 0;        /* number of used table entries */

/* Set once any leaked or smashed object has been registered; tested    */
/* by GC_print_all_errors.                                              */
GC_INNER GC_bool GC_have_errors = FALSE;

#if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
  /* Defined near the end of this file; sweeps kinds with unconditional */
  /* marking enabled before any significant marking work is done.       */
  STATIC void GC_reclaim_unconditionally_marked(void);
#endif
52
GC_add_leaked(ptr_t leaked)53 GC_INLINE void GC_add_leaked(ptr_t leaked)
54 {
55 # ifndef SHORT_DBG_HDRS
56 if (GC_findleak_delay_free && !GC_check_leaked(leaked))
57 return;
58 # endif
59
60 GC_have_errors = TRUE;
61 if (GC_n_leaked < MAX_LEAKED) {
62 GC_leaked[GC_n_leaked++] = leaked;
63 /* Make sure it's not reclaimed this cycle */
64 GC_set_mark_bit(leaked);
65 }
66 }
67
/* Print all objects on the list after printing any smashed objects.    */
/* Clear both lists.  Called without the allocation lock held.          */
GC_INNER void GC_print_all_errors(void)
{
    static GC_bool printing_errors = FALSE;
                /* Guards against concurrent/reentrant reporting.       */
    GC_bool have_errors;
    unsigned i, n_leaked;
    ptr_t leaked[MAX_LEAKED];
                /* Local snapshot of GC_leaked, filled under the lock   */
                /* so printing itself can run without holding it.       */
    DCL_LOCK_STATE;

    LOCK();
    if (printing_errors) {
        /* Another invocation is already reporting; nothing to do.      */
        UNLOCK();
        return;
    }
    have_errors = GC_have_errors;
    printing_errors = TRUE;
    /* Move the global leak table into the local snapshot and clear it, */
    /* all while still holding the lock.                                */
    n_leaked = GC_n_leaked;
    if (n_leaked > 0) {
      GC_ASSERT(n_leaked <= MAX_LEAKED);
      BCOPY(GC_leaked, leaked, n_leaked * sizeof(ptr_t));
      GC_n_leaked = 0;
      BZERO(GC_leaked, n_leaked * sizeof(ptr_t));
    }
    UNLOCK();

    if (GC_debugging_started) {
      GC_print_all_smashed();
    } else {
      have_errors = FALSE;
        /* Smashed objects are only tracked in debugging mode, so any   */
        /* remaining errors must come from the leak list below.         */
    }

    if (n_leaked > 0) {
        GC_err_printf("Found %u leaked objects:\n", n_leaked);
        have_errors = TRUE;
    }
    for (i = 0; i < n_leaked; i++) {
        ptr_t p = leaked[i];
#       ifndef SKIP_LEAKED_OBJECTS_PRINTING
          GC_print_heap_obj(p);
#       endif
        GC_free(p);
          /* The object was kept alive by GC_add_leaked only for this   */
          /* report; release it now.                                    */
    }

    if (have_errors
#       ifndef GC_ABORT_ON_LEAK
          /* Abort only if requested via the environment.               */
          && GETENV("GC_ABORT_ON_LEAK") != NULL
#       endif
        ) {
      ABORT("Leaked or smashed objects encountered");
    }

    LOCK();
    printing_errors = FALSE;
    UNLOCK();
}
124
125
126 /*
127 * reclaim phase
128 *
129 */
130
131 /* Test whether a block is completely empty, i.e. contains no marked */
132 /* objects. This does not require the block to be in physical memory. */
GC_block_empty(hdr * hhdr)133 GC_INNER GC_bool GC_block_empty(hdr *hhdr)
134 {
135 return (hhdr -> hb_n_marks == 0);
136 }
137
GC_block_nearly_full(hdr * hhdr,word sz)138 STATIC GC_bool GC_block_nearly_full(hdr *hhdr, word sz)
139 {
140 return hhdr -> hb_n_marks > HBLK_OBJS(sz) * 7 / 8;
141 }
142
143 /* TODO: This should perhaps again be specialized for USE_MARK_BYTES */
144 /* and USE_MARK_BITS cases. */
145
/*
 * Restore unmarked small objects in h of size sz to the object
 * free list.  Returns the new list.
 * Clears unmarked objects.  Sz is in bytes.
 */
STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, word sz,
                              ptr_t list, signed_word *count)
{
    word bit_no = 0;            /* mark-bit index of the current object */
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;
                /* Total size of unmarked objects put on the free list; */
                /* added to *count on return.                           */

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
#   ifndef THREADS
      GC_ASSERT(sz == hhdr -> hb_sz);
#   else
      /* Skip the assertion because of a potential race with GC_realloc. */
#   endif
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);
                /* Start address of the last object fitting in the block. */

    /* go through all words in block */
        while ((word)p <= (word)plim) {
            if (mark_bit_from_hdr(hhdr, bit_no)) {
                /* Object is live; just step over it. */
                p = (word *)((ptr_t)p + sz);
            } else {
                n_bytes_found += sz;
                /* object is available - put on list */
                    obj_link(p) = list;
                    list = ((ptr_t)p);
                /* Clear object, advance p to next object in the process */
                    q = (word *)((ptr_t)p + sz);
#                   ifdef USE_MARK_BYTES
                      /* CLEAR_DOUBLE requires two-word alignment.      */
                      GC_ASSERT(!(sz & 1)
                                && !((word)p & (2 * sizeof(word) - 1)));
                      p[1] = 0;         /* word 0 holds the link; clear word 1 */
                      p += 2;
                      while ((word)p < (word)q) {
                        CLEAR_DOUBLE(p);
                        p += 2;
                      }
#                   else
                      p++; /* Skip link field */
                      while ((word)p < (word)q) {
                        *p++ = 0;
                      }
#                   endif
            }
            bit_no += MARK_BIT_OFFSET(sz);
        }
    *count += n_bytes_found;
    return(list);
}
200
/* The same thing, but don't clear objects: */
/* Used (see GC_reclaim_generic) only for blocks asserted pointer-free, */
/* whose contents need not be zeroed before reuse.                      */
STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, word sz,
                               ptr_t list, signed_word *count)
{
    word bit_no = 0;            /* mark-bit index of the current object */
    word *p, *plim;
    signed_word n_bytes_found = 0;

#   ifndef THREADS
      GC_ASSERT(sz == hhdr -> hb_sz);
#   endif
    p = (word *)(hbp->hb_body);
    plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);

    /* go through all words in block */
        while ((word)p <= (word)plim) {
            if (!mark_bit_from_hdr(hhdr, bit_no)) {
                n_bytes_found += sz;
                /* object is available - put on list */
                    obj_link(p) = list;
                    list = ((ptr_t)p);
            }
            p = (word *)((ptr_t)p + sz);
            bit_no += MARK_BIT_OFFSET(sz);
        }
    *count += n_bytes_found;
    return(list);
}
229
#ifdef ENABLE_DISCLAIM
  /* Call reclaim notifier for block's kind on each unmarked object in  */
  /* block, all within a pair of corresponding enter/leave callbacks.   */
  STATIC ptr_t GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, word sz,
                                       ptr_t list, signed_word *count)
  {
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;
    struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
    int (GC_CALLBACK *disclaim)(void *) = ok->ok_disclaim_proc;
                /* Notifier for this kind; a nonzero result means the   */
                /* object refuses reclamation and is resurrected.       */

#   ifndef THREADS
      GC_ASSERT(sz == hhdr -> hb_sz);
#   endif
    p = (word *)(hbp -> hb_body);
    plim = (word *)((ptr_t)p + HBLKSIZE - sz);

    while ((word)p <= (word)plim) {
        int marked = mark_bit_from_hdr(hhdr, bit_no);
        if (!marked && (*disclaim)(p)) {
            /* Resurrect: re-mark the object and keep the count exact.  */
            set_mark_bit_from_hdr(hhdr, bit_no);
            hhdr -> hb_n_marks++;
            marked = 1;
        }
        if (marked)
            p = (word *)((ptr_t)p + sz);
        else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              /* CLEAR_DOUBLE requires two-word alignment.              */
              GC_ASSERT((sz & 1) == 0);
              GC_ASSERT(((word)p & (2 * sizeof(word) - 1)) == 0);
              p[1] = 0;         /* word 0 holds the link; clear word 1  */
              p += 2;
              while ((word)p < (word)q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while ((word)p < (word)q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return list;
  }
#endif /* ENABLE_DISCLAIM */
286
287 /* Don't really reclaim objects, just check for unmarked ones: */
GC_reclaim_check(struct hblk * hbp,hdr * hhdr,word sz)288 STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
289 {
290 word bit_no;
291 ptr_t p, plim;
292
293 # ifndef THREADS
294 GC_ASSERT(sz == hhdr -> hb_sz);
295 # endif
296 /* go through all words in block */
297 p = hbp->hb_body;
298 plim = p + HBLKSIZE - sz;
299 for (bit_no = 0; (word)p <= (word)plim;
300 p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
301 if (!mark_bit_from_hdr(hhdr, bit_no)) {
302 GC_add_leaked(p);
303 }
304 }
305 }
306
/* Is a pointer-free block?  Same as IS_PTRFREE macro (in os_dep.c) but */
/* uses unordered atomic access to avoid racing with GC_realloc.        */
/* A zero descriptor means the block's objects contain no pointers.     */
#ifdef AO_HAVE_load
# define IS_PTRFREE_SAFE(hhdr) \
                (AO_load((volatile AO_t *)&(hhdr)->hb_descr) == 0)
#else
  /* No race as GC_realloc holds the lock while updating hb_descr.      */
# define IS_PTRFREE_SAFE(hhdr) ((hhdr)->hb_descr == 0)
#endif
316
/*
 * Generic procedure to rebuild a free list in hbp.
 * Also called directly from GC_malloc_many.
 * Sz is now in bytes.
 */
GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
                                  GC_bool init, ptr_t list,
                                  signed_word *count)
{
    ptr_t result;

    GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
#   ifndef GC_DISABLE_INCREMENTAL
      /* The sweep writes to the block, so drop any write protection.   */
      GC_remove_protection(hbp, 1, IS_PTRFREE_SAFE(hhdr));
#   endif
#   ifdef ENABLE_DISCLAIM
      if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
        /* Run disclaim notifiers while sweeping.                       */
        result = GC_disclaim_and_reclaim(hbp, hhdr, sz, list, count);
      } else
#   endif
    /* else */ if (init || GC_debugging_started) {
      result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
    } else {
      /* Skipping the clearing is only legal for pointer-free blocks.   */
      GC_ASSERT(IS_PTRFREE_SAFE(hhdr));
      result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
    }
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
        /* Re-set all mark bits for uncollectible kinds.                */
    return result;
}
346
/*
 * Restore unmarked small objects in the block pointed to by hbp
 * to the appropriate object free list.
 * If entirely empty blocks are to be completely deallocated, then
 * caller should perform that check.
 */
STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp, word sz,
                                            GC_bool report_if_found)
{
    hdr *hhdr = HDR(hbp);
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
                /* Free list head for this kind and size class.         */

    /* Record when the block was last swept (low bits of GC_gc_no).     */
    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;

    if (report_if_found) {
        /* Leak-detection mode: report unmarked objects, reclaim none.  */
        GC_reclaim_check(hbp, hhdr, sz);
    } else {
        /* Prepend the block's reclaimable objects to the free list.    */
        *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
                                  (ptr_t)(*flh), &GC_bytes_found);
    }
}
369
#ifdef ENABLE_DISCLAIM
  /* Sweep hbp (running disclaim notifiers); if every object was        */
  /* reclaimable and none was resurrected, free the whole block,        */
  /* otherwise publish the rebuilt free list.                           */
  STATIC void GC_disclaim_and_reclaim_or_free_small_block(struct hblk *hbp)
  {
    hdr *hhdr = HDR(hbp);
    word sz = hhdr -> hb_sz;
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
    void *flh_next;

    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
    flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
                                  (ptr_t)(*flh), &GC_bytes_found);
    if (hhdr -> hb_n_marks)
        /* Some object was resurrected by its disclaim notifier; keep   */
        /* the block and install the new free list.                     */
        *flh = flh_next;
    else {
        /* The block is entirely empty; give it back to the heap.       */
        GC_bytes_found += HBLKSIZE;
        GC_freehblk(hbp);
    }
  }
#endif /* ENABLE_DISCLAIM */
390
/*
 * Restore an unmarked large object or an entirely empty blocks of small objects
 * to the heap block free list.
 * Otherwise enqueue the block for later processing
 * by GC_reclaim_small_nonempty_block.
 * If report_if_found is TRUE, then process any block immediately, and
 * simply report free objects; do not actually reclaim them.
 */
STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
{
    hdr * hhdr = HDR(hbp);
    word sz;    /* size of objects in current block */
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];

#   ifdef AO_HAVE_load
        /* Atomic access is used to avoid racing with GC_realloc.       */
        sz = (word)AO_load((volatile AO_t *)&hhdr->hb_sz);
#   else
        /* No race as GC_realloc holds the lock while updating hb_sz.   */
        sz = hhdr -> hb_sz;
#   endif
    if( sz > MAXOBJBYTES ) {  /* 1 big object */
        if( !mark_bit_from_hdr(hhdr, 0) ) {
            /* The single large object is unreachable.                  */
            if (report_if_found) {
              GC_add_leaked((ptr_t)hbp);
            } else {
              word blocks;

#             ifdef ENABLE_DISCLAIM
                if (EXPECT(hhdr->hb_flags & HAS_DISCLAIM, 0)) {
                  if ((*ok->ok_disclaim_proc)(hbp)) {
                    /* Not disclaimed => resurrect the object. */
                    set_mark_bit_from_hdr(hhdr, 0);
                    goto in_use;
                  }
                }
#             endif
              blocks = OBJ_SZ_TO_BLOCKS(sz);
              if (blocks > 1) {
                /* Adjust large-allocation statistics for multi-block   */
                /* objects.                                             */
                GC_large_allocd_bytes -= blocks * HBLKSIZE;
              }
              GC_bytes_found += sz;
              GC_freehblk(hbp);
            }
        } else {
#        ifdef ENABLE_DISCLAIM
           in_use:
#        endif
            /* The object is live; update the in-use accounting.        */
            if (IS_PTRFREE_SAFE(hhdr)) {
              GC_atomic_in_use += sz;
            } else {
              GC_composite_in_use += sz;
            }
        }
    } else {
        /* A block of small objects. */
        GC_bool empty = GC_block_empty(hhdr);
#       ifdef PARALLEL_MARK
          /* Count can be low or one too high because we sometimes      */
          /* have to ignore decrements.  Objects can also potentially   */
          /* be repeatedly marked by each marker.                       */
          /* Here we assume two markers, but this is extremely          */
          /* unlikely to fail spuriously with more.  And if it does, it */
          /* should be looked at.                                       */
          GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
#       else
          GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
#       endif
        if (report_if_found) {
          GC_reclaim_small_nonempty_block(hbp, sz,
                                          TRUE /* report_if_found */);
        } else if (empty) {
#       ifdef ENABLE_DISCLAIM
          if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
            /* Notifiers may resurrect objects; let the helper decide   */
            /* whether to free the block.                               */
            GC_disclaim_and_reclaim_or_free_small_block(hbp);
          } else
#       endif
          /* else */ {
            GC_bytes_found += HBLKSIZE;
            GC_freehblk(hbp);
          }
        } else if (GC_find_leak || !GC_block_nearly_full(hhdr, sz)) {
          /* group of smaller objects, enqueue the real work */
          struct hblk **rlh = ok -> ok_reclaim_list;

          if (rlh != NULL) {
            /* Push the block on the reclaim list for its size class.   */
            rlh += BYTES_TO_GRANULES(sz);
            hhdr -> hb_next = *rlh;
            *rlh = hbp;
          }
        } /* else not worth salvaging. */
        /* We used to do the nearly_full check later, but we    */
        /* already have the right cache context here.  Also     */
        /* doing it here avoids some silly lock contention in   */
        /* GC_malloc_many.                                      */
        if (IS_PTRFREE_SAFE(hhdr)) {
          GC_atomic_in_use += sz * hhdr -> hb_n_marks;
        } else {
          GC_composite_in_use += sz * hhdr -> hb_n_marks;
        }
    }
}
492
493 #if !defined(NO_DEBUGGING)
494 /* Routines to gather and print heap block info */
495 /* intended for debugging. Otherwise should be called */
496 /* with lock. */
497
/* Aggregate counters accumulated by GC_print_block_descr and printed   */
/* by GC_print_block_list.                                              */
struct Print_stats
{
        size_t number_of_blocks;
        size_t total_bytes;
};
503
504 #ifdef USE_MARK_BYTES
505
/* Return the number of set mark bits in the given header.      */
/* Remains externally visible as used by GNU GCJ currently.     */
unsigned GC_n_set_marks(hdr *hhdr)
{
    unsigned result = 0;
    word i;
    word sz = hhdr -> hb_sz;
    word offset = MARK_BIT_OFFSET(sz);
                /* Stride between consecutive objects' mark entries.    */
    word limit = FINAL_MARK_BIT(sz);
                /* Index of the mark entry one past the last object;    */
                /* deliberately excluded from the count.                */

    for (i = 0; i < limit; i += offset) {
        /* Each mark "bit" is a whole byte here; assumes value 0 or 1.  */
        result += hhdr -> hb_marks[i];
    }
    GC_ASSERT(hhdr -> hb_marks[limit]); /* the final mark is always set */
    return(result);
}
522
523 #else
524
525 /* Number of set bits in a word. Not performance critical. */
set_bits(word n)526 static unsigned set_bits(word n)
527 {
528 word m = n;
529 unsigned result = 0;
530
531 while (m > 0) {
532 if (m & 1) result++;
533 m >>= 1;
534 }
535 return(result);
536 }
537
/* Return the number of set mark bits in the given header (bit-packed   */
/* mark representation).                                                */
unsigned GC_n_set_marks(hdr *hhdr)
{
    unsigned result = 0;
    word i;
    word n_mark_words;          /* number of words holding mark bits    */
#   ifdef MARK_BIT_PER_OBJ
      word n_objs = HBLK_OBJS(hhdr -> hb_sz);

      if (0 == n_objs) n_objs = 1;      /* one object spans the block   */
      n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);    /* round up     */
#   else /* MARK_BIT_PER_GRANULE */
      n_mark_words = MARK_BITS_SZ;
#   endif
    /* Count the bits of all but the last mark word...                  */
    for (i = 0; i < n_mark_words - 1; i++) {
        result += set_bits(hhdr -> hb_marks[i]);
    }
#   ifdef MARK_BIT_PER_OBJ
      /* ...then shift out the bits beyond n_objs from the last word    */
      /* (this drops the "final" mark bit when it lies in that word).   */
      result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
                         << (n_mark_words * WORDSZ - n_objs));
#   else
      /* NOTE(review): no masking is performed here, so a set           */
      /* FINAL_MARK_BIT falling inside the last mark word would be      */
      /* counted - confirm this agrees with the USE_MARK_BYTES variant  */
      /* and the comment below.                                         */
      result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
#   endif
    return result; /* the number of set bits excluding the one past the end */
}
562
563 #endif /* !USE_MARK_BYTES */
564
/* Callback for GC_apply_to_all_blocks: print one CSV-style line for    */
/* the block (kind, object size, mark count, object count) and add the  */
/* block to the totals in the struct Print_stats passed via raw_ps.     */
STATIC void GC_print_block_descr(struct hblk *h,
                                 word /* struct PrintStats */ raw_ps)
{
    hdr * hhdr = HDR(h);
    size_t bytes = hhdr -> hb_sz;
    struct Print_stats *ps;
    unsigned n_marks = GC_n_set_marks(hhdr);
    unsigned n_objs = (unsigned)HBLK_OBJS(bytes);

    if (0 == n_objs) n_objs = 1;        /* a single large object        */
    if (hhdr -> hb_n_marks != n_marks) {
      /* The cached mark count disagrees with a recount; flag it with   */
      /* "!=" in the output.                                            */
      GC_printf("%u,%u,%u!=%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
                (unsigned)hhdr->hb_n_marks, n_marks, n_objs);
    } else {
      GC_printf("%u,%u,%u,%u\n", hhdr->hb_obj_kind, (unsigned)bytes,
                n_marks, n_objs);
    }

    ps = (struct Print_stats *)raw_ps;
    ps->total_bytes += (bytes + (HBLKSIZE-1)) & ~(HBLKSIZE-1); /* round up */
    ps->number_of_blocks++;
}
587
GC_print_block_list(void)588 void GC_print_block_list(void)
589 {
590 struct Print_stats pstats;
591
592 GC_printf("kind(0=ptrfree,1=normal,2=unc.),"
593 "size_in_bytes,#_marks_set,#objs\n");
594 pstats.number_of_blocks = 0;
595 pstats.total_bytes = 0;
596 GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
597 GC_printf("blocks= %lu, bytes= %lu\n",
598 (unsigned long)pstats.number_of_blocks,
599 (unsigned long)pstats.total_bytes);
600 }
601
602 #include "gc_inline.h" /* for GC_print_free_list prototype */
603
604 /* Currently for debugger use only: */
GC_print_free_list(int kind,size_t sz_in_granules)605 GC_API void GC_CALL GC_print_free_list(int kind, size_t sz_in_granules)
606 {
607 void *flh_next;
608 int n;
609
610 GC_ASSERT(kind < MAXOBJKINDS);
611 GC_ASSERT(sz_in_granules <= MAXOBJGRANULES);
612 flh_next = GC_obj_kinds[kind].ok_freelist[sz_in_granules];
613 for (n = 0; flh_next; n++) {
614 GC_printf("Free object in heap block %p [%d]: %p\n",
615 (void *)HBLKPTR(flh_next), n, flh_next);
616 flh_next = obj_link(flh_next);
617 }
618 }
619
620 #endif /* !NO_DEBUGGING */
621
622 /*
623 * Clear all obj_link pointers in the list of free objects *flp.
624 * Clear *flp.
625 * This must be done before dropping a list of free gcj-style objects,
626 * since may otherwise end up with dangling "descriptor" pointers.
627 * It may help for other pointer-containing objects.
628 */
GC_clear_fl_links(void ** flp)629 STATIC void GC_clear_fl_links(void **flp)
630 {
631 void *next = *flp;
632
633 while (0 != next) {
634 *flp = 0;
635 flp = &(obj_link(next));
636 next = *flp;
637 }
638 }
639
640 /*
641 * Perform GC_reclaim_block on the entire heap, after first clearing
642 * small object free lists (if we are not just looking for leaks).
643 */
GC_start_reclaim(GC_bool report_if_found)644 GC_INNER void GC_start_reclaim(GC_bool report_if_found)
645 {
646 unsigned kind;
647
648 # if defined(PARALLEL_MARK)
649 GC_ASSERT(0 == GC_fl_builder_count);
650 # endif
651 /* Reset in use counters. GC_reclaim_block recomputes them. */
652 GC_composite_in_use = 0;
653 GC_atomic_in_use = 0;
654 /* Clear reclaim- and free-lists */
655 for (kind = 0; kind < GC_n_kinds; kind++) {
656 struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
657 GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);
658
659 if (rlist == 0) continue; /* This kind not used. */
660 if (!report_if_found) {
661 void **fop;
662 void **lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
663
664 for (fop = GC_obj_kinds[kind].ok_freelist;
665 (word)fop < (word)lim; (*(word **)&fop)++) {
666 if (*fop != 0) {
667 if (should_clobber) {
668 GC_clear_fl_links(fop);
669 } else {
670 *fop = 0;
671 }
672 }
673 }
674 } /* otherwise free list objects are marked, */
675 /* and its safe to leave them */
676 BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
677 }
678
679
680 /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
681 /* or enqueue the block for later processing. */
682 GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);
683
684 # ifdef EAGER_SWEEP
685 /* This is a very stupid thing to do. We make it possible anyway, */
686 /* so that you can convince yourself that it really is very stupid. */
687 GC_reclaim_all((GC_stop_func)0, FALSE);
688 # elif defined(ENABLE_DISCLAIM)
689 /* However, make sure to clear reclaimable objects of kinds with */
690 /* unconditional marking enabled before we do any significant */
691 /* marking work. */
692 GC_reclaim_unconditionally_marked();
693 # endif
694 # if defined(PARALLEL_MARK)
695 GC_ASSERT(0 == GC_fl_builder_count);
696 # endif
697
698 }
699
/*
 * Sweep blocks of the indicated object size and kind until either the
 * appropriate free list is nonempty, or there are no more blocks to
 * sweep.
 */
GC_INNER void GC_continue_reclaim(word sz /* granules */, int kind)
{
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok = &(GC_obj_kinds[kind]);
    struct hblk ** rlh = ok -> ok_reclaim_list;
    void **flh = &(ok -> ok_freelist[sz]);

    if (rlh == 0) return;       /* No blocks of this kind.      */
    rlh += sz;                  /* reclaim list for this size class */
    /* Pop blocks off the reclaim list and sweep them until one of them */
    /* yields at least one free object.                                 */
    while ((hbp = *rlh) != 0) {
        hhdr = HDR(hbp);
        *rlh = hhdr -> hb_next;
        GC_reclaim_small_nonempty_block(hbp, hhdr -> hb_sz, FALSE);
        if (*flh != 0)
            break;
    }
}
723
/*
 * Reclaim all small blocks waiting to be reclaimed.
 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
 * If this returns TRUE, then it's safe to restart the world
 * with incorrectly cleared mark bits.
 * If ignore_old is TRUE, then reclaim only blocks that have been
 * recently reclaimed, and discard the rest.
 * Stop_func may be 0.
 */
GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;
#   ifndef NO_CLOCK
      CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;

      if (GC_print_stats == VERBOSE)
        GET_TIME(start_time);
#   endif

    /* Drain every (kind, size) reclaim list.                           */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        rlp = ok -> ok_reclaim_list;
        if (rlp == 0) continue;
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
                    return(FALSE);      /* interrupted by the client    */
                }
                hhdr = HDR(hbp);
                *rlh = hhdr -> hb_next;
                if (!ignore_old
                    || (word)hhdr->hb_last_reclaimed == GC_gc_no - 1) {
                  /* It's likely we'll need it this time, too   */
                  /* It's been touched recently, so this        */
                  /* shouldn't trigger paging.                  */
                  GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE);
                }
            }
        }
    }
#   ifndef NO_CLOCK
      if (GC_print_stats == VERBOSE) {
        CLOCK_TYPE done_time;

        GET_TIME(done_time);
        GC_verbose_log_printf("Disposing of reclaim lists took %lu msecs\n",
                              MS_TIME_DIFF(done_time,start_time));
      }
#   endif
    return(TRUE);
}
782
#if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
/* We do an eager sweep on heap blocks where unconditional marking has  */
/* been enabled, so that any reclaimable objects have been reclaimed    */
/* before we start marking.  This is a simplified GC_reclaim_all        */
/* restricted to kinds where ok_mark_unconditionally is true.           */
  STATIC void GC_reclaim_unconditionally_marked(void)
  {
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;

    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        if (!ok->ok_mark_unconditionally)
          continue;     /* only unconditionally-marked kinds are swept  */
        rlp = ok->ok_reclaim_list;
        if (rlp == 0)
          continue;     /* this kind is unused                          */
        /* Drain the reclaim list of each size class (cf.               */
        /* GC_reclaim_all, but without stop_func/ignore_old handling).  */
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                hhdr = HDR(hbp);
                *rlh = hhdr->hb_next;
                GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE);
            }
        }
    }
  }
#endif /* !EAGER_SWEEP && ENABLE_DISCLAIM */
816
/* Argument package for GC_do_enumerate_reachable_objects, passed       */
/* through GC_apply_to_all_blocks cast to a word.                       */
struct enumerate_reachable_s {
  GC_reachable_object_proc proc;
  void *client_data;
};
821
/* Callback for GC_apply_to_all_blocks: invoke the client-supplied proc */
/* on every marked object in the block.  ped is actually a pointer to a */
/* struct enumerate_reachable_s, cast to word.                          */
STATIC void GC_do_enumerate_reachable_objects(struct hblk *hbp, word ped)
{
  struct hblkhdr *hhdr = HDR(hbp);
  size_t sz = (size_t)hhdr->hb_sz;
  size_t bit_no;
  char *p, *plim;

  if (GC_block_empty(hhdr)) {
    return;     /* no marked objects here */
  }

  p = hbp->hb_body;
  if (sz > MAXOBJBYTES) { /* one big object */
    plim = p;           /* the loop below then runs exactly once */
  } else {
    plim = hbp->hb_body + HBLKSIZE - sz;
  }
  /* Go through all words in block. */
  for (bit_no = 0; p <= plim; bit_no += MARK_BIT_OFFSET(sz), p += sz) {
    if (mark_bit_from_hdr(hhdr, bit_no)) {
      ((struct enumerate_reachable_s *)ped)->proc(p, sz,
                ((struct enumerate_reachable_s *)ped)->client_data);
    }
  }
}
847
GC_enumerate_reachable_objects_inner(GC_reachable_object_proc proc,void * client_data)848 GC_API void GC_CALL GC_enumerate_reachable_objects_inner(
849 GC_reachable_object_proc proc,
850 void *client_data)
851 {
852 struct enumerate_reachable_s ed;
853
854 GC_ASSERT(I_HOLD_LOCK());
855 ed.proc = proc;
856 ed.client_data = client_data;
857 GC_apply_to_all_blocks(GC_do_enumerate_reachable_objects, (word)&ed);
858 }
859