1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17 #include <stdio.h>
18 #include "private/gc_priv.h"
19
20 signed_word GC_bytes_found = 0;
21 /* Number of bytes of memory reclaimed */
22
23 #if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
24 word GC_fl_builder_count = 0;
25 /* Number of threads currently building free lists without */
26 /* holding GC lock. It is not safe to collect if this is */
27 /* nonzero. */
#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
29
30 /* We defer printing of leaked objects until we're done with the GC */
31 /* cycle, since the routine for printing objects needs to run outside */
32 /* the collector, e.g. without the allocation lock. */
33 #define MAX_LEAKED 40
34 ptr_t GC_leaked[MAX_LEAKED];
35 unsigned GC_n_leaked = 0;
36
37 GC_bool GC_have_errors = FALSE;
38
void GC_add_leaked(ptr_t leaked)
{
    /* Record a leaked object for later reporting.  At most MAX_LEAKED */
    /* objects are remembered per cycle; further leaks are dropped.    */
    if (GC_n_leaked >= MAX_LEAKED) return;
    GC_have_errors = TRUE;
    GC_leaked[GC_n_leaked++] = leaked;
    /* Make sure it's not reclaimed this cycle */
    GC_set_mark_bit(leaked);
}
48
49 static GC_bool printing_errors = FALSE;
50 /* Print all objects on the list after printing any smashed objs. */
51 /* Clear both lists. */
GC_print_all_errors()52 void GC_print_all_errors ()
53 {
54 unsigned i;
55
56 LOCK();
57 if (printing_errors) {
58 UNLOCK();
59 return;
60 }
61 printing_errors = TRUE;
62 UNLOCK();
63 if (GC_debugging_started) GC_print_all_smashed();
64 for (i = 0; i < GC_n_leaked; ++i) {
65 ptr_t p = GC_leaked[i];
66 if (HDR(p) -> hb_obj_kind == PTRFREE) {
67 GC_err_printf("Leaked atomic object at ");
68 } else {
69 GC_err_printf("Leaked composite object at ");
70 }
71 GC_print_heap_obj(p);
72 GC_err_printf("\n");
73 GC_free(p);
74 GC_leaked[i] = 0;
75 }
76 GC_n_leaked = 0;
77 printing_errors = FALSE;
78 }
79
80
81 /*
82 * reclaim phase
83 *
84 */
85
86
87 /*
88 * Test whether a block is completely empty, i.e. contains no marked
89 * objects. This does not require the block to be in physical
90 * memory.
91 */
92
GC_block_empty(hdr * hhdr)93 GC_bool GC_block_empty(hdr *hhdr)
94 {
95 return (hhdr -> hb_n_marks == 0);
96 }
97
GC_block_nearly_full(hdr * hhdr)98 GC_bool GC_block_nearly_full(hdr *hhdr)
99 {
100 return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
101 }
102
103 /* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
104 /* and USE_MARK_BITS cases. */
105
106 /*
107 * Restore unmarked small objects in h of size sz to the object
108 * free list. Returns the new list.
109 * Clears unmarked objects. Sz is in bytes.
110 */
111 /*ARGSUSED*/
ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
                       ptr_t list, signed_word *count)
{
    word bit_no = 0;            /* mark bit index of the current object */
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;  /* bytes reclaimed from this block */

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
    GC_ASSERT(sz == hhdr -> hb_sz);
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);  /* sz is word-aligned */
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);  /* last object start */

    /* go through all words in block */
    while( p <= plim ) {
        if( mark_bit_from_hdr(hhdr, bit_no) ) {
            /* Object is live: just step over it. */
            p = (word *)((ptr_t)p + sz);
        } else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              /* Clear two words at a time.  The first word of the pair */
              /* already holds the free-list link, so only p[1] needs   */
              /* clearing; requires sz even (in words) and p aligned to */
              /* a double word.                                         */
              GC_ASSERT(!(sz & 1)
                        && !((word)p & (2 * sizeof(word) - 1)));
              p[1] = 0;
              p += 2;
              while (p < q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while (p < q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return(list);
}
157
158 /* The same thing, but don't clear objects: */
159 /*ARGSUSED*/
/* Restore unmarked small objects in hbp of size sz (bytes) to the     */
/* object free list, without clearing their contents.  Returns the     */
/* new free list; adds the number of reclaimed bytes to *count.        */
ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
                        ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *obj = (word *)(hbp->hb_body);
    word *last_obj = (word *)((ptr_t)hbp + HBLKSIZE - sz);
    signed_word reclaimed = 0;

    GC_ASSERT(sz == hhdr -> hb_sz);

    /* Visit every object slot in the block. */
    for (; obj <= last_obj;
         obj = (word *)((ptr_t)obj + sz), bit_no += MARK_BIT_OFFSET(sz)) {
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
            /* Unmarked: prepend it to the free list. */
            reclaimed += sz;
            obj_link(obj) = list;
            list = (ptr_t)obj;
        }
    }
    *count += reclaimed;
    return list;
}
185
186 /* Don't really reclaim objects, just check for unmarked ones: */
187 /*ARGSUSED*/
/* Leak-detection mode: don't reclaim anything, just record every      */
/* unmarked object in hbp (object size sz bytes) as a leak.            */
void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
{
    word bit_no;
    ptr_t obj = hbp->hb_body;
    ptr_t last_obj = obj + HBLKSIZE - sz;

    GC_ASSERT(sz == hhdr -> hb_sz);

    /* Scan every object slot in the block. */
    for (bit_no = 0; obj <= last_obj;
         obj += sz, bit_no += MARK_BIT_OFFSET(sz)) {
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
            GC_add_leaked(obj);
        }
    }
}
206
207
208 /*
209 * Generic procedure to rebuild a free list in hbp.
210 * Also called directly from GC_malloc_many.
211 * Sz is now in bytes.
212 */
/* Generic procedure to rebuild a free list in hbp.  Sz is in bytes.   */
/* If init is true, reclaimed objects are cleared; otherwise the block */
/* must be pointer-free.  Also called directly from GC_malloc_many.    */
ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
                         GC_bool init, ptr_t list, signed_word *count)
{
    ptr_t result;

    GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
    GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
    if (!init) {
        /* Skipping the clear is only legal for pointer-free blocks. */
        GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
        result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
    } else {
        result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
    }
    /* Uncollectable objects must stay marked so they are never swept. */
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
    return result;
}
229
230 /*
231 * Restore unmarked small objects in the block pointed to by hbp
232 * to the appropriate object free list.
233 * If entirely empty blocks are to be completely deallocated, then
234 * caller should perform that check.
235 */
/* Restore unmarked small objects in hbp to the appropriate free list, */
/* or (if report_if_found) just report them as leaks.  Deallocating    */
/* entirely empty blocks is the caller's job.                          */
void GC_reclaim_small_nonempty_block(struct hblk *hbp,
                                     int report_if_found, signed_word *count)
{
    hdr *hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;
    struct obj_kind *ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);

    /* Remember when this block was last swept. */
    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;

    if (report_if_found) {
        GC_reclaim_check(hbp, hhdr, sz);
        return;
    }
    /* Clear objects if the kind requires it or debugging is on. */
    *flh = GC_reclaim_generic(hbp, hhdr, sz,
                              ok -> ok_init || GC_debugging_started,
                              *flh, &GC_bytes_found);
}
255
256 /*
 * Restore an unmarked large object or an entirely empty block of small objects
258 * to the heap block free list.
259 * Otherwise enqueue the block for later processing
260 * by GC_reclaim_small_nonempty_block.
261 * If report_if_found is TRUE, then process any block immediately, and
262 * simply report free objects; do not actually reclaim them.
263 */
void GC_reclaim_block(struct hblk *hbp, word report_if_found)
{
    hdr * hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz; /* size of objects in current block */
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    struct hblk ** rlh;

    if( sz > MAXOBJBYTES ) {  /* 1 big object */
        if( !mark_bit_from_hdr(hhdr, 0) ) {
            /* Unmarked large object: report it as a leak, or return   */
            /* the whole block to the heap block free list.            */
            if (report_if_found) {
              GC_add_leaked((ptr_t)hbp);
            } else {
              size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
              if (blocks > 1) {
                GC_large_allocd_bytes -= blocks * HBLKSIZE;
              }
              GC_bytes_found += sz;
              GC_freehblk(hbp);
            }
        } else {
            /* Still live: charge its size to the appropriate in-use   */
            /* total (composite if it may contain pointers).           */
            if (hhdr -> hb_descr != 0) {
              GC_composite_in_use += sz;
            } else {
              GC_atomic_in_use += sz;
            }
        }
    } else {
        GC_bool empty = GC_block_empty(hhdr);
#       ifdef PARALLEL_MARK
          /* Count can be low or one too high because we sometimes     */
          /* have to ignore decrements.  Objects can also potentially  */
          /* be repeatedly marked by each marker.                      */
          /* Here we assume two markers, but this is extremely         */
          /* unlikely to fail spuriously with more.  And if it does,   */
          /* it should be looked at.                                   */
          GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
#       else
          GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
#       endif
        /* Charge the marked portion of the block to the in-use totals. */
        if (hhdr -> hb_descr != 0) {
          GC_composite_in_use += sz * hhdr -> hb_n_marks;
        } else {
          GC_atomic_in_use += sz * hhdr -> hb_n_marks;
        }
        if (report_if_found) {
          /* Leak-check mode: process the block immediately. */
          GC_reclaim_small_nonempty_block(hbp, (int)report_if_found,
                                          &GC_bytes_found);
        } else if (empty) {
          /* No marked objects at all: free the whole block. */
          GC_bytes_found += HBLKSIZE;
          GC_freehblk(hbp);
        } else if (TRUE != GC_block_nearly_full(hhdr)){
          /* group of smaller objects, enqueue the real work */
          rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
          hhdr -> hb_next = *rlh;
          *rlh = hbp;
        } /* else not worth salvaging. */
        /* We used to do the nearly_full check later, but we    */
        /* already have the right cache context here.  Also     */
        /* doing it here avoids some silly lock contention in   */
        /* GC_malloc_many.                                      */
    }
}
326
327 #if !defined(NO_DEBUGGING)
328 /* Routines to gather and print heap block info */
329 /* intended for debugging. Otherwise should be called */
330 /* with lock. */
331
/* Accumulator threaded (via a word cast) through GC_print_block_descr */
/* by GC_apply_to_all_blocks.                                          */
struct Print_stats
{
	size_t number_of_blocks;   /* heap blocks visited */
	size_t total_bytes;        /* their sizes, rounded up to HBLKSIZE */
};
337
338 #ifdef USE_MARK_BYTES
339
340 /* Return the number of set mark bits in the given header */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    size_t sz = hhdr -> hb_sz;
    int offset = MARK_BIT_OFFSET(sz);   /* mark-byte stride per object */
    int limit = FINAL_MARK_BIT(sz);     /* index of the final mark byte */

    /* In mark-byte mode each hb_marks entry is 0 or 1, so summing the */
    /* entries counts the set marks.  The final entry at limit is      */
    /* asserted set below and deliberately excluded from the count.    */
    for (i = 0; i < limit; i += offset) {
        result += hhdr -> hb_marks[i];
    }
    GC_ASSERT(hhdr -> hb_marks[limit]);
    return(result);
}
355
356 #else
357
358 /* Number of set bits in a word. Not performance critical. */
/* Number of set bits in a word.  Not performance critical.            */
/* Uses the clear-lowest-set-bit trick: each iteration removes one     */
/* set bit, so the loop runs once per set bit.                         */
static int set_bits(word n)
{
    int count;

    for (count = 0; n != 0; count++) {
        n &= n - 1;   /* clear the lowest set bit */
    }
    return count;
}
370
371 /* Return the number of set mark bits in the given header */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    int n_mark_words;
#   ifdef MARK_BIT_PER_OBJ
      int n_objs = HBLK_OBJS(hhdr -> hb_sz);

      /* HBLK_OBJS reports 0 for an object at least a block in size;   */
      /* treat that as one object so we look at one mark bit.          */
      if (0 == n_objs) n_objs = 1;
      n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);  /* ceil division */
#   else /* MARK_BIT_PER_GRANULE */
      n_mark_words = MARK_BITS_SZ;
#   endif
    for (i = 0; i < n_mark_words - 1; i++) {
        result += set_bits(hhdr -> hb_marks[i]);
    }
#   ifdef MARK_BIT_PER_OBJ
      /* Shift out the unused high bits of the last mark word so only  */
      /* bits corresponding to real objects are counted.               */
      result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
                         << (n_mark_words * WORDSZ - n_objs));
#   else
      result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
#   endif
    /* Subtract one, apparently for a final mark bit that is always    */
    /* set (cf. the GC_ASSERT in the USE_MARK_BYTES variant) --        */
    /* NOTE(review): confirm against FINAL_MARK_BIT's definition.      */
    return(result - 1);
}
396
397 #endif /* !USE_MARK_BYTES */
398
399 /*ARGSUSED*/
/* Print a one-line "(kind:size,marks)" descriptor for heap block h    */
/* and fold its block count / rounded size into the Print_stats        */
/* accumulator passed (cast to word) in raw_ps.                        */
void GC_print_block_descr(struct hblk *h, word /* struct PrintStats */ raw_ps)
{
    hdr * hhdr = HDR(h);
    size_t bytes = hhdr -> hb_sz;   /* object size for this block */
    struct Print_stats *ps;
    unsigned n_marks = GC_n_set_marks(hhdr);

    /* Cast the size and mark count to unsigned: passing size_t or     */
    /* word-sized values for "%u" is a printf format/argument          */
    /* mismatch (undefined behavior on LP64 targets).                  */
    if (hhdr -> hb_n_marks != n_marks) {
      GC_printf("(%u:%u,%u!=%u)", (unsigned)(hhdr -> hb_obj_kind),
                                  (unsigned)bytes,
                                  (unsigned)(hhdr -> hb_n_marks), n_marks);
    } else {
      GC_printf("(%u:%u,%u)", (unsigned)(hhdr -> hb_obj_kind),
                              (unsigned)bytes, n_marks);
    }
    /* Round the object size up to a whole number of heap blocks. */
    bytes += HBLKSIZE-1;
    bytes &= ~(HBLKSIZE-1);

    ps = (struct Print_stats *)raw_ps;
    ps->total_bytes += bytes;
    ps->number_of_blocks++;
}
422
GC_print_block_list()423 void GC_print_block_list()
424 {
425 struct Print_stats pstats;
426
427 GC_printf("(kind(0=ptrfree,1=normal,2=unc.):size_in_bytes, #_marks_set)\n");
428 pstats.number_of_blocks = 0;
429 pstats.total_bytes = 0;
430 GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
431 GC_printf("\nblocks = %lu, bytes = %lu\n",
432 (unsigned long)pstats.number_of_blocks,
433 (unsigned long)pstats.total_bytes);
434 }
435
436 /* Currently for debugger use only: */
/* Dump the free list for the given object kind and granule size,      */
/* grouped by the heap block each object lives in.  Debugger use only. */
void GC_print_free_list(int kind, size_t sz_in_granules)
{
    struct obj_kind * ok = &GC_obj_kinds[kind];
    ptr_t flh = ok -> ok_freelist[sz_in_granules];
    struct hblk *lastBlock = 0;
    int n = 0;

    while (flh){
        struct hblk *block = HBLKPTR(flh);
        if (block != lastBlock){
            /* Use "%p" with a void* cast: the original "0x%x" treats  */
            /* a pointer as unsigned int, which truncates addresses    */
            /* and is undefined behavior on 64-bit targets.            */
            GC_printf("\nIn heap block at %p:\n\t", (void *)block);
            lastBlock = block;
        }
        GC_printf("%d: %p;", ++n, (void *)flh);
        flh = obj_link(flh);
    }
}
454
455 #endif /* NO_DEBUGGING */
456
457 /*
458 * Clear all obj_link pointers in the list of free objects *flp.
459 * Clear *flp.
460 * This must be done before dropping a list of free gcj-style objects,
 * since we may otherwise end up with dangling "descriptor" pointers.
462 * It may help for other pointer-containing objects.
463 */
GC_clear_fl_links(void ** flp)464 void GC_clear_fl_links(void **flp)
465 {
466 void *next = *flp;
467
468 while (0 != next) {
469 *flp = 0;
470 flp = &(obj_link(next));
471 next = *flp;
472 }
473 }
474
475 /*
476 * Perform GC_reclaim_block on the entire heap, after first clearing
477 * small object free lists (if we are not just looking for leaks).
478 */
void GC_start_reclaim(GC_bool report_if_found)
{
    unsigned kind;

#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      /* No thread may be building free lists while we sweep. */
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
    /* Reset in use counters. GC_reclaim_block recomputes them. */
    GC_composite_in_use = 0;
    GC_atomic_in_use = 0;
    /* Clear reclaim- and free-lists */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        void **fop;
        void **lim;
        struct hblk ** rlp;
        struct hblk ** rlim;
        struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
        GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);

        if (rlist == 0) continue; /* This kind not used. */
        if (!report_if_found) {
            /* Drop the old free lists.  For pointer-containing kinds  */
            /* also zero the link words (GC_clear_fl_links), so stale  */
            /* links cannot be seen as live pointers later.            */
            lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
            for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
                if (*fop != 0) {
                    if (should_clobber) {
                        GC_clear_fl_links(fop);
                    } else {
                        *fop = 0;
                    }
                }
            }
        } /* otherwise free list objects are marked, */
          /* and it's safe to leave them */
        /* Empty the per-size reclaim lists; GC_reclaim_block refills them. */
        rlim = rlist + MAXOBJGRANULES+1;
        for( rlp = rlist; rlp < rlim; rlp++ ) {
            *rlp = 0;
        }
    }


    /* Go through all heap blocks (in hblklist) and reclaim unmarked objects */
    /* or enqueue the block for later processing. */
    GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);

#   ifdef EAGER_SWEEP
    /* This is a very stupid thing to do.  We make it possible anyway, */
    /* so that you can convince yourself that it really is very stupid. */
    GC_reclaim_all((GC_stop_func)0, FALSE);
#   endif
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif

}
533
534 /*
535 * Sweep blocks of the indicated object size and kind until either the
536 * appropriate free list is nonempty, or there are no more blocks to
537 * sweep.
538 */
/* Sweep blocks of the indicated size (in granules) and kind until     */
/* either the corresponding free list becomes nonempty or there are    */
/* no more enqueued blocks to sweep.                                   */
void GC_continue_reclaim(size_t sz /* granules */, int kind)
{
    struct obj_kind * ok = &(GC_obj_kinds[kind]);
    struct hblk ** rlh = ok -> ok_reclaim_list;
    void **flh = &(ok -> ok_freelist[sz]);
    struct hblk * hbp;

    if (0 == rlh) return;   /* No blocks of this kind. */
    rlh += sz;
    while ((hbp = *rlh) != 0) {
        hdr * hhdr = HDR(hbp);

        /* Unlink the block from the reclaim list, then sweep it. */
        *rlh = hhdr -> hb_next;
        GC_reclaim_small_nonempty_block(hbp, FALSE, &GC_bytes_found);
        if (*flh != 0) break;   /* Got at least one free object. */
    }
}
556
557 /*
558 * Reclaim all small blocks waiting to be reclaimed.
559 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
560 * If this returns TRUE, then it's safe to restart the world
561 * with incorrectly cleared mark bits.
562 * If ignore_old is TRUE, then reclaim only blocks that have been
563 * recently reclaimed, and discard the rest.
564 * Stop_func may be 0.
565 */
GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;
    CLOCK_TYPE start_time;
    CLOCK_TYPE done_time;

    if (GC_print_stats == VERBOSE)
        GET_TIME(start_time);

    /* Drain the reclaim list of every kind and size. */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        rlp = ok -> ok_reclaim_list;
        if (rlp == 0) continue;
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                /* Abort (returning FALSE) if the client stop function */
                /* says time is up.                                    */
                if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
                    return(FALSE);
                }
                hhdr = HDR(hbp);
                *rlh = hhdr -> hb_next;
                if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
                  /* It's likely we'll need it this time, too */
                  /* It's been touched recently, so this */
                  /* shouldn't trigger paging. */
                  GC_reclaim_small_nonempty_block(hbp, FALSE, &GC_bytes_found);
                }
            }
        }
    }
    if (GC_print_stats == VERBOSE) {
        GET_TIME(done_time);
        GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
                  MS_TIME_DIFF(done_time,start_time));
    }
    return(TRUE);
}
609