/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "private/gc_priv.h"

#include <stdio.h>
#if !defined(MACOS) && !defined(MSWINCE)
# include <signal.h>
# if !defined(__CC_ARM)
#   include <sys/types.h>
# endif
#endif

/*
 * Separate free lists are maintained for different sized objects
 * up to MAXOBJBYTES.
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list. It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of (small) size i as follows:
 *
 *   opp = &(GC_objfreelist[i]);
 *   if (*opp == 0) GC_allocobj(i, NORMAL);
 *   ptr = *opp;
 *   *opp = obj_link(ptr);
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 */

/*
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ. Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
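
/*
 * Illustrative sketch only (not part of the collector): because chunks
 * are HBLKSZ-aligned, an arbitrary address p can be mapped to its chunk
 * and header roughly as follows.  The real lookup lives in headers.c and
 * also handles forwarded headers for large objects.
 *
 *   struct hblk *h = HBLKPTR(p);  // round p down to a chunk boundary
 *   hdr *hhdr = HDR(h);           // header, or 0 if not a GC chunk
 *   if (hhdr != 0) {
 *     // p points into memory administered by the allocator
 *   }
 */
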
58
59 word GC_non_gc_bytes = 0; /* Number of bytes not intended to be collected */
60
61 word GC_gc_no = 0;
62
63 #ifndef GC_DISABLE_INCREMENTAL
64 GC_INNER int GC_incremental = 0; /* By default, stop the world. */
65 #endif
66
67 #ifdef THREADS
68 int GC_parallel = FALSE; /* By default, parallel GC is off. */
69 #endif
70
71 #ifndef GC_FULL_FREQ
72 # define GC_FULL_FREQ 19 /* Every 20th collection is a full */
73 /* collection, whether we need it */
74 /* or not. */
75 #endif
76
77 int GC_full_freq = GC_FULL_FREQ;
78
79 STATIC GC_bool GC_need_full_gc = FALSE;
                        /* Need full GC due to heap growth. */

#ifdef THREAD_LOCAL_ALLOC
  GC_INNER GC_bool GC_world_stopped = FALSE;
#endif

STATIC word GC_used_heap_size_after_full = 0;

/* GC_copyright symbol is externally visible. */
char * const GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved. ",
"Copyright (c) 1999-2009 by Hewlett-Packard Company. All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };

/* Version macros are now defined in gc_version.h, which is included by */
/* gc.h, which is included by gc_priv.h.                                */
#ifndef GC_NO_VERSION_VAR
  const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
                               (GC_VERSION_MINOR << 8) | GC_TMP_ALPHA_VERSION);
#endif

GC_API unsigned GC_CALL GC_get_version(void)
{
    return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
            GC_TMP_ALPHA_VERSION;
}

/* some more variables */

#ifdef GC_DONT_EXPAND
  GC_bool GC_dont_expand = TRUE;
#else
  GC_bool GC_dont_expand = FALSE;
#endif

#ifndef GC_FREE_SPACE_DIVISOR
# define GC_FREE_SPACE_DIVISOR 3 /* must be > 0 */
#endif

word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR;

GC_INNER int GC_CALLBACK GC_never_stop_func(void)
{
    return(0);
}

#ifndef GC_TIME_LIMIT
# define GC_TIME_LIMIT 50  /* We try to keep pause times from exceeding */
                           /* this by much. In milliseconds.            */
#endif

unsigned long GC_time_limit = GC_TIME_LIMIT;

#ifndef NO_CLOCK
  STATIC CLOCK_TYPE GC_start_time = 0;
                                /* Time at which we stopped world.      */
                                /* Used only in GC_timeout_stop_func.   */
#endif

STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
                                /* collection within GC_time_limit.     */

STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
                                /* accessed holding the lock.           */

GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
{
    DCL_LOCK_STATE;
    GC_ASSERT(stop_func != 0);
    LOCK();
    GC_default_stop_func = stop_func;
    UNLOCK();
}

GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
{
    GC_stop_func stop_func;
    DCL_LOCK_STATE;
    LOCK();
    stop_func = GC_default_stop_func;
    UNLOCK();
    return stop_func;
}
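
/*
 * Example client usage (a sketch): install a stop function so that a
 * world-stopped collection can be abandoned on demand.  Here
 * my_abort_requested is a hypothetical flag maintained by the client,
 * not part of the collector.
 *
 *   static int my_abort_requested = 0;
 *
 *   static int GC_CALLBACK my_stop_func(void)
 *   {
 *     return my_abort_requested;  // nonzero means "give up marking"
 *   }
 *   ...
 *   GC_set_stop_func(my_stop_func);
 */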

#if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_default_stop_func
#else
  STATIC int GC_CALLBACK GC_timeout_stop_func (void)
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

    if ((*GC_default_stop_func)())
      return(1);

    if ((count++ & 3) != 0) return(0);
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
    if (time_diff >= GC_time_limit) {
        if (GC_print_stats) {
          GC_log_printf(
                "Abandoning stopped marking after %lu msecs (attempt %d)\n",
                time_diff, GC_n_attempts);
        }
        return(1);
    }
    return(0);
  }
#endif /* !GC_DISABLE_INCREMENTAL */

#ifdef THREADS
  GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
#endif

/* Return the minimum number of bytes that must be allocated between    */
/* collections to amortize the collection cost.                         */
static word min_bytes_allocd(void)
{
    int dummy; /* GC_stackbottom is used only for a single-threaded case. */
#   ifdef STACK_GROWS_UP
      word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
#   else
      word stack_size = GC_stackbottom - (ptr_t)(&dummy);
#   endif

    word total_root_size;       /* includes double stack size,          */
                                /* since the stack is expensive         */
                                /* to scan.                             */
    word scan_size;             /* Estimate of memory to be scanned     */
                                /* during normal GC.                    */

#   ifdef THREADS
      if (GC_need_to_lock) {
        /* We are multi-threaded... */
        stack_size = GC_total_stacksize;
        /* For now, we just use the value computed during the latest GC. */
#       ifdef DEBUG_THREADS
          GC_log_printf("Total stacks size: %lu\n",
                        (unsigned long)stack_size);
#       endif
      }
#   endif

    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
                + total_root_size;
    if (GC_incremental) {
        return scan_size / (2 * GC_free_space_divisor);
    } else {
        return scan_size / GC_free_space_divisor;
    }
}
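
/*
 * Worked example with made-up numbers: suppose a 64 KB stack,
 * GC_root_size = 1 MB, GC_composite_in_use = 8 MB, GC_atomic_in_use = 4 MB,
 * and the default GC_free_space_divisor of 3.  Then
 *   total_root_size = 2*64K + 1M ~= 1.1 MB
 *   scan_size = 2*8M + 4M/4 + 1.1M ~= 18.1 MB
 * so a non-incremental collection is amortized after roughly
 * 18.1M/3 ~= 6 MB of allocation; the incremental threshold is half that.
 */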

/* Return the number of bytes allocated, adjusted for explicit storage  */
/* management, etc.  This number is used in deciding when to trigger    */
/* collections.                                                         */
STATIC word GC_adj_bytes_allocd(void)
{
    signed_word result;
    signed_word expl_managed = (signed_word)GC_non_gc_bytes
                                - (signed_word)GC_non_gc_bytes_at_gc;

    /* Don't count what was explicitly freed, or newly allocated for    */
    /* explicit management. Note that deallocating an explicitly        */
    /* managed object should not alter result, assuming the client      */
    /* is playing by the rules.                                         */
    result = (signed_word)GC_bytes_allocd
             + (signed_word)GC_bytes_dropped
             - (signed_word)GC_bytes_freed
             + (signed_word)GC_finalizer_bytes_freed
             - expl_managed;
    if (result > (signed_word)GC_bytes_allocd) {
        result = GC_bytes_allocd;
        /* probably client bug or unfortunate scheduling */
    }
    result += GC_bytes_finalized;
        /* We count objects enqueued for finalization as though they    */
        /* had been reallocated this round. Finalization is user        */
        /* visible progress. And if we don't count this, we have        */
        /* stability problems for programs that finalize all objects.   */
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
        /* Always count at least 1/8 of the allocations. We don't want  */
        /* to collect too infrequently, since that would inhibit        */
        /* coalescing of free storage blocks.                           */
        /* This also makes us partially robust against client bugs.     */
        return(GC_bytes_allocd >> 3);
    } else {
        return(result);
    }
}
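
/*
 * Illustrative arithmetic (made-up numbers): if 10 MB were allocated
 * (GC_bytes_allocd), 4 MB explicitly freed (GC_bytes_freed), 1 MB freed
 * by finalizers (GC_finalizer_bytes_freed), with no dropped bytes and no
 * change in GC_non_gc_bytes, then result = 10M - 4M + 1M = 7M, plus
 * GC_bytes_finalized.  The 1/8 floor (here 1.25 MB) would not apply.
 */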


/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots. This          */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
STATIC void GC_clear_a_few_frames(void)
{
#   ifndef CLEAR_NWORDS
#     define CLEAR_NWORDS 64
#   endif
    volatile word frames[CLEAR_NWORDS];
    BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
}

/* Heap size at which we need a collection to avoid expanding past      */
/* limits used by blacklisting.                                         */
STATIC word GC_collect_at_heapsize = (word)(-1);

/* Have we allocated enough to amortize a collection? */
GC_INNER GC_bool GC_should_collect(void)
{
    static word last_min_bytes_allocd;
    static word last_gc_no;
    if (last_gc_no != GC_gc_no) {
        last_gc_no = GC_gc_no;
        last_min_bytes_allocd = min_bytes_allocd();
    }
    return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
           || GC_heapsize >= GC_collect_at_heapsize);
}

/* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
                        /* Called at start of full collections.         */
                        /* Not called if 0. Called with the allocation  */
                        /* lock held. Not used by GC itself.            */

GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
{
    DCL_LOCK_STATE;
    LOCK();
    GC_start_call_back = fn;
    UNLOCK();
}

GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
{
    GC_start_callback_proc fn;
    DCL_LOCK_STATE;
    LOCK();
    fn = GC_start_call_back;
    UNLOCK();
    return fn;
}

GC_INLINE void GC_notify_full_gc(void)
{
    if (GC_start_call_back != 0) {
        (*GC_start_call_back)();
    }
}

STATIC GC_bool GC_is_full_gc = FALSE;

STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
STATIC void GC_finish_collection(void);

/*
 * Initiate a garbage collection if appropriate.  Choose judiciously
 * between partial, full, and stop-world collections.
 */
STATIC void GC_maybe_gc(void)
{
    static int n_partial_gcs = 0;

    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (GC_should_collect()) {
        if (!GC_incremental) {
            /* FIXME: If possible, GC_default_stop_func should be used here */
            GC_try_to_collect_inner(GC_never_stop_func);
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            if (GC_parallel)
              GC_wait_for_reclaim();
#         endif
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
            if (GC_print_stats) {
              GC_log_printf(
                  "***>Full mark for collection %lu after %ld allocd bytes\n",
                  (unsigned long)GC_gc_no + 1, (long)GC_bytes_allocd);
            }
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_notify_full_gc();
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped.       */
        /* If we run out of time, this turns into       */
        /* incremental marking.                         */
#       ifndef NO_CLOCK
          if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#       endif
        /* FIXME: If possible, GC_default_stop_func should be           */
        /* used instead of GC_never_stop_func here.                     */
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
              GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}


/*
 * Stop the world garbage collection. Assumes lock held. If stop_func is
 * not GC_never_stop_func then abort if stop_func returns TRUE.
 * Return TRUE if we successfully completed the collection.
 */
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif
    ASSERT_CANCEL_DISABLED();
    if (GC_dont_gc || (*stop_func)()) return FALSE;
    if (GC_incremental && GC_collection_in_progress()) {
      if (GC_print_stats) {
        GC_log_printf(
            "GC_try_to_collect_inner: finishing collection in progress\n");
      }
      /* Just finish collection already in progress. */
      while(GC_collection_in_progress()) {
          if ((*stop_func)()) return(FALSE);
          GC_collect_a_little_inner(1);
      }
    }
    GC_notify_full_gc();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(start_time);
        GC_log_printf("Initiating full world-stop collection!\n");
      }
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines      */
    /* don't see cleared mark bits.                                     */
    /* If we're guaranteed to finish, then this is unnecessary.         */
    /* In the find_leak case, we have to finish to guarantee that       */
    /* previously unmarked objects are not reported as leaks.           */
#   ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_wait_for_reclaim();
#   endif
    if ((GC_find_leak || stop_func != GC_never_stop_func)
        && !GC_reclaim_all(stop_func, FALSE)) {
      /* Aborted. So far everything is still consistent. */
      return(FALSE);
    }
    GC_invalidate_mark_state();  /* Flush mark stack. */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
      GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use      */
        /* current work. Reestablish invariants as cheaply as           */
        /* possible.                                                    */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is already still consistent. We'll   */
        /* finish incrementally.                                        */
      return(FALSE);
    }
    GC_finish_collection();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(current_time);
        GC_log_printf("Complete collection took %lu msecs\n",
                      MS_TIME_DIFF(current_time,start_time));
      }
#   endif
    return(TRUE);
}

/*
 * Perform n units of garbage collection work. A unit is intended to touch
 * roughly GC_RATE pages. Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
#ifndef GC_RATE
# define GC_RATE 10
#endif
#ifndef MAX_PRIOR_ATTEMPTS
# define MAX_PRIOR_ATTEMPTS 1
#endif
        /* Maximum number of prior attempts at world stop marking.      */
        /* A value of 1 means that we finish the second time, no matter */
        /* how long it takes. Doesn't count the initial root scan       */
        /* for a full GC.                                               */

STATIC int GC_deficit = 0;  /* The number of extra calls to GC_mark_some */
                            /* that we have made.                        */
GC_INNER void GC_collect_a_little_inner(int n)
{
    int i;
    IF_CANCEL(int cancel_state;)

    if (GC_dont_gc) return;
    DISABLE_CANCEL(cancel_state);
    if (GC_incremental && GC_collection_in_progress()) {
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                  GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                  if (GC_parallel)
                    GC_wait_for_reclaim();
#               endif
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
#                 ifndef NO_CLOCK
                    GET_TIME(GC_start_time);
#                 endif
                  if (!GC_stopped_mark(GC_timeout_stop_func)) {
                    GC_n_attempts++;
                    break;
                  }
                } else {
                  /* FIXME: If possible, GC_default_stop_func should be */
                  /* used here.                                         */
                  (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
    RESTORE_CANCEL(cancel_state);
}

GC_INNER void (*GC_check_heap)(void) = 0;
GC_INNER void (*GC_print_all_smashed)(void) = 0;

GC_API int GC_CALL GC_collect_a_little(void)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    if (!result && GC_debugging_started) GC_print_all_smashed();
    return(result);
}
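
/*
 * Typical client usage (a sketch): do a small amount of collection work
 * whenever the application is idle, until the collector reports that no
 * collection is in progress:
 *
 *   while (GC_collect_a_little()) {
 *     // nonzero return means more incremental work remains
 *   }
 */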

#if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
  GC_INNER void GC_add_current_malloc_heap(void);
#endif

#ifdef MAKE_BACK_GRAPH
  GC_INNER void GC_build_back_graph(void);
#endif

#ifndef SMALL_CONFIG
  /* Variables for world-stop average delay time statistic computation. */
  /* "divisor" is incremented every world-stop and halved when it       */
  /* reaches its maximum (or upon "total_time" overflow).               */
  static unsigned world_stopped_total_time = 0;
  static unsigned world_stopped_total_divisor = 0;
# ifndef MAX_TOTAL_TIME_DIVISOR
    /* We shall not use big values here (so "outdated" delay time       */
    /* values would have less impact on "average" delay time value than */
    /* newer ones).                                                     */
#   define MAX_TOTAL_TIME_DIVISOR 1000
# endif
#endif
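
/*
 * Example of the averaging scheme above (illustrative numbers): after
 * world stops of 30, 50 and 40 msecs, total_time = 120 and divisor = 3,
 * so the reported average is 40 msecs.  Once divisor reaches
 * MAX_TOTAL_TIME_DIVISOR (or total_time overflows), both are halved, so
 * older pauses gradually lose weight in the average.
 */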

/*
 * Assumes lock is held. We stop the world and mark from all roots.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
    unsigned i;
    int dummy;
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif

#   if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
      GC_add_current_malloc_heap();
#   endif
#   if defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

    STOP_WORLD();
#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = TRUE;
#   endif
    if (GC_print_stats) {
        /* Output blank line for convenience here */
        GC_log_printf(
              "\n--> Marking for collection %lu after %lu allocated bytes\n",
              (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
    }
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots. */
    /* Minimize junk left in my registers and on the stack */
    GC_clear_a_few_frames();
    GC_noop(0,0,0,0,0,0);
    GC_initiate_gc();
    for (i = 0;;i++) {
        if ((*stop_func)()) {
            if (GC_print_stats) {
              GC_log_printf("Abandoned stopped marking after %u iterations\n",
                            i);
            }
            GC_deficit = i;  /* Give the mutator a chance. */
#           ifdef THREAD_LOCAL_ALLOC
              GC_world_stopped = FALSE;
#           endif
            START_WORLD();
            return(FALSE);
        }
        if (GC_mark_some((ptr_t)(&dummy))) break;
    }

    GC_gc_no++;
    if (GC_print_stats) {
      GC_log_printf(
             "Collection %lu reclaimed %ld bytes ---> heapsize = %lu bytes\n",
             (unsigned long)(GC_gc_no - 1), (long)GC_bytes_found,
             (unsigned long)GC_heapsize);
    }

    /* Check all debugged objects for consistency */
    if (GC_debugging_started) {
        (*GC_check_heap)();
    }

#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = FALSE;
#   endif
    START_WORLD();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        unsigned long time_diff;
        unsigned total_time, divisor;
        GET_TIME(current_time);
        time_diff = MS_TIME_DIFF(current_time,start_time);

        /* Compute new world-stop delay total time */
        total_time = world_stopped_total_time;
        divisor = world_stopped_total_divisor;
        if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
          /* Halve values if overflow occurs */
          total_time >>= 1;
          divisor >>= 1;
        }
        total_time += time_diff < (((unsigned)-1) >> 1) ?
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
        /* Update old world_stopped_total_time and its divisor */
        world_stopped_total_time = total_time;
        world_stopped_total_divisor = ++divisor;

        GC_ASSERT(divisor != 0);
        GC_log_printf(
                "World-stopped marking took %lu msecs (%u on average)\n",
                time_diff, total_time / divisor);
      }
#   endif
    return(TRUE);
}

/* Set all mark bits for the free list whose first entry is q. */
GC_INNER void GC_set_fl_marks(ptr_t q)
{
    struct hblk *h, *last_h;
    hdr *hhdr;
    IF_PER_OBJ(size_t sz;)
    unsigned bit_no;

    if (q != NULL) {
      h = HBLKPTR(q);
      last_h = h;
      hhdr = HDR(h);
      IF_PER_OBJ(sz = hhdr->hb_sz;)

      for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
          set_mark_bit_from_hdr(hhdr, bit_no);
          ++hhdr -> hb_n_marks;
        }

        q = obj_link(q);
        if (q == NULL)
          break;

        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          IF_PER_OBJ(sz = hhdr->hb_sz;)
        }
      }
    }
}

#ifdef GC_ASSERTIONS
  /* Check that all mark bits for the free list whose first entry is q  */
  /* are set.                                                           */
  void GC_check_fl_marks(ptr_t q)
  {
    ptr_t p;
    for (p = q; p != 0; p = obj_link(p)) {
      if (!GC_is_marked(p)) {
        GC_err_printf("Unmarked object %p on list %p\n", p, q);
        ABORT("Unmarked local free list entry");
      }
    }
  }
#endif

/* Clear all mark bits for the free list whose first entry is q.        */
/* Decrement GC_bytes_found by number of bytes on free list.            */
STATIC void GC_clear_fl_marks(ptr_t q)
{
    struct hblk *h, *last_h;
    hdr *hhdr;
    size_t sz;
    unsigned bit_no;

    if (q != NULL) {
      h = HBLKPTR(q);
      last_h = h;
      hhdr = HDR(h);
      sz = hhdr->hb_sz;  /* Normally set only once. */

      for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        if (mark_bit_from_hdr(hhdr, bit_no)) {
          size_t n_marks = hhdr -> hb_n_marks - 1;
          clear_mark_bit_from_hdr(hhdr, bit_no);
#         ifdef PARALLEL_MARK
            /* Appr. count, don't decrement to zero! */
            if (0 != n_marks || !GC_parallel) {
              hhdr -> hb_n_marks = n_marks;
            }
#         else
            hhdr -> hb_n_marks = n_marks;
#         endif
        }
        GC_bytes_found -= sz;

        q = obj_link(q);
        if (q == NULL)
          break;

        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          sz = hhdr->hb_sz;
        }
      }
    }
}

#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
  void GC_check_tls(void);
#endif

#ifdef MAKE_BACK_GRAPH
  GC_INNER void GC_traverse_back_graph(void);
#endif

/* Finish up a collection. Assumes mark bits are consistent, lock is    */
/* held, but the world is otherwise running.                            */
STATIC void GC_finish_collection(void)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE finalize_time = 0;
      CLOCK_TYPE done_time;
#   endif

#   if defined(GC_ASSERTIONS) && defined(THREADS) \
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      /* Check that we marked some of our own data. */
      /* FIXME: Add more checks.                    */
      GC_check_tls();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

    GC_bytes_found = 0;
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
      if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
        GC_print_address_map();
      }
#   endif
    COND_DUMP;
    if (GC_find_leak) {
      /* Mark all objects on the free list. All objects should be       */
      /* marked when we're done.                                        */
      word size;        /* current object size */
      unsigned kind;
      ptr_t q;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_set_fl_marks(q);
        }
      }
      GC_start_reclaim(TRUE);
        /* The above just checks; it doesn't really reclaim anything.   */
    }

    GC_finalize();
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(finalize_time);
#   endif

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     elif !defined(SMALL_CONFIG)
        GC_err_printf("Back height not available: "
                      "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_bytes_found count.         */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem; only      */
    /* objects on the list itself will be marked, and that's fixed here. */
    {
      word size;        /* current object size          */
      ptr_t q;          /* pointer to current object    */
      unsigned kind;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_clear_fl_marks(q);
        }
      }
    }

    if (GC_print_stats == VERBOSE)
        GC_log_printf("Bytes recovered before sweep - f.l. count = %ld\n",
                      (long)GC_bytes_found);

    /* Reconstruct free lists to contain everything not marked */
    GC_start_reclaim(FALSE);
    if (GC_print_stats) {
      GC_log_printf("Heap contains %lu pointer-containing "
                    "+ %lu pointer-free reachable bytes\n",
                    (unsigned long)GC_composite_in_use,
                    (unsigned long)GC_atomic_in_use);
    }
    if (GC_is_full_gc) {
        GC_used_heap_size_after_full = USED_HEAP_SIZE;
        GC_need_full_gc = FALSE;
    } else {
        GC_need_full_gc = USED_HEAP_SIZE - GC_used_heap_size_after_full
                          > min_bytes_allocd();
    }

    if (GC_print_stats == VERBOSE) {
#     ifdef USE_MUNMAP
        GC_log_printf("Immediately reclaimed %ld bytes in heap"
                      " of size %lu bytes (%lu unmapped)\n",
                      (long)GC_bytes_found, (unsigned long)GC_heapsize,
                      (unsigned long)GC_unmapped_bytes);
#     else
        GC_log_printf(
                "Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
                (long)GC_bytes_found, (unsigned long)GC_heapsize);
#     endif
    }

    /* Reset or increment counters for next cycle */
    GC_n_attempts = 0;
    GC_is_full_gc = FALSE;
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
    GC_bytes_allocd = 0;
    GC_bytes_dropped = 0;
    GC_bytes_freed = 0;
    GC_finalizer_bytes_freed = 0;

#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(done_time);

        /* A convenient place to output finalization statistics. */
        GC_print_finalization_stats();

        GC_log_printf("Finalize plus initiate sweep took %lu + %lu msecs\n",
                      MS_TIME_DIFF(finalize_time,start_time),
                      MS_TIME_DIFF(done_time,finalize_time));
      }
#   endif
}

/* If stop_func == 0 then GC_default_stop_func is used instead. */
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
                                         GC_bool force_unmap)
{
    GC_bool result;
#   ifdef USE_MUNMAP
      int old_unmap_threshold;
#   endif
    IF_CANCEL(int cancel_state;)
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    LOCK();
    DISABLE_CANCEL(cancel_state);
#   ifdef USE_MUNMAP
      old_unmap_threshold = GC_unmap_threshold;
      if (force_unmap ||
          (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
        GC_unmap_threshold = 1; /* unmap as much as possible */
#   endif
    ENTER_GC();
    /* Minimize junk left in my registers */
    GC_noop(0,0,0,0,0,0);
    result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
                                     GC_default_stop_func);
    EXIT_GC();
#   ifdef USE_MUNMAP
      GC_unmap_threshold = old_unmap_threshold; /* restore */
#   endif
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    if (result) {
      if (GC_debugging_started) GC_print_all_smashed();
      GC_INVOKE_FINALIZERS();
    }
    return(result);
}

/* Externally callable routines to invoke full, stop-the-world collection. */
GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
{
    GC_ASSERT(stop_func != 0);
    return (int)GC_try_to_collect_general(stop_func, FALSE);
}

GC_API void GC_CALL GC_gcollect(void)
{
    /* 0 is passed as stop_func to get GC_default_stop_func value       */
    /* while holding the allocation lock (to prevent data races).       */
    (void)GC_try_to_collect_general(0, FALSE);
    if (GC_have_errors) GC_print_all_errors();
}

GC_API void GC_CALL GC_gcollect_and_unmap(void)
{
    (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
}
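
/*
 * Example (a sketch): a client-supplied stop function giving a full
 * collection a wall-clock budget.  my_deadline is a hypothetical value
 * the client sets beforehand; <time.h> is assumed to be included.
 *
 *   static time_t my_deadline;
 *
 *   static int GC_CALLBACK budget_stop_func(void)
 *   {
 *     return time(NULL) >= my_deadline;  // nonzero abandons the collection
 *   }
 *   ...
 *   my_deadline = time(NULL) + 2;  // allow roughly two seconds
 *   if (!GC_try_to_collect(budget_stop_func)) {
 *     // collection was abandoned; the heap remains consistent
 *   }
 */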

GC_INNER word GC_n_heap_sects = 0;
                        /* Number of sections currently in heap. */

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER word GC_n_memory = 0;
                        /* Number of GET_MEM allocated memory sections. */
#endif

#ifdef USE_PROC_FOR_LIBRARIES
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory.    */
  /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set.           */
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
  {
    if (0 == p) return;
    if (GC_n_memory >= MAX_HEAP_SECTS)
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
    GC_our_memory[GC_n_memory].hs_start = p;
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
    GC_n_memory++;
  }
#endif

/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    while ((word)p <= HBLKSIZE) {
        /* Can't handle memory near address zero. */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (endp <= (word)p) {
        /* Address wrapped. */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
        /* This is extremely unlikely. Can't add it. This will          */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself. There's some unintentional reflection        */
                /* here.                                                */
    }
    if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }
}

#if !defined(NO_DEBUGGING)
  void GC_print_heap_sects(void)
  {
    unsigned i;

    GC_printf("Total heap size: %lu\n", (unsigned long)GC_heapsize);
    for (i = 0; i < GC_n_heap_sects; i++) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t len = GC_heap_sects[i].hs_bytes;
      struct hblk *h;
      unsigned nbl = 0;

      for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
        if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
      }
      GC_printf("Section %d from %p to %p %lu/%lu blacklisted\n",
                i, start, start + len,
                (unsigned long)nbl, (unsigned long)(len/HBLKSIZE));
    }
  }
#endif

void * GC_least_plausible_heap_addr = (void *)ONES;
void * GC_greatest_plausible_heap_addr = 0;

GC_INLINE word GC_max(word x, word y)
{
    return(x > y? x : y);
}

GC_INLINE word GC_min(word x, word y)
{
    return(x < y? x : y);
}

GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}

GC_word GC_max_retries = 0;
/*
 * This explicitly increases the size of the heap. It is used
 * internally, but may also be invoked from GC_expand_hp by the user.
 * The argument is in units of HBLKSIZE.
 * Tiny values of n are rounded up.
 * Returns FALSE on failure.
 */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
    word bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect the */
                                /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
    {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
    }

    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    GC_add_to_our_memory((ptr_t)space, bytes);
    if (space == 0) {
        if (GC_print_stats) {
1148 GC_log_printf("Failed to expand heap by %ld bytes\n",
1149 (unsigned long)bytes);
        }
        return(FALSE);
    }
    if (GC_print_stats) {
        GC_log_printf("Increasing heap size by %lu after %lu allocated bytes\n",
                      (unsigned long)bytes, (unsigned long)GC_bytes_allocd);
    }
    /* Adjust heap limits generously for blacklisting to work better.   */
    /* GC_add_to_heap performs minimal adjustment needed for            */
    /* correctness.                                                     */
    expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
        /* Assume the heap is growing up */
        word new_limit = (word)space + bytes + expansion_slop;
        if (new_limit > (word)space) {
          GC_greatest_plausible_heap_addr =
            (void *)GC_max((word)GC_greatest_plausible_heap_addr,
                           (word)new_limit);
        }
    } else {
        /* Heap is growing down */
        word new_limit = (word)space - expansion_slop;
        if (new_limit < (word)space) {
          GC_least_plausible_heap_addr =
            (void *)GC_min((word)GC_least_plausible_heap_addr,
                           (word)space - expansion_slop);
        }
    }
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
    /* Force GC before we are likely to allocate past expansion_slop */
    GC_collect_at_heapsize =
        GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
    if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
        GC_collect_at_heapsize = (word)(-1);
    return(TRUE);
}
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* The argument is in bytes. Includes GC_init() call.                    */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    if (!GC_is_initialized) GC_init();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    return(result);
}
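
/*
 * Example client usage (a sketch): pre-size the heap at startup so the
 * first large burst of allocation does not trigger repeated expansions.
 * The 64 MB figure is arbitrary.
 *
 *   GC_INIT();
 *   if (!GC_expand_hp(64 * 1024 * 1024)) {
 *     // expansion failed; the collector still works with a smaller heap
 *   }
 */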

GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

/* Collect or expand heap in an attempt to make the indicated number of */
/* free blocks available. Should be called until the blocks are         */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE.                     */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      GC_bool ignore_off_page,
                                      GC_bool retry)
{
    GC_bool gc_not_stopped = TRUE;
    word blocks_to_get;
    IF_CANCEL(int cancel_state;)

    DISABLE_CANCEL(cancel_state);
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0) || GC_should_collect())) {
      /* Try to do a full collection using 'default' stop_func (unless  */
      /* nothing has been allocated since the latest collection or heap */
      /* expansion is disabled).                                        */
      gc_not_stopped = GC_try_to_collect_inner(
                         GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                         GC_default_stop_func : GC_never_stop_func);
      if (gc_not_stopped == TRUE || !retry) {
        /* Either the collection hasn't been aborted or this is the     */
        /* first attempt (in a loop).                                   */
        RESTORE_CANCEL(cancel_state);
        return(TRUE);
      }
    }

    blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                    + needed_blocks;
    if (blocks_to_get > MAXHINCR) {
      word slop;

      /* Get the minimum required to make it likely that we can satisfy */
      /* the current request in the presence of black-listing.          */
      /* This will probably be more than MAXHINCR.                      */
      if (ignore_off_page) {
        slop = 4;
      } else {
        slop = 2 * divHBLKSZ(BL_LIMIT);
        if (slop > needed_blocks) slop = needed_blocks;
      }
      if (needed_blocks + slop > MAXHINCR) {
        blocks_to_get = needed_blocks + slop;
      } else {
        blocks_to_get = MAXHINCR;
      }
    }

    if (!GC_expand_hp_inner(blocks_to_get)
        && !GC_expand_hp_inner(needed_blocks)) {
      if (gc_not_stopped == FALSE) {
        /* Don't increment GC_fail_count here (and no warning). */
        GC_gcollect_inner();
        GC_ASSERT(GC_bytes_allocd == 0);
      } else if (GC_fail_count++ < GC_max_retries) {
        WARN("Out of Memory! Trying to continue ...\n", 0);
        GC_gcollect_inner();
      } else {
#       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
          WARN("Out of Memory! Heap size: %" GC_PRIdPTR " MiB."
               " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#       endif
        RESTORE_CANCEL(cancel_state);
        return(FALSE);
      }
    } else if (GC_fail_count && GC_print_stats) {
      GC_log_printf("Memory available again...\n");
    }
    RESTORE_CANCEL(cancel_state);
    return(TRUE);
}

/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
    GC_bool tried_minor = FALSE;
    GC_bool retry = FALSE;

    if (gran == 0) return(0);

    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
      if (TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
      GC_continue_reclaim(gran, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(gran, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && !tried_minor) {
          GC_collect_a_little_inner(1);
          tried_minor = TRUE;
        } else {
          if (!GC_collect_or_expand(1, FALSE, retry)) {
            EXIT_GC();
            return(0);
          }
          retry = TRUE;
        }
        EXIT_GC();
      }
    }
    /* Successful allocation; reset failure count. */
    GC_fail_count = 0;

    return(*flh);
}
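
/*
 * Caller's obligation (a sketch mirroring the example at the top of this
 * file): the returned object must be unlinked from the free list while
 * the allocator lock is still held:
 *
 *   void **flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
 *   ptr_t op = GC_allocobj(gran, kind);
 *   if (op != 0) {
 *     *flh = obj_link(op);  // remove op from the free list
 *   }
 */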