1 /*
2  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3  * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
4  * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
5  * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
6  * Copyright (c) 2008-2019 Ivan Maidanski
7  *
8  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
10  *
11  * Permission is hereby granted to use or copy this program
12  * for any purpose,  provided the above notices are retained on all copies.
13  * Permission to modify the code and to distribute modified code is granted,
14  * provided the above notices are retained, and a notice that the code was
15  * modified is included with the above copyright notice.
16  *
17  */
18 
19 #include "private/gc_priv.h"
20 
21 #include <stdio.h>
22 #if !defined(MACOS) && !defined(MSWINCE)
23 # include <signal.h>
24 # if !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2) \
25      && !defined(__CC_ARM)
26 #   include <sys/types.h>
27 # endif
28 #endif
29 
30 /*
31  * Separate free lists are maintained for different sized objects
32  * up to MAXOBJBYTES.
33  * The call GC_allocobj(i,k) ensures that the freelist for
34  * kind k objects of size i points to a non-empty
35  * free list. It returns a pointer to the first entry on the free list.
36  * In a single-threaded world, GC_allocobj may be called to allocate
37  * an object of small size lb (and NORMAL kind) as follows
38  * (GC_generic_malloc_inner is a wrapper over GC_allocobj which also
39  * fills in GC_size_map if needed):
40  *
41  *   lg = GC_size_map[lb];
42  *   op = GC_objfreelist[lg];
43  *   if (NULL == op) {
44  *     op = GC_generic_malloc_inner(lb, NORMAL);
45  *   } else {
46  *     GC_objfreelist[lg] = obj_link(op);
47  *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
48  *   }
49  *
50  * Note that this is very fast if the free list is non-empty; it should
51  * only involve the execution of 4 or 5 simple instructions.
52  * All composite objects on freelists are cleared, except for
53  * their first word.
54  */
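
/*
 * A client-side sketch of the same idea (illustrative only, and assuming
 * the public gc.h API rather than the internal routines named above):
 *
 *   #include "gc.h"
 *   void *p;
 *   GC_INIT();
 *   p = GC_MALLOC(16);
 *
 * When the free list for 16-byte NORMAL objects is non-empty, the call
 * reduces to roughly the instruction sequence shown above; otherwise it
 * falls through to the generic allocation path and, eventually, to
 * GC_allocobj.
 */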
55 
56 /*
57  * The allocator uses GC_allochblk to allocate large chunks of objects.
58  * These chunks all start on addresses which are multiples of
59  * HBLKSZ.   Each allocated chunk has an associated header,
60  * which can be located quickly based on the address of the chunk.
61  * (See headers.c for details.)
62  * This makes it possible to check quickly whether an
63  * arbitrary address corresponds to an object administered by the
64  * allocator.
65  */
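
/*
 * Sketch of that lookup, using the internal macros employed later in this
 * file (HBLKPTR, HDR); this is illustrative, not an additional API:
 *
 *   struct hblk *h = HBLKPTR(p);   p rounded down to its HBLKSIZE boundary
 *   hdr *hhdr = HDR(h);            header lookup (see headers.c)
 *
 * A null result means the address is not part of a block administered by
 * the allocator.
 */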
66 
67 word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */
68 
69 word GC_gc_no = 0;
70 
71 #ifndef NO_CLOCK
72   static unsigned long full_gc_total_time = 0; /* in msecs, may wrap */
73   static GC_bool measure_performance = FALSE;
74                 /* Do performance measurements if set to true (e.g.,    */
75                 /* accumulation of the total time of full collections). */
76 
77   GC_API void GC_CALL GC_start_performance_measurement(void)
78   {
79     measure_performance = TRUE;
80   }
81 
82   GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void)
83   {
84     return full_gc_total_time;
85   }
86 #endif /* !NO_CLOCK */
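
/*
 * Illustrative client-side use of the two entry points above (a sketch,
 * assuming the collector was built without NO_CLOCK):
 *
 *   GC_start_performance_measurement();
 *   ... run the workload ...
 *   printf("full GC time: %lu ms\n", GC_get_full_gc_total_time());
 */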
87 
88 #ifndef GC_DISABLE_INCREMENTAL
89   GC_INNER GC_bool GC_incremental = FALSE; /* By default, stop the world. */
90 #endif
91 
92 GC_API int GC_CALL GC_is_incremental_mode(void)
93 {
94   return (int)GC_incremental;
95 }
96 
97 #ifdef THREADS
98   int GC_parallel = FALSE;      /* By default, parallel GC is off.      */
99 #endif
100 
101 #if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
102   int GC_full_freq = GC_FULL_FREQ;
103 #else
104   int GC_full_freq = 19;   /* Every 20th collection is a full   */
105                            /* collection, whether we need it    */
106                            /* or not.                           */
107 #endif
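
/*
 * Clients can tune this at run time through the public accessors declared
 * in gc.h (sketch):
 *
 *   GC_set_full_freq(5);          full collection roughly every 6th cycle
 *   freq = GC_get_full_freq();
 */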
108 
109 STATIC GC_bool GC_need_full_gc = FALSE;
110                            /* Need full GC due to heap growth.  */
111 
112 #ifdef THREAD_LOCAL_ALLOC
113   GC_INNER GC_bool GC_world_stopped = FALSE;
114 #endif
115 
116 STATIC word GC_used_heap_size_after_full = 0;
117 
118 /* GC_copyright symbol is externally visible. */
119 EXTERN_C_BEGIN
120 extern const char * const GC_copyright[];
121 EXTERN_C_END
122 const char * const GC_copyright[] =
123 {"Copyright 1988, 1989 Hans-J. Boehm and Alan J. Demers ",
124 "Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
125 "Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
126 "Copyright (c) 1999-2009 by Hewlett-Packard Company.  All rights reserved. ",
127 "Copyright (c) 2008-2019 Ivan Maidanski ",
128 "THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
129 " EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
130 "See source code for details." };
131 
132 /* Version macros are now defined in gc_version.h, which is included by */
133 /* gc.h, which is included by gc_priv.h.                                */
134 #ifndef GC_NO_VERSION_VAR
135   EXTERN_C_BEGIN
136   extern const unsigned GC_version;
137   EXTERN_C_END
138   const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
139                         (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
140 #endif
141 
142 GC_API unsigned GC_CALL GC_get_version(void)
143 {
144   return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
145           GC_VERSION_MICRO;
146 }
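
/*
 * A caller can decode the packed value with the same shifts used above,
 * e.g.:
 *
 *   unsigned v = GC_get_version();
 *   unsigned major = v >> 16;
 *   unsigned minor = (v >> 8) & 0xff;
 *   unsigned micro = v & 0xff;
 */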
147 
148 /* some more variables */
149 
150 #ifdef GC_DONT_EXPAND
151   int GC_dont_expand = TRUE;
152 #else
153   int GC_dont_expand = FALSE;
154 #endif
155 
156 #if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
157   word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /* must be > 0 */
158 #else
159   word GC_free_space_divisor = 3;
160 #endif
161 
162 GC_INNER int GC_CALLBACK GC_never_stop_func(void)
163 {
164   return(0);
165 }
166 
167 #if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
168   unsigned long GC_time_limit = GC_TIME_LIMIT;
169                            /* We try to keep pause times from exceeding  */
170                            /* this by much. In milliseconds.             */
171 #else
172   unsigned long GC_time_limit = 50;
173 #endif
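
/*
 * Client-side tuning sketch, using the public setter from gc.h:
 *
 *   GC_set_time_limit(5);                  aim for pauses of ~5 ms or less
 *   GC_set_time_limit(GC_TIME_UNLIMITED);  never abandon a stopped mark
 */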
174 
175 #ifndef NO_CLOCK
176   STATIC CLOCK_TYPE GC_start_time = CLOCK_TYPE_INITIALIZER;
177                                 /* Time at which we stopped world.      */
178                                 /* used only in GC_timeout_stop_func.   */
179 #endif
180 
181 STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
182                                 /* collection within GC_time_limit.     */
183 
184 STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
185                                 /* accessed holding the lock.           */
186 
187 GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
188 {
189   DCL_LOCK_STATE;
190   GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
191   LOCK();
192   GC_default_stop_func = stop_func;
193   UNLOCK();
194 }
195 
196 GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
197 {
198   GC_stop_func stop_func;
199   DCL_LOCK_STATE;
200   LOCK();
201   stop_func = GC_default_stop_func;
202   UNLOCK();
203   return stop_func;
204 }
205 
206 #if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
207 # define GC_timeout_stop_func GC_default_stop_func
208 #else
209   STATIC int GC_CALLBACK GC_timeout_stop_func (void)
210   {
211     CLOCK_TYPE current_time;
212     static unsigned count = 0;
213     unsigned long time_diff;
214 
215     if ((*GC_default_stop_func)())
216       return(1);
217 
218     if ((count++ & 3) != 0) return(0);
219     GET_TIME(current_time);
220     time_diff = MS_TIME_DIFF(current_time,GC_start_time);
221     if (time_diff >= GC_time_limit) {
222         GC_COND_LOG_PRINTF(
223                 "Abandoning stopped marking after %lu msecs (attempt %d)\n",
224                 time_diff, GC_n_attempts);
225         return(1);
226     }
227     return(0);
228   }
229 #endif /* !GC_DISABLE_INCREMENTAL */
230 
231 #ifdef THREADS
232   GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
233 #endif
234 
235 static size_t min_bytes_allocd_minimum = 1;
236                         /* The lowest value returned by min_bytes_allocd(). */
237 
238 GC_API void GC_CALL GC_set_min_bytes_allocd(size_t value)
239 {
240     GC_ASSERT(value > 0);
241     min_bytes_allocd_minimum = value;
242 }
243 
244 GC_API size_t GC_CALL GC_get_min_bytes_allocd(void)
245 {
246     return min_bytes_allocd_minimum;
247 }
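
/*
 * Example use of the two entry points defined just above:
 *
 *   GC_set_min_bytes_allocd(64 * 1024);
 *   min = GC_get_min_bytes_allocd();      now 64 KiB; at least this much
 *                                         is allocated between collections
 */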
248 
249 /* Return the minimum number of bytes that must be allocated between    */
250 /* collections to amortize the collection cost.  Should be non-zero.    */
251 static word min_bytes_allocd(void)
252 {
253     word result;
254     word stack_size;
255     word total_root_size;       /* includes double stack size,  */
256                                 /* since the stack is expensive */
257                                 /* to scan.                     */
258     word scan_size;             /* Estimate of memory to be scanned     */
259                                 /* during normal GC.                    */
260 
261 #   ifdef THREADS
262       if (GC_need_to_lock) {
263         /* We are multi-threaded... */
264         stack_size = GC_total_stacksize;
265         /* For now, we just use the value computed during the latest GC. */
266 #       ifdef DEBUG_THREADS
267           GC_log_printf("Total stacks size: %lu\n",
268                         (unsigned long)stack_size);
269 #       endif
270       } else
271 #   endif
272     /* else*/ {
273 #     ifdef STACK_NOT_SCANNED
274         stack_size = 0;
275 #     elif defined(STACK_GROWS_UP)
276         stack_size = GC_approx_sp() - GC_stackbottom;
277 #     else
278         stack_size = GC_stackbottom - GC_approx_sp();
279 #     endif
280     }
281 
282     total_root_size = 2 * stack_size + GC_root_size;
283     scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
284                 + total_root_size;
285     result = scan_size / GC_free_space_divisor;
286     if (GC_incremental) {
287       result /= 2;
288     }
289     return result > min_bytes_allocd_minimum
290             ? result : min_bytes_allocd_minimum;
291 }
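
/*
 * Worked example (numbers purely illustrative) with the default
 * GC_free_space_divisor of 3: for 6 MB of composite data, 4 MB of atomic
 * data, a 1 MB stack and 1 MB of other roots,
 *
 *   scan_size = 2*6MB + 4MB/4 + (2*1MB + 1MB) = 16 MB
 *   result    = 16MB / 3  ~ 5.3 MB   (halved again in incremental mode)
 *
 * so roughly that much must be allocated before the next collection is
 * considered amortized.
 */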
292 
293 STATIC word GC_non_gc_bytes_at_gc = 0;
294                 /* Number of explicitly managed bytes of storage        */
295                 /* at last collection.                                  */
296 
297 /* Return the number of bytes allocated, adjusted for explicit storage  */
298 /* management, etc..  This number is used in deciding when to trigger   */
299 /* collections.                                                         */
300 STATIC word GC_adj_bytes_allocd(void)
301 {
302     signed_word result;
303     signed_word expl_managed = (signed_word)GC_non_gc_bytes
304                                 - (signed_word)GC_non_gc_bytes_at_gc;
305 
306     /* Don't count what was explicitly freed, or newly allocated for    */
307     /* explicit management.  Note that deallocating an explicitly       */
308     /* managed object should not alter result, assuming the client      */
309     /* is playing by the rules.                                         */
310     result = (signed_word)GC_bytes_allocd
311              + (signed_word)GC_bytes_dropped
312              - (signed_word)GC_bytes_freed
313              + (signed_word)GC_finalizer_bytes_freed
314              - expl_managed;
315     if (result > (signed_word)GC_bytes_allocd) {
316         result = GC_bytes_allocd;
317         /* probably client bug or unfortunate scheduling */
318     }
319     result += GC_bytes_finalized;
320         /* We count objects enqueued for finalization as though they    */
321         /* had been reallocated this round. Finalization is user        */
322         /* visible progress.  And if we don't count this, we have       */
323         /* stability problems for programs that finalize all objects.   */
324     if (result < (signed_word)(GC_bytes_allocd >> 3)) {
325         /* Always count at least 1/8 of the allocations.  We don't want */
326         /* to collect too infrequently, since that would inhibit        */
327         /* coalescing of free storage blocks.                           */
328         /* This also makes us partially robust against client bugs.     */
329         return(GC_bytes_allocd >> 3);
330     } else {
331         return(result);
332     }
333 }
334 
335 
336 /* Clear up a few frames worth of garbage left at the top of the stack. */
337 /* This is used to prevent us from accidentally treating garbage left   */
338 /* on the stack by other parts of the collector as roots.  This         */
339 /* differs from the code in misc.c, which actually tries to keep the    */
340 /* stack clear of long-lived, client-generated garbage.                 */
341 STATIC void GC_clear_a_few_frames(void)
342 {
343 #   ifndef CLEAR_NWORDS
344 #     define CLEAR_NWORDS 64
345 #   endif
346     volatile word frames[CLEAR_NWORDS];
347     BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
348 }
349 
350 /* Heap size at which we need a collection to avoid expanding past      */
351 /* limits used by blacklisting.                                         */
352 STATIC word GC_collect_at_heapsize = GC_WORD_MAX;
353 
354 /* Have we allocated enough to amortize a collection? */
355 GC_INNER GC_bool GC_should_collect(void)
356 {
357     static word last_min_bytes_allocd;
358     static word last_gc_no;
359     if (last_gc_no != GC_gc_no) {
360       last_gc_no = GC_gc_no;
361       last_min_bytes_allocd = min_bytes_allocd();
362     }
363     return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
364            || GC_heapsize >= GC_collect_at_heapsize);
365 }
366 
367 /* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
368                         /* Called at start of full collections.         */
369                         /* Not called if 0.  Called with the allocation */
370                         /* lock held.  Not used by GC itself.           */
371 
372 GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
373 {
374     DCL_LOCK_STATE;
375     LOCK();
376     GC_start_call_back = fn;
377     UNLOCK();
378 }
379 
380 GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
381 {
382     GC_start_callback_proc fn;
383     DCL_LOCK_STATE;
384     LOCK();
385     fn = GC_start_call_back;
386     UNLOCK();
387     return fn;
388 }
389 
390 GC_INLINE void GC_notify_full_gc(void)
391 {
392     if (GC_start_call_back != 0) {
393         (*GC_start_call_back)();
394     }
395 }
396 
397 STATIC GC_bool GC_is_full_gc = FALSE;
398 
399 STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
400 STATIC void GC_finish_collection(void);
401 
402 /*
403  * Initiate a garbage collection if appropriate.
404  * Choose judiciously
405  * between partial, full, and stop-world collections.
406  */
407 STATIC void GC_maybe_gc(void)
408 {
409     GC_ASSERT(I_HOLD_LOCK());
410     ASSERT_CANCEL_DISABLED();
411     if (GC_should_collect()) {
412         static int n_partial_gcs = 0;
413 
414         if (!GC_incremental) {
415             /* TODO: If possible, GC_default_stop_func should be used here */
416             GC_try_to_collect_inner(GC_never_stop_func);
417             n_partial_gcs = 0;
418             return;
419         } else {
420 #         ifdef PARALLEL_MARK
421             if (GC_parallel)
422               GC_wait_for_reclaim();
423 #         endif
424           if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
425             GC_COND_LOG_PRINTF(
426                 "***>Full mark for collection #%lu after %lu allocd bytes\n",
427                 (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
428             GC_promote_black_lists();
429             (void)GC_reclaim_all((GC_stop_func)0, TRUE);
430             GC_notify_full_gc();
431             GC_clear_marks();
432             n_partial_gcs = 0;
433             GC_is_full_gc = TRUE;
434           } else {
435             n_partial_gcs++;
436           }
437         }
438         /* We try to mark with the world stopped.       */
439         /* If we run out of time, this turns into       */
440         /* incremental marking.                         */
441 #       ifndef NO_CLOCK
442           if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
443 #       endif
444         /* TODO: If possible, GC_default_stop_func should be    */
445         /* used instead of GC_never_stop_func here.             */
446         if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
447                             GC_never_stop_func : GC_timeout_stop_func)) {
448 #           ifdef SAVE_CALL_CHAIN
449                 GC_save_callers(GC_last_stack);
450 #           endif
451             GC_finish_collection();
452         } else {
453             if (!GC_is_full_gc) {
454                 /* Count this as the first attempt */
455                 GC_n_attempts++;
456             }
457         }
458     }
459 }
460 
461 STATIC GC_on_collection_event_proc GC_on_collection_event = 0;
462 
463 GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc fn)
464 {
465     /* fn may be 0 (means no event notifier). */
466     DCL_LOCK_STATE;
467     LOCK();
468     GC_on_collection_event = fn;
469     UNLOCK();
470 }
471 
472 GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void)
473 {
474     GC_on_collection_event_proc fn;
475     DCL_LOCK_STATE;
476     LOCK();
477     fn = GC_on_collection_event;
478     UNLOCK();
479     return fn;
480 }
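
/*
 * Client-side sketch of an event hook (the GC_EVENT_* values are the ones
 * raised by the code below; the callback is invoked while the collector
 * holds the allocation lock, so it should not allocate or re-enter GC):
 *
 *   static unsigned n_collections_started;
 *   static void GC_CALLBACK on_event(GC_EventType t)
 *   {
 *     if (t == GC_EVENT_START) n_collections_started++;
 *   }
 *   ...
 *   GC_set_on_collection_event(on_event);
 */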
481 
482 /* Stop the world garbage collection.  If stop_func is not      */
483 /* GC_never_stop_func then abort if stop_func returns TRUE.     */
484 /* Return TRUE if we successfully completed the collection.     */
485 GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
486 {
487 #   ifndef NO_CLOCK
488       CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
489       GC_bool start_time_valid;
490 #   endif
491 
492     ASSERT_CANCEL_DISABLED();
493     GC_ASSERT(I_HOLD_LOCK());
494     if (GC_dont_gc || (*stop_func)()) return FALSE;
495     if (GC_on_collection_event)
496       GC_on_collection_event(GC_EVENT_START);
497     if (GC_incremental && GC_collection_in_progress()) {
498       GC_COND_LOG_PRINTF(
499             "GC_try_to_collect_inner: finishing collection in progress\n");
500       /* Just finish collection already in progress.    */
501         while(GC_collection_in_progress()) {
502             if ((*stop_func)()) {
503               /* TODO: Notify GC_EVENT_ABANDON */
504               return(FALSE);
505             }
506             GC_collect_a_little_inner(1);
507         }
508     }
509     GC_notify_full_gc();
510 #   ifndef NO_CLOCK
511       start_time_valid = FALSE;
512       if ((GC_print_stats | (int)measure_performance) != 0) {
513         if (GC_print_stats)
514           GC_log_printf("Initiating full world-stop collection!\n");
515         start_time_valid = TRUE;
516         GET_TIME(start_time);
517       }
518 #   endif
519     GC_promote_black_lists();
520     /* Make sure all blocks have been reclaimed, so sweep routines      */
521     /* don't see cleared mark bits.                                     */
522     /* If we're guaranteed to finish, then this is unnecessary.         */
523     /* In the find_leak case, we have to finish to guarantee that       */
524     /* previously unmarked objects are not reported as leaks.           */
525 #       ifdef PARALLEL_MARK
526           if (GC_parallel)
527             GC_wait_for_reclaim();
528 #       endif
529         if ((GC_find_leak || stop_func != GC_never_stop_func)
530             && !GC_reclaim_all(stop_func, FALSE)) {
531             /* Aborted.  So far everything is still consistent. */
532             /* TODO: Notify GC_EVENT_ABANDON */
533             return(FALSE);
534         }
535     GC_invalidate_mark_state();  /* Flush mark stack.   */
536     GC_clear_marks();
537 #   ifdef SAVE_CALL_CHAIN
538         GC_save_callers(GC_last_stack);
539 #   endif
540     GC_is_full_gc = TRUE;
541     if (!GC_stopped_mark(stop_func)) {
542       if (!GC_incremental) {
543         /* We're partially done and have no way to complete or use      */
544         /* current work.  Reestablish invariants as cheaply as          */
545         /* possible.                                                    */
546         GC_invalidate_mark_state();
547         GC_unpromote_black_lists();
548       } /* else we claim the world is already still consistent.  We'll  */
549         /* finish incrementally.                                        */
550       /* TODO: Notify GC_EVENT_ABANDON */
551       return(FALSE);
552     }
553     GC_finish_collection();
554 #   ifndef NO_CLOCK
555       if (start_time_valid) {
556         CLOCK_TYPE current_time;
557         unsigned long time_diff;
558 
559         GET_TIME(current_time);
560         time_diff = MS_TIME_DIFF(current_time, start_time);
561         if (measure_performance)
562           full_gc_total_time += time_diff; /* may wrap */
563         if (GC_print_stats)
564           GC_log_printf("Complete collection took %lu msecs\n", time_diff);
565       }
566 #   endif
567     if (GC_on_collection_event)
568       GC_on_collection_event(GC_EVENT_END);
569     return(TRUE);
570 }
571 
572 /*
573  * Perform n units of garbage collection work.  A unit is intended to touch
574  * roughly GC_rate pages.  Every once in a while, we do more than that.
575  * This needs to be a fairly large number with our current incremental
576  * GC strategy, since otherwise we allocate too much during GC, and the
577  * cleanup gets expensive.
578  */
579 #ifndef GC_RATE
580 # define GC_RATE 10
581 #endif
582 
583 #ifndef MAX_PRIOR_ATTEMPTS
584 # define MAX_PRIOR_ATTEMPTS 1
585 #endif
586         /* Maximum number of prior attempts at world-stop marking.      */
587         /* A value of 1 means that we finish the second time, no matter */
588         /* how long it takes.  Doesn't count the initial root scan      */
589         /* for a full GC.                                               */
590 
591 STATIC int GC_deficit = 0;/* The number of extra calls to GC_mark_some  */
592                           /* that we have made.                         */
593 
594 STATIC int GC_rate = GC_RATE;
595 
596 GC_API void GC_CALL GC_set_rate(int value)
597 {
598     GC_ASSERT(value > 0);
599     GC_rate = value;
600 }
601 
602 GC_API int GC_CALL GC_get_rate(void)
603 {
604     return GC_rate;
605 }
606 
607 static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
608 
609 GC_API void GC_CALL GC_set_max_prior_attempts(int value)
610 {
611     GC_ASSERT(value >= 0);
612     max_prior_attempts = value;
613 }
614 
615 GC_API int GC_CALL GC_get_max_prior_attempts(void)
616 {
617     return max_prior_attempts;
618 }
619 
620 GC_INNER void GC_collect_a_little_inner(int n)
621 {
622     IF_CANCEL(int cancel_state;)
623 
624     GC_ASSERT(I_HOLD_LOCK());
625     if (GC_dont_gc) return;
626 
627     DISABLE_CANCEL(cancel_state);
628     if (GC_incremental && GC_collection_in_progress()) {
629         int i;
630         int max_deficit = GC_rate * n;
631 
632         for (i = GC_deficit; i < max_deficit; i++) {
633             if (GC_mark_some((ptr_t)0)) {
634                 /* Need to finish a collection */
635 #               ifdef SAVE_CALL_CHAIN
636                     GC_save_callers(GC_last_stack);
637 #               endif
638 #               ifdef PARALLEL_MARK
639                     if (GC_parallel)
640                       GC_wait_for_reclaim();
641 #               endif
642                 if (GC_n_attempts < max_prior_attempts
643                     && GC_time_limit != GC_TIME_UNLIMITED) {
644 #                 ifndef NO_CLOCK
645                     GET_TIME(GC_start_time);
646 #                 endif
647                   if (!GC_stopped_mark(GC_timeout_stop_func)) {
648                     GC_n_attempts++;
649                     break;
650                   }
651                 } else {
652                   /* TODO: If possible, GC_default_stop_func should be  */
653                   /* used here.                                         */
654                   (void)GC_stopped_mark(GC_never_stop_func);
655                 }
656                 GC_finish_collection();
657                 break;
658             }
659         }
660         if (GC_deficit > 0) {
661             GC_deficit -= max_deficit;
662             if (GC_deficit < 0)
663                 GC_deficit = 0;
664         }
665     } else {
666         GC_maybe_gc();
667     }
668     RESTORE_CANCEL(cancel_state);
669 }
670 
671 GC_INNER void (*GC_check_heap)(void) = 0;
672 GC_INNER void (*GC_print_all_smashed)(void) = 0;
673 
674 GC_API int GC_CALL GC_collect_a_little(void)
675 {
676     int result;
677     DCL_LOCK_STATE;
678 
679     LOCK();
680     GC_collect_a_little_inner(1);
681     result = (int)GC_collection_in_progress();
682     UNLOCK();
683     if (!result && GC_debugging_started) GC_print_all_smashed();
684     return(result);
685 }
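
/*
 * Typical client-side driver for the incremental mode (sketch, using the
 * public gc.h API; have_idle_time() is a placeholder for whatever idleness
 * test the application uses):
 *
 *   GC_enable_incremental();
 *   ...
 *   while (have_idle_time() && GC_collect_a_little())
 *     ;   each call performs one small unit of GC work and returns
 *         nonzero while a collection is still in progress
 */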
686 
687 #ifndef NO_CLOCK
688   /* Variables for world-stop average delay time statistic computation. */
689   /* "divisor" is incremented every world-stop and halved when reached  */
690   /* its maximum (or upon "total_time" overflow).                       */
691   static unsigned world_stopped_total_time = 0;
692   static unsigned world_stopped_total_divisor = 0;
693 # ifndef MAX_TOTAL_TIME_DIVISOR
694     /* We shall not use big values here (so "outdated" delay time       */
695     /* values would have less impact on "average" delay time value than */
696     /* newer ones).                                                     */
697 #   define MAX_TOTAL_TIME_DIVISOR 1000
698 # endif
699 #endif /* !NO_CLOCK */
700 
701 #ifdef USE_MUNMAP
702 # define IF_USE_MUNMAP(x) x
703 # define COMMA_IF_USE_MUNMAP(x) /* comma */, x
704 #else
705 # define IF_USE_MUNMAP(x) /* empty */
706 # define COMMA_IF_USE_MUNMAP(x) /* empty */
707 #endif
708 
709 /*
710  * We stop the world and mark from all roots.
711  * If stop_func() ever returns TRUE, we may fail and return FALSE.
712  * Increment GC_gc_no if we succeed.
713  */
714 STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
715 {
716     unsigned i;
717 #   ifndef NO_CLOCK
718       CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
719 #   endif
720 
721     GC_ASSERT(I_HOLD_LOCK());
722 #   if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
723         GC_add_current_malloc_heap();
724 #   endif
725 #   if defined(REGISTER_LIBRARIES_EARLY)
726         GC_cond_register_dynamic_libraries();
727 #   endif
728 
729 #   ifndef NO_CLOCK
730       if (GC_PRINT_STATS_FLAG)
731         GET_TIME(start_time);
732 #   endif
733 
734 #   if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
735       GC_process_togglerefs();
736 #   endif
737 #   ifdef THREADS
738       if (GC_on_collection_event)
739         GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
740 #   endif
741     STOP_WORLD();
742 #   ifdef THREADS
743       if (GC_on_collection_event)
744         GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
745 #   endif
746 
747 #   ifdef THREAD_LOCAL_ALLOC
748       GC_world_stopped = TRUE;
749 #   endif
750         /* Output blank line for convenience here */
751     GC_COND_LOG_PRINTF(
752               "\n--> Marking for collection #%lu after %lu allocated bytes\n",
753               (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
754 #   ifdef MAKE_BACK_GRAPH
755       if (GC_print_back_height) {
756         GC_build_back_graph();
757       }
758 #   endif
759 
760     /* Mark from all roots.  */
761         if (GC_on_collection_event)
762           GC_on_collection_event(GC_EVENT_MARK_START);
763 
764         /* Minimize junk left in my registers and on the stack */
765             GC_clear_a_few_frames();
766             GC_noop6(0,0,0,0,0,0);
767 
768         GC_initiate_gc();
769         for (i = 0;;i++) {
770           if ((*stop_func)()) {
771             GC_COND_LOG_PRINTF("Abandoned stopped marking after"
772                                " %u iterations\n", i);
773             GC_deficit = i;     /* Give the mutator a chance.   */
774 #           ifdef THREAD_LOCAL_ALLOC
775               GC_world_stopped = FALSE;
776 #           endif
777 
778 #           ifdef THREADS
779               if (GC_on_collection_event)
780                 GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
781 #           endif
782 
783             START_WORLD();
784 
785 #           ifdef THREADS
786               if (GC_on_collection_event)
787                 GC_on_collection_event(GC_EVENT_POST_START_WORLD);
788 #           endif
789 
790             /* TODO: Notify GC_EVENT_MARK_ABANDON */
791             return(FALSE);
792           }
793           if (GC_mark_some(GC_approx_sp())) break;
794         }
795 
796     GC_gc_no++;
797     GC_DBGLOG_PRINTF("GC #%lu freed %ld bytes, heap %lu KiB"
798                      IF_USE_MUNMAP(" (+ %lu KiB unmapped)") "\n",
799                      (unsigned long)GC_gc_no, (long)GC_bytes_found,
800                      TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
801                      COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)));
802 
803     /* Check all debugged objects for consistency */
804     if (GC_debugging_started) {
805       (*GC_check_heap)();
806     }
807     if (GC_on_collection_event)
808       GC_on_collection_event(GC_EVENT_MARK_END);
809 
810 #   ifdef THREAD_LOCAL_ALLOC
811       GC_world_stopped = FALSE;
812 #   endif
813 
814 #   ifdef THREADS
815       if (GC_on_collection_event)
816         GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
817 #   endif
818 
819     START_WORLD();
820 
821 #   ifdef THREADS
822       if (GC_on_collection_event)
823         GC_on_collection_event(GC_EVENT_POST_START_WORLD);
824 #   endif
825 
826 #   ifndef NO_CLOCK
827       if (GC_PRINT_STATS_FLAG) {
828         unsigned long time_diff;
829         unsigned total_time, divisor;
830         CLOCK_TYPE current_time;
831 
832         GET_TIME(current_time);
833         time_diff = MS_TIME_DIFF(current_time,start_time);
834 
835         /* Compute new world-stop delay total time */
836         total_time = world_stopped_total_time;
837         divisor = world_stopped_total_divisor;
838         if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
839           /* Halve values if overflow occurs */
840           total_time >>= 1;
841           divisor >>= 1;
842         }
843         total_time += time_diff < (((unsigned)-1) >> 1) ?
844                         (unsigned)time_diff : ((unsigned)-1) >> 1;
845         /* Update old world_stopped_total_time and its divisor */
846         world_stopped_total_time = total_time;
847         world_stopped_total_divisor = ++divisor;
848 
849         GC_ASSERT(divisor != 0);
850         GC_log_printf(
851                 "World-stopped marking took %lu msecs (%u in average)\n",
852                 time_diff, total_time / divisor);
853       }
854 #   endif
855     return(TRUE);
856 }
857 
858 /* Set all mark bits for the free list whose first entry is q   */
859 GC_INNER void GC_set_fl_marks(ptr_t q)
860 {
861     if (q != NULL) {
862       struct hblk *h = HBLKPTR(q);
863       struct hblk *last_h = h;
864       hdr *hhdr = HDR(h);
865       IF_PER_OBJ(word sz = hhdr->hb_sz;)
866 
867       for (;;) {
868         word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
869 
870         if (!mark_bit_from_hdr(hhdr, bit_no)) {
871           set_mark_bit_from_hdr(hhdr, bit_no);
872           ++hhdr -> hb_n_marks;
873         }
874 
875         q = (ptr_t)obj_link(q);
876         if (q == NULL)
877           break;
878 
879         h = HBLKPTR(q);
880         if (h != last_h) {
881           last_h = h;
882           hhdr = HDR(h);
883           IF_PER_OBJ(sz = hhdr->hb_sz;)
884         }
885       }
886     }
887 }
888 
889 #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
890   /* Check that all mark bits for the free list whose first entry is    */
891   /* (*pfreelist) are set.  Skip the check if it holds a special value. */
892   void GC_check_fl_marks(void **pfreelist)
893   {
894     /* TODO: There is a data race with GC_FAST_MALLOC_GRANS (which does */
895     /* not do atomic updates to the free-list).  The race seems to be   */
896     /* harmless, and for now we just skip this check in case of TSan.   */
897 #   if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
898       AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
899                 /* Atomic operations are used because the world is running. */
900       AO_t *prev;
901       AO_t *p;
902 
903       if ((word)list <= HBLKSIZE) return;
904 
905       prev = (AO_t *)pfreelist;
906       for (p = list; p != NULL;) {
907         AO_t *next;
908 
909         if (!GC_is_marked(p)) {
910           ABORT_ARG2("Unmarked local free list entry",
911                      ": object %p on list %p", (void *)p, (void *)list);
912         }
913 
914         /* While traversing the free-list, it re-reads the pointer to   */
915         /* the current node before accepting its next pointer and       */
916         /* bails out if the latter has changed.  That way, it won't     */
917         /* try to follow the pointer which might have been modified     */
918         /* after the object was returned to the client.  It might       */
919         /* perform the mark-check on the just allocated object but      */
920         /* that should be harmless.                                     */
921         next = (AO_t *)AO_load_acquire_read(p);
922         if (AO_load(prev) != (AO_t)p)
923           break;
924         prev = p;
925         p = next;
926       }
927 #   else
928       /* FIXME: Not implemented (just skipped). */
929       (void)pfreelist;
930 #   endif
931   }
932 #endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
933 
934 /* Clear all mark bits for the free list whose first entry is q */
935 /* Decrement GC_bytes_found by number of bytes on free list.    */
936 STATIC void GC_clear_fl_marks(ptr_t q)
937 {
938       struct hblk *h = HBLKPTR(q);
939       struct hblk *last_h = h;
940       hdr *hhdr = HDR(h);
941       word sz = hhdr->hb_sz; /* Normally set only once. */
942 
943       for (;;) {
944         word bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
945 
946         if (mark_bit_from_hdr(hhdr, bit_no)) {
947           size_t n_marks = hhdr -> hb_n_marks;
948 
949           GC_ASSERT(n_marks != 0);
950           clear_mark_bit_from_hdr(hhdr, bit_no);
951           n_marks--;
952 #         ifdef PARALLEL_MARK
953             /* Appr. count, don't decrement to zero! */
954             if (0 != n_marks || !GC_parallel) {
955               hhdr -> hb_n_marks = n_marks;
956             }
957 #         else
958             hhdr -> hb_n_marks = n_marks;
959 #         endif
960         }
961         GC_bytes_found -= sz;
962 
963         q = (ptr_t)obj_link(q);
964         if (q == NULL)
965           break;
966 
967         h = HBLKPTR(q);
968         if (h != last_h) {
969           last_h = h;
970           hhdr = HDR(h);
971           sz = hhdr->hb_sz;
972         }
973       }
974 }
975 
976 #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
977   void GC_check_tls(void);
978 #endif
979 
980 GC_on_heap_resize_proc GC_on_heap_resize = 0;
981 
982 /* Used for logging only. */
983 GC_INLINE int GC_compute_heap_usage_percent(void)
984 {
985   word used = GC_composite_in_use + GC_atomic_in_use;
986   word heap_sz = GC_heapsize - GC_unmapped_bytes;
987   return used >= heap_sz ? 0 : used < GC_WORD_MAX / 100 ?
988                 (int)((used * 100) / heap_sz) : (int)(used / (heap_sz / 100));
989 }
990 
991 /* Finish up a collection.  Assumes mark bits are consistent, lock is   */
992 /* held, but the world is otherwise running.                            */
993 STATIC void GC_finish_collection(void)
994 {
995 #   ifndef NO_CLOCK
996       CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
997       CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
998 #   endif
999 
1000     GC_ASSERT(I_HOLD_LOCK());
1001 #   if defined(GC_ASSERTIONS) \
1002        && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
1003         /* Check that we marked some of our own data.           */
1004         /* TODO: Add more checks. */
1005         GC_check_tls();
1006 #   endif
1007 
1008 #   ifndef NO_CLOCK
1009       if (GC_print_stats)
1010         GET_TIME(start_time);
1011 #   endif
1012     if (GC_on_collection_event)
1013       GC_on_collection_event(GC_EVENT_RECLAIM_START);
1014 
1015 #   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1016       if (GC_bytes_found > 0)
1017         GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
1018 #   endif
1019     GC_bytes_found = 0;
1020 #   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1021         if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
1022           GC_print_address_map();
1023         }
1024 #   endif
1025     COND_DUMP;
1026     if (GC_find_leak) {
1027       /* Mark all objects on the free list.  All objects should be      */
1028       /* marked when we're done.                                        */
1029       word size;        /* current object size  */
1030       unsigned kind;
1031       ptr_t q;
1032 
1033       for (kind = 0; kind < GC_n_kinds; kind++) {
1034         for (size = 1; size <= MAXOBJGRANULES; size++) {
1035           q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
1036           if (q != NULL)
1037             GC_set_fl_marks(q);
1038         }
1039       }
1040       GC_start_reclaim(TRUE);
1041         /* The above just checks; it doesn't really reclaim anything.   */
1042     }
1043 
1044 #   ifndef GC_NO_FINALIZATION
1045       GC_finalize();
1046 #   endif
1047 #   ifndef NO_CLOCK
1048       if (GC_print_stats)
1049         GET_TIME(finalize_time);
1050 #   endif
1051 
1052     if (GC_print_back_height) {
1053 #     ifdef MAKE_BACK_GRAPH
1054         GC_traverse_back_graph();
1055 #     elif !defined(SMALL_CONFIG)
1056         GC_err_printf("Back height not available: "
1057                       "Rebuild collector with -DMAKE_BACK_GRAPH\n");
1058 #     endif
1059     }
1060 
1061     /* Clear free list mark bits, in case they got accidentally marked   */
1062     /* (or GC_find_leak is set and they were intentionally marked).      */
1063     /* Also subtract memory remaining from GC_bytes_found count.         */
1064     /* Note that composite objects on free list are cleared.             */
1065     /* Thus accidentally marking a free list is not a problem;  only     */
1066     /* objects on the list itself will be marked, and that's fixed here. */
1067     {
1068       word size;        /* current object size          */
1069       ptr_t q;          /* pointer to current object    */
1070       unsigned kind;
1071 
1072       for (kind = 0; kind < GC_n_kinds; kind++) {
1073         for (size = 1; size <= MAXOBJGRANULES; size++) {
1074           q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
1075           if (q != NULL)
1076             GC_clear_fl_marks(q);
1077         }
1078       }
1079     }
1080 
1081     GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
1082                           (long)GC_bytes_found);
1083 
1084     /* Reconstruct free lists to contain everything not marked */
1085     GC_start_reclaim(FALSE);
1086     GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n",
1087                      GC_compute_heap_usage_percent(),
1088                      TO_KiB_UL(GC_composite_in_use),
1089                      TO_KiB_UL(GC_atomic_in_use));
1090     if (GC_is_full_gc) {
1091         GC_used_heap_size_after_full = USED_HEAP_SIZE;
1092         GC_need_full_gc = FALSE;
1093     } else {
1094         GC_need_full_gc = USED_HEAP_SIZE - GC_used_heap_size_after_full
1095                             > min_bytes_allocd();
1096     }
1097 
1098     GC_VERBOSE_LOG_PRINTF("Immediately reclaimed %ld bytes, heapsize:"
1099                           " %lu bytes" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
1100                           (long)GC_bytes_found,
1101                           (unsigned long)GC_heapsize /*, */
1102                           COMMA_IF_USE_MUNMAP((unsigned long)
1103                                               GC_unmapped_bytes));
1104 
1105     /* Reset or increment counters for next cycle */
1106     GC_n_attempts = 0;
1107     GC_is_full_gc = FALSE;
1108     GC_bytes_allocd_before_gc += GC_bytes_allocd;
1109     GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
1110     GC_bytes_allocd = 0;
1111     GC_bytes_dropped = 0;
1112     GC_bytes_freed = 0;
1113     GC_finalizer_bytes_freed = 0;
1114 
1115     IF_USE_MUNMAP(GC_unmap_old());
1116 
1117     if (GC_on_collection_event)
1118       GC_on_collection_event(GC_EVENT_RECLAIM_END);
1119 #   ifndef NO_CLOCK
1120       if (GC_print_stats) {
1121         CLOCK_TYPE done_time;
1122 
1123         GET_TIME(done_time);
1124 #       if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1125           /* A convenient place to output finalization statistics.      */
1126           GC_print_finalization_stats();
1127 #       endif
1128         GC_log_printf("Finalize plus initiate sweep took %lu + %lu msecs\n",
1129                       MS_TIME_DIFF(finalize_time,start_time),
1130                       MS_TIME_DIFF(done_time,finalize_time));
1131       }
1132 #   elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1133       if (GC_print_stats)
1134         GC_print_finalization_stats();
1135 #   endif
1136 }
1137 
1138 /* If stop_func == 0 then GC_default_stop_func is used instead.         */
1139 STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
1140                                          GC_bool force_unmap GC_ATTR_UNUSED)
1141 {
1142     GC_bool result;
1143     IF_USE_MUNMAP(int old_unmap_threshold;)
1144     IF_CANCEL(int cancel_state;)
1145     DCL_LOCK_STATE;
1146 
1147     if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
1148     if (GC_debugging_started) GC_print_all_smashed();
1149     GC_INVOKE_FINALIZERS();
1150     LOCK();
1151     DISABLE_CANCEL(cancel_state);
1152 #   ifdef USE_MUNMAP
1153       old_unmap_threshold = GC_unmap_threshold;
1154       if (force_unmap ||
1155           (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
1156         GC_unmap_threshold = 1; /* unmap as much as possible */
1157 #   endif
1158     ENTER_GC();
1159     /* Minimize junk left in my registers */
1160       GC_noop6(0,0,0,0,0,0);
1161     result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
1162                                      GC_default_stop_func);
1163     EXIT_GC();
1164     IF_USE_MUNMAP(GC_unmap_threshold = old_unmap_threshold); /* restore */
1165     RESTORE_CANCEL(cancel_state);
1166     UNLOCK();
1167     if (result) {
1168         if (GC_debugging_started) GC_print_all_smashed();
1169         GC_INVOKE_FINALIZERS();
1170     }
1171     return(result);
1172 }
1173 
1174 /* Externally callable routines to invoke full, stop-the-world collection. */
1175 GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
1176 {
1177     GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
1178     return (int)GC_try_to_collect_general(stop_func, FALSE);
1179 }
1180 
1181 GC_API void GC_CALL GC_gcollect(void)
1182 {
1183     /* 0 is passed as stop_func to get GC_default_stop_func value       */
1184     /* while holding the allocation lock (to prevent data races).       */
1185     (void)GC_try_to_collect_general(0, FALSE);
1186     if (GC_have_errors) GC_print_all_errors();
1187 }
1188 
1189 STATIC word GC_heapsize_at_forced_unmap = 0;
1190 
1191 GC_API void GC_CALL GC_gcollect_and_unmap(void)
1192 {
1193     /* Record current heap size to make heap growth more conservative   */
1194     /* afterwards (as if the heap is growing from zero size again).     */
1195     GC_heapsize_at_forced_unmap = GC_heapsize;
1196     /* Collect and force memory unmapping to OS. */
1197     (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
1198 }
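
/*
 * Client-side summary of these entry points (sketch; my_stop_func is a
 * placeholder for an application-supplied GC_stop_func):
 *
 *   GC_gcollect();                          full, stop-the-world collection
 *   (void)GC_try_to_collect(my_stop_func);  same, but may be abandoned if
 *                                           my_stop_func() returns nonzero
 *   GC_gcollect_and_unmap();                full collection, then return as
 *                                           much memory as possible to the OS
 */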
1199 
1200 GC_INNER word GC_n_heap_sects = 0;
1201                         /* Number of sections currently in heap. */
1202 
1203 #ifdef USE_PROC_FOR_LIBRARIES
1204   GC_INNER word GC_n_memory = 0;
1205                         /* Number of GET_MEM allocated memory sections. */
1206 #endif
1207 
1208 #ifdef USE_PROC_FOR_LIBRARIES
1209   /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
1210   /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set.       */
1211   GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
1212   {
1213     if (0 == p) return;
1214     if (GC_n_memory >= MAX_HEAP_SECTS)
1215       ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
1216     GC_our_memory[GC_n_memory].hs_start = p;
1217     GC_our_memory[GC_n_memory].hs_bytes = bytes;
1218     GC_n_memory++;
1219   }
1220 #endif
1221 
1222 /*
1223  * Use the chunk of memory starting at p of size bytes as part of the heap.
1224  * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
1225  */
1226 GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
1227 {
1228     hdr * phdr;
1229     word endp;
1230 
1231     if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
1232         ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
1233     }
1234     while ((word)p <= HBLKSIZE) {
1235         /* Can't handle memory near address zero. */
1236         ++p;
1237         bytes -= HBLKSIZE;
1238         if (0 == bytes) return;
1239     }
1240     endp = (word)p + bytes;
1241     if (endp <= (word)p) {
1242         /* Address wrapped. */
1243         bytes -= HBLKSIZE;
1244         if (0 == bytes) return;
1245         endp -= HBLKSIZE;
1246     }
1247     phdr = GC_install_header(p);
1248     if (0 == phdr) {
1249         /* This is extremely unlikely. Can't add it.  This will         */
1250         /* almost certainly result in a 0 return from the allocator,    */
1251         /* which is entirely appropriate.                               */
1252         return;
1253     }
1254     GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
1255     GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
1256     GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
1257     GC_n_heap_sects++;
1258     phdr -> hb_sz = bytes;
1259     phdr -> hb_flags = 0;
1260     GC_freehblk(p);
1261     GC_heapsize += bytes;
1262 
1263     /* Normally the caller calculates a new GC_collect_at_heapsize,
1264      * but this is also called directly from GC_scratch_recycle_inner, so
1265      * adjust here. It will be recalculated when called from
1266      * GC_expand_hp_inner.
1267      */
1268     GC_collect_at_heapsize += bytes;
1269     if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
1270        GC_collect_at_heapsize = GC_WORD_MAX;
1271 
1272     if ((word)p <= (word)GC_least_plausible_heap_addr
1273         || GC_least_plausible_heap_addr == 0) {
1274         GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
1275                 /* Making it a little smaller than necessary prevents   */
1276                 /* us from getting a false hit from the variable        */
1277                 /* itself.  There's some unintentional reflection       */
1278                 /* here.                                                */
1279     }
1280     if ((word)p + bytes >= (word)GC_greatest_plausible_heap_addr) {
1281         GC_greatest_plausible_heap_addr = (void *)endp;
1282     }
1283 }
1284 
1285 #if !defined(NO_DEBUGGING)
1286   void GC_print_heap_sects(void)
1287   {
1288     unsigned i;
1289 
1290     GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
1291               (unsigned long)GC_heapsize /*, */
1292               COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));
1293 
1294     for (i = 0; i < GC_n_heap_sects; i++) {
1295       ptr_t start = GC_heap_sects[i].hs_start;
1296       size_t len = GC_heap_sects[i].hs_bytes;
1297       struct hblk *h;
1298       unsigned nbl = 0;
1299 
1300       for (h = (struct hblk *)start; (word)h < (word)(start + len); h++) {
1301         if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
1302       }
1303       GC_printf("Section %d from %p to %p %u/%lu blacklisted\n",
1304                 i, (void *)start, (void *)&start[len],
1305                 nbl, (unsigned long)divHBLKSZ(len));
1306     }
1307   }
1308 #endif
1309 
1310 void * GC_least_plausible_heap_addr = (void *)GC_WORD_MAX;
1311 void * GC_greatest_plausible_heap_addr = 0;
1312 
1313 GC_INLINE word GC_max(word x, word y)
1314 {
1315     return(x > y? x : y);
1316 }
1317 
1318 GC_INLINE word GC_min(word x, word y)
1319 {
1320     return(x < y? x : y);
1321 }
1322 
1323 STATIC word GC_max_heapsize = 0;
1324 
1325 GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
1326 {
1327     GC_max_heapsize = n;
1328 }
1329 
1330 GC_word GC_max_retries = 0;
1331 
1332 /* This explicitly increases the size of the heap.  It is used          */
1333 /* internally, but may also be invoked from GC_expand_hp by the user.   */
1334 /* The argument is in units of HBLKSIZE (tiny values are rounded up).   */
1335 /* Returns FALSE on failure.                                            */
1336 GC_INNER GC_bool GC_expand_hp_inner(word n)
1337 {
1338     size_t bytes;
1339     struct hblk * space;
1340     word expansion_slop;        /* Number of bytes by which we expect the */
1341                                 /* heap to expand soon.                   */
1342 
1343     if (n < MINHINCR) n = MINHINCR;
1344     bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
1345     if (GC_max_heapsize != 0
1346         && (GC_max_heapsize < (word)bytes
1347             || GC_heapsize > GC_max_heapsize - (word)bytes)) {
1348         /* Exceeded self-imposed limit */
1349         return(FALSE);
1350     }
1351     space = GET_MEM(bytes);
1352     GC_add_to_our_memory((ptr_t)space, bytes);
1353     if (space == 0) {
1354         WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n",
1355              (word)bytes);
1356         return(FALSE);
1357     }
1358     GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
1359                       TO_KiB_UL(GC_heapsize + (word)bytes),
1360                       (unsigned long)GC_bytes_allocd);
1361     /* Adjust heap limits generously for blacklisting to work better.   */
1362     /* GC_add_to_heap performs minimal adjustment needed for            */
1363     /* correctness.                                                     */
1364     expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
1365     if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
1366         || (GC_last_heap_addr != 0
1367             && (word)GC_last_heap_addr < (word)space)) {
1368         /* Assume the heap is growing up */
1369         word new_limit = (word)space + (word)bytes + expansion_slop;
1370         if (new_limit > (word)space) {
1371           GC_greatest_plausible_heap_addr =
1372             (void *)GC_max((word)GC_greatest_plausible_heap_addr,
1373                            (word)new_limit);
1374         }
1375     } else {
1376         /* Heap is growing down */
1377         word new_limit = (word)space - expansion_slop;
1378         if (new_limit < (word)space) {
1379           GC_least_plausible_heap_addr =
1380             (void *)GC_min((word)GC_least_plausible_heap_addr,
1381                            (word)space - expansion_slop);
1382         }
1383     }
1384     GC_prev_heap_addr = GC_last_heap_addr;
1385     GC_last_heap_addr = (ptr_t)space;
1386     GC_add_to_heap(space, bytes);
1387     /* Force GC before we are likely to allocate past expansion_slop */
1388       GC_collect_at_heapsize =
1389          GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
1390       if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
1391          GC_collect_at_heapsize = GC_WORD_MAX;
1392     if (GC_on_heap_resize)
1393       (*GC_on_heap_resize)(GC_heapsize);
1394 
1395     return(TRUE);
1396 }
1397 
1398 /* Really returns a bool, but it's externally visible, so that's clumsy. */
1399 /* The argument is in bytes.  Includes GC_init() call.                   */
1400 GC_API int GC_CALL GC_expand_hp(size_t bytes)
1401 {
1402     int result;
1403     DCL_LOCK_STATE;
1404 
1405     if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
1406     LOCK();
1407     result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
1408     if (result) GC_requested_heapsize += bytes;
1409     UNLOCK();
1410     return(result);
1411 }
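
/*
 * Example: a client that knows its live-data size in advance can pre-grow
 * the heap to avoid collections during start-up (sketch):
 *
 *   if (!GC_expand_hp(32 * 1024 * 1024))
 *     fprintf(stderr, "could not pre-allocate a 32 MiB heap\n");
 */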
1412 
1413 word GC_fo_entries = 0; /* used also in extra/MacOS.c */
1414 
1415 GC_INNER unsigned GC_fail_count = 0;
1416                         /* How many consecutive GC/expansion failures?  */
1417                         /* Reset by GC_allochblk.                       */
1418 
1419 static word last_fo_entries = 0;
1420 static word last_bytes_finalized = 0;
1421 
1422 /* Collect or expand heap in an attempt to make the indicated number of */
1423 /* free blocks available.  Should be called until the blocks are        */
1424 /* available (setting retry value to TRUE unless this is the first call */
1425 /* in a loop) or until it fails by returning FALSE.                     */
1426 GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
1427                                       GC_bool ignore_off_page,
1428                                       GC_bool retry)
1429 {
1430     GC_bool gc_not_stopped = TRUE;
1431     word blocks_to_get;
1432     IF_CANCEL(int cancel_state;)
1433 
1434     DISABLE_CANCEL(cancel_state);
1435     if (!GC_incremental && !GC_dont_gc &&
1436         ((GC_dont_expand && GC_bytes_allocd > 0)
1437          || (GC_fo_entries > (last_fo_entries + 500)
1438              && (last_bytes_finalized | GC_bytes_finalized) != 0)
1439          || GC_should_collect())) {
1440       /* Try to do a full collection using 'default' stop_func (unless  */
1441       /* nothing has been allocated since the latest collection or heap */
1442       /* expansion is disabled).                                        */
1443       gc_not_stopped = GC_try_to_collect_inner(
1444                         GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
1445                         GC_default_stop_func : GC_never_stop_func);
1446       if (gc_not_stopped == TRUE || !retry) {
1447         /* Either the collection hasn't been aborted or this is the     */
1448         /* first attempt (in a loop).                                   */
1449         last_fo_entries = GC_fo_entries;
1450         last_bytes_finalized = GC_bytes_finalized;
1451         RESTORE_CANCEL(cancel_state);
1452         return(TRUE);
1453       }
1454     }
1455 
1456     blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
1457                         / (HBLKSIZE * GC_free_space_divisor)
1458                     + needed_blocks;
1459     if (blocks_to_get > MAXHINCR) {
1460       word slop;
1461 
1462       /* Get the minimum required to make it likely that we can satisfy */
1463       /* the current request in the presence of black-listing.          */
1464       /* This will probably be more than MAXHINCR.                      */
1465       if (ignore_off_page) {
1466         slop = 4;
1467       } else {
1468         slop = 2 * divHBLKSZ(BL_LIMIT);
1469         if (slop > needed_blocks) slop = needed_blocks;
1470       }
1471       if (needed_blocks + slop > MAXHINCR) {
1472         blocks_to_get = needed_blocks + slop;
1473       } else {
1474         blocks_to_get = MAXHINCR;
1475       }
1476       if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
1477         blocks_to_get = divHBLKSZ(GC_WORD_MAX);
1478     }
1479 
1480     if (!GC_expand_hp_inner(blocks_to_get)
1481         && (blocks_to_get == needed_blocks
1482             || !GC_expand_hp_inner(needed_blocks))) {
1483       if (gc_not_stopped == FALSE) {
1484         /* Don't increment GC_fail_count here (and no warning).     */
1485         GC_gcollect_inner();
1486         GC_ASSERT(GC_bytes_allocd == 0);
1487       } else if (GC_fail_count++ < GC_max_retries) {
1488         WARN("Out of Memory!  Trying to continue...\n", 0);
1489         GC_gcollect_inner();
1490       } else {
1491 #       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
1492           WARN("Out of Memory! Heap size: %" WARN_PRIdPTR " MiB."
1493                " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
1494 #       endif
1495         RESTORE_CANCEL(cancel_state);
1496         return(FALSE);
1497       }
1498     } else if (GC_fail_count) {
1499       GC_COND_LOG_PRINTF("Memory available again...\n");
1500     }
1501     RESTORE_CANCEL(cancel_state);
1502     return(TRUE);
1503 }
1504 
1505 /*
1506  * Make sure the object free list for size gran (in granules) is not empty.
1507  * Return a pointer to the first object on the free list.
1508  * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
1509  */
1510 GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
1511 {
1512     void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
1513     GC_bool tried_minor = FALSE;
1514     GC_bool retry = FALSE;
1515 
1516     GC_ASSERT(I_HOLD_LOCK());
1517     if (gran == 0) return(0);
1518 
1519     while (*flh == 0) {
1520       ENTER_GC();
1521 #     ifndef GC_DISABLE_INCREMENTAL
1522         if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED) {
1523           /* True incremental mode, not just generational.      */
1524           /* Do our share of marking work.                      */
1525           GC_collect_a_little_inner(1);
1526         }
1527 #     endif
1528       /* Sweep blocks for objects of this size */
1529         GC_ASSERT(!GC_is_full_gc
1530                   || NULL == GC_obj_kinds[kind].ok_reclaim_list
1531                   || NULL == GC_obj_kinds[kind].ok_reclaim_list[gran]);
1532         GC_continue_reclaim(gran, kind);
1533       EXIT_GC();
1534       if (*flh == 0) {
1535         GC_new_hblk(gran, kind);
1536         if (*flh == 0) {
1537           ENTER_GC();
1538           if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
1539               && !tried_minor) {
1540             GC_collect_a_little_inner(1);
1541             tried_minor = TRUE;
1542           } else {
1543             if (!GC_collect_or_expand(1, FALSE, retry)) {
1544               EXIT_GC();
1545               return(0);
1546             }
1547             retry = TRUE;
1548           }
1549           EXIT_GC();
1550         }
1551       }
1552     }
1553     /* Successful allocation; reset failure count.      */
1554     GC_fail_count = 0;
1555 
1556     return (ptr_t)(*flh);
1557 }
1558