1 /*
2  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
4  * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
5  *
6  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
8  *
9  * Permission is hereby granted to use or copy this program
10  * for any purpose,  provided the above notices are retained on all copies.
11  * Permission to modify the code and to distribute modified code is granted,
12  * provided the above notices are retained, and a notice that the code was
13  * modified is included with the above copyright notice.
14  */
15 
16 #include "private/gc_pmark.h"
17 
18 #include <stdio.h>
19 #include <limits.h>
20 #include <stdarg.h>
21 
22 #ifndef MSWINCE
23 # include <signal.h>
24 #endif
25 
26 #ifdef GC_SOLARIS_THREADS
27 # include <sys/syscall.h>
28 #endif
29 
30 #if defined(MSWIN32) || defined(MSWINCE) \
31     || (defined(CYGWIN32) && defined(GC_READ_ENV_FILE))
32 # ifndef WIN32_LEAN_AND_MEAN
33 #   define WIN32_LEAN_AND_MEAN 1
34 # endif
35 # define NOSERVICE
36 # include <windows.h>
37 #endif
38 
39 #if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(SYMBIAN)
40 # include <fcntl.h>
41 # include <sys/types.h>
42 # include <sys/stat.h>
43 #endif
44 
45 #ifdef NONSTOP
46 # include <floss.h>
47 #endif
48 
49 #ifdef THREADS
50 # ifdef PCR
51 #   include "il/PCR_IL.h"
52     GC_INNER PCR_Th_ML GC_allocate_ml;
53 # elif defined(SN_TARGET_PS3)
54 #   include <pthread.h>
55     GC_INNER pthread_mutex_t GC_allocate_ml;
56 # endif
57   /* For other platforms with threads, the lock and possibly            */
58   /* GC_lock_holder variables are defined in the thread support code.   */
59 #endif /* THREADS */
60 
61 #ifdef DYNAMIC_LOADING
62   /* We need to register the main data segment.  Returns  TRUE unless   */
63   /* this is done implicitly as part of dynamic library registration.   */
64 # define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
65 #elif defined(GC_DONT_REGISTER_MAIN_STATIC_DATA)
66 # define GC_REGISTER_MAIN_STATIC_DATA() FALSE
67 #else
68   /* Don't unnecessarily call GC_register_main_static_data() in case    */
69   /* dyn_load.c isn't linked in.                                        */
70 # define GC_REGISTER_MAIN_STATIC_DATA() TRUE
71 #endif
72 
73 #ifdef NEED_CANCEL_DISABLE_COUNT
74   __thread unsigned char GC_cancel_disable_count = 0;
75 #endif
76 
77 GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */;
78 
79 GC_INNER GC_bool GC_debugging_started = FALSE;
80         /* defined here so we don't have to load debug_malloc.o */
81 
82 ptr_t GC_stackbottom = 0;
83 
84 #ifdef IA64
85   ptr_t GC_register_stackbottom = 0;
86 #endif
87 
88 int GC_dont_gc = FALSE;
89 
90 int GC_dont_precollect = FALSE;
91 
92 GC_bool GC_quiet = 0; /* used also in pcr_interface.c */
93 
94 #ifndef SMALL_CONFIG
95   int GC_print_stats = 0;
96 #endif
97 
98 #ifdef GC_PRINT_BACK_HEIGHT
99   GC_INNER GC_bool GC_print_back_height = TRUE;
100 #else
101   GC_INNER GC_bool GC_print_back_height = FALSE;
102 #endif
103 
104 #ifndef NO_DEBUGGING
105   GC_INNER GC_bool GC_dump_regularly = FALSE;
106                                 /* Generate regular debugging dumps. */
107 #endif
108 
109 #ifdef KEEP_BACK_PTRS
110   GC_INNER long GC_backtraces = 0;
111                 /* Number of random backtraces to generate for each GC. */
112 #endif
113 
114 #ifdef FIND_LEAK
115   int GC_find_leak = 1;
116 #else
117   int GC_find_leak = 0;
118 #endif
119 
120 #ifndef SHORT_DBG_HDRS
121 # ifdef GC_FINDLEAK_DELAY_FREE
122     GC_INNER GC_bool GC_findleak_delay_free = TRUE;
123 # else
124     GC_INNER GC_bool GC_findleak_delay_free = FALSE;
125 # endif
126 #endif /* !SHORT_DBG_HDRS */
127 
128 #ifdef ALL_INTERIOR_POINTERS
129   int GC_all_interior_pointers = 1;
130 #else
131   int GC_all_interior_pointers = 0;
132 #endif
133 
134 #ifdef FINALIZE_ON_DEMAND
135   int GC_finalize_on_demand = 1;
136 #else
137   int GC_finalize_on_demand = 0;
138 #endif
139 
140 #ifdef JAVA_FINALIZATION
141   int GC_java_finalization = 1;
142 #else
143   int GC_java_finalization = 0;
144 #endif
145 
146 /* All accesses to it should be synchronized to avoid data races.       */
147 GC_finalizer_notifier_proc GC_finalizer_notifier =
148                                         (GC_finalizer_notifier_proc)0;
149 
150 #ifdef GC_FORCE_UNMAP_ON_GCOLLECT
151   /* Has no effect unless USE_MUNMAP.                           */
152   /* Has no effect on implicitly-initiated garbage collections. */
153   GC_INNER GC_bool GC_force_unmap_on_gcollect = TRUE;
154 #else
155   GC_INNER GC_bool GC_force_unmap_on_gcollect = FALSE;
156 #endif
157 
158 #ifndef GC_LARGE_ALLOC_WARN_INTERVAL
159 # define GC_LARGE_ALLOC_WARN_INTERVAL 5
160 #endif
161 GC_INNER long GC_large_alloc_warn_interval = GC_LARGE_ALLOC_WARN_INTERVAL;
162                         /* Interval between unsuppressed warnings.      */
163 
164 STATIC void * GC_CALLBACK GC_default_oom_fn(
165                                         size_t bytes_requested GC_ATTR_UNUSED)
166 {
167     return(0);
168 }
169 
170 /* All accesses to it should be synchronized to avoid data races.       */
171 GC_oom_func GC_oom_fn = GC_default_oom_fn;
172 
173 #ifdef CAN_HANDLE_FORK
174 # ifdef HANDLE_FORK
175     GC_INNER int GC_handle_fork = 1;
176                         /* The value is examined by GC_thr_init.        */
177 # else
178     GC_INNER int GC_handle_fork = FALSE;
179 # endif
180 
181 #elif !defined(HAVE_NO_FORK)
182 
183   /* Same as above but with GC_CALL calling conventions.  */
184   GC_API void GC_CALL GC_atfork_prepare(void)
185   {
186 #   ifdef THREADS
187       ABORT("fork() handling unsupported");
188 #   endif
189   }
190 
191   GC_API void GC_CALL GC_atfork_parent(void)
192   {
193     /* empty */
194   }
195 
196   GC_API void GC_CALL GC_atfork_child(void)
197   {
198     /* empty */
199   }
200 #endif /* !CAN_HANDLE_FORK && !HAVE_NO_FORK */
201 
202 /* Overrides the default automatic handle-fork mode.  Has effect only   */
203 /* if called before GC_INIT.                                            */
204 GC_API void GC_CALL GC_set_handle_fork(int value GC_ATTR_UNUSED)
205 {
206 # ifdef CAN_HANDLE_FORK
207     if (!GC_is_initialized)
208       GC_handle_fork = value >= -1 ? value : 1;
209                 /* Map all negative values except for -1 to a positive one. */
210 # elif defined(THREADS) || (defined(DARWIN) && defined(MPROTECT_VDB))
211     if (!GC_is_initialized && value) {
212 #     ifndef SMALL_CONFIG
213         GC_init(); /* just to initialize GC_stderr */
214 #     endif
215       ABORT("fork() handling unsupported");
216     }
217 # else
218     /* No at-fork handler is needed in the single-threaded mode.        */
219 # endif
220 }
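/*
 * Usage sketch (illustrative only, not part of the collector): a client
 * that wants the collector's at-fork handling should request it before
 * GC_INIT(); on builds where fork handling is unsupported, a non-zero
 * argument causes an abort (see above).
 *
 *   #include "gc.h"
 *
 *   int main(void)
 *   {
 *     GC_set_handle_fork(1);
 *     GC_INIT();
 *     return 0;
 *   }
 */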
221 
222 /* Set things up so that GC_size_map[i] >= granules(i),                 */
223 /* but not too much bigger                                              */
224 /* and so that size_map contains relatively few distinct entries.       */
225 /* This was originally stolen from Russ Atkinson's Cedar                */
226 /* quantization algorithm (but we precompute it).                       */
227 STATIC void GC_init_size_map(void)
228 {
229     int i;
230 
231     /* Map size 0 to something bigger.                  */
232     /* This avoids problems at lower levels.            */
233       GC_size_map[0] = 1;
234     for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) {
235         GC_size_map[i] = (unsigned)ROUNDED_UP_GRANULES(i);
236 #       ifndef _MSC_VER
237           GC_ASSERT(GC_size_map[i] < TINY_FREELISTS);
238           /* Seems to tickle a bug in VC++ 2008 for AMD64 */
239 #       endif
240     }
241     /* We leave the rest of the array to be filled in on demand. */
242 }
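/*
 * Illustrative example (the exact numbers depend on GRANULE_BYTES and
 * EXTRA_BYTES for the given configuration): with GRANULE_BYTES == 16 and
 * EXTRA_BYTES == 0, the loop above produces GC_size_map[1..16] == 1,
 * GC_size_map[17..32] == 2, and so on, so that small requests are rounded
 * up to a small set of distinct granule counts.
 */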
243 
244 /* Fill in additional entries in GC_size_map, including the ith one     */
245 /* We assume the ith entry is currently 0.                              */
246 /* Note that a filled in section of the array ending at n always        */
247 /* has length at least n/4.                                             */
248 GC_INNER void GC_extend_size_map(size_t i)
249 {
250     size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
251     size_t granule_sz = orig_granule_sz;
252     size_t byte_sz = GRANULES_TO_BYTES(granule_sz);
253                         /* The size we try to preserve.         */
254                         /* Close to i, unless this would        */
255                         /* introduce too many distinct sizes.   */
256     size_t smaller_than_i = byte_sz - (byte_sz >> 3);
257     size_t much_smaller_than_i = byte_sz - (byte_sz >> 2);
258     size_t low_limit;   /* The lowest indexed entry we  */
259                         /* initialize.                  */
260     size_t j;
261 
262     if (GC_size_map[smaller_than_i] == 0) {
263         low_limit = much_smaller_than_i;
264         while (GC_size_map[low_limit] != 0) low_limit++;
265     } else {
266         low_limit = smaller_than_i + 1;
267         while (GC_size_map[low_limit] != 0) low_limit++;
268         granule_sz = ROUNDED_UP_GRANULES(low_limit);
269         granule_sz += granule_sz >> 3;
270         if (granule_sz < orig_granule_sz) granule_sz = orig_granule_sz;
271     }
272     /* For these larger sizes, we use an even number of granules.       */
273     /* This makes it easier to, for example, construct a 16-byte-aligned */
274     /* allocator even if GRANULE_BYTES is 8.                            */
275         granule_sz += 1;
276         granule_sz &= ~1;
277     if (granule_sz > MAXOBJGRANULES) {
278         granule_sz = MAXOBJGRANULES;
279     }
280     /* If we can fit the same number of larger objects in a block,      */
281     /* do so.                                                           */
282     {
283         size_t number_of_objs = HBLK_GRANULES/granule_sz;
284         GC_ASSERT(number_of_objs != 0);
285         granule_sz = HBLK_GRANULES/number_of_objs;
286         granule_sz &= ~1;
287     }
288     byte_sz = GRANULES_TO_BYTES(granule_sz);
289     /* We may need one extra byte;                      */
290     /* don't always fill in GC_size_map[byte_sz]        */
291     byte_sz -= EXTRA_BYTES;
292 
293     for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = granule_sz;
294 }
295 
296 
297 /*
298  * The following is a gross hack to deal with a problem that can occur
299  * on machines that are sloppy about stack frame sizes, notably SPARC.
300  * Bogus pointers may be written to the stack and not cleared for
301  * a LONG time, because they always fall into holes in stack frames
302  * that are not written.  We partially address this by clearing
303  * sections of the stack whenever we get control.
304  */
305 # ifdef THREADS
306 #   define BIG_CLEAR_SIZE 2048  /* Clear this much now and then.        */
307 #   define SMALL_CLEAR_SIZE 256 /* Clear this much every time.          */
308 # else
309   STATIC word GC_stack_last_cleared = 0; /* GC_no when we last did this */
310   STATIC ptr_t GC_min_sp = NULL;
311                         /* Coolest stack pointer value from which       */
312                         /* we've already cleared the stack.             */
313   STATIC ptr_t GC_high_water = NULL;
314                         /* "hottest" stack pointer value we have seen   */
315                         /* recently.  Degrades over time.               */
316   STATIC word GC_bytes_allocd_at_reset = 0;
317 #   define DEGRADE_RATE 50
318 # endif
319 
320 # define CLEAR_SIZE 213  /* Granularity for GC_clear_stack_inner */
321 
322 #if defined(ASM_CLEAR_CODE)
323   void *GC_clear_stack_inner(void *, ptr_t);
324 #else
325   /* Clear the stack up to about limit.  Return arg.  This function is  */
326   /* not static because it could also be erroneously defined in a .S    */
327   /* file, in which case the error would be caught by the linker.       */
328   void * GC_clear_stack_inner(void *arg, ptr_t limit)
329   {
330     volatile word dummy[CLEAR_SIZE];
331 
332     BZERO((/* no volatile */ void *)dummy, sizeof(dummy));
333     if ((word)GC_approx_sp() COOLER_THAN (word)limit) {
334         (void) GC_clear_stack_inner(arg, limit);
335     }
336     /* Make sure the recursive call is not a tail call, and the bzero   */
337     /* call is not recognized as dead code.                             */
338     GC_noop1((word)dummy);
339     return(arg);
340   }
341 #endif
342 
343 /* Clear some of the inaccessible part of the stack.  Returns its       */
344 /* argument, so it can be used in a tail call position, hence clearing  */
345 /* another frame.                                                       */
346 GC_API void * GC_CALL GC_clear_stack(void *arg)
347 {
348     ptr_t sp = GC_approx_sp();  /* Hotter than actual sp */
349 #   ifdef THREADS
350         word volatile dummy[SMALL_CLEAR_SIZE];
351         static unsigned random_no = 0;
352                                  /* Should be more random than it is ... */
353                                  /* Used to occasionally clear a bigger  */
354                                  /* chunk.                               */
355 #   endif
356     ptr_t limit;
357 
358 #   define SLOP 400
359         /* Extra bytes we clear every time.  This clears our own        */
360         /* activation record, and should cause more frequent            */
361         /* clearing near the cold end of the stack, a good thing.       */
362 #   define GC_SLOP 4000
363         /* We make GC_high_water this much hotter than we really        */
364         /* saw it, to cover for GC noise etc. above our current frame.  */
365 #   define CLEAR_THRESHOLD 100000
366         /* We restart the clearing process after this many bytes of     */
367         /* allocation.  Otherwise very heavily recursive programs       */
368         /* with sparse stacks may result in heaps that grow almost      */
369         /* without bounds.  As the heap gets larger, collection         */
370         /* frequency decreases, thus clearing frequency would decrease, */
371         /* thus more junk remains accessible, thus the heap gets        */
372         /* larger ...                                                   */
373 # ifdef THREADS
374     if (++random_no % 13 == 0) {
375         limit = sp;
376         MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
377         limit = (ptr_t)((word)limit & ~0xf);
378                         /* Make it sufficiently aligned for assembly    */
379                         /* implementations of GC_clear_stack_inner.     */
380         return GC_clear_stack_inner(arg, limit);
381     } else {
382         BZERO((void *)dummy, SMALL_CLEAR_SIZE*sizeof(word));
383         return arg;
384     }
385 # else
386     if (GC_gc_no > GC_stack_last_cleared) {
387         /* Start things over, so we clear the entire stack again */
388         if (GC_stack_last_cleared == 0) GC_high_water = (ptr_t)GC_stackbottom;
389         GC_min_sp = GC_high_water;
390         GC_stack_last_cleared = GC_gc_no;
391         GC_bytes_allocd_at_reset = GC_bytes_allocd;
392     }
393     /* Adjust GC_high_water */
394         MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
395         if ((word)sp HOTTER_THAN (word)GC_high_water) {
396             GC_high_water = sp;
397         }
398         MAKE_HOTTER(GC_high_water, GC_SLOP);
399     limit = GC_min_sp;
400     MAKE_HOTTER(limit, SLOP);
401     if ((word)sp COOLER_THAN (word)limit) {
402         limit = (ptr_t)((word)limit & ~0xf);
403                         /* Make it sufficiently aligned for assembly    */
404                         /* implementations of GC_clear_stack_inner.     */
405         GC_min_sp = sp;
406         return(GC_clear_stack_inner(arg, limit));
407     } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
408         /* Restart clearing process, but limit how much clearing we do. */
409         GC_min_sp = sp;
410         MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
411         if ((word)GC_min_sp HOTTER_THAN (word)GC_high_water)
412           GC_min_sp = GC_high_water;
413         GC_bytes_allocd_at_reset = GC_bytes_allocd;
414     }
415     return(arg);
416 # endif
417 }
418 
419 
420 /* Return a pointer to the base address of p, given a pointer to        */
421 /* an address within an object.  Return 0 otherwise.                    */
422 GC_API void * GC_CALL GC_base(void * p)
423 {
424     ptr_t r;
425     struct hblk *h;
426     bottom_index *bi;
427     hdr *candidate_hdr;
428     ptr_t limit;
429 
430     r = p;
431     if (!EXPECT(GC_is_initialized, TRUE)) return 0;
432     h = HBLKPTR(r);
433     GET_BI(r, bi);
434     candidate_hdr = HDR_FROM_BI(bi, r);
435     if (candidate_hdr == 0) return(0);
436     /* If it's a pointer to the middle of a large object, move it       */
437     /* to the beginning.                                                */
438         while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
439            h = FORWARDED_ADDR(h,candidate_hdr);
440            r = (ptr_t)h;
441            candidate_hdr = HDR(h);
442         }
443     if (HBLK_IS_FREE(candidate_hdr)) return(0);
444     /* Make sure r points to the beginning of the object */
445         r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
446         {
447             size_t offset = HBLKDISPL(r);
448             word sz = candidate_hdr -> hb_sz;
449             size_t obj_displ = offset % sz;
450 
451             r -= obj_displ;
452             limit = r + sz;
453             if ((word)limit > (word)(h + 1) && sz <= HBLKSIZE) {
454                 return(0);
455             }
456             if ((word)p >= (word)limit) return(0);
457         }
458     return((void *)r);
459 }
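/*
 * Usage sketch (illustrative only): given an interior pointer into an
 * object allocated by the collector, GC_base() recovers the object start
 * and GC_size() (below) reports its size, which may exceed the request
 * due to rounding:
 *
 *   char *obj = (char *)GC_MALLOC(100);
 *   void *base = GC_base(obj + 50);    // yields obj
 *   size_t sz = GC_size(base);         // at least 100
 */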
460 
461 /* Return TRUE if and only if p points to somewhere in GC heap. */
462 GC_API int GC_CALL GC_is_heap_ptr(const void *p)
463 {
464     bottom_index *bi;
465 
466     GC_ASSERT(GC_is_initialized);
467     GET_BI(p, bi);
468     return HDR_FROM_BI(bi, p) != 0;
469 }
470 
471 /* Return the size of an object, given a pointer to its base.           */
472 /* (For small objects this also happens to work from interior pointers, */
473 /* but that shouldn't be relied upon.)                                  */
474 GC_API size_t GC_CALL GC_size(const void * p)
475 {
476     hdr * hhdr = HDR(p);
477 
478     return hhdr -> hb_sz;
479 }
480 
481 
482 /* These getters remain unsynchronized for compatibility (since some    */
483 /* clients could call some of them from a GC callback holding the       */
484 /* allocator lock).                                                     */
485 GC_API size_t GC_CALL GC_get_heap_size(void)
486 {
487     /* ignore the memory space returned to OS (i.e. count only the      */
488     /* space owned by the garbage collector)                            */
489     return (size_t)(GC_heapsize - GC_unmapped_bytes);
490 }
491 
492 GC_API size_t GC_CALL GC_get_free_bytes(void)
493 {
494     /* ignore the memory space returned to OS */
495     return (size_t)(GC_large_free_bytes - GC_unmapped_bytes);
496 }
497 
498 GC_API size_t GC_CALL GC_get_unmapped_bytes(void)
499 {
500     return (size_t)GC_unmapped_bytes;
501 }
502 
503 GC_API size_t GC_CALL GC_get_bytes_since_gc(void)
504 {
505     return (size_t)GC_bytes_allocd;
506 }
507 
508 GC_API size_t GC_CALL GC_get_total_bytes(void)
509 {
510     return (size_t)(GC_bytes_allocd + GC_bytes_allocd_before_gc);
511 }
512 
513 #ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
514 
515 /* Return the heap usage information.  This is a thread-safe (atomic)   */
516 /* alternative for the five above getters.  NULL pointer is allowed for */
517 /* any argument.  Returned (filled in) values are of word type.         */
518 GC_API void GC_CALL GC_get_heap_usage_safe(GC_word *pheap_size,
519                         GC_word *pfree_bytes, GC_word *punmapped_bytes,
520                         GC_word *pbytes_since_gc, GC_word *ptotal_bytes)
521 {
522   DCL_LOCK_STATE;
523 
524   LOCK();
525   if (pheap_size != NULL)
526     *pheap_size = GC_heapsize - GC_unmapped_bytes;
527   if (pfree_bytes != NULL)
528     *pfree_bytes = GC_large_free_bytes - GC_unmapped_bytes;
529   if (punmapped_bytes != NULL)
530     *punmapped_bytes = GC_unmapped_bytes;
531   if (pbytes_since_gc != NULL)
532     *pbytes_since_gc = GC_bytes_allocd;
533   if (ptotal_bytes != NULL)
534     *ptotal_bytes = GC_bytes_allocd + GC_bytes_allocd_before_gc;
535   UNLOCK();
536 }
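/*
 * Usage sketch (illustrative only): a client that wants an atomic snapshot
 * of just a few values may pass NULL for the rest:
 *
 *   GC_word heap_sz, free_bytes;
 *   GC_get_heap_usage_safe(&heap_sz, &free_bytes, NULL, NULL, NULL);
 */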
537 
538   GC_INNER word GC_reclaimed_bytes_before_gc = 0;
539 
540   /* Fill in GC statistics provided the destination is of enough size.  */
541   static void fill_prof_stats(struct GC_prof_stats_s *pstats)
542   {
543     pstats->heapsize_full = GC_heapsize;
544     pstats->free_bytes_full = GC_large_free_bytes;
545     pstats->unmapped_bytes = GC_unmapped_bytes;
546     pstats->bytes_allocd_since_gc = GC_bytes_allocd;
547     pstats->allocd_bytes_before_gc = GC_bytes_allocd_before_gc;
548     pstats->non_gc_bytes = GC_non_gc_bytes;
549     pstats->gc_no = GC_gc_no; /* could be -1 */
550 #   ifdef PARALLEL_MARK
551       pstats->markers_m1 = (word)GC_markers_m1;
552 #   else
553       pstats->markers_m1 = 0; /* one marker */
554 #   endif
555     pstats->bytes_reclaimed_since_gc = GC_bytes_found > 0 ?
556                                         (word)GC_bytes_found : 0;
557     pstats->reclaimed_bytes_before_gc = GC_reclaimed_bytes_before_gc;
558   }
559 
560 # include <string.h> /* for memset() */
561 
562   GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s *pstats,
563                                           size_t stats_sz)
564   {
565     struct GC_prof_stats_s stats;
566     DCL_LOCK_STATE;
567 
568     LOCK();
569     fill_prof_stats(stats_sz >= sizeof(stats) ? pstats : &stats);
570     UNLOCK();
571 
572     if (stats_sz == sizeof(stats)) {
573       return sizeof(stats);
574     } else if (stats_sz > sizeof(stats)) {
575       /* Fill in the remaining part with -1.    */
576       memset((char *)pstats + sizeof(stats), 0xff, stats_sz - sizeof(stats));
577       return sizeof(stats);
578     } else {
579       BCOPY(&stats, pstats, stats_sz);
580       return stats_sz;
581     }
582   }
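  /*
   * Usage sketch (illustrative only): passing sizeof(struct GC_prof_stats_s)
   * keeps the call compatible across collector versions; fields unknown to
   * an older collector come back filled with -1 (all bits set):
   *
   *   struct GC_prof_stats_s ps;
   *   size_t n = GC_get_prof_stats(&ps, sizeof(ps));
   *   // n is the smaller of sizeof(ps) and the size the collector knows
   */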
583 
584 # ifdef THREADS
585     /* The _unsafe version assumes the caller holds the allocation lock. */
586     GC_API size_t GC_CALL GC_get_prof_stats_unsafe(
587                                             struct GC_prof_stats_s *pstats,
588                                             size_t stats_sz)
589     {
590       struct GC_prof_stats_s stats;
591 
592       if (stats_sz >= sizeof(stats)) {
593         fill_prof_stats(pstats);
594         if (stats_sz > sizeof(stats))
595           memset((char *)pstats + sizeof(stats), 0xff,
596                  stats_sz - sizeof(stats));
597         return sizeof(stats);
598       } else {
599         fill_prof_stats(&stats);
600         BCOPY(&stats, pstats, stats_sz);
601         return stats_sz;
602       }
603     }
604 # endif /* THREADS */
605 
606 #endif /* !GC_GET_HEAP_USAGE_NOT_NEEDED */
607 
608 #if defined(GC_DARWIN_THREADS) || defined(GC_OPENBSD_UTHREADS) \
609     || defined(GC_WIN32_THREADS) || (defined(NACL) && defined(THREADS))
610   /* GC does not use signals to suspend and restart threads.    */
611   GC_API void GC_CALL GC_set_suspend_signal(int sig GC_ATTR_UNUSED)
612   {
613     /* empty */
614   }
615 
616   GC_API void GC_CALL GC_set_thr_restart_signal(int sig GC_ATTR_UNUSED)
617   {
618     /* empty */
619   }
620 
621   GC_API int GC_CALL GC_get_suspend_signal(void)
622   {
623     return -1;
624   }
625 
626   GC_API int GC_CALL GC_get_thr_restart_signal(void)
627   {
628     return -1;
629   }
630 #endif /* GC_DARWIN_THREADS || GC_WIN32_THREADS || ... */
631 
632 #if !defined(_MAX_PATH) && (defined(MSWIN32) || defined(MSWINCE) \
633                             || defined(CYGWIN32))
634 # define _MAX_PATH MAX_PATH
635 #endif
636 
637 #ifdef GC_READ_ENV_FILE
638   /* This works for Win32/WinCE for now.  Really useful only for WinCE. */
639   STATIC char *GC_envfile_content = NULL;
640                         /* The content of the GC "env" file with CR and */
641                         /* LF replaced with '\0'.  NULL if the file is  */
642                         /* missing or empty.  Otherwise, always ends    */
643                         /* with '\0'.                                   */
644   STATIC unsigned GC_envfile_length = 0;
645                         /* Length of GC_envfile_content (if non-NULL).  */
646 
647 # ifndef GC_ENVFILE_MAXLEN
648 #   define GC_ENVFILE_MAXLEN 0x4000
649 # endif
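  /*
   * Illustrative example of the "env" file contents (one NAME=VALUE pair
   * per line, separated by CR and/or LF); the variable names are the same
   * ones GC_init() reads via GETENV():
   *
   *   GC_PRINT_STATS=1
   *   GC_INITIAL_HEAP_SIZE=16M
   */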
650 
651   /* The routine initializes GC_envfile_content from the GC "env" file. */
652   STATIC void GC_envfile_init(void)
653   {
654 #   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
655       HANDLE hFile;
656       char *content;
657       unsigned ofs;
658       unsigned len;
659       DWORD nBytesRead;
660       TCHAR path[_MAX_PATH + 0x10]; /* buffer for path + ext */
661       len = (unsigned)GetModuleFileName(NULL /* hModule */, path,
662                                         _MAX_PATH + 1);
663       /* If GetModuleFileName() has failed then len is 0. */
664       if (len > 4 && path[len - 4] == (TCHAR)'.') {
665         len -= 4; /* strip executable file extension */
666       }
667       BCOPY(TEXT(".gc.env"), &path[len], sizeof(TEXT(".gc.env")));
668       hFile = CreateFile(path, GENERIC_READ,
669                          FILE_SHARE_READ | FILE_SHARE_WRITE,
670                          NULL /* lpSecurityAttributes */, OPEN_EXISTING,
671                          FILE_ATTRIBUTE_NORMAL, NULL /* hTemplateFile */);
672       if (hFile == INVALID_HANDLE_VALUE)
673         return; /* the file is absent or the open operation failed */
674       len = (unsigned)GetFileSize(hFile, NULL);
675       if (len <= 1 || len >= GC_ENVFILE_MAXLEN) {
676         CloseHandle(hFile);
677         return; /* invalid file length - ignoring the file content */
678       }
679       /* At this execution point, GC_setpagesize() and GC_init_win32()  */
680       /* must already be called (for GET_MEM() to work correctly).      */
681       content = (char *)GET_MEM(ROUNDUP_PAGESIZE_IF_MMAP(len + 1));
682       if (content == NULL) {
683         CloseHandle(hFile);
684         return; /* allocation failure */
685       }
686       ofs = 0;
687       nBytesRead = (DWORD)-1L;
688           /* Last ReadFile() call should clear nBytesRead on success. */
689       while (ReadFile(hFile, content + ofs, len - ofs + 1, &nBytesRead,
690                       NULL /* lpOverlapped */) && nBytesRead != 0) {
691         if ((ofs += nBytesRead) > len)
692           break;
693       }
694       CloseHandle(hFile);
695       if (ofs != len || nBytesRead != 0)
696         return; /* read operation failed - ignoring the file content */
697       content[ofs] = '\0';
698       while (ofs-- > 0) {
699        if (content[ofs] == '\r' || content[ofs] == '\n')
700          content[ofs] = '\0';
701       }
702       GC_envfile_length = len + 1;
703       GC_envfile_content = content;
704 #   endif
705   }
706 
707   /* This routine scans GC_envfile_content for the specified            */
708   /* environment variable (and returns its value if found).             */
709   GC_INNER char * GC_envfile_getenv(const char *name)
710   {
711     char *p;
712     char *end_of_content;
713     unsigned namelen;
714 #   ifndef NO_GETENV
715       p = getenv(name); /* try the standard getenv() first */
716       if (p != NULL)
717         return *p != '\0' ? p : NULL;
718 #   endif
719     p = GC_envfile_content;
720     if (p == NULL)
721       return NULL; /* "env" file is absent (or empty) */
722     namelen = strlen(name);
723     if (namelen == 0) /* a sanity check */
724       return NULL;
725     for (end_of_content = p + GC_envfile_length;
726          p != end_of_content; p += strlen(p) + 1) {
727       if (strncmp(p, name, namelen) == 0 && *(p += namelen) == '=') {
728         p++; /* the match is found; skip '=' */
729         return *p != '\0' ? p : NULL;
730       }
731       /* If not matching then skip to the next line. */
732     }
733     return NULL; /* no match found */
734   }
735 #endif /* GC_READ_ENV_FILE */
736 
737 GC_INNER GC_bool GC_is_initialized = FALSE;
738 
739 #if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS)
740     GC_INNER CRITICAL_SECTION GC_write_cs;
741 #endif
742 
743 #ifndef DONT_USE_ATEXIT
744   STATIC void GC_exit_check(void)
745   {
746     if (GC_find_leak) {
747       GC_gcollect();
748     }
749   }
750 #endif
751 
752 #if defined(UNIX_LIKE) && !defined(NO_DEBUGGING)
753   static void looping_handler(int sig)
754   {
755     GC_err_printf("Caught signal %d: looping in handler\n", sig);
756     for (;;) {
757        /* empty */
758     }
759   }
760 
761   static GC_bool installed_looping_handler = FALSE;
762 
763   static void maybe_install_looping_handler(void)
764   {
765     /* Install looping handler before the write fault handler, so we    */
766     /* handle write faults correctly.                                   */
767     if (!installed_looping_handler && 0 != GETENV("GC_LOOP_ON_ABORT")) {
768       GC_set_and_save_fault_handler(looping_handler);
769       installed_looping_handler = TRUE;
770     }
771   }
772 
773 #else /* !UNIX_LIKE */
774 # define maybe_install_looping_handler()
775 #endif
776 
777 #define GC_DEFAULT_STDOUT_FD 1
778 #define GC_DEFAULT_STDERR_FD 2
779 
780 #if !defined(OS2) && !defined(MACOS) && !defined(GC_ANDROID_LOG) \
781     && !defined(MSWIN32) && !defined(MSWINCE)
782   STATIC int GC_stdout = GC_DEFAULT_STDOUT_FD;
783   STATIC int GC_stderr = GC_DEFAULT_STDERR_FD;
784   STATIC int GC_log = GC_DEFAULT_STDERR_FD;
785 #endif
786 
787 STATIC word GC_parse_mem_size_arg(const char *str)
788 {
789   char *endptr;
790   word result = 0; /* bad value */
791   char ch;
792 
793   if (*str != '\0') {
794     result = (word)STRTOULL(str, &endptr, 10);
795     ch = *endptr;
796     if (ch != '\0') {
797       if (*(endptr + 1) != '\0')
798         return 0;
799       /* Allow k, M or G suffix. */
800       switch (ch) {
801       case 'K':
802       case 'k':
803         result <<= 10;
804         break;
805       case 'M':
806       case 'm':
807         result <<= 20;
808         break;
809       case 'G':
810       case 'g':
811         result <<= 30;
812         break;
813       default:
814         result = 0;
815       }
816     }
817   }
818   return result;
819 }
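/*
 * Illustrative examples of accepted values: "16384" and "16k" both yield
 * 16384, "8M" yields 8388608, "1G" yields 1073741824; a string with extra
 * characters after the suffix (e.g. "8MB") parses as 0, which the callers
 * below treat as a bad value.
 */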
820 
821 #define GC_LOG_STD_NAME "gc.log"
822 
823 GC_API void GC_CALL GC_init(void)
824 {
825     /* LOCK(); -- no longer does anything this early. */
826     word initial_heap_sz;
827     IF_CANCEL(int cancel_state;)
828 
829     if (EXPECT(GC_is_initialized, TRUE)) return;
830 #   ifdef REDIRECT_MALLOC
831       {
832         static GC_bool init_started = FALSE;
833         if (init_started)
834           ABORT("Redirected malloc() called during GC init");
835         init_started = TRUE;
836       }
837 #   endif
838 
839 #   ifdef GC_INITIAL_HEAP_SIZE
840       initial_heap_sz = divHBLKSZ(GC_INITIAL_HEAP_SIZE);
841 #   else
842       initial_heap_sz = (word)MINHINCR;
843 #   endif
844     DISABLE_CANCEL(cancel_state);
845     /* Note that although we are nominally called with the */
846     /* allocation lock held, the allocation lock is now    */
847     /* only really acquired once a second thread is forked.*/
848     /* And the initialization code needs to run before     */
849     /* then.  Thus we really don't hold any locks, and can */
850     /* in fact safely initialize them here.                */
851 #   ifdef THREADS
852       GC_ASSERT(!GC_need_to_lock);
853 #     ifdef SN_TARGET_PS3
854         {
855           pthread_mutexattr_t mattr;
856 
857           if (0 != pthread_mutexattr_init(&mattr)) {
858             ABORT("pthread_mutexattr_init failed");
859           }
860           if (0 != pthread_mutex_init(&GC_allocate_ml, &mattr)) {
861             ABORT("pthread_mutex_init failed");
862           }
863           (void)pthread_mutexattr_destroy(&mattr);
864         }
865 #     endif
866 #   endif /* THREADS */
867 #   if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
868      {
869 #     ifndef MSWINCE
870         BOOL (WINAPI *pfn) (LPCRITICAL_SECTION, DWORD) = NULL;
871         HMODULE hK32 = GetModuleHandle(TEXT("kernel32.dll"));
872         if (hK32)
873           pfn = (BOOL (WINAPI *) (LPCRITICAL_SECTION, DWORD))
874                 GetProcAddress (hK32,
875                                 "InitializeCriticalSectionAndSpinCount");
876         if (pfn)
877             pfn(&GC_allocate_ml, 4000);
878         else
879 #     endif /* !MSWINCE */
880         /* else */ InitializeCriticalSection (&GC_allocate_ml);
881      }
882 #   endif /* GC_WIN32_THREADS */
883 #   if (defined(MSWIN32) || defined(MSWINCE)) && defined(THREADS)
884       InitializeCriticalSection(&GC_write_cs);
885 #   endif
886     GC_setpagesize();
887 #   ifdef MSWIN32
888       GC_init_win32();
889 #   endif
890 #   ifdef GC_READ_ENV_FILE
891       GC_envfile_init();
892 #   endif
893 #   ifndef SMALL_CONFIG
894 #     ifdef GC_PRINT_VERBOSE_STATS
895         /* This is useful for debugging and profiling on platforms with */
896         /* missing getenv() (like WinCE).                               */
897         GC_print_stats = VERBOSE;
898 #     else
899         if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) {
900           GC_print_stats = VERBOSE;
901         } else if (0 != GETENV("GC_PRINT_STATS")) {
902           GC_print_stats = 1;
903         }
904 #     endif
905 #     if (defined(UNIX_LIKE) && !defined(GC_ANDROID_LOG)) \
906          || defined(CYGWIN32) || defined(SYMBIAN)
907         {
908           char * file_name = GETENV("GC_LOG_FILE");
909 #         ifdef GC_LOG_TO_FILE_ALWAYS
910             if (NULL == file_name)
911               file_name = GC_LOG_STD_NAME;
912 #         else
913             if (0 != file_name)
914 #         endif
915           {
916             int log_d = open(file_name, O_CREAT|O_WRONLY|O_APPEND, 0666);
917             if (log_d < 0) {
918               GC_err_printf("Failed to open %s as log file\n", file_name);
919             } else {
920               char *str;
921               GC_log = log_d;
922               str = GETENV("GC_ONLY_LOG_TO_FILE");
923 #             ifdef GC_ONLY_LOG_TO_FILE
924                 /* The similar environment variable set to "0"  */
925                 /* overrides the effect of the macro defined.   */
926                 if (str != NULL && *str == '0' && *(str + 1) == '\0')
927 #             else
928                 /* Otherwise setting the environment variable   */
929                 /* to anything other than "0" prevents the      */
930                 /* redirection of stdout/err to the log file.   */
931                 if (str == NULL || (*str == '0' && *(str + 1) == '\0'))
932 #             endif
933               {
934                 GC_stdout = log_d;
935                 GC_stderr = log_d;
936               }
937             }
938           }
939         }
940 #     endif
941 #   endif /* !SMALL_CONFIG */
942 #   ifndef NO_DEBUGGING
943       if (0 != GETENV("GC_DUMP_REGULARLY")) {
944         GC_dump_regularly = TRUE;
945       }
946 #   endif
947 #   ifdef KEEP_BACK_PTRS
948       {
949         char * backtraces_string = GETENV("GC_BACKTRACES");
950         if (0 != backtraces_string) {
951           GC_backtraces = atol(backtraces_string);
952           if (backtraces_string[0] == '\0') GC_backtraces = 1;
953         }
954       }
955 #   endif
956     if (0 != GETENV("GC_FIND_LEAK")) {
957       GC_find_leak = 1;
958     }
959 #   ifndef SHORT_DBG_HDRS
960       if (0 != GETENV("GC_FINDLEAK_DELAY_FREE")) {
961         GC_findleak_delay_free = TRUE;
962       }
963 #   endif
964     if (0 != GETENV("GC_ALL_INTERIOR_POINTERS")) {
965       GC_all_interior_pointers = 1;
966     }
967     if (0 != GETENV("GC_DONT_GC")) {
968       GC_dont_gc = 1;
969     }
970     if (0 != GETENV("GC_PRINT_BACK_HEIGHT")) {
971       GC_print_back_height = TRUE;
972     }
973     if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
974       GC_large_alloc_warn_interval = LONG_MAX;
975     }
976     {
977       char * addr_string = GETENV("GC_TRACE");
978       if (0 != addr_string) {
979 #       ifndef ENABLE_TRACE
980           WARN("Tracing not enabled: Ignoring GC_TRACE value\n", 0);
981 #       else
982           word addr = (word)STRTOULL(addr_string, NULL, 16);
983           if (addr < 0x1000)
984               WARN("Unlikely trace address: %p\n", addr);
985           GC_trace_addr = (ptr_t)addr;
986 #       endif
987       }
988     }
989 #   ifdef GC_COLLECT_AT_MALLOC
990       {
991         char * string = GETENV("GC_COLLECT_AT_MALLOC");
992         if (0 != string) {
993           size_t min_lb = (size_t)STRTOULL(string, NULL, 10);
994           if (min_lb > 0)
995             GC_dbg_collect_at_malloc_min_lb = min_lb;
996         }
997       }
998 #   endif
999 #   ifndef GC_DISABLE_INCREMENTAL
1000       {
1001         char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
1002         if (0 != time_limit_string) {
1003           long time_limit = atol(time_limit_string);
1004           if (time_limit < 5) {
1005             WARN("GC_PAUSE_TIME_TARGET environment variable value too small "
1006                  "or bad syntax: Ignoring\n", 0);
1007           } else {
1008             GC_time_limit = time_limit;
1009           }
1010         }
1011       }
1012 #   endif
1013 #   ifndef SMALL_CONFIG
1014       {
1015         char * full_freq_string = GETENV("GC_FULL_FREQUENCY");
1016         if (full_freq_string != NULL) {
1017           int full_freq = atoi(full_freq_string);
1018           if (full_freq > 0)
1019             GC_full_freq = full_freq;
1020         }
1021       }
1022 #   endif
1023     {
1024       char * interval_string = GETENV("GC_LARGE_ALLOC_WARN_INTERVAL");
1025       if (0 != interval_string) {
1026         long interval = atol(interval_string);
1027         if (interval <= 0) {
1028           WARN("GC_LARGE_ALLOC_WARN_INTERVAL environment variable has "
1029                "bad value: Ignoring\n", 0);
1030         } else {
1031           GC_large_alloc_warn_interval = interval;
1032         }
1033       }
1034     }
1035     {
1036         char * space_divisor_string = GETENV("GC_FREE_SPACE_DIVISOR");
1037         if (space_divisor_string != NULL) {
1038           int space_divisor = atoi(space_divisor_string);
1039           if (space_divisor > 0)
1040             GC_free_space_divisor = (GC_word)space_divisor;
1041         }
1042     }
1043 #   ifdef USE_MUNMAP
1044       {
1045         char * string = GETENV("GC_UNMAP_THRESHOLD");
1046         if (string != NULL) {
1047           if (*string == '0' && *(string + 1) == '\0') {
1048             /* "0" is used to disable unmapping. */
1049             GC_unmap_threshold = 0;
1050           } else {
1051             int unmap_threshold = atoi(string);
1052             if (unmap_threshold > 0)
1053               GC_unmap_threshold = unmap_threshold;
1054           }
1055         }
1056       }
1057       {
1058         char * string = GETENV("GC_FORCE_UNMAP_ON_GCOLLECT");
1059         if (string != NULL) {
1060           if (*string == '0' && *(string + 1) == '\0') {
1061             /* "0" is used to turn off the mode. */
1062             GC_force_unmap_on_gcollect = FALSE;
1063           } else {
1064             GC_force_unmap_on_gcollect = TRUE;
1065           }
1066         }
1067       }
1068       {
1069         char * string = GETENV("GC_USE_ENTIRE_HEAP");
1070         if (string != NULL) {
1071           if (*string == '0' && *(string + 1) == '\0') {
1072             /* "0" is used to turn off the mode. */
1073             GC_use_entire_heap = FALSE;
1074           } else {
1075             GC_use_entire_heap = TRUE;
1076           }
1077         }
1078       }
1079 #   endif
1080     maybe_install_looping_handler();
1081     /* Adjust normal object descriptor for extra allocation.    */
1082     if (ALIGNMENT > GC_DS_TAGS && EXTRA_BYTES != 0) {
1083       GC_obj_kinds[NORMAL].ok_descriptor = ((word)(-ALIGNMENT) | GC_DS_LENGTH);
1084     }
1085     GC_exclude_static_roots_inner(beginGC_arrays, endGC_arrays);
1086     GC_exclude_static_roots_inner(beginGC_obj_kinds, endGC_obj_kinds);
1087 #   ifdef SEPARATE_GLOBALS
1088       GC_exclude_static_roots_inner(beginGC_objfreelist, endGC_objfreelist);
1089       GC_exclude_static_roots_inner(beginGC_aobjfreelist, endGC_aobjfreelist);
1090 #   endif
1091 #   if defined(USE_PROC_FOR_LIBRARIES) && defined(GC_LINUX_THREADS)
1092         WARN("USE_PROC_FOR_LIBRARIES + GC_LINUX_THREADS performs poorly.\n", 0);
1093         /* If thread stacks are cached, they tend to be scanned in      */
1094         /* entirety as part of the root set.  This will grow them to    */
1095         /* maximum size, and is generally not desirable.                */
1096 #   endif
1097 #   if defined(SEARCH_FOR_DATA_START)
1098         GC_init_linux_data_start();
1099 #   endif
1100 #   if defined(NETBSD) && defined(__ELF__)
1101         GC_init_netbsd_elf();
1102 #   endif
1103 #   if !defined(THREADS) || defined(GC_PTHREADS) \
1104         || defined(GC_WIN32_THREADS) || defined(GC_SOLARIS_THREADS)
1105       if (GC_stackbottom == 0) {
1106         GC_stackbottom = GC_get_main_stack_base();
1107 #       if (defined(LINUX) || defined(HPUX)) && defined(IA64)
1108           GC_register_stackbottom = GC_get_register_stack_base();
1109 #       endif
1110       } else {
1111 #       if (defined(LINUX) || defined(HPUX)) && defined(IA64)
1112           if (GC_register_stackbottom == 0) {
1113             WARN("GC_register_stackbottom should be set with GC_stackbottom\n", 0);
1114             /* The following may fail, since we may rely on             */
1115             /* alignment properties that may not hold with a user set   */
1116             /* GC_stackbottom.                                          */
1117             GC_register_stackbottom = GC_get_register_stack_base();
1118           }
1119 #       endif
1120       }
1121 #   endif
1122     GC_STATIC_ASSERT(sizeof (ptr_t) == sizeof(word));
1123     GC_STATIC_ASSERT(sizeof (signed_word) == sizeof(word));
1124     GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE);
1125 #   ifndef THREADS
1126       GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)GC_approx_sp()));
1127 #   endif
1128 #   if !defined(_AUX_SOURCE) || defined(__GNUC__)
1129       GC_STATIC_ASSERT((word)(-1) > (word)0);
1130       /* word should be unsigned */
1131 #   endif
1132     /* We no longer check for ((void*)(-1) > NULL) since all pointers   */
1133     /* are explicitly cast to word in every less-greater comparison.    */
1134     GC_STATIC_ASSERT((signed_word)(-1) < (signed_word)0);
1135 #   ifndef GC_DISABLE_INCREMENTAL
1136       if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) {
1137         /* For GWW_VDB on Win32, this needs to happen before any        */
1138         /* heap memory is allocated.                                    */
1139         GC_dirty_init();
1140         GC_ASSERT(GC_bytes_allocd == 0);
1141         GC_incremental = TRUE;
1142       }
1143 #   endif
1144 
1145     /* Add initial guess of root sets.  Do this first, since sbrk(0)    */
1146     /* might be used.                                                   */
1147       if (GC_REGISTER_MAIN_STATIC_DATA()) GC_register_data_segments();
1148     GC_init_headers();
1149     GC_bl_init();
1150     GC_mark_init();
1151     {
1152         char * sz_str = GETENV("GC_INITIAL_HEAP_SIZE");
1153         if (sz_str != NULL) {
1154           initial_heap_sz = GC_parse_mem_size_arg(sz_str);
1155           if (initial_heap_sz <= MINHINCR * HBLKSIZE) {
1156             WARN("Bad initial heap size %s - ignoring it.\n", sz_str);
1157           }
1158           initial_heap_sz = divHBLKSZ(initial_heap_sz);
1159         }
1160     }
1161     {
1162         char * sz_str = GETENV("GC_MAXIMUM_HEAP_SIZE");
1163         if (sz_str != NULL) {
1164           word max_heap_sz = GC_parse_mem_size_arg(sz_str);
1165           if (max_heap_sz < initial_heap_sz * HBLKSIZE) {
1166             WARN("Bad maximum heap size %s - ignoring it.\n", sz_str);
1167           }
1168           if (0 == GC_max_retries) GC_max_retries = 2;
1169           GC_set_max_heap_size(max_heap_sz);
1170         }
1171     }
1172     if (!GC_expand_hp_inner(initial_heap_sz)) {
1173         GC_err_printf("Can't start up: not enough memory\n");
1174         EXIT();
1175     } else {
1176         GC_requested_heapsize += initial_heap_sz;
1177     }
1178     if (GC_all_interior_pointers)
1179       GC_initialize_offsets();
1180     GC_register_displacement_inner(0L);
1181 #   if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
1182       if (!GC_all_interior_pointers) {
1183         /* TLS ABI uses pointer-sized offsets for dtv. */
1184         GC_register_displacement_inner(sizeof(void *));
1185       }
1186 #   endif
1187     GC_init_size_map();
1188 #   ifdef PCR
1189       if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
1190           != PCR_ERes_okay) {
1191           ABORT("Can't lock load state");
1192       } else if (PCR_IL_Unlock() != PCR_ERes_okay) {
1193           ABORT("Can't unlock load state");
1194       }
1195       PCR_IL_Unlock();
1196       GC_pcr_install();
1197 #   endif
1198     GC_is_initialized = TRUE;
1199 #   if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
1200         GC_thr_init();
1201 #   endif
1202     COND_DUMP;
1203     /* Get black list set up and/or incremental GC started */
1204       if (!GC_dont_precollect || GC_incremental) GC_gcollect_inner();
1205 #   ifdef STUBBORN_ALLOC
1206         GC_stubborn_init();
1207 #   endif
1208 #   ifndef DONT_USE_ATEXIT
1209       if (GC_find_leak) {
1210         /* This is to give us at least one chance to detect leaks.        */
1211         /* This may report some very benign leaks, but ...                */
1212         atexit(GC_exit_check);
1213       }
1214 #   endif
1215 
1216     /* The rest of this again assumes we don't really hold      */
1217     /* the allocation lock.                                     */
1218 #   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
1219         /* Make sure marker threads are started and thread local */
1220         /* allocation is initialized, in case we didn't get      */
1221         /* called from GC_init_parallel.                         */
1222         GC_init_parallel();
1223 #   endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
1224 
1225 #   if defined(DYNAMIC_LOADING) && defined(DARWIN)
1226         /* This must be called WITHOUT the allocation lock held */
1227         /* and before any threads are created.                  */
1228         GC_init_dyld();
1229 #   endif
1230     RESTORE_CANCEL(cancel_state);
1231 }
1232 
1233 GC_API void GC_CALL GC_enable_incremental(void)
1234 {
1235 # if !defined(GC_DISABLE_INCREMENTAL) && !defined(KEEP_BACK_PTRS)
1236     DCL_LOCK_STATE;
1237     /* If we are keeping back pointers, the GC itself dirties all */
1238     /* pages on which objects have been marked, making            */
1239     /* incremental GC pointless.                                  */
1240     if (!GC_find_leak && 0 == GETENV("GC_DISABLE_INCREMENTAL")) {
1241       LOCK();
1242       if (!GC_incremental) {
1243         GC_setpagesize();
1244         /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
1245         maybe_install_looping_handler(); /* Before write fault handler! */
1246         GC_incremental = TRUE;
1247         if (!GC_is_initialized) {
1248           GC_init();
1249         } else {
1250           GC_dirty_init();
1251         }
1252         if (GC_dirty_maintained && !GC_dont_gc) {
1253                                 /* Can't easily do it if GC_dont_gc.    */
1254           if (GC_bytes_allocd > 0) {
1255             /* There may be unmarked reachable objects. */
1256             GC_gcollect_inner();
1257           }
1258             /* else we're OK in assuming everything's   */
1259             /* clean since nothing can point to an      */
1260             /* unmarked object.                         */
1261           GC_read_dirty();
1262         }
1263       }
1264       UNLOCK();
1265       return;
1266     }
1267 # endif
1268   GC_init();
1269 }
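/*
 * Usage sketch (illustrative only): incremental collection is normally
 * requested right after initialization; where incremental mode is
 * unsupported or disabled, the call simply ensures the collector is
 * initialized:
 *
 *   GC_INIT();
 *   GC_enable_incremental();
 */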
1270 
1271 #if defined(THREADS) && (!defined(PARALLEL_MARK) || !defined(CAN_HANDLE_FORK))
1272   GC_API void GC_CALL GC_start_mark_threads(void)
1273   {
1274     /* No action since parallel markers are disabled (or no POSIX fork). */
1275     GC_ASSERT(I_DONT_HOLD_LOCK());
1276   }
1277 #endif
1278 
1279 #if defined(MSWIN32) || defined(MSWINCE)
1280 
1281 # if defined(_MSC_VER) && defined(_DEBUG) && !defined(MSWINCE)
1282 #   include <crtdbg.h>
1283 # endif
1284 
1285   STATIC HANDLE GC_log = 0;
1286 
1287   void GC_deinit(void)
1288   {
1289 #   ifdef THREADS
1290       if (GC_is_initialized) {
1291         DeleteCriticalSection(&GC_write_cs);
1292       }
1293 #   endif
1294   }
1295 
1296 # ifdef THREADS
1297 #   ifdef PARALLEL_MARK
1298 #     define IF_NEED_TO_LOCK(x) if (GC_parallel || GC_need_to_lock) x
1299 #   else
1300 #     define IF_NEED_TO_LOCK(x) if (GC_need_to_lock) x
1301 #   endif
1302 # else
1303 #   define IF_NEED_TO_LOCK(x)
1304 # endif /* !THREADS */
1305 
1306   STATIC HANDLE GC_CreateLogFile(void)
1307   {
1308     HANDLE hFile;
1309     TCHAR *logPath;
1310     BOOL appendToFile = FALSE;
1311 #   if !defined(NO_GETENV_WIN32) || !defined(OLD_WIN32_LOG_FILE)
1312       TCHAR pathBuf[_MAX_PATH + 0x10]; /* buffer for path + ext */
1313 
1314       logPath = pathBuf;
1315 #   endif
1316 
1317     /* Use GetEnvironmentVariable instead of GETENV() for unicode support. */
1318 #   ifndef NO_GETENV_WIN32
1319       if (GetEnvironmentVariable(TEXT("GC_LOG_FILE"), pathBuf,
1320                                  _MAX_PATH + 1) - 1U < (DWORD)_MAX_PATH) {
1321         appendToFile = TRUE;
1322       } else
1323 #   endif
1324     /* else */ {
1325       /* Env var not found or its value too long.       */
1326 #     ifdef OLD_WIN32_LOG_FILE
1327         logPath = TEXT(GC_LOG_STD_NAME);
1328 #     else
1329         int len = (int)GetModuleFileName(NULL /* hModule */, pathBuf,
1330                                          _MAX_PATH + 1);
1331         /* If GetModuleFileName() has failed then len is 0. */
1332         if (len > 4 && pathBuf[len - 4] == (TCHAR)'.') {
1333           len -= 4; /* strip executable file extension */
1334         }
1335         BCOPY(TEXT(".") TEXT(GC_LOG_STD_NAME), &pathBuf[len],
1336               sizeof(TEXT(".") TEXT(GC_LOG_STD_NAME)));
1337 #     endif
1338     }
1339 
1340     hFile = CreateFile(logPath, GENERIC_WRITE, FILE_SHARE_READ,
1341                        NULL /* lpSecurityAttributes */,
1342                        appendToFile ? OPEN_ALWAYS : CREATE_ALWAYS,
1343                        GC_print_stats == VERBOSE ? FILE_ATTRIBUTE_NORMAL :
1344                             /* immediately flush writes unless very verbose */
1345                             FILE_ATTRIBUTE_NORMAL | FILE_FLAG_WRITE_THROUGH,
1346                        NULL /* hTemplateFile */);
1347 #   ifndef NO_GETENV_WIN32
1348       if (appendToFile && hFile != INVALID_HANDLE_VALUE) {
1349         LONG posHigh = 0;
1350         (void)SetFilePointer(hFile, 0, &posHigh, FILE_END);
1351                                   /* Seek to file end (ignoring any error) */
1352       }
1353 #   endif
1354     return hFile;
1355   }
1356 
1357   STATIC int GC_write(const char *buf, size_t len)
1358   {
1359       BOOL res;
1360       DWORD written;
1361 #     if defined(THREADS) && defined(GC_ASSERTIONS)
1362         static GC_bool inside_write = FALSE;
1363                         /* to prevent infinite recursion at abort.      */
1364         if (inside_write)
1365           return -1;
1366 #     endif
1367 
1368       if (len == 0)
1369           return 0;
1370       IF_NEED_TO_LOCK(EnterCriticalSection(&GC_write_cs));
1371 #     if defined(THREADS) && defined(GC_ASSERTIONS)
1372         if (GC_write_disabled) {
1373           inside_write = TRUE;
1374           ABORT("Assertion failure: GC_write called with write_disabled");
1375         }
1376 #     endif
1377       if (GC_log == 0) {
1378         GC_log = GC_CreateLogFile();
1379       }
1380       if (GC_log == INVALID_HANDLE_VALUE) {
1381         IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs));
1382 #       ifdef NO_DEBUGGING
1383           /* Ignore open log failure (e.g., it might be caused by       */
1384           /* read-only folder of the client application).               */
1385           return 0;
1386 #       else
1387           return -1;
1388 #       endif
1389       }
1390       res = WriteFile(GC_log, buf, (DWORD)len, &written, NULL);
1391 #     if defined(_MSC_VER) && defined(_DEBUG)
1392 #         ifdef MSWINCE
1393               /* There is no CrtDbgReport() in WinCE */
1394               {
1395                   WCHAR wbuf[1024];
1396                   /* Always use Unicode variant of OutputDebugString() */
1397                   wbuf[MultiByteToWideChar(CP_ACP, 0 /* dwFlags */,
1398                                 buf, len, wbuf,
1399                                 sizeof(wbuf) / sizeof(wbuf[0]) - 1)] = 0;
1400                   OutputDebugStringW(wbuf);
1401               }
1402 #         else
1403               _CrtDbgReport(_CRT_WARN, NULL, 0, NULL, "%.*s", len, buf);
1404 #         endif
1405 #     endif
1406       IF_NEED_TO_LOCK(LeaveCriticalSection(&GC_write_cs));
1407       return res ? (int)written : -1;
1408   }
1409 
1410   /* FIXME: This is pretty ugly ... */
1411 # define WRITE(f, buf, len) GC_write(buf, len)
1412 
1413 #elif defined(OS2) || defined(MACOS)
1414   STATIC FILE * GC_stdout = NULL;
1415   STATIC FILE * GC_stderr = NULL;
1416   STATIC FILE * GC_log = NULL;
1417 
1418   /* Initialize GC_log (and the friends) passed to GC_write().  */
1419   STATIC void GC_set_files(void)
1420   {
1421     if (GC_stdout == NULL) {
1422       GC_stdout = stdout;
1423     }
1424     if (GC_stderr == NULL) {
1425       GC_stderr = stderr;
1426     }
1427     if (GC_log == NULL) {
1428       GC_log = stderr;
1429     }
1430   }
1431 
1432   GC_INLINE int GC_write(FILE *f, const char *buf, size_t len)
1433   {
1434     int res = fwrite(buf, 1, len, f);
1435     fflush(f);
1436     return res;
1437   }
1438 
1439 # define WRITE(f, buf, len) (GC_set_files(), GC_write(f, buf, len))
1440 
1441 #elif defined(GC_ANDROID_LOG)
1442 
1443 # include <android/log.h>
1444 
1445 # ifndef GC_ANDROID_LOG_TAG
1446 #   define GC_ANDROID_LOG_TAG "BDWGC"
1447 # endif
1448 
1449 # define GC_stdout ANDROID_LOG_DEBUG
1450 # define GC_stderr ANDROID_LOG_ERROR
1451 # define GC_log GC_stdout
1452 
1453 # define WRITE(level, buf, unused_len) \
1454                 __android_log_write(level, GC_ANDROID_LOG_TAG, buf)
1455 
1456 #else
1457 # if !defined(AMIGA) && !defined(__CC_ARM)
1458 #   include <unistd.h>
1459 # endif
1460 
1461   STATIC int GC_write(int fd, const char *buf, size_t len)
1462   {
1463 #   if defined(ECOS) || defined(NOSYS)
1464 #     ifdef ECOS
1465         /* FIXME: This seems to be defined nowhere at present.  */
1466         /* _Jv_diag_write(buf, len); */
1467 #     else
1468         /* No writing.  */
1469 #     endif
1470       return len;
1471 #   else
1472       int bytes_written = 0;
1473       int result;
1474       IF_CANCEL(int cancel_state;)
1475 
1476       DISABLE_CANCEL(cancel_state);
1477       while ((size_t)bytes_written < len) {
1478 #        ifdef GC_SOLARIS_THREADS
1479              result = syscall(SYS_write, fd, buf + bytes_written,
1480                                              len - bytes_written);
1481 #        else
1482              result = write(fd, buf + bytes_written, len - bytes_written);
1483 #        endif
1484          if (-1 == result) {
1485              RESTORE_CANCEL(cancel_state);
1486              return(result);
1487          }
1488          bytes_written += result;
1489       }
1490       RESTORE_CANCEL(cancel_state);
1491       return(bytes_written);
1492 #   endif
1493   }
1494 
1495 # define WRITE(f, buf, len) GC_write(f, buf, len)
1496 #endif /* !MSWIN32 && !OS2 && !MACOS && !GC_ANDROID_LOG */
1497 
1498 #define BUFSZ 1024
1499 
1500 #if defined(DJGPP) || defined(__STRICT_ANSI__)
1501   /* vsnprintf is missing in DJGPP (v2.0.3) */
1502 # define GC_VSNPRINTF(buf, bufsz, format, args) vsprintf(buf, format, args)
1503 #elif defined(_MSC_VER)
1504 # ifdef MSWINCE
1505     /* _vsnprintf is deprecated in WinCE */
1506 #   define GC_VSNPRINTF StringCchVPrintfA
1507 # else
1508 #   define GC_VSNPRINTF _vsnprintf
1509 # endif
1510 #else
1511 # define GC_VSNPRINTF vsnprintf
1512 #endif
1513 
1514 /* A version of printf that is unlikely to call malloc, and is thus safer */
1515 /* to call from the collector in case malloc has been bound to GC_malloc. */
1516 /* Floating point arguments and formats should be avoided, since FP       */
1517 /* conversion is more likely to allocate memory.                          */
1518 /* Assumes that no more than BUFSZ-1 characters are written at once.      */
1519 #define GC_PRINTF_FILLBUF(buf, format) \
1520         do { \
1521           va_list args; \
1522           va_start(args, format); \
1523           (buf)[sizeof(buf) - 1] = 0x15; /* guard */ \
1524           (void)GC_VSNPRINTF(buf, sizeof(buf) - 1, format, args); \
1525           va_end(args); \
1526           if ((buf)[sizeof(buf) - 1] != 0x15) \
1527             ABORT("GC_printf clobbered stack"); \
1528         } while (0)
1529 
1530 void GC_printf(const char *format, ...)
1531 {
1532     char buf[BUFSZ + 1];
1533 
1534     if (!GC_quiet) {
1535       GC_PRINTF_FILLBUF(buf, format);
1536       if (WRITE(GC_stdout, buf, strlen(buf)) < 0)
1537         ABORT("write to stdout failed");
1538     }
1539 }
1540 
1541 void GC_err_printf(const char *format, ...)
1542 {
1543     char buf[BUFSZ + 1];
1544 
1545     GC_PRINTF_FILLBUF(buf, format);
1546     GC_err_puts(buf);
1547 }
1548 
1549 void GC_log_printf(const char *format, ...)
1550 {
1551     char buf[BUFSZ + 1];
1552 
1553     GC_PRINTF_FILLBUF(buf, format);
1554     if (WRITE(GC_log, buf, strlen(buf)) < 0)
1555       ABORT("write to GC log failed");
1556 }
1557 
1558 #ifndef GC_ANDROID_LOG
1559 
1560 # define GC_warn_printf GC_err_printf
1561 
1562 #else
1563 
1564   GC_INNER void GC_info_log_printf(const char *format, ...)
1565   {
1566     char buf[BUFSZ + 1];
1567 
1568     GC_PRINTF_FILLBUF(buf, format);
1569     (void)WRITE(ANDROID_LOG_INFO, buf, 0 /* unused */);
1570   }
1571 
1572   GC_INNER void GC_verbose_log_printf(const char *format, ...)
1573   {
1574     char buf[BUFSZ + 1];
1575 
1576     GC_PRINTF_FILLBUF(buf, format);
1577     (void)WRITE(ANDROID_LOG_VERBOSE, buf, 0); /* ignore write errors */
1578   }
1579 
1580   STATIC void GC_warn_printf(const char *format, ...)
1581   {
1582     char buf[BUFSZ + 1];
1583 
1584     GC_PRINTF_FILLBUF(buf, format);
1585     (void)WRITE(ANDROID_LOG_WARN, buf, 0);
1586   }
1587 
1588 #endif /* GC_ANDROID_LOG */
1589 
1590 void GC_err_puts(const char *s)
1591 {
1592     (void)WRITE(GC_stderr, s, strlen(s)); /* ignore errors */
1593 }
1594 
1595 STATIC void GC_CALLBACK GC_default_warn_proc(char *msg, GC_word arg)
1596 {
1597     /* TODO: Add an assertion that arg complies with msg (format). */
1598     GC_warn_printf(msg, arg);
1599 }
1600 
1601 GC_INNER GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;
1602 
1603 /* This is recommended for production code (release). */
1604 GC_API void GC_CALLBACK GC_ignore_warn_proc(char *msg, GC_word arg)
1605 {
1606     if (GC_print_stats) {
1607       /* Don't ignore warnings if stats printing is on. */
1608       GC_default_warn_proc(msg, arg);
1609     }
1610 }
1611 
1612 GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc p)
1613 {
1614     DCL_LOCK_STATE;
1615     GC_ASSERT(NONNULL_ARG_NOT_NULL(p));
1616 #   ifdef GC_WIN32_THREADS
1617 #     ifdef CYGWIN32
1618         /* Need explicit GC_INIT call */
1619         GC_ASSERT(GC_is_initialized);
1620 #     else
1621         if (!GC_is_initialized) GC_init();
1622 #     endif
1623 #   endif
1624     LOCK();
1625     GC_current_warn_proc = p;
1626     UNLOCK();
1627 }
1628 
1629 GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void)
1630 {
1631     GC_warn_proc result;
1632     DCL_LOCK_STATE;
1633     LOCK();
1634     result = GC_current_warn_proc;
1635     UNLOCK();
1636     return(result);
1637 }
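
/* Illustrative sketch (client code, excluded from compilation): install a  */
/* warning handler.  GC_ignore_warn_proc (above) silences warnings unless   */
/* stats printing is enabled; a custom GC_warn_proc can redirect them       */
/* instead.  my_warn_proc below is hypothetical, not part of the library,   */
/* and assumes <stdio.h>.                                                   */
#if 0
  static void GC_CALLBACK my_warn_proc(char *msg, GC_word arg)
  {
    /* msg is a printf-style format expecting a single word-sized argument. */
    fprintf(stderr, "GC warning: ");
    fprintf(stderr, msg, arg);
  }

  /* During start-up: */
  GC_set_warn_proc(my_warn_proc);
  /* or, for release builds: GC_set_warn_proc(GC_ignore_warn_proc); */
#endif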
1638 
1639 #if !defined(PCR) && !defined(SMALL_CONFIG)
1640   /* Print (or display) a message before abnormal exit (including       */
1641   /* abort).  Invoked from ABORT(msg) macro (there msg is non-NULL)     */
1642   /* and from EXIT() macro (msg is NULL in that case).                  */
1643   STATIC void GC_CALLBACK GC_default_on_abort(const char *msg)
1644   {
1645     GC_find_leak = FALSE; /* disable at-exit GC_gcollect()  */
1646 
1647     if (msg != NULL) {
1648 #     if defined(MSWIN32)
1649 #       ifndef DONT_USE_USER32_DLL
1650           /* Use static binding to "user32.dll".        */
1651           (void)MessageBoxA(NULL, msg, "Fatal error in GC",
1652                             MB_ICONERROR | MB_OK);
1653 #       else
1654           /* This simplifies linking - resolve "MessageBoxA" at run-time. */
1655           HINSTANCE hU32 = LoadLibrary(TEXT("user32.dll"));
1656           if (hU32) {
1657             FARPROC pfn = GetProcAddress(hU32, "MessageBoxA");
1658             if (pfn)
1659               (void)(*(int (WINAPI *)(HWND, LPCSTR, LPCSTR, UINT))pfn)(
1660                                   NULL /* hWnd */, msg, "Fatal error in GC",
1661                                   MB_ICONERROR | MB_OK);
1662             (void)FreeLibrary(hU32);
1663           }
1664 #       endif
1665         /* Also duplicate msg to GC log file.   */
1666 #     endif
1667 
1668 #   ifndef GC_ANDROID_LOG
1669       /* Avoid calling GC_err_printf() here, as GC_on_abort() could be  */
1670       /* called from it.  Note 1: this is not an atomic output.         */
1671       /* Note 2: possible write errors are ignored.                     */
1672 #     if defined(THREADS) && defined(GC_ASSERTIONS) \
1673          && (defined(MSWIN32) || defined(MSWINCE))
1674         if (!GC_write_disabled)
1675 #     endif
1676       {
1677         if (WRITE(GC_stderr, (void *)msg, strlen(msg)) >= 0)
1678           (void)WRITE(GC_stderr, (void *)("\n"), 1);
1679       }
1680 #   else
1681       __android_log_assert("*" /* cond */, GC_ANDROID_LOG_TAG, "%s\n", msg);
1682 #   endif
1683     }
1684 
1685 #   if !defined(NO_DEBUGGING) && !defined(GC_ANDROID_LOG)
1686       if (GETENV("GC_LOOP_ON_ABORT") != NULL) {
1687             /* In many cases it's easier to debug a running process.    */
1688             /* It's arguably nicer to sleep, but that makes it harder   */
1689             /* to look at the thread if the debugger doesn't know much  */
1690             /* about threads.                                           */
1691             for(;;) {
1692               /* Empty */
1693             }
1694       }
1695 #   endif
1696   }
1697 
1698   GC_abort_func GC_on_abort = GC_default_on_abort;
1699 
1700   GC_API void GC_CALL GC_set_abort_func(GC_abort_func fn)
1701   {
1702       DCL_LOCK_STATE;
1703       GC_ASSERT(NONNULL_ARG_NOT_NULL(fn));
1704       LOCK();
1705       GC_on_abort = fn;
1706       UNLOCK();
1707   }
1708 
1709   GC_API GC_abort_func GC_CALL GC_get_abort_func(void)
1710   {
1711       GC_abort_func fn;
1712       DCL_LOCK_STATE;
1713       LOCK();
1714       fn = GC_on_abort;
1715       UNLOCK();
1716       return fn;
1717   }
1718 #endif /* !PCR && !SMALL_CONFIG */
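
/* Illustrative sketch (client code, excluded from compilation): replace the */
/* default abort behavior, e.g. to route the message into an application     */
/* crash reporter.  my_on_gc_abort is hypothetical and assumes <stdio.h>.    */
#if 0
  static void GC_CALLBACK my_on_gc_abort(const char *msg)
  {
    if (msg != NULL)
      fprintf(stderr, "Fatal GC error: %s\n", msg);
    /* The process still terminates after this hook returns. */
  }

  GC_set_abort_func(my_on_gc_abort);
#endif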
1719 
1720 GC_API void GC_CALL GC_enable(void)
1721 {
1722     DCL_LOCK_STATE;
1723 
1724     LOCK();
1725     GC_ASSERT(GC_dont_gc != 0); /* ensure no counter underflow */
1726     GC_dont_gc--;
1727     UNLOCK();
1728 }
1729 
1730 GC_API void GC_CALL GC_disable(void)
1731 {
1732     DCL_LOCK_STATE;
1733     LOCK();
1734     GC_dont_gc++;
1735     UNLOCK();
1736 }
1737 
1738 GC_API int GC_CALL GC_is_disabled(void)
1739 {
1740     return GC_dont_gc != 0;
1741 }
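
/* Illustrative sketch (client code, excluded from compilation):            */
/* GC_disable() and GC_enable() nest, so collections resume only once the   */
/* disable count drops back to zero.                                        */
#if 0
  GC_disable();
  /* ... region where a collection would be inconvenient ... */
  GC_enable();
  if (!GC_is_disabled()) {
    /* Collections are permitted again here. */
  }
#endif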
1742 
1743 /* Helper procedures for new kind creation.     */
1744 GC_API void ** GC_CALL GC_new_free_list_inner(void)
1745 {
1746     void *result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t),
1747                                       PTRFREE);
1748     if (result == 0) ABORT("Failed to allocate freelist for new kind");
1749     BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t));
1750     return result;
1751 }
1752 
1753 GC_API void ** GC_CALL GC_new_free_list(void)
1754 {
1755     void *result;
1756     DCL_LOCK_STATE;
1757     LOCK();
1758     result = GC_new_free_list_inner();
1759     UNLOCK();
1760     return result;
1761 }
1762 
1763 GC_API unsigned GC_CALL GC_new_kind_inner(void **fl, GC_word descr,
1764                                           int adjust, int clear)
1765 {
1766     unsigned result = GC_n_kinds;
1767 
1768     if (result < MAXOBJKINDS) {
1769       GC_n_kinds++;
1770       GC_obj_kinds[result].ok_freelist = fl;
1771       GC_obj_kinds[result].ok_reclaim_list = 0;
1772       GC_obj_kinds[result].ok_descriptor = descr;
1773       GC_obj_kinds[result].ok_relocate_descr = adjust;
1774       GC_obj_kinds[result].ok_init = (GC_bool)clear;
1775 #     ifdef ENABLE_DISCLAIM
1776         GC_obj_kinds[result].ok_mark_unconditionally = FALSE;
1777         GC_obj_kinds[result].ok_disclaim_proc = 0;
1778 #     endif
1779     } else {
1780       ABORT("Too many kinds");
1781     }
1782     return result;
1783 }
1784 
1785 GC_API unsigned GC_CALL GC_new_kind(void **fl, GC_word descr, int adjust,
1786                                     int clear)
1787 {
1788     unsigned result;
1789     DCL_LOCK_STATE;
1790     LOCK();
1791     result = GC_new_kind_inner(fl, descr, adjust, clear);
1792     UNLOCK();
1793     return result;
1794 }
1795 
1796 GC_API unsigned GC_CALL GC_new_proc_inner(GC_mark_proc proc)
1797 {
1798     unsigned result = GC_n_mark_procs;
1799 
1800     if (result < MAX_MARK_PROCS) {
1801       GC_n_mark_procs++;
1802       GC_mark_procs[result] = proc;
1803     } else {
1804       ABORT("Too many mark procedures");
1805     }
1806     return result;
1807 }
1808 
1809 GC_API unsigned GC_CALL GC_new_proc(GC_mark_proc proc)
1810 {
1811     unsigned result;
1812     DCL_LOCK_STATE;
1813     LOCK();
1814     result = GC_new_proc_inner(proc);
1815     UNLOCK();
1816     return result;
1817 }
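
/* Illustrative sketch (client code, excluded from compilation): the usual  */
/* way these helpers are combined - allocate a fresh free list, register a  */
/* new object kind whose body is scanned for pointers (much like the        */
/* predefined NORMAL kind), and allocate from it via GC_generic_malloc()    */
/* from gc_mark.h.  The descriptor details below are a sketch, not the only */
/* valid choice.                                                            */
#if 0
  void **my_fl = GC_new_free_list();
  unsigned my_kind = GC_new_kind(my_fl, 0 | GC_DS_LENGTH,
                                 1 /* adjust descr by object size */,
                                 1 /* clear new objects */);
  void *obj = GC_generic_malloc(64 /* bytes */, my_kind);
#endif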
1818 
1819 GC_API void * GC_CALL GC_call_with_alloc_lock(GC_fn_type fn, void *client_data)
1820 {
1821     void * result;
1822     DCL_LOCK_STATE;
1823 
1824 #   ifdef THREADS
1825       LOCK();
1826 #   endif
1827     result = (*fn)(client_data);
1828 #   ifdef THREADS
1829       UNLOCK();
1830 #   endif
1831     return(result);
1832 }
1833 
1834 GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func fn, void *arg)
1835 {
1836     struct GC_stack_base base;
1837     void *result;
1838 
1839     base.mem_base = (void *)&base;
1840 #   ifdef IA64
1841       base.reg_base = (void *)GC_save_regs_in_stack();
1842       /* Unnecessarily flushes register stack,          */
1843       /* but that probably doesn't hurt.                */
1844 #   endif
1845     result = fn(&base, arg);
1846     /* Strongly discourage the compiler from treating the above */
1847     /* as a tail call.                                          */
1848     GC_noop1((word)(&base));
1849     return result;
1850 }
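
/* Illustrative sketch (client code, excluded from compilation): a thread    */
/* created outside the GC-provided wrappers can obtain its stack base via    */
/* GC_call_with_stack_base() and hand it to GC_register_my_thread()          */
/* (declared in gc.h for thread-capable builds; the main program must have   */
/* called GC_allow_register_threads() first).                                */
#if 0
  static void *thread_body(struct GC_stack_base *sb, void *arg)
  {
    (void)GC_register_my_thread(sb);  /* make this thread known to the GC */
    /* ... work that allocates from or points into the GC heap ... */
    (void)GC_unregister_my_thread();
    return arg;
  }

  /* Executed on the foreign thread itself: */
  (void)GC_call_with_stack_base(thread_body, NULL);
#endif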
1851 
1852 #ifndef THREADS
1853 
1854 GC_INNER ptr_t GC_blocked_sp = NULL;
1855         /* NULL value means we are not inside GC_do_blocking() call. */
1856 # ifdef IA64
1857     STATIC ptr_t GC_blocked_register_sp = NULL;
1858 # endif
1859 
1860 GC_INNER struct GC_traced_stack_sect_s *GC_traced_stack_sect = NULL;
1861 
1862 /* This is nearly the same as in win32_threads.c        */
1863 GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
1864                                              void * client_data)
1865 {
1866     struct GC_traced_stack_sect_s stacksect;
1867     GC_ASSERT(GC_is_initialized);
1868 
1869     /* Adjust our stack base value (this could happen if        */
1870     /* GC_get_main_stack_base() is unimplemented or broken for  */
1871     /* the platform).                                           */
1872     if ((word)GC_stackbottom HOTTER_THAN (word)(&stacksect))
1873       GC_stackbottom = (ptr_t)(&stacksect);
1874 
1875     if (GC_blocked_sp == NULL) {
1876       /* We are not inside GC_do_blocking() - do nothing more.  */
1877       client_data = fn(client_data);
1878       /* Prevent treating the above as a tail call.     */
1879       GC_noop1((word)(&stacksect));
1880       return client_data; /* result */
1881     }
1882 
1883     /* Setup new "stack section".       */
1884     stacksect.saved_stack_ptr = GC_blocked_sp;
1885 #   ifdef IA64
1886       /* This is the same as in GC_call_with_stack_base().      */
1887       stacksect.backing_store_end = GC_save_regs_in_stack();
1888       /* Unnecessarily flushes register stack,          */
1889       /* but that probably doesn't hurt.                */
1890       stacksect.saved_backing_store_ptr = GC_blocked_register_sp;
1891 #   endif
1892     stacksect.prev = GC_traced_stack_sect;
1893     GC_blocked_sp = NULL;
1894     GC_traced_stack_sect = &stacksect;
1895 
1896     client_data = fn(client_data);
1897     GC_ASSERT(GC_blocked_sp == NULL);
1898     GC_ASSERT(GC_traced_stack_sect == &stacksect);
1899 
1900     /* Restore original "stack section".        */
1901     GC_traced_stack_sect = stacksect.prev;
1902 #   ifdef IA64
1903       GC_blocked_register_sp = stacksect.saved_backing_store_ptr;
1904 #   endif
1905     GC_blocked_sp = stacksect.saved_stack_ptr;
1906 
1907     return client_data; /* result */
1908 }
1909 
1910 /* This is nearly the same as in win32_threads.c        */
1911 STATIC void GC_do_blocking_inner(ptr_t data, void * context GC_ATTR_UNUSED)
1912 {
1913     struct blocking_data * d = (struct blocking_data *) data;
1914     GC_ASSERT(GC_is_initialized);
1915     GC_ASSERT(GC_blocked_sp == NULL);
1916 #   ifdef SPARC
1917         GC_blocked_sp = GC_save_regs_in_stack();
1918 #   else
1919         GC_blocked_sp = (ptr_t) &d; /* save approx. sp */
1920 #   endif
1921 #   ifdef IA64
1922         GC_blocked_register_sp = GC_save_regs_in_stack();
1923 #   endif
1924 
1925     d -> client_data = (d -> fn)(d -> client_data);
1926 
1927 #   ifdef SPARC
1928         GC_ASSERT(GC_blocked_sp != NULL);
1929 #   else
1930         GC_ASSERT(GC_blocked_sp == (ptr_t) &d);
1931 #   endif
1932     GC_blocked_sp = NULL;
1933 }
1934 
1935 #endif /* !THREADS */
1936 
1937 /* Wrapper for functions that are likely to block (or, at least, do not */
1938 /* allocate garbage collected memory and/or manipulate pointers to the  */
1939 /* garbage collected heap) for an appreciable length of time.           */
1940 /* In the single threaded case, GC_do_blocking() (together              */
1941 /* with GC_call_with_gc_active()) might be used to make stack scanning  */
1942 /* more precise (i.e. scan only stack frames of functions that allocate */
1943 /* garbage collected memory and/or manipulate pointers to the garbage   */
1944 /* collected heap).                                                     */
1945 GC_API void * GC_CALL GC_do_blocking(GC_fn_type fn, void * client_data)
1946 {
1947     struct blocking_data my_data;
1948 
1949     my_data.fn = fn;
1950     my_data.client_data = client_data;
1951     GC_with_callee_saves_pushed(GC_do_blocking_inner, (ptr_t)(&my_data));
1952     return my_data.client_data; /* result */
1953 }
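
/* Illustrative sketch (client code, excluded from compilation): wrap a call */
/* that may block for a long time and touches no GC-managed memory, so the   */
/* collector need not wait for it.  If a GC-using sub-region is needed       */
/* inside the callback, it can be bracketed with GC_call_with_gc_active().   */
/* Assumes <unistd.h> and <sys/types.h> for read() and ssize_t.              */
#if 0
  struct read_req { int fd; char *buf; size_t len; ssize_t nread; };

  static void *blocking_read(void *p)
  {
    struct read_req *r = (struct read_req *)p;
    r -> nread = read(r -> fd, r -> buf, r -> len);  /* may block */
    return p;
  }

  char some_buffer[256];
  struct read_req req = { 0 /* fd: stdin */, some_buffer,
                          sizeof(some_buffer), 0 };
  (void)GC_do_blocking(blocking_read, &req);
#endif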
1954 
1955 #if !defined(NO_DEBUGGING)
1956   GC_API void GC_CALL GC_dump(void)
1957   {
1958     GC_printf("***Static roots:\n");
1959     GC_print_static_roots();
1960     GC_printf("\n***Heap sections:\n");
1961     GC_print_heap_sects();
1962     GC_printf("\n***Free blocks:\n");
1963     GC_print_hblkfreelist();
1964     GC_printf("\n***Blocks in use:\n");
1965     GC_print_block_list();
1966   }
1967 #endif /* !NO_DEBUGGING */
1968 
1969 /* Getter functions for the public read-only variables.                 */
1970 
1971 /* GC_get_gc_no() is unsynchronized and should be typically called      */
1972 /* inside the context of GC_call_with_alloc_lock() to prevent data      */
1973 /* races (on multiprocessors).                                          */
1974 GC_API GC_word GC_CALL GC_get_gc_no(void)
1975 {
1976     return GC_gc_no;
1977 }
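
/* Illustrative sketch (client code, excluded from compilation): read the   */
/* collection counter while holding the allocator lock, as the comment      */
/* above recommends for multi-threaded clients.                             */
#if 0
  static void *read_gc_no(void *client_data)
  {
    *(GC_word *)client_data = GC_get_gc_no();
    return NULL;
  }

  GC_word gc_no;
  (void)GC_call_with_alloc_lock(read_gc_no, &gc_no);
#endif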
1978 
1979 #ifdef THREADS
1980   GC_API int GC_CALL GC_get_parallel(void)
1981   {
1982     /* GC_parallel is initialized at start-up.  */
1983     return GC_parallel;
1984   }
1985 #endif
1986 
1987 /* Setter and getter functions for the public R/W function variables.   */
1988 /* These functions are synchronized (like GC_set_warn_proc() and        */
1989 /* GC_get_warn_proc()).                                                 */
1990 
1991 GC_API void GC_CALL GC_set_oom_fn(GC_oom_func fn)
1992 {
1993     GC_ASSERT(NONNULL_ARG_NOT_NULL(fn));
1994     DCL_LOCK_STATE;
1995     LOCK();
1996     GC_oom_fn = fn;
1997     UNLOCK();
1998 }
1999 
2000 GC_API GC_oom_func GC_CALL GC_get_oom_fn(void)
2001 {
2002     GC_oom_func fn;
2003     DCL_LOCK_STATE;
2004     LOCK();
2005     fn = GC_oom_fn;
2006     UNLOCK();
2007     return fn;
2008 }
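
/* Illustrative sketch (client code, excluded from compilation): an         */
/* out-of-memory handler that releases an application-level emergency       */
/* reserve and retries once.  my_reserve_release() is hypothetical.         */
#if 0
  static void * GC_CALLBACK my_oom_fn(size_t bytes_requested)
  {
    if (my_reserve_release())               /* free some non-GC memory   */
      return GC_MALLOC(bytes_requested);    /* retry the allocation      */
    return NULL;                            /* give up; caller sees NULL */
  }

  GC_set_oom_fn(my_oom_fn);
#endif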
2009 
2010 GC_API void GC_CALL GC_set_on_heap_resize(GC_on_heap_resize_proc fn)
2011 {
2012     /* fn may be 0 (means no event notifier). */
2013     DCL_LOCK_STATE;
2014     LOCK();
2015     GC_on_heap_resize = fn;
2016     UNLOCK();
2017 }
2018 
2019 GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void)
2020 {
2021     GC_on_heap_resize_proc fn;
2022     DCL_LOCK_STATE;
2023     LOCK();
2024     fn = GC_on_heap_resize;
2025     UNLOCK();
2026     return fn;
2027 }
2028 
2029 GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc fn)
2030 {
2031     /* fn may be 0 (means no finalizer notifier). */
2032     DCL_LOCK_STATE;
2033     LOCK();
2034     GC_finalizer_notifier = fn;
2035     UNLOCK();
2036 }
2037 
2038 GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void)
2039 {
2040     GC_finalizer_notifier_proc fn;
2041     DCL_LOCK_STATE;
2042     LOCK();
2043     fn = GC_finalizer_notifier;
2044     UNLOCK();
2045     return fn;
2046 }
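
/* Illustrative sketch (client code, excluded from compilation): with       */
/* finalization on demand, the notifier merely records that finalizers are  */
/* ready; the application runs them later via GC_invoke_finalizers() at a   */
/* point of its own choosing (e.g. in its main loop).                       */
#if 0
  static volatile int finalizers_pending = 0;

  static void GC_CALLBACK my_finalizer_notifier(void)
  {
    finalizers_pending = 1;   /* picked up by the application main loop */
  }

  /* Start-up: */
  GC_set_finalize_on_demand(1);
  GC_set_finalizer_notifier(my_finalizer_notifier);

  /* In the main loop: */
  if (finalizers_pending) {
    finalizers_pending = 0;
    (void)GC_invoke_finalizers();
  }
#endif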
2047 
2048 /* Setter and getter functions for the public numeric R/W variables.    */
2049 /* It is safe to call these functions even before GC_INIT().            */
2050 /* These functions are unsynchronized and should be typically called    */
2051 /* inside the context of GC_call_with_alloc_lock() (if called after     */
2052 /* GC_INIT()) to prevent data races (unless it is guaranteed the        */
2053 /* collector is not multi-threaded at that execution point).            */
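
/* Illustrative sketch (client code, excluded from compilation): the usual  */
/* place to call these setters is before GC_INIT(), where no locking or     */
/* GC_call_with_alloc_lock() wrapping is required.                          */
#if 0
  GC_set_all_interior_pointers(0); /* only recognize pointers near object starts */
  GC_set_free_space_divisor(5);    /* larger value: smaller heap, more frequent GCs */
  GC_INIT();
#endif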
2054 
2055 GC_API void GC_CALL GC_set_find_leak(int value)
2056 {
2057     /* value is of boolean type. */
2058     GC_find_leak = value;
2059 }
2060 
2061 GC_API int GC_CALL GC_get_find_leak(void)
2062 {
2063     return GC_find_leak;
2064 }
2065 
2066 GC_API void GC_CALL GC_set_all_interior_pointers(int value)
2067 {
2068     DCL_LOCK_STATE;
2069 
2070     GC_all_interior_pointers = value ? 1 : 0;
2071     if (GC_is_initialized) {
2072       /* Changing the GC_all_interior_pointers value after the GC is    */
2073       /* initialized is not recommended, but the collector appears to   */
2074       /* work correctly even after switching the mode.                  */
2075       LOCK();
2076       GC_initialize_offsets(); /* NOTE: this resets manual offsets as well */
2077       if (!GC_all_interior_pointers)
2078         GC_bl_init_no_interiors();
2079       UNLOCK();
2080     }
2081 }
2082 
2083 GC_API int GC_CALL GC_get_all_interior_pointers(void)
2084 {
2085     return GC_all_interior_pointers;
2086 }
2087 
2088 GC_API void GC_CALL GC_set_finalize_on_demand(int value)
2089 {
2090     GC_ASSERT(value != -1);
2091     /* value is of boolean type. */
2092     GC_finalize_on_demand = value;
2093 }
2094 
2095 GC_API int GC_CALL GC_get_finalize_on_demand(void)
2096 {
2097     return GC_finalize_on_demand;
2098 }
2099 
2100 GC_API void GC_CALL GC_set_java_finalization(int value)
2101 {
2102     GC_ASSERT(value != -1);
2103     /* value is of boolean type. */
2104     GC_java_finalization = value;
2105 }
2106 
2107 GC_API int GC_CALL GC_get_java_finalization(void)
2108 {
2109     return GC_java_finalization;
2110 }
2111 
2112 GC_API void GC_CALL GC_set_dont_expand(int value)
2113 {
2114     GC_ASSERT(value != -1);
2115     /* value is of boolean type. */
2116     GC_dont_expand = value;
2117 }
2118 
2119 GC_API int GC_CALL GC_get_dont_expand(void)
2120 {
2121     return GC_dont_expand;
2122 }
2123 
2124 GC_API void GC_CALL GC_set_no_dls(int value)
2125 {
2126     GC_ASSERT(value != -1);
2127     /* value is of boolean type. */
2128     GC_no_dls = value;
2129 }
2130 
2131 GC_API int GC_CALL GC_get_no_dls(void)
2132 {
2133     return GC_no_dls;
2134 }
2135 
2136 GC_API void GC_CALL GC_set_non_gc_bytes(GC_word value)
2137 {
2138     GC_non_gc_bytes = value;
2139 }
2140 
2141 GC_API GC_word GC_CALL GC_get_non_gc_bytes(void)
2142 {
2143     return GC_non_gc_bytes;
2144 }
2145 
2146 GC_API void GC_CALL GC_set_free_space_divisor(GC_word value)
2147 {
2148     GC_ASSERT(value > 0);
2149     GC_free_space_divisor = value;
2150 }
2151 
2152 GC_API GC_word GC_CALL GC_get_free_space_divisor(void)
2153 {
2154     return GC_free_space_divisor;
2155 }
2156 
2157 GC_API void GC_CALL GC_set_max_retries(GC_word value)
2158 {
2159     GC_ASSERT(value != ~(GC_word)0);
2160     GC_max_retries = value;
2161 }
2162 
2163 GC_API GC_word GC_CALL GC_get_max_retries(void)
2164 {
2165     return GC_max_retries;
2166 }
2167 
2168 GC_API void GC_CALL GC_set_dont_precollect(int value)
2169 {
2170     GC_ASSERT(value != -1);
2171     /* value is of boolean type. */
2172     GC_dont_precollect = value;
2173 }
2174 
2175 GC_API int GC_CALL GC_get_dont_precollect(void)
2176 {
2177     return GC_dont_precollect;
2178 }
2179 
2180 GC_API void GC_CALL GC_set_full_freq(int value)
2181 {
2182     GC_ASSERT(value >= 0);
2183     GC_full_freq = value;
2184 }
2185 
2186 GC_API int GC_CALL GC_get_full_freq(void)
2187 {
2188     return GC_full_freq;
2189 }
2190 
2191 GC_API void GC_CALL GC_set_time_limit(unsigned long value)
2192 {
2193     GC_ASSERT(value != (unsigned long)-1L);
2194     GC_time_limit = value;
2195 }
2196 
2197 GC_API unsigned long GC_CALL GC_get_time_limit(void)
2198 {
2199     return GC_time_limit;
2200 }
2201 
2202 GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int value)
2203 {
2204     GC_force_unmap_on_gcollect = (GC_bool)value;
2205 }
2206 
2207 GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void)
2208 {
2209     return (int)GC_force_unmap_on_gcollect;
2210 }
2211