/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_PRIVATE_H
#define GC_PRIVATE_H

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#ifndef GC_BUILD
# define GC_BUILD
#endif

#if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__) \
     || (defined(__CYGWIN__) && !defined(USE_MMAP))) \
    && !defined(_GNU_SOURCE)
  /* Can't test LINUX, since this must be defined before other includes. */
# define _GNU_SOURCE 1
#endif

#if defined(__INTERIX) && !defined(_ALL_SOURCE)
# define _ALL_SOURCE 1
#endif

#if (defined(DGUX) && defined(GC_THREADS) || defined(DGUX386_THREADS) \
     || defined(GC_DGUX386_THREADS)) && !defined(_USING_POSIX4A_DRAFT10)
# define _USING_POSIX4A_DRAFT10 1
#endif

#if defined(__MINGW32__) && !defined(__MINGW_EXCPT_DEFINE_PSDK) \
    && defined(__i386__) && defined(GC_EXTERN) /* defined in gc.c */
  /* See the description in mark.c.     */
# define __MINGW_EXCPT_DEFINE_PSDK 1
#endif

# if defined(NO_DEBUGGING) && !defined(GC_ASSERTIONS) && !defined(NDEBUG)
    /* To turn off assertion checking (in atomic_ops.h). */
#   define NDEBUG 1
# endif

#ifndef GC_H
# include "../gc.h"
#endif

#include <stdlib.h>
#if !defined(sony_news)
# include <stddef.h>
#endif

#ifdef DGUX
# include <sys/types.h>
# include <sys/time.h>
# include <sys/resource.h>
#endif /* DGUX */

#ifdef BSD_TIME
# include <sys/types.h>
# include <sys/time.h>
# include <sys/resource.h>
#endif /* BSD_TIME */

#ifdef PARALLEL_MARK
# define AO_REQUIRE_CAS
# if !defined(__GNUC__) && !defined(AO_ASSUME_WINDOWS98)
#   define AO_ASSUME_WINDOWS98
# endif
#endif

#include "../gc_tiny_fl.h"
#include "../gc_mark.h"

typedef GC_word word;
typedef GC_signed_word signed_word;
typedef unsigned int unsigned32;

typedef int GC_bool;
#define TRUE 1
#define FALSE 0

#ifndef PTR_T_DEFINED
  typedef char * ptr_t; /* A generic pointer to which we can add        */
                        /* byte displacements and which can be used     */
                        /* for address comparisons.                     */
# define PTR_T_DEFINED
#endif

#ifndef SIZE_MAX
# include <limits.h>
#endif
#if defined(SIZE_MAX) && !defined(CPPCHECK)
# define GC_SIZE_MAX ((size_t)SIZE_MAX)
            /* Extra cast to workaround some buggy SIZE_MAX definitions. */
#else
# define GC_SIZE_MAX (~(size_t)0)
#endif

#if GC_GNUC_PREREQ(3, 0) && !defined(LINT2)
# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
  /* Equivalent to (expr), but predict that usually (expr)==outcome. */
#else
# define EXPECT(expr, outcome) (expr)
#endif /* __GNUC__ */

/* Saturated addition of size_t values.  Used to avoid value wrap       */
/* around on overflow.  The arguments should have no side effects.      */
#define SIZET_SAT_ADD(a, b) \
            (EXPECT((a) < GC_SIZE_MAX - (b), TRUE) ? (a) + (b) : GC_SIZE_MAX)
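
/* Illustrative sketch (not compiled into the collector): SIZET_SAT_ADD */
/* clamps to GC_SIZE_MAX instead of wrapping, e.g.                      */
/* SIZET_SAT_ADD(GC_SIZE_MAX - 4, 16) yields GC_SIZE_MAX.  The helper   */
/* name below is hypothetical.                                          */
#if 0 /* usage sketch */
  static size_t example_padded_size(size_t lb)
  {
    /* Adding 16 bytes of padding cannot overflow past GC_SIZE_MAX.     */
    return SIZET_SAT_ADD(lb, 16);
  }
#endif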

#include "gcconfig.h"

#if !defined(GC_ATOMIC_UNCOLLECTABLE) && defined(ATOMIC_UNCOLLECTABLE)
  /* For compatibility with old-style naming. */
# define GC_ATOMIC_UNCOLLECTABLE
#endif

#ifndef GC_INNER
  /* This tagging macro must be used at the start of every variable     */
  /* definition which is declared with GC_EXTERN.  Should be also used  */
  /* for the GC-scope function definitions and prototypes.  Must not be */
  /* used in gcconfig.h.  Shouldn't be used for the debugging-only      */
  /* functions.  Currently, not used for the functions declared in or   */
  /* called from the "dated" source files (located in "extra" folder).  */
# if defined(GC_DLL) && defined(__GNUC__) && !defined(MSWIN32) \
        && !defined(MSWINCE) && !defined(CYGWIN32)
#   if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
      /* See the corresponding GC_API definition. */
#     define GC_INNER __attribute__((__visibility__("hidden")))
#   else
      /* The attribute is unsupported. */
#     define GC_INNER /* empty */
#   endif
# else
#   define GC_INNER /* empty */
# endif

# define GC_EXTERN extern GC_INNER
  /* Used only for the GC-scope variables (prefixed with "GC_")         */
  /* declared in the header files.  Must not be used for thread-local   */
  /* variables.  Must not be used in gcconfig.h.  Shouldn't be used for */
  /* the debugging-only or profiling-only variables.  Currently, not    */
  /* used for the variables accessed from the "dated" source files      */
  /* (specific.c/h, and in the "extra" folder).                         */
  /* The corresponding variable definition must start with GC_INNER.    */
#endif /* !GC_INNER */

#ifdef __cplusplus
  /* Register storage specifier is deprecated in C++11. */
# define REGISTER /* empty */
#else
  /* Used only for several local variables in the performance-critical  */
  /* functions.  Should not be used for new code.                       */
# define REGISTER register
#endif

#if defined(M68K) && defined(__GNUC__)
  /* By default, __alignof__(word) is 2 on m68k.  Use this attribute to */
  /* have proper word alignment (i.e. 4-byte on a 32-bit arch).         */
# define GC_ATTR_WORD_ALIGNED __attribute__((__aligned__(sizeof(word))))
#else
# define GC_ATTR_WORD_ALIGNED /* empty */
#endif

#ifndef HEADERS_H
# include "gc_hdrs.h"
#endif

#ifndef GC_ATTR_NO_SANITIZE_ADDR
# ifndef ADDRESS_SANITIZER
#   define GC_ATTR_NO_SANITIZE_ADDR /* empty */
# elif GC_CLANG_PREREQ(3, 8)
#   define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize("address")))
# else
#   define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize_address))
# endif
#endif /* !GC_ATTR_NO_SANITIZE_ADDR */

#ifndef GC_ATTR_NO_SANITIZE_MEMORY
# ifndef MEMORY_SANITIZER
#   define GC_ATTR_NO_SANITIZE_MEMORY /* empty */
# elif GC_CLANG_PREREQ(3, 8)
#   define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory")))
# else
#   define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
# endif
#endif /* !GC_ATTR_NO_SANITIZE_MEMORY */

#ifndef GC_ATTR_NO_SANITIZE_THREAD
# ifndef THREAD_SANITIZER
#   define GC_ATTR_NO_SANITIZE_THREAD /* empty */
# elif GC_CLANG_PREREQ(3, 8)
#   define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
# else
#   define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
# endif
#endif /* !GC_ATTR_NO_SANITIZE_THREAD */

#ifndef GC_ATTR_UNUSED
# if GC_GNUC_PREREQ(3, 4)
#   define GC_ATTR_UNUSED __attribute__((__unused__))
# else
#   define GC_ATTR_UNUSED /* empty */
# endif
#endif /* !GC_ATTR_UNUSED */

#ifdef HAVE_CONFIG_H
  /* The "inline" keyword is determined by Autoconf AC_C_INLINE.    */
# define GC_INLINE static inline
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER) || defined(__DMC__) \
        || (GC_GNUC_PREREQ(3, 0) && defined(__STRICT_ANSI__)) \
        || defined(__WATCOMC__)
# define GC_INLINE static __inline
#elif GC_GNUC_PREREQ(3, 0) || defined(__sun)
# define GC_INLINE static inline
#else
# define GC_INLINE static
#endif

#ifndef GC_ATTR_NOINLINE
# if GC_GNUC_PREREQ(4, 0)
#   define GC_ATTR_NOINLINE __attribute__((__noinline__))
# elif _MSC_VER >= 1400
#   define GC_ATTR_NOINLINE __declspec(noinline)
# else
#   define GC_ATTR_NOINLINE /* empty */
# endif
#endif

#ifndef GC_API_OSCALL
  /* This is used to identify GC routines called by name from OS.       */
# if defined(__GNUC__)
#   if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
      /* Same as GC_API if GC_DLL.      */
#     define GC_API_OSCALL extern __attribute__((__visibility__("default")))
#   else
      /* The attribute is unsupported.  */
#     define GC_API_OSCALL extern
#   endif
# else
#   define GC_API_OSCALL GC_API
# endif
#endif

#ifndef GC_API_PRIV
# define GC_API_PRIV GC_API
#endif

#if defined(THREADS) && !defined(NN_PLATFORM_CTR) \
    && !defined(SN_TARGET_ORBIS) && !defined(SN_TARGET_PSP2)
# include "gc_atomic_ops.h"
# ifndef AO_HAVE_compiler_barrier
#   define AO_HAVE_compiler_barrier 1
# endif
#endif

#include "gc_locks.h"

#define GC_WORD_MAX (~(word)0)

# ifdef STACK_GROWS_DOWN
#   define COOLER_THAN >
#   define HOTTER_THAN <
#   define MAKE_COOLER(x,y) if ((word)((x) + (y)) > (word)(x)) {(x) += (y);} \
                            else (x) = (ptr_t)GC_WORD_MAX
#   define MAKE_HOTTER(x,y) (x) -= (y)
# else
#   define COOLER_THAN <
#   define HOTTER_THAN >
#   define MAKE_COOLER(x,y) if ((word)((x) - (y)) < (word)(x)) {(x) -= (y);} \
                            else (x) = 0
#   define MAKE_HOTTER(x,y) (x) += (y)
# endif
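
/* Illustrative sketch (not compiled): on a downward-growing stack a    */
/* "hotter" (more recently pushed) address is the smaller one, so       */
/* COOLER_THAN/HOTTER_THAN hide the growth direction from callers.      */
/* The helper name below is hypothetical.                               */
#if 0 /* usage sketch */
  static GC_bool example_frame_is_older(ptr_t frame, ptr_t reference)
  {
    /* TRUE if frame was pushed before (is cooler than) reference.      */
    return (GC_bool)((word)frame COOLER_THAN (word)reference);
  }
#endif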

#if defined(AMIGA) && defined(__SASC)
#   define GC_FAR __far
#else
#   define GC_FAR
#endif


/*********************************/
/*                               */
/* Definitions for conservative  */
/* collector                     */
/*                               */
/*********************************/

/*********************************/
/*                               */
/* Easily changeable parameters  */
/*                               */
/*********************************/

/* #define ALL_INTERIOR_POINTERS */
                    /* Forces all pointers into the interior of an      */
                    /* object to be considered valid.  Also causes the  */
                    /* sizes of all objects to be inflated by at least  */
                    /* one byte.  This should suffice to guarantee      */
                    /* that in the presence of a compiler that does     */
                    /* not perform garbage-collector-unsafe             */
                    /* optimizations, all portable, strictly ANSI       */
                    /* conforming C programs should be safely usable    */
                    /* with malloc replaced by GC_malloc and free       */
                    /* calls removed.  There are several disadvantages: */
                    /* 1. There are probably no interesting, portable,  */
                    /*    strictly ANSI conforming C programs.          */
                    /* 2. This option makes it hard for the collector   */
                    /*    to allocate space that is not "pointed to"    */
                    /*    by integers, etc.  Under SunOS 4.X with a     */
                    /*    statically linked libc, we empirically        */
                    /*    observed that it would be difficult to        */
                    /*    allocate individual objects larger than 100K. */
                    /*    Even if only smaller objects are allocated,   */
                    /*    more swap space is likely to be needed.       */
                    /*    Fortunately, much of this will never be       */
                    /*    touched.                                      */
                    /* If you can easily avoid using this option, do.   */
                    /* If not, try to keep individual objects small.    */
                    /* This is now really controlled at startup,        */
                    /* through GC_all_interior_pointers.                */

EXTERN_C_BEGIN

#ifndef GC_NO_FINALIZATION
# define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
  GC_INNER void GC_notify_or_invoke_finalizers(void);
                        /* If GC_finalize_on_demand is not set, invoke  */
                        /* eligible finalizers. Otherwise:              */
                        /* Call *GC_finalizer_notifier if there are     */
                        /* finalizers to be run, and we haven't called  */
                        /* this procedure yet this GC cycle.            */

  GC_INNER void GC_finalize(void);
                        /* Perform all indicated finalization actions   */
                        /* on unmarked objects.                         */
                        /* Unreachable finalizable objects are enqueued */
                        /* for processing by GC_invoke_finalizers.      */
                        /* Invoked with lock.                           */

# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_INNER void GC_process_togglerefs(void);
                        /* Process the toggle-refs before GC starts.    */
# endif
# ifndef SMALL_CONFIG
    GC_INNER void GC_print_finalization_stats(void);
# endif
#else
# define GC_INVOKE_FINALIZERS() (void)0
#endif /* GC_NO_FINALIZATION */

#if !defined(DONT_ADD_BYTE_AT_END)
# ifdef LINT2
    /* Explicitly instruct the code analysis tool that                  */
    /* GC_all_interior_pointers is assumed to have only 0 or 1 value.   */
#   define EXTRA_BYTES ((size_t)(GC_all_interior_pointers? 1 : 0))
# else
#   define EXTRA_BYTES (size_t)GC_all_interior_pointers
# endif
# define MAX_EXTRA_BYTES 1
#else
# define EXTRA_BYTES 0
# define MAX_EXTRA_BYTES 0
#endif


# ifndef LARGE_CONFIG
#   define MINHINCR 16   /* Minimum heap increment, in blocks of HBLKSIZE  */
                         /* Must be multiple of largest page size.         */
#   define MAXHINCR 2048 /* Maximum heap increment, in blocks              */
# else
#   define MINHINCR 64
#   define MAXHINCR 4096
# endif

# define BL_LIMIT GC_black_list_spacing
                           /* If we need a block of N bytes, and we have */
                           /* a block of N + BL_LIMIT bytes available,   */
                           /* and N > BL_LIMIT,                          */
                           /* but all possible positions in it are       */
                           /* blacklisted, we just use it anyway (and    */
                           /* print a warning, if warnings are enabled). */
                           /* This risks subsequently leaking the block  */
                           /* due to a false reference.  But not using   */
                           /* the block risks unreasonable immediate     */
                           /* heap growth.                               */

/*********************************/
/*                               */
/* Stack saving for debugging    */
/*                               */
/*********************************/

#ifdef NEED_CALLINFO
    struct callinfo {
        word ci_pc;     /* Caller, not callee, pc       */
#       if NARGS > 0
            word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
#       endif
#       if (NFRAMES * (NARGS + 1)) % 2 == 1
            /* Likely alignment problem. */
            word ci_dummy;
#       endif
    };
#endif

#ifdef SAVE_CALL_CHAIN
  /* Fill in the pc and argument information for up to NFRAMES of my    */
  /* callers.  Ignore my frame and my caller's frame.                   */
  GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
  GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
#endif

EXTERN_C_END

/*********************************/
/*                               */
/* OS interface routines         */
/*                               */
/*********************************/

#ifndef NO_CLOCK
#ifdef BSD_TIME
# undef CLOCK_TYPE
# undef GET_TIME
# undef MS_TIME_DIFF
# define CLOCK_TYPE struct timeval
# define CLOCK_TYPE_INITIALIZER { 0, 0 }
# define GET_TIME(x) \
                do { \
                  struct rusage rusage; \
                  getrusage(RUSAGE_SELF, &rusage); \
                  x = rusage.ru_utime; \
                } while (0)
# define MS_TIME_DIFF(a,b) ((unsigned long)((long)(a.tv_sec-b.tv_sec) * 1000 \
                                    + (long)(a.tv_usec-b.tv_usec) / 1000))
                            /* "a" time is expected to be not earlier than  */
                            /* "b" one; the result has unsigned long type.  */
#elif defined(MSWIN32) || defined(MSWINCE)
# ifndef WIN32_LEAN_AND_MEAN
#   define WIN32_LEAN_AND_MEAN 1
# endif
# define NOSERVICE
# include <windows.h>
# include <winbase.h>
# define CLOCK_TYPE DWORD
# ifdef MSWINRT_FLAVOR
#   define GET_TIME(x) (void)(x = (DWORD)GetTickCount64())
# else
#   define GET_TIME(x) (void)(x = GetTickCount())
# endif
# define MS_TIME_DIFF(a,b) ((unsigned long)((a)-(b)))
#elif defined(NN_PLATFORM_CTR)
# define CLOCK_TYPE long long
  EXTERN_C_BEGIN
  CLOCK_TYPE n3ds_get_system_tick(void);
  CLOCK_TYPE n3ds_convert_tick_to_ms(CLOCK_TYPE tick);
  EXTERN_C_END
# define GET_TIME(x) (void)(x = n3ds_get_system_tick())
# define MS_TIME_DIFF(a,b) ((unsigned long)n3ds_convert_tick_to_ms((a)-(b)))
#else /* !BSD_TIME && !NN_PLATFORM_CTR && !MSWIN32 && !MSWINCE */
# include <time.h>
# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
#   include <machine/limits.h>
#   define CLOCKS_PER_SEC CLK_TCK
# endif
# if !defined(CLOCKS_PER_SEC)
#   define CLOCKS_PER_SEC 1000000
    /* This is technically a bug in the implementation.                 */
    /* ANSI requires that CLOCKS_PER_SEC be defined.  But at least      */
    /* under SunOS 4.1.1, it isn't.  Also note that the combination of  */
    /* ANSI C and POSIX is incredibly gross here.  The type clock_t     */
    /* is used by both clock() and times().  But on some machines       */
    /* these use different notions of a clock tick, CLOCKS_PER_SEC      */
    /* seems to apply only to clock.  Hence we use it here.  On many    */
    /* machines, including SunOS, clock actually uses units of          */
    /* microseconds (which are not really clock ticks).                 */
# endif
# define CLOCK_TYPE clock_t
# define GET_TIME(x) (void)(x = clock())
# define MS_TIME_DIFF(a,b) (CLOCKS_PER_SEC % 1000 == 0 ? \
        (unsigned long)((a) - (b)) / (unsigned long)(CLOCKS_PER_SEC / 1000) \
        : ((unsigned long)((a) - (b)) * 1000) / (unsigned long)CLOCKS_PER_SEC)
  /* Avoid using double type since some targets (like ARM) might        */
  /* require -lm option for double-to-long conversion.                  */
#endif /* !BSD_TIME && !MSWIN32 */
# ifndef CLOCK_TYPE_INITIALIZER
    /* This is used to initialize CLOCK_TYPE variables (to some value)  */
    /* to avoid "variable might be uninitialized" compiler warnings.    */
#   define CLOCK_TYPE_INITIALIZER 0
# endif
#endif /* !NO_CLOCK */
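
/* Illustrative sketch (not compiled): measuring an interval with the   */
/* CLOCK_TYPE abstraction above.  do_some_work() is a hypothetical      */
/* callee; the helper name is made up as well.                          */
#if 0 /* usage sketch */
  static unsigned long example_elapsed_ms(void)
  {
    CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
    CLOCK_TYPE current_time = CLOCK_TYPE_INITIALIZER;

    GET_TIME(start_time);
    do_some_work();
    GET_TIME(current_time);
    /* The first MS_TIME_DIFF argument must be the later time.          */
    return MS_TIME_DIFF(current_time, start_time);
  }
#endif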

/* We use bzero and bcopy internally.  They may not be available.       */
# if defined(SPARC) && defined(SUNOS4) \
     || (defined(M68K) && defined(NEXT)) || defined(VAX)
#   define BCOPY_EXISTS
# elif defined(AMIGA) || defined(DARWIN)
#   include <string.h>
#   define BCOPY_EXISTS
# elif defined(MACOS) && defined(POWERPC)
#   include <MacMemory.h>
#   define bcopy(x,y,n) BlockMoveData(x, y, n)
#   define bzero(x,n) BlockZero(x, n)
#   define BCOPY_EXISTS
# endif

# if !defined(BCOPY_EXISTS) || defined(CPPCHECK)
#   include <string.h>
#   define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
#   define BZERO(x,n)  memset(x, 0, (size_t)(n))
# else
#   define BCOPY(x,y,n) bcopy((void *)(x),(void *)(y),(size_t)(n))
#   define BZERO(x,n) bzero((void *)(x),(size_t)(n))
# endif

#ifdef PCR
# include "th/PCR_ThCtl.h"
#endif

EXTERN_C_BEGIN

/*
 * Stop and restart mutator threads.
 */
# ifdef PCR
#     define STOP_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever)
#     define START_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever)
# else
#   if defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
       || defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
      GC_INNER void GC_stop_world(void);
      GC_INNER void GC_start_world(void);
#     define STOP_WORLD() GC_stop_world()
#     define START_WORLD() GC_start_world()
#   else
        /* Just do a sanity check: we are not inside GC_do_blocking().  */
#     define STOP_WORLD() GC_ASSERT(GC_blocked_sp == NULL)
#     define START_WORLD()
#   endif
# endif

#ifdef THREADS
  GC_EXTERN GC_on_thread_event_proc GC_on_thread_event;
#endif

/* Abandon ship */
# if defined(SMALL_CONFIG) || defined(PCR)
#   define GC_on_abort(msg) (void)0 /* be silent on abort */
# else
    GC_API_PRIV GC_abort_func GC_on_abort;
# endif
# if defined(CPPCHECK)
#   define ABORT(msg) { GC_on_abort(msg); abort(); }
# elif defined(PCR)
#   define ABORT(s) PCR_Base_Panic(s)
# else
#   if defined(MSWIN_XBOX1) && !defined(DebugBreak)
#     define DebugBreak() __debugbreak()
#   elif defined(MSWINCE) && !defined(DebugBreak) \
       && (!defined(UNDER_CE) || (defined(__MINGW32CE__) && !defined(ARM32)))
      /* This simplifies linking for WinCE (and, probably, doesn't      */
      /* hurt debugging much); use -DDebugBreak=DebugBreak to override  */
      /* this behavior if really needed.  This is also a workaround for */
      /* x86mingw32ce toolchain (if it is still declaring DebugBreak()  */
      /* instead of defining it as a macro).                            */
#     define DebugBreak() _exit(-1) /* there is no abort() in WinCE */
#   endif
#   if defined(MSWIN32) && (defined(NO_DEBUGGING) || defined(LINT2))
      /* A more user-friendly abort after showing fatal message.        */
#     define ABORT(msg) (GC_on_abort(msg), _exit(-1))
                /* Exit on error without running "at-exit" callbacks.   */
#   elif defined(MSWINCE) && defined(NO_DEBUGGING)
#     define ABORT(msg) (GC_on_abort(msg), ExitProcess(-1))
#   elif defined(MSWIN32) || defined(MSWINCE)
#     if defined(_CrtDbgBreak) && defined(_DEBUG) && defined(_MSC_VER)
#       define ABORT(msg) { GC_on_abort(msg); \
                            _CrtDbgBreak() /* __debugbreak() */; }
#     else
#       define ABORT(msg) { GC_on_abort(msg); DebugBreak(); }
                /* Note that: on a WinCE box, this could be silently    */
                /* ignored (i.e., the program is not aborted);          */
                /* DebugBreak is a statement in some toolchains.        */
#     endif
#   else
#     define ABORT(msg) (GC_on_abort(msg), abort())
#   endif /* !MSWIN32 */
# endif /* !PCR */

/* For abort message with 1-3 arguments.  C_msg and C_fmt should be     */
/* literals.  C_msg should not contain format specifiers.  Arguments    */
/* should match their format specifiers.                                */
#define ABORT_ARG1(C_msg, C_fmt, arg1) \
                do { \
                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", arg1); \
                  ABORT(C_msg); \
                } while (0)
#define ABORT_ARG2(C_msg, C_fmt, arg1, arg2) \
                do { \
                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", arg1, arg2); \
                  ABORT(C_msg); \
                } while (0)
#define ABORT_ARG3(C_msg, C_fmt, arg1, arg2, arg3) \
                do { \
                  GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", \
                                    arg1, arg2, arg3); \
                  ABORT(C_msg); \
                } while (0)
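
/* Illustrative sketch (not compiled): ABORT_ARGn logs the formatted    */
/* details and then ABORTs with just the literal message.  The size     */
/* variable below is made up for the example.                           */
#if 0 /* usage sketch */
  if (EXPECT(requested_size > GC_SIZE_MAX / 2, FALSE))
    ABORT_ARG1("Requested allocation is too large",
               ": %lu bytes", (unsigned long)requested_size);
#endif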

/* Same as ABORT but does not have 'no-return' attribute.       */
/* ABORT on a dummy condition (which is always true).           */
#define ABORT_RET(msg) \
              if ((signed_word)GC_current_warn_proc == -1) {} else ABORT(msg)

/* Exit abnormally, but without making a mess (e.g. out of memory) */
# ifdef PCR
#   define EXIT() PCR_Base_Exit(1,PCR_waitForever)
# else
#   define EXIT() (GC_on_abort(NULL), exit(1 /* EXIT_FAILURE */))
# endif

/* Print warning message, e.g. almost out of memory.    */
/* The argument (if any) format specifier should be:    */
/* "%s", "%p" or "%"WARN_PRIdPTR.                       */
#define WARN(msg, arg) \
    (*GC_current_warn_proc)((/* no const */ char *)("GC Warning: " msg), \
                            (word)(arg))
GC_EXTERN GC_warn_proc GC_current_warn_proc;

/* Print format type macro for decimal signed_word value passed to WARN(). */
/* This could be redefined for Win64 or LLP64, but typically should     */
/* not be done as the WARN format string is, possibly, processed on the */
/* client side, so non-standard print type modifiers (like MS "I64d")   */
/* should be avoided here if possible.                                  */
#ifndef WARN_PRIdPTR
  /* Assume sizeof(void *) == sizeof(long) (or a little-endian machine) */
# define WARN_PRIdPTR "ld"
#endif
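
/* Illustrative sketch (not compiled): a warning with a signed_word     */
/* argument formatted via WARN_PRIdPTR.  The variable is hypothetical.  */
#if 0 /* usage sketch */
  WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n",
       (signed_word)bytes_requested);
#endif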

/* A tagging macro (for a code static analyzer) to indicate that the    */
/* string obtained from an untrusted source (e.g., argv[], getenv) is   */
/* safe to use in a vulnerable operation (e.g., open, exec).            */
#define TRUSTED_STRING(s) (char*)COVERT_DATAFLOW(s)

/* Get environment entry */
#ifdef GC_READ_ENV_FILE
  GC_INNER char * GC_envfile_getenv(const char *name);
# define GETENV(name) GC_envfile_getenv(name)
#elif defined(NO_GETENV) && !defined(CPPCHECK)
# define GETENV(name) NULL
#elif defined(EMPTY_GETENV_RESULTS)
  /* Workaround for a reputed Wine bug.   */
  GC_INLINE char * fixed_getenv(const char *name)
  {
    char *value = getenv(name);
    return value != NULL && *value != '\0' ? value : NULL;
  }
# define GETENV(name) fixed_getenv(name)
#else
# define GETENV(name) getenv(name)
#endif
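
/* Illustrative sketch (not compiled): GETENV yields NULL when the      */
/* variable is unset (or empty, on EMPTY_GETENV_RESULTS targets).  The  */
/* environment variable name below is made up.                          */
#if 0 /* usage sketch */
  {
    char *interval_string = GETENV("GC_EXAMPLE_INTERVAL");

    if (interval_string != NULL) {
      long interval = atol(interval_string);
      /* ... use the parsed value ... */
    }
  }
#endif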

EXTERN_C_END

#if defined(DARWIN)
# include <mach/thread_status.h>
# ifndef MAC_OS_X_VERSION_MAX_ALLOWED
#   include <AvailabilityMacros.h>
                /* Include this header just to import the above macro.  */
# endif
# if defined(POWERPC)
#   if CPP_WORDSZ == 32
#     define GC_THREAD_STATE_T          ppc_thread_state_t
#   else
#     define GC_THREAD_STATE_T          ppc_thread_state64_t
#     define GC_MACH_THREAD_STATE       PPC_THREAD_STATE64
#     define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
#   endif
# elif defined(I386) || defined(X86_64)
#   if CPP_WORDSZ == 32
#     if defined(i386_THREAD_STATE_COUNT) && !defined(x86_THREAD_STATE32_COUNT)
        /* Use old naming convention for 32-bit x86.    */
#       define GC_THREAD_STATE_T                i386_thread_state_t
#       define GC_MACH_THREAD_STATE             i386_THREAD_STATE
#       define GC_MACH_THREAD_STATE_COUNT       i386_THREAD_STATE_COUNT
#     else
#       define GC_THREAD_STATE_T                x86_thread_state32_t
#       define GC_MACH_THREAD_STATE             x86_THREAD_STATE32
#       define GC_MACH_THREAD_STATE_COUNT       x86_THREAD_STATE32_COUNT
#     endif
#   else
#     define GC_THREAD_STATE_T          x86_thread_state64_t
#     define GC_MACH_THREAD_STATE       x86_THREAD_STATE64
#     define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
#   endif
# elif defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE) \
       && !defined(CPPCHECK)
#   define GC_THREAD_STATE_T            arm_unified_thread_state_t
#   define GC_MACH_THREAD_STATE         ARM_UNIFIED_THREAD_STATE
#   define GC_MACH_THREAD_STATE_COUNT   ARM_UNIFIED_THREAD_STATE_COUNT
# elif defined(ARM32)
#   define GC_THREAD_STATE_T            arm_thread_state_t
#   ifdef ARM_MACHINE_THREAD_STATE_COUNT
#     define GC_MACH_THREAD_STATE       ARM_MACHINE_THREAD_STATE
#     define GC_MACH_THREAD_STATE_COUNT ARM_MACHINE_THREAD_STATE_COUNT
#   endif
# elif defined(AARCH64)
#   define GC_THREAD_STATE_T            arm_thread_state64_t
#   define GC_MACH_THREAD_STATE         ARM_THREAD_STATE64
#   define GC_MACH_THREAD_STATE_COUNT   ARM_THREAD_STATE64_COUNT
# elif !defined(CPPCHECK)
#   error define GC_THREAD_STATE_T
# endif
# ifndef GC_MACH_THREAD_STATE
#   define GC_MACH_THREAD_STATE         MACHINE_THREAD_STATE
#   define GC_MACH_THREAD_STATE_COUNT   MACHINE_THREAD_STATE_COUNT
# endif

# if CPP_WORDSZ == 32
#   define GC_MACH_HEADER   mach_header
#   define GC_MACH_SECTION  section
#   define GC_GETSECTBYNAME getsectbynamefromheader
# else
#   define GC_MACH_HEADER   mach_header_64
#   define GC_MACH_SECTION  section_64
#   define GC_GETSECTBYNAME getsectbynamefromheader_64
# endif

  /* Try to work out the right way to access thread state structure     */
  /* members.  The structure has changed its definition in different    */
  /* Darwin versions.  This now defaults to the (older) names           */
  /* without __, thus hopefully, not breaking any existing              */
  /* Makefile.direct builds.                                            */
# if __DARWIN_UNIX03
#   define THREAD_FLD_NAME(x) __ ## x
# else
#   define THREAD_FLD_NAME(x) x
# endif
# if defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE)
#   define THREAD_FLD(x) ts_32.THREAD_FLD_NAME(x)
# else
#   define THREAD_FLD(x) THREAD_FLD_NAME(x)
# endif
#endif /* DARWIN */

#include "../gc_tiny_fl.h"

#include <setjmp.h>

#if __STDC_VERSION__ >= 201112L
# include <assert.h> /* for static_assert */
#endif

EXTERN_C_BEGIN

/*********************************/
/*                               */
/* Word-size-dependent defines   */
/*                               */
/*********************************/

#if CPP_WORDSZ == 32
# define WORDS_TO_BYTES(x) ((x)<<2)
# define BYTES_TO_WORDS(x) ((x)>>2)
# define LOGWL             ((word)5) /* log[2] of CPP_WORDSZ    */
# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word        */
# if ALIGNMENT != 4
#   define UNALIGNED_PTRS
# endif
#endif

#if CPP_WORDSZ == 64
#  define WORDS_TO_BYTES(x)   ((x)<<3)
#  define BYTES_TO_WORDS(x)   ((x)>>3)
#  define LOGWL               ((word)6)    /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x3f)        /* n mod size of word            */
#  if ALIGNMENT != 8
#       define UNALIGNED_PTRS
#  endif
#endif

/* The first TINY_FREELISTS free lists correspond to the first  */
/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep      */
/* separate free lists for each multiple of GRANULE_BYTES       */
/* up to (TINY_FREELISTS-1) * GRANULE_BYTES.  After that they   */
/* may be spread out further.                                   */

#define GRANULE_BYTES GC_GRANULE_BYTES
#define TINY_FREELISTS GC_TINY_FREELISTS

#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB  ((word)1 << (WORDSZ-1))
#define BYTES_PER_WORD      ((word)(sizeof (word)))
#define divWORDSZ(n) ((n) >> LOGWL)     /* divide n by size of word */

#if GRANULE_BYTES == 8
# define BYTES_TO_GRANULES(n) ((n)>>3)
# define GRANULES_TO_BYTES(n) ((n)<<3)
# if CPP_WORDSZ == 64
#   define GRANULES_TO_WORDS(n) (n)
# elif CPP_WORDSZ == 32
#   define GRANULES_TO_WORDS(n) ((n)<<1)
# else
#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
# endif
#elif GRANULE_BYTES == 16
# define BYTES_TO_GRANULES(n) ((n)>>4)
# define GRANULES_TO_BYTES(n) ((n)<<4)
# if CPP_WORDSZ == 64
#   define GRANULES_TO_WORDS(n) ((n)<<1)
# elif CPP_WORDSZ == 32
#   define GRANULES_TO_WORDS(n) ((n)<<2)
# else
#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
# endif
#else
# error Bad GRANULE_BYTES value
#endif
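
/* Worked example (not compiled): assuming GRANULE_BYTES == 16 and      */
/* CPP_WORDSZ == 64, a 40-byte request rounds up to 3 granules, which   */
/* is 48 bytes or 6 words.  The helper name below is hypothetical.      */
#if 0 /* usage sketch */
  static void example_granule_math(void)
  {
    size_t granules = BYTES_TO_GRANULES(40 + (GRANULE_BYTES - 1)); /* 3  */
    size_t bytes = GRANULES_TO_BYTES(granules);                    /* 48 */
    size_t words = GRANULES_TO_WORDS(granules);                    /* 6  */

    (void)bytes; (void)words;
  }
#endif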

/*********************/
/*                   */
/*  Size Parameters  */
/*                   */
/*********************/

/* Heap block size, bytes. Should be power of 2.                */
/* Incremental GC with MPROTECT_VDB currently requires the      */
/* page size to be a multiple of HBLKSIZE.  Since most modern   */
/* architectures support variable page sizes down to 4K, and    */
/* X86 is generally 4K, we now default to 4K, except for        */
/*   Alpha: Seems to be used with 8K pages.                     */
/*   SMALL_CONFIG: Want less block-level fragmentation.         */
#ifndef HBLKSIZE
# if defined(LARGE_CONFIG) || !defined(SMALL_CONFIG)
#   ifdef ALPHA
#     define CPP_LOG_HBLKSIZE 13
#   elif defined(SN_TARGET_ORBIS) || defined(SN_TARGET_PSP2)
#     define CPP_LOG_HBLKSIZE 16    /* page size is set to 64K  */
#   else
#     define CPP_LOG_HBLKSIZE 12
#   endif
# else
#   define CPP_LOG_HBLKSIZE 10
# endif
#else
# if HBLKSIZE == 512
#   define CPP_LOG_HBLKSIZE 9
# elif HBLKSIZE == 1024
#   define CPP_LOG_HBLKSIZE 10
# elif HBLKSIZE == 2048
#   define CPP_LOG_HBLKSIZE 11
# elif HBLKSIZE == 4096
#   define CPP_LOG_HBLKSIZE 12
# elif HBLKSIZE == 8192
#   define CPP_LOG_HBLKSIZE 13
# elif HBLKSIZE == 16384
#   define CPP_LOG_HBLKSIZE 14
# elif !defined(CPPCHECK)
#   error Bad HBLKSIZE value
# endif
# undef HBLKSIZE
#endif

# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
# define LOG_HBLKSIZE   ((size_t)CPP_LOG_HBLKSIZE)
# define HBLKSIZE ((size_t)CPP_HBLKSIZE)

#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)

/*  Max size objects supported by freelist (larger objects are  */
/*  allocated directly with allchblk(), by rounding to the next */
/*  multiple of HBLKSIZE).                                      */
#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)

# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
        /* Equivalent to subtracting 2 hblk pointers.   */
        /* We do it this way because a compiler should  */
        /* find it hard to use an integer division      */
        /* instead of a shift.  The bundled SunOS 4.1   */
        /* compiler otherwise sometimes pessimizes the  */
        /* subtraction to involve a call to .div.       */

# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))

# define HBLKPTR(objptr) ((struct hblk *)(((word)(objptr)) \
                                          & ~(word)(HBLKSIZE-1)))
# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
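
/* Illustrative sketch (not compiled): HBLKPTR rounds an (interior)     */
/* pointer down to the start of its heap block, HBLKDISPL is the byte   */
/* offset within that block.  The helper name is hypothetical.          */
#if 0 /* usage sketch */
  static GC_bool example_same_hblk(void *p, void *q)
  {
    /* (ptr_t)HBLKPTR(p) + HBLKDISPL(p) recovers p itself.              */
    return (GC_bool)(HBLKPTR(p) == HBLKPTR(q));
  }
#endif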

/* Round up allocation size (in bytes) to a multiple of a granule.      */
#define ROUNDUP_GRANULE_SIZE(lb) /* lb should have no side-effect */ \
            (SIZET_SAT_ADD(lb, GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1))

/* Round up byte allocation requests to integral number of words, etc. */
# define ROUNDED_UP_GRANULES(lb) /* lb should have no side-effect */ \
        BYTES_TO_GRANULES(SIZET_SAT_ADD(lb, GRANULE_BYTES - 1 + EXTRA_BYTES))
# if MAX_EXTRA_BYTES == 0
#  define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), TRUE)
# else
#  define SMALL_OBJ(bytes) \
            (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), TRUE) \
             || (bytes) <= MAXOBJBYTES - EXTRA_BYTES)
        /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES.   */
        /* But we try to avoid looking up EXTRA_BYTES.                  */
# endif
# define ADD_SLOP(lb) /* lb should have no side-effect */ \
                SIZET_SAT_ADD(lb, EXTRA_BYTES)
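
/* Worked sketch (not compiled): a client request of lb bytes gets the  */
/* EXTRA_BYTES slop added, is rounded up to whole granules, and is      */
/* classified by SMALL_OBJ.  The values in the comments assume          */
/* GRANULE_BYTES == 16 and EXTRA_BYTES == 1; the helper is hypothetical.*/
#if 0 /* usage sketch */
  static void example_size_classing(size_t lb)
  {
    size_t lg = ROUNDED_UP_GRANULES(lb);   /* lb == 20 -> 2 granules    */
    size_t lb_adjusted = ADD_SLOP(lb);     /* lb == 20 -> 21 bytes      */

    if (SMALL_OBJ(lb)) {
      /* Small enough for the per-granule free lists.                   */
    } else {
      /* Treated as a large object (whole heap blocks).                 */
    }
    (void)lg; (void)lb_adjusted;
  }
#endif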

/*
 * Hash table representation of sets of pages.
 * Implements a map from aligned HBLKSIZE chunks of the address space to one
 * bit each.
 * This assumes it is OK to spuriously set bits, e.g. because multiple
 * addresses are represented by a single location.
 * Used by black-listing code, and perhaps by dirty bit maintenance code.
 */

# ifdef LARGE_CONFIG
#   if CPP_WORDSZ == 32
#     define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks,      */
                                /* which is >= 4GB.  Each table takes   */
                                /* 128KB, some of which may never be    */
                                /* touched.                             */
#   else
#     define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks,      */
                                /* which is >= 8GB.  Each table takes   */
                                /* 256KB, some of which may never be    */
                                /* touched.                             */
#   endif
# elif !defined(SMALL_CONFIG)
#   define LOG_PHT_ENTRIES  18   /* Collisions are likely if heap grows */
                                 /* to more than 256K hblks >= 1GB.     */
                                 /* Each hash table occupies 32K bytes. */
                                 /* Even for somewhat smaller heaps,    */
                                 /* say half that, collisions may be an */
                                 /* issue because we blacklist          */
                                 /* addresses outside the heap.         */
# else
#   define LOG_PHT_ENTRIES  15   /* Collisions are likely if heap grows */
                                 /* to more than 32K hblks = 128MB.     */
                                 /* Each hash table occupies 4K bytes.  */
# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
typedef word page_hash_table[PHT_SIZE];

# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))

# define get_pht_entry_from_index(bl, index) \
                (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define set_pht_entry_from_index(bl, index) \
                (void)((bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index))

#if defined(THREADS) && defined(AO_HAVE_or)
  /* And, one more version for GC_add_to_black_list_normal/stack        */
  /* (invoked indirectly by GC_do_local_mark) and                       */
  /* async_set_pht_entry_from_index (invoked by GC_dirty or the write   */
  /* fault handler).                                                    */
# define set_pht_entry_from_index_concurrent(bl, index) \
                AO_or((volatile AO_t *)&(bl)[divWORDSZ(index)], \
                      (AO_t)((word)1 << modWORDSZ(index)))
#else
# define set_pht_entry_from_index_concurrent(bl, index) \
                set_pht_entry_from_index(bl, index)
#endif
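
/* Illustrative sketch (not compiled): a page_hash_table maps addresses */
/* to single bits; hash collisions only make the answer conservatively  */
/* "yes".  The table and helper names below are hypothetical.           */
#if 0 /* usage sketch */
  static page_hash_table example_pages;     /* zero-initialized         */

  static void example_note_page(ptr_t addr)
  {
    set_pht_entry_from_index(example_pages, PHT_HASH(addr));
  }

  static GC_bool example_page_was_noted(ptr_t addr)
  {
    return (GC_bool)get_pht_entry_from_index(example_pages, PHT_HASH(addr));
  }
#endif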


/********************************************/
/*                                          */
/*    H e a p   B l o c k s                 */
/*                                          */
/********************************************/

/*  heap block header */
#define HBLKMASK   (HBLKSIZE-1)

#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
           /* upper bound                                    */
           /* We allocate 1 bit per allocation granule.      */
           /* If MARK_BIT_PER_GRANULE is defined, we use     */
           /* every nth bit, where n is the number of        */
           /* allocation granules per object.  If            */
           /* MARK_BIT_PER_OBJ is defined, we only use the   */
           /* initial group of mark bits, and it is safe     */
           /* to allocate smaller header for large objects.  */

union word_ptr_ao_u {
  word w;
  signed_word sw;
  void *vp;
# ifdef PARALLEL_MARK
    volatile AO_t ao;
# endif
};

/* We maintain layout maps for heap blocks containing objects of a given */
/* size.  Each entry in this map describes a byte offset and has the     */
/* following type.                                                       */
struct hblkhdr {
    struct hblk * hb_next;      /* Link field for hblk free list         */
                                /* and for lists of chunks waiting to be */
                                /* reclaimed.                            */
    struct hblk * hb_prev;      /* Backwards link for free list.        */
    struct hblk * hb_block;     /* The corresponding block.             */
    unsigned char hb_obj_kind;
                         /* Kind of objects in the block.  Each kind    */
                         /* identifies a mark procedure and a set of    */
                         /* list headers.  Sometimes called regions.    */
    unsigned char hb_flags;
#       define IGNORE_OFF_PAGE  1       /* Ignore pointers that do not  */
                                        /* point to the first page of   */
                                        /* this object.                 */
#       define WAS_UNMAPPED 2   /* This is a free block, which has      */
                                /* been unmapped from the address       */
                                /* space.                               */
                                /* GC_remap must be invoked on it       */
                                /* before it can be reallocated.        */
                                /* Only set with USE_MUNMAP.            */
#       define FREE_BLK 4       /* Block is free, i.e. not in use.      */
#       ifdef ENABLE_DISCLAIM
#         define HAS_DISCLAIM 8
                                /* This kind has a callback on reclaim. */
#         define MARK_UNCONDITIONALLY 0x10
                                /* Mark from all objects, marked or     */
                                /* not.  Used to mark objects needed by */
                                /* reclaim notifier.                    */
#       endif
#       ifdef MARK_BIT_PER_GRANULE
#         define LARGE_BLOCK 0x20
#       endif
    unsigned short hb_last_reclaimed;
                                /* Value of GC_gc_no when block was     */
                                /* last allocated or swept. May wrap.   */
                                /* For a free block, this is maintained */
                                /* only for USE_MUNMAP, and indicates   */
                                /* when the header was allocated, or    */
                                /* when the size of the block last      */
                                /* changed.                             */
#   ifdef MARK_BIT_PER_OBJ
      unsigned32 hb_inv_sz;     /* A good upper bound for 2**32/hb_sz.  */
                                /* For large objects, we use            */
                                /* LARGE_INV_SZ.                        */
#     define LARGE_INV_SZ (1 << 16)
#   endif
    word hb_sz; /* If in use, size in bytes, of objects in the block.   */
                /* if free, the size in bytes of the whole block.       */
                /* We assume that this is convertible to signed_word    */
                /* without generating a negative result.  We avoid      */
                /* generating free blocks larger than that.             */
    word hb_descr;              /* object descriptor for marking.  See  */
                                /* gc_mark.h.                           */
#   ifdef MARK_BIT_PER_GRANULE
      unsigned short * hb_map;  /* Essentially a table of remainders    */
                                /* mod BYTES_TO_GRANULES(hb_sz), except */
                                /* for large blocks.  See GC_obj_map.   */
#   endif
#   ifdef PARALLEL_MARK
      volatile AO_t hb_n_marks; /* Number of set mark bits, excluding   */
                                /* the one always set at the end.       */
                                /* Currently it is concurrently         */
                                /* updated and hence only approximate.  */
                                /* But a zero value does guarantee that */
                                /* the block contains no marked         */
                                /* objects.                             */
                                /* Ensuring this property means that we */
                                /* never decrement it to zero during a  */
                                /* collection, and hence the count may  */
                                /* be one too high.  Due to concurrent  */
                                /* updates, an arbitrary number of      */
                                /* increments, but not all of them (!)  */
                                /* may be lost, hence it may in theory  */
                                /* be much too low.                     */
                                /* The count may also be too high if    */
                                /* multiple mark threads mark the       */
                                /* same object due to a race.           */
#   else
      size_t hb_n_marks;        /* Without parallel marking, the count  */
                                /* is accurate.                         */
#   endif
#   ifdef USE_MARK_BYTES
#     define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
        /* Unlike the other case, this is in units of bytes.            */
        /* Since we force double-word alignment, we need at most one    */
        /* mark bit per 2 words.  But we do allocate and set one        */
        /* extra mark bit to avoid an explicit check for the            */
        /* partial object at the end of each block.                     */
      union {
        char _hb_marks[MARK_BITS_SZ];
                            /* The i'th byte is 1 if the object         */
                            /* starting at granule i or object i is     */
                            /* marked, 0 o.w.                           */
                            /* The mark bit for the "one past the       */
                            /* end" object is always set to avoid a     */
                            /* special case test in the marker.         */
        word dummy;     /* Force word alignment of mark bytes. */
      } _mark_byte_union;
#     define hb_marks _mark_byte_union._hb_marks
#   else
#     define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
      word hb_marks[MARK_BITS_SZ];
#   endif /* !USE_MARK_BYTES */
};

# define ANY_INDEX 23   /* "Random" mark bit index for assertions */

/*  heap block body */

# define HBLK_WORDS (HBLKSIZE/sizeof(word))
# define HBLK_GRANULES (HBLKSIZE/GRANULE_BYTES)

/* The number of objects in a block dedicated to a certain size.        */
/* This may erroneously yield zero (instead of one) for large objects.  */
# define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))

struct hblk {
    char hb_body[HBLKSIZE];
};

# define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)

# define OBJ_SZ_TO_BLOCKS(lb) divHBLKSZ((lb) + HBLKSIZE-1)
# define OBJ_SZ_TO_BLOCKS_CHECKED(lb) /* lb should have no side-effect */ \
                                divHBLKSZ(SIZET_SAT_ADD(lb, HBLKSIZE - 1))
    /* Size of block (in units of HBLKSIZE) needed to hold objects of   */
    /* given lb (in bytes).  The checked variant prevents wrap around.  */
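
/* Worked sketch (not compiled): with HBLKSIZE == 4096, a 10000-byte    */
/* object needs OBJ_SZ_TO_BLOCKS(10000) == 3 heap blocks, while the     */
/* _CHECKED variant saturates instead of wrapping for huge lb values.   */
#if 0 /* usage sketch */
  static void example_block_count(void)
  {
    size_t blocks = OBJ_SZ_TO_BLOCKS_CHECKED((size_t)10000);  /* 3 */

    (void)blocks;
  }
#endif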
1151 
1152 /* Object free list link */
1153 # define obj_link(p) (*(void  **)(p))
1154 
1155 # define LOG_MAX_MARK_PROCS 6
1156 # define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
1157 
1158 /* Root sets.  Logically private to mark_rts.c.  But we don't want the  */
1159 /* tables scanned, so we put them here.                                 */
1160 /* MAX_ROOT_SETS is the maximum number of ranges that can be    */
1161 /* registered as static roots.                                  */
1162 # ifdef LARGE_CONFIG
1163 #   define MAX_ROOT_SETS 8192
1164 # elif !defined(SMALL_CONFIG)
1165 #   define MAX_ROOT_SETS 2048
1166 # else
1167 #   define MAX_ROOT_SETS 512
1168 # endif
1169 
1170 # define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
1171 /* Maximum number of segments that can be excluded from root sets.      */
1172 
1173 /*
1174  * Data structure for excluded static roots.
1175  */
1176 struct exclusion {
1177     ptr_t e_start;
1178     ptr_t e_end;
1179 };
1180 
1181 /* Data structure for list of root sets.                                */
1182 /* We keep a hash table, so that we can filter out duplicate additions. */
1183 /* Under Win32, we need to do a better job of filtering overlaps, so    */
1184 /* we resort to sequential search, and pay the price.                   */
1185 struct roots {
1186         ptr_t r_start;/* multiple of word size */
1187         ptr_t r_end;  /* multiple of word size and greater than r_start */
1188 #       if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
1189           struct roots * r_next;
1190 #       endif
1191         GC_bool r_tmp;
1192                 /* Delete before registering new dynamic libraries */
1193 };
1194 
1195 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
1196     /* Size of hash table index to roots.       */
1197 #   define LOG_RT_SIZE 6
1198 #   define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
1199 #endif
1200 
1201 #ifndef MAX_HEAP_SECTS
1202 # ifdef LARGE_CONFIG
1203 #   if CPP_WORDSZ > 32
1204 #     define MAX_HEAP_SECTS 81920
1205 #   else
1206 #     define MAX_HEAP_SECTS 7680
1207 #   endif
1208 # elif defined(SMALL_CONFIG) && !defined(USE_PROC_FOR_LIBRARIES)
1209 #   if defined(PARALLEL_MARK) && (defined(MSWIN32) || defined(CYGWIN32))
1210 #     define MAX_HEAP_SECTS 384
1211 #   else
1212 #     define MAX_HEAP_SECTS 128         /* Roughly 256MB (128*2048*1K)  */
1213 #   endif
1214 # elif CPP_WORDSZ > 32
1215 #   define MAX_HEAP_SECTS 1024          /* Roughly 8GB                  */
1216 # else
1217 #   define MAX_HEAP_SECTS 512           /* Roughly 4GB                  */
1218 # endif
1219 #endif /* !MAX_HEAP_SECTS */
1220 
1221 typedef struct GC_ms_entry {
1222     ptr_t mse_start;    /* First word of object, word aligned.  */
1223     union word_ptr_ao_u mse_descr;
1224                         /* Descriptor; low order two bits are tags,     */
1225                         /* as described in gc_mark.h.                   */
1226 } mse;
1227 
1228 /* Lists of all heap blocks and free lists      */
1229 /* as well as other random data structures      */
1230 /* that should not be scanned by the            */
1231 /* collector.                                   */
1232 /* These are grouped together in a struct       */
1233 /* so that they can be easily skipped by the    */
1234 /* GC_mark routine.                             */
1235 /* The ordering is weird to make GC_malloc      */
1236 /* faster by keeping the important fields       */
1237 /* sufficiently close together that a           */
1238 /* single load of a base register will do.      */
1239 /* Scalars that could easily appear to          */
1240 /* be pointers are also put here.               */
1241 /* The main fields should precede any           */
1242 /* conditionally included fields, so that       */
1243 /* gc_inline.h will work even if a different    */
1244 /* set of macros is defined when the client is  */
1245 /* compiled.                                    */
1246 
1247 struct _GC_arrays {
1248   word _heapsize;       /* Heap size in bytes (value never goes down).  */
1249   word _requested_heapsize;     /* Heap size due to explicit expansion. */
1250   ptr_t _last_heap_addr;
1251   ptr_t _prev_heap_addr;
1252   word _large_free_bytes;
1253         /* Total bytes contained in blocks on large object free */
1254         /* list.                                                */
1255   word _large_allocd_bytes;
1256         /* Total number of bytes in allocated large objects blocks.     */
1257         /* For the purposes of this counter and the next one only, a    */
1258         /* large object is one that occupies a block of at least        */
1259         /* 2*HBLKSIZE.                                                  */
1260   word _max_large_allocd_bytes;
1261         /* Maximum number of bytes that were ever allocated in          */
1262         /* large object blocks.  This is used to help decide when it    */
1263         /* is safe to split up a large block.                           */
1264   word _bytes_allocd_before_gc;
1265                 /* Number of bytes allocated before this        */
1266                 /* collection cycle.                            */
1267 # ifndef SEPARATE_GLOBALS
1268 #   define GC_bytes_allocd GC_arrays._bytes_allocd
1269     word _bytes_allocd;
1270         /* Number of bytes allocated during this collection cycle.      */
1271 # endif
1272   word _bytes_dropped;
1273         /* Number of black-listed bytes dropped during GC cycle */
1274         /* as a result of repeated scanning during allocation   */
1275         /* attempts.  These are treated largely as allocated,   */
1276         /* even though they are not useful to the client.       */
1277   word _bytes_finalized;
1278         /* Approximate number of bytes in objects (and headers) */
1279         /* that became ready for finalization in the last       */
1280         /* collection.                                          */
1281   word _bytes_freed;
1282         /* Number of explicitly deallocated bytes of memory     */
1283         /* since last collection.                               */
1284   word _finalizer_bytes_freed;
1285         /* Bytes of memory explicitly deallocated while         */
1286         /* finalizers were running.  Used to approximate memory */
1287         /* explicitly deallocated by finalizers.                */
1288   ptr_t _scratch_end_ptr;
1289   ptr_t _scratch_last_end_ptr;
1290         /* Used by headers.c, and can easily appear to point to */
1291         /* heap.  Also used by GC_register_dynamic_libraries(). */
1292   mse *_mark_stack;
1293         /* Limits of stack for GC_mark routine.  All ranges     */
1294         /* between GC_mark_stack (incl.) and GC_mark_stack_top  */
1295         /* (incl.) still need to be marked from.                */
1296   mse *_mark_stack_limit;
1297 # ifdef PARALLEL_MARK
1298     mse *volatile _mark_stack_top;
1299         /* Updated only with mark lock held, but read asynchronously.   */
1300         /* TODO: Use union to avoid casts to AO_t */
1301 # else
1302     mse *_mark_stack_top;
1303 # endif
1304   word _composite_in_use; /* Number of bytes in the accessible  */
1305                           /* composite objects.                 */
1306   word _atomic_in_use;    /* Number of bytes in the accessible  */
1307                           /* atomic objects.                    */
1308 # ifdef USE_MUNMAP
1309 #   define GC_unmapped_bytes GC_arrays._unmapped_bytes
1310     word _unmapped_bytes;
1311 #   ifdef COUNT_UNMAPPED_REGIONS
1312 #     define GC_num_unmapped_regions GC_arrays._num_unmapped_regions
1313       signed_word _num_unmapped_regions;
1314 #   endif
1315 # else
1316 #   define GC_unmapped_bytes 0
1317 # endif
1318   bottom_index * _all_nils;
1319 # ifdef ENABLE_TRACE
1320 #   define GC_trace_addr GC_arrays._trace_addr
1321     ptr_t _trace_addr;
1322 # endif
1323   GC_mark_proc _mark_procs[MAX_MARK_PROCS];
1324         /* Table of user-defined mark procedures.  There is     */
1325         /* a small number of these, which can be referenced     */
1326         /* by DS_PROC mark descriptors.  See gc_mark.h.         */
1327   char _modws_valid_offsets[sizeof(word)];
1328                                 /* GC_valid_offsets[i] ==>                */
1329                                 /* GC_modws_valid_offsets[i%sizeof(word)] */
1330 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
1331 #   define GC_root_index GC_arrays._root_index
1332     struct roots * _root_index[RT_SIZE];
1333 # endif
1334 # ifdef SAVE_CALL_CHAIN
1335 #   define GC_last_stack GC_arrays._last_stack
1336     struct callinfo _last_stack[NFRAMES];
1337                 /* Stack at last garbage collection.  Useful for        */
1338                 /* debugging mysterious object disappearances.  In the  */
1339                 /* multi-threaded case, we currently only save the      */
1340                 /* calling stack.                                       */
1341 # endif
1342 # ifndef SEPARATE_GLOBALS
1343 #   define GC_objfreelist GC_arrays._objfreelist
1344     void *_objfreelist[MAXOBJGRANULES+1];
1345                           /* free list for NORMAL objects */
1346 #   define GC_aobjfreelist GC_arrays._aobjfreelist
1347     void *_aobjfreelist[MAXOBJGRANULES+1];
1348                           /* free list for atomic objects       */
1349 # endif
1350   void *_uobjfreelist[MAXOBJGRANULES+1];
1351                           /* Uncollectible but traced objects.  */
1352                           /* Objects on this and _auobjfreelist */
1353                           /* are always marked, except during   */
1354                           /* garbage collections.               */
1355 # ifdef GC_ATOMIC_UNCOLLECTABLE
1356 #   define GC_auobjfreelist GC_arrays._auobjfreelist
1357     void *_auobjfreelist[MAXOBJGRANULES+1];
1358                 /* Atomic uncollectible but traced objects.     */
1359 # endif
1360   size_t _size_map[MAXOBJBYTES+1];
1361         /* Number of granules to allocate when asked for a certain      */
1362         /* number of bytes.  Should be accessed with the allocation     */
1363         /* lock held.                                                   */
1364 # ifdef MARK_BIT_PER_GRANULE
1365 #   define GC_obj_map GC_arrays._obj_map
1366     unsigned short * _obj_map[MAXOBJGRANULES + 1];
1367                        /* If not NULL, then a pointer to a map of valid */
1368                        /* object addresses.                             */
1369                        /* _obj_map[sz_in_granules][i] is                */
1370                        /* i % sz_in_granules.                           */
1371                        /* This is now used purely to replace a          */
1372                        /* division in the marker by a table lookup.     */
1373                        /* _obj_map[0] is used for large objects and     */
1374                        /* contains all nonzero entries.  This gets us   */
1375                        /* out of the marker fast path without an extra  */
1376                        /* test.                                         */
1377 #   define MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
1378 # endif
1379 # define VALID_OFFSET_SZ HBLKSIZE
1380   char _valid_offsets[VALID_OFFSET_SZ];
1381                                 /* GC_valid_offsets[i] == TRUE ==> i    */
1382                                 /* is registered as a displacement.     */
1383 # ifndef GC_DISABLE_INCREMENTAL
1384 #   define GC_grungy_pages GC_arrays._grungy_pages
1385     page_hash_table _grungy_pages; /* Pages that were dirty at last     */
1386                                    /* GC_read_dirty.                    */
1387 #   define GC_dirty_pages GC_arrays._dirty_pages
1388     volatile page_hash_table _dirty_pages;
1389                         /* Pages dirtied since last GC_read_dirty. */
1390 # endif
1391 # if (defined(CHECKSUMS) && defined(GWW_VDB)) || defined(PROC_VDB)
1392 #   define GC_written_pages GC_arrays._written_pages
1393     page_hash_table _written_pages;     /* Pages ever dirtied   */
1394 # endif
1395 # define GC_heap_sects GC_arrays._heap_sects
1396   struct HeapSect {
1397     ptr_t hs_start;
1398     size_t hs_bytes;
1399   } _heap_sects[MAX_HEAP_SECTS];        /* Heap segments potentially    */
1400                                         /* containing client objects.   */
1401 # if defined(USE_PROC_FOR_LIBRARIES)
1402 #   define GC_our_memory GC_arrays._our_memory
1403     struct HeapSect _our_memory[MAX_HEAP_SECTS];
1404                                         /* All GET_MEM allocated        */
1405                                         /* memory.  Includes block      */
1406                                         /* headers and the like.        */
1407 # endif
1408 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1409 #   define GC_heap_bases GC_arrays._heap_bases
1410     ptr_t _heap_bases[MAX_HEAP_SECTS];
1411                 /* Start address of memory regions obtained from kernel. */
1412 # endif
1413 # ifdef MSWINCE
1414 #   define GC_heap_lengths GC_arrays._heap_lengths
1415     word _heap_lengths[MAX_HEAP_SECTS];
1416                 /* Committed lengths of memory regions obtained from kernel. */
1417 # endif
1418   struct roots _static_roots[MAX_ROOT_SETS];
1419   struct exclusion _excl_table[MAX_EXCLUSIONS];
1420   /* Block header index; see gc_headers.h */
1421   bottom_index * _top_index[TOP_SZ];
1422 };
1423 
1424 GC_API_PRIV GC_FAR struct _GC_arrays GC_arrays;
1425 
1426 #define GC_all_nils GC_arrays._all_nils
1427 #define GC_atomic_in_use GC_arrays._atomic_in_use
1428 #define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
1429 #define GC_bytes_dropped GC_arrays._bytes_dropped
1430 #define GC_bytes_finalized GC_arrays._bytes_finalized
1431 #define GC_bytes_freed GC_arrays._bytes_freed
1432 #define GC_composite_in_use GC_arrays._composite_in_use
1433 #define GC_excl_table GC_arrays._excl_table
1434 #define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
1435 #define GC_heapsize GC_arrays._heapsize
1436 #define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
1437 #define GC_large_free_bytes GC_arrays._large_free_bytes
1438 #define GC_last_heap_addr GC_arrays._last_heap_addr
1439 #define GC_mark_stack GC_arrays._mark_stack
1440 #define GC_mark_stack_limit GC_arrays._mark_stack_limit
1441 #define GC_mark_stack_top GC_arrays._mark_stack_top
1442 #define GC_mark_procs GC_arrays._mark_procs
1443 #define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
1444 #define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
1445 #define GC_prev_heap_addr GC_arrays._prev_heap_addr
1446 #define GC_requested_heapsize GC_arrays._requested_heapsize
1447 #define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
1448 #define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
1449 #define GC_size_map GC_arrays._size_map
1450 #define GC_static_roots GC_arrays._static_roots
1451 #define GC_top_index GC_arrays._top_index
1452 #define GC_uobjfreelist GC_arrays._uobjfreelist
1453 #define GC_valid_offsets GC_arrays._valid_offsets
1454 
1455 #define beginGC_arrays ((ptr_t)(&GC_arrays))
1456 #define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
1457 #define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)
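/*
 * For instance, with GC_heapsize == 8 MiB and GC_large_free_bytes == 1 MiB,
 * USED_HEAP_SIZE evaluates to 7 MiB, i.e. the heap bytes not currently
 * sitting on large-object free lists.
 */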
1458 
1459 /* Object kinds: */
1460 #ifndef MAXOBJKINDS
1461 # define MAXOBJKINDS 16
1462 #endif
1463 GC_EXTERN struct obj_kind {
1464    void **ok_freelist;  /* Array of free list headers for this kind of  */
1465                         /* object.  Point either to GC_arrays or to     */
1466                         /* storage allocated with GC_scratch_alloc.     */
1467    struct hblk **ok_reclaim_list;
1468                         /* List headers for lists of blocks waiting to  */
1469                         /* be swept.  Indexed by object size in         */
1470                         /* granules.                                    */
1471    word ok_descriptor;  /* Descriptor template for objects in this      */
1472                         /* block.                                       */
1473    GC_bool ok_relocate_descr;
1474                         /* Add object size in bytes to descriptor       */
1475                         /* template to obtain descriptor.  Otherwise    */
1476                         /* template is used as is.                      */
1477    GC_bool ok_init;   /* Clear objects before putting them on the free list. */
1478 #  ifdef ENABLE_DISCLAIM
1479      GC_bool ok_mark_unconditionally;
1480                         /* Mark from all, including unmarked, objects   */
1481                         /* in block.  Used to protect objects reachable */
1482                         /* from reclaim notifiers.                      */
1483      int (GC_CALLBACK *ok_disclaim_proc)(void * /*obj*/);
1484                         /* The disclaim procedure is called before obj  */
1485                         /* is reclaimed, but must also tolerate being   */
1486                         /* called with an object from a free list.      */
1487                         /* A non-zero return prevents reclamation.      */
1488 #    define OK_DISCLAIM_INITZ /* comma */, FALSE, 0
1489 #  else
1490 #    define OK_DISCLAIM_INITZ /* empty */
1491 #  endif /* !ENABLE_DISCLAIM */
1492 } GC_obj_kinds[MAXOBJKINDS];
1493 
1494 #define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
1495 #define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))
1496 
1497 /* Variables that used to be in GC_arrays, but need to be accessed by   */
1498 /* inline allocation code.  If they were in GC_arrays, the inlined      */
1499 /* allocation code would include GC_arrays offsets (as it once did),    */
1500 /* which introduced maintenance problems.                               */
1501 
1502 #ifdef SEPARATE_GLOBALS
1503   extern word GC_bytes_allocd;
1504         /* Number of bytes allocated during this collection cycle.      */
1505   extern ptr_t GC_objfreelist[MAXOBJGRANULES+1];
1506                           /* free list for NORMAL objects */
1507 # define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
1508 # define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))
1509 
1510   extern ptr_t GC_aobjfreelist[MAXOBJGRANULES+1];
1511                           /* free list for atomic (PTRFREE) objects     */
1512 # define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
1513 # define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
1514 #endif /* SEPARATE_GLOBALS */
1515 
1516 /* Predefined kinds: */
1517 #define PTRFREE 0
1518 #define NORMAL  1
1519 #define UNCOLLECTABLE 2
1520 #ifdef GC_ATOMIC_UNCOLLECTABLE
1521 # define AUNCOLLECTABLE 3
1522 # define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
1523 # define GC_N_KINDS_INITIAL_VALUE 4
1524 #else
1525 # define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
1526 # define GC_N_KINDS_INITIAL_VALUE 3
1527 #endif
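/*
 * Note: UNCOLLECTABLE and AUNCOLLECTABLE differ only in the low-order bit,
 * so IS_UNCOLLECTABLE() can test for both kinds by masking that bit off;
 * e.g. IS_UNCOLLECTABLE(NORMAL) yields 0 while IS_UNCOLLECTABLE(3) yields 1
 * when GC_ATOMIC_UNCOLLECTABLE is defined.
 */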
1528 
1529 GC_EXTERN unsigned GC_n_kinds;
1530 
1531 GC_EXTERN word GC_n_heap_sects; /* Number of separately added heap      */
1532                                 /* sections.                            */
1533 
1534 #ifdef USE_PROC_FOR_LIBRARIES
1535   GC_EXTERN word GC_n_memory;   /* Number of GET_MEM allocated memory   */
1536                                 /* sections.                            */
1537 #endif
1538 
1539 GC_EXTERN size_t GC_page_size;
1540 
1541 /* Round up allocation size to a multiple of a page size.       */
1542 /* GC_setpagesize() is assumed to be already invoked.           */
1543 #define ROUNDUP_PAGESIZE(lb) /* lb should have no side-effect */ \
1544             (SIZET_SAT_ADD(lb, GC_page_size - 1) & ~(GC_page_size - 1))
1545 
1546 /* Same as above but used to make GET_MEM() argument safe.      */
1547 #ifdef MMAP_SUPPORTED
1548 # define ROUNDUP_PAGESIZE_IF_MMAP(lb) ROUNDUP_PAGESIZE(lb)
1549 #else
1550 # define ROUNDUP_PAGESIZE_IF_MMAP(lb) (lb)
1551 #endif
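/*
 * A worked example, assuming GC_page_size == 4096 (a common value):
 *   ROUNDUP_PAGESIZE(1)    == 4096
 *   ROUNDUP_PAGESIZE(4096) == 4096
 *   ROUNDUP_PAGESIZE(5000) == 8192
 * SIZET_SAT_ADD saturates instead of wrapping for requests near GC_SIZE_MAX.
 */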
1552 
1553 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1554 # ifndef WIN32_LEAN_AND_MEAN
1555 #   define WIN32_LEAN_AND_MEAN 1
1556 # endif
1557 # define NOSERVICE
1558   EXTERN_C_END
1559 # include <windows.h>
1560   EXTERN_C_BEGIN
1561   GC_EXTERN SYSTEM_INFO GC_sysinfo;
1562   GC_INNER GC_bool GC_is_heap_base(void *p);
1563 #endif
1564 
1565 GC_EXTERN word GC_black_list_spacing;
1566                         /* Average number of bytes between blacklisted  */
1567                         /* blocks. Approximate.                         */
1568                         /* Counts only blocks that are                  */
1569                         /* "stack-blacklisted", i.e. that are           */
1570                         /* problematic in the interior of an object.    */
1571 
1572 #ifdef GC_GCJ_SUPPORT
1573   extern struct hblk * GC_hblkfreelist[];
1574   extern word GC_free_bytes[];  /* Both remain visible to GNU GCJ.      */
1575 #endif
1576 
1577 GC_EXTERN word GC_root_size; /* Total size of registered root sections. */
1578 
1579 GC_EXTERN GC_bool GC_debugging_started;
1580                                 /* GC_debug_malloc has been called.     */
1581 
1582 /* This is used by GC_do_blocking[_inner]().            */
1583 struct blocking_data {
1584     GC_fn_type fn;
1585     void * client_data; /* and result */
1586 };
1587 
1588 /* This is used by GC_call_with_gc_active(), GC_push_all_stack_sections(). */
1589 struct GC_traced_stack_sect_s {
1590   ptr_t saved_stack_ptr;
1591 # ifdef IA64
1592     ptr_t saved_backing_store_ptr;
1593     ptr_t backing_store_end;
1594 # endif
1595   struct GC_traced_stack_sect_s *prev;
1596 };
1597 
1598 #ifdef THREADS
1599   /* Process all "traced stack sections" - scan entire stack except for */
1600   /* frames belonging to the user functions invoked by GC_do_blocking.  */
1601   GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi,
1602                         struct GC_traced_stack_sect_s *traced_stack_sect);
1603   GC_EXTERN word GC_total_stacksize; /* updated on every push_all_stacks */
1604 #else
1605   GC_EXTERN ptr_t GC_blocked_sp;
1606   GC_EXTERN struct GC_traced_stack_sect_s *GC_traced_stack_sect;
1607                         /* Points to the "frame" data held in stack by  */
1608                         /* the innermost GC_call_with_gc_active().      */
1609                         /* NULL if no such "frame" active.              */
1610 #endif /* !THREADS */
1611 
1612 #ifdef IA64
1613   /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */
1614   GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi,
1615                   int eager, struct GC_traced_stack_sect_s *traced_stack_sect);
1616 #endif
1617 
1618 /*  Marks are in a reserved area in                          */
1619 /*  each heap block.  Each word has one mark bit associated  */
1620 /*  with it. Only those corresponding to the beginning of an */
1621 /*  object are used.                                         */
1622 
1623 /* Mark bit operations */
1624 
1625 /*
1626  * Retrieve, set, clear the nth mark bit in a given heap block.
1627  *
1628  * (Recall that bit n corresponds to the nth object or allocation granule
1629  * relative to the beginning of the block, including unused words.)
1630  */
1631 
1632 #ifdef USE_MARK_BYTES
1633 # define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n])
1634 # define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n] = 1)
1635 # define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n] = 0)
1636 #else
1637 /* Set mark bit correctly, even if mark bits may be concurrently        */
1638 /* accessed.                                                            */
1639 # if defined(PARALLEL_MARK) || (defined(THREAD_SANITIZER) && defined(THREADS))
1640     /* Workaround TSan false positive: there is no race between         */
1641     /* mark_bit_from_hdr and set_mark_bit_from_hdr when n is different  */
1642     /* (alternatively, USE_MARK_BYTES could be used).  If TSan is off,  */
1643     /* AO_or() is used only if we set USE_MARK_BITS explicitly.         */
1644 #   define OR_WORD(addr, bits) AO_or((volatile AO_t *)(addr), (AO_t)(bits))
1645 # else
1646 #   define OR_WORD(addr, bits) (void)(*(addr) |= (bits))
1647 # endif
1648 # define mark_bit_from_hdr(hhdr,n) \
1649               (((hhdr)->hb_marks[divWORDSZ(n)] >> modWORDSZ(n)) & (word)1)
1650 # define set_mark_bit_from_hdr(hhdr,n) \
1651               OR_WORD((hhdr)->hb_marks+divWORDSZ(n), (word)1 << modWORDSZ(n))
1652 # define clear_mark_bit_from_hdr(hhdr,n) \
1653               ((hhdr)->hb_marks[divWORDSZ(n)] &= ~((word)1 << modWORDSZ(n)))
1654 #endif /* !USE_MARK_BYTES */
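/*
 * Rough illustration, assuming a 64-bit word (divWORDSZ(n) == n / 64,
 * modWORDSZ(n) == n % 64): set_mark_bit_from_hdr(hhdr, 70) ORs
 * (word)1 << 6 into hhdr->hb_marks[1], and mark_bit_from_hdr(hhdr, 70)
 * extracts that same bit.  With USE_MARK_BYTES each mark instead occupies
 * a whole byte, trading space for simpler concurrent updates.
 */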
1655 
1656 #ifdef MARK_BIT_PER_OBJ
1657 #  define MARK_BIT_NO(offset, sz) (((word)(offset))/(sz))
1658         /* Get the mark bit index corresponding to the given byte       */
1659         /* offset and size (in bytes).                                  */
1660 #  define MARK_BIT_OFFSET(sz) 1
1661         /* Spacing between useful mark bits.                            */
1662 #  define IF_PER_OBJ(x) x
1663 #  define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES ? 1 : HBLK_OBJS(sz))
1664         /* Position of final, always set, mark bit.                     */
1665 #else /* MARK_BIT_PER_GRANULE */
1666 #  define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((word)(offset))
1667 #  define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
1668 #  define IF_PER_OBJ(x)
1669 #  define FINAL_MARK_BIT(sz) \
1670                 ((sz) > MAXOBJBYTES ? MARK_BITS_PER_HBLK \
1671                                 : BYTES_TO_GRANULES((sz) * HBLK_OBJS(sz)))
1672 #endif
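/*
 * Example, assuming 16-byte granules (so BYTES_TO_GRANULES(n) == n / 16):
 * for a byte offset of 48 within a block, MARK_BIT_NO(48, sz) is 3 in the
 * MARK_BIT_PER_GRANULE case regardless of sz, whereas in the
 * MARK_BIT_PER_OBJ case MARK_BIT_NO(48, 32) is 1, i.e. the index of the
 * 32-byte object containing that offset.
 */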
1673 
1674 /* Important internal collector routines */
1675 
1676 GC_INNER ptr_t GC_approx_sp(void);
1677 
1678 GC_INNER GC_bool GC_should_collect(void);
1679 
1680 void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
1681                             word client_data);
1682                         /* Invoke fn(hbp, client_data) for each         */
1683                         /* allocated heap block.                        */
1684 GC_INNER struct hblk * GC_next_block(struct hblk *h, GC_bool allow_free);
1685                         /* Get the next block whose address is at least */
1686                         /* h.  Returned block is managed by GC.  The    */
1687                         /* block must be in use unless allow_free is    */
1688                         /* true.  Return 0 if there is no such block.   */
1689 GC_INNER struct hblk * GC_prev_block(struct hblk * h);
1690                         /* Get the last (highest address) block whose   */
1691                         /* address is at most h.  Returned block is     */
1692                         /* managed by GC, but may or may not be in use. */
1693                         /* Return 0 if there is no such block.          */
1694 GC_INNER void GC_mark_init(void);
1695 GC_INNER void GC_clear_marks(void);
1696                         /* Clear mark bits for all heap objects.        */
1697 GC_INNER void GC_invalidate_mark_state(void);
1698                                 /* Tell the marker that marked          */
1699                                 /* objects may point to unmarked        */
1700                                 /* ones, and roots may point to         */
1701                                 /* unmarked objects.  Reset mark stack. */
1702 GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame);
1703                         /* Perform about one page's worth of marking    */
1704                         /* work of whatever kind is needed.  Returns    */
1705                         /* quickly if no collection is in progress.     */
1706                         /* Return TRUE if mark phase finished.          */
1707 GC_INNER void GC_initiate_gc(void);
1708                                 /* initiate collection.                 */
1709                                 /* If the mark state is invalid, this   */
1710                                 /* becomes full collection.  Otherwise  */
1711                                 /* it's partial.                        */
1712 
1713 GC_INNER GC_bool GC_collection_in_progress(void);
1714                         /* Collection is in progress, or was abandoned. */
1715 
1716 #define GC_PUSH_ALL_SYM(sym) \
1717                 GC_push_all((/* no volatile */ void *)&(sym), \
1718                             (/* no volatile */ void *)(&(sym) + 1))
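/*
 * Usage sketch (my_root_ptr is a hypothetical static variable):
 *   static void *my_root_ptr;
 *   ...
 *   GC_PUSH_ALL_SYM(my_root_ptr);
 * This pushes the range [&my_root_ptr, &my_root_ptr + 1), so any heap
 * object that my_root_ptr references is traced as if it were a root.
 */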
1719 
1720 GC_INNER void GC_push_all_stack(ptr_t b, ptr_t t);
1721                                     /* As GC_push_all but consider      */
1722                                     /* interior pointers as valid.      */
1723 
1724 #if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
1725   /* GC_mark_local does not handle memory protection faults yet.  So,   */
1726   /* the static data regions are scanned immediately by GC_push_roots.  */
1727   GC_INNER void GC_push_conditional_eager(void *bottom, void *top,
1728                                           GC_bool all);
1729 #endif
1730 
1731   /* In the threads case, we push part of the current thread stack      */
1732   /* with GC_push_all_eager when we push the registers.  This gets the  */
1733   /* callee-save registers that may disappear.  The remainder of the    */
1734   /* stacks are scheduled for scanning in *GC_push_other_roots, which   */
1735   /* is thread-package-specific.                                        */
1736 
1737 GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
1738                                         /* Push all or dirty roots.     */
1739 
1740 GC_API_PRIV GC_push_other_roots_proc GC_push_other_roots;
1741                         /* Push system or application specific roots    */
1742                         /* onto the mark stack.  In some environments   */
1743                         /* (e.g. threads environments) this is          */
1744                         /* predefined to be non-zero.  A client         */
1745                         /* supplied replacement should also call the    */
1746                         /* original function.  Remains externally       */
1747                         /* visible as used by some well-known 3rd-party */
1748                         /* software (e.g., ECL) currently.              */
1749 
1750 #ifdef THREADS
1751   void GC_push_thread_structures(void);
1752 #endif
1753 GC_EXTERN void (*GC_push_typed_structures)(void);
1754                         /* A pointer such that we can avoid linking in  */
1755                         /* the typed allocation support if unused.      */
1756 
1757 GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
1758                                           volatile ptr_t arg);
1759 
1760 #if defined(SPARC) || defined(IA64)
1761   /* Cause all stacked registers to be saved in memory.  Return a       */
1762   /* pointer to the top of the corresponding memory stack.              */
1763   ptr_t GC_save_regs_in_stack(void);
1764 #endif
1765                         /* Push register contents onto mark stack.      */
1766 
1767 #if defined(MSWIN32) || defined(MSWINCE)
1768   void __cdecl GC_push_one(word p);
1769 #else
1770   void GC_push_one(word p);
1771                               /* If p points to an object, mark it    */
1772                               /* and push contents on the mark stack  */
1773                               /* Pointer recognition test always      */
1774                               /* accepts interior pointers, i.e. this */
1775                               /* is appropriate for pointers found on */
1776                               /* stack.                               */
1777 #endif
1778 
1779 #if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
1780   GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source);
1781                                 /* Ditto, omits plausibility test       */
1782 #else
1783   GC_INNER void GC_mark_and_push_stack(ptr_t p);
1784 #endif
1785 
1786 GC_INNER void GC_clear_hdr_marks(hdr * hhdr);
1787                                     /* Clear the mark bits in a header */
1788 GC_INNER void GC_set_hdr_marks(hdr * hhdr);
1789                                     /* Set the mark bits in a header */
1790 GC_INNER void GC_set_fl_marks(ptr_t p);
1791                                     /* Set all mark bits associated with */
1792                                     /* a free list.                      */
1793 #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1794   void GC_check_fl_marks(void **);
1795                                     /* Check that all mark bits         */
1796                                     /* associated with a free list are  */
1797                                     /* set.  Abort if not.              */
1798 #endif
1799 void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
1800 #ifdef USE_PROC_FOR_LIBRARIES
1801   GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e);
1802 #endif
1803 GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish);
1804 #if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
1805     || defined(CYGWIN32) || defined(PCR)
1806   GC_INNER void GC_register_dynamic_libraries(void);
1807                 /* Add dynamic library data sections to the root set. */
1808 #endif
1809 GC_INNER void GC_cond_register_dynamic_libraries(void);
1810                 /* Remove and reregister dynamic libraries if we're     */
1811                 /* configured to do that at each GC.                    */
1812 
1813 /* Machine dependent startup routines */
1814 ptr_t GC_get_main_stack_base(void);     /* Cold end of stack.           */
1815 #ifdef IA64
1816   GC_INNER ptr_t GC_get_register_stack_base(void);
1817                                         /* Cold end of register stack.  */
1818 #endif
1819 void GC_register_data_segments(void);
1820 
1821 #ifdef THREADS
1822   GC_INNER void GC_thr_init(void);
1823   GC_INNER void GC_init_parallel(void);
1824 #else
1825   GC_INNER GC_bool GC_is_static_root(void *p);
1826                 /* Is the address p in one of the registered static     */
1827                 /* root sections?                                       */
1828 # ifdef TRACE_BUF
1829     void GC_add_trace_entry(char *kind, word arg1, word arg2);
1830 # endif
1831 #endif /* !THREADS */
1832 
1833 /* Black listing: */
1834 #ifdef PRINT_BLACK_LIST
1835   GC_INNER void GC_add_to_black_list_normal(word p, ptr_t source);
1836                         /* Register bits as a possible future false     */
1837                         /* reference from the heap or static data       */
1838 # define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
1839                 if (GC_all_interior_pointers) { \
1840                   GC_add_to_black_list_stack((word)(bits), (source)); \
1841                 } else \
1842                   GC_add_to_black_list_normal((word)(bits), (source))
1843   GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source);
1844 # define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
1845             GC_add_to_black_list_stack((word)(bits), (source))
1846 #else
1847   GC_INNER void GC_add_to_black_list_normal(word p);
1848 # define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
1849                 if (GC_all_interior_pointers) { \
1850                   GC_add_to_black_list_stack((word)(bits)); \
1851                 } else \
1852                   GC_add_to_black_list_normal((word)(bits))
1853   GC_INNER void GC_add_to_black_list_stack(word p);
1854 # define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
1855             GC_add_to_black_list_stack((word)(bits))
1856 #endif /* PRINT_BLACK_LIST */
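/*
 * The macros above expand to an if/else statement, so they are invoked
 * like ordinary statements, e.g. (with hypothetical names "bits" for the
 * suspect value and "src" for where it was found):
 *   GC_ADD_TO_BLACK_LIST_NORMAL(bits, src);
 * The "src" argument is only consumed when PRINT_BLACK_LIST is defined.
 */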
1857 
1858 struct hblk * GC_is_black_listed(struct hblk * h, word len);
1859                         /* If there are likely to be false references   */
1860                         /* to a block starting at h of the indicated    */
1861                         /* length, then return the next plausible       */
1862                         /* starting location for h that might avoid     */
1863                         /* these false references.  Remains externally  */
1864                         /* visible as used by GNU GCJ currently.        */
1865 
1866 GC_INNER void GC_promote_black_lists(void);
1867                         /* Declare an end to a black listing phase.     */
1868 GC_INNER void GC_unpromote_black_lists(void);
1869                         /* Approximately undo the effect of the above.  */
1870                         /* This actually loses some information, but    */
1871                         /* only in a reasonably safe way.               */
1872 
1873 GC_INNER ptr_t GC_scratch_alloc(size_t bytes);
1874                                 /* GC internal memory allocation for    */
1875                                 /* small objects.  Deallocation is not  */
1876                                 /* possible.  May return NULL.          */
1877 
1878 #ifdef GWW_VDB
1879   /* GC_scratch_recycle_no_gww() not used.      */
1880 #else
1881 # define GC_scratch_recycle_no_gww GC_scratch_recycle_inner
1882 #endif
1883 GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes);
1884                                 /* Return the memory region to the heap. */
1885 
1886 /* Heap block layout maps: */
1887 #ifdef MARK_BIT_PER_GRANULE
1888   GC_INNER GC_bool GC_add_map_entry(size_t sz);
1889                                 /* Add a heap block map for objects of  */
1890                                 /* size sz to obj_map.                  */
1891                                 /* Return FALSE on failure.             */
1892 #endif
1893 
1894 GC_INNER void GC_register_displacement_inner(size_t offset);
1895                                 /* Version of GC_register_displacement  */
1896                                 /* that assumes lock is already held.   */
1897 
1898 /*  hblk allocation: */
1899 GC_INNER void GC_new_hblk(size_t size_in_granules, int kind);
1900                                 /* Allocate a new heap block, and build */
1901                                 /* a free list in it.                   */
1902 
1903 GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear,
1904                            ptr_t list);
1905                                 /* Build a free list of objects of the  */
1906                                 /* given word size in h; append list to */
1907                                 /* end of the free lists.  Possibly     */
1908                                 /* clear objects on the list.  Normally */
1909                                 /* called by GC_new_hblk, but also      */
1910                                 /* called explicitly without GC lock.   */
1911 
1912 GC_INNER struct hblk * GC_allochblk(size_t size_in_bytes, int kind,
1913                                     unsigned flags);
1914                                 /* Allocate a heap block, inform        */
1915                                 /* the marker that block is valid       */
1916                                 /* for objects of indicated size.       */
1917 
1918 GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags);
1919                         /* Allocate a large block of size lb bytes.     */
1920                         /* The block is not cleared.                    */
1921                         /* Flags is 0 or IGNORE_OFF_PAGE.               */
1922                         /* Calls GC_allochblk to do the actual          */
1923                         /* allocation, but also triggers GC and/or      */
1924                         /* heap expansion as appropriate.               */
1925                         /* Does not update GC_bytes_allocd, but does    */
1926                         /* other accounting.                            */
1927 
1928 GC_INNER void GC_freehblk(struct hblk * p);
1929                                 /* Deallocate a heap block and mark it  */
1930                                 /* as invalid.                          */
1931 
1932 /*  Misc GC: */
1933 GC_INNER GC_bool GC_expand_hp_inner(word n);
1934 GC_INNER void GC_start_reclaim(GC_bool abort_if_found);
1935                                 /* Restore unmarked objects to free     */
1936                                 /* lists, or (if abort_if_found is      */
1937                                 /* TRUE) report them.                   */
1938                                 /* Sweeping of small object pages is    */
1939                                 /* largely deferred.                    */
1940 GC_INNER void GC_continue_reclaim(word sz, int kind);
1941                                 /* Sweep pages of the given size and    */
1942                                 /* kind, as long as possible, and       */
1943                                 /* as long as the corr. free list is    */
1944                                 /* empty.  Sz is in granules.           */
1945 
1946 GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
1947                                 /* Reclaim all blocks.  Abort (in a     */
1948                                 /* consistent state) if f returns TRUE. */
1949 GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
1950                                   GC_bool init, ptr_t list,
1951                                   signed_word *count);
1952                                 /* Rebuild free list in hbp with        */
1953                                 /* header hhdr, with objects of size sz */
1954                                 /* bytes.  Add list to the end of the   */
1955                                 /* free list.  Add the number of        */
1956                                 /* reclaimed bytes to *count.           */
1957 GC_INNER GC_bool GC_block_empty(hdr * hhdr);
1958                                 /* Block completely unmarked?   */
1959 GC_INNER int GC_CALLBACK GC_never_stop_func(void);
1960                                 /* Always returns 0 (FALSE).            */
1961 GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func f);
1962 
1963                                 /* Collect; caller must have acquired   */
1964                                 /* lock.  Collection is aborted if f    */
1965                                 /* returns TRUE.  Returns TRUE if it    */
1966                                 /* completes successfully.              */
1967 #define GC_gcollect_inner() \
1968                 (void)GC_try_to_collect_inner(GC_never_stop_func)
1969 
1970 #ifdef THREADS
1971   GC_EXTERN GC_bool GC_in_thread_creation;
1972         /* We may currently be in thread creation or destruction.       */
1973         /* Only set to TRUE while allocation lock is held.              */
1974         /* When set, it is OK to run GC from unknown thread.            */
1975 #endif
1976 
1977 GC_EXTERN GC_bool GC_is_initialized; /* GC_init() has been run. */
1978 
1979 GC_INNER void GC_collect_a_little_inner(int n);
1980                                 /* Do n units worth of garbage          */
1981                                 /* collection work, if appropriate.     */
1982                                 /* A unit is an amount appropriate for  */
1983                                 /* HBLKSIZE bytes of allocation.        */
1984 
1985 GC_INNER void * GC_generic_malloc_inner(size_t lb, int k);
1986                                 /* Allocate an object of the given      */
1987                                 /* kind but assuming lock already held. */
1988 #if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
1989     || !defined(GC_NO_FINALIZATION)
1990   GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
1991                                 /* Allocate an object, where            */
1992                                 /* the client guarantees that there     */
1993                                 /* will always be a pointer to the      */
1994                                 /* beginning of the object while the    */
1995                                 /* object is live.                      */
1996 #endif
1997 
1998 GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
1999                                       GC_bool ignore_off_page, GC_bool retry);
2000 
2001 GC_INNER ptr_t GC_allocobj(size_t sz, int kind);
2002                                 /* Make the indicated                   */
2003                                 /* free list nonempty, and return its   */
2004                                 /* head.  Sz is in granules.            */
2005 
2006 #ifdef GC_ADD_CALLER
2007   /* GC_DBG_EXTRAS is used by GC debug API functions (unlike GC_EXTRAS  */
2008   /* used by GC debug API macros) thus GC_RETURN_ADDR_PARENT (pointing  */
2009   /* to client caller) should be used if possible.                      */
2010 # ifdef GC_HAVE_RETURN_ADDR_PARENT
2011 #  define GC_DBG_EXTRAS GC_RETURN_ADDR_PARENT, NULL, 0
2012 # else
2013 #  define GC_DBG_EXTRAS GC_RETURN_ADDR, NULL, 0
2014 # endif
2015 #else
2016 # define GC_DBG_EXTRAS "unknown", 0
2017 #endif
2018 
2019 #ifdef GC_COLLECT_AT_MALLOC
2020   extern size_t GC_dbg_collect_at_malloc_min_lb;
2021                             /* The variable is exported for debugging.  */
2022 # define GC_DBG_COLLECT_AT_MALLOC(lb) \
2023                 (void)((lb) >= GC_dbg_collect_at_malloc_min_lb ? \
2024                             (GC_gcollect(), 0) : 0)
2025 #else
2026 # define GC_DBG_COLLECT_AT_MALLOC(lb) (void)0
2027 #endif /* !GC_COLLECT_AT_MALLOC */
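/*
 * A typical (hypothetical) use is near the entry of an allocation path:
 *   GC_DBG_COLLECT_AT_MALLOC(lb);
 * so that, when GC_COLLECT_AT_MALLOC is defined, every request of at least
 * GC_dbg_collect_at_malloc_min_lb bytes forces a full GC_gcollect() as a
 * stress-testing aid.
 */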
2028 
2029 /* Allocation routines that bypass the thread local cache.      */
2030 #if defined(THREAD_LOCAL_ALLOC) && defined(GC_GCJ_SUPPORT)
2031     GC_INNER void * GC_core_gcj_malloc(size_t, void *);
2032 #endif
2033 
2034 GC_INNER void GC_init_headers(void);
2035 GC_INNER struct hblkhdr * GC_install_header(struct hblk *h);
2036                                 /* Install a header for block h.        */
2037                                 /* Return 0 on failure, or the header   */
2038                                 /* otherwise.                           */
2039 GC_INNER GC_bool GC_install_counts(struct hblk * h, size_t sz);
2040                                 /* Set up forwarding counts for block   */
2041                                 /* h of size sz.                        */
2042                                 /* Return FALSE on failure.             */
2043 GC_INNER void GC_remove_header(struct hblk * h);
2044                                 /* Remove the header for block h.       */
2045 GC_INNER void GC_remove_counts(struct hblk * h, size_t sz);
2046                                 /* Remove forwarding counts for h.      */
2047 GC_INNER hdr * GC_find_header(ptr_t h);
2048 
2049 GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes);
2050                         /* Add an HBLKSIZE-aligned chunk to the heap.   */
2051 
2052 #ifdef USE_PROC_FOR_LIBRARIES
2053   GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes);
2054                         /* Add a chunk to GC_our_memory.        */
2055                         /* If p == 0, do nothing.               */
2056 #else
2057 # define GC_add_to_our_memory(p, bytes)
2058 #endif
2059 
2060 GC_INNER void GC_print_all_errors(void);
2061                         /* Print smashed and leaked objects, if any.    */
2062                         /* Clear the lists of such objects.             */
2063 
2064 GC_EXTERN void (*GC_check_heap)(void);
2065                         /* Check that all objects in the heap with      */
2066                         /* debugging info are intact.                   */
2067                         /* Add any that are not to GC_smashed list.     */
2068 GC_EXTERN void (*GC_print_all_smashed)(void);
2069                         /* Print GC_smashed if it's not empty.          */
2070                         /* Clear GC_smashed list.                       */
2071 GC_EXTERN void (*GC_print_heap_obj)(ptr_t p);
2072                         /* If possible print (using GC_err_printf)      */
2073                         /* a more detailed description (terminated with */
2074                         /* "\n") of the object referred to by p.        */
2075 
2076 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
2077   void GC_print_address_map(void);
2078                         /* Print an address map of the process.         */
2079 #endif
2080 
2081 #ifndef SHORT_DBG_HDRS
2082   GC_EXTERN GC_bool GC_findleak_delay_free;
2083                         /* Do not immediately deallocate object on      */
2084                         /* free() in the leak-finding mode, just mark   */
2085                         /* it as freed (and deallocate it after GC).    */
2086   GC_INNER GC_bool GC_check_leaked(ptr_t base); /* from dbg_mlc.c */
2087 #endif
2088 
2089 GC_EXTERN GC_bool GC_have_errors; /* We saw a smashed or leaked object. */
2090                                   /* Call error printing routine        */
2091                                   /* occasionally.  It is OK to read it */
2092                                   /* without acquiring the lock.        */
2093 
2094 #define VERBOSE 2
2095 #if !defined(NO_CLOCK) || !defined(SMALL_CONFIG)
2096   /* GC_print_stats should be visible to extra/MacOS.c. */
2097   extern int GC_print_stats;    /* Nonzero generates basic GC log.      */
2098                                 /* VERBOSE generates add'l messages.    */
2099 #else /* SMALL_CONFIG */
2100 # define GC_print_stats 0
2101   /* With a sufficient level of compiler optimization, this should also  */
2102   /* remove the message character strings from the executable.           */
2103 #endif
2104 
2105 #ifdef KEEP_BACK_PTRS
2106   GC_EXTERN long GC_backtraces;
2107   GC_INNER void GC_generate_random_backtrace_no_gc(void);
2108 #endif
2109 
2110 #ifdef LINT2
2111 # define GC_RAND_MAX (~0U >> 1)
2112   GC_API_PRIV long GC_random(void);
2113 #endif
2114 
2115 GC_EXTERN GC_bool GC_print_back_height;
2116 
2117 #ifdef MAKE_BACK_GRAPH
2118   void GC_print_back_graph_stats(void);
2119 #endif
2120 
2121 #ifdef THREADS
2122   GC_INNER void GC_free_inner(void * p);
2123 #endif
2124 
2125 /* Macros used for collector internal allocation.       */
2126 /* These assume the collector lock is held.             */
2127 #ifdef DBG_HDRS_ALL
2128   GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k);
2129   GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
2130                                                                 int k);
2131 # define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
2132 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
2133                GC_debug_generic_malloc_inner_ignore_off_page
2134 # ifdef THREADS
2135     GC_INNER void GC_debug_free_inner(void * p);
2136 #   define GC_INTERNAL_FREE GC_debug_free_inner
2137 # else
2138 #   define GC_INTERNAL_FREE GC_debug_free
2139 # endif
2140 #else
2141 # define GC_INTERNAL_MALLOC GC_generic_malloc_inner
2142 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
2143                GC_generic_malloc_inner_ignore_off_page
2144 # ifdef THREADS
2145 #   define GC_INTERNAL_FREE GC_free_inner
2146 # else
2147 #   define GC_INTERNAL_FREE GC_free
2148 # endif
2149 #endif /* !DBG_HDRS_ALL */
2150 
2151 #ifdef USE_MUNMAP
2152   /* Memory unmapping: */
2153   GC_INNER void GC_unmap_old(void);
2154   GC_INNER void GC_merge_unmapped(void);
2155   GC_INNER void GC_unmap(ptr_t start, size_t bytes);
2156   GC_INNER void GC_remap(ptr_t start, size_t bytes);
2157   GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
2158                              size_t bytes2);
2159 
2160   /* Compute end address for an unmap operation on the indicated block. */
2161   GC_INLINE ptr_t GC_unmap_end(ptr_t start, size_t bytes)
2162   {
2163      return (ptr_t)((word)(start + bytes) & ~(GC_page_size - 1));
2164   }
2165 #endif /* USE_MUNMAP */
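/*
 * Example, assuming GC_page_size == 4096:
 *   GC_unmap_end((ptr_t)0x10000, 10000) == (ptr_t)0x12000
 * since 0x10000 + 10000 == 0x12710 and rounding down to a page boundary
 * keeps the unmapped region entirely inside [start, start + bytes).
 */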
2166 
2167 #ifdef CAN_HANDLE_FORK
2168   GC_EXTERN int GC_handle_fork;
2169                 /* Fork-handling mode:                                  */
2170                 /* 0 means no fork handling requested (but client could */
2171                 /* anyway call fork() provided it is surrounded with    */
2172                 /* GC_atfork_prepare/parent/child calls);               */
2173                 /* -1 means GC tries to use pthread_at_fork if it is    */
2174                 /* available (if it succeeds then GC_handle_fork value  */
2175                 /* is changed to 1), client should nonetheless surround */
2176                 /* fork() with GC_atfork_prepare/parent/child (for the  */
2177                 /* case of pthread_at_fork failure or absence);         */
2178                 /* 1 (or other values) means client fully relies on     */
2179                 /* pthread_at_fork (so if it is missing or failed then  */
2180                 /* abort occurs in GC_init), GC_atfork_prepare and the  */
2181                 /* accompanying routines are no-op in such a case.      */
2182 #endif
2183 
2184 #ifdef GC_DISABLE_INCREMENTAL
2185 # define GC_incremental FALSE
2186 # define GC_auto_incremental FALSE
2187 # define GC_manual_vdb FALSE
2188 # define GC_dirty(p) (void)(p)
2189 # define REACHABLE_AFTER_DIRTY(p) (void)(p)
2190 
2191 #else /* !GC_DISABLE_INCREMENTAL */
2192   GC_EXTERN GC_bool GC_incremental;
2193                         /* Using incremental/generational collection.   */
2194                         /* Assumes dirty bits are being maintained.     */
2195 
2196   /* Virtual dirty bit implementation:            */
2197   /* Each implementation exports the following:   */
2198   GC_INNER void GC_read_dirty(GC_bool output_unneeded);
2199                         /* Retrieve dirty bits.  Set output_unneeded to */
2200                         /* indicate that reading of the retrieved dirty */
2201                         /* bits is not planned till the next retrieval. */
2202   GC_INNER GC_bool GC_page_was_dirty(struct hblk *h);
2203                         /* Read retrieved dirty bits.   */
2204 
2205   GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
2206                                    GC_bool pointerfree);
2207                 /* h is about to be written or allocated.  Ensure that  */
2208                 /* it is not write protected by the virtual dirty bit   */
2209                 /* implementation.  I.e., this is a call that:          */
2210                 /* - hints that [h, h+nblocks) is about to be written;  */
2211                 /* - guarantees that protection is removed;             */
2212                 /* - may speed up some dirty bit implementations;       */
2213                 /* - may be essential if we need to ensure that         */
2214                 /* pointer-free system call buffers in the heap are     */
2215                 /* not protected.                                       */
2216 
2217 # ifdef CAN_HANDLE_FORK
2218 #   if defined(PROC_VDB)
2219       GC_INNER void GC_dirty_update_child(void);
2220                 /* Update pid-specific resources (like /proc file       */
2221                 /* descriptors) needed by the dirty bits implementation */
2222                 /* after fork in the child process.                     */
2223 #   else
2224 #     define GC_dirty_update_child() (void)0
2225 #   endif
2226 # endif /* CAN_HANDLE_FORK */
2227 
2228   GC_INNER GC_bool GC_dirty_init(void);
2229                 /* Returns true if dirty bits are maintained (otherwise */
2230                 /* it is OK to be called again if the client invokes    */
2231                 /* GC_enable_incremental once more).                    */
2232 
2233   GC_EXTERN GC_bool GC_manual_vdb;
2234                 /* The incremental collection is in the manual VDB      */
2235                 /* mode.  Assumes GC_incremental is true.  Should not   */
2236                 /* be modified once GC_incremental is set to true.      */
2237 
2238 # define GC_auto_incremental (GC_incremental && !GC_manual_vdb)
2239 
2240   GC_INNER void GC_dirty_inner(const void *p); /* does not require locking */
2241 # define GC_dirty(p) (GC_manual_vdb ? GC_dirty_inner(p) : (void)0)
2242 # define REACHABLE_AFTER_DIRTY(p) GC_reachable_here(p)
2243 #endif /* !GC_DISABLE_INCREMENTAL */
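/*
 * A rough sketch of the manual-VDB store protocol ("slot" and "q" are
 * hypothetical names for the updated word and the stored pointer):
 *   *slot = q;
 *   GC_dirty(slot);
 *   REACHABLE_AFTER_DIRTY(q);
 * GC_dirty records the page containing the updated location;
 * REACHABLE_AFTER_DIRTY keeps q visibly reachable until the dirty
 * notification has taken effect.
 */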
2244 
2245 /* Same as GC_base but accepts and returns a pointer to a const object. */
2246 #define GC_base_C(p) ((const void *)GC_base((/* no const */ void *)(p)))
2247 
2248 /* Debugging print routines: */
2249 void GC_print_block_list(void);
2250 void GC_print_hblkfreelist(void);
2251 void GC_print_heap_sects(void);
2252 void GC_print_static_roots(void);
2253 
2254 extern word GC_fo_entries; /* should be visible in extra/MacOS.c */
2255 
2256 #ifdef KEEP_BACK_PTRS
2257    GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest);
2258    GC_INNER void GC_marked_for_finalization(ptr_t dest);
2259 #  define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
2260 #  define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
2261 #else
2262 #  define GC_STORE_BACK_PTR(source, dest) (void)(source)
2263 #  define GC_MARKED_FOR_FINALIZATION(dest)
2264 #endif
2265 
2266 /* Make arguments appear live to compiler */
2267 void GC_noop6(word, word, word, word, word, word);
2268 
2269 GC_API void GC_CALL GC_noop1(word);
2270 
2271 #ifndef GC_ATTR_FORMAT_PRINTF
2272 # if GC_GNUC_PREREQ(3, 0)
2273 #   define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked) \
2274         __attribute__((__format__(__printf__, spec_argnum, first_checked)))
2275 # else
2276 #   define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked)
2277 # endif
2278 #endif
2279 
2280 /* Logging and diagnostic output:       */
2281 /* GC_printf is used typically on client explicit print requests.       */
2282 /* For all GC_X_printf routines, it is recommended to put "\n" at       */
2283 /* 'format' string end (for output atomicity).                          */
2284 GC_API_PRIV void GC_printf(const char * format, ...)
2285                         GC_ATTR_FORMAT_PRINTF(1, 2);
2286                         /* A version of printf that does not allocate;  */
2287                         /* total output is limited to about 1 KB.       */
2288                         /* (We use sprintf.  Hopefully that doesn't     */
2289                         /* allocate for long arguments.)                */
2290 GC_API_PRIV void GC_err_printf(const char * format, ...)
2291                         GC_ATTR_FORMAT_PRINTF(1, 2);
2292 
2293 /* Basic logging routine.  Typically, GC_log_printf is called directly  */
2294 /* only inside various DEBUG_x blocks.                                  */
2295 GC_API_PRIV void GC_log_printf(const char * format, ...)
2296                         GC_ATTR_FORMAT_PRINTF(1, 2);
2297 
2298 #ifndef GC_ANDROID_LOG
2299 # define GC_PRINT_STATS_FLAG (GC_print_stats != 0)
2300 # define GC_INFOLOG_PRINTF GC_COND_LOG_PRINTF
2301   /* GC_verbose_log_printf is called only if GC_print_stats is VERBOSE. */
2302 # define GC_verbose_log_printf GC_log_printf
2303 #else
2304   extern GC_bool GC_quiet;
2305 # define GC_PRINT_STATS_FLAG (!GC_quiet)
2306   /* INFO/DBG loggers are enabled even if GC_print_stats is off. */
2307 # ifndef GC_INFOLOG_PRINTF
2308 #   define GC_INFOLOG_PRINTF if (GC_quiet) {} else GC_info_log_printf
2309 # endif
2310   GC_INNER void GC_info_log_printf(const char *format, ...)
2311                         GC_ATTR_FORMAT_PRINTF(1, 2);
2312   GC_INNER void GC_verbose_log_printf(const char *format, ...)
2313                         GC_ATTR_FORMAT_PRINTF(1, 2);
2314 #endif /* GC_ANDROID_LOG */
2315 
2316 /* Convenient macros for GC_[verbose_]log_printf invocation.    */
2317 #define GC_COND_LOG_PRINTF \
2318                 if (EXPECT(!GC_print_stats, TRUE)) {} else GC_log_printf
2319 #define GC_VERBOSE_LOG_PRINTF \
2320     if (EXPECT(GC_print_stats != VERBOSE, TRUE)) {} else GC_verbose_log_printf
2321 #ifndef GC_DBGLOG_PRINTF
2322 # define GC_DBGLOG_PRINTF if (!GC_PRINT_STATS_FLAG) {} else GC_log_printf
2323 #endif
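/*
 * These expand to an if/else, so they are called exactly like printf, e.g.:
 *   GC_COND_LOG_PRINTF("Grew heap to %lu bytes\n",
 *                      (unsigned long)GC_heapsize);
 * which produces output only when basic GC logging is enabled.
 */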
2324 
2325 void GC_err_puts(const char *s);
2326                         /* Write s to stderr, don't buffer, don't add   */
2327                         /* newlines, don't ...                          */
2328 
2329 /* Handy macro for logging size values (of word type) in KiB (rounding  */
2330 /* to nearest value).                                                   */
2331 #define TO_KiB_UL(v) ((unsigned long)(((v) + ((1 << 9) - 1)) >> 10))
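/*
 * E.g. TO_KiB_UL(10000) evaluates to 10; an exact half-KiB value such as
 * 1536 rounds down (to 1), since only (1 << 9) - 1 is added before the shift.
 */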
2332 
2333 GC_EXTERN unsigned GC_fail_count;
2334                         /* How many consecutive GC/expansion failures?  */
2335                         /* Reset by GC_allochblk(); defined in alloc.c. */
2336 
2337 GC_EXTERN long GC_large_alloc_warn_interval; /* defined in misc.c */
2338 
2339 GC_EXTERN signed_word GC_bytes_found;
2340                 /* Number of reclaimed bytes after garbage collection;  */
2341                 /* protected by GC lock; defined in reclaim.c.          */
2342 
2343 #ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
2344   GC_EXTERN word GC_reclaimed_bytes_before_gc;
2345                 /* Number of bytes reclaimed before this        */
2346                 /* collection cycle; used for statistics only.  */
2347 #endif
2348 
2349 #ifdef USE_MUNMAP
2350   GC_EXTERN int GC_unmap_threshold; /* defined in allchblk.c */
2351   GC_EXTERN GC_bool GC_force_unmap_on_gcollect; /* defined in misc.c */
2352 #endif
2353 
2354 #ifdef MSWIN32
2355   GC_EXTERN GC_bool GC_no_win32_dlls; /* defined in os_dep.c */
2356   GC_EXTERN GC_bool GC_wnt;     /* Is Windows NT derivative;    */
2357                                 /* defined and set in os_dep.c. */
2358 #endif
2359 
2360 #ifdef THREADS
2361 # if defined(MSWIN32) || defined(MSWINCE) || defined(MSWIN_XBOX1)
2362     GC_EXTERN CRITICAL_SECTION GC_write_cs; /* defined in misc.c */
2363 # endif
2364 # if defined(GC_ASSERTIONS) && (defined(MSWIN32) || defined(MSWINCE))
2365     GC_EXTERN GC_bool GC_write_disabled;
2366                                 /* defined in win32_threads.c;  */
2367                                 /* protected by GC_write_cs.    */
2368 
2369 # endif
2370 # if defined(GC_DISABLE_INCREMENTAL) || defined(HAVE_LOCKFREE_AO_OR)
2371 #   define GC_acquire_dirty_lock() (void)0
2372 #   define GC_release_dirty_lock() (void)0
2373 # else
2374     /* Acquire the spin lock we use to update dirty bits.       */
2375     /* Threads should not get stopped holding it.  But we may   */
2376     /* acquire and release it during GC_remove_protection call. */
2377 #   define GC_acquire_dirty_lock() \
2378         do { /* empty */ \
2379         } while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET)
2380 #   define GC_release_dirty_lock() AO_CLEAR(&GC_fault_handler_lock)
2381     GC_EXTERN volatile AO_TS_t GC_fault_handler_lock;
2382                                         /* defined in os_dep.c */
2383 # endif
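
/* Intended usage pattern (a sketch; the page-table update shown below  */
/* is illustrative, not verbatim collector code).  The critical section */
/* must be kept very short since this is a spin lock:                   */
/*   GC_acquire_dirty_lock();                                           */
/*   set_pht_entry_from_index(GC_dirty_pages, index);                   */
/*   GC_release_dirty_lock();                                           */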
2384 # ifdef MSWINCE
2385     GC_EXTERN GC_bool GC_dont_query_stack_min;
2386                                 /* Defined and set in os_dep.c. */
2387 # endif
2388 #elif defined(IA64)
2389   GC_EXTERN ptr_t GC_save_regs_ret_val; /* defined in mach_dep.c. */
2390                         /* Previously set to backing store pointer.     */
2391 #endif /* !THREADS */
2392 
2393 #ifdef THREAD_LOCAL_ALLOC
2394   GC_EXTERN GC_bool GC_world_stopped; /* defined in alloc.c */
2395   GC_INNER void GC_mark_thread_local_free_lists(void);
2396 #endif
2397 
2398 #ifdef GC_GCJ_SUPPORT
2399 # ifdef GC_ASSERTIONS
2400     GC_EXTERN GC_bool GC_gcj_malloc_initialized; /* defined in gcj_mlc.c */
2401 # endif
2402   GC_EXTERN ptr_t * GC_gcjobjfreelist;
2403 #endif
2404 
2405 #ifdef MPROTECT_VDB
2406 # ifdef GWW_VDB
2407     GC_INNER GC_bool GC_gww_dirty_init(void);
2408                         /* Returns TRUE if GetWriteWatch is available.  */
2409                         /* May be called repeatedly.                    */
2410 # endif
2411 # ifdef USE_MUNMAP
2412     GC_INNER GC_bool GC_mprotect_dirty_init(void);
2413     GC_INNER GC_bool GC_has_unmapped_memory(void);
2414 # endif
2415 #endif /* MPROTECT_VDB */
2416 
2417 #if defined(CHECKSUMS) || defined(PROC_VDB)
2418   GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h);
2419                         /* Could the page contain valid heap pointers?  */
2420 #endif
2421 
2422 #ifdef CHECKSUMS
2423 # if defined(MPROTECT_VDB) && !defined(DARWIN)
2424     void GC_record_fault(struct hblk * h);
2425 # endif
2426   void GC_check_dirty(void);
2427 #endif
2428 
2429 GC_INNER void GC_default_print_heap_obj_proc(ptr_t p);
2430 
2431 GC_INNER void GC_setpagesize(void);
2432 
2433 GC_INNER void GC_initialize_offsets(void);      /* defined in obj_map.c */
2434 
2435 GC_INNER void GC_bl_init(void);
2436 GC_INNER void GC_bl_init_no_interiors(void);    /* defined in blacklst.c */
2437 
2438 GC_INNER void GC_start_debugging_inner(void);   /* defined in dbg_mlc.c. */
2439                         /* Should not be called if GC_debugging_started. */
2440 
2441 /* Store debugging info into p.  Return displaced pointer.      */
2442 /* Assumes we hold the allocation lock.                         */
2443 GC_INNER void *GC_store_debug_info_inner(void *p, word sz, const char *str,
2444                                          int linenum);
2445 
2446 #ifdef REDIRECT_MALLOC
2447 # ifdef GC_LINUX_THREADS
2448     GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
2449                                                 /* from os_dep.c */
2450 # endif
2451 #elif defined(USE_WINALLOC)
2452   GC_INNER void GC_add_current_malloc_heap(void);
2453 #endif /* !REDIRECT_MALLOC */
2454 
2455 #ifdef MAKE_BACK_GRAPH
2456   GC_INNER void GC_build_back_graph(void);
2457   GC_INNER void GC_traverse_back_graph(void);
2458 #endif
2459 
2460 #ifdef MSWIN32
2461   GC_INNER void GC_init_win32(void);
2462 #endif
2463 
2464 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
2465   GC_INNER void * GC_roots_present(ptr_t);
2466         /* The type is a lie, since the real type doesn't make sense here, */
2467         /* and we only test for NULL.                                      */
2468 #endif
2469 
2470 #ifdef GC_WIN32_THREADS
2471   GC_INNER void GC_get_next_stack(char *start, char * limit, char **lo,
2472                                   char **hi);
2473 # if defined(MPROTECT_VDB) && !defined(CYGWIN32)
2474     GC_INNER void GC_set_write_fault_handler(void);
2475 # endif
2476 # if defined(WRAP_MARK_SOME) && !defined(GC_PTHREADS)
2477     GC_INNER GC_bool GC_started_thread_while_stopped(void);
        /* Did we invalidate the mark phase with an unexpected thread start? */
2479 # endif
2480 #endif /* GC_WIN32_THREADS */
2481 
2482 #ifdef THREADS
2483   GC_INNER void GC_reset_finalizer_nested(void);
2484   GC_INNER unsigned char *GC_check_finalizer_nested(void);
2485   GC_INNER void GC_do_blocking_inner(ptr_t data, void * context);
2486   GC_INNER void GC_push_all_stacks(void);
2487 # ifdef USE_PROC_FOR_LIBRARIES
2488     GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi);
2489 # endif
2490 # ifdef IA64
2491     GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound);
2492 # endif
2493 #endif /* THREADS */
2494 
2495 #ifdef DYNAMIC_LOADING
2496   GC_INNER GC_bool GC_register_main_static_data(void);
2497 # ifdef DARWIN
2498     GC_INNER void GC_init_dyld(void);
2499 # endif
2500 #endif /* DYNAMIC_LOADING */
2501 
2502 #ifdef SEARCH_FOR_DATA_START
2503   GC_INNER void GC_init_linux_data_start(void);
2504   void * GC_find_limit(void *, int);
2505 #endif
2506 
2507 #if defined(NETBSD) && defined(__ELF__)
2508   GC_INNER void GC_init_netbsd_elf(void);
2509   void * GC_find_limit(void *, int);
2510 #endif
2511 
2512 #ifdef UNIX_LIKE
2513   GC_INNER void GC_set_and_save_fault_handler(void (*handler)(int));
2514 #endif
2515 
2516 #ifdef NEED_PROC_MAPS
2517 # if defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES)
2518     GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
2519                                       char **prot, unsigned int *maj_dev,
2520                                       char **mapping_name);
2521 # endif
2522 # if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
2523     GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,
2524                                           ptr_t *startp, ptr_t *endp);
2525 # endif
2526   GC_INNER char *GC_get_maps(void); /* from os_dep.c */
2527 #endif /* NEED_PROC_MAPS */
2528 
2529 #ifdef GC_ASSERTIONS
2530 # define GC_ASSERT(expr) \
2531               do { \
2532                 if (!(expr)) { \
2533                   GC_err_printf("Assertion failure: %s:%d\n", \
2534                                 __FILE__, __LINE__); \
2535                   ABORT("assertion failure"); \
2536                 } \
2537               } while (0)
2538   GC_INNER word GC_compute_large_free_bytes(void);
2539   GC_INNER word GC_compute_root_size(void);
2540 #else
2541 # define GC_ASSERT(expr)
2542 #endif
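
/* GC_ASSERT expands to nothing unless GC_ASSERTIONS is defined, so its */
/* argument must be free of side effects.  Typical (illustrative) uses: */
/*   GC_ASSERT(I_HOLD_LOCK());                                          */
/*   GC_ASSERT(GC_compute_root_size() == GC_root_size);                 */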
2543 
/* Check an assertion at compile time.                  */
2545 #if _MSC_VER >= 1700
2546 # define GC_STATIC_ASSERT(expr) \
2547                 static_assert(expr, "static assertion failed: " #expr)
2548 #elif defined(static_assert) && __STDC_VERSION__ >= 201112L
2549 # define GC_STATIC_ASSERT(expr) static_assert(expr, #expr)
2550 #elif defined(mips) && !defined(__GNUC__)
/* DOB: MIPSPro C gets an internal error taking the sizeof of an array
   type.  This code works correctly (the ugliness is needed to avoid
   "unused variable" warnings). */
2553 # define GC_STATIC_ASSERT(expr) \
2554     do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0)
2555 #else
2556   /* The error message for failure is a bit baroque, but ...    */
2557 # define GC_STATIC_ASSERT(expr) (void)sizeof(char[(expr)? 1 : -1])
2558 #endif
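
/* For instance (an illustrative check, not necessarily present in the  */
/* sources), basic layout assumptions can be verified at compile time:  */
/*   GC_STATIC_ASSERT(sizeof(ptr_t) == sizeof(word));                   */
/* All variants are usable inside a function body, which is where such  */
/* checks are typically placed.                                         */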
2559 
/* Runtime check that an argument declared as non-null is actually not null. */
2561 #if GC_GNUC_PREREQ(4, 0)
  /* Workaround for the Clang tautological-pointer-compare warning.     */
2563 # define NONNULL_ARG_NOT_NULL(arg) (*(volatile void **)&(arg) != NULL)
2564 #else
2565 # define NONNULL_ARG_NOT_NULL(arg) (NULL != (arg))
2566 #endif
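
/* Illustrative usage (a sketch): assert on a parameter that carries a  */
/* non-null attribute without provoking the compiler warning:           */
/*   GC_ASSERT(NONNULL_ARG_NOT_NULL(p));     (p is a hypothetical arg)  */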
2567 
2568 #define COND_DUMP_CHECKS \
2569           do { \
2570             GC_ASSERT(GC_compute_large_free_bytes() == GC_large_free_bytes); \
2571             GC_ASSERT(GC_compute_root_size() == GC_root_size); \
2572           } while (0)
2573 
2574 #ifndef NO_DEBUGGING
2575   GC_EXTERN GC_bool GC_dump_regularly;
2576                                 /* Generate regular debugging dumps.    */
2577 # define COND_DUMP if (EXPECT(GC_dump_regularly, FALSE)) { \
2578                         GC_dump_named(NULL); \
2579                    } else COND_DUMP_CHECKS
2580 #else
2581 # define COND_DUMP COND_DUMP_CHECKS
2582 #endif
2583 
2584 #if defined(PARALLEL_MARK)
2585   /* We need additional synchronization facilities from the thread      */
2586   /* support.  We believe these are less performance critical           */
2587   /* than the main garbage collector lock; standard pthreads-based      */
2588   /* implementations should be sufficient.                              */
2589 
2590 # define GC_markers_m1 GC_parallel
2591                         /* Number of mark threads we would like to have */
2592                         /* excluding the initiating thread.             */
2593 
  /* The mark lock and condition variable.  If the GC lock is also      */
  /* acquired, the GC lock must be acquired first.  The mark lock       */
  /* protects some variables used by the parallel marker as well as     */
  /* GC_fl_builder_count, below.  GC_notify_all_marker() is called      */
  /* whenever the state of the parallel marker changes in some          */
  /* significant way (see gc_mark.h for details); such events include   */
  /* incrementing GC_mark_no.  GC_notify_all_builder() is called when   */
  /* GC_fl_builder_count reaches 0.                                     */
2604 
2605   GC_INNER void GC_wait_for_markers_init(void);
2606   GC_INNER void GC_acquire_mark_lock(void);
2607   GC_INNER void GC_release_mark_lock(void);
2608   GC_INNER void GC_notify_all_builder(void);
2609   GC_INNER void GC_wait_for_reclaim(void);
2610 
2611   GC_EXTERN signed_word GC_fl_builder_count; /* Protected by mark lock. */
2612 
2613   GC_INNER void GC_notify_all_marker(void);
2614   GC_INNER void GC_wait_marker(void);
2615   GC_EXTERN word GC_mark_no;            /* Protected by mark lock.      */
2616 
2617   GC_INNER void GC_help_marker(word my_mark_no);
              /* Try to help out the parallel marker for mark cycle     */
              /* my_mark_no.  Returns when the mark cycle finishes or   */
              /* was already done, or when there was nothing to do for  */
              /* some other reason.                                     */
2622 
2623   GC_INNER void GC_start_mark_threads_inner(void);
2624 #endif /* PARALLEL_MARK */
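
/* A sketch of how the free-list builder accounting above is intended   */
/* to be used (illustrative, not verbatim collector code):              */
/*   GC_acquire_mark_lock();                                            */
/*   ++GC_fl_builder_count;          register as a free-list builder    */
/*   GC_release_mark_lock();                                            */
/*   ... build free lists without holding the allocation lock ...       */
/*   GC_acquire_mark_lock();                                            */
/*   if (--GC_fl_builder_count == 0) GC_notify_all_builder();           */
/*   GC_release_mark_lock();                                            */
/* GC_wait_for_reclaim() blocks until the count drops back to zero.     */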
2625 
2626 #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) && !defined(NACL) \
2627     && !defined(GC_DARWIN_THREADS) && !defined(SIG_SUSPEND)
2628   /* We define the thread suspension signal here, so that we can refer  */
2629   /* to it in the dirty bit implementation, if necessary.  Ideally we   */
  /* would allocate a (real-time?) signal using a standard mechanism;   */
  /* unfortunately, no such mechanism exists.  (There is one            */
2632   /* in Linux glibc, but it's not exported.)  Thus we continue to use   */
2633   /* the same hard-coded signals we've always used.                     */
2634 # if (defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)) \
2635      && !defined(GC_USESIGRT_SIGNALS)
2636 #   if defined(SPARC) && !defined(SIGPWR)
2637       /* SPARC/Linux doesn't properly define SIGPWR in <signal.h>.      */
2638       /* It is aliased to SIGLOST in asm/signal.h, though.              */
2639 #     define SIG_SUSPEND SIGLOST
2640 #   else
2641       /* Linuxthreads itself uses SIGUSR1 and SIGUSR2.                  */
2642 #     define SIG_SUSPEND SIGPWR
2643 #   endif
2644 # elif defined(GC_OPENBSD_THREADS)
2645 #   ifndef GC_OPENBSD_UTHREADS
2646 #     define SIG_SUSPEND SIGXFSZ
2647 #   endif
2648 # elif defined(_SIGRTMIN) && !defined(CPPCHECK)
#   define SIG_SUSPEND (_SIGRTMIN + 6)
2650 # else
#   define SIG_SUSPEND (SIGRTMIN + 6)
2652 # endif
2653 #endif /* GC_PTHREADS && !SIG_SUSPEND */
2654 
2655 #if defined(GC_PTHREADS) && !defined(GC_SEM_INIT_PSHARED)
2656 # define GC_SEM_INIT_PSHARED 0
2657 #endif
2658 
/* Some macros providing a setjmp variant that works across     */
/* signal handlers where possible, and a couple of routines to  */
/* facilitate catching accesses to bad addresses when that is   */
/* possible/needed.                                             */
2663 #if (defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))) \
2664     && !defined(GC_NO_SIGSETJMP)
2665 # if defined(SUNOS5SIGS) && !defined(FREEBSD) && !defined(LINUX)
2666     EXTERN_C_END
2667 #   include <sys/siginfo.h>
2668     EXTERN_C_BEGIN
2669 # endif
2670   /* Define SETJMP and friends to be the version that restores  */
2671   /* the signal mask.                                           */
2672 # define SETJMP(env) sigsetjmp(env, 1)
2673 # define LONGJMP(env, val) siglongjmp(env, val)
2674 # define JMP_BUF sigjmp_buf
2675 #else
2676 # ifdef ECOS
2677 #   define SETJMP(env) hal_setjmp(env)
2678 # else
2679 #   define SETJMP(env) setjmp(env)
2680 # endif
2681 # define LONGJMP(env, val) longjmp(env, val)
2682 # define JMP_BUF jmp_buf
2683 #endif /* !UNIX_LIKE || GC_NO_SIGSETJMP */
2684 
/* Do we need the GC_find_limit machinery to find the end of a  */
/* data segment?                                                */
2687 #if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START) \
2688     || ((defined(SVR4) || defined(AIX) || defined(DGUX) \
2689          || (defined(LINUX) && defined(SPARC))) && !defined(PCR))
2690 # define NEED_FIND_LIMIT
2691 #endif
2692 
2693 #if defined(DATASTART_USES_BSDGETDATASTART)
2694   EXTERN_C_END
2695 # include <machine/trap.h>
2696   EXTERN_C_BEGIN
2697 # if !defined(PCR)
2698 #   define NEED_FIND_LIMIT
2699 # endif
2700   GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t, ptr_t);
2701 # define DATASTART_IS_FUNC
2702 #endif /* DATASTART_USES_BSDGETDATASTART */
2703 
2704 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
2705     && !defined(NEED_FIND_LIMIT)
2706   /* Used by GC_init_netbsd_elf() in os_dep.c. */
2707 # define NEED_FIND_LIMIT
2708 #endif
2709 
2710 #if defined(IA64) && !defined(NEED_FIND_LIMIT)
2711 # define NEED_FIND_LIMIT
2712      /* May be needed for register backing store base. */
2713 #endif
2714 
2715 #if defined(NEED_FIND_LIMIT) \
2716      || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
2717   GC_EXTERN JMP_BUF GC_jmp_buf;
2718 
2719   /* Set up a handler for address faults which will longjmp to  */
2720   /* GC_jmp_buf.                                                */
2721   GC_INNER void GC_setup_temporary_fault_handler(void);
2722   /* Undo the effect of GC_setup_temporary_fault_handler.       */
2723   GC_INNER void GC_reset_fault_handler(void);
2724 #endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES */
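
/* The intended probing pattern looks roughly like this (a sketch only; */
/* GC_find_limit in os_dep.c is the real client):                       */
/*   GC_setup_temporary_fault_handler();                                */
/*   if (SETJMP(GC_jmp_buf) == 0) {                                     */
/*     result = *(volatile word *)probe_addr;    may fault and longjmp  */
/*   } else {                                                           */
/*     ... probe_addr was not accessible ...                            */
/*   }                                                                  */
/*   GC_reset_fault_handler();                                          */
/* (probe_addr and result are hypothetical locals.)                     */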
2725 
2726 /* Some convenience macros for cancellation support. */
2727 #if defined(CANCEL_SAFE)
2728 # if defined(GC_ASSERTIONS) \
2729      && (defined(USE_COMPILER_TLS) \
2730          || (defined(LINUX) && !defined(ARM32) && GC_GNUC_PREREQ(3, 3) \
2731              || defined(HPUX) /* and probably others ... */))
2732     extern __thread unsigned char GC_cancel_disable_count;
2733 #   define NEED_CANCEL_DISABLE_COUNT
2734 #   define INCR_CANCEL_DISABLE() ++GC_cancel_disable_count
2735 #   define DECR_CANCEL_DISABLE() --GC_cancel_disable_count
2736 #   define ASSERT_CANCEL_DISABLED() GC_ASSERT(GC_cancel_disable_count > 0)
2737 # else
2738 #   define INCR_CANCEL_DISABLE()
2739 #   define DECR_CANCEL_DISABLE()
2740 #   define ASSERT_CANCEL_DISABLED() (void)0
2741 # endif /* GC_ASSERTIONS & ... */
2742 # define DISABLE_CANCEL(state) \
2743         do { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \
2744           INCR_CANCEL_DISABLE(); } while (0)
2745 # define RESTORE_CANCEL(state) \
2746         do { ASSERT_CANCEL_DISABLED(); \
2747           pthread_setcancelstate(state, NULL); \
2748           DECR_CANCEL_DISABLE(); } while (0)
2749 #else /* !CANCEL_SAFE */
2750 # define DISABLE_CANCEL(state) (void)0
2751 # define RESTORE_CANCEL(state) (void)0
2752 # define ASSERT_CANCEL_DISABLED() (void)0
2753 #endif /* !CANCEL_SAFE */
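
/* Illustrative usage (a sketch): bracket a blocking region so that a   */
/* pthread cancellation request cannot fire while GC invariants are     */
/* temporarily broken:                                                  */
/*   int cancel_state;                                                  */
/*   DISABLE_CANCEL(cancel_state);                                      */
/*   ... code that must not be cancelled asynchronously ...             */
/*   RESTORE_CANCEL(cancel_state);                                      */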
2754 
2755 EXTERN_C_END
2756 
2757 #endif /* GC_PRIVATE_H */
2758