1 /*
2  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3  * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
4  * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
5  * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
6  *
7  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
9  *
10  * Permission is hereby granted to use or copy this program
11  * for any purpose,  provided the above notices are retained on all copies.
12  * Permission to modify the code and to distribute modified code is granted,
13  * provided the above notices are retained, and a notice that the code was
14  * modified is included with the above copyright notice.
15  */
16 
17 # include "private/gc_priv.h"
18 
19 # if defined(LINUX) && !defined(POWERPC)
20 #   include <linux/version.h>
21 #   if (LINUX_VERSION_CODE <= 0x10400)
      /* Ugly hack to get struct sigcontext_struct definition.  Required */
      /* for some early 1.3.X releases.  Will hopefully go away soon.    */
      /* In some later Linux releases, asm/sigcontext.h may have to      */
      /* be included instead.                                            */
26 #     define __KERNEL__
27 #     include <asm/signal.h>
28 #     undef __KERNEL__
29 #   else
30       /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31       /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in     */
32       /* prototypes, so we have to include the top-level sigcontext.h to    */
33       /* make sure the former gets defined to be the latter if appropriate. */
34 #     include <features.h>
35 #     if 2 <= __GLIBC__
36 #       if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 	  /* glibc 2.1 no longer has sigcontext.h.  But signal.h	*/
38 	  /* has the right declaration for glibc 2.1.			*/
39 #         include <sigcontext.h>
40 #       endif /* 0 == __GLIBC_MINOR__ */
41 #     else /* not 2 <= __GLIBC__ */
42         /* libc5 doesn't have <sigcontext.h>: go directly with the kernel   */
43         /* one.  Check LINUX_VERSION_CODE to see which we should reference. */
44 #       include <asm/sigcontext.h>
45 #     endif /* 2 <= __GLIBC__ */
46 #   endif
47 # endif
48 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
49     && !defined(MSWINCE)
50 #   include <sys/types.h>
51 #   if !defined(MSWIN32) && !defined(SUNOS4)
52 #   	include <unistd.h>
53 #   endif
54 # endif
55 
56 # include <stdio.h>
57 # if defined(MSWINCE)
58 #   define SIGSEGV 0 /* value is irrelevant */
59 # else
60 #   include <signal.h>
61 # endif
62 
63 /* Blatantly OS dependent routines, except for those that are related 	*/
64 /* to dynamic loading.							*/
65 
66 # if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
67 #   define NEED_FIND_LIMIT
68 # endif
69 
70 # if !defined(STACKBOTTOM) && defined(HEURISTIC2)
71 #   define NEED_FIND_LIMIT
72 # endif
73 
74 # if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
75 #   define NEED_FIND_LIMIT
76 # endif
77 
78 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
79       || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
80 #   define NEED_FIND_LIMIT
81 # endif
82 
83 #if defined(FREEBSD) && defined(I386)
84 #  include <machine/trap.h>
85 #  if !defined(PCR)
86 #    define NEED_FIND_LIMIT
87 #  endif
88 #endif
89 
90 #ifdef NEED_FIND_LIMIT
91 #   include <setjmp.h>
92 #endif
93 
94 #ifdef AMIGA
95 # define GC_AMIGA_DEF
96 # include "AmigaOS.c"
97 # undef GC_AMIGA_DEF
98 #endif
99 
100 #if defined(MSWIN32) || defined(MSWINCE)
101 # define WIN32_LEAN_AND_MEAN
102 # define NOSERVICE
103 # include <windows.h>
104 #endif
105 
106 #ifdef MACOS
107 # include <Processes.h>
108 #endif
109 
110 #ifdef IRIX5
111 # include <sys/uio.h>
112 # include <malloc.h>   /* for locking */
113 #endif
114 #ifdef USE_MMAP
115 # include <sys/types.h>
116 # include <sys/mman.h>
117 # include <sys/stat.h>
118 #endif
119 
120 #ifdef UNIX_LIKE
121 # include <fcntl.h>
122 #endif
123 
124 #if defined(SUNOS5SIGS) || defined (HURD) || defined(LINUX)
125 # ifdef SUNOS5SIGS
126 #  include <sys/siginfo.h>
127 # endif
128 # undef setjmp
129 # undef longjmp
130 # define setjmp(env) sigsetjmp(env, 1)
131 # define longjmp(env, val) siglongjmp(env, val)
132 # define jmp_buf sigjmp_buf
133 #endif
134 
135 #ifdef DARWIN
136 /* for get_etext and friends */
137 #include <mach-o/getsect.h>
138 #endif
139 
140 #ifdef DJGPP
141   /* Apparently necessary for djgpp 2.01.  May cause problems with	*/
142   /* other versions.							*/
143   typedef long unsigned int caddr_t;
144 #endif
145 
146 #ifdef PCR
147 # include "il/PCR_IL.h"
148 # include "th/PCR_ThCtl.h"
149 # include "mm/PCR_MM.h"
150 #endif
151 
152 #if !defined(NO_EXECUTE_PERMISSION)
153 # define OPT_PROT_EXEC PROT_EXEC
154 #else
155 # define OPT_PROT_EXEC 0
156 #endif
157 
158 #if defined(LINUX) && \
159     (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))
160 
161 /* We need to parse /proc/self/maps, either to find dynamic libraries,	*/
162 /* and/or to find the register backing store base (IA64).  Do it once	*/
163 /* here.								*/
164 
#define READ read

/* Repeatedly perform a read call until the buffer is filled or	*/
/* we encounter EOF.  Returns the number of bytes read (possibly	*/
/* short, on EOF), or a negative value on the first read error.	*/
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;
    ssize_t result;

    /* The cast avoids a signed/unsigned comparison; count is a	*/
    /* buffer size we chose, so it fits in ssize_t.			*/
    while (num_read < (ssize_t)count) {
	result = READ(fd, buf + num_read, count - num_read);
	if (result < 0) return result;	/* propagate the error	*/
	if (result == 0) break;		/* EOF: short count	*/
	num_read += result;
    }
    return num_read;
}
182 
183 /*
184  * Apply fn to a buffer containing the contents of /proc/self/maps.
185  * Return the result of fn or, if we failed, 0.
186  */
187 
GC_apply_to_maps(word (* fn)(char *))188 word GC_apply_to_maps(word (*fn)(char *))
189 {
190     int f;
191     int result;
192     int maps_size;
193     char maps_temp[32768];
194     char *maps_buf;
195 
196     /* Read /proc/self/maps	*/
197         /* Note that we may not allocate, and thus can't use stdio.	*/
198         f = open("/proc/self/maps", O_RDONLY);
199         if (-1 == f) return 0;
200 	/* stat() doesn't work for /proc/self/maps, so we have to
201 	   read it to find out how large it is... */
202 	maps_size = 0;
203 	do {
204 	    result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
205 	    if (result <= 0) return 0;
206 	    maps_size += result;
207 	} while (result == sizeof(maps_temp));
208 
209 	if (maps_size > sizeof(maps_temp)) {
210 	    /* If larger than our buffer, close and re-read it. */
211 	    close(f);
212 	    f = open("/proc/self/maps", O_RDONLY);
213 	    if (-1 == f) return 0;
214 	    maps_buf = alloca(maps_size);
215 	    if (NULL == maps_buf) return 0;
216 	    result = GC_repeat_read(f, maps_buf, maps_size);
217 	    if (result <= 0) return 0;
218 	} else {
219 	    /* Otherwise use the fixed size buffer */
220 	    maps_buf = maps_temp;
221 	}
222 
223 	close(f);
224         maps_buf[result] = '\0';
225 
226     /* Apply fn to result. */
227 	return fn(maps_buf);
228 }
229 
230 #endif /* Need GC_apply_to_maps */
231 
232 #if defined(LINUX) && (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64))
233 //
234 //  GC_parse_map_entry parses an entry from /proc/self/maps so we can
235 //  locate all writable data segments that belong to shared libraries.
236 //  The format of one of these entries and the fields we care about
237 //  is as follows:
238 //  XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
239 //  ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
240 //  start    end      prot          maj_dev
241 //  0        9        18            32
242 //
243 //  For 64 bit ABIs:
244 //  0	     17	      34	    56
245 //
246 //  The parser is called with a pointer to the entry and the return value
247 //  is either NULL or is advanced to the next entry(the byte after the
248 //  trailing '\n'.)
249 //
250 #if CPP_WORDSZ == 32
251 # define OFFSET_MAP_START   0
252 # define OFFSET_MAP_END     9
253 # define OFFSET_MAP_PROT   18
254 # define OFFSET_MAP_MAJDEV 32
255 # define ADDR_WIDTH 	    8
256 #endif
257 
258 #if CPP_WORDSZ == 64
259 # define OFFSET_MAP_START   0
260 # define OFFSET_MAP_END    17
261 # define OFFSET_MAP_PROT   34
262 # define OFFSET_MAP_MAJDEV 56
263 # define ADDR_WIDTH 	   16
264 #endif
265 
266 /*
267  * Assign various fields of the first line in buf_ptr to *start, *end,
268  * *prot_buf and *maj_dev.  Only *prot_buf may be set for unwritable maps.
269  */
GC_parse_map_entry(char * buf_ptr,word * start,word * end,char * prot_buf,unsigned int * maj_dev)270 char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
271                                 char *prot_buf, unsigned int *maj_dev)
272 {
273     int i;
274     char *tok;
275 
276     if (buf_ptr == NULL || *buf_ptr == '\0') {
277         return NULL;
278     }
279 
280     memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4);
281     				/* do the protections first. */
282     prot_buf[4] = '\0';
283 
284     if (prot_buf[1] == 'w') {/* we can skip all of this if it's not writable. */
285 
286         tok = buf_ptr;
287         buf_ptr[OFFSET_MAP_START+ADDR_WIDTH] = '\0';
288         *start = strtoul(tok, NULL, 16);
289 
290         tok = buf_ptr+OFFSET_MAP_END;
291         buf_ptr[OFFSET_MAP_END+ADDR_WIDTH] = '\0';
292         *end = strtoul(tok, NULL, 16);
293 
294         buf_ptr += OFFSET_MAP_MAJDEV;
295         tok = buf_ptr;
296         while (*buf_ptr != ':') buf_ptr++;
297         *buf_ptr++ = '\0';
298         *maj_dev = strtoul(tok, NULL, 16);
299     }
300 
301     while (*buf_ptr && *buf_ptr++ != '\n');
302 
303     return buf_ptr;
304 }
305 
306 #endif /* Need to parse /proc/self/maps. */
307 
308 #if defined(SEARCH_FOR_DATA_START)
309   /* The I386 case can be handled without a search.  The Alpha case	*/
310   /* used to be handled differently as well, but the rules changed	*/
311   /* for recent Linux versions.  This seems to be the easiest way to	*/
312   /* cover all versions.						*/
313 
314 # ifdef LINUX
315     /* Some Linux distributions arrange to define __data_start.  Some	*/
316     /* define data_start as a weak symbol.  The latter is technically	*/
317     /* broken, since the user program may define data_start, in which	*/
318     /* case we lose.  Nonetheless, we try both, prefering __data_start.	*/
319     /* We assume gcc-compatible pragmas.	*/
320 #   pragma weak __data_start
321     extern int __data_start[];
322 #   pragma weak data_start
323     extern int data_start[];
324 # endif /* LINUX */
325   extern int _end[];
326 
327   ptr_t GC_data_start;
328 
GC_init_linux_data_start()329   void GC_init_linux_data_start()
330   {
331     extern ptr_t GC_find_limit();
332 
333 #   ifdef LINUX
334       /* Try the easy approaches first:	*/
335       if ((ptr_t)__data_start != 0) {
336 	  GC_data_start = (ptr_t)(__data_start);
337 	  return;
338       }
339       if ((ptr_t)data_start != 0) {
340 	  GC_data_start = (ptr_t)(data_start);
341 	  return;
342       }
343 #   endif /* LINUX */
344     GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
345   }
346 #endif
347 
348 # ifdef ECOS
349 
# ifndef ECOS_GC_MEMORY_SIZE
# define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

// setjmp() function, as described in ANSI para 7.6.1.1
#define setjmp( __env__ )  hal_setjmp( __env__ )

// FIXME: This is a simple way of allocating memory which is
// compatible with ECOS early releases.  Later releases use a more
// sophisticated means of allocating memory than this simple static
// allocator, but this method is at least bound to work.
static char memory[ECOS_GC_MEMORY_SIZE];
static char *brk = memory;

// Trivial sbrk() replacement backed by the static buffer above.
// Returns the old break on success, or NULL if the request would
// run past the end of the buffer.  The bounds check is done before
// moving the break, so we never form an out-of-range pointer
// (doing so, as the previous version did, is undefined behavior).
static void *tiny_sbrk(ptrdiff_t increment)
{
  void *p = brk;

  if (increment > (memory + sizeof memory) - brk)
    return NULL;

  brk += increment;
  return p;
}
#define sbrk tiny_sbrk
379 # endif /* ECOS */
380 
381 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
382   ptr_t GC_data_start;
383 
GC_init_netbsd_elf()384   void GC_init_netbsd_elf()
385   {
386     extern ptr_t GC_find_limit();
387     extern char **environ;
388 	/* This may need to be environ, without the underscore, for	*/
389 	/* some versions.						*/
390     GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
391   }
392 #endif
393 
394 # ifdef OS2
395 
396 # include <stddef.h>
397 
398 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
399 
/* Layout of the old MSDOS executable header.  Only the magic number	*/
/* and the file offset of the new-style (LX) header are used here.	*/
struct exe_hdr {
    unsigned short      magic_number;   /* EMAGIC ("MZ")		*/
    unsigned short      padding[29];    /* fields we don't need	*/
    long                new_exe_offset; /* offset of the LX header	*/
};

#define E_MAGIC(x)      (x).magic_number
#define EMAGIC          0x5A4D          /* DOS executable magic	*/
#define E_LFANEW(x)     (x).new_exe_offset

/* Layout of the OS/2 32-bit LX executable header, up to and		*/
/* including the object-table fields that we read below.		*/
struct e32_exe {
    unsigned char       magic_number[2]; /* "LX"			*/
    unsigned char       byte_order;
    unsigned char       word_order;
    unsigned long       exe_format_level;
    unsigned short      cpu;
    unsigned short      os;
    unsigned long       padding1[13];
    unsigned long       object_table_offset; /* from start of LX hdr */
    unsigned long       object_count;
    unsigned long       padding2[31];
};

#define E32_MAGIC1(x)   (x).magic_number[0]
#define E32MAGIC1       'L'
#define E32_MAGIC2(x)   (x).magic_number[1]
#define E32MAGIC2       'X'
#define E32_BORDER(x)   (x).byte_order
#define E32LEBO         0               /* little-endian byte order	*/
#define E32_WORDER(x)   (x).word_order
#define E32LEWO         0               /* little-endian word order	*/
#define E32_CPU(x)      (x).cpu
#define E32CPU286       1
#define E32_OBJTAB(x)   (x).object_table_offset
#define E32_OBJCNT(x)   (x).object_count

/* One entry of the LX object (segment) table.	*/
struct o32_obj {
    unsigned long       size;
    unsigned long       base;
    unsigned long       flags;          /* OBJREAD/OBJWRITE/...	*/
    unsigned long       pagemap;
    unsigned long       mapsize;
    unsigned long       reserved;
};

#define O32_FLAGS(x)    (x).flags
#define OBJREAD         0x0001L
#define OBJWRITE        0x0002L
#define OBJINVALID      0x0080L
#define O32_SIZE(x)     (x).size
#define O32_BASE(x)     (x).base
451 
452 # else  /* IBM's compiler */
453 
454 /* A kludge to get around what appears to be a header file bug */
455 # ifndef WORD
456 #   define WORD unsigned short
457 # endif
458 # ifndef DWORD
459 #   define DWORD unsigned long
460 # endif
461 
462 # define EXE386 1
463 # include <newexe.h>
464 # include <exe386.h>
465 
466 # endif  /* __IBMC__ */
467 
468 # define INCL_DOSEXCEPTIONS
469 # define INCL_DOSPROCESS
470 # define INCL_DOSERRORS
471 # define INCL_DOSMODULEMGR
472 # define INCL_DOSMEMMGR
473 # include <os2.h>
474 
475 
476 /* Disable and enable signals during nontrivial allocations	*/
477 
GC_disable_signals(void)478 void GC_disable_signals(void)
479 {
480     ULONG nest;
481 
482     DosEnterMustComplete(&nest);
483     if (nest != 1) ABORT("nested GC_disable_signals");
484 }
485 
GC_enable_signals(void)486 void GC_enable_signals(void)
487 {
488     ULONG nest;
489 
490     DosExitMustComplete(&nest);
491     if (nest != 0) ABORT("GC_enable_signals");
492 }
493 
494 
495 # else
496 
497 #  if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
498       && !defined(MSWINCE) \
499       && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
500       && !defined(NOSYS) && !defined(ECOS)
501 
502 #   if defined(sigmask) && !defined(UTS4) && !defined(HURD)
503 	/* Use the traditional BSD interface */
504 #	define SIGSET_T int
505 #	define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
506 #	define SIG_FILL(set)  (set) = 0x7fffffff
507     	  /* Setting the leading bit appears to provoke a bug in some	*/
508     	  /* longjmp implementations.  Most systems appear not to have	*/
509     	  /* a signal 32.						*/
510 #	define SIGSETMASK(old, new) (old) = sigsetmask(new)
511 #   else
512 	/* Use POSIX/SYSV interface	*/
513 #	define SIGSET_T sigset_t
514 #	define SIG_DEL(set, signal) sigdelset(&(set), (signal))
515 #	define SIG_FILL(set) sigfillset(&set)
516 #	define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
517 #   endif
518 
519 static GC_bool mask_initialized = FALSE;
520 
521 static SIGSET_T new_mask;
522 
523 static SIGSET_T old_mask;
524 
525 static SIGSET_T dummy;
526 
527 #if defined(PRINTSTATS) && !defined(THREADS)
528 # define CHECK_SIGNALS
529   int GC_sig_disabled = 0;
530 #endif
531 
GC_disable_signals()532 void GC_disable_signals()
533 {
534     if (!mask_initialized) {
535     	SIG_FILL(new_mask);
536 
537 	SIG_DEL(new_mask, SIGSEGV);
538 	SIG_DEL(new_mask, SIGILL);
539 	SIG_DEL(new_mask, SIGQUIT);
540 #	ifdef SIGBUS
541 	    SIG_DEL(new_mask, SIGBUS);
542 #	endif
543 #	ifdef SIGIOT
544 	    SIG_DEL(new_mask, SIGIOT);
545 #	endif
546 #	ifdef SIGEMT
547 	    SIG_DEL(new_mask, SIGEMT);
548 #	endif
549 #	ifdef SIGTRAP
550 	    SIG_DEL(new_mask, SIGTRAP);
551 #	endif
552 	mask_initialized = TRUE;
553     }
554 #   ifdef CHECK_SIGNALS
555 	if (GC_sig_disabled != 0) ABORT("Nested disables");
556 	GC_sig_disabled++;
557 #   endif
558     SIGSETMASK(old_mask,new_mask);
559 }
560 
GC_enable_signals()561 void GC_enable_signals()
562 {
563 #   ifdef CHECK_SIGNALS
564 	if (GC_sig_disabled != 1) ABORT("Unmatched enable");
565 	GC_sig_disabled--;
566 #   endif
567     SIGSETMASK(dummy,old_mask);
568 }
569 
570 #  endif  /* !PCR */
571 
572 # endif /*!OS/2 */
573 
574 /* Ivan Demakov: simplest way (to me) */
575 #if defined (DOS4GW)
GC_disable_signals()576   void GC_disable_signals() { }
GC_enable_signals()577   void GC_enable_signals() { }
578 #endif
579 
580 /* Find the page size */
581 word GC_page_size;
582 
583 # if defined(MSWIN32) || defined(MSWINCE)
GC_setpagesize()584   void GC_setpagesize()
585   {
586     GetSystemInfo(&GC_sysinfo);
587     GC_page_size = GC_sysinfo.dwPageSize;
588   }
589 
590 # else
591 #   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
592        || defined(USE_MUNMAP)
GC_setpagesize()593 	void GC_setpagesize()
594 	{
595 	    GC_page_size = GETPAGESIZE();
596 	}
597 #   else
598 	/* It's acceptable to fake it. */
GC_setpagesize()599 	void GC_setpagesize()
600 	{
601 	    GC_page_size = HBLKSIZE;
602 	}
603 #   endif
604 # endif
605 
606 /*
607  * Find the base of the stack.
608  * Used only in single-threaded environment.
609  * With threads, GC_mark_roots needs to know how to do this.
610  * Called with allocator lock held.
611  */
612 # if defined(MSWIN32) || defined(MSWINCE)
613 # define is_writable(prot) ((prot) == PAGE_READWRITE \
614 			    || (prot) == PAGE_WRITECOPY \
615 			    || (prot) == PAGE_EXECUTE_READWRITE \
616 			    || (prot) == PAGE_EXECUTE_WRITECOPY)
617 /* Return the number of bytes that are writable starting at p.	*/
618 /* The pointer p is assumed to be page aligned.			*/
619 /* If base is not 0, *base becomes the beginning of the 	*/
620 /* allocation region containing p.				*/
GC_get_writable_length(ptr_t p,ptr_t * base)621 word GC_get_writable_length(ptr_t p, ptr_t *base)
622 {
623     MEMORY_BASIC_INFORMATION buf;
624     word result;
625     word protect;
626 
627     result = VirtualQuery(p, &buf, sizeof(buf));
628     if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
629     if (base != 0) *base = (ptr_t)(buf.AllocationBase);
630     protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
631     if (!is_writable(protect)) {
632         return(0);
633     }
634     if (buf.State != MEM_COMMIT) return(0);
635     return(buf.RegionSize);
636 }
637 
GC_get_stack_base()638 ptr_t GC_get_stack_base()
639 {
640     int dummy;
641     ptr_t sp = (ptr_t)(&dummy);
642     ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
643     word size = GC_get_writable_length(trunc_sp, 0);
644 
645     return(trunc_sp + size);
646 }
647 
648 
649 # endif /* MS Windows */
650 
651 # ifdef BEOS
652 # include <kernel/OS.h>
GC_get_stack_base()653 ptr_t GC_get_stack_base(){
654 	thread_info th;
655 	get_thread_info(find_thread(NULL),&th);
656 	return th.stack_end;
657 }
658 # endif /* BEOS */
659 
660 
661 # ifdef OS2
662 
GC_get_stack_base()663 ptr_t GC_get_stack_base()
664 {
665     PTIB ptib;
666     PPIB ppib;
667 
668     if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
669     	GC_err_printf0("DosGetInfoBlocks failed\n");
670     	ABORT("DosGetInfoBlocks failed\n");
671     }
672     return((ptr_t)(ptib -> tib_pstacklimit));
673 }
674 
675 # endif /* OS2 */
676 
677 # ifdef AMIGA
678 #   define GC_AMIGA_SB
679 #   include "AmigaOS.c"
680 #   undef GC_AMIGA_SB
681 # endif /* AMIGA */
682 
683 # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
684 
685 #   ifdef __STDC__
686 	typedef void (*handler)(int);
687 #   else
688 	typedef void (*handler)();
689 #   endif
690 
691 #   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) || defined(HURD)
692 	static struct sigaction old_segv_act;
693 #	if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) || defined(HURD)
694 	    static struct sigaction old_bus_act;
695 #	endif
696 #   else
697         static handler old_segv_handler, old_bus_handler;
698 #   endif
699 
700 #   ifdef __STDC__
GC_set_and_save_fault_handler(handler h)701       void GC_set_and_save_fault_handler(handler h)
702 #   else
703       void GC_set_and_save_fault_handler(h)
704       handler h;
705 #   endif
706     {
707 #     if defined(SUNOS5SIGS) || defined(IRIX5)  \
708         || defined(OSF1) || defined(HURD)
709 	  struct sigaction	act;
710 
711 	  act.sa_handler	= h;
712 #	  ifdef SUNOS5SIGS
713             act.sa_flags          = SA_RESTART | SA_NODEFER;
714 #         else
715             act.sa_flags          = SA_RESTART;
716 #	  endif
717           /* The presence of SA_NODEFER represents yet another gross    */
718           /* hack.  Under Solaris 2.3, siglongjmp doesn't appear to     */
719           /* interact correctly with -lthread.  We hide the confusion   */
720           /* by making sure that signal handling doesn't affect the     */
721           /* signal mask.                                               */
722 
723 	  (void) sigemptyset(&act.sa_mask);
724 #	  ifdef GC_IRIX_THREADS
725 		/* Older versions have a bug related to retrieving and	*/
726 		/* and setting a handler at the same time.		*/
727 	        (void) sigaction(SIGSEGV, 0, &old_segv_act);
728 	        (void) sigaction(SIGSEGV, &act, 0);
729 #	  else
730 	        (void) sigaction(SIGSEGV, &act, &old_segv_act);
731 #		if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
732 		   || defined(HPUX) || defined(HURD)
733 		    /* Under Irix 5.x or HP/UX, we may get SIGBUS.	*/
734 		    /* Pthreads doesn't exist under Irix 5.x, so we	*/
735 		    /* don't have to worry in the threads case.		*/
736 		    (void) sigaction(SIGBUS, &act, &old_bus_act);
737 #		endif
738 #	  endif	/* GC_IRIX_THREADS */
739 #	else
740     	  old_segv_handler = signal(SIGSEGV, h);
741 #	  ifdef SIGBUS
742 	    old_bus_handler = signal(SIGBUS, h);
743 #	  endif
744 #	endif
745     }
746 # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
747 
748 # ifdef NEED_FIND_LIMIT
749   /* Some tools to implement HEURISTIC2	*/
750 #   define MIN_PAGE_SIZE 256	/* Smallest conceivable page size, bytes */
751     /* static */ jmp_buf GC_jmp_buf;
752 
753     /*ARGSUSED*/
GC_fault_handler(sig)754     void GC_fault_handler(sig)
755     int sig;
756     {
757         longjmp(GC_jmp_buf, 1);
758     }
759 
GC_setup_temporary_fault_handler()760     void GC_setup_temporary_fault_handler()
761     {
762 	GC_set_and_save_fault_handler(GC_fault_handler);
763     }
764 
GC_reset_fault_handler()765     void GC_reset_fault_handler()
766     {
767 #     if defined(SUNOS5SIGS) || defined(IRIX5) \
768 	 || defined(OSF1) || defined(HURD)
769 	(void) sigaction(SIGSEGV, &old_segv_act, 0);
770 #	if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
771 	   || defined(HPUX) || defined(HURD)
772 	    (void) sigaction(SIGBUS, &old_bus_act, 0);
773 #	endif
774 #      else
775 	(void) signal(SIGSEGV, old_segv_handler);
776 #	ifdef SIGBUS
777 	  (void) signal(SIGBUS, old_bus_handler);
778 #	endif
779 #     endif
780     }
781 
782     /* Return the first nonaddressible location > p (up) or 	*/
783     /* the smallest location q s.t. [q,p) is addressable (!up).	*/
784     /* We assume that p (up) or p-1 (!up) is addressable.	*/
GC_find_limit(p,up)785     ptr_t GC_find_limit(p, up)
786     ptr_t p;
787     GC_bool up;
788     {
789       static VOLATILE ptr_t result;
790   		/* Needs to be static, since otherwise it may not be	*/
791   		/* preserved across the longjmp.  Can safely be 	*/
792   		/* static since it's only called once, with the		*/
793   		/* allocation lock held.				*/
794 
795 
796       GC_setup_temporary_fault_handler();
797       if (setjmp(GC_jmp_buf) == 0) {
798 	result = (ptr_t)(((word)(p))
799 			 & ~(MIN_PAGE_SIZE-1));
800 	for (;;) {
801 	  if (up) {
802 	    result += MIN_PAGE_SIZE;
803 	  } else {
804 	    result -= MIN_PAGE_SIZE;
805 	  }
806 	  GC_noop1((word)(*result));
807 	}
808       }
809       GC_reset_fault_handler();
810       if (!up) {
811 	result += MIN_PAGE_SIZE;
812       }
813       return(result);
814     }
815 # endif
816 
817 #if defined(ECOS) || defined(NOSYS)
GC_get_stack_base()818   ptr_t GC_get_stack_base()
819   {
820     return STACKBOTTOM;
821   }
822 #endif
823 
824 #ifdef LINUX_STACKBOTTOM
825 
826 #include <sys/types.h>
827 #include <sys/stat.h>
828 #include <ctype.h>
829 
830 # define STAT_SKIP 27   /* Number of fields preceding startstack	*/
831 			/* field in /proc/self/stat			*/
832 
833 # pragma weak __libc_stack_end
834   extern ptr_t __libc_stack_end;
835 
836 # ifdef IA64
837     /* Try to read the backing store base from /proc/self/maps.	*/
838     /* We look for the writable mapping with a 0 major device,  */
839     /* which is	as close to our frame as possible, but below it.*/
backing_store_base_from_maps(char * maps)840     static word backing_store_base_from_maps(char *maps)
841     {
842       char prot_buf[5];
843       char *buf_ptr = maps;
844       word start, end;
845       unsigned int maj_dev;
846       word current_best = 0;
847       word dummy;
848 
849       for (;;) {
850         buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
851 	if (buf_ptr == NULL) return current_best;
852 	if (prot_buf[1] == 'w' && maj_dev == 0) {
853 	    if (end < (word)(&dummy) && start > current_best) current_best = start;
854 	}
855       }
856       return current_best;
857     }
858 
backing_store_base_from_proc(void)859     static word backing_store_base_from_proc(void)
860     {
861         return GC_apply_to_maps(backing_store_base_from_maps);
862     }
863 
864 #   pragma weak __libc_ia64_register_backing_store_base
865     extern ptr_t __libc_ia64_register_backing_store_base;
866 
GC_get_register_stack_base(void)867     ptr_t GC_get_register_stack_base(void)
868     {
869       if (0 != &__libc_ia64_register_backing_store_base
870 	  && 0 != __libc_ia64_register_backing_store_base) {
871 	/* Glibc 2.2.4 has a bug such that for dynamically linked	*/
872 	/* executables __libc_ia64_register_backing_store_base is 	*/
873 	/* defined but uninitialized during constructor calls.  	*/
874 	/* Hence we check for both nonzero address and value.		*/
875 	return __libc_ia64_register_backing_store_base;
876       } else {
877 	word result = backing_store_base_from_proc();
878 	if (0 == result) {
879 	  /* Use dumb heuristics.  Works only for default configuration. */
880 	  result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
881 	  result += BACKING_STORE_ALIGNMENT - 1;
882 	  result &= ~(BACKING_STORE_ALIGNMENT - 1);
883 	  /* Verify that it's at least readable.  If not, we goofed. */
884 	  GC_noop1(*(word *)result);
885 	}
886 	return (ptr_t)result;
887       }
888     }
889 # endif
890 
GC_linux_stack_base(void)891   ptr_t GC_linux_stack_base(void)
892   {
893     /* We read the stack base value from /proc/self/stat.  We do this	*/
894     /* using direct I/O system calls in order to avoid calling malloc   */
895     /* in case REDIRECT_MALLOC is defined.				*/
896 #   define STAT_BUF_SIZE 4096
897 #   define STAT_READ read
898 	  /* Should probably call the real read, if read is wrapped.	*/
899     char stat_buf[STAT_BUF_SIZE];
900     int f;
901     char c;
902     word result = 0;
903     size_t i, buf_offset = 0;
904 
905     /* First try the easy way.  This should work for glibc 2.2	*/
906       if (0 != &__libc_stack_end) {
907 #       ifdef IA64
908 	  /* Some versions of glibc set the address 16 bytes too	*/
909 	  /* low while the initialization code is running.		*/
910 	  if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
911 	    return __libc_stack_end + 0x10;
912 	  } /* Otherwise it's not safe to add 16 bytes and we fall	*/
913 	    /* back to using /proc.					*/
914 #	else
915 	  return __libc_stack_end;
916 #	endif
917       }
918     f = open("/proc/self/stat", O_RDONLY);
919     if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
920 	ABORT("Couldn't read /proc/self/stat");
921     }
922     c = stat_buf[buf_offset++];
923     /* Skip the required number of fields.  This number is hopefully	*/
924     /* constant across all Linux implementations.			*/
925       for (i = 0; i < STAT_SKIP; ++i) {
926 	while (isspace(c)) c = stat_buf[buf_offset++];
927 	while (!isspace(c)) c = stat_buf[buf_offset++];
928       }
929     while (isspace(c)) c = stat_buf[buf_offset++];
930     while (isdigit(c)) {
931       result *= 10;
932       result += c - '0';
933       c = stat_buf[buf_offset++];
934     }
935     close(f);
936     if (result < 0x10000000) ABORT("Absurd stack bottom value");
937     return (ptr_t)result;
938   }
939 
940 #endif /* LINUX_STACKBOTTOM */
941 
942 #ifdef FREEBSD_STACKBOTTOM
943 
944 /* This uses an undocumented sysctl call, but at least one expert 	*/
945 /* believes it will stay.						*/
946 
947 #include <unistd.h>
948 #include <sys/types.h>
949 #include <sys/sysctl.h>
950 
GC_freebsd_stack_base(void)951   ptr_t GC_freebsd_stack_base(void)
952   {
953     int nm[2] = {CTL_KERN, KERN_USRSTACK};
954     ptr_t base;
955     size_t len = sizeof(ptr_t);
956     int r = sysctl(nm, 2, &base, &len, NULL, 0);
957 
958     if (r) ABORT("Error getting stack base");
959 
960     return base;
961   }
962 
963 #endif /* FREEBSD_STACKBOTTOM */
964 
965 #if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
966     && !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS)
967 
/* Return the cold end (bottom) of the main thread's stack, using	*/
/* whichever of STACKBOTTOM, HEURISTIC1, HEURISTIC2,			*/
/* LINUX_STACKBOTTOM or FREEBSD_STACKBOTTOM the port selected.		*/
ptr_t GC_get_stack_base()
{
#   if defined(HEURISTIC1) || defined(HEURISTIC2) || \
       defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM)
    word dummy;		/* Its address approximates the current stack top. */
    ptr_t result;
#   endif

#   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)

#   ifdef STACKBOTTOM
	return(STACKBOTTOM);
#   else
#	ifdef HEURISTIC1
	   /* Round a current stack address to a STACK_GRAN boundary	*/
	   /* in the direction opposite to stack growth.		*/
#	   ifdef STACK_GROWS_DOWN
	     result = (ptr_t)((((word)(&dummy))
	     		       + STACKBOTTOM_ALIGNMENT_M1)
			      & ~STACKBOTTOM_ALIGNMENT_M1);
#	   else
	     result = (ptr_t)(((word)(&dummy))
			      & ~STACKBOTTOM_ALIGNMENT_M1);
#	   endif
#	endif /* HEURISTIC1 */
#	ifdef LINUX_STACKBOTTOM
	   result = GC_linux_stack_base();
#	endif
#	ifdef FREEBSD_STACKBOTTOM
	   result = GC_freebsd_stack_base();
#	endif
#	ifdef HEURISTIC2
	    /* Probe past the current stack for the first unmapped	*/
	    /* address, clamped by HEURISTIC2_LIMIT if defined.		*/
#	    ifdef STACK_GROWS_DOWN
		result = GC_find_limit((ptr_t)(&dummy), TRUE);
#           	ifdef HEURISTIC2_LIMIT
		    if (result > HEURISTIC2_LIMIT
		        && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
		            result = HEURISTIC2_LIMIT;
		    }
#	        endif
#	    else
		result = GC_find_limit((ptr_t)(&dummy), FALSE);
#           	ifdef HEURISTIC2_LIMIT
		    if (result < HEURISTIC2_LIMIT
		        && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
		            result = HEURISTIC2_LIMIT;
		    }
#	        endif
#	    endif

#	endif /* HEURISTIC2 */
#	ifdef STACK_GROWS_DOWN
	    /* A 0 result is awkward for callers; substitute an address	*/
	    /* near the top of the address space instead.		*/
	    if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
#	endif
    	return(result);
#   endif /* STACKBOTTOM */
}
1023 
1024 # endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS, !NOSYS, !ECOS */
1025 
1026 /*
1027  * Register static data segment(s) as roots.
1028  * If more data segments are added later then they need to be registered
1029  * add that point (as we do with SunOS dynamic loading),
1030  * or GC_mark_roots needs to check for them (as we do with PCR).
1031  * Called with allocator lock held.
1032  */
1033 
1034 # ifdef OS2
1035 
GC_register_data_segments()1036 void GC_register_data_segments()
1037 {
1038     PTIB ptib;
1039     PPIB ppib;
1040     HMODULE module_handle;
1041 #   define PBUFSIZ 512
1042     UCHAR path[PBUFSIZ];
1043     FILE * myexefile;
1044     struct exe_hdr hdrdos;	/* MSDOS header.	*/
1045     struct e32_exe hdr386;	/* Real header for my executable */
1046     struct o32_obj seg;	/* Currrent segment */
1047     int nsegs;
1048 
1049 
1050     if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
1051     	GC_err_printf0("DosGetInfoBlocks failed\n");
1052     	ABORT("DosGetInfoBlocks failed\n");
1053     }
1054     module_handle = ppib -> pib_hmte;
1055     if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
1056     	GC_err_printf0("DosQueryModuleName failed\n");
1057     	ABORT("DosGetInfoBlocks failed\n");
1058     }
1059     myexefile = fopen(path, "rb");
1060     if (myexefile == 0) {
1061         GC_err_puts("Couldn't open executable ");
1062         GC_err_puts(path); GC_err_puts("\n");
1063         ABORT("Failed to open executable\n");
1064     }
1065     if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
1066         GC_err_puts("Couldn't read MSDOS header from ");
1067         GC_err_puts(path); GC_err_puts("\n");
1068         ABORT("Couldn't read MSDOS header");
1069     }
1070     if (E_MAGIC(hdrdos) != EMAGIC) {
1071         GC_err_puts("Executable has wrong DOS magic number: ");
1072         GC_err_puts(path); GC_err_puts("\n");
1073         ABORT("Bad DOS magic number");
1074     }
1075     if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
1076         GC_err_puts("Seek to new header failed in ");
1077         GC_err_puts(path); GC_err_puts("\n");
1078         ABORT("Bad DOS magic number");
1079     }
1080     if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
1081         GC_err_puts("Couldn't read MSDOS header from ");
1082         GC_err_puts(path); GC_err_puts("\n");
1083         ABORT("Couldn't read OS/2 header");
1084     }
1085     if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
1086         GC_err_puts("Executable has wrong OS/2 magic number:");
1087         GC_err_puts(path); GC_err_puts("\n");
1088         ABORT("Bad OS/2 magic number");
1089     }
1090     if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
1091         GC_err_puts("Executable %s has wrong byte order: ");
1092         GC_err_puts(path); GC_err_puts("\n");
1093         ABORT("Bad byte order");
1094     }
1095     if ( E32_CPU(hdr386) == E32CPU286) {
1096         GC_err_puts("GC can't handle 80286 executables: ");
1097         GC_err_puts(path); GC_err_puts("\n");
1098         EXIT();
1099     }
1100     if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
1101     	      SEEK_SET) != 0) {
1102         GC_err_puts("Seek to object table failed: ");
1103         GC_err_puts(path); GC_err_puts("\n");
1104         ABORT("Seek to object table failed");
1105     }
1106     for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
1107       int flags;
1108       if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
1109         GC_err_puts("Couldn't read obj table entry from ");
1110         GC_err_puts(path); GC_err_puts("\n");
1111         ABORT("Couldn't read obj table entry");
1112       }
1113       flags = O32_FLAGS(seg);
1114       if (!(flags & OBJWRITE)) continue;
1115       if (!(flags & OBJREAD)) continue;
1116       if (flags & OBJINVALID) {
1117           GC_err_printf0("Object with invalid pages?\n");
1118           continue;
1119       }
1120       GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
1121     }
1122 }
1123 
1124 # else /* !OS2 */
1125 
1126 # if defined(MSWIN32) || defined(MSWINCE)
1127 
1128 # ifdef MSWIN32
1129   /* Unfortunately, we have to handle win32s very differently from NT, 	*/
1130   /* Since VirtualQuery has very different semantics.  In particular,	*/
1131   /* under win32s a VirtualQuery call on an unmapped page returns an	*/
1132   /* invalid result.  Under NT, GC_register_data_segments is a noop and	*/
1133   /* all real work is done by GC_register_dynamic_libraries.  Under	*/
1134   /* win32s, we cannot find the data segments associated with dll's.	*/
1135   /* We register the main data segment here.				*/
  /* TRUE when running under win32s (set by GC_init_win32), in which	*/
  /* case dynamic-library data segments cannot be found and the main	*/
  /* data segment is registered directly.				*/
  GC_bool GC_no_win32_dlls = FALSE;
  	/* This used to be set for gcc, to avoid dealing with		*/
  	/* the structured exception handling issues.  But we now have	*/
  	/* assembly code to do that right.				*/
1140 
GC_init_win32()1141   void GC_init_win32()
1142   {
1143     /* if we're running under win32s, assume that no DLLs will be loaded */
1144     DWORD v = GetVersion();
1145     GC_no_win32_dlls |= ((v & 0x80000000) && (v & 0xff) <= 3);
1146   }
1147 
1148   /* Return the smallest address a such that VirtualQuery		*/
1149   /* returns correct results for all addresses between a and start.	*/
1150   /* Assumes VirtualQuery returns correct information for start.	*/
  ptr_t GC_least_described_address(ptr_t start)
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    LPVOID limit;
    ptr_t p;
    LPVOID q;

    limit = GC_sysinfo.lpMinimumApplicationAddress;
    /* Start from the page containing start.				*/
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
	/* Step to the page below p; stop on pointer underflow or	*/
	/* when we reach the bottom of the application address space.	*/
    	q = (LPVOID)(p - GC_page_size);
    	if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
	/* Stop once VirtualQuery no longer yields a valid region.	*/
    	result = VirtualQuery(q, &buf, sizeof(buf));
    	if (result != sizeof(buf) || buf.AllocationBase == 0) break;
	/* q is described; restart from the base of its allocation,	*/
	/* skipping over the whole region in one step.			*/
    	p = (ptr_t)(buf.AllocationBase);
    }
    return(p);
  }
1170 # endif
1171 
1172 # ifndef REDIRECT_MALLOC
1173   /* We maintain a linked list of AllocationBase values that we know	*/
1174   /* correspond to malloc heap sections.  Currently this is only called */
1175   /* during a GC.  But there is some hope that for long running		*/
1176   /* programs we will eventually see most heap sections.		*/
1177 
1178   /* In the long run, it would be more reliable to occasionally walk 	*/
1179   /* the malloc heap with HeapWalk on the default heap.  But that	*/
1180   /* apparently works only for NT-based Windows. 			*/
1181 
1182   /* In the long run, a better data structure would also be nice ...	*/
  /* Singly linked list of AllocationBase values known to correspond	*/
  /* to malloc heap sections; head pointer starts empty.		*/
  struct GC_malloc_heap_list {
    void * allocation_base;	/* AllocationBase reported by VirtualQuery. */
    struct GC_malloc_heap_list *next;
  } *GC_malloc_heap_l = 0;
1187 
1188   /* Is p the base of one of the malloc heap sections we already know	*/
1189   /* about?								*/
GC_is_malloc_heap_base(ptr_t p)1190   GC_bool GC_is_malloc_heap_base(ptr_t p)
1191   {
1192     struct GC_malloc_heap_list *q = GC_malloc_heap_l;
1193 
1194     while (0 != q) {
1195       if (q -> allocation_base == p) return TRUE;
1196       q = q -> next;
1197     }
1198     return FALSE;
1199   }
1200 
GC_get_allocation_base(void * p)1201   void *GC_get_allocation_base(void *p)
1202   {
1203     MEMORY_BASIC_INFORMATION buf;
1204     DWORD result = VirtualQuery(p, &buf, sizeof(buf));
1205     if (result != sizeof(buf)) {
1206       ABORT("Weird VirtualQuery result");
1207     }
1208     return buf.AllocationBase;
1209   }
1210 
  /* Approximate largest root section seen so far; bounds the malloc	*/
  /* probing loop in GC_add_current_malloc_heap.			*/
  size_t GC_max_root_size = 100000;	/* Appr. largest root size.	*/
1212 
GC_add_current_malloc_heap()1213   void GC_add_current_malloc_heap()
1214   {
1215     struct GC_malloc_heap_list *new_l =
1216                  malloc(sizeof(struct GC_malloc_heap_list));
1217     void * candidate = GC_get_allocation_base(new_l);
1218 
1219     if (new_l == 0) return;
1220     if (GC_is_malloc_heap_base(candidate)) {
1221       /* Try a little harder to find malloc heap.			*/
1222 	size_t req_size = 10000;
1223 	do {
1224 	  void *p = malloc(req_size);
1225 	  if (0 == p) { free(new_l); return; }
1226  	  candidate = GC_get_allocation_base(p);
1227 	  free(p);
1228 	  req_size *= 2;
1229 	} while (GC_is_malloc_heap_base(candidate)
1230 	         && req_size < GC_max_root_size/10 && req_size < 500000);
1231 	if (GC_is_malloc_heap_base(candidate)) {
1232 	  free(new_l); return;
1233 	}
1234     }
1235 #   ifdef CONDPRINT
1236       if (GC_print_stats)
1237 	  GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
1238                      candidate);
1239 #   endif
1240     new_l -> allocation_base = candidate;
1241     new_l -> next = GC_malloc_heap_l;
1242     GC_malloc_heap_l = new_l;
1243   }
1244 # endif /* REDIRECT_MALLOC */
1245 
1246   /* Is p the start of either the malloc heap, or of one of our */
1247   /* heap sections?						*/
  GC_bool GC_is_heap_base (ptr_t p)
  {

     unsigned i;

#    ifndef REDIRECT_MALLOC
       static word last_gc_no = -1;

       /* At most once per collection, re-probe for new system malloc	*/
       /* heap sections.						*/
       if (last_gc_no != GC_gc_no) {
	 GC_add_current_malloc_heap();
	 last_gc_no = GC_gc_no;
       }
       /* Track the largest root size seen; it bounds the probing in	*/
       /* GC_add_current_malloc_heap.					*/
       if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
       if (GC_is_malloc_heap_base(p)) return TRUE;
#    endif
     /* Check our own heap sections as well.				*/
     for (i = 0; i < GC_n_heap_bases; i++) {
         if (GC_heap_bases[i] == p) return TRUE;
     }
     return FALSE ;
  }
1268 
1269 # ifdef MSWIN32
  /* Walk the address space upward from the least address describable	*/
  /* around static_root, merging adjacent committed+writable regions	*/
  /* and registering each maximal run as a root.  Only used under	*/
  /* win32s (no-op unless GC_no_win32_dlls).				*/
  void GC_register_root_section(ptr_t static_root)
  {
      MEMORY_BASIC_INFORMATION buf;
      DWORD result;
      DWORD protect;
      LPVOID p;
      char * base;
      char * limit, * new_limit;

      if (!GC_no_win32_dlls) return;
      p = base = limit = GC_least_described_address(static_root);
      while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
	/* Stop at an undescribed region or one of our own heap	*/
	/* sections (those are traced separately).			*/
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            if ((char *)p == limit) {
		/* Contiguous with the current run; extend it.		*/
                limit = new_limit;
            } else {
		/* Gap found: flush the finished run, start a new one.	*/
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
      }
      /* Flush any final pending run.					*/
      if (base != limit) GC_add_roots_inner(base, limit, FALSE);
  }
1302 #endif
1303 
  /* Under win32s, register the main program's data segment as a root	*/
  /* (GC_register_root_section returns immediately unless		*/
  /* GC_no_win32_dlls).  Under NT flavors this is effectively a no-op:	*/
  /* the real work is done by GC_register_dynamic_libraries.		*/
  void GC_register_data_segments()
  {
#     ifdef MSWIN32
      static char dummy;	/* Its address lies in the main data segment. */
      GC_register_root_section((ptr_t)(&dummy));
#     endif
  }
1311 
1312 # else /* !OS2 && !Windows */
1313 
1314 # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1315       || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
/* Heuristically locate the start of the data segment on SVR4-like	*/
/* systems: assume it begins at the address corresponding to etext's	*/
/* page offset, but on the page following etext.  If a write probe	*/
/* there faults, fall back to searching backward from DATAEND.		*/
ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
    		    & ~(sizeof(word) - 1);
    	/* etext rounded to word boundary	*/
    word next_page = ((text_end + (word)max_page_size - 1)
    		      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isnt equivalent to just adding		*/
    /* max_page_size to &etext if &etext is at a page boundary	*/

    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
    	/* Try writing to the address.	*/
    	*result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
    	/* We got here via a longjmp.  The address is not readable.	*/
    	/* This is known to happen under Solaris 2.4 + gcc, which place	*/
    	/* string constants in the text segment, but after etext.	*/
    	/* Use plan B.  Note that we now know there is a gap between	*/
    	/* text and data segments, so plan A bought us something.	*/
    	result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
}
1346 # endif
1347 
1348 # if defined(FREEBSD) && defined(I386) && !defined(PCR)
1349 /* Its unclear whether this should be identical to the above, or 	*/
1350 /* whether it should apply to non-X86 architectures.			*/
1351 /* For now we don't assume that there is always an empty page after	*/
1352 /* etext.  But in some cases there actually seems to be slightly more.  */
1353 /* This also deals with holes between read-only data and writable data.	*/
/* FreeBSD/x86 variant: assume data may begin immediately at the word-	*/
/* rounded etext, and verify by read-probing each page up to DATAEND.	*/
/* If any probe faults, fall back to searching backward from DATAEND.	*/
ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
		     & ~(sizeof(word) - 1);
	/* etext rounded to word boundary	*/
    VOLATILE word next_page = (text_end + (word)max_page_size - 1)
			      & ~((word)max_page_size - 1);
    VOLATILE ptr_t result = (ptr_t)text_end;
    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
	/* Try reading at the address.				*/
	/* This should happen before there is another thread.	*/
	for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
	    *(VOLATILE char *)next_page;
	GC_reset_fault_handler();
    } else {
	GC_reset_fault_handler();
	/* As above, we go to plan B	*/
	result = GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return(result);
}
1378 
1379 # endif
1380 
1381 
1382 #ifdef AMIGA
1383 
1384 #  define GC_AMIGA_DS
1385 #  include "AmigaOS.c"
1386 #  undef GC_AMIGA_DS
1387 
1388 #else /* !OS2 && !Windows && !AMIGA */
1389 
/* Generic Unix/MacOS variant: register the static data segment(s)	*/
/* bounded by the platform's DATASTART/DATAEND (and DATASTART2/	*/
/* DATAEND2 where defined) as GC roots.  Called with the allocator	*/
/* lock held.								*/
void GC_register_data_segments()
{
#   if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
	/* As of Solaris 2.3, the Solaris threads implementation	*/
	/* allocates the data structure for the initial thread with	*/
	/* sbrk at process startup.  It needs to be scanned, so that	*/
	/* we don't lose some malloc allocated data structures		*/
	/* hanging from it.  We're on thin ice here ...			*/
        extern caddr_t sbrk();

	GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
#     else
	GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
#       if defined(DATASTART2)
         GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
    {
#   if defined(THINK_C)
	extern void* GC_MacGetDataStart(void);
	/* globals begin above stack and end at a5. */
	GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
			   (ptr_t)LMGetCurrentA5(), FALSE);
#   else
#     if defined(__MWERKS__)
#       if !__POWERPC__
	  extern void* GC_MacGetDataStart(void);
	  /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#         if __option(far_data)
	  extern void* GC_MacGetDataEnd(void);
#         endif
	  /* globals begin above stack and end at a5. */
	  GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
          		     (ptr_t)LMGetCurrentA5(), FALSE);
	  /* MATTHEW: Handle Far Globals */
#         if __option(far_data)
      /* Far globals follow the QD globals: */
	  GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
          		     (ptr_t)GC_MacGetDataEnd(), FALSE);
#         endif
#       else
	  extern char __data_start__[], __data_end__[];
	  GC_add_roots_inner((ptr_t)&__data_start__,
	  		     (ptr_t)&__data_end__, FALSE);
#       endif /* __POWERPC__ */
#     endif /* __MWERKS__ */
#   endif /* !THINK_C */
    }
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may  */
    /* change.								*/
}
1446 
1447 # endif  /* ! AMIGA */
1448 # endif  /* ! MSWIN32 && ! MSWINCE*/
1449 # endif  /* ! OS2 */
1450 
1451 /*
1452  * Auxiliary routines for obtaining memory from OS.
1453  */
1454 
1455 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
1456 	&& !defined(MSWIN32) && !defined(MSWINCE) \
1457 	&& !defined(MACOS) && !defined(DOS4GW)
1458 
1459 # ifdef SUNOS4
1460     extern caddr_t sbrk();
1461 # endif
1462 # ifdef __STDC__
1463 #   define SBRK_ARG_T ptrdiff_t
1464 # else
1465 #   define SBRK_ARG_T int
1466 # endif
1467 
1468 
1469 # ifdef RS6000
1470 /* The compiler seems to generate speculative reads one past the end of	*/
1471 /* an allocated object.  Hence we need to make sure that the page 	*/
1472 /* following the last heap page is also mapped.				*/
/* Obtain bytes of page-aligned memory via sbrk, always keeping one	*/
/* extra mapped page beyond the returned block (see comment above).	*/
/* Returns 0 on failure or if bytes does not fit in SBRK_ARG_T.		*/
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    caddr_t cur_brk = (caddr_t)sbrk(0);
    caddr_t result;
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
    static caddr_t my_brk_val = 0;	/* End of our last allocation + guard page. */

    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if (lsbs != 0) {
	/* Page-align the break first.	*/
        if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
    }
    if (cur_brk == my_brk_val) {
    	/* Use the extra block we allocated last time. */
        result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
        result -= GC_page_size;
    } else {
	/* Someone else moved the break; allocate a fresh guard page.	*/
        result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
    }
    my_brk_val = result + bytes + GC_page_size;	/* Always page aligned */
    return((ptr_t)result);
}
1497 
1498 #else  /* Not RS6000 */
1499 
1500 #if defined(USE_MMAP)
1501 /* Tested only under Linux, IRIX5 and Solaris 2 */
1502 
1503 #ifdef USE_MMAP_FIXED
1504 #   define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
1505 	/* Seems to yield better performance on Solaris 2, but can	*/
1506 	/* be unreliable if something is already mapped at the address.	*/
1507 #else
1508 #   define GC_MMAP_FLAGS MAP_PRIVATE
1509 #endif
1510 
1511 #ifndef HEAP_START
1512 #   define HEAP_START 0
1513 #endif
1514 
GC_unix_get_mem(bytes)1515 ptr_t GC_unix_get_mem(bytes)
1516 word bytes;
1517 {
1518     void *result;
1519     static ptr_t last_addr = HEAP_START;
1520 
1521 #   ifndef USE_MMAP_ANON
1522       static GC_bool initialized = FALSE;
1523       static int fd;
1524 
1525       if (!initialized) {
1526 	  fd = open("/dev/zero", O_RDONLY);
1527 	  fcntl(fd, F_SETFD, FD_CLOEXEC);
1528 	  initialized = TRUE;
1529       }
1530 #   endif
1531 
1532     if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
1533 #   ifdef USE_MMAP_ANON
1534       result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1535 		    GC_MMAP_FLAGS | MAP_ANON, -1, 0/* offset */);
1536 #   else
1537       result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1538 		    GC_MMAP_FLAGS, fd, 0/* offset */);
1539 #   endif
1540     if (result == MAP_FAILED) return(0);
1541     last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1542     last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1543 #   if !defined(LINUX)
1544       if (last_addr == 0) {
1545         /* Oops.  We got the end of the address space.  This isn't	*/
1546 	/* usable by arbitrary C code, since one-past-end pointers	*/
1547 	/* don't work, so we discard it and try again.			*/
1548 	munmap(result, (size_t)(-GC_page_size) - (size_t)result);
1549 			/* Leave last page mapped, so we can't repeat. */
1550 	return GC_unix_get_mem(bytes);
1551       }
1552 #   else
1553       GC_ASSERT(last_addr != 0);
1554 #   endif
1555     return((ptr_t)result);
1556 }
1557 
1558 #else /* Not RS6000, not USE_MMAP */
GC_unix_get_mem(bytes)1559 ptr_t GC_unix_get_mem(bytes)
1560 word bytes;
1561 {
1562   ptr_t result;
1563 # ifdef IRIX5
1564     /* Bare sbrk isn't thread safe.  Play by malloc rules.	*/
1565     /* The equivalent may be needed on other systems as well. 	*/
1566     __LOCK_MALLOC();
1567 # endif
1568   {
1569     ptr_t cur_brk = (ptr_t)sbrk(0);
1570     SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1571 
1572     if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1573     if (lsbs != 0) {
1574         if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1575     }
1576     result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1577     if (result == (ptr_t)(-1)) result = 0;
1578   }
1579 # ifdef IRIX5
1580     __UNLOCK_MALLOC();
1581 # endif
1582   return(result);
1583 }
1584 
1585 #endif /* Not USE_MMAP */
1586 #endif /* Not RS6000 */
1587 
1588 # endif /* UN*X */
1589 
1590 # ifdef OS2
1591 
/* Allocate bytes of committed, rwx memory via DosAllocMem.  Returns	*/
/* 0 on allocator error; retries as long as the call "succeeds" but	*/
/* yields a null block.							*/
void * os2_alloc(size_t bytes)
{
    void * result;

    do {
	if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
					PAG_WRITE | PAG_COMMIT)
			!= NO_ERROR) {
	    return(0);
	}
    } while (result == 0);
    return(result);
}
1604 
1605 # endif /* OS2 */
1606 
1607 
1608 # if defined(MSWIN32) || defined(MSWINCE)
/* Cached GetSystemInfo results (page size, address-space bounds).	*/
SYSTEM_INFO GC_sysinfo;
1610 # endif
1611 
1612 # ifdef MSWIN32
1613 
1614 # ifdef USE_GLOBAL_ALLOC
1615 #   define GLOBAL_ALLOC_TEST 1
1616 # else
1617 #   define GLOBAL_ALLOC_TEST GC_no_win32_dlls
1618 # endif
1619 
/* Number of entries in use in GC_heap_bases.				*/
word GC_n_heap_bases = 0;
1621 
GC_win32_get_mem(bytes)1622 ptr_t GC_win32_get_mem(bytes)
1623 word bytes;
1624 {
1625     ptr_t result;
1626 
1627     if (GLOBAL_ALLOC_TEST) {
1628     	/* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.	*/
1629     	/* There are also unconfirmed rumors of other		*/
1630     	/* problems, so we dodge the issue.			*/
1631         result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
1632         result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
1633     } else {
1634 	/* VirtualProtect only works on regions returned by a	*/
1635 	/* single VirtualAlloc call.  Thus we allocate one 	*/
1636 	/* extra page, which will prevent merging of blocks	*/
1637 	/* in separate regions, and eliminate any temptation	*/
1638 	/* to call VirtualProtect on a range spanning regions.	*/
1639 	/* This wastes a small amount of memory, and risks	*/
1640 	/* increased fragmentation.  But better alternatives	*/
1641 	/* would require effort.				*/
1642         result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
1643     				      MEM_COMMIT | MEM_RESERVE,
1644     				      PAGE_EXECUTE_READWRITE);
1645     }
1646     if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
1647     	/* If I read the documentation correctly, this can	*/
1648     	/* only happen if HBLKSIZE > 64k or not a power of 2.	*/
1649     if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
1650     GC_heap_bases[GC_n_heap_bases++] = result;
1651     return(result);
1652 }
1653 
GC_win32_free_heap()1654 void GC_win32_free_heap ()
1655 {
1656     if (GC_no_win32_dlls) {
1657  	while (GC_n_heap_bases > 0) {
1658  	    GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
1659  	    GC_heap_bases[GC_n_heap_bases] = 0;
1660  	}
1661     }
1662 }
1663 # endif
1664 
1665 #ifdef AMIGA
1666 # define GC_AMIGA_AM
1667 # include "AmigaOS.c"
1668 # undef GC_AMIGA_AM
1669 #endif
1670 
1671 
1672 # ifdef MSWINCE
/* Number of entries in use in GC_heap_bases (WinCE variant).		*/
word GC_n_heap_bases = 0;
1674 
/* Obtain bytes of committed memory on WinCE.  First tries to commit	*/
/* pages from an existing reserved-but-uncommitted section; otherwise	*/
/* reserves a new allocation-granularity-rounded section and commits	*/
/* from it.  Returns 0 (NULL) if the commit fails.			*/
ptr_t GC_wince_get_mem(bytes)
word bytes;
{
    ptr_t result;
    word i;

    /* Round up allocation size to multiple of page size */
    bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);

    /* Try to find reserved, uncommitted pages */
    for (i = 0; i < GC_n_heap_bases; i++) {
	/* Remaining uncommitted space in section i, i.e. the length	*/
	/* left before the next allocation-granularity boundary.	*/
	if (((word)(-(signed_word)GC_heap_lengths[i])
	     & (GC_sysinfo.dwAllocationGranularity-1))
	    >= bytes) {
	    result = GC_heap_bases[i] + GC_heap_lengths[i];
	    break;
	}
    }

    if (i == GC_n_heap_bases) {
	/* Reserve more pages */
	word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
			 & ~(GC_sysinfo.dwAllocationGranularity-1);
	/* If we ever support MPROTECT_VDB here, we will probably need to	*/
	/* ensure that res_bytes is strictly > bytes, so that VirtualProtect	*/
	/* never spans regions.  It seems to be OK for a VirtualFree argument	*/
	/* to span regions, so we should be OK for now.				*/
	result = (ptr_t) VirtualAlloc(NULL, res_bytes,
    				      MEM_RESERVE | MEM_TOP_DOWN,
    				      PAGE_EXECUTE_READWRITE);
	/* NOTE(review): a NULL result from this reserving VirtualAlloc	*/
	/* is not rejected before being recorded in GC_heap_bases —	*/
	/* confirm whether the later commit failure is relied upon.	*/
	if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
    	    /* If I read the documentation correctly, this can	*/
    	    /* only happen if HBLKSIZE > 64k or not a power of 2.	*/
	if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
	GC_heap_bases[GC_n_heap_bases] = result;
	GC_heap_lengths[GC_n_heap_bases] = 0;
	GC_n_heap_bases++;
    }

    /* Commit pages */
    result = (ptr_t) VirtualAlloc(result, bytes,
				  MEM_COMMIT,
    				  PAGE_EXECUTE_READWRITE);
    if (result != NULL) {
	if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
	GC_heap_lengths[i] += bytes;
    }

    return(result);
}
1725 # endif
1726 
1727 #ifdef USE_MUNMAP
1728 
1729 /* For now, this only works on Win32/WinCE and some Unix-like	*/
1730 /* systems.  If you have something else, don't define		*/
1731 /* USE_MUNMAP.							*/
1732 /* We assume ANSI C to support this feature.			*/
1733 
1734 #if !defined(MSWIN32) && !defined(MSWINCE)
1735 
1736 #include <unistd.h>
1737 #include <sys/mman.h>
1738 #include <sys/stat.h>
1739 #include <sys/types.h>
1740 
1741 #endif
1742 
1743 /* Compute a page aligned starting address for the unmap 	*/
1744 /* operation on a block of size bytes starting at start.	*/
1745 /* Return 0 if the block is too small to make this feasible.	*/
ptr_t GC_unmap_start(ptr_t start, word bytes)
{
    /* Round start up to the next page boundary.	*/
    word aligned = ((word)start + GC_page_size - 1) & ~(GC_page_size - 1);

    /* Too small if not even one whole page fits in the block.	*/
    if ((ptr_t)aligned + GC_page_size > start + bytes) return 0;
    return (ptr_t)aligned;
}
1755 
1756 /* Compute end address for an unmap operation on the indicated	*/
1757 /* block.							*/
ptr_t GC_unmap_end(ptr_t start, word bytes)
{
    /* One past the end of the block, rounded down to a page boundary.	*/
    ptr_t past_end = start + bytes;

    return (ptr_t)((word)past_end & ~(GC_page_size - 1));
}
1764 
1765 /* Under Win32/WinCE we commit (map) and decommit (unmap)	*/
1766 /* memory using	VirtualAlloc and VirtualFree.  These functions	*/
1767 /* work on individual allocations of virtual memory, made	*/
1768 /* previously using VirtualAlloc with the MEM_RESERVE flag.	*/
1769 /* The ranges we need to (de)commit may span several of these	*/
1770 /* allocations; therefore we use VirtualQuery to check		*/
1771 /* allocation lengths, and split up the range as necessary.	*/
1772 
1773 /* We assume that GC_remap is called on exactly the same range	*/
1774 /* as a previous call to GC_unmap.  It is safe to consistently	*/
1775 /* round the endpoints in both places.				*/
/* Unmap (Unix) or decommit (Win32/WinCE) the page-aligned interior of	*/
/* [start, start+bytes); no-op if the block is too small to contain a	*/
/* whole page.  Updates GC_unmapped_bytes.				*/
void GC_unmap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;
    if (0 == start_addr) return;
#   if defined(MSWIN32) || defined(MSWINCE)
      /* Decommit one VirtualAlloc region at a time: VirtualFree	*/
      /* cannot span allocation regions (see comment block above).	*/
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
	  GC_word free_len;
	  if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
	      != sizeof(mem_info))
	      ABORT("Weird VirtualQuery result");
	  free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
	  if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
	      ABORT("VirtualFree failed");
	  GC_unmapped_bytes += free_len;
	  start_addr += free_len;
	  len -= free_len;
      }
#   else
      if (munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}
1801 
1802 
/* Re-map (Unix: mmap /dev/zero over the range; Win32/WinCE: commit)	*/
/* the same page-aligned interior of [start, start+bytes) that a prior	*/
/* GC_unmap call unmapped.  Updates GC_unmapped_bytes.  Aborts on	*/
/* mapping failure.							*/
void GC_remap(ptr_t start, word bytes)
{
    static int zero_descr = -1;		/* Cached /dev/zero descriptor.	*/
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;
    ptr_t result;

#   if defined(MSWIN32) || defined(MSWINCE)
      if (0 == start_addr) return;
      /* Commit one VirtualAlloc region at a time, as in GC_unmap.	*/
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
	  GC_word alloc_len;
	  if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
	      != sizeof(mem_info))
	      ABORT("Weird VirtualQuery result");
	  alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
	  result = VirtualAlloc(start_addr, alloc_len,
				MEM_COMMIT,
				PAGE_EXECUTE_READWRITE);
	  if (result != start_addr) {
	      ABORT("VirtualAlloc remapping failed");
	  }
	  GC_unmapped_bytes -= alloc_len;
	  start_addr += alloc_len;
	  len -= alloc_len;
      }
#   else
      /* Fix: the open result was previously unchecked, and the fcntl	*/
      /* call ran on every invocation (including on a -1 descriptor)	*/
      /* rather than once at initialization.				*/
      if (-1 == zero_descr) {
	  zero_descr = open("/dev/zero", O_RDWR);
	  if (-1 == zero_descr) ABORT("Could not open /dev/zero");
	  fcntl(zero_descr, F_SETFD, FD_CLOEXEC);
      }
      if (0 == start_addr) return;
      result = mmap(start_addr, len, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
		    MAP_FIXED | MAP_PRIVATE, zero_descr, 0);
      if (result != start_addr) {
	  ABORT("mmap remapping failed");
      }
      GC_unmapped_bytes -= len;
#   endif
}
1842 
1843 /* Two adjacent blocks have already been unmapped and are about to	*/
1844 /* be merged.  Unmap the whole block.  This typically requires		*/
1845 /* that we unmap a small section in the middle that was not previously	*/
1846 /* unmapped due to alignment constraints.				*/
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    /* By default, unmap only the gap between the two already-unmapped	*/
    /* interiors.							*/
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    word len;
    GC_ASSERT(start1 + bytes1 == start2);
    /* If either block was too small to have an unmapped interior of	*/
    /* its own, recompute the bounds over the merged block.		*/
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
#   if defined(MSWIN32) || defined(MSWINCE)
      /* Decommit one VirtualAlloc region at a time, as in GC_unmap.	*/
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
	  GC_word free_len;
	  if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
	      != sizeof(mem_info))
	      ABORT("Weird VirtualQuery result");
	  free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
	  if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
	      ABORT("VirtualFree failed");
	  GC_unmapped_bytes += free_len;
	  start_addr += free_len;
	  len -= free_len;
      }
#   else
      if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}
1880 
1881 #endif /* USE_MUNMAP */
1882 
1883 /* Routine for pushing any additional roots.  In THREADS 	*/
1884 /* environment, this is also responsible for marking from 	*/
1885 /* thread stacks. 						*/
1886 #ifndef THREADS
1887 void (*GC_push_other_roots)() = 0;
1888 #else /* THREADS */
1889 
1890 # ifdef PCR
GC_push_thread_stack(PCR_Th_T * t,PCR_Any dummy)1891 PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
1892 {
1893     struct PCR_ThCtl_TInfoRep info;
1894     PCR_ERes result;
1895 
1896     info.ti_stkLow = info.ti_stkHi = 0;
1897     result = PCR_ThCtl_GetInfo(t, &info);
1898     GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
1899     return(result);
1900 }
1901 
/* Push the contents of an old object. We treat this as stack	*/
/* data only because that makes it robust against mark stack	*/
/* overflow.							*/
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
{
    /* Scan [p, p + size) conservatively.	*/
    GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
    return(PCR_ERes_okay);
}
1910 
1911 
/* Default GC_push_other_roots for PCR: push objects allocated by the	*/
/* previous memory manager, then mark from every thread stack		*/
/* (including the current one).  Aborts on any enumeration failure.	*/
void GC_default_push_other_roots GC_PROTO((void))
{
    /* Traverse data allocated by previous memory managers.		*/
	{
	  extern struct PCR_MM_ProcsRep * GC_old_allocator;

	  if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
	  					   GC_push_old_obj, 0)
	      != PCR_ERes_okay) {
	      ABORT("Old object enumeration failed");
	  }
	}
    /* Traverse all thread stacks. */
	if (PCR_ERes_IsErr(
                PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
              || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
              ABORT("Thread stack marking failed\n");
	}
}
1931 
1932 # endif /* PCR */
1933 
1934 # ifdef SRC_M3
1935 
1936 # ifdef ALL_INTERIOR_POINTERS
1937     --> misconfigured
1938 # endif
1939 
/* Under SRC_M3 the Modula-3 runtime owns the thread structures,	*/
/* so the collector has nothing to push here.				*/
void GC_push_thread_structures GC_PROTO((void))
{
    /* Not our responsibility. */
}
1944 
extern void ThreadF__ProcessStacks();

/* Callback handed to ThreadF__ProcessStacks: push one thread stack.	*/
/* NOTE(review): the extra word suggests stop is an inclusive bound --	*/
/* confirm against the M3 ThreadF interface.				*/
void GC_push_thread_stack(start, stop)
word start, stop;
{
   GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
}
1952 
/* Push routine with M3 specific calling convention. */
/* Pushes the single pointer-sized value *p; the dummy arguments	*/
/* only pad the signature expected by RTMain__GlobalMapProc below.	*/
GC_m3_push_root(dummy1, p, dummy2, dummy3)
word *p;
ptr_t dummy1, dummy2;
int dummy3;
{
    word q = *p;

    GC_PUSH_ONE_STACK(q, p);
}
1963 
/* M3 set equivalent to RTHeap.TracedRefTypes */
typedef struct { int elts[1]; }  RefTypeSet;
RefTypeSet GC_TracedRefTypes = {{0x1}};

/* Default GC_push_other_roots for SRC_M3: let the M3 runtime		*/
/* enumerate its global roots and thread stacks for us.			*/
void GC_default_push_other_roots GC_PROTO((void))
{
    /* Use the M3 provided routine for finding static roots.	 */
    /* This is a bit dubious, since it presumes no C roots.	 */
    /* We handle the collector roots explicitly in GC_push_roots */
      	RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
	if (GC_words_allocd > 0) {
	    ThreadF__ProcessStacks(GC_push_thread_stack);
	}
	/* Otherwise this isn't absolutely necessary, and we have	*/
	/* startup ordering problems.					*/
}
1980 
1981 # endif /* SRC_M3 */
1982 
1983 # if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
1984      defined(GC_WIN32_THREADS)
1985 
extern void GC_push_all_stacks();

/* Default GC_push_other_roots for Solaris/pthreads/Win32 threads:	*/
/* mark from every registered thread stack.				*/
void GC_default_push_other_roots GC_PROTO((void))
{
    GC_push_all_stacks();
}
1992 
1993 # endif /* GC_SOLARIS_THREADS || GC_PTHREADS */
1994 
1995 void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
1996 
1997 #endif /* THREADS */
1998 
1999 /*
2000  * Routines for accessing dirty  bits on virtual pages.
2001  * We plan to eventually implement four strategies for doing so:
2002  * DEFAULT_VDB:	A simple dummy implementation that treats every page
2003  *		as possibly dirty.  This makes incremental collection
2004  *		useless, but the implementation is still correct.
2005  * PCR_VDB:	Use PPCRs virtual dirty bit facility.
2006  * PROC_VDB:	Use the /proc facility for reading dirty bits.  Only
2007  *		works under some SVR4 variants.  Even then, it may be
2008  *		too slow to be entirely satisfactory.  Requires reading
2009  *		dirty bits for entire address space.  Implementations tend
2010  *		to assume that the client is a (slow) debugger.
2011  * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
2012  *		dirtied pages.  The implementation (and implementability)
2013  *		is highly system dependent.  This usually fails when system
2014  *		calls write to a protected page.  We prevent the read system
2015  *		call from doing so.  It is the clients responsibility to
2016  *		make sure that other system calls are similarly protected
2017  *		or write only to the stack.
2018  */
2019 GC_bool GC_dirty_maintained = FALSE;
2020 
2021 # ifdef DEFAULT_VDB
2022 
2023 /* All of the following assume the allocation lock is held, and	*/
2024 /* signals are disabled.					*/
2025 
2026 /* The client asserts that unallocated pages in the heap are never	*/
2027 /* written.								*/
2028 
/* Initialize virtual dirty bit implementation.			*/
void GC_dirty_init()
{
#   ifdef PRINTSTATS
      GC_printf0("Initializing DEFAULT_VDB...\n");
#   endif
    /* DEFAULT_VDB keeps no per-page state; just enable the facility.	*/
    GC_dirty_maintained = TRUE;
}
2037 
/* Retrieve system dirty bits for heap to a local buffer.	*/
/* Restore the systems notion of which pages are dirty.		*/
/* DEFAULT_VDB tracks nothing, so there is nothing to read.	*/
void GC_read_dirty()
{}
2042 
/* Is the HBLKSIZE sized page at h marked dirty in the local buffer?	*/
/* If the actual page size is different, this returns TRUE if any	*/
/* of the pages overlapping h are dirty.  This routine may err on the	*/
/* side of labelling pages as dirty (and this implementation does).	*/
/*ARGSUSED*/
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    /* DEFAULT_VDB: conservatively treat every page as dirty.	*/
    return(TRUE);
}
2053 
2054 /*
2055  * The following two routines are typically less crucial.  They matter
2056  * most with large dynamic libraries, or if we can't accurately identify
2057  * stacks, e.g. under Solaris 2.X.  Otherwise the following default
2058  * versions are adequate.
2059  */
2060 
/* Could any valid GC heap pointer ever have been written to this page?	*/
/*ARGSUSED*/
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    /* DEFAULT_VDB: assume yes for every page.	*/
    return(TRUE);
}
2068 
/* Reset the n pages starting at h to "was never dirty" status.	*/
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
    /* No per-page state in DEFAULT_VDB; nothing to reset.	*/
}
2075 
/* A call that:						*/
/* I) hints that [h, h+nblocks) is about to be written.	*/
/* II) guarantees that protection is removed.		*/
/* (I) may speed up some dirty bit implementations.	*/
/* (II) may be essential if we need to ensure that	*/
/* pointer-free system call buffers in the heap are 	*/
/* not protected.					*/
/*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
    /* DEFAULT_VDB never protects pages, so nothing to undo.	*/
}
2090 
2091 # endif /* DEFAULT_VDB */
2092 
2093 
2094 # ifdef MPROTECT_VDB
2095 
2096 /*
2097  * See DEFAULT_VDB for interface descriptions.
2098  */
2099 
2100 /*
2101  * This implementation maintains dirty bits itself by catching write
2102  * faults and keeping track of them.  We assume nobody else catches
2103  * SIGBUS or SIGSEGV.  We assume no write faults occur in system calls.
2104  * This means that clients must ensure that system calls don't write
2105  * to the write-protected heap.  Probably the best way to do this is to
2106  * ensure that system calls write at most to POINTERFREE objects in the
2107  * heap, and do even that only if we are on a platform on which those
2108  * are not protected.  Another alternative is to wrap system calls
2109  * (see example for read below), but the current implementation holds
2110  * a lock across blocking calls, making it problematic for multithreaded
2111  * applications.
2112  * We assume the page size is a multiple of HBLKSIZE.
2113  * We prefer them to be the same.  We avoid protecting POINTERFREE
2114  * objects only if they are the same.
2115  */
2116 
2117 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)
2118 
2119 #   include <sys/mman.h>
2120 #   include <signal.h>
2121 #   include <sys/syscall.h>
2122 
2123 #   define PROTECT(addr, len) \
2124     	  if (mprotect((caddr_t)(addr), (size_t)(len), \
2125     	      	       PROT_READ | OPT_PROT_EXEC) < 0) { \
2126     	    ABORT("mprotect failed"); \
2127     	  }
2128 #   define UNPROTECT(addr, len) \
2129     	  if (mprotect((caddr_t)(addr), (size_t)(len), \
2130     	  	       PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
2131     	    ABORT("un-mprotect failed"); \
2132     	  }
2133 
2134 # else
2135 
2136 # ifdef DARWIN
2137     /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2138        decrease the likelihood of some of the problems described below. */
2139     #include <mach/vm_map.h>
2140     extern mach_port_t GC_task_self;
2141     #define PROTECT(addr,len) \
2142         if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2143                 FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
2144             ABORT("vm_portect failed"); \
2145         }
2146     #define UNPROTECT(addr,len) \
2147         if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2148                 FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
2149             ABORT("vm_portect failed"); \
2150         }
2151 # else
2152 
2153 #   ifndef MSWINCE
2154 #     include <signal.h>
2155 #   endif
2156 
2157     static DWORD protect_junk;
2158 #   define PROTECT(addr, len) \
2159 	  if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
2160 	  		      &protect_junk)) { \
2161 	    DWORD last_error = GetLastError(); \
2162 	    GC_printf1("Last error code: %lx\n", last_error); \
2163 	    ABORT("VirtualProtect failed"); \
2164 	  }
2165 #   define UNPROTECT(addr, len) \
2166 	  if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
2167 	  		      &protect_junk)) { \
2168 	    ABORT("un-VirtualProtect failed"); \
2169 	  }
2170 # endif /* !DARWIN */
2171 # endif /* MSWIN32 || MSWINCE || DARWIN */
2172 
2173 #if defined(SUNOS4) || defined(FREEBSD)
2174     typedef void (* SIG_PF)();
2175 #endif /* SUNOS4 || FREEBSD */
2176 
2177 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
2178     || defined(HURD)
2179 # ifdef __STDC__
2180     typedef void (* SIG_PF)(int);
2181 # else
2182     typedef void (* SIG_PF)();
2183 # endif
2184 #endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
2185 
2186 #if defined(MSWIN32)
2187     typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
2188 #   undef SIG_DFL
2189 #   define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
2190 #endif
2191 #if defined(MSWINCE)
2192     typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
2193 #   undef SIG_DFL
2194 #   define SIG_DFL (SIG_PF) (-1)
2195 #endif
2196 
2197 #if defined(IRIX5) || defined(OSF1) || defined(HURD)
2198     typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
2199 #endif /* IRIX5 || OSF1 || HURD */
2200 
2201 #if defined(SUNOS5SIGS)
2202 # ifdef HPUX
2203 #   define SIGINFO __siginfo
2204 # else
2205 #   define SIGINFO siginfo
2206 # endif
2207 # ifdef __STDC__
2208     typedef void (* REAL_SIG_PF)(int, struct SIGINFO *, void *);
2209 # else
2210     typedef void (* REAL_SIG_PF)();
2211 # endif
2212 #endif /* SUNOS5SIGS */
2213 
2214 #if defined(LINUX)
2215 #   if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
2216       typedef struct sigcontext s_c;
2217 #   else  /* glibc < 2.2 */
2218 #     include <linux/version.h>
2219 #     if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
2220         typedef struct sigcontext s_c;
2221 #     else
2222         typedef struct sigcontext_struct s_c;
2223 #     endif
2224 #   endif  /* glibc < 2.2 */
2225 #   if defined(ALPHA) || defined(M68K)
2226       typedef void (* REAL_SIG_PF)(int, int, s_c *);
2227 #   else
2228 #     if defined(IA64) || defined(HP_PA)
2229         typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
2230 #     else
2231         typedef void (* REAL_SIG_PF)(int, s_c);
2232 #     endif
2233 #   endif
#   ifdef ALPHA
    /* Retrieve fault address from sigcontext structure by decoding	*/
    /* instruction.							*/
    char * get_fault_addr(s_c *sc) {
        unsigned instr;
	word faultaddr;

	/* Fetch the faulting instruction, then compute its effective	*/
	/* address: base register selected by bits 20..16, plus the	*/
	/* sign-extended low 16-bit displacement.			*/
	instr = *((unsigned *)(sc->sc_pc));
	faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
	faultaddr += (word) (((int)instr << 16) >> 16);
	return (char *)faultaddr;
    }
#   endif /* !ALPHA */
2247 # endif /* LINUX */
2248 
2249 #ifndef DARWIN
2250 SIG_PF GC_old_bus_handler;
2251 SIG_PF GC_old_segv_handler;	/* Also old MSWIN32 ACCESS_VIOLATION filter */
2252 #endif /* !DARWIN */
2253 
2254 #if defined(THREADS)
2255 /* We need to lock around the bitmap update in the write fault handler	*/
2256 /* in order to avoid the risk of losing a bit.  We do this with a 	*/
2257 /* test-and-set spin lock if we know how to do that.  Otherwise we	*/
2258 /* check whether we are already in the handler and use the dumb but	*/
2259 /* safe fallback algorithm of setting all bits in the word.		*/
2260 /* Contention should be very rare, so we do the minimum to handle it	*/
2261 /* correctly.								*/
2262 #ifdef GC_TEST_AND_SET_DEFINED
  /* Spin lock serializing dirty-bit updates made from fault handlers.	*/
  static VOLATILE unsigned int fault_handler_lock = 0;
  /* Set the bit for index in page hash table db, safely with respect	*/
  /* to concurrent write-fault handlers, by spinning on			*/
  /* fault_handler_lock around the update.				*/
  void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
    while (GC_test_and_set(&fault_handler_lock)) {}
    /* Could also revert to set_pht_entry_from_index_safe if initial	*/
    /* GC_test_and_set fails.						*/
    set_pht_entry_from_index(db, index);
    GC_clear(&fault_handler_lock);
  }
2271 #else /* !GC_TEST_AND_SET_DEFINED */
2272   /* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong,	*/
2273   /* just before we notice the conflict and correct it. We may end up   */
2274   /* looking at it while it's wrong.  But this requires contention	*/
2275   /* exactly when a GC is triggered, which seems far less likely to	*/
2276   /* fail than the old code, which had no reported failures.  Thus we	*/
2277   /* leave it this way while we think of something better, or support	*/
2278   /* GC_test_and_set on the remaining platforms.			*/
  /* Holds the address of the updating thread's stack-local dummy while	*/
  /* an update is in progress; distinct threads store distinct values,	*/
  /* so an overwrite reveals a concurrent update.			*/
  static VOLATILE word currently_updating = 0;
  /* Set the bit for index in db; on detecting a race with another	*/
  /* handler, fall back to the conservative safe update.		*/
  void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
    unsigned int update_dummy;
    currently_updating = (word)(&update_dummy);
    set_pht_entry_from_index(db, index);
    /* If we get contention in the 10 or so instruction window here,	*/
    /* and we get stopped by a GC between the two updates, we lose!	*/
    if (currently_updating != (word)(&update_dummy)) {
	set_pht_entry_from_index_safe(db, index);
	/* We claim that if two threads concurrently try to update the	*/
	/* dirty bit vector, the first one to execute UPDATE_START 	*/
	/* will see it changed when UPDATE_END is executed.  (Note that	*/
	/* &update_dummy must differ in two distinct threads.)  It	*/
	/* will then execute set_pht_entry_from_index_safe, thus 	*/
	/* returning us to a safe state, though not soon enough.	*/
    }
  }
2296 #endif /* !GC_TEST_AND_SET_DEFINED */
2297 #else /* !THREADS */
2298 # define async_set_pht_entry_from_index(db, index) \
2299 	set_pht_entry_from_index(db, index)
2300 #endif /* !THREADS */
2301 
/*ARGSUSED*/
#if !defined(DARWIN)
/* Write-fault handler for the mprotect-based dirty-bit scheme.		*/
/* The declaration (and the way the fault address is recovered) is	*/
/* platform-specific; the shared body after the declarations records	*/
/* the dirtied page in GC_dirty_pages and unprotects it.  Faults	*/
/* outside allocated heap blocks are forwarded to the previously	*/
/* installed handler, or abort if there was none.			*/
# if defined (SUNOS4) || defined(FREEBSD)
    void GC_write_fault_handler(sig, code, scp, addr)
    int sig, code;
    struct sigcontext *scp;
    char * addr;
#   ifdef SUNOS4
#     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#     define CODE_OK (FC_CODE(code) == FC_PROT \
              	    || (FC_CODE(code) == FC_OBJERR \
              	       && FC_ERRNO(code) == FC_PROT))
#   endif
#   ifdef FREEBSD
#     define SIG_OK (sig == SIGBUS)
#     define CODE_OK (code == BUS_PAGE_FAULT)
#   endif
# endif /* SUNOS4 || FREEBSD */

# if defined(IRIX5) || defined(OSF1) || defined(HURD)
#   include <errno.h>
    void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
#   ifdef OSF1
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (code == 2 /* experimentally determined */)
#   endif
#   ifdef IRIX5
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (code == EACCES)
#   endif
#   ifdef HURD
#     define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
#     define CODE_OK  TRUE
#   endif
# endif /* IRIX5 || OSF1 || HURD */

# if defined(LINUX)
#   if defined(ALPHA) || defined(M68K)
      void GC_write_fault_handler(int sig, int code, s_c * sc)
#   else
#     if defined(IA64) || defined(HP_PA)
        void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
#     else
#       if defined(ARM32)
          void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
#       else
          void GC_write_fault_handler(int sig, s_c sc)
#       endif
#     endif
#   endif
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK TRUE
	/* Empirically c.trapno == 14, on IA32, but is that useful?     */
	/* Should probably consider alignment issues on other 		*/
	/* architectures.						*/
# endif /* LINUX */

# if defined(SUNOS5SIGS)
#  ifdef __STDC__
    void GC_write_fault_handler(int sig, struct SIGINFO *scp, void * context)
#  else
    void GC_write_fault_handler(sig, scp, context)
    int sig;
    struct SIGINFO *scp;
    void * context;
#  endif
#   ifdef HPUX
#     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#     define CODE_OK (scp -> si_code == SEGV_ACCERR) \
		     || (scp -> si_code == BUS_ADRERR) \
		     || (scp -> si_code == BUS_UNKNOWN) \
		     || (scp -> si_code == SEGV_UNKNOWN) \
		     || (scp -> si_code == BUS_OBJERR)
#   else
#     define SIG_OK (sig == SIGSEGV)
#     define CODE_OK (scp -> si_code == SEGV_ACCERR)
#   endif
# endif /* SUNOS5SIGS */

# if defined(MSWIN32) || defined(MSWINCE)
    LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
#   define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
			STATUS_ACCESS_VIOLATION)
#   define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
			/* Write fault */
# endif /* MSWIN32 || MSWINCE */
{
    register unsigned i;
    /* Recover the faulting address from the platform-specific		*/
    /* signal/exception information.					*/
#   if defined(HURD)
	char *addr = (char *) code;
#   endif
#   ifdef IRIX5
	char * addr = (char *) (size_t) (scp -> sc_badvaddr);
#   endif
#   if defined(OSF1) && defined(ALPHA)
	char * addr = (char *) (scp -> sc_traparg_a0);
#   endif
#   ifdef SUNOS5SIGS
	char * addr = (char *) (scp -> si_addr);
#   endif
#   ifdef LINUX
#     if defined(I386) || defined (X86_64)
	char * addr = (char *) (sc.cr2);
#     else
#	if defined(M68K)
          char * addr = NULL;

	  struct sigcontext *scp = (struct sigcontext *)(sc);

	  /* Decode the 68k exception stack frame format to find the	*/
	  /* effective address of the faulting access.			*/
	  int format = (scp->sc_formatvec >> 12) & 0xf;
	  unsigned long *framedata = (unsigned long *)(scp + 1);
	  unsigned long ea;

	  if (format == 0xa || format == 0xb) {
	  	/* 68020/030 */
	  	ea = framedata[2];
	  } else if (format == 7) {
	  	/* 68040 */
	  	ea = framedata[3];
	  	if (framedata[1] & 0x08000000) {
	  		/* correct addr on misaligned access */
	  		ea = (ea+4095)&(~4095);
		}
	  } else if (format == 4) {
	  	/* 68060 */
	  	ea = framedata[0];
	  	if (framedata[1] & 0x08000000) {
	  		/* correct addr on misaligned access */
	  		ea = (ea+4095)&(~4095);
	  	}
	  }
	  addr = (char *)ea;
#	else
#	  ifdef ALPHA
            char * addr = get_fault_addr(sc);
#	  else
#	    if defined(IA64) || defined(HP_PA)
	      char * addr = si -> si_addr;
	      /* I believe this is claimed to work on all platforms for	*/
	      /* Linux 2.3.47 and later.  Hopefully we don't have to	*/
	      /* worry about earlier kernels on IA64.			*/
#	    else
#             if defined(POWERPC)
                char * addr = (char *) (sc.regs->dar);
#	      else
#               if defined(ARM32)
                  char * addr = (char *)sc.fault_address;
#               else
		  --> architecture not supported
#               endif
#	      endif
#	    endif
#	  endif
#	endif
#     endif
#   endif
#   if defined(MSWIN32) || defined(MSWINCE)
	char * addr = (char *) (exc_info -> ExceptionRecord
				-> ExceptionInformation[1]);
#	define sig SIGSEGV
#   endif

    if (SIG_OK && CODE_OK) {
        register struct hblk * h =
        		(struct hblk *)((word)addr & ~(GC_page_size-1));
        GC_bool in_allocd_block;

	/* Decide whether the fault lies within the collector's heap.	*/
#	ifdef SUNOS5SIGS
	    /* Address is only within the correct physical page.	*/
	    in_allocd_block = FALSE;
            for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
              if (HDR(h+i) != 0) {
                in_allocd_block = TRUE;
              }
            }
#	else
	    in_allocd_block = (HDR(addr) != 0);
#	endif
        if (!in_allocd_block) {
	    /* Heap blocks now begin and end on page boundaries */
            SIG_PF old_handler;

	    /* Not ours: chain to the previously installed handler.	*/
            if (sig == SIGSEGV) {
            	old_handler = GC_old_segv_handler;
            } else {
                old_handler = GC_old_bus_handler;
            }
            if (old_handler == SIG_DFL) {
#		if !defined(MSWIN32) && !defined(MSWINCE)
		    GC_err_printf1("Segfault at 0x%lx\n", addr);
                    ABORT("Unexpected bus error or segmentation fault");
#		else
		    return(EXCEPTION_CONTINUE_SEARCH);
#		endif
            } else {
#		if defined (SUNOS4) || defined(FREEBSD)
		    (*old_handler) (sig, code, scp, addr);
		    return;
#		endif
#		if defined (SUNOS5SIGS)
		    (*(REAL_SIG_PF)old_handler) (sig, scp, context);
		    return;
#		endif
#		if defined (LINUX)
#		    if defined(ALPHA) || defined(M68K)
		        (*(REAL_SIG_PF)old_handler) (sig, code, sc);
#		    else
#		      if defined(IA64) || defined(HP_PA)
		        (*(REAL_SIG_PF)old_handler) (sig, si, scp);
#		      else
		        (*(REAL_SIG_PF)old_handler) (sig, sc);
#		      endif
#		    endif
		    return;
#		endif
#		if defined (IRIX5) || defined(OSF1) || defined(HURD)
		    (*(REAL_SIG_PF)old_handler) (sig, code, scp);
		    return;
#		endif
#		ifdef MSWIN32
		    return((*old_handler)(exc_info));
#		endif
            }
        }
        UNPROTECT(h, GC_page_size);
	/* We need to make sure that no collection occurs between	*/
	/* the UNPROTECT and the setting of the dirty bit.  Otherwise	*/
	/* a write by a third thread might go unnoticed.  Reversing	*/
	/* the order is just as bad, since we would end up unprotecting	*/
	/* a page in a GC cycle during which it's not marked.		*/
	/* Currently we do this by disabling the thread stopping	*/
	/* signals while this handler is running.  An alternative might	*/
	/* be to record the fact that we're about to unprotect, or	*/
	/* have just unprotected a page in the GC's thread structure,	*/
	/* and then to have the thread stopping code set the dirty	*/
	/* flag, if necessary.						*/
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            register int index = PHT_HASH(h+i);

            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
#	if defined(OSF1)
	    /* These reset the signal handler each time by default. */
	    signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
#	endif
    	/* The write may not take place before dirty bits are read.	*/
    	/* But then we'll fault again ...				*/
#	if defined(MSWIN32) || defined(MSWINCE)
	    return(EXCEPTION_CONTINUE_EXECUTION);
#	else
	    return;
#	endif
    }
#if defined(MSWIN32) || defined(MSWINCE)
    return EXCEPTION_CONTINUE_SEARCH;
#else
    GC_err_printf1("Segfault at 0x%lx\n", addr);
    ABORT("Unexpected bus error or segmentation fault");
#endif
}
#endif /* !DARWIN */
2562 #endif /* !DARWIN */
2563 
2564 /*
2565  * We hold the allocation lock.  We expect block h to be written
2566  * shortly.  Ensure that all pages containing any part of the n hblks
2567  * starting at h are no longer protected.  If is_ptrfree is false,
2568  * also ensure that they will subsequently appear to be dirty.
2569  */
GC_remove_protection(h,nblocks,is_ptrfree)2570 void GC_remove_protection(h, nblocks, is_ptrfree)
2571 struct hblk *h;
2572 word nblocks;
2573 GC_bool is_ptrfree;
2574 {
2575     struct hblk * h_trunc;  /* Truncated to page boundary */
2576     struct hblk * h_end;    /* Page boundary following block end */
2577     struct hblk * current;
2578     GC_bool found_clean;
2579 
2580     if (!GC_dirty_maintained) return;
2581     h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
2582     h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
2583 	                    & ~(GC_page_size-1));
2584     found_clean = FALSE;
2585     for (current = h_trunc; current < h_end; ++current) {
2586         int index = PHT_HASH(current);
2587 
2588         if (!is_ptrfree || current < h || current >= h + nblocks) {
2589             async_set_pht_entry_from_index(GC_dirty_pages, index);
2590         }
2591     }
2592     UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
2593 }
2594 
2595 #if !defined(DARWIN)
GC_dirty_init()2596 void GC_dirty_init()
2597 {
2598 #   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
2599        defined(OSF1) || defined(HURD)
2600       struct sigaction	act, oldact;
2601       /* We should probably specify SA_SIGINFO for Linux, and handle 	*/
2602       /* the different architectures more uniformly.			*/
2603 #     if defined(IRIX5) || defined(LINUX) || defined(OSF1) || defined(HURD)
2604     	act.sa_flags	= SA_RESTART;
2605         act.sa_handler  = (SIG_PF)GC_write_fault_handler;
2606 #     else
2607     	act.sa_flags	= SA_RESTART | SA_SIGINFO;
2608         act.sa_sigaction = GC_write_fault_handler;
2609 #     endif
2610       (void)sigemptyset(&act.sa_mask);
2611 #     ifdef SIG_SUSPEND
2612         /* Arrange to postpone SIG_SUSPEND while we're in a write fault	*/
2613         /* handler.  This effectively makes the handler atomic w.r.t.	*/
2614         /* stopping the world for GC.					*/
2615         (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
2616 #     endif /* SIG_SUSPEND */
2617 #    endif
2618 #   ifdef PRINTSTATS
2619 	GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
2620 #   endif
2621     GC_dirty_maintained = TRUE;
2622     if (GC_page_size % HBLKSIZE != 0) {
2623         GC_err_printf0("Page size not multiple of HBLKSIZE\n");
2624         ABORT("Page size not multiple of HBLKSIZE");
2625     }
2626 #   if defined(SUNOS4) || defined(FREEBSD)
2627       GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
2628       if (GC_old_bus_handler == SIG_IGN) {
2629         GC_err_printf0("Previously ignored bus error!?");
2630         GC_old_bus_handler = SIG_DFL;
2631       }
2632       if (GC_old_bus_handler != SIG_DFL) {
2633 #	ifdef PRINTSTATS
2634           GC_err_printf0("Replaced other SIGBUS handler\n");
2635 #	endif
2636       }
2637 #   endif
2638 #   if defined(SUNOS4)
2639       GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
2640       if (GC_old_segv_handler == SIG_IGN) {
2641         GC_err_printf0("Previously ignored segmentation violation!?");
2642         GC_old_segv_handler = SIG_DFL;
2643       }
2644       if (GC_old_segv_handler != SIG_DFL) {
2645 #	ifdef PRINTSTATS
2646           GC_err_printf0("Replaced other SIGSEGV handler\n");
2647 #	endif
2648       }
2649 #   endif
2650 #   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) \
2651        || defined(OSF1) || defined(HURD)
2652       /* SUNOS5SIGS includes HPUX */
2653 #     if defined(GC_IRIX_THREADS)
2654       	sigaction(SIGSEGV, 0, &oldact);
2655       	sigaction(SIGSEGV, &act, 0);
2656 #     else
2657 	{
2658 	  int res = sigaction(SIGSEGV, &act, &oldact);
2659 	  if (res != 0) ABORT("Sigaction failed");
2660  	}
2661 #     endif
2662 #     if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
2663 	/* This is Irix 5.x, not 6.x.  Irix 5.x does not have	*/
2664 	/* sa_sigaction.					*/
2665 	GC_old_segv_handler = oldact.sa_handler;
2666 #     else /* Irix 6.x or SUNOS5SIGS or LINUX */
2667         if (oldact.sa_flags & SA_SIGINFO) {
2668           GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
2669         } else {
2670           GC_old_segv_handler = oldact.sa_handler;
2671         }
2672 #     endif
2673       if (GC_old_segv_handler == SIG_IGN) {
2674 	     GC_err_printf0("Previously ignored segmentation violation!?");
2675 	     GC_old_segv_handler = SIG_DFL;
2676       }
2677       if (GC_old_segv_handler != SIG_DFL) {
2678 #       ifdef PRINTSTATS
2679 	  GC_err_printf0("Replaced other SIGSEGV handler\n");
2680 #       endif
2681       }
2682 #   endif
2683 #   if defined(HPUX) || defined(LINUX) || defined(HURD)
2684       sigaction(SIGBUS, &act, &oldact);
2685       GC_old_bus_handler = oldact.sa_handler;
2686       if (GC_old_bus_handler == SIG_IGN) {
2687 	     GC_err_printf0("Previously ignored bus error!?");
2688 	     GC_old_bus_handler = SIG_DFL;
2689       }
2690       if (GC_old_bus_handler != SIG_DFL) {
2691 #       ifdef PRINTSTATS
2692 	  GC_err_printf0("Replaced other SIGBUS handler\n");
2693 #       endif
2694       }
2695 #   endif /* HPUX || LINUX || HURD */
2696 #   if defined(MSWIN32)
2697       GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
2698       if (GC_old_segv_handler != NULL) {
2699 #	ifdef PRINTSTATS
2700           GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
2701 #	endif
2702       } else {
2703           GC_old_segv_handler = SIG_DFL;
2704       }
2705 #   endif
2706 }
2707 #endif /* !DARWIN */
2708 
GC_incremental_protection_needs()2709 int GC_incremental_protection_needs()
2710 {
2711     if (GC_page_size == HBLKSIZE) {
2712 	return GC_PROTECTS_POINTER_HEAP;
2713     } else {
2714 	return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
2715     }
2716 }
2717 
#define HAVE_INCREMENTAL_PROTECTION_NEEDS

/* True iff the block described by hhdr contains no pointers.	*/
#define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)

/* True iff x is aligned on a VM page boundary.			*/
#define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))

/* Write-protect the heap.  If pointer-free blocks need no protection	*/
/* (page size == block size), skip them; otherwise protect every heap	*/
/* section in its entirety.						*/
void GC_protect_heap()
{
    ptr_t start;
    word len;
    struct hblk * current;
    struct hblk * current_start;  /* Start of block to be protected. */
    struct hblk * limit;
    unsigned i;
    GC_bool protect_all = 
	  (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
    for (i = 0; i < GC_n_heap_sects; i++) {
        start = GC_heap_sects[i].hs_start;
        len = GC_heap_sects[i].hs_bytes;
	if (protect_all) {
          PROTECT(start, len);
	} else {
	  GC_ASSERT(PAGE_ALIGNED(len))
	  GC_ASSERT(PAGE_ALIGNED(start))
	  current_start = current = (struct hblk *)start;
	  limit = (struct hblk *)(start + len);
	  /* Walk the section block by block, protecting maximal runs	*/
	  /* of pointer-containing blocks and skipping pointer-free	*/
	  /* ones (which are considered dirty on allocation anyway).	*/
	  while (current < limit) {
            hdr * hhdr;
	    word nhblks;
	    GC_bool is_ptrfree;

	    GC_ASSERT(PAGE_ALIGNED(current));
	    GET_HDR(current, hhdr);
	    if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
	      /* This can happen only if we're at the beginning of a 	*/
	      /* heap segment, and a block spans heap segments.		*/
	      /* We will handle that block as part of the preceding	*/
	      /* segment.						*/
	      GC_ASSERT(current_start == current);
	      current_start = ++current;
	      continue;
	    }
	    if (HBLK_IS_FREE(hhdr)) {
	      GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
	      nhblks = divHBLKSZ(hhdr -> hb_sz);
	      is_ptrfree = TRUE;	/* dirty on alloc */
	    } else {
	      nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
	      is_ptrfree = IS_PTRFREE(hhdr);
	    }
	    if (is_ptrfree) {
	      /* Flush the pending pointerful run, then skip this block. */
	      if (current_start < current) {
		PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
	      }
	      current_start = (current += nhblks);
	    } else {
	      current += nhblks;
	    }
	  }
	  /* Protect any trailing pointerful run.	*/
	  if (current_start < current) {
	    PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
	  }
	}
    }
}
2782 
/* We assume that either the world is stopped or it's OK to lose dirty	*/
/* bits while this is happening (as in GC_enable_incremental).		*/
void GC_read_dirty()
{
    /* Move the set of pages dirtied since the last call into		*/
    /* GC_grungy_pages, clear the collector's dirty set, and		*/
    /* re-establish write protection on the heap.			*/
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
          (sizeof GC_dirty_pages));
    BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
    GC_protect_heap();
}
2792 
GC_page_was_dirty(h)2793 GC_bool GC_page_was_dirty(h)
2794 struct hblk * h;
2795 {
2796     register word index = PHT_HASH(h);
2797 
2798     return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2799 }
2800 
2801 /*
2802  * Acquiring the allocation lock here is dangerous, since this
2803  * can be called from within GC_call_with_alloc_lock, and the cord
2804  * package does so.  On systems that allow nested lock acquisition, this
2805  * happens to work.
2806  * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
2807  */
2808 
static GC_bool syscall_acquired_lock = FALSE;	/* Protected by GC lock. */

/* Acquire the allocation lock, unless this thread already holds it.	*/
/* Paired with GC_end_syscall, which releases it only if we took it	*/
/* here (tracked via syscall_acquired_lock).				*/
void GC_begin_syscall()
{
    if (!I_HOLD_LOCK()) {
	LOCK();
	syscall_acquired_lock = TRUE;
    }
}
2818 
GC_end_syscall()2819 void GC_end_syscall()
2820 {
2821     if (syscall_acquired_lock) {
2822 	syscall_acquired_lock = FALSE;
2823 	UNLOCK();
2824     }
2825 }
2826 
/* Remove write protection from all pages overlapping [addr, addr+len)	*/
/* and mark them dirty, since we can no longer trap writes to them.	*/
/* The range must lie entirely within a single heap object.		*/
void GC_unprotect_range(addr, len)
ptr_t addr;
word len;
{
    struct hblk * start_block;
    struct hblk * end_block;
    register struct hblk *h;
    ptr_t obj_start;

    if (!GC_dirty_maintained) return;
    obj_start = GC_base(addr);
    if (obj_start == 0) return;		/* Not a heap address. */
    if (GC_base(addr + len - 1) != obj_start) {
        ABORT("GC_unprotect_range(range bigger than object)");
    }
    /* Round the range out to whole VM pages.	*/
    start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
    end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
    end_block += GC_page_size/HBLKSIZE - 1;
	/* Now the last heap block of the last page touched.	*/
    for (h = start_block; h <= end_block; h++) {
        register word index = PHT_HASH(h);

        /* Record each block as dirty before dropping protection.	*/
        async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
    UNPROTECT(start_block,
    	      ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
}
2853 
#if 0

/* We no longer wrap read by default, since that was causing too many	*/
/* problems.  It is preferred that the client instead avoids writing	*/
/* to the write-protected heap with a system call.			*/
/* This still serves as sample code if you do want to wrap system calls.*/

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
/* Replacement for UNIX system call.					  */
/* Other calls that write to the heap should be handled similarly.	  */
/* Note that this doesn't work well for blocking reads:  It will hold	  */
/* the allocation lock for the entire duration of the call. Multithreaded */
/* clients should really ensure that it won't block, either by setting 	  */
/* the descriptor nonblocking, or by calling select or poll first, to	  */
/* make sure that input is available.					  */
/* Another, preferred alternative is to ensure that system calls never 	  */
/* write to the protected heap (see above).				  */
# if defined(__STDC__) && !defined(SUNOS4)
#   include <unistd.h>
#   include <sys/uio.h>
    ssize_t read(int fd, void *buf, size_t nbyte)
# else
#   ifndef LINT
      int read(fd, buf, nbyte)
#   else
      int GC_read(fd, buf, nbyte)
#   endif
    int fd;
    char *buf;
    int nbyte;
# endif
{
    int result;

    /* Unprotect the target buffer under the GC lock, then perform	*/
    /* the real read via a path that bypasses this wrapper.		*/
    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
#   if defined(IRIX5) || defined(GC_LINUX_THREADS)
	/* Indirect system call may not always be easily available.	*/
	/* We could call _read, but that would interfere with the	*/
	/* libpthread interception of read.				*/
	/* On Linux, we have to be careful with the linuxthreads	*/
	/* read interception.						*/
	{
	    struct iovec iov;

	    iov.iov_base = buf;
	    iov.iov_len = nbyte;
	    result = readv(fd, &iov, 1);
	}
#   else
#     if defined(HURD)	
	result = __read(fd, buf, nbyte);
#     else
 	/* The two zero args at the end of this list are because one
 	   IA-64 syscall() implementation actually requires six args
 	   to be passed, even though they aren't always used. */
     	result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
#     endif /* !HURD */
#   endif
    GC_end_syscall();
    return(result);
}
#endif /* !MSWIN32 && !MSWINCE && !GC_LINUX_THREADS */

#if defined(GC_USE_LD_WRAP) && !defined(THREADS)
    /* We use the GNU ld call wrapping facility.			*/
    /* This requires that the linker be invoked with "--wrap read".	*/
    /* This can be done by passing -Wl,"--wrap read" to gcc.		*/
    /* I'm not sure that this actually wraps whatever version of read	*/
    /* is called by stdio.  That code also mentions __read.		*/
#   include <unistd.h>
    ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
    {
 	int result;

	GC_begin_syscall();
    	GC_unprotect_range(buf, (word)nbyte);
	result = __real_read(fd, buf, nbyte);
	GC_end_syscall();
	return(result);
    }

    /* We should probably also do this for __read, or whatever stdio	*/
    /* actually calls.							*/
#endif

#endif /* 0 */
2941 
/*ARGSUSED*/
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    /* The mprotect implementation keeps no "ever dirty" history, so	*/
    /* conservatively report every page as having been written.		*/
    return(TRUE);
}
2948 
/* Reset the n pages starting at h to "was never dirty" status.	*/
/*ARGSUSED*/
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
    /* No-op: the mprotect implementation has no fresh-page cache.	*/
}
2956 
2957 # endif /* MPROTECT_VDB */
2958 
2959 # ifdef PROC_VDB
2960 
2961 /*
2962  * See DEFAULT_VDB for interface descriptions.
2963  */
2964 
2965 /*
 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
2967  * from which we can read page modified bits.  This facility is far from
2968  * optimal (e.g. we would like to get the info for only some of the
2969  * address space), but it avoids intercepting system calls.
2970  */
2971 
2972 #include <errno.h>
2973 #include <sys/types.h>
2974 #include <sys/signal.h>
2975 #include <sys/fault.h>
2976 #include <sys/syscall.h>
2977 #include <sys/procfs.h>
2978 #include <sys/stat.h>
2979 
#define INITIAL_BUF_SZ 4096
word GC_proc_buf_size = INITIAL_BUF_SZ;	/* Grown on demand by GC_read_dirty. */
char *GC_proc_buf;			/* Buffer for /proc pagedata reads.  */

#ifdef GC_SOLARIS_THREADS
/* We don't have exact sp values for threads.  So we count on	*/
/* occasionally declaring stack pages to be fresh.  Thus we 	*/
/* need a real implementation of GC_is_fresh.  We can't clear	*/
/* entries in GC_written_pages, since that would declare all	*/
/* pages with the given hash address to be fresh.		*/
#   define MAX_FRESH_PAGES 8*1024	/* Must be power of 2 */
    struct hblk ** GC_fresh_pages;	/* A direct mapped cache.	*/
    					/* Collisions are dropped.	*/

#   define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
#   define ADD_FRESH_PAGE(h) \
	GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
#   define PAGE_IS_FRESH(h) \
	(GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
#endif
3000 
3001 /* Add all pages in pht2 to pht1 */
GC_or_pages(pht1,pht2)3002 void GC_or_pages(pht1, pht2)
3003 page_hash_table pht1, pht2;
3004 {
3005     register int i;
3006 
3007     for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
3008 }
3009 
3010 int GC_proc_fd;
3011 
GC_dirty_init()3012 void GC_dirty_init()
3013 {
3014     int fd;
3015     char buf[30];
3016 
3017     GC_dirty_maintained = TRUE;
3018     if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
3019     	register int i;
3020 
3021         for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
3022 #       ifdef PRINTSTATS
3023 	    GC_printf1("Allocated words:%lu:all pages may have been written\n",
3024 	    	       (unsigned long)
3025 	    	      		(GC_words_allocd + GC_words_allocd_before_gc));
3026 #	endif
3027     }
3028     sprintf(buf, "/proc/%d", getpid());
3029     fd = open(buf, O_RDONLY);
3030     if (fd < 0) {
3031     	ABORT("/proc open failed");
3032     }
3033     GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
3034     close(fd);
3035     syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
3036     if (GC_proc_fd < 0) {
3037     	ABORT("/proc ioctl failed");
3038     }
3039     GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
3040 #   ifdef GC_SOLARIS_THREADS
3041 	GC_fresh_pages = (struct hblk **)
3042 	  GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
3043 	if (GC_fresh_pages == 0) {
3044 	    GC_err_printf0("No space for fresh pages\n");
3045 	    EXIT();
3046 	}
3047 	BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
3048 #   endif
3049 }
3050 
/* Ignore write hints. They don't help us here.	*/
/*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
    /* No-op: /proc dirty bits are maintained by the kernel; there is	*/
    /* no page protection for us to remove.				*/
}
3059 
3060 #ifdef GC_SOLARIS_THREADS
3061 #   define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
3062 #else
3063 #   define READ(fd,buf,nbytes) read(fd, buf, nbytes)
3064 #endif
3065 
GC_read_dirty()3066 void GC_read_dirty()
3067 {
3068     unsigned long ps, np;
3069     int nmaps;
3070     ptr_t vaddr;
3071     struct prasmap * map;
3072     char * bufp;
3073     ptr_t current_addr, limit;
3074     int i;
3075 int dummy;
3076 
3077     BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
3078 
3079     bufp = GC_proc_buf;
3080     if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3081 #	ifdef PRINTSTATS
3082             GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
3083             	       GC_proc_buf_size);
3084 #	endif
3085         {
3086             /* Retry with larger buffer. */
3087             word new_size = 2 * GC_proc_buf_size;
3088             char * new_buf = GC_scratch_alloc(new_size);
3089 
3090             if (new_buf != 0) {
3091                 GC_proc_buf = bufp = new_buf;
3092                 GC_proc_buf_size = new_size;
3093             }
3094             if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3095                 WARN("Insufficient space for /proc read\n", 0);
3096                 /* Punt:	*/
3097         	memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
3098 		memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3099 #		ifdef GC_SOLARIS_THREADS
3100 		    BZERO(GC_fresh_pages,
3101 		    	  MAX_FRESH_PAGES * sizeof (struct hblk *));
3102 #		endif
3103 		return;
3104             }
3105         }
3106     }
3107     /* Copy dirty bits into GC_grungy_pages */
3108     	nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3109 	/* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
3110 		     nmaps, PG_REFERENCED, PG_MODIFIED); */
3111 	bufp = bufp + sizeof(struct prpageheader);
3112 	for (i = 0; i < nmaps; i++) {
3113 	    map = (struct prasmap *)bufp;
3114 	    vaddr = (ptr_t)(map -> pr_vaddr);
3115 	    ps = map -> pr_pagesize;
3116 	    np = map -> pr_npage;
3117 	    /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
3118 	    limit = vaddr + ps * np;
3119 	    bufp += sizeof (struct prasmap);
3120 	    for (current_addr = vaddr;
3121 	         current_addr < limit; current_addr += ps){
3122 	        if ((*bufp++) & PG_MODIFIED) {
3123 	            register struct hblk * h = (struct hblk *) current_addr;
3124 
3125 	            while ((ptr_t)h < current_addr + ps) {
3126 	                register word index = PHT_HASH(h);
3127 
3128 	                set_pht_entry_from_index(GC_grungy_pages, index);
3129 #			ifdef GC_SOLARIS_THREADS
3130 			  {
3131 			    register int slot = FRESH_PAGE_SLOT(h);
3132 
3133 			    if (GC_fresh_pages[slot] == h) {
3134 			        GC_fresh_pages[slot] = 0;
3135 			    }
3136 			  }
3137 #			endif
3138 	                h++;
3139 	            }
3140 	        }
3141 	    }
3142 	    bufp += sizeof(long) - 1;
3143 	    bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
3144 	}
3145     /* Update GC_written_pages. */
3146         GC_or_pages(GC_written_pages, GC_grungy_pages);
3147 #   ifdef GC_SOLARIS_THREADS
3148       /* Make sure that old stacks are considered completely clean	*/
3149       /* unless written again.						*/
3150 	GC_old_stacks_are_fresh();
3151 #   endif
3152 }
3153 
3154 #undef READ
3155 
GC_page_was_dirty(h)3156 GC_bool GC_page_was_dirty(h)
3157 struct hblk *h;
3158 {
3159     register word index = PHT_HASH(h);
3160     register GC_bool result;
3161 
3162     result = get_pht_entry_from_index(GC_grungy_pages, index);
3163 #   ifdef GC_SOLARIS_THREADS
3164 	if (result && PAGE_IS_FRESH(h)) result = FALSE;
3165 	/* This happens only if page was declared fresh since	*/
3166 	/* the read_dirty call, e.g. because it's in an unused  */
3167 	/* thread stack.  It's OK to treat it as clean, in	*/
3168 	/* that case.  And it's consistent with 		*/
3169 	/* GC_page_was_ever_dirty.				*/
3170 #   endif
3171     return(result);
3172 }
3173 
GC_page_was_ever_dirty(h)3174 GC_bool GC_page_was_ever_dirty(h)
3175 struct hblk *h;
3176 {
3177     register word index = PHT_HASH(h);
3178     register GC_bool result;
3179 
3180     result = get_pht_entry_from_index(GC_written_pages, index);
3181 #   ifdef GC_SOLARIS_THREADS
3182 	if (result && PAGE_IS_FRESH(h)) result = FALSE;
3183 #   endif
3184     return(result);
3185 }
3186 
3187 /* Caller holds allocation lock.	*/
GC_is_fresh(h,n)3188 void GC_is_fresh(h, n)
3189 struct hblk *h;
3190 word n;
3191 {
3192 
3193     register word index;
3194 
3195 #   ifdef GC_SOLARIS_THREADS
3196       register word i;
3197 
3198       if (GC_fresh_pages != 0) {
3199         for (i = 0; i < n; i++) {
3200           ADD_FRESH_PAGE(h + i);
3201         }
3202       }
3203 #   endif
3204 }
3205 
3206 # endif /* PROC_VDB */
3207 
3208 
3209 # ifdef PCR_VDB
3210 
3211 # include "vd/PCR_VD.h"
3212 
3213 # define NPAGES (32*1024)	/* 128 MB */
3214 
3215 PCR_VD_DB  GC_grungy_bits[NPAGES];
3216 
3217 ptr_t GC_vd_base;	/* Address corresponding to GC_grungy_bits[0]	*/
3218 			/* HBLKSIZE aligned.				*/
3219 
/* Initialize PCR virtual-dirty tracking over a fixed window of	*/
/* NPAGES heap blocks starting at the first heap section.	*/
void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
   	ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
	!= PCR_ERes_okay) {
	ABORT("dirty bit initialization failed");
    }
}
3233 
/* Fetch and clear the PCR dirty bits for the tracked window,	*/
/* enabling write protection on any heap sections added since	*/
/* the previous call.						*/
void GC_read_dirty()
{
    /* lazily enable dirty bits on newly added heap sects */
    {
        static int onhs = 0;	/* Number of sections already enabled. */
        int nhs = GC_n_heap_sects;
        for( ; onhs < nhs; onhs++ ) {
            PCR_VD_WriteProtectEnable(
                    GC_heap_sects[onhs].hs_start,
                    GC_heap_sects[onhs].hs_bytes );
        }
    }


    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
        != PCR_ERes_okay) {
	ABORT("dirty bit read failed");
    }
}
3253 
GC_page_was_dirty(h)3254 GC_bool GC_page_was_dirty(h)
3255 struct hblk *h;
3256 {
3257     if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
3258 	return(TRUE);
3259     }
3260     return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
3261 }
3262 
/*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
    /* Briefly drop and then re-establish write protection on the	*/
    /* range; presumably this leaves the pages marked dirty to the	*/
    /* PCR VD machinery -- NOTE(review): confirm against PCR_VD docs.	*/
    PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
}
3272 
3273 # endif /* PCR_VDB */
3274 
3275 #if defined(MPROTECT_VDB) && defined(DARWIN)
3276 /* The following sources were used as a *reference* for this exception handling
3277    code:
3278       1. Apple's mach/xnu documentation
3279       2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3280          omnigroup's macosx-dev list.
3281          www.omnigroup.com/mailman/archive/macosx-dev/2000-June/002030.html
3282       3. macosx-nat.c from Apple's GDB source code.
3283 */
3284 
3285 /* The bug that caused all this trouble should now be fixed. This should
3286    eventually be removed if all goes well. */
3287 /* define BROKEN_EXCEPTION_HANDLING */
3288 
3289 #include <mach/mach.h>
3290 #include <mach/mach_error.h>
3291 #include <mach/thread_status.h>
3292 #include <mach/exception.h>
3293 #include <mach/task.h>
3294 #include <pthread.h>
3295 
3296 /* These are not defined in any header, although they are documented */
3297 extern boolean_t exc_server(mach_msg_header_t *,mach_msg_header_t *);
3298 extern kern_return_t exception_raise(
3299     mach_port_t,mach_port_t,mach_port_t,
3300     exception_type_t,exception_data_t,mach_msg_type_number_t);
3301 extern kern_return_t exception_raise_state(
3302     mach_port_t,mach_port_t,mach_port_t,
3303     exception_type_t,exception_data_t,mach_msg_type_number_t,
3304     thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3305     thread_state_t,mach_msg_type_number_t*);
3306 extern kern_return_t exception_raise_state_identity(
3307     mach_port_t,mach_port_t,mach_port_t,
3308     exception_type_t,exception_data_t,mach_msg_type_number_t,
3309     thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
3310     thread_state_t,mach_msg_type_number_t*);
3311 
3312 
#define MAX_EXCEPTION_PORTS 16

static mach_port_t GC_task_self;

/* Previously installed task exception handlers, saved by		*/
/* GC_dirty_init so that exceptions we don't handle can be forwarded	*/
/* to them.								*/
static struct {
    mach_msg_type_number_t count;
    exception_mask_t      masks[MAX_EXCEPTION_PORTS];
    exception_handler_t   ports[MAX_EXCEPTION_PORTS];
    exception_behavior_t  behaviors[MAX_EXCEPTION_PORTS];
    thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
} GC_old_exc_ports;

/* Ports owned by the collector's exception-handling thread.	*/
static struct {
    mach_port_t exception;
#if defined(THREADS)
    mach_port_t reply;		/* Carries stop/resume acknowledgements. */
#endif
} GC_ports;

/* Minimal mach message: just a header.	*/
typedef struct {
    mach_msg_header_t head;
} GC_msg_t;

typedef enum {
    GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
} GC_mprotect_state_t;

/* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
   but it isn't  documented. Use the source and see if they
   should be ok. */
#define ID_STOP 1
#define ID_RESUME 2

/* These values are only used on the reply port */
#define ID_ACK 3
3348 
#if defined(THREADS)

GC_mprotect_state_t GC_mprotect_state;

/* The following should ONLY be called when the world is stopped  */
/* Send a stop/resume request (id) to the mprotect thread via its	*/
/* exception port and block until it acknowledges with ID_ACK on the	*/
/* reply port.								*/
static void GC_mprotect_thread_notify(mach_msg_id_t id) {
    struct {
        GC_msg_t msg;
        mach_msg_trailer_t trailer;
    } buf;
    mach_msg_return_t r;
    /* remote, local */
    buf.msg.head.msgh_bits = 
        MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
    buf.msg.head.msgh_size = sizeof(buf.msg);
    buf.msg.head.msgh_remote_port = GC_ports.exception;
    buf.msg.head.msgh_local_port = MACH_PORT_NULL;
    buf.msg.head.msgh_id = id;

    /* Combined send+receive: the acknowledgement is awaited in the	*/
    /* same mach_msg call.						*/
    r = mach_msg(
        &buf.msg.head,
        MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE,
        sizeof(buf.msg),
        sizeof(buf),
        GC_ports.reply,
        MACH_MSG_TIMEOUT_NONE,
        MACH_PORT_NULL);
    if(r != MACH_MSG_SUCCESS)
	ABORT("mach_msg failed in GC_mprotect_thread_notify");
    if(buf.msg.head.msgh_id != ID_ACK)
        ABORT("invalid ack in GC_mprotect_thread_notify");
}
3381 
/* Should only be called by the mprotect thread */
/* Acknowledge a stop/resume request by sending ID_ACK on the reply	*/
/* port (send-only; no receive).					*/
static void GC_mprotect_thread_reply() {
    GC_msg_t msg;
    mach_msg_return_t r;
    /* remote, local */
    msg.head.msgh_bits = 
        MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
    msg.head.msgh_size = sizeof(msg);
    msg.head.msgh_remote_port = GC_ports.reply;
    msg.head.msgh_local_port = MACH_PORT_NULL;
    msg.head.msgh_id = ID_ACK;

    r = mach_msg(
        &msg.head,
        MACH_SEND_MSG,
        sizeof(msg),
        0,
        MACH_PORT_NULL,
        MACH_MSG_TIMEOUT_NONE,
        MACH_PORT_NULL);
    if(r != MACH_MSG_SUCCESS)
	ABORT("mach_msg failed in GC_mprotect_thread_reply");
}
3405 
/* Ask the mprotect thread to stop handling faults; blocks until	*/
/* acknowledged.  Call only when the world is stopped (see above).	*/
void GC_mprotect_stop() {
    GC_mprotect_thread_notify(ID_STOP);
}
/* Ask the mprotect thread to resume normal fault handling; blocks	*/
/* until acknowledged.							*/
void GC_mprotect_resume() {
    GC_mprotect_thread_notify(ID_RESUME);
}
3412 
3413 #else /* !THREADS */
3414 /* The compiler should optimize away any GC_mprotect_state computations */
3415 #define GC_mprotect_state GC_MP_NORMAL
3416 #endif
3417 
/* Body of the collector's dedicated exception-handling thread.		*/
/* Loops forever receiving messages on GC_ports.exception: mach		*/
/* exceptions are dispatched through exc_server (which calls		*/
/* catch_exception_raise) and replied to; with THREADS, ID_STOP and	*/
/* ID_RESUME requests drive the GC_mprotect_state machine.		*/
static void *GC_mprotect_thread(void *arg) {
    mach_msg_return_t r;
    /* These two structures contain some private kernel data. We don't need to
       access any of it so we don't bother defining a proper struct. The
       correct definitions are in the xnu source code. */
    struct {
        mach_msg_header_t head;
        char data[256];
    } reply;
    struct {
        mach_msg_header_t head;
        mach_msg_body_t msgh_body;
        char data[1024];
    } msg;

    mach_msg_id_t id;

    for(;;) {
        /* In DISCARDING state, receive with a zero timeout so the	*/
        /* queue of stale protection faults drains quickly.		*/
        r = mach_msg(
            &msg.head,
            MACH_RCV_MSG|MACH_RCV_LARGE|
                (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
            0,
            sizeof(msg),
            GC_ports.exception,
            GC_mprotect_state == GC_MP_DISCARDING ? 0 : MACH_MSG_TIMEOUT_NONE,
            MACH_PORT_NULL);

        id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;

#if defined(THREADS)
        if(GC_mprotect_state == GC_MP_DISCARDING) {
            /* A timeout means the queue has drained: go quiescent	*/
            /* and acknowledge the pending stop request.		*/
            if(r == MACH_RCV_TIMED_OUT) {
                GC_mprotect_state = GC_MP_STOPPED;
                GC_mprotect_thread_reply();
                continue;
            }
            if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
                ABORT("out of order mprotect thread request");
        }
#endif

        if(r != MACH_MSG_SUCCESS) {
            GC_err_printf2("mach_msg failed with %d %s\n", 
                (int)r,mach_error_string(r));
            ABORT("mach_msg failed");
        }

        switch(id) {
#if defined(THREADS)
            case ID_STOP:
                if(GC_mprotect_state != GC_MP_NORMAL)
                    ABORT("Called mprotect_stop when state wasn't normal");
                GC_mprotect_state = GC_MP_DISCARDING;
                break;
            case ID_RESUME:
                if(GC_mprotect_state != GC_MP_STOPPED)
                    ABORT("Called mprotect_resume when state wasn't stopped");
                GC_mprotect_state = GC_MP_NORMAL;
                GC_mprotect_thread_reply();
                break;
#endif /* THREADS */
            default:
	            /* Handle the message (calls catch_exception_raise) */
    	        if(!exc_server(&msg.head,&reply.head))
                    ABORT("exc_server failed");
                /* Send the reply */
                r = mach_msg(
                    &reply.head,
                    MACH_SEND_MSG,
                    reply.head.msgh_size,
                    0,
                    MACH_PORT_NULL,
                    MACH_MSG_TIMEOUT_NONE,
                    MACH_PORT_NULL);
	        if(r != MACH_MSG_SUCCESS) {
	        	/* This will fail if the thread dies, but the thread shouldn't
	        	   die... */
	        	#ifdef BROKEN_EXCEPTION_HANDLING
    	        	GC_err_printf2(
                        "mach_msg failed with %d %s while sending exc reply\n",
                        (int)r,mach_error_string(r));
    	        #else
    	        	ABORT("mach_msg failed while sending exception reply");
    	        #endif
        	}
        } /* switch */
    } /* for(;;) */
    /* NOT REACHED */
    return NULL;
}
3509 
/* All this SIGBUS code shouldn't be necessary. All protection faults should
   be going through the mach exception handler. However, it seems a SIGBUS is
   occasionally sent for some unknown reason. Even more odd, it seems to be
   meaningless and safe to ignore. */
#ifdef BROKEN_EXCEPTION_HANDLING

typedef void (* SIG_PF)();
static SIG_PF GC_old_bus_handler;

/* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
   Even if this doesn't get updated properly, it isn't really a problem */
static int GC_sigbus_count;

/* SIGBUS handler installed by GC_dirty_init: ignore isolated SIGBUSs	*/
/* (with a warning), but abort once too many arrive consecutively.	*/
static void GC_darwin_sigbus(int num,siginfo_t *sip,void *context) {
    if(num != SIGBUS) ABORT("Got a non-sigbus signal in the sigbus handler");

    /* Ugh... some seem safe to ignore, but too many in a row probably means
       trouble. GC_sigbus_count is reset for each mach exception that is
       handled */
    if(GC_sigbus_count >= 8) {
        ABORT("Got more than 8 SIGBUSs in a row!");
    } else {
        GC_sigbus_count++;
        GC_err_printf0("GC: WARNING: Ignoring SIGBUS.\n");
    }
}
#endif /* BROKEN_EXCEPTION_HANDLING */
3537 
GC_dirty_init()3538 void GC_dirty_init() {
3539     kern_return_t r;
3540     mach_port_t me;
3541     pthread_t thread;
3542     pthread_attr_t attr;
3543     exception_mask_t mask;
3544 
3545 #   ifdef PRINTSTATS
3546         GC_printf0("Inititalizing mach/darwin mprotect virtual dirty bit "
3547             "implementation\n");
3548 #   endif
3549 #	ifdef BROKEN_EXCEPTION_HANDLING
3550         GC_err_printf0("GC: WARNING: Enabling workarounds for various darwin "
3551             "exception handling bugs.\n");
3552 #	endif
3553     GC_dirty_maintained = TRUE;
3554     if (GC_page_size % HBLKSIZE != 0) {
3555         GC_err_printf0("Page size not multiple of HBLKSIZE\n");
3556         ABORT("Page size not multiple of HBLKSIZE");
3557     }
3558 
3559     GC_task_self = me = mach_task_self();
3560 
3561     r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.exception);
3562     if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (exception port)");
3563 
3564     r = mach_port_insert_right(me,GC_ports.exception,GC_ports.exception,
3565     	MACH_MSG_TYPE_MAKE_SEND);
3566     if(r != KERN_SUCCESS)
3567     	ABORT("mach_port_insert_right failed (exception port)");
3568 
3569     #if defined(THREADS)
3570         r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.reply);
3571         if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (reply port)");
3572     #endif
3573 
3574     /* The exceptions we want to catch */
3575     mask = EXC_MASK_BAD_ACCESS;
3576 
3577     r = task_get_exception_ports(
3578         me,
3579         mask,
3580         GC_old_exc_ports.masks,
3581         &GC_old_exc_ports.count,
3582         GC_old_exc_ports.ports,
3583         GC_old_exc_ports.behaviors,
3584         GC_old_exc_ports.flavors
3585     );
3586     if(r != KERN_SUCCESS) ABORT("task_get_exception_ports failed");
3587 
3588     r = task_set_exception_ports(
3589         me,
3590         mask,
3591         GC_ports.exception,
3592         EXCEPTION_DEFAULT,
3593         MACHINE_THREAD_STATE
3594     );
3595     if(r != KERN_SUCCESS) ABORT("task_set_exception_ports failed");
3596 
3597     if(pthread_attr_init(&attr) != 0) ABORT("pthread_attr_init failed");
3598     if(pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED) != 0)
3599         ABORT("pthread_attr_setdetachedstate failed");
3600 
3601 #	undef pthread_create
3602     /* This will call the real pthread function, not our wrapper */
3603     if(pthread_create(&thread,&attr,GC_mprotect_thread,NULL) != 0)
3604         ABORT("pthread_create failed");
3605     pthread_attr_destroy(&attr);
3606 
3607     /* Setup the sigbus handler for ignoring the meaningless SIGBUSs */
3608     #ifdef BROKEN_EXCEPTION_HANDLING
3609     {
3610         struct sigaction sa, oldsa;
3611         sa.sa_handler = (SIG_PF)GC_darwin_sigbus;
3612         sigemptyset(&sa.sa_mask);
3613         sa.sa_flags = SA_RESTART|SA_SIGINFO;
3614         if(sigaction(SIGBUS,&sa,&oldsa) < 0) ABORT("sigaction");
3615         GC_old_bus_handler = (SIG_PF)oldsa.sa_handler;
3616         if (GC_old_bus_handler != SIG_DFL) {
3617 #       	ifdef PRINTSTATS
3618                 GC_err_printf0("Replaced other SIGBUS handler\n");
3619 #       	endif
3620         }
3621     }
3622     #endif /* BROKEN_EXCEPTION_HANDLING  */
3623 }
3624 
/* The source code for Apple's GDB was used as a reference for the exception
   forwarding code. This code is similar to the GDB code only because there is
   only one way to do it. */
/* Re-deliver an exception we are not handling to the handler that was  */
/* installed before GC_dirty_init() replaced it (saved in               */
/* GC_old_exc_ports), using whichever behavior/flavor that handler was  */
/* registered with.  Returns the kern_return_t from the raise call (or, */
/* for state-carrying behaviors, from the final thread_set_state).      */
static kern_return_t GC_forward_exception(
        mach_port_t thread,
        mach_port_t task,
        exception_type_t exception,
        exception_data_t data,
        mach_msg_type_number_t data_count
) {
    int i;
    kern_return_t r;
    mach_port_t port;
    exception_behavior_t behavior;
    thread_state_flavor_t flavor;

    thread_state_data_t thread_state;
    mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;

    /* Find the first saved entry whose mask covers this exception type. */
    for(i=0;i<GC_old_exc_ports.count;i++)
        if(GC_old_exc_ports.masks[i] & (1 << exception))
            break;
    if(i==GC_old_exc_ports.count) ABORT("No handler for exception!");

    port = GC_old_exc_ports.ports[i];
    behavior = GC_old_exc_ports.behaviors[i];
    flavor = GC_old_exc_ports.flavors[i];

    /* EXCEPTION_STATE and EXCEPTION_STATE_IDENTITY handlers expect the  */
    /* thread state to be passed in, so fetch it up front.               */
    if(behavior != EXCEPTION_DEFAULT) {
        r = thread_get_state(thread,flavor,thread_state,&thread_state_count);
        if(r != KERN_SUCCESS)
            ABORT("thread_get_state failed in forward_exception");
    }

    /* Invoke the old handler with the message shape it registered for. */
    switch(behavior) {
        case EXCEPTION_DEFAULT:
            r = exception_raise(port,thread,task,exception,data,data_count);
            break;
        case EXCEPTION_STATE:
            r = exception_raise_state(port,thread,task,exception,data,
                data_count,&flavor,thread_state,thread_state_count,
                thread_state,&thread_state_count);
            break;
        case EXCEPTION_STATE_IDENTITY:
            r = exception_raise_state_identity(port,thread,task,exception,data,
                data_count,&flavor,thread_state,thread_state_count,
                thread_state,&thread_state_count);
            break;
        default:
            r = KERN_FAILURE; /* make gcc happy */
            ABORT("forward_exception: unknown behavior");
            break;
    }

    /* State-carrying handlers may have modified the state; write it     */
    /* back.  NOTE(review): this overwrites r with thread_set_state's    */
    /* result, so the handler's own return value is lost in that case.   */
    if(behavior != EXCEPTION_DEFAULT) {
        r = thread_set_state(thread,flavor,thread_state,thread_state_count);
        if(r != KERN_SUCCESS)
            ABORT("thread_set_state failed in forward_exception");
    }

    return r;
}
3687 
3688 #define FWD() GC_forward_exception(thread,task,exception,code,code_count)
3689 
/* This violates the namespace rules but there isn't anything that can be done
   about it. The exception handling stuff is hard coded to call this.
   Handles the EXC_BAD_ACCESS/KERN_PROTECTION_FAILURE faults produced when a
   thread writes to a PROTECT()ed heap page: the page is unprotected and its
   entries in GC_dirty_pages are set.  Anything else is forwarded to the
   previously installed handler via FWD(). */
kern_return_t
catch_exception_raise(
   mach_port_t exception_port,mach_port_t thread,mach_port_t task,
   exception_type_t exception,exception_data_t code,
   mach_msg_type_number_t code_count
) {
    kern_return_t r;
    char *addr;
    struct hblk *h;
    int i;
#ifdef POWERPC
    /* The PPC exception state carries the faulting data address (dar). */
    thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
    mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE_COUNT;
    ppc_exception_state_t exc_state;
#else
#	error FIXME for non-ppc darwin
#endif


    /* Not a protection fault: not ours, hand it to the old handler. */
    if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
        #ifdef DEBUG_EXCEPTION_HANDLING
        /* We aren't interested, pass it on to the old handler */
        GC_printf3("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
            exception,
            code_count > 0 ? code[0] : -1,
            code_count > 1 ? code[1] : -1);
        #endif
        return FWD();
    }

    r = thread_get_state(thread,flavor,
        (natural_t*)&exc_state,&exc_state_count);
    if(r != KERN_SUCCESS) {
        /* The thread is supposed to be suspended while the exception handler
           is called. This shouldn't fail. */
        #ifdef BROKEN_EXCEPTION_HANDLING
            GC_err_printf0("thread_get_state failed in "
                "catch_exception_raise\n");
            return KERN_SUCCESS;
        #else
            ABORT("thread_get_state failed in catch_exception_raise");
        #endif
    }

    /* This is the address that caused the fault */
    addr = (char*) exc_state.dar;

    /* No GC header for this address ==> the fault is not in our heap. */
    if((HDR(addr)) == 0) {
        /* Ugh... just like the SIGBUS problem above, it seems we get a bogus
           KERN_PROTECTION_FAILURE every once and a while. We wait till we get
           a bunch in a row before doing anything about it. If a "real" fault
           ever occurs it'll just keep faulting over and over and we'll hit
           the limit pretty quickly. */
        #ifdef BROKEN_EXCEPTION_HANDLING
            static char *last_fault;
            static int last_fault_count;

            if(addr != last_fault) {
                last_fault = addr;
                last_fault_count = 0;
            }
            if(++last_fault_count < 32) {
                if(last_fault_count == 1)
                    GC_err_printf1(
                        "GC: WARNING: Ignoring KERN_PROTECTION_FAILURE at %p\n",
                        addr);
                return KERN_SUCCESS;
            }

            GC_err_printf1("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
            /* Can't pass it along to the signal handler because that is
               ignoring SIGBUS signals. We also shouldn't call ABORT here as
               signals don't always work too well from the exception handler. */
            GC_err_printf0("Aborting\n");
            exit(EXIT_FAILURE);
        #else /* BROKEN_EXCEPTION_HANDLING */
            /* Pass it along to the next exception handler
               (which should call SIGBUS/SIGSEGV) */
            return FWD();
        #endif /* !BROKEN_EXCEPTION_HANDLING */
    }

    #ifdef BROKEN_EXCEPTION_HANDLING
        /* Reset the number of consecutive SIGBUSs */
        GC_sigbus_count = 0;
    #endif

    if(GC_mprotect_state == GC_MP_NORMAL) { /* common case */
        /* Unprotect the whole page and mark each block on it dirty. */
        h = (struct hblk*)((word)addr & ~(GC_page_size-1));
        UNPROTECT(h, GC_page_size);
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            register int index = PHT_HASH(h+i);
            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
    } else if(GC_mprotect_state == GC_MP_DISCARDING) {
        /* Lie to the thread for now. No sense UNPROTECT()ing the memory
           when we're just going to PROTECT() it again later. The thread
           will just fault again once it resumes */
    } else {
        /* Shouldn't happen, i don't think */
        GC_printf0("KERN_PROTECTION_FAILURE while world is stopped\n");
        return FWD();
    }
    return KERN_SUCCESS;
}
3797 #undef FWD
3798 
3799 /* These should never be called, but just in case...  */
catch_exception_raise_state(mach_port_name_t exception_port,int exception,exception_data_t code,mach_msg_type_number_t codeCnt,int flavor,thread_state_t old_state,int old_stateCnt,thread_state_t new_state,int new_stateCnt)3800 kern_return_t catch_exception_raise_state(mach_port_name_t exception_port,
3801     int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
3802     int flavor, thread_state_t old_state, int old_stateCnt,
3803     thread_state_t new_state, int new_stateCnt)
3804 {
3805     ABORT("catch_exception_raise_state");
3806     return(KERN_INVALID_ARGUMENT);
3807 }
catch_exception_raise_state_identity(mach_port_name_t exception_port,mach_port_t thread,mach_port_t task,int exception,exception_data_t code,mach_msg_type_number_t codeCnt,int flavor,thread_state_t old_state,int old_stateCnt,thread_state_t new_state,int new_stateCnt)3808 kern_return_t catch_exception_raise_state_identity(
3809     mach_port_name_t exception_port, mach_port_t thread, mach_port_t task,
3810     int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
3811     int flavor, thread_state_t old_state, int old_stateCnt,
3812     thread_state_t new_state, int new_stateCnt)
3813 {
3814     ABORT("catch_exception_raise_state_identity");
3815     return(KERN_INVALID_ARGUMENT);
3816 }
3817 
3818 
3819 #endif /* DARWIN && MPROTECT_VDB */
3820 
3821 # ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
  /* Fallback used when the platform port does not supply its own       */
  /* version: reports GC_PROTECTS_NONE, i.e. clients need not expect    */
  /* any of their pages to be write-protected by the collector.         */
  int GC_incremental_protection_needs()
  {
    return GC_PROTECTS_NONE;
  }
3826 # endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
3827 
3828 /*
3829  * Call stack save code for debugging.
3830  * Should probably be in mach_dep.c, but that requires reorganization.
3831  */
3832 
3833 /* I suspect the following works for most X86 *nix variants, so 	*/
3834 /* long as the frame pointer is explicitly stored.  In the case of gcc,	*/
3835 /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is.	*/
#if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
#   include <features.h>

    /* gcc/x86 stack frame layout, valid only when the frame pointer	*/
    /* is stored (i.e. not compiled with -fomit-frame-pointer); see	*/
    /* the comment above.						*/
    struct frame {
	struct frame *fr_savfp;	/* caller's saved frame pointer (%ebp)	*/
	long	fr_savpc;	/* return address			*/
        long	fr_arg[NARGS];  /* All the arguments go here.	*/
    };
#endif
3845 
#if defined(SPARC)
#  if defined(LINUX)
#    include <features.h>

     /* SPARC register-window save area as laid out on Linux: 8 locals	*/
     /* and 6 ins, then the saved frame pointer and return pc.		*/
     struct frame {
	long	fr_local[8];
	long	fr_arg[6];
	struct frame *fr_savfp;
	long	fr_savpc;
#       ifndef __arch64__
	  char	*fr_stret;	/* struct-return address (32-bit ABI only) */
#       endif
	long	fr_argd[6];
	long	fr_argx[0];	/* variable-size tail; [0] is a GNU extension */
     };
#  else
#    if defined(SUNOS4)
#      include <machine/frame.h>
#    else
#      if defined (DRSNX)
#	 include <sys/sparc/frame.h>
#      else
#	 if defined(OPENBSD) || defined(NETBSD)
#	   include <frame.h>
#	 else
#	   include <sys/frame.h>
#	 endif
#      endif
#    endif
#  endif
3876 #  if NARGS > 6
	--> We only know how to get the first 6 arguments
3878 #  endif
3879 #endif /* SPARC */
3880 
3881 #ifdef  NEED_CALLINFO
/* Fill in the pc and argument information for up to NFRAMES of my	*/
/* callers.  Ignore my frame and my caller's frame.			*/
3884 
3885 #ifdef LINUX
3886 #   include <unistd.h>
3887 #endif
3888 
3889 #endif /* NEED_CALLINFO */
3890 
3891 #ifdef SAVE_CALL_CHAIN
3892 
3893 #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
3894     && defined(GC_HAVE_BUILTIN_BACKTRACE)
3895 
3896 #include <execinfo.h>
3897 
GC_save_callers(info)3898 void GC_save_callers (info)
3899 struct callinfo info[NFRAMES];
3900 {
3901   void * tmp_info[NFRAMES + 1];
3902   int npcs, i;
3903 # define IGNORE_FRAMES 1
3904 
3905   /* We retrieve NFRAMES+1 pc values, but discard the first, since it	*/
3906   /* points to our own frame.						*/
3907   GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
3908   npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
3909   BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
3910   for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
3911 }
3912 
3913 #else /* No builtin backtrace; do it ourselves */
3914 
/* OpenBSD/NetBSD name the saved-fp/saved-pc members of struct frame	*/
/* differently from the SysV convention; map both spellings to common	*/
/* macros used by the frame walker below.				*/
#if (defined(OPENBSD) || defined(NETBSD)) && defined(SPARC)
#  define FR_SAVFP fr_fp
#  define FR_SAVPC fr_pc
#else
#  define FR_SAVFP fr_savfp
#  define FR_SAVPC fr_savpc
#endif

/* On 64-bit SPARC ABIs the saved frame pointer is offset by the stack	*/
/* bias (2047); add it back when following frame links.			*/
#if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
#   define BIAS 2047
#else
#   define BIAS 0
#endif
3928 
/* Fill in up to NFRAMES caller pcs (and, when NARGS > 0, their first	*/
/* NARGS words of arguments) in info[] by walking the saved frame	*/
/* pointer chain, stopping at GC_stackbottom or a non-monotonic link.	*/
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
# ifdef I386
    /* We assume this is turned on only with gcc as the compiler. */
    asm("movl %%ebp,%0" : "=r"(frame));
    fp = frame;
# else
    /* SPARC: flush the register windows to memory first so the saved	*/
    /* frame links can actually be read off the stack.			*/
    frame = (struct frame *) GC_save_regs_in_stack ();
    fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
#endif

   /* Walk toward the stack bottom; stop if the chain goes the wrong	*/
   /* way (fp hotter than our own frame) or leaves the stack.		*/
   for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
	   && (nframes < NFRAMES));
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
      register int i;

      info[nframes].ci_pc = fp->FR_SAVPC;
#     if NARGS > 0
        for (i = 0; i < NARGS; i++) {
	  /* Arguments are stored complemented (GC_print_callers undoes	*/
	  /* the ~), presumably so they don't look like heap pointers.	*/
	  info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
        }
#     endif /* NARGS > 0 */
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}
3958 
3959 #endif /* No builtin backtrace */
3960 
3961 #endif /* SAVE_CALL_CHAIN */
3962 
3963 #ifdef NEED_CALLINFO
3964 
3965 /* Print info to stderr.  We do NOT hold the allocation lock */
GC_print_callers(info)3966 void GC_print_callers (info)
3967 struct callinfo info[NFRAMES];
3968 {
3969     register int i;
3970     static int reentry_count = 0;
3971     GC_bool stop = FALSE;
3972 
3973     LOCK();
3974       ++reentry_count;
3975     UNLOCK();
3976 
3977 #   if NFRAMES == 1
3978       GC_err_printf0("\tCaller at allocation:\n");
3979 #   else
3980       GC_err_printf0("\tCall chain at allocation:\n");
3981 #   endif
3982     for (i = 0; i < NFRAMES && !stop ; i++) {
3983      	if (info[i].ci_pc == 0) break;
3984 #	if NARGS > 0
3985 	{
3986 	  int j;
3987 
3988      	  GC_err_printf0("\t\targs: ");
3989      	  for (j = 0; j < NARGS; j++) {
3990      	    if (j != 0) GC_err_printf0(", ");
3991      	    GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
3992      	    				~(info[i].ci_arg[j]));
3993      	  }
3994 	  GC_err_printf0("\n");
3995 	}
3996 # 	endif
3997         if (reentry_count > 1) {
3998 	    /* We were called during an allocation during	*/
3999 	    /* a previous GC_print_callers call; punt.		*/
4000      	    GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
4001 	    continue;
4002 	}
4003 	{
4004 #	  ifdef LINUX
4005 	    FILE *pipe;
4006 #	  endif
4007 #	  if defined(GC_HAVE_BUILTIN_BACKTRACE)
4008 	    char **sym_name =
4009 	      backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
4010 	    char *name = sym_name[0];
4011 #	  else
4012 	    char buf[40];
4013 	    char *name = buf;
4014      	    sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
4015 #	  endif
4016 #	  if defined(LINUX) && !defined(SMALL_CONFIG)
4017 	    /* Try for a line number. */
4018 	    {
4019 #	        define EXE_SZ 100
4020 		static char exe_name[EXE_SZ];
4021 #		define CMD_SZ 200
4022 		char cmd_buf[CMD_SZ];
4023 #		define RESULT_SZ 200
4024 		static char result_buf[RESULT_SZ];
4025 		size_t result_len;
4026 		static GC_bool found_exe_name = FALSE;
4027 		static GC_bool will_fail = FALSE;
4028 		int ret_code;
4029 		/* Try to get it via a hairy and expensive scheme.	*/
4030 		/* First we get the name of the executable:		*/
4031 		if (will_fail) goto out;
4032 		if (!found_exe_name) {
4033 		  ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
4034 		  if (ret_code < 0 || ret_code >= EXE_SZ
4035 		      || exe_name[0] != '/') {
4036 		    will_fail = TRUE;	/* Dont try again. */
4037 		    goto out;
4038 		  }
4039 		  exe_name[ret_code] = '\0';
4040 		  found_exe_name = TRUE;
4041 		}
4042 		/* Then we use popen to start addr2line -e <exe> <addr>	*/
4043 		/* There are faster ways to do this, but hopefully this	*/
4044 		/* isn't time critical.					*/
4045 		sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
4046 				 (unsigned long)info[i].ci_pc);
4047 		pipe = popen(cmd_buf, "r");
4048 		if (pipe == NULL
4049 		    || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
4050 		       == 0) {
4051 		  if (pipe != NULL) pclose(pipe);
4052 		  will_fail = TRUE;
4053 		  goto out;
4054 		}
4055 		if (result_buf[result_len - 1] == '\n') --result_len;
4056 		result_buf[result_len] = 0;
4057 		if (result_buf[0] == '?'
4058 		    || result_buf[result_len-2] == ':'
4059 		       && result_buf[result_len-1] == '0') {
4060 		    pclose(pipe);
4061 		    goto out;
4062 		}
4063 		/* Get rid of embedded newline, if any.  Test for "main" */
4064 		{
4065 		   char * nl = strchr(result_buf, '\n');
4066 		   if (nl != NULL && nl < result_buf + result_len) {
4067 		     *nl = ':';
4068 		   }
4069 		   if (strncmp(result_buf, "main", nl - result_buf) == 0) {
4070 		     stop = TRUE;
4071 		   }
4072 		}
4073 		if (result_len < RESULT_SZ - 25) {
4074 		  /* Add in hex address	*/
4075 		    sprintf(result_buf + result_len, " [0x%lx]",
4076 			  (unsigned long)info[i].ci_pc);
4077 		}
4078 		name = result_buf;
4079 		pclose(pipe);
4080 		out:;
4081 	    }
4082 #	  endif /* LINUX */
4083 	  GC_err_printf1("\t\t%s\n", name);
4084 #	  if defined(GC_HAVE_BUILTIN_BACKTRACE)
4085 	    free(sym_name);  /* May call GC_free; that's OK */
4086 #         endif
4087 	}
4088     }
4089     LOCK();
4090       --reentry_count;
4091     UNLOCK();
4092 }
4093 
4094 #endif /* NEED_CALLINFO */
4095 
4096 
4097 
4098 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
4099 
4100 /* Dump /proc/self/maps to GC_stderr, to enable looking up names for
4101    addresses in FIND_LEAK output. */
4102 
dump_maps(char * maps)4103 static word dump_maps(char *maps)
4104 {
4105     GC_err_write(maps, strlen(maps));
4106     return 1;
4107 }
4108 
/* Dump /proc/self/maps to the GC error stream (via GC_apply_to_maps	*/
/* and dump_maps above), bracketed by begin/end markers, so addresses	*/
/* in FIND_LEAK output can be resolved by hand.				*/
void GC_print_address_map()
{
    GC_err_printf0("---------- Begin address map ----------\n");
    GC_apply_to_maps(dump_maps);
    GC_err_printf0("---------- End address map ----------\n");
}
4115 
4116 #endif
4117 
4118 
4119