1 /*
2 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1996-1997 by Silicon Graphics. All rights reserved.
4 *
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7 *
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
13 */
14
15 # include "gc_priv.h"
16
17 # include <stdio.h>
18 # include <signal.h>
19
20 # if defined(LINUX) && !defined(POWERPC)
21 # include <linux/version.h>
22 # if (LINUX_VERSION_CODE <= 0x10400)
23 /* Ugly hack to get struct sigcontext_struct definition. Required */
24 /* for some early 1.3.X releases. Will hopefully go away soon. */
25 /* in some later Linux releases, asm/sigcontext.h may have to */
26 /* be included instead. */
27 # define __KERNEL__
28 # include <asm/signal.h>
29 # undef __KERNEL__
30 # else
31 /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
32 /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
33 /* prototypes, so we have to include the top-level sigcontext.h to */
34 /* make sure the former gets defined to be the latter if appropriate. */
35 # include <features.h>
36 # if 2 <= __GLIBC__
37 # include <sigcontext.h>
38 # else /* not 2 <= __GLIBC__ */
39 /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
40 /* one. Check LINUX_VERSION_CODE to see which we should reference. */
41 # include <asm/sigcontext.h>
42 # endif /* 2 <= __GLIBC__ */
43 # endif
44 # endif
45 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
46 # include <sys/types.h>
47 # if !defined(MSWIN32) && !defined(SUNOS4)
48 # include <unistd.h>
49 # endif
50 # endif
51
/* Blatantly OS dependent routines, except for those that are related	*/
/* to dynamic loading.							*/
54
55 # if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
56 # define NEED_FIND_LIMIT
57 # endif
58
59 # if defined(IRIX_THREADS)
60 # define NEED_FIND_LIMIT
61 # endif
62
63 # if (defined(SUNOS4) & defined(DYNAMIC_LOADING)) && !defined(PCR)
64 # define NEED_FIND_LIMIT
65 # endif
66
67 # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
68 # define NEED_FIND_LIMIT
69 # endif
70
71 # if defined(LINUX) && defined(POWERPC)
72 # define NEED_FIND_LIMIT
73 # endif
74
75 #ifdef NEED_FIND_LIMIT
76 # include <setjmp.h>
77 #endif
78
79 #ifdef FREEBSD
80 # include <machine/trap.h>
81 #endif
82
83 #ifdef AMIGA
84 # include <proto/exec.h>
85 # include <proto/dos.h>
86 # include <dos/dosextens.h>
87 # include <workbench/startup.h>
88 #endif
89
90 #ifdef MSWIN32
91 # define WIN32_LEAN_AND_MEAN
92 # define NOSERVICE
93 # include <windows.h>
94 #endif
95
96 #ifdef MACOS
97 # include <Processes.h>
98 #endif
99
100 #ifdef IRIX5
101 # include <sys/uio.h>
102 # include <malloc.h> /* for locking */
103 #endif
104 #ifdef USE_MMAP
105 # include <sys/types.h>
106 # include <sys/mman.h>
107 # include <sys/stat.h>
108 # include <fcntl.h>
109 #endif
110
111 #ifdef SUNOS5SIGS
112 # include <sys/siginfo.h>
113 # undef setjmp
114 # undef longjmp
115 # define setjmp(env) sigsetjmp(env, 1)
116 # define longjmp(env, val) siglongjmp(env, val)
117 # define jmp_buf sigjmp_buf
118 #endif
119
120 #ifdef DJGPP
/* Apparently necessary for djgpp 2.01.  May cause problems with	*/
/* other versions.							*/
123 typedef long unsigned int caddr_t;
124 #endif
125
126 #ifdef PCR
127 # include "il/PCR_IL.h"
128 # include "th/PCR_ThCtl.h"
129 # include "mm/PCR_MM.h"
130 #endif
131
132 #if !defined(NO_EXECUTE_PERMISSION)
133 # define OPT_PROT_EXEC PROT_EXEC
134 #else
135 # define OPT_PROT_EXEC 0
136 #endif
137
138 #if defined(LINUX) && defined(POWERPC)
ptr_t GC_data_start;	/* Beginning of the data segment, set at init.	*/

/* Compute GC_data_start on Linux/PowerPC by probing backwards from	*/
/* &_environ (which lies in the data segment) for the first		*/
/* inaccessible page.							*/
void GC_init_linuxppc()
{
    extern ptr_t GC_find_limit();
    extern char **_environ;
    /* Second argument FALSE: search downward for the lowest	*/
    /* address q such that [q, &_environ] is addressable.	*/
    GC_data_start = GC_find_limit((ptr_t)&_environ, FALSE);
}
147 #endif
148
149 # ifdef OS2
150
151 # include <stddef.h>
152
153 # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
154
/* Minimal layout of the MSDOS ("MZ") executable header: only the	*/
/* magic number and the offset of the new-style header are used.	*/
struct exe_hdr {
    unsigned short	magic_number;
    unsigned short	padding[29];
    long		new_exe_offset;
};

#define E_MAGIC(x)	(x).magic_number
#define EMAGIC		0x5A4D		/* "MZ" in little-endian order	*/
#define E_LFANEW(x)	(x).new_exe_offset

/* Partial layout of the OS/2 LX ("linear executable") header.	*/
/* The padding arrays cover fields the collector does not need.	*/
struct e32_exe {
    unsigned char	magic_number[2];
    unsigned char	byte_order;
    unsigned char	word_order;
    unsigned long	exe_format_level;
    unsigned short	cpu;
    unsigned short	os;
    unsigned long	padding1[13];
    unsigned long	object_table_offset;
    unsigned long	object_count;
    unsigned long	padding2[31];
};

#define E32_MAGIC1(x)	(x).magic_number[0]
#define E32MAGIC1	'L'
#define E32_MAGIC2(x)	(x).magic_number[1]
#define E32MAGIC2	'X'
#define E32_BORDER(x)	(x).byte_order
#define E32LEBO		0	/* little-endian byte order	*/
#define E32_WORDER(x)	(x).word_order
#define E32LEWO		0	/* little-endian word order	*/
#define E32_CPU(x)	(x).cpu
#define E32CPU286	1
#define E32_OBJTAB(x)	(x).object_table_offset
#define E32_OBJCNT(x)	(x).object_count

/* One entry of the LX object (segment) table.	*/
struct o32_obj {
    unsigned long	size;
    unsigned long	base;
    unsigned long	flags;
    unsigned long	pagemap;
    unsigned long	mapsize;
    unsigned long	reserved;
};

#define O32_FLAGS(x)	(x).flags
#define OBJREAD		0x0001L
#define OBJWRITE	0x0002L
#define OBJINVALID	0x0080L
#define O32_SIZE(x)	(x).size
#define O32_BASE(x)	(x).base
206
207 # else /* IBM's compiler */
208
209 /* A kludge to get around what appears to be a header file bug */
210 # ifndef WORD
211 # define WORD unsigned short
212 # endif
213 # ifndef DWORD
214 # define DWORD unsigned long
215 # endif
216
217 # define EXE386 1
218 # include <newexe.h>
219 # include <exe386.h>
220
221 # endif /* __IBMC__ */
222
223 # define INCL_DOSEXCEPTIONS
224 # define INCL_DOSPROCESS
225 # define INCL_DOSERRORS
226 # define INCL_DOSMODULEMGR
227 # define INCL_DOSMEMMGR
228 # include <os2.h>
229
230
231 /* Disable and enable signals during nontrivial allocations */
232
/* Defer signal delivery during nontrivial allocation: OS/2's	*/
/* "must complete" section postpones signals until we exit it.	*/
void GC_disable_signals(void)
{
    ULONG nest;
    
    DosEnterMustComplete(&nest);
    /* A nesting count other than 1 means we were already inside	*/
    /* a must-complete section; that is not expected here.		*/
    if (nest != 1) ABORT("nested GC_disable_signals");
}
240
/* Leave the must-complete section entered by GC_disable_signals,	*/
/* re-enabling signal delivery.						*/
void GC_enable_signals(void)
{
    ULONG nest;
    
    DosExitMustComplete(&nest);
    /* Count must drop back to 0; otherwise calls were unbalanced.	*/
    if (nest != 0) ABORT("GC_enable_signals");
}
248
249
250 # else
251
252 # if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
253 && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW)
254
# if defined(sigmask) && !defined(UTS4)
    /* Use the traditional BSD interface */
#   define SIGSET_T int
#   define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#   define SIG_FILL(set)  (set) = 0x7fffffff
    	  /* Setting the leading bit appears to provoke a bug in some	*/
    	  /* longjmp implementations.  Most systems appear not to have	*/
    	  /* a signal 32.						*/
#   define SIGSETMASK(old, new) (old) = sigsetmask(new)
# else
    /* Use POSIX/SYSV interface	*/
#   define SIGSET_T sigset_t
#   define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#   define SIG_FILL(set) sigfillset(&set)
#   define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
# endif

/* TRUE once new_mask below has been built (built lazily on first	*/
/* call to GC_disable_signals).						*/
static bool mask_initialized = FALSE;

/* Mask applied while signals are disabled: everything blocked except	*/
/* the program-error signals removed in GC_disable_signals.		*/
static SIGSET_T new_mask;

/* Mask in effect before GC_disable_signals; restored on enable.	*/
static SIGSET_T old_mask;

/* Scratch destination for the mask swap in GC_enable_signals.	*/
static SIGSET_T dummy;

#if defined(PRINTSTATS) && !defined(THREADS)
  /* Debug check that disable/enable calls are properly paired.	*/
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
#endif
284
/* Block (almost) all signals, saving the previous mask in old_mask	*/
/* for GC_enable_signals to restore.					*/
void GC_disable_signals()
{
    if (!mask_initialized) {
	SIG_FILL(new_mask);

	/* Leave the program-error signals deliverable: blocking them	*/
	/* would only postpone a crash we cannot recover from anyway.	*/
	SIG_DEL(new_mask, SIGSEGV);
	SIG_DEL(new_mask, SIGILL);
	SIG_DEL(new_mask, SIGQUIT);
#	ifdef SIGBUS
	    SIG_DEL(new_mask, SIGBUS);
#	endif
#	ifdef SIGIOT
	    SIG_DEL(new_mask, SIGIOT);
#	endif
#	ifdef SIGEMT
	    SIG_DEL(new_mask, SIGEMT);
#	endif
#	ifdef SIGTRAP
	    SIG_DEL(new_mask, SIGTRAP);
#	endif
	mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
	if (GC_sig_disabled != 0) ABORT("Nested disables");
	GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask,new_mask);
}
313
/* Restore the signal mask saved by the matching GC_disable_signals.	*/
void GC_enable_signals()
{
#   ifdef CHECK_SIGNALS
	if (GC_sig_disabled != 1) ABORT("Unmatched enable");
	GC_sig_disabled--;
#   endif
    /* The previous mask (written into dummy) is discarded.	*/
    SIGSETMASK(dummy,old_mask);
}
322
323 # endif /* !PCR */
324
325 # endif /*!OS/2 */
326
327 /* Ivan Demakov: simplest way (to me) */
328 #ifdef DOS4GW
/* DOS4GW needs no signal manipulation: deliberate no-ops.	*/
void GC_disable_signals() { }
void GC_enable_signals() { }
331 #endif
332
333 /* Find the page size */
334 word GC_page_size;
335
336 # ifdef MSWIN32
GC_setpagesize()337 void GC_setpagesize()
338 {
339 SYSTEM_INFO sysinfo;
340
341 GetSystemInfo(&sysinfo);
342 GC_page_size = sysinfo.dwPageSize;
343 }
344
345 # else
#   if defined(MPROTECT_VDB) || defined(PROC_VDB)
      /* Dirty-bit tracking manipulates real VM pages, so the true	*/
      /* system page size is required here.				*/
      void GC_setpagesize()
      {
	GC_page_size = GETPAGESIZE();
      }
#   else
      /* It's acceptable to fake it.	*/
      void GC_setpagesize()
      {
	GC_page_size = HBLKSIZE;
      }
#   endif
358 # endif
359
360 /*
361 * Find the base of the stack.
362 * Used only in single-threaded environment.
363 * With threads, GC_mark_roots needs to know how to do this.
364 * Called with allocator lock held.
365 */
366 # ifdef MSWIN32
367 # define is_writable(prot) ((prot) == PAGE_READWRITE \
368 || (prot) == PAGE_WRITECOPY \
369 || (prot) == PAGE_EXECUTE_READWRITE \
370 || (prot) == PAGE_EXECUTE_WRITECOPY)
/* Return the number of bytes that are writable starting at p.	*/
/* The pointer p is assumed to be page aligned.			*/
/* If base is not 0, *base becomes the beginning of the 	*/
/* allocation region containing p.				*/
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;
    
    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    /* PAGE_GUARD and PAGE_NOCACHE are modifier bits; strip them	*/
    /* before testing against the plain protection values.		*/
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
	return(0);
    }
    /* Reserved-but-uncommitted regions are not actually accessible.	*/
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}
391
GC_get_stack_base()392 ptr_t GC_get_stack_base()
393 {
394 int dummy;
395 ptr_t sp = (ptr_t)(&dummy);
396 ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
397 word size = GC_get_writable_length(trunc_sp, 0);
398
399 return(trunc_sp + size);
400 }
401
402
403 # else
404
405 # ifdef OS2
406
/* OS/2: the thread information block records the stack limit.	*/
ptr_t GC_get_stack_base()
{
    PTIB ptib;
    PPIB ppib;
    
    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
    	GC_err_printf0("DosGetInfoBlocks failed\n");
    	ABORT("DosGetInfoBlocks failed\n");
    }
    return((ptr_t)(ptib -> tib_pstacklimit));
}
418
419 # else
420
421 # ifdef AMIGA
422
/* AMIGA: derive the stack bottom from the task/CLI structures.	*/
ptr_t GC_get_stack_base()
{
    extern struct WBStartup *_WBenchMsg;
    extern long __base;
    extern long __stack;
    struct Task *task;
    struct Process *proc;
    struct CommandLineInterface *cli;
    long size;

    if ((task = FindTask(0)) == 0) {
	GC_err_puts("Cannot find own task structure\n");
	ABORT("task missing");
    }
    proc = (struct Process *)task;
    cli = BADDR(proc->pr_CLI);

    /* Workbench start (or no CLI): use the task's own stack bounds;	*/
    /* otherwise the CLI stores the default stack size in longwords.	*/
    if (_WBenchMsg != 0 || cli == 0) {
	size = (char *)task->tc_SPUpper - (char *)task->tc_SPLower;
    } else {
	size = cli->cli_DefaultStack * 4;
    }
    /* Allow for the larger of the detected size and the __stack the	*/
    /* program was linked with, measured from __base.			*/
    return (ptr_t)(__base + GC_max(size, __stack));
}
447
448 # else
449
450
451
452 # ifdef NEED_FIND_LIMIT
/* Some tools to implement HEURISTIC2	*/
#   define MIN_PAGE_SIZE 256	/* Smallest conceivable page size, bytes */
    /* Escape route out of the probe's fault handler.	*/
    /* static */ jmp_buf GC_jmp_buf;
    
    /* Handler installed while probing memory: any fault simply	*/
    /* jumps back to the setjmp point in the probing routine.	*/
    /*ARGSUSED*/
    void GC_fault_handler(sig)
    int sig;
    {
        longjmp(GC_jmp_buf, 1);
    }
    
#   ifdef __STDC__
	typedef void (*handler)(int);
#   else
	typedef void (*handler)();
#   endif

#   if defined(SUNOS5SIGS) || defined(IRIX5)
	/* Saved SIGSEGV disposition, restored by GC_reset_fault_handler. */
	static struct sigaction oldact;
#   else
	/* Saved dispositions for the signal()-based interface.	*/
        static handler old_segv_handler, old_bus_handler;
#   endif
475
/* Install GC_fault_handler for SIGSEGV (and SIGBUS where defined),	*/
/* saving the previous dispositions for GC_reset_fault_handler.		*/
void GC_setup_temporary_fault_handler()
{
#   if defined(SUNOS5SIGS) || defined(IRIX5)
	struct sigaction	act;

	act.sa_handler	= GC_fault_handler;
        act.sa_flags    = SA_RESTART | SA_SIGINFO | SA_NODEFER;
        /* The presence of SA_NODEFER represents yet another gross	*/
        /* hack.  Under Solaris 2.3, siglongjmp doesn't appear to	*/
        /* interact correctly with -lthread.  We hide the confusion	*/
        /* by making sure that signal handling doesn't affect the	*/
        /* signal mask.							*/

	(void) sigemptyset(&act.sa_mask);
#	ifdef IRIX_THREADS
	        /* Older versions have a bug related to retrieving and	*/
	        /* and setting a handler at the same time.		*/
	        (void) sigaction(SIGSEGV, 0, &oldact);
	        (void) sigaction(SIGSEGV, &act, 0);
#	else
	        (void) sigaction(SIGSEGV, &act, &oldact);
#	endif	/* IRIX_THREADS */
#   else
    	old_segv_handler = signal(SIGSEGV, GC_fault_handler);
#	ifdef SIGBUS
	    old_bus_handler = signal(SIGBUS, GC_fault_handler);
#	endif
#   endif
}
505
/* Undo GC_setup_temporary_fault_handler: restore the saved handlers.	*/
void GC_reset_fault_handler()
{
#   if defined(SUNOS5SIGS) || defined(IRIX5)
	(void) sigaction(SIGSEGV, &oldact, 0);
#   else
    	(void) signal(SIGSEGV, old_segv_handler);
#	ifdef SIGBUS
	    (void) signal(SIGBUS, old_bus_handler);
#	endif
#   endif
}
517
/* Return the first nonaddressible location > p (up) or 	*/
/* the smallest location q s.t. [q,p] is addressible (!up).	*/
ptr_t GC_find_limit(p, up)
ptr_t p;
bool up;
{
    static VOLATILE ptr_t result;
    		/* Needs to be static, since otherwise it may not be	*/
    		/* preserved across the longjmp.  Can safely be 	*/
    		/* static since it's only called once, with the		*/
    		/* allocation lock held.				*/


    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
	/* Touch successive pages until one faults; the fault handler	*/
	/* longjmps back here with a nonzero setjmp value.		*/
	result = (ptr_t)(((word)(p))
			  & ~(MIN_PAGE_SIZE-1));
	for (;;) {
 	    if (up) {
		result += MIN_PAGE_SIZE;
 	    } else {
		result -= MIN_PAGE_SIZE;
 	    }
	    GC_noop1((word)(*result));
	}
    }
    GC_reset_fault_handler();
    if (!up) {
	/* We faulted one page past the limit; step back inside.	*/
	result += MIN_PAGE_SIZE;
    }
    return(result);
}
550 # endif
551
552
/* Generic stack bottom: use the compile-time STACKBOTTOM if given,	*/
/* else HEURISTIC1 (round a local's address to a STACK_GRAN		*/
/* boundary) or HEURISTIC2 (probe for the limit of addressibility).	*/
ptr_t GC_get_stack_base()
{
    word dummy;
    ptr_t result;

#   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)

#   ifdef STACKBOTTOM
	return(STACKBOTTOM);
#   else
#	ifdef HEURISTIC1
	   /* Round toward the stack bottom, i.e. up when the stack	*/
	   /* grows down, down otherwise.				*/
#	   ifdef STACK_GROWS_DOWN
	     result = (ptr_t)((((word)(&dummy))
	     		       + STACKBOTTOM_ALIGNMENT_M1)
			      & ~STACKBOTTOM_ALIGNMENT_M1);
#	   else
	     result = (ptr_t)(((word)(&dummy))
	     		      & ~STACKBOTTOM_ALIGNMENT_M1);
#	   endif
#	endif /* HEURISTIC1 */
#	ifdef HEURISTIC2
	    /* Probe from a local toward the stack bottom, optionally	*/
	    /* clamping the answer at HEURISTIC2_LIMIT.			*/
#	    ifdef STACK_GROWS_DOWN
		result = GC_find_limit((ptr_t)(&dummy), TRUE);
#           	ifdef HEURISTIC2_LIMIT
		    if (result > HEURISTIC2_LIMIT
		        && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
		            result = HEURISTIC2_LIMIT;
		    }
#	        endif
#	    else
		result = GC_find_limit((ptr_t)(&dummy), FALSE);
#           	ifdef HEURISTIC2_LIMIT
		    if (result < HEURISTIC2_LIMIT
		        && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
		            result = HEURISTIC2_LIMIT;
		    }
#	        endif
#	    endif

#	endif /* HEURISTIC2 */
	return(result);
#   endif /* STACKBOTTOM */
}
596
597 # endif /* ! AMIGA */
598 # endif /* ! OS2 */
599 # endif /* ! MSWIN32 */
600
601 /*
602 * Register static data segment(s) as roots.
603 * If more data segments are added later then they need to be registered
604 * add that point (as we do with SunOS dynamic loading),
605 * or GC_mark_roots needs to check for them (as we do with PCR).
606 * Called with allocator lock held.
607 */
608
609 # ifdef OS2
610
GC_register_data_segments()611 void GC_register_data_segments()
612 {
613 PTIB ptib;
614 PPIB ppib;
615 HMODULE module_handle;
616 # define PBUFSIZ 512
617 UCHAR path[PBUFSIZ];
618 FILE * myexefile;
619 struct exe_hdr hdrdos; /* MSDOS header. */
620 struct e32_exe hdr386; /* Real header for my executable */
621 struct o32_obj seg; /* Currrent segment */
622 int nsegs;
623
624
625 if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
626 GC_err_printf0("DosGetInfoBlocks failed\n");
627 ABORT("DosGetInfoBlocks failed\n");
628 }
629 module_handle = ppib -> pib_hmte;
630 if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
631 GC_err_printf0("DosQueryModuleName failed\n");
632 ABORT("DosGetInfoBlocks failed\n");
633 }
634 myexefile = fopen(path, "rb");
635 if (myexefile == 0) {
636 GC_err_puts("Couldn't open executable ");
637 GC_err_puts(path); GC_err_puts("\n");
638 ABORT("Failed to open executable\n");
639 }
640 if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
641 GC_err_puts("Couldn't read MSDOS header from ");
642 GC_err_puts(path); GC_err_puts("\n");
643 ABORT("Couldn't read MSDOS header");
644 }
645 if (E_MAGIC(hdrdos) != EMAGIC) {
646 GC_err_puts("Executable has wrong DOS magic number: ");
647 GC_err_puts(path); GC_err_puts("\n");
648 ABORT("Bad DOS magic number");
649 }
650 if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
651 GC_err_puts("Seek to new header failed in ");
652 GC_err_puts(path); GC_err_puts("\n");
653 ABORT("Bad DOS magic number");
654 }
655 if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
656 GC_err_puts("Couldn't read MSDOS header from ");
657 GC_err_puts(path); GC_err_puts("\n");
658 ABORT("Couldn't read OS/2 header");
659 }
660 if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
661 GC_err_puts("Executable has wrong OS/2 magic number:");
662 GC_err_puts(path); GC_err_puts("\n");
663 ABORT("Bad OS/2 magic number");
664 }
665 if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
666 GC_err_puts("Executable %s has wrong byte order: ");
667 GC_err_puts(path); GC_err_puts("\n");
668 ABORT("Bad byte order");
669 }
670 if ( E32_CPU(hdr386) == E32CPU286) {
671 GC_err_puts("GC can't handle 80286 executables: ");
672 GC_err_puts(path); GC_err_puts("\n");
673 EXIT();
674 }
675 if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
676 SEEK_SET) != 0) {
677 GC_err_puts("Seek to object table failed: ");
678 GC_err_puts(path); GC_err_puts("\n");
679 ABORT("Seek to object table failed");
680 }
681 for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
682 int flags;
683 if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
684 GC_err_puts("Couldn't read obj table entry from ");
685 GC_err_puts(path); GC_err_puts("\n");
686 ABORT("Couldn't read obj table entry");
687 }
688 flags = O32_FLAGS(seg);
689 if (!(flags & OBJWRITE)) continue;
690 if (!(flags & OBJREAD)) continue;
691 if (flags & OBJINVALID) {
692 GC_err_printf0("Object with invalid pages?\n");
693 continue;
694 }
695 GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
696 }
697 }
698
699 # else
700
701 # ifdef MSWIN32
/* Unfortunately, we have to handle win32s very differently from NT, 	*/
/* since VirtualQuery has very different semantics.  In particular,	*/
/* under win32s a VirtualQuery call on an unmapped page returns an	*/
/* invalid result.  Under NT, GC_register_data_segments is a noop and	*/
/* all real work is done by GC_register_dynamic_libraries.  Under	*/
/* win32s, we cannot find the data segments associated with dll's.	*/
/* We register the main data segment here.				*/
bool GC_win32s = FALSE;	/* We're running under win32s.	*/

/* Heuristic win32s test based on GetVersion(): high bit set (not NT)	*/
/* combined with a major version of at most 3.				*/
bool GC_is_win32s()
{
    DWORD v = GetVersion();
    
    /* Check that this is not NT, and Windows major version <= 3	*/
    return ((v & 0x80000000) && (v & 0xff) <= 3);
}

/* Record whether we are running under win32s.	*/
void GC_init_win32()
{
    GC_win32s = GC_is_win32s();
}
723
/* Return the smallest address a such that VirtualQuery		*/
/* returns correct results for all addresses between a and start.	*/
/* Assumes VirtualQuery returns correct information for start.	*/
ptr_t GC_least_described_address(ptr_t start)
{  
    MEMORY_BASIC_INFORMATION buf;
    SYSTEM_INFO sysinfo;
    DWORD result;
    LPVOID limit;
    ptr_t p;
    LPVOID q;
    
    GetSystemInfo(&sysinfo);
    limit = sysinfo.lpMinimumApplicationAddress;
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    /* Step back one page at a time; each successful query lets us	*/
    /* jump directly to the base of that allocation region.		*/
    for (;;) {
    	q = (LPVOID)(p - GC_page_size);
    	if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
    	result = VirtualQuery(q, &buf, sizeof(buf));
    	if (result != sizeof(buf) || buf.AllocationBase == 0) break;
    	p = (ptr_t)(buf.AllocationBase);
    }
    return(p);
}
748
/* Is p the start of either the malloc heap, or of one of our	*/
/* heap sections?						*/
bool GC_is_heap_base (ptr_t p)
{
   
   register unsigned i;
   
#  ifndef REDIRECT_MALLOC
     /* Cached base of the region containing the CRT malloc heap,	*/
     /* found by querying an address that malloc returns.		*/
     static ptr_t malloc_heap_pointer = 0;
   
     if (0 == malloc_heap_pointer) {
       MEMORY_BASIC_INFORMATION buf;
       register DWORD result = VirtualQuery(malloc(1), &buf, sizeof(buf));
       
       if (result != sizeof(buf)) {
         ABORT("Weird VirtualQuery result");
       }
       malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
     }
     if (p == malloc_heap_pointer) return(TRUE);
#  endif
   for (i = 0; i < GC_n_heap_bases; i++) {
     if (GC_heap_bases[i] == p) return(TRUE);
   }
   return(FALSE);
}
775
/* Scan forward from static_root (win32s only), registering each run	*/
/* of committed, writable pages that is not part of a GC heap section	*/
/* as a root region.							*/
void GC_register_root_section(ptr_t static_root)
{
    MEMORY_BASIC_INFORMATION buf;
    SYSTEM_INFO sysinfo;
    DWORD result;
    DWORD protect;
    LPVOID p;
    char * base;
    char * limit, * new_limit;
  
    if (!GC_win32s) return;
    p = base = limit = GC_least_described_address(static_root);
    GetSystemInfo(&sysinfo);
    while (p < sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            /* Extend the current [base,limit) run if contiguous;	*/
            /* otherwise flush it and start a new run at p.		*/
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
    }
    /* Flush the final pending run, if any.	*/
    if (base != limit) GC_add_roots_inner(base, limit, FALSE);
}
810
/* Register the main data segment: the address of a static variable	*/
/* serves as a seed known to lie inside it.  (Effective only under	*/
/* win32s; GC_register_root_section returns immediately otherwise.)	*/
void GC_register_data_segments()
{
    static char dummy;
    
    GC_register_root_section((ptr_t)(&dummy));
}
817 # else
818 # ifdef AMIGA
819
/* AMIGA: walk the program's seglist and register each segment's	*/
/* data as a root region.						*/
void GC_register_data_segments()
{
    extern struct WBStartup *_WBenchMsg;
    struct Process	*proc;
    struct CommandLineInterface *cli;
    BPTR myseglist;
    ULONG *data;

    /* Obtain the seglist from the Workbench startup message, or	*/
    /* from the CLI structure when started from a shell.		*/
    if ( _WBenchMsg != 0 ) {
	if ((myseglist = _WBenchMsg->sm_Segment) == 0) {
	    GC_err_puts("No seglist from workbench\n");
	    return;
	}
    } else {
	if ((proc = (struct Process *)FindTask(0)) == 0) {
	    GC_err_puts("Cannot find process structure\n");
	    return;
	}
	if ((cli = BADDR(proc->pr_CLI)) == 0) {
	    GC_err_puts("No CLI\n");
	    return;
	}
	if ((myseglist = cli->cli_Module) == 0) {
	    GC_err_puts("No seglist from CLI\n");
	    return;
	}
    }

    /* data[0] links (as a BPTR) to the next segment; data[-1] is	*/
    /* treated as the segment length.				*/
    for (data = (ULONG *)BADDR(myseglist); data != 0;
         data = (ULONG *)BADDR(data[0])) {
#        ifdef AMIGA_SKIP_SEG
           /* Optionally skip the segment containing this code.	*/
           if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
	       ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
#	 else
      	   {
#	 endif /* AMIGA_SKIP_SEG */
	      GC_add_roots_inner((char *)&data[1],
	      			 ((char *)&data[1]) + data[-1], FALSE);
	   }
    }
}
861
862
863 # else
864
865 # if (defined(SVR4) || defined(AUX) || defined(DGUX)) && !defined(PCR)
/* Determine the start of the writable data segment on SVR4-style	*/
/* systems, by probing at the post-text address implied by the		*/
/* linker's page-rounding convention, with a fault-probe fallback.	*/
char * GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
    		    & ~(sizeof(word) - 1);
    	/* etext rounded to word boundary	*/
    word next_page = ((text_end + (word)max_page_size - 1)
    		      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding		*/
    /* max_page_size to &etext if &etext is at a page boundary	*/
    
    GC_setup_temporary_fault_handler();
    if (setjmp(GC_jmp_buf) == 0) {
    	/* Try writing to the address.	*/
    	*result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
    	/* We got here via a longjmp.  The address is not readable.	*/
    	/* This is known to happen under Solaris 2.4 + gcc, which place	*/
    	/* string constants in the text segment, but after etext.	*/
    	/* Use plan B.  Note that we now know there is a gap between	*/
    	/* text and data segments, so plan A bought us something.	*/
    	result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
    }
    return((char *)result);
}
896 # endif
897
898
/* Register static data roots for the configurations whose segment	*/
/* bounds are available as symbols or runtime calls.			*/
void GC_register_data_segments()
{
#   if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
	/* As of Solaris 2.3, the Solaris threads implementation	*/
	/* allocates the data structure for the initial thread with	*/
	/* sbrk at process startup.  It needs to be scanned, so that	*/
	/* we don't lose some malloc allocated data structures		*/
	/* hanging from it.  We're on thin ice here ...			*/
        extern caddr_t sbrk();

	GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
#     else
	GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
#     endif
#   endif
#   if !defined(PCR) && defined(NEXT)
      GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
#   endif
#   if defined(MACOS)
    {
#   if defined(THINK_C)
	extern void* GC_MacGetDataStart(void);
	/* globals begin above stack and end at a5.	*/
	GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
			   (ptr_t)LMGetCurrentA5(), FALSE);
#   else
#     if defined(__MWERKS__)
#       if !__POWERPC__
	  extern void* GC_MacGetDataStart(void);
	  /* globals begin above stack and end at a5.	*/
	  GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
	  		     (ptr_t)LMGetCurrentA5(), FALSE);
#       else
	  extern char __data_start__[], __data_end__[];
	  GC_add_roots_inner((ptr_t)&__data_start__,
	  		     (ptr_t)&__data_end__, FALSE);
#       endif /* __POWERPC__ */
#     endif /* __MWERKS__ */
#   endif /* !THINK_C */
    }
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may  */
    /* change.								*/
}
945
946 # endif /* ! AMIGA */
947 # endif /* ! MSWIN32 */
948 # endif /* ! OS2 */
949
950 /*
951 * Auxiliary routines for obtaining memory from OS.
952 */
953
954 # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
955 && !defined(MSWIN32) && !defined(MACOS) && !defined(DOS4GW)
956
957 # ifdef SUNOS4
958 extern caddr_t sbrk();
959 # endif
960 # ifdef __STDC__
961 # define SBRK_ARG_T ptrdiff_t
962 # else
963 # define SBRK_ARG_T int
964 # endif
965
966 # ifdef RS6000
/* The compiler seems to generate speculative reads one past the end	*/
/* of an allocated object.  Hence we need to make sure that the page	*/
/* following the last heap page is also mapped.				*/
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    caddr_t cur_brk = (caddr_t)sbrk(0);
    caddr_t result;
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
    static caddr_t my_brk_val = 0;
    
    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if (lsbs != 0) {
        /* Pad the break up to a page boundary first.	*/
        if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
    }
    if (cur_brk == my_brk_val) {
    	/* Use the extra block we allocated last time.	*/
        result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
        result -= GC_page_size;
    } else {
        /* Allocate one extra (guard) page beyond the request.	*/
        result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
    }
    my_brk_val = result + bytes + GC_page_size;	/* Always page aligned	*/
    return((ptr_t)result);
}
994
995 #else /* Not RS6000 */
996
997 #if defined(USE_MMAP)
998 /* Tested only under IRIX5 */
999
1000 ptr_t GC_unix_get_mem(bytes)
1001 word bytes;
1002 {
1003 static bool initialized = FALSE;
1004 static int fd;
1005 void *result;
1006 static ptr_t last_addr = HEAP_START;
1007
1008 if (!initialized) {
1009 fd = open("/dev/zero", O_RDONLY);
1010 initialized = TRUE;
1011 }
1012 result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
1013 MAP_PRIVATE | MAP_FIXED, fd, 0/* offset */);
1014 if (result == MAP_FAILED) return(0);
1015 last_addr = (ptr_t)result + bytes + GC_page_size - 1;
1016 last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
1017 return((ptr_t)result);
1018 }
1019
1020 #else /* Not RS6000, not USE_MMAP */
1021 ptr_t GC_unix_get_mem(bytes)
1022 word bytes;
1023 {
1024 ptr_t result;
1025 # ifdef IRIX5
1026 /* Bare sbrk isn't thread safe. Play by malloc rules. */
1027 /* The equivalent may be needed on other systems as well. */
1028 __LOCK_MALLOC();
1029 # endif
1030 {
1031 ptr_t cur_brk = (ptr_t)sbrk(0);
1032 SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
1033
1034 if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
1035 if (lsbs != 0) {
1036 if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
1037 }
1038 result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
1039 if (result == (ptr_t)(-1)) result = 0;
1040 }
1041 # ifdef IRIX5
1042 __UNLOCK_MALLOC();
1043 # endif
1044 return(result);
1045 }
1046
1047 #endif /* Not USE_MMAP */
1048 #endif /* Not RS6000 */
1049
1050 # endif /* UN*X */
1051
1052 # ifdef OS2
1053
/* Allocate committed, readable/writable/executable memory from OS/2.	*/
/* Retries (recursively) if DosAllocMem succeeds but yields a null	*/
/* pointer, which is useless to the collector.				*/
void * os2_alloc(size_t bytes)
{
    void * result;

    if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
    				    PAG_WRITE | PAG_COMMIT)
		    != NO_ERROR) {
	return(0);
    }
    if (result == 0) return(os2_alloc(bytes));
    return(result);
}
1066
1067 # endif /* OS2 */
1068
1069
1070 # ifdef MSWIN32
word GC_n_heap_bases = 0;	/* Number of entries in GC_heap_bases.	*/

/* Obtain a heap section from the OS: GlobalAlloc under win32s,	*/
/* VirtualAlloc otherwise.  The result is HBLKSIZE aligned and is	*/
/* recorded in GC_heap_bases for GC_is_heap_base.			*/
ptr_t GC_win32_get_mem(bytes)
word bytes;
{
    ptr_t result;

    if (GC_win32s) {
    	/* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.	*/
    	/* There are also unconfirmed rumors of other		*/
    	/* problems, so we dodge the issue.			*/
        result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
        /* Overallocate by one block, then round up for alignment.	*/
        result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
    } else {
        result = (ptr_t) VirtualAlloc(NULL, bytes,
    				      MEM_COMMIT | MEM_RESERVE,
    				      PAGE_EXECUTE_READWRITE);
    }
    if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
    	/* If I read the documentation correctly, this can	*/
    	/* only happen if HBLKSIZE > 64k or not a power of 2.	*/
    if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
    GC_heap_bases[GC_n_heap_bases++] = result;
    return(result);
}
1096
1097 # endif
1098
1099 /* Routine for pushing any additional roots. In THREADS */
1100 /* environment, this is also responsible for marking from */
1101 /* thread stacks. In the SRC_M3 case, it also handles */
1102 /* global variables. */
1103 #ifndef THREADS
/* Without threads there is no extra root-pushing hook by default.	*/
void (*GC_push_other_roots)() = 0;
1105 #else /* THREADS */
1106
1107 # ifdef PCR
/* Push the stack of one PCR thread, with bounds obtained from	*/
/* PCR_ThCtl_GetInfo.  Returns that call's result code.		*/
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
{
    struct PCR_ThCtl_TInfoRep info;
    PCR_ERes result;
    
    /* Zero the bounds so a failed GetInfo pushes an empty range.	*/
    info.ti_stkLow = info.ti_stkHi = 0;
    result = PCR_ThCtl_GetInfo(t, &info);
    GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
    return(result);
}
1118
/* Push the contents of an old object.  We treat this as stack	*/
/* data only because that makes it robust against mark stack	*/
/* overflow.							*/
/* Matches the callback signature of mmp_enumerate (see below).	*/
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
{
    GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
    return(PCR_ERes_okay);
}
1127
1128
/* PCR version of GC_push_other_roots: scans data allocated by the	*/
/* previous memory manager(s), then every thread stack, including	*/
/* the current thread's.  Aborts on any enumeration/marking failure.	*/
void GC_default_push_other_roots()
{
    /* Traverse data allocated by previous memory managers.		*/
	{
	  extern struct PCR_MM_ProcsRep * GC_old_allocator;
	  
	  if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
	  					   GC_push_old_obj, 0)
	      != PCR_ERes_okay) {
	      ABORT("Old object enumeration failed");
	  }
	}
    /* Traverse all thread stacks. */
	if (PCR_ERes_IsErr(
		PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
	    || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
	    ABORT("Thread stack marking failed\n");
	}
}
1148
1149 # endif /* PCR */
1150
1151 # ifdef SRC_M3
1152
1153 # ifdef ALL_INTERIOR_POINTERS
1154 --> misconfigured
1155 # endif
1156
1157
1158 extern void ThreadF__ProcessStacks();
1159
/* Push an M3 thread stack given its [start, stop] bounds.		*/
/* stop is apparently inclusive — one word beyond it is pushed;		*/
/* TODO confirm against ThreadF__ProcessStacks' conventions.		*/
void GC_push_thread_stack(start, stop)
word start, stop;
{
   GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
}
1165
/* Push routine with M3 specific calling convention. */
/* p is the address of a candidate root word; the dummy arguments	*/
/* presumably exist only to match the signature expected by		*/
/* RTMain__GlobalMapProc (see GC_default_push_other_roots below).	*/
/* Implicit int return type (K&R); the return value is never used.	*/
GC_m3_push_root(dummy1, p, dummy2, dummy3)
word *p;
ptr_t dummy1, dummy2;
int dummy3;
{
    word q = *p;
    
    /* Only push values that could plausibly point into the GC heap. */
    if ((ptr_t)(q) >= GC_least_plausible_heap_addr
	 && (ptr_t)(q) < GC_greatest_plausible_heap_addr) {
	 GC_push_one_checked(q,FALSE);
    }
}
1179
1180 /* M3 set equivalent to RTHeap.TracedRefTypes */
1181 typedef struct { int elts[1]; } RefTypeSet;
1182 RefTypeSet GC_TracedRefTypes = {{0x1}};
1183
1184 /* From finalize.c */
1185 extern void GC_push_finalizer_structures();
1186
1187 /* From stubborn.c: */
1188 # ifdef STUBBORN_ALLOC
1189 extern GC_PTR * GC_changing_list_start;
1190 # endif
1191
1192
/* SRC_M3 version of GC_push_other_roots: collector-internal roots	*/
/* (stubborn-object list head, finalizer structures) are pushed		*/
/* explicitly, M3 global roots come from RTMain__GlobalMapProc, and	*/
/* thread stacks are processed via ThreadF__ProcessStacks.		*/
void GC_default_push_other_roots()
{
	/* Use the M3 provided routine for finding static roots.	*/
	/* This is a bit dubious, since it presumes no C roots.		*/
	/* We handle the collector roots explicitly.			*/
	   {
# 	     ifdef STUBBORN_ALLOC
	       GC_push_one(GC_changing_list_start);
#	     endif
	     GC_push_finalizer_structures();
	     RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
	   }
	   /* Skip stack processing until something has been allocated,	*/
	   /* to avoid the startup ordering problems noted below.	*/
	   if (GC_words_allocd > 0) {
	       ThreadF__ProcessStacks(GC_push_thread_stack);
	   }
	   /* Otherwise this isn't absolutely necessary, and we have	*/
	   /* startup ordering problems.				*/
}
1211
1212 # endif /* SRC_M3 */
1213
1214 # if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || defined(IRIX_THREADS)
1215
1216 extern void GC_push_all_stacks();
1217
/* With thread support, pushing other roots means scanning every	*/
/* thread stack; GC_push_all_stacks is supplied by the threads code.	*/
void GC_default_push_other_roots()
{
    GC_push_all_stacks();
}
1222
1223 # endif /* SOLARIS_THREADS || ... */
1224
/* Overridable hook; defaults to the platform version selected above. */
void (*GC_push_other_roots)() = GC_default_push_other_roots;
1226
1227 #endif
1228
1229 /*
1230 * Routines for accessing dirty bits on virtual pages.
1231 * We plan to eventaually implement four strategies for doing so:
1232 * DEFAULT_VDB: A simple dummy implementation that treats every page
1233 * as possibly dirty. This makes incremental collection
1234 * useless, but the implementation is still correct.
1235 * PCR_VDB: Use PPCRs virtual dirty bit facility.
1236 * PROC_VDB: Use the /proc facility for reading dirty bits. Only
1237 * works under some SVR4 variants. Even then, it may be
1238 * too slow to be entirely satisfactory. Requires reading
1239 * dirty bits for entire address space. Implementations tend
1240 * to assume that the client is a (slow) debugger.
1241 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
1242 * dirtied pages. The implementation (and implementability)
1243 * is highly system dependent. This usually fails when system
1244 * calls write to a protected page. We prevent the read system
1245 * call from doing so. It is the clients responsibility to
1246 * make sure that other system calls are similarly protected
1247 * or write only to the stack.
1248 */
1249
1250 bool GC_dirty_maintained = FALSE;
1251
1252 # ifdef DEFAULT_VDB
1253
1254 /* All of the following assume the allocation lock is held, and */
1255 /* signals are disabled. */
1256
1257 /* The client asserts that unallocated pages in the heap are never */
1258 /* written. */
1259
/* Initialize virtual dirty bit implementation.			*/
/* Nothing to set up here beyond recording that dirty bits are	*/
/* now "maintained".						*/
void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
}
1265
/* Retrieve system dirty bits for heap to a local buffer.	*/
/* Restore the system's notion of which pages are dirty.	*/
/* A no-op: DEFAULT_VDB treats every page as dirty (below).	*/
void GC_read_dirty()
{}
1270
/* Is the HBLKSIZE sized page at h marked dirty in the local buffer?	*/
/* If the actual page size is different, this returns TRUE if any	*/
/* of the pages overlapping h are dirty.  This routine may err on the	*/
/* side of labelling pages as dirty (and this implementation does).	*/
/* Trivially TRUE: DEFAULT_VDB considers every page dirty.		*/
/*ARGSUSED*/
bool GC_page_was_dirty(h)
struct hblk *h;
{
    return(TRUE);
}
1281
1282 /*
1283 * The following two routines are typically less crucial. They matter
1284 * most with large dynamic libraries, or if we can't accurately identify
1285 * stacks, e.g. under Solaris 2.X. Otherwise the following default
1286 * versions are adequate.
1287 */
1288
/* Could any valid GC heap pointer ever have been written to this page? */
/* Conservatively TRUE: this implementation keeps no history.		*/
/*ARGSUSED*/
bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    return(TRUE);
}
1296
/* Reset the n pages starting at h to "was never dirty" status.	*/
/* A no-op: DEFAULT_VDB keeps no per-page state to reset.	*/
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}
1303
/* A call hints that h is about to be written.	*/
/* May speed up some dirty bit implementations.	*/
/* Nothing to do: DEFAULT_VDB never protects pages.	*/
/*ARGSUSED*/
void GC_write_hint(h)
struct hblk *h;
{
}
1311
1312 # endif /* DEFAULT_VDB */
1313
1314
1315 # ifdef MPROTECT_VDB
1316
1317 /*
1318 * See DEFAULT_VDB for interface descriptions.
1319 */
1320
1321 /*
1322 * This implementation maintains dirty bits itself by catching write
1323 * faults and keeping track of them. We assume nobody else catches
1324 * SIGBUS or SIGSEGV. We assume no write faults occur in system calls
1325 * except as a result of a read system call. This means clients must
1326 * either ensure that system calls do not touch the heap, or must
1327 * provide their own wrappers analogous to the one for read.
1328 * We assume the page size is a multiple of HBLKSIZE.
1329 * This implementation is currently SunOS 4.X and IRIX 5.X specific, though we
1330 * tried to use portable code where easily possible. It is known
1331 * not to work under a number of other systems.
1332 */
1333
1334 # ifndef MSWIN32
1335
1336 # include <sys/mman.h>
1337 # include <signal.h>
1338 # include <sys/syscall.h>
1339
1340 # define PROTECT(addr, len) \
1341 if (mprotect((caddr_t)(addr), (int)(len), \
1342 PROT_READ | OPT_PROT_EXEC) < 0) { \
1343 ABORT("mprotect failed"); \
1344 }
1345 # define UNPROTECT(addr, len) \
1346 if (mprotect((caddr_t)(addr), (int)(len), \
1347 PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
1348 ABORT("un-mprotect failed"); \
1349 }
1350
1351 # else
1352
1353 # include <signal.h>
1354
1355 static DWORD protect_junk;
1356 # define PROTECT(addr, len) \
1357 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
1358 &protect_junk)) { \
1359 DWORD last_error = GetLastError(); \
1360 GC_printf1("Last error code: %lx\n", last_error); \
1361 ABORT("VirtualProtect failed"); \
1362 }
1363 # define UNPROTECT(addr, len) \
1364 if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
1365 &protect_junk)) { \
1366 ABORT("un-VirtualProtect failed"); \
1367 }
1368
1369 # endif
1370
1371 VOLATILE page_hash_table GC_dirty_pages;
1372 /* Pages dirtied since last GC_read_dirty. */
1373
1374 #if defined(SUNOS4) || defined(FREEBSD)
1375 typedef void (* SIG_PF)();
1376 #endif
1377 #if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX)
1378 typedef void (* SIG_PF)(int);
1379 #endif
1380 #if defined(MSWIN32)
1381 typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
1382 # undef SIG_DFL
1383 # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
1384 #endif
1385
1386 #if defined(IRIX5) || defined(OSF1)
1387 typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
1388 #endif
1389 #if defined(SUNOS5SIGS)
1390 typedef void (* REAL_SIG_PF)(int, struct siginfo *, void *);
1391 #endif
1392 #if defined(LINUX)
1393 # include <linux/version.h>
1394 # if (LINUX_VERSION_CODE >= 0x20100)
1395 typedef void (* REAL_SIG_PF)(int, struct sigcontext);
1396 # else
1397 typedef void (* REAL_SIG_PF)(int, struct sigcontext_struct);
1398 # endif
1399 # endif
1400
1401 SIG_PF GC_old_bus_handler;
1402 SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
1403
/*
 * The write fault handler.  Under Unix this is installed on SIGSEGV
 * (and SIGBUS where applicable); under win32 it is an unhandled-
 * exception filter.  When a fault hits a write-protected heap page,
 * the handler records the page's blocks as dirty in GC_dirty_pages,
 * unprotects the page, and resumes the faulting write.  Faults that
 * do not correspond to allocated heap blocks are forwarded to the
 * previously installed handler, or abort if there was none.
 * The signature and fault-address extraction are platform dependent,
 * hence the #ifdef maze below.
 */
/*ARGSUSED*/
# if defined (SUNOS4) || defined(FREEBSD)
    void GC_write_fault_handler(sig, code, scp, addr)
    int sig, code;
    struct sigcontext *scp;
    char * addr;
#   ifdef SUNOS4
#     define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#     define CODE_OK (FC_CODE(code) == FC_PROT \
		    || (FC_CODE(code) == FC_OBJERR \
		       && FC_ERRNO(code) == FC_PROT))
#   endif
#   ifdef FREEBSD
#     define SIG_OK (sig == SIGBUS)
#     define CODE_OK (code == BUS_PAGE_FAULT)
#   endif
# endif
# if defined(IRIX5) || defined(OSF1)
#   include <errno.h>
    void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
#   define SIG_OK (sig == SIGSEGV)
#   ifdef OSF1
#     define CODE_OK (code == 2 /* experimentally determined */)
#   endif
#   ifdef IRIX5
#     define CODE_OK (code == EACCES)
#   endif
# endif
# if defined(LINUX)
#   if (LINUX_VERSION_CODE >= 0x20100)
      void GC_write_fault_handler(int sig, struct sigcontext sc)
#   else
      void GC_write_fault_handler(int sig, struct sigcontext_struct sc)
#   endif
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK TRUE
	/* Empirically c.trapno == 14, but is that useful?	*/
	/* We assume Intel architecture, so alignment		*/
	/* faults are not possible.				*/
# endif
# if defined(SUNOS5SIGS)
    void GC_write_fault_handler(int sig, struct siginfo *scp, void * context)
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK (scp -> si_code == SEGV_ACCERR)
# endif
# if defined(MSWIN32)
    LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
#   define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
			EXCEPTION_ACCESS_VIOLATION)
#   define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
			/* Write fault */
# endif
{
    register unsigned i;
    /* Extract the faulting address; like the signature, this is	*/
    /* platform dependent (SUNOS4/FREEBSD receive it as a parameter).	*/
#   ifdef IRIX5
	char * addr = (char *) (size_t) (scp -> sc_badvaddr);
#   endif
#   if defined(OSF1) && defined(ALPHA)
	char * addr = (char *) (scp -> sc_traparg_a0);
#   endif
#   ifdef SUNOS5SIGS
	char * addr = (char *) (scp -> si_addr);
#   endif
#   ifdef LINUX
#     ifdef I386
	char * addr = (char *) (sc.cr2);
#     else
	char * addr = /* As of 1.3.90 there seemed to be no way to do this. */;
#     endif
#   endif
#   if defined(MSWIN32)
	char * addr = (char *) (exc_info -> ExceptionRecord
				  -> ExceptionInformation[1]);
#	define sig SIGSEGV
#   endif

    if (SIG_OK && CODE_OK) {
	/* Round down to the start of the containing (VM) page. */
        register struct hblk * h =
        		(struct hblk *)((word)addr & ~(GC_page_size-1));
        bool in_allocd_block;
        
#	ifdef SUNOS5SIGS
	    /* Address is only within the correct physical page.	*/
	    in_allocd_block = FALSE;
	    for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
	      if (HDR(h+i) != 0) {
	        in_allocd_block = TRUE;
	      }
	    }
#	else
	    in_allocd_block = (HDR(addr) != 0);
#	endif
        if (!in_allocd_block) {
	    /* Heap blocks now begin and end on page boundaries */
	    /* Not a GC-protected page: chain to the saved handler. */
            SIG_PF old_handler;
            
            if (sig == SIGSEGV) {
            	old_handler = GC_old_segv_handler;
            } else {
                old_handler = GC_old_bus_handler;
            }
            if (old_handler == SIG_DFL) {
#		ifndef MSWIN32
            	    ABORT("Unexpected bus error or segmentation fault");
#		else
		    return(EXCEPTION_CONTINUE_SEARCH);
#		endif
            } else {
#		if defined (SUNOS4) || defined(FREEBSD)
		    (*old_handler) (sig, code, scp, addr);
		    return;
#		endif
#		if defined (SUNOS5SIGS)
		    (*(REAL_SIG_PF)old_handler) (sig, scp, context);
		    return;
#		endif
#		if defined (LINUX)
		    (*(REAL_SIG_PF)old_handler) (sig, sc);
		    return;
#		endif
#		if defined (IRIX5) || defined(OSF1)
		    (*(REAL_SIG_PF)old_handler) (sig, code, scp);
		    return;
#		endif
#		ifdef MSWIN32
		    return((*old_handler)(exc_info));
#		endif
            }
        }
	/* Mark every heap block on the faulting page dirty, then	*/
	/* drop the write protection so the store can proceed.		*/
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            register int index = PHT_HASH(h+i);
            
            set_pht_entry_from_index(GC_dirty_pages, index);
        }
        UNPROTECT(h, GC_page_size);
#	if defined(OSF1) || defined(LINUX)
	    /* These reset the signal handler each time by default. */
	    signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
#	endif
    	/* The write may not take place before dirty bits are read.	*/
    	/* But then we'll fault again ...				*/
#	ifdef MSWIN32
	    return(EXCEPTION_CONTINUE_EXECUTION);
#	else
	    return;
#	endif
    }
#ifdef MSWIN32
    return EXCEPTION_CONTINUE_SEARCH;
#else
    ABORT("Unexpected bus error or segmentation fault");
#endif
}
1557
1558 /*
1559 * We hold the allocation lock. We expect block h to be written
1560 * shortly.
1561 */
1562 void GC_write_hint(h)
1563 struct hblk *h;
1564 {
1565 register struct hblk * h_trunc;
1566 register unsigned i;
1567 register bool found_clean;
1568
1569 if (!GC_dirty_maintained) return;
1570 h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
1571 found_clean = FALSE;
1572 for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
1573 register int index = PHT_HASH(h_trunc+i);
1574
1575 if (!get_pht_entry_from_index(GC_dirty_pages, index)) {
1576 found_clean = TRUE;
1577 set_pht_entry_from_index(GC_dirty_pages, index);
1578 }
1579 }
1580 if (found_clean) {
1581 UNPROTECT(h_trunc, GC_page_size);
1582 }
1583 }
1584
1585 void GC_dirty_init()
1586 {
1587 #if defined(SUNOS5SIGS) || defined(IRIX5)
1588 struct sigaction act, oldact;
1589 # ifdef IRIX5
1590 act.sa_flags = SA_RESTART;
1591 act.sa_handler = GC_write_fault_handler;
1592 # else
1593 act.sa_flags = SA_RESTART | SA_SIGINFO;
1594 act.sa_sigaction = GC_write_fault_handler;
1595 # endif
1596 (void)sigemptyset(&act.sa_mask);
1597 #endif
1598 # ifdef PRINTSTATS
1599 GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
1600 # endif
1601 GC_dirty_maintained = TRUE;
1602 if (GC_page_size % HBLKSIZE != 0) {
1603 GC_err_printf0("Page size not multiple of HBLKSIZE\n");
1604 ABORT("Page size not multiple of HBLKSIZE");
1605 }
1606 # if defined(SUNOS4) || defined(FREEBSD)
1607 GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
1608 if (GC_old_bus_handler == SIG_IGN) {
1609 GC_err_printf0("Previously ignored bus error!?");
1610 GC_old_bus_handler = SIG_DFL;
1611 }
1612 if (GC_old_bus_handler != SIG_DFL) {
1613 # ifdef PRINTSTATS
1614 GC_err_printf0("Replaced other SIGBUS handler\n");
1615 # endif
1616 }
1617 # endif
1618 # if defined(OSF1) || defined(SUNOS4) || defined(LINUX)
1619 GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
1620 if (GC_old_segv_handler == SIG_IGN) {
1621 GC_err_printf0("Previously ignored segmentation violation!?");
1622 GC_old_segv_handler = SIG_DFL;
1623 }
1624 if (GC_old_segv_handler != SIG_DFL) {
1625 # ifdef PRINTSTATS
1626 GC_err_printf0("Replaced other SIGSEGV handler\n");
1627 # endif
1628 }
1629 # endif
1630 # if defined(SUNOS5SIGS) || defined(IRIX5)
1631 # ifdef IRIX_THREADS
1632 sigaction(SIGSEGV, 0, &oldact);
1633 sigaction(SIGSEGV, &act, 0);
1634 # else
1635 sigaction(SIGSEGV, &act, &oldact);
1636 # endif
1637 if (oldact.sa_flags & SA_SIGINFO) {
1638 GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
1639 } else {
1640 GC_old_segv_handler = oldact.sa_handler;
1641 }
1642 if (GC_old_segv_handler == SIG_IGN) {
1643 GC_err_printf0("Previously ignored segmentation violation!?");
1644 GC_old_segv_handler = SIG_DFL;
1645 }
1646 if (GC_old_segv_handler != SIG_DFL) {
1647 # ifdef PRINTSTATS
1648 GC_err_printf0("Replaced other SIGSEGV handler\n");
1649 # endif
1650 }
1651 # endif
1652 # if defined(MSWIN32)
1653 GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
1654 if (GC_old_segv_handler != NULL) {
1655 # ifdef PRINTSTATS
1656 GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
1657 # endif
1658 } else {
1659 GC_old_segv_handler = SIG_DFL;
1660 }
1661 # endif
1662 }
1663
1664
1665
1666 void GC_protect_heap()
1667 {
1668 ptr_t start;
1669 word len;
1670 unsigned i;
1671
1672 for (i = 0; i < GC_n_heap_sects; i++) {
1673 start = GC_heap_sects[i].hs_start;
1674 len = GC_heap_sects[i].hs_bytes;
1675 PROTECT(start, len);
1676 }
1677 }
1678
/* We assume that either the world is stopped or it's OK to lose dirty	*/
/* bits while this is happening (as in GC_enable_incremental).		*/
/* Snapshot the pages dirtied since the last call into			*/
/* GC_grungy_pages, clear the accumulation buffer, and re-protect the	*/
/* heap so that subsequent writes are caught again.			*/
void GC_read_dirty()
{
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
          (sizeof GC_dirty_pages));
    BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
    GC_protect_heap();
}
1688
/* Was the page containing h dirtied since the last GC_read_dirty?	*/
/* Pages without a block header are conservatively reported dirty,	*/
/* since we have no information about them.				*/
bool GC_page_was_dirty(h)
struct hblk * h;
{
    register word index = PHT_HASH(h);
    
    return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
}
1696
1697 /*
1698 * Acquiring the allocation lock here is dangerous, since this
1699 * can be called from within GC_call_with_alloc_lock, and the cord
1700 * package does so. On systems that allow nested lock acquisition, this
1701 * happens to work.
1702 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
1703 */
1704
/* Enter a wrapped system call: take the allocation lock unless this	*/
/* thread already holds it (see the nesting caveat above).		*/
void GC_begin_syscall()
{
    if (!I_HOLD_LOCK()) LOCK();
}
1709
/* Leave a wrapped system call: release the lock only if it was		*/
/* acquired by the matching GC_begin_syscall.				*/
void GC_end_syscall()
{
    if (!I_HOLD_LOCK()) UNLOCK();
}
1714
/* Make the heap object containing [addr, addr+len) writable and mark	*/
/* the affected pages dirty, so a system call may safely write into	*/
/* the buffer.  The range must lie within a single object; addresses	*/
/* outside the GC heap are ignored.					*/
void GC_unprotect_range(addr, len)
ptr_t addr;
word len;
{
    struct hblk * start_block;
    struct hblk * end_block;
    register struct hblk *h;
    ptr_t obj_start;
    
    if (!GC_incremental) return;
    obj_start = GC_base(addr);
    if (obj_start == 0) return;		/* Not a heap pointer. */
    if (GC_base(addr + len - 1) != obj_start) {
    	ABORT("GC_unprotect_range(range bigger than object)");
    }
    /* Round the range out to full page boundaries. */
    start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
    end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
    end_block += GC_page_size/HBLKSIZE - 1;
    for (h = start_block; h <= end_block; h++) {
        register word index = PHT_HASH(h);
        
        set_pht_entry_from_index(GC_dirty_pages, index);
    }
    UNPROTECT(start_block,
    	      ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
}
1741
#ifndef MSWIN32
/* Replacement for UNIX system call.	*/
/* Other calls that write to the heap	*/
/* should be handled similarly.		*/
/* The target buffer is unprotected before the real read is issued,	*/
/* since (per the comments at the top of this section) the kernel	*/
/* would fail the system call rather than deliver a catchable fault.	*/
# if defined(__STDC__) && !defined(SUNOS4)
#   include <unistd.h>
    ssize_t read(int fd, void *buf, size_t nbyte)
# else
#   ifndef LINT
      int read(fd, buf, nbyte)
#   else
      /* Under LINT the wrapper gets a distinct name — presumably so	*/
      /* the analysis tool does not see the library read shadowed.	*/
      int GC_read(fd, buf, nbyte)
#   endif
    int fd;
    char *buf;
    int nbyte;
# endif
{
    int result;
    
    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
#   ifdef IRIX5
	/* Indirect system call exists, but is undocumented, and	*/
	/* always seems to return EINVAL.  There seems to be no		*/
	/* general way to wrap system calls, since the system call	*/
	/* convention appears to require an immediate argument for	*/
	/* the system call number, and building the required code	*/
	/* in the data segment also seems dangerous.  We can fake it	*/
	/* for read; anything else is up to the client.			*/
	{
	    struct iovec iov;

	    iov.iov_base = buf;
	    iov.iov_len = nbyte;
	    result = readv(fd, &iov, 1);
	}
#   else
	/* Invoke the real read directly to avoid recursing into this	*/
	/* wrapper.							*/
	result = syscall(SYS_read, fd, buf, nbyte);
#   endif
    GC_end_syscall();
    return(result);
}
#endif /* !MSWIN32 */
1786
/*ARGSUSED*/
/* MPROTECT_VDB keeps no "ever dirty" history, so conservatively	*/
/* report every page as possibly having been written.			*/
bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    return(TRUE);
}
1793
/* Reset the n pages starting at h to "was never dirty" status.	*/
/* A no-op: MPROTECT_VDB does not track "ever dirty" state.	*/
/*ARGSUSED*/
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}
1801
1802 # endif /* MPROTECT_VDB */
1803
1804 # ifdef PROC_VDB
1805
1806 /*
1807 * See DEFAULT_VDB for interface descriptions.
1808 */
1809
1810 /*
1811 * This implementaion assumes a Solaris 2.X like /proc pseudo-file-system
1812 * from which we can read page modified bits. This facility is far from
1813 * optimal (e.g. we would like to get the info for only some of the
1814 * address space), but it avoids intercepting system calls.
1815 */
1816
1817 #include <errno.h>
1818 #include <sys/types.h>
1819 #include <sys/signal.h>
1820 #include <sys/fault.h>
1821 #include <sys/syscall.h>
1822 #include <sys/procfs.h>
1823 #include <sys/stat.h>
1824 #include <fcntl.h>
1825
1826 #define INITIAL_BUF_SZ 4096
1827 word GC_proc_buf_size = INITIAL_BUF_SZ;
1828 char *GC_proc_buf;
1829
1830 page_hash_table GC_written_pages = { 0 }; /* Pages ever dirtied */
1831
1832 #ifdef SOLARIS_THREADS
1833 /* We don't have exact sp values for threads. So we count on */
1834 /* occasionally declaring stack pages to be fresh. Thus we */
1835 /* need a real implementation of GC_is_fresh. We can't clear */
1836 /* entries in GC_written_pages, since that would declare all */
1837 /* pages with the given hash address to be fresh. */
1838 # define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
1839 struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
1840 /* Collisions are dropped. */
1841
1842 # define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
1843 # define ADD_FRESH_PAGE(h) \
1844 GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
1845 # define PAGE_IS_FRESH(h) \
1846 (GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
1847 #endif
1848
1849 /* Add all pages in pht2 to pht1 */
1850 void GC_or_pages(pht1, pht2)
1851 page_hash_table pht1, pht2;
1852 {
1853 register int i;
1854
1855 for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
1856 }
1857
1858 int GC_proc_fd;
1859
/* Open the /proc page-data descriptor for this process and allocate	*/
/* the read buffer.  If objects were allocated before dirty-bit		*/
/* tracking started, conservatively mark all pages as ever-written.	*/
void GC_dirty_init()
{
    int fd;
    char buf[30];

    GC_dirty_maintained = TRUE;
    if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
    	register int i;
    
        for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
#	ifdef PRINTSTATS
	    GC_printf1("Allocated words:%lu:all pages may have been written\n",
	    	       (unsigned long)
	    	      		(GC_words_allocd + GC_words_allocd_before_gc));
#	endif
    }
    sprintf(buf, "/proc/%d", getpid());
    fd = open(buf, O_RDONLY);
    if (fd < 0) {
    	ABORT("/proc open failed");
    }
    /* PIOCOPENPD yields a separate descriptor for page data; the	*/
    /* original /proc descriptor is no longer needed.			*/
    GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
    close(fd);
    if (GC_proc_fd < 0) {
    	ABORT("/proc ioctl failed");
    }
    GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
#   ifdef SOLARIS_THREADS
	GC_fresh_pages = (struct hblk **)
	  GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
	if (GC_fresh_pages == 0) {
	    GC_err_printf0("No space for fresh pages\n");
	    EXIT();
	}
	BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
#   endif
}
1897
/* Ignore write hints. They don't help us here.	*/
/* (/proc supplies the dirty bits directly; pages are never		*/
/* write-protected by this implementation.)				*/
/*ARGSUSED*/
void GC_write_hint(h)
struct hblk *h;
{
}
1904
1905 #ifdef SOLARIS_THREADS
1906 # define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
1907 #else
1908 # define READ(fd,buf,nbytes) read(fd, buf, nbytes)
1909 #endif
1910
1911 void GC_read_dirty()
1912 {
1913 unsigned long ps, np;
1914 int nmaps;
1915 ptr_t vaddr;
1916 struct prasmap * map;
1917 char * bufp;
1918 ptr_t current_addr, limit;
1919 int i;
1920 int dummy;
1921
1922 BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
1923
1924 bufp = GC_proc_buf;
1925 if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
1926 # ifdef PRINTSTATS
1927 GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
1928 GC_proc_buf_size);
1929 # endif
1930 {
1931 /* Retry with larger buffer. */
1932 word new_size = 2 * GC_proc_buf_size;
1933 char * new_buf = GC_scratch_alloc(new_size);
1934
1935 if (new_buf != 0) {
1936 GC_proc_buf = bufp = new_buf;
1937 GC_proc_buf_size = new_size;
1938 }
1939 if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
1940 WARN("Insufficient space for /proc read\n", 0);
1941 /* Punt: */
1942 memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
1943 memset(GC_written_pages, 0xff, sizeof(page_hash_table));
1944 # ifdef SOLARIS_THREADS
1945 BZERO(GC_fresh_pages,
1946 MAX_FRESH_PAGES * sizeof (struct hblk *));
1947 # endif
1948 return;
1949 }
1950 }
1951 }
1952 /* Copy dirty bits into GC_grungy_pages */
1953 nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
1954 /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
1955 nmaps, PG_REFERENCED, PG_MODIFIED); */
1956 bufp = bufp + sizeof(struct prpageheader);
1957 for (i = 0; i < nmaps; i++) {
1958 map = (struct prasmap *)bufp;
1959 vaddr = (ptr_t)(map -> pr_vaddr);
1960 ps = map -> pr_pagesize;
1961 np = map -> pr_npage;
1962 /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
1963 limit = vaddr + ps * np;
1964 bufp += sizeof (struct prasmap);
1965 for (current_addr = vaddr;
1966 current_addr < limit; current_addr += ps){
1967 if ((*bufp++) & PG_MODIFIED) {
1968 register struct hblk * h = (struct hblk *) current_addr;
1969
1970 while ((ptr_t)h < current_addr + ps) {
1971 register word index = PHT_HASH(h);
1972
1973 set_pht_entry_from_index(GC_grungy_pages, index);
1974 # ifdef SOLARIS_THREADS
1975 {
1976 register int slot = FRESH_PAGE_SLOT(h);
1977
1978 if (GC_fresh_pages[slot] == h) {
1979 GC_fresh_pages[slot] = 0;
1980 }
1981 }
1982 # endif
1983 h++;
1984 }
1985 }
1986 }
1987 bufp += sizeof(long) - 1;
1988 bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
1989 }
1990 /* Update GC_written_pages. */
1991 GC_or_pages(GC_written_pages, GC_grungy_pages);
1992 # ifdef SOLARIS_THREADS
1993 /* Make sure that old stacks are considered completely clean */
1994 /* unless written again. */
1995 GC_old_stacks_are_fresh();
1996 # endif
1997 }
1998
1999 #undef READ
2000
/* Was the page at h written since the last GC_read_dirty call?	*/
bool GC_page_was_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register bool result;
    
    result = get_pht_entry_from_index(GC_grungy_pages, index);
#   ifdef SOLARIS_THREADS
	if (result && PAGE_IS_FRESH(h)) result = FALSE;
	/* This happens only if page was declared fresh since	*/
	/* the read_dirty call, e.g. because it's in an unused  */
	/* thread stack.  It's OK to treat it as clean, in	*/
	/* that case.  And it's consistent with			*/
	/* GC_page_was_ever_dirty.				*/
#   endif
    return(result);
}
2018
/* Was the page at h ever written since GC_written_pages started	*/
/* accumulating?  Under SOLARIS_THREADS, a page later declared fresh	*/
/* is reported clean despite its history (consistent with		*/
/* GC_page_was_dirty above).						*/
bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    register word index = PHT_HASH(h);
    register bool result;
    
    result = get_pht_entry_from_index(GC_written_pages, index);
#   ifdef SOLARIS_THREADS
	if (result && PAGE_IS_FRESH(h)) result = FALSE;
#   endif
    return(result);
}
2031
2032 /* Caller holds allocation lock. */
2033 void GC_is_fresh(h, n)
2034 struct hblk *h;
2035 word n;
2036 {
2037
2038 register word index;
2039
2040 # ifdef SOLARIS_THREADS
2041 register word i;
2042
2043 if (GC_fresh_pages != 0) {
2044 for (i = 0; i < n; i++) {
2045 ADD_FRESH_PAGE(h + i);
2046 }
2047 }
2048 # endif
2049 }
2050
2051 # endif /* PROC_VDB */
2052
2053
2054 # ifdef PCR_VDB
2055
2056 # include "vd/PCR_VD.h"
2057
2058 # define NPAGES (32*1024) /* 128 MB */
2059
2060 PCR_VD_DB GC_grungy_bits[NPAGES];
2061
2062 ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
2063 /* HBLKSIZE aligned. */
2064
/* Start PCR's virtual-dirty-bit tracking over a window of NPAGES	*/
/* blocks beginning at the start of the first heap section.  Heap	*/
/* outside that window is handled conservatively (see			*/
/* GC_page_was_dirty below).						*/
void GC_dirty_init()
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
   	ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
	!= PCR_ERes_okay) {
	ABORT("dirty bit initialization failed");
    }
}
2078
/* Fetch the accumulated dirty bits into GC_grungy_bits and reset	*/
/* them (PCR_VD_Clear), first enabling tracking on any heap sections	*/
/* added since the previous call.					*/
void GC_read_dirty()
{
    /* lazily enable dirty bits on newly added heap sects */
    {
        static int onhs = 0;	/* Count of sections already enabled. */
        int nhs = GC_n_heap_sects;
        for( ; onhs < nhs; onhs++ ) {
            PCR_VD_WriteProtectEnable(
                    GC_heap_sects[onhs].hs_start,
                    GC_heap_sects[onhs].hs_bytes );
        }
    }


    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
        != PCR_ERes_okay) {
	ABORT("dirty bit read failed");
    }
}
2098
/* Was the block at h dirtied?  Pages outside the tracked window	*/
/* [GC_vd_base, GC_vd_base + NPAGES*HBLKSIZE) are conservatively	*/
/* reported dirty.							*/
bool GC_page_was_dirty(h)
struct hblk *h;
{
    if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
    	return(TRUE);
    }
    return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
}
2107
/*ARGSUSED*/
void GC_write_hint(h)
struct hblk *h;
{
    /* NOTE(review): toggling write protection presumably makes PCR	*/
    /* note the block as dirty up front — confirm against the PCR_VD	*/
    /* interface documentation.						*/
    PCR_VD_WriteProtectDisable(h, HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, HBLKSIZE);
}
2115
2116 # endif /* PCR_VDB */
2117
2118 /*
2119 * Call stack save code for debugging.
2120 * Should probably be in mach_dep.c, but that requires reorganization.
2121 */
2122 #if defined(SPARC)
2123 # if defined(SUNOS4)
2124 # include <machine/frame.h>
2125 # else
2126 # if defined (DRSNX)
2127 # include <sys/sparc/frame.h>
2128 # else
2129 # include <sys/frame.h>
2130 # endif
2131 # endif
2132 # if NARGS > 6
	--> We only know how to get the first 6 arguments
2134 # endif
2135
2136 #ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my	*/
/* callers.  Ignore my frame and my callers frame.			*/
void GC_save_callers (info)
struct callinfo info[NFRAMES];
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
  word GC_save_regs_in_stack();

  /* GC_save_regs_in_stack presumably flushes the SPARC register	*/
  /* windows so the frame links below are valid — see mach_dep.c.	*/
  frame = (struct frame *) GC_save_regs_in_stack ();
  
  for (fp = frame -> fr_savfp; fp != 0 && nframes < NFRAMES;
       fp = fp -> fr_savfp, nframes++) {
      register int i;
      
      info[nframes].ci_pc = fp->fr_savpc;
      /* Arguments are stored bit-complemented; GC_print_callers	*/
      /* complements them again before printing.			*/
      for (i = 0; i < NARGS; i++) {
	    info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
      }
  }
  /* Terminate the list with a zero pc if it is not full. */
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}
2160
2161 #endif /* SAVE_CALL_CHAIN */
2162 #endif /* SPARC */
2163
2164 #ifdef SAVE_CALL_CHAIN
2165
2166 void GC_print_callers (info)
2167 struct callinfo info[NFRAMES];
2168 {
2169 register int i,j;
2170
2171 GC_err_printf0("\tCall chain at allocation:\n");
2172 for (i = 0; i < NFRAMES; i++) {
2173 if (info[i].ci_pc == 0) break;
2174 GC_err_printf0("\t\targs: ");
2175 for (j = 0; j < NARGS; j++) {
2176 if (j != 0) GC_err_printf0(", ");
2177 GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
2178 ~(info[i].ci_arg[j]));
2179 }
2180 GC_err_printf1("\n\t\t##PC##= 0x%X\n", info[i].ci_pc);
2181 }
2182 }
2183
2184 #endif /* SAVE_CALL_CHAIN */
2185
2186
2187