/* heap-gen.c: The generational, incremental garbage collector.
 * Written by Marco Scheibe. Fixes provided by Craig McPheeters,
 * Carsten Bormann, Jon Hartlaub, Charlie Xiaoli Huang, Gal Shalif.
 *
 * This garbage collector is still experimental and probably needs to be
 * rewritten at least in parts.  See also ../BUGS.  If your application
 * does not work correctly and you suspect the generational garbage
 * collector to be the culprit, try the stop-and-copy GC instead.
 *
 * $Id$
 *
 * Copyright 1990, 1991, 1992, 1993, 1994, 1995, Oliver Laumann, Berlin
 * Copyright 2002, 2003 Sam Hocevar <sam@hocevar.net>, Paris
 *
 * This software was derived from Elk 1.2, which was Copyright 1987, 1988,
 * 1989, Nixdorf Computer AG and TELES GmbH, Berlin (Elk 1.2 has been written
 * by Oliver Laumann for TELES Telematic Services, Berlin, in a joint project
 * between TELES and Nixdorf Microprocessor Engineering, Berlin).
 *
 * Oliver Laumann, TELES GmbH, Nixdorf Computer AG and Sam Hocevar, as co-
 * owners or individual owners of copyright in this software, grant to any
 * person or company a worldwide, royalty free, license to
 *
 *    i) copy this software,
 *   ii) prepare derivative works based on this software,
 *  iii) distribute copies of this software or derivative works,
 *   iv) perform this software, or
 *    v) display this software,
 *
 * provided that this notice is not removed and that neither Oliver Laumann
 * nor Teles nor Nixdorf are deemed to have made any representations as to
 * the suitability of this software for any purpose nor are held responsible
 * for any defects of this software.
 *
 * THERE IS ABSOLUTELY NO WARRANTY FOR THIS SOFTWARE.
 */

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
#  include <signal.h>
#  include <sys/mman.h>
#endif
#if defined(HAVE_UNISTD_H)
#  include <unistd.h>
#  if defined(_SC_PAGE_SIZE) && !defined(_SC_PAGESIZE)   /* Wrong in HP-UX */
#    define _SC_PAGESIZE _SC_PAGE_SIZE
#  endif
#endif
#ifdef SIGSEGV_SIGINFO
#  include <siginfo.h>
#  include <ucontext.h>
#endif

/* The following variables may be set from outside the collector to
 * fine-tune certain parameters.
 */

int tuneable_forward_region = 5;   /* reserve 1/n of the heap pages as
                                    * the forward region when collecting.
                                    */
int tuneable_force_total = 35;     /* % of the heap newly allocated during
                                    * a collection that forces a total
                                    * collection.
                                    */
int tuneable_newly_expand = 25;    /* % of the heap newly allocated during
                                    * a total collection that forces heap
                                    * expansion.
                                    */
int tuneable_force_expand = 20;    /* % of the heap held by the stable set
                                    * that forces heap expansion.
                                    */

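/* An embedding application can simply assign to these variables before
 * heap-intensive work; e.g. (illustrative value, not a recommendation):
 *
 *     tuneable_force_total = 25;   /+ force total collections earlier +/
 */
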
/* ------------------------------------------------------------------------

defined in object.h:

typedef int gcspace_t;          // type used for space and type arrays
typedef unsigned int gcptr_t;   // type used for pointers

   ------------------------------------------------------------------------ */

static int percent = 0;
static pageno_t old_logical_pages;

static int inc_collection = 0;

static int incomplete_msg = 0;

static pageno_t logical_pages, spanning_pages, physical_pages;

/* pagebase is #defined in object.h if ARRAY_BROKEN is not defined. */

#ifdef ARRAY_BROKEN
  pageno_t pagebase;
#endif

static pageno_t firstpage, lastpage;

static char *saved_heap_ptr;
gcspace_t *space;
static gcspace_t *types, *pmap;
static pageno_t *linked;

static pageno_t current_pages, forwarded_pages;
static pageno_t protected_pages, allocated_pages;

static addrarith_t bytes_per_pp, pp_shift; /* bytes per physical page */
static addrarith_t hp_per_pp;      /* number of heap pages per physical page */
static addrarith_t pp_mask;        /* ANDed with a virtual address gives
                                    * base address of physical page
                                    */
static addrarith_t hp_per_pp_mask; /* ANDed with heap page number gives
                                    * first page number in the physical
                                    * page the heap page belongs to.
                                    */
#define SAME_PHYSPAGE(a,b) (((a) & pp_mask) == ((b) & pp_mask))
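
/* As an illustration (actual values depend on the platform): with
 * 4096-byte physical pages, pp_mask is ~0xfff, so
 * SAME_PHYSPAGE (0x5010, 0x5ff8) is true, while
 * SAME_PHYSPAGE (0x5ff8, 0x6000) is false.
 */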

gcspace_t current_space; /* has to be exported because IS_ALIVE depends on it */

static gcspace_t forward_space, previous_space;
static pageno_t current_freepage, current_free;
static pageno_t forward_freepage, forward_free;
static pageno_t last_forward_freepage;

static Object *current_freep, *forward_freep;

static int scanning = 0; /* set to true if scanning a
                          * physical page is in progress */
static Object *scanpointer;
static Object *scanfirst, *scanlast;
#define IN_SCANREGION(addr) ((Object*)(addr) >= scanfirst \
                             && (Object*)(addr) <= scanlast)
#define IS_SCANNED(addr) ((Object *)(addr) < scanpointer)
#define MAXRESCAN 10
static pageno_t rescan[MAXRESCAN];
static int rescanpages = 0;
static int allscan = 0;

static pageno_t stable_queue, stable_tail; /* head and tail of the queue
                                            * of stable pages */

#define DIRTYENTRIES 20
struct dirty_rec {
    pageno_t pages[DIRTYENTRIES];
    struct dirty_rec *next;
};

static struct dirty_rec *dirtylist, *dirtyhead;
static int dirtyentries;

static int ScanCluster (gcptr_t addr);
static int Scanner (pageno_t npages);
static void TerminateGC (void);

Object P_Collect (void);
Object P_Collect_Incremental (void);
#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
void InstallHandler (void);
#endif

/*****************************************************************************/

/* PAGEBYTES is defined in object.h */

#define PAGEWORDS      ((addrarith_t)(PAGEBYTES / sizeof (Object)))
#define HEAPPAGEMASK   ~((gcptr_t)PAGEBYTES-1)

#ifdef ALIGN_8BYTE
#  define MAX_OBJECTWORDS       (PAGEWORDS - 1)
#  define NEEDED_PAGES(size)    (((size) + PAGEWORDS) / PAGEWORDS)
#else
#  define MAX_OBJECTWORDS       PAGEWORDS
#  define NEEDED_PAGES(size)    (((size) + PAGEWORDS - 1) / PAGEWORDS)
#endif
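
/* Example (assuming an illustrative PAGEBYTES such that PAGEWORDS == 512):
 * without ALIGN_8BYTE, NEEDED_PAGES (512) == 1 and NEEDED_PAGES (513) == 2.
 * With ALIGN_8BYTE, one word per page is reserved for an alignment header,
 * so NEEDED_PAGES (512) == 2.
 */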

#define MAKE_HEADER(obj,words,type)     (SET(obj, type, words))
#define HEADER_TO_TYPE(header)          ((unsigned int)TYPE(header))
#define HEADER_TO_WORDS(header)         ((unsigned int)FIXNUM(header))
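
/* A header is an ordinary Object whose tag holds the type and whose
 * fixnum field holds the object's length in words, including the header
 * word itself. E.g. after MAKE_HEADER (h, 3, T_Pair), HEADER_TO_TYPE (h)
 * is T_Pair and HEADER_TO_WORDS (h) is 3 (one header word plus, on a
 * typical layout, one word each for car and cdr).
 */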
174 
175 /* some conversion stuff. PHYSPAGE converts a logical page number into the
176  * start address of the physical page the logical page lies on.
177  * If ARRAY_BROKEN is defined, page numbering will start at 0 for the
178  * first heap page. Not that this will introduce some extra overhead.
179  * Note that PAGE_TO_ADDR(0) == 0 if ARRAY_BROKEN is not defined...
180  */
181 
182 #define OBJ_TO_PPADDR(obj) ((gcptr_t)POINTER(obj) & pp_mask)
183 #define PTR_TO_PPADDR(ptr) ((gcptr_t)(ptr) & pp_mask)
184 #define ADDR_TO_PAGE(addr) ((((addr) & HEAPPAGEMASK) / PAGEBYTES) - pagebase)
185 #define PAGE_TO_ADDR(page) (((page) + pagebase) * PAGEBYTES)
186 #define PHYSPAGE(page)     ((((page) + pagebase) * PAGEBYTES) & pp_mask)
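
/* Worked example (illustrative values: PAGEBYTES == 512, 4096-byte
 * physical pages, ARRAY_BROKEN undefined so pagebase == 0):
 *   ADDR_TO_PAGE (0x5678) == 0x5600 / 0x200  == 43
 *   PAGE_TO_ADDR (43)     == 0x5600
 *   PHYSPAGE (43)         == 0x5600 & ~0xfff == 0x5000
 */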

#define UNALLOCATED_PAGE   (gcspace_t)(-2)
#define FREE_PAGE          1

#define OBJECTPAGE         0
#define CONTPAGE           1

#define PERCENT(x, y)  (((x) * 100) / (y))
#define HEAPPERCENT(x)  PERCENT(x, logical_pages)

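/* Heap pages a and b can be treated as one cluster if they lie on the
 * same physical page, or if they are in the same space and no object
 * runs across the enclosing physical page boundaries (the physical page
 * containing a begins with an object page, as does the physical page
 * following b's).
 */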
#define IS_CLUSTER(a,b) (SAME_PHYSPAGE (PAGE_TO_ADDR ((a)), \
                                        PAGE_TO_ADDR ((b))) || \
                         (space[a] == space[b] && \
                          types[(a)&hp_per_pp_mask] == OBJECTPAGE && \
                          types[((b)&hp_per_pp_mask)+hp_per_pp] == OBJECTPAGE))

/* check whether the (physical) page starting at address addr is protected
 * or not. SET_PROTECT and SET_UNPROTECT are used to set or clear the flag
 * for the page starting at address addr in the pmap array. The job of
 * protecting a page (by calling mprotect) is done in PROTECT/UNPROTECT.
 */

#define PMAP(addr)           pmap[((addr) - PAGE_TO_ADDR(0)) >> pp_shift]

#define IS_PROTECTED(addr)   ( PMAP (addr) )
#define SET_PROTECT(addr)    { PMAP (addr) = 1; protected_pages++; }
#define SET_UNPROTECT(addr)  { PMAP (addr) = 0; protected_pages--; }

#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
#  ifndef PROT_RW
#    define PROT_RW   (PROT_READ | PROT_WRITE)
#  endif
#  ifndef PROT_NONE
#    define PROT_NONE 0
#  endif
#  define MPROTECT(addr,len,prot) { if (inc_collection) \
                                        mprotect ((caddr_t)(addr), (len), \
                                                  (prot)); }
#else
#  define PROT_RW
#  define PROT_NONE
#  define MPROTECT(addr,len,prot)
#endif

#define PROTECT(addr)   { if (!IS_PROTECTED (addr)) {                         \
                              if (!scanning) {                                \
                                  SET_PROTECT (addr);                         \
                                  MPROTECT ((addr), bytes_per_pp, PROT_NONE); \
                              } else                                          \
                                  AddDirty ((addr));                          \
                          } }

#define UNPROTECT(addr) { if (IS_PROTECTED (addr)) {                          \
                              SET_UNPROTECT (addr);                           \
                              MPROTECT ((addr), bytes_per_pp, PROT_RW);       \
                          } }
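
/* These macros implement the write barrier: pages that still contain
 * unscanned forwarded objects are mprotect'ed. When the mutator touches
 * one of them, the SIGSEGV handler below (PagefaultHandler) calls
 * ScanCluster on the faulted physical page, which unprotects and scans
 * it before the access is restarted.
 */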

/*****************************************************************************/

/* the following functions maintain a linked list to remember pages that
 * are "endangered" while scanning goes on. The list elements are arrays,
 * each one containing some page addresses. If an array is filled, a new
 * one is appended to the list (dynamically).
 * An address is not added to the list if the most recently added entry
 * is the same address. It is not necessary to add an address if it is in
 * the list anywhere, but searching would be too time-consuming.
 */

static void SetupDirtyList () {
    dirtylist = (struct dirty_rec *) malloc (sizeof (struct dirty_rec));
    if (dirtylist == (struct dirty_rec *)0)
        Fatal_Error ("SetupDirtyList: unable to allocate memory");
    memset (dirtylist->pages, 0, sizeof (dirtylist->pages));
    dirtylist->next = (struct dirty_rec *)0;
    dirtyhead = dirtylist;
    dirtyentries = 0;
}

static void AddDirty (pageno_t addr) {
    struct dirty_rec *p;

    if (dirtyentries != 0 &&
        dirtylist->pages[(dirtyentries-1) % DIRTYENTRIES] == addr)
            return;
    else
        dirtylist->pages[dirtyentries++ % DIRTYENTRIES] = addr;

    if (dirtyentries % DIRTYENTRIES == 0) {
        p = (struct dirty_rec *) malloc (sizeof (struct dirty_rec));
        if (p == (struct dirty_rec *)0)
            Fatal_Error ("AddDirty: unable to allocate memory");
        memset (p->pages, 0, sizeof (p->pages));
        p->next = (struct dirty_rec *)0;
        dirtylist->next = p;
        dirtylist = p;
    }
}

static void ReprotectDirty () {
    int i;

    dirtylist = dirtyhead;
    while (dirtylist) {
        for (i = 0; i < DIRTYENTRIES && dirtyentries--; i++)
            PROTECT (dirtylist->pages[i]);
        dirtylist = dirtylist->next;
    }

    dirtyentries = 0;
    dirtylist = dirtyhead;
    dirtylist->next = (struct dirty_rec *)0;
}


/* register a page which has been promoted into the scan region by the
 * Visit function. If that page has not been scanned yet, return, else
 * remember the page to be scanned later. If there is not enough space
 * to remember pages, set a flag to rescan the whole scan region.
 */

static void RegisterPage (pageno_t page) {
    if (allscan)
        return;

    if (IS_SCANNED (PAGE_TO_ADDR (page))) {
        if (rescanpages < MAXRESCAN)
            rescan[rescanpages++] = page;
        else
            allscan = 1;
    }
}

/* determine a physical page cluster. Search backward until the beginning
 * of the cluster is found, then forward until the length of the cluster
 * is determined. The first parameter is the address of the first physical
 * page in the cluster, the second one is the length in physical pages.
 * Note that these parameters are value-result parameters !
 */

static void DetermineCluster (gcptr_t *addr, int *len) {
    gcptr_t addr1;

    *len = 1;
    while (types[ADDR_TO_PAGE (*addr)] != OBJECTPAGE) {
        *addr -= bytes_per_pp;
        (*len)++;
    }
    addr1 = *addr + ((*len) << pp_shift);

    while (ADDR_TO_PAGE(addr1) <= lastpage &&
            space[ADDR_TO_PAGE(addr1)] > 0 &&
            types[ADDR_TO_PAGE(addr1)] != OBJECTPAGE) {
        addr1 += bytes_per_pp;
        (*len)++;
    }
}


/* the following two functions are used to protect or unprotect a page
 * cluster. The first parameter is the address of the first page of the
 * cluster, the second one is the length in physical pages. If the length
 * is 0, DetermineCluster is called to set the length accordingly.
 */

static void ProtectCluster (gcptr_t addr, int len) {
    if (!len) DetermineCluster (&addr, &len);
    if (len > 1) {
        while (len) {
            if (!IS_PROTECTED (addr)) {
                MPROTECT (addr, len << pp_shift, PROT_NONE);
                break;
            }
            len--;
            addr += bytes_per_pp;
        }
        while (len--) {
            if (!IS_PROTECTED (addr)) SET_PROTECT (addr);
            addr += bytes_per_pp;
        }
    } else {
        if (!IS_PROTECTED (addr)) {
            MPROTECT (addr, bytes_per_pp, PROT_NONE);
            SET_PROTECT (addr);
        }
    }
}


static void UnprotectCluster (gcptr_t addr, int len) {
    if (!len) DetermineCluster (&addr, &len);
    MPROTECT (addr, len << pp_shift, PROT_RW);
    while (len--) {
        if (IS_PROTECTED (addr)) SET_UNPROTECT (addr);
        addr += bytes_per_pp;
    }
}


/* add one page to the stable set queue */

static void AddQueue (pageno_t page) {

    if (stable_queue != (pageno_t)-1)
        linked[stable_tail] = page;
    else
        stable_queue = page;
    linked[page] = (pageno_t)-1;
    stable_tail = page;
}


/* the following function promotes all heap pages in the stable set queue
 * into current space. After this, there are no more forwarded pages in the
 * heap.
 */

static void PromoteStableQueue () {
    Object *p;
    int pcount, size;
    pageno_t start;

    while (stable_queue != (pageno_t)-1) {
        p = PAGE_TO_OBJ (stable_queue);
#ifdef ALIGN_8BYTE
        p++;
#endif
        size = HEADER_TO_WORDS (*p);
        pcount = NEEDED_PAGES (size);

        start = stable_queue;
        while (pcount--)
            space[start++] = current_space;
        stable_queue = linked[stable_queue];
    }
    current_pages = allocated_pages;
    forwarded_pages = 0;
}

/* calculate the logarithm (base 2) for arguments == 2**n
 */

static int Logbase2 (addrarith_t psize) {
    int shift = 0;

#if LONG_BITS-64 == 0
    if (psize & 0xffffffff00000000) shift += 32;
    if (psize & 0xffff0000ffff0000) shift += 16;
    if (psize & 0xff00ff00ff00ff00) shift += 8;
    if (psize & 0xf0f0f0f0f0f0f0f0) shift += 4;
    if (psize & 0xcccccccccccccccc) shift += 2;
    if (psize & 0xaaaaaaaaaaaaaaaa) shift += 1;
#else
    if (psize & 0xffff0000) shift += 16;
    if (psize & 0xff00ff00) shift += 8;
    if (psize & 0xf0f0f0f0) shift += 4;
    if (psize & 0xcccccccc) shift += 2;
    if (psize & 0xaaaaaaaa) shift += 1;
#endif

    return (shift);
}
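
/* E.g. Logbase2 (4096) == 12. The result is meaningful only for exact
 * powers of two, which is guaranteed here because it is only applied to
 * the physical page size.
 */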

/* return next heap page number, wrap around at the end of the heap. */

static pageno_t next (pageno_t page) {
    return ((page < lastpage) ? page+1 : firstpage);
}

/*****************************************************************************/

#ifdef MPROTECT_MMAP

static char *heapmalloc (int s) {
    char *ret = mmap (0, s, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);

    if (ret == (char*)-1)
        ret = 0;

    return ret;
}

#else

#  define heapmalloc(size)  (char *)malloc ((size))

#endif

/*
 * make a heap of size kilobytes. It is divided into heap pages of
 * PAGEBYTES bytes and is aligned at a physical page boundary. The
 * heap size is rounded up to the nearest multiple of the physical
 * page size. Checked by sam@hocevar.net on Apr 1, 2003.
 */

void Make_Heap (int size) {
    addrarith_t heapsize = size * 2 * 1024;
    char *heap_ptr, *aligned_heap_ptr;
    Object heap_obj;
    pageno_t i;

#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
    InstallHandler ();
#endif

    /* calculate number of logical heap pages and of used physical pages.
     * First, round up to the nearest multiple of the physical page size,
     * then calculate the resulting number of heap pages.
     */

#if defined(_SC_PAGESIZE)
    if ((bytes_per_pp = sysconf (_SC_PAGESIZE)) == -1)
        Fatal_Error ("sysconf(_SC_PAGESIZE) failed; can't get pagesize");
#elif defined(HAVE_GETPAGESIZE)
    bytes_per_pp = getpagesize ();
#elif defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
#   error "mprotect requires getpagesize or sysconf_pagesize"
#else
    bytes_per_pp = 4096;
#endif
    physical_pages = (heapsize+bytes_per_pp-1)/bytes_per_pp;
    hp_per_pp = bytes_per_pp / PAGEBYTES;
    hp_per_pp_mask = ~(hp_per_pp - 1);
    logical_pages = spanning_pages = physical_pages * hp_per_pp;
    pp_mask = ~(bytes_per_pp-1);
    pp_shift = Logbase2 (bytes_per_pp);

    heap_ptr = heapmalloc (logical_pages*PAGEBYTES+bytes_per_pp-1);
    /* FIXME: add heap_ptr to a list of pointers to free */
    saved_heap_ptr = heap_ptr;

    if (heap_ptr == NULL)
        Fatal_Error ("cannot allocate heap (%u KBytes)", size);

    /* Align heap at a memory page boundary */

    if ((gcptr_t)heap_ptr & (bytes_per_pp-1))
        aligned_heap_ptr = (char*)(((gcptr_t)heap_ptr+bytes_per_pp-1)
            & ~(bytes_per_pp-1));
    else
        aligned_heap_ptr = heap_ptr;

    SET(heap_obj, 0, (intptr_t)aligned_heap_ptr);

#ifdef ARRAY_BROKEN
    pagebase = ((gcptr_t)POINTER (heap_obj)) / PAGEBYTES;
#endif
    firstpage = OBJ_TO_PAGE (heap_obj);
    lastpage = firstpage+logical_pages-1;

    space = (gcspace_t *)malloc (logical_pages*sizeof (gcspace_t));
    types = (gcspace_t *)malloc ((logical_pages + 1)*sizeof (gcspace_t));
    pmap = (gcspace_t *)malloc (physical_pages*sizeof (gcspace_t));
    linked = (pageno_t *)malloc (logical_pages*sizeof (pageno_t));
    if (!space || !types || !pmap || !linked) {
        free (heap_ptr);
        if (space) free ((char*)space);
        if (types) free ((char*)types);
        if (pmap) free ((char*)pmap);
        if (linked) free ((char*)linked);
        Fatal_Error ("cannot allocate heap maps");
    }

    memset (types, 0, (logical_pages + 1)*sizeof (gcspace_t));
    memset (pmap, 0, physical_pages*sizeof (gcspace_t));
    memset (linked, 0, logical_pages*sizeof (pageno_t));
    space -= firstpage; /* to index the arrays with the heap page number */
    types -= firstpage;
    types[lastpage+1] = OBJECTPAGE;
    linked -= firstpage;
#ifndef ARRAY_BROKEN
    pmap -= (PAGE_TO_ADDR (firstpage) >> pp_shift);
#endif
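
    /* After the adjustments above the maps are indexed directly with
     * heap page numbers: e.g. if firstpage happened to be 4096, then
     * space[4096] refers to element 0 of the block returned by malloc.
     * The same bias is applied to pmap using physical page indices.
     */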

    for (i = firstpage; i <= lastpage; i++)
        space[i] = FREE_PAGE;

    allocated_pages = 0;
    forwarded_pages = 0;
    current_pages = 0;
    protected_pages = 0;
    stable_queue = (pageno_t)-1;
    SetupDirtyList ();

    current_space = forward_space = previous_space = 3;
    current_freepage = firstpage; current_free = 0;
}

/*
 * increment the heap by 1024 KB. Checked by sam@hocevar.net on Apr 1, 2003.
 */

static int ExpandHeap (char *reason) {
    int increment = (1024 * 1024 + bytes_per_pp - 1) / bytes_per_pp;
    int incpages = increment * hp_per_pp;
    addrarith_t heapinc = incpages * PAGEBYTES;
    pageno_t new_first, inc_first;
    pageno_t new_last, inc_last;
    pageno_t new_logpages, new_physpages;
    pageno_t new_spanpages;
    gcptr_t addr;
    gcspace_t *new_space, *new_type, *new_pmap;
    pageno_t *new_link, i;
    char *heap_ptr, *aligned_heap_ptr;
    Object heap_obj;
#ifdef ARRAY_BROKEN
    pageno_t new_pagebase, offset;
    pageno_t new_firstpage, new_lastpage;
#else
#   define offset 0
#endif

    /* FIXME: this pointer is lost */
    heap_ptr = heapmalloc (heapinc+bytes_per_pp/*-1*/);

    if (heap_ptr == NULL) {
        if (Var_Is_True (V_Garbage_Collect_Notifyp)) {
            char buf[243];
            sprintf(buf, "[Heap expansion failed (%s)]~%%", reason);
            Format (Standard_Output_Port, buf,
                    strlen(buf), 0, (Object *)0);
            (void)fflush (stdout);
        }
        return (0);
    }

    /* Align heap at a memory page boundary */

    if ((gcptr_t)heap_ptr & (bytes_per_pp-1))
        aligned_heap_ptr = (char*)(((gcptr_t)heap_ptr+bytes_per_pp-1)
            & ~(bytes_per_pp-1));
    else
        aligned_heap_ptr = heap_ptr;

    SET(heap_obj, 0, (intptr_t)aligned_heap_ptr);

    new_first = firstpage;
    new_last = lastpage;

#ifdef ARRAY_BROKEN
    new_pagebase = ((gcptr_t)POINTER (heap_obj)) / PAGEBYTES;
    inc_first = 0; /* = OBJ_TO_PAGE (heap_obj) - new_pagebase */

    new_firstpage = (pagebase > new_pagebase)
        ? new_pagebase : pagebase;

    new_lastpage = (pagebase > new_pagebase)
        ? pagebase + lastpage
        : new_pagebase + incpages - 1;

    offset = pagebase - new_firstpage;
#else
    inc_first = OBJ_TO_PAGE (heap_obj);
#endif

    inc_last = inc_first+incpages-1;
    if (inc_last > lastpage)
        new_last = inc_last;
    if (inc_first < firstpage)
        new_first = inc_first;
    new_logpages = logical_pages+incpages;
#ifdef ARRAY_BROKEN
    new_spanpages = new_lastpage-new_firstpage+1;
    new_last = new_spanpages-1;
#else
    new_spanpages = new_last-new_first+1;
#endif
    new_physpages = new_spanpages / hp_per_pp;

    new_space = (gcspace_t *)malloc (new_spanpages*sizeof (gcspace_t));
    new_type = (gcspace_t *)malloc ((new_spanpages + 1)*sizeof (gcspace_t));
    new_pmap = (gcspace_t *)malloc (new_physpages*sizeof (gcspace_t));
    new_link = (pageno_t *)malloc (new_spanpages*sizeof (pageno_t));
    if (!new_space || !new_type || !new_pmap || !new_link) {
        free (heap_ptr);
        if (new_space) free ((char*)new_space);
        if (new_type) free ((char*)new_type);
        if (new_pmap) free ((char*)new_pmap);
        if (new_link) free ((char*)new_link);
        if (Var_Is_True (V_Garbage_Collect_Notifyp)) {
            Format (Standard_Output_Port, "[Heap expansion failed]~%",
                    25, 0, (Object *)0);
            (void)fflush (stdout);
        }
        return (0);
    }


    /* new_first will be 0 if ARRAY_BROKEN is defined. */

    new_space -= new_first;
    new_type -= new_first;
    new_link -= new_first;

    memset (new_pmap, 0, new_physpages * sizeof (gcspace_t));
#ifndef ARRAY_BROKEN
    new_pmap -= (PHYSPAGE (new_first) >> pp_shift);
#endif

    memset (new_type+inc_first+offset, 0, (incpages+1)*sizeof (gcspace_t));
    memset (new_link+inc_first+offset, 0, incpages*sizeof (pageno_t));

    /* FIXME: memmove! */
    for (i = firstpage; i <= lastpage; i++) {
        new_link[i + offset] = linked[i] + offset;
        new_type[i + offset] = types[i];
    }
    for (addr = PAGE_TO_ADDR (firstpage); addr <= PAGE_TO_ADDR (lastpage);
         addr += bytes_per_pp) {
        new_pmap[((addr - PAGE_TO_ADDR(0)) >> pp_shift) + offset] =
            IS_PROTECTED (addr);
    }

#ifdef ARRAY_BROKEN
    for (i = 0; i < new_spanpages; i++) new_space[i] = UNALLOCATED_PAGE;
    for (i = firstpage; i <= lastpage; i++) new_space[i+offset] = space[i];
    offset = offset ? 0 : new_pagebase - pagebase;
    for (i = offset; i <= offset + inc_last; i++) new_space[i] = FREE_PAGE;
    new_type[new_spanpages] = OBJECTPAGE;
#else
    for (i = new_first; i < firstpage; i++) new_space[i] = UNALLOCATED_PAGE;
    for (i = firstpage; i <= lastpage; i++) new_space[i] = space[i];

    for (i = lastpage+1; i <= new_last; i++) new_space[i] = UNALLOCATED_PAGE;
    for (i = inc_first; i <= inc_last; i++) new_space[i] = FREE_PAGE;
    new_type[new_last+1] = OBJECTPAGE;
#endif

    current_freepage += offset;
    forward_freepage += offset;
    last_forward_freepage += offset;

    free ((char*)(linked+firstpage));
    free ((char*)(types+firstpage));
    free ((char*)(space+firstpage));

#ifndef ARRAY_BROKEN
    free ((char*)(pmap+(PAGE_TO_ADDR (firstpage) >> pp_shift)));
#else
    free ((char*)pmap);
#endif

    linked = new_link;
    types = new_type;
    space = new_space;
    pmap = new_pmap;
    firstpage = new_first;
    lastpage = new_last;
    logical_pages = new_logpages;
    spanning_pages = new_spanpages;
    physical_pages = new_physpages;

    if (Var_Is_True (V_Garbage_Collect_Notifyp)) {
        int a = (logical_pages * PAGEBYTES) >> 10;
        char buf[243];

        sprintf(buf, "[Heap expanded to %dK (%s)]~%%", a, reason);
        Format (Standard_Output_Port, buf, strlen(buf), 0, (Object *)0);
        (void)fflush (stdout);
    }
    return (1);
}


/*
 * free the heap.
 */

void Free_Heap () {
    free (saved_heap_ptr);

    free ((char*)(linked+firstpage));
    free ((char*)(types+firstpage));
    free ((char*)(space+firstpage));

#ifndef ARRAY_BROKEN
    free ((char*)(pmap+(PAGE_TO_ADDR (firstpage) >> pp_shift)));
#else
    free ((char*)pmap);
#endif
}

/* allocate new logical heap pages. npg is the number of pages to allocate.
 * If there is not enough space left, the heap will be expanded if possible.
 * The new page is allocated in current space.
 */

static int ProtectedInRegion (pageno_t start, pageno_t npages) {
    gcptr_t beginpage = PHYSPAGE (start);
    gcptr_t endpage = PHYSPAGE (start+npages-1);

    do {
        if (IS_PROTECTED (beginpage))
            return (1);
        beginpage += bytes_per_pp;
    } while (beginpage <= endpage);

    return (0);
}

static void AllocPage (pageno_t npg) {
    pageno_t first_freepage = 0; /* first free heap page */
    pageno_t cont_free;          /* contiguous free pages */
    pageno_t n, p;

    if (current_space != forward_space) {
        (void)Scanner ((pageno_t)1);
        if (!protected_pages)
            TerminateGC ();
    } else {
        if (inc_collection) {
            if (allocated_pages+npg >= logical_pages/3)
                P_Collect_Incremental ();
        } else {
            if (allocated_pages+npg >= logical_pages/2)
                P_Collect ();
        }
    }

    /* now look for a cluster of npg free pages. cont_free counts the
     * number of free pages found, first_freepage is the number of the
     * first free heap page in the cluster. */
    for (p = spanning_pages, cont_free = 0; p; p--) {

        /* If we have more space than before, or if the current page is
         * stable, start again with the next page. */
        if (space[current_freepage] >= previous_space
             || STABLE (current_freepage)) {
            current_freepage = next (current_freepage);
            cont_free = 0;
            continue;
        }

        if (cont_free == 0) {
            /* This is our first free page, first check that we have a
             * contiguous cluster of pages (we'll check later that they
             * are free). Otherwise, go to the next free page. */
            if ((current_freepage+npg-1) > lastpage
                || !IS_CLUSTER (current_freepage, current_freepage+npg-1)) {
                current_freepage = next ((current_freepage&hp_per_pp_mask)
                                          +hp_per_pp-1);
                continue;
            }

            first_freepage = current_freepage;
        }

        cont_free++;

        if (cont_free == npg) {
            space[first_freepage] = current_space;
            types[first_freepage] = OBJECTPAGE;
            for (n = 1; n < npg; n++) {
                space[first_freepage+n] = current_space;
                types[first_freepage+n] = CONTPAGE;
            }
            current_freep = PAGE_TO_OBJ (first_freepage);
            current_free = npg*PAGEWORDS;
            current_pages += npg;
            allocated_pages += npg;
            current_freepage = next (first_freepage+npg-1);
            if (ProtectedInRegion (first_freepage, npg))
                (void)ScanCluster (PHYSPAGE (first_freepage));
            return;
        }

        /* check the next free page. If we wrapped around, reset cont_free
         * to 0. */
        current_freepage = next (current_freepage);
        if (current_freepage == firstpage) cont_free = 0;
    }

    /* no space available, try to expand heap */

    if (ExpandHeap ("to allocate new object")) {
        AllocPage (npg);
        return;
    }

    Fatal_Error ("unable to allocate %lu bytes in heap", npg*PAGEBYTES);

    /*NOTREACHED*/
}


/* allocate an object in the heap. size is the size of the new object
 * in bytes, type describes the object's type (see object.h), and konst
 * determines whether the object is immutable.
 */

Object Alloc_Object (int size, int type, int konst) {
    Object obj;
    register addrarith_t s = /* size in words */
        ((size + sizeof(Object) - 1) / sizeof(Object)) + 1;
    int big = 0;

    if (GC_Debug) {
        if (inc_collection)
            P_Collect_Incremental ();
        else
            P_Collect ();
    }

    /* if there is not enough space left on the current page, discard
     * the remaining space and allocate a new page. Space is discarded by
     * writing a T_Freespace object.
     */

    if (s > current_free) {
        if (current_free) {
            MAKE_HEADER (*current_freep, current_free, T_Freespace);
            current_free = 0;
        }

        /* If we are about to allocate an object bigger than one heap page,
         * set a flag. The space behind big objects is discarded, see below.
         */

#ifdef ALIGN_8BYTE
        if (s < PAGEWORDS-1)
            AllocPage ((pageno_t)1);
        else {
            AllocPage ((pageno_t)(s+PAGEWORDS)/PAGEWORDS);
            big = 1;
        }
        MAKE_HEADER (*current_freep, 1, T_Align_8Byte);
        current_freep++;
        current_free--;
#else
        if (s < PAGEWORDS)
            AllocPage ((pageno_t)1);
        else {
            AllocPage ((pageno_t)(s+PAGEWORDS-1)/PAGEWORDS);
            big = 1;
        }
#endif
    }

    /* now write a header for the object into the heap and update the
     * pointer to the next free location and the counter of free words
     * in the current heap page.
     */

    MAKE_HEADER (*current_freep, s, type);
    current_freep++;
    *current_freep = Null;
    SET (obj, type, (intptr_t)current_freep);
    if (big)
        current_freep = (Object*)0, current_free = 0;
    else
        current_freep += (s-1), current_free -= s;
#ifdef ALIGN_8BYTE
    if (!((gcptr_t)current_freep & 7) && current_free) {
        MAKE_HEADER (*current_freep, 1, T_Align_8Byte);
        current_freep++;
        current_free--;
    }
#endif
    if (type == T_Control_Point)
        CONTROL(obj)->reloc = 0;

    if (konst) SETCONST (obj);
    return (obj);
}
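
/* A typical call site elsewhere in the interpreter might look like the
 * following sketch (the struct and type names are assumptions based on
 * their use in the rest of this file, e.g. the T_Pair case in ScanPage):
 *
 *     Object cell = Alloc_Object (sizeof (struct S_Pair), T_Pair, 0);
 *     PAIR (cell)->car = car;
 *     PAIR (cell)->cdr = cdr;
 */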


/* allocate a page in forward space. If there is no space left, the heap
 * is expanded. The argument prevents allocation of a heap page which lies
 * on the same physical page the referenced object lies on.
 */

static void AllocForwardPage (Object bad) {
    Object *badaddr = (Object *)POINTER (bad);
    pageno_t whole_heap = spanning_pages;
    pageno_t tpage;

    while (whole_heap--) {
        if (space[forward_freepage] < previous_space
            && !STABLE (forward_freepage)
            && !SAME_PHYSPAGE ((gcptr_t)badaddr,
                    PAGE_TO_ADDR (forward_freepage))
            && !IN_SCANREGION (PAGE_TO_ADDR (forward_freepage))) {

            allocated_pages++;
            forwarded_pages++;
            space[forward_freepage] = forward_space;
            types[forward_freepage] = OBJECTPAGE;
            forward_freep = PAGE_TO_OBJ (forward_freepage);
            forward_free = PAGEWORDS;
            AddQueue (forward_freepage);

            tpage = last_forward_freepage;
            last_forward_freepage = next (forward_freepage);
            forward_freepage = tpage;
            return;
        } else {
            forward_freepage = next (forward_freepage);
        }
    }

    if (ExpandHeap ("to allocate forward page")) {
        AllocForwardPage (bad);
        return;
    }

    Fatal_Error ("unable to allocate forward page in %lu KBytes heap",
                 (logical_pages * PAGEBYTES) >> 10);

    /*NOTREACHED*/
}


/* Visit an object and move it into forward space.  The forwarded
 * object must be protected because it is to be scanned later.
 */

int Visit (register Object *cp) {
    register pageno_t page = OBJ_TO_PAGE (*cp);
    register Object *obj_ptr = (Object *)POINTER (*cp);
    int tag = TYPE (*cp);
    int konst = ISCONST (*cp);
    addrarith_t objwords;
    pageno_t objpages, pcount;
    gcptr_t ffreep, pageaddr = 0;
    int outside;

    /* if the Visit function is called via the REVIVE_OBJ macro and we are
     * not inside an incremental collection, exit immediately.
     */

    if (current_space == forward_space)
        return 0;

    if (page < firstpage || page > lastpage || STABLE (page)
        || space[page] == current_space || space[page] == UNALLOCATED_PAGE
        || !Types[tag].haspointer)
        return 0;

    if (space[page] != previous_space) {
        char buf[100];
        sprintf (buf, "Visit: object not in prev space at %p ('%s') %d %d",
            obj_ptr, Types[tag].name, space[page], previous_space);
        Panic (buf);
    }

    if (!IN_SCANREGION (obj_ptr) && IS_PROTECTED ((gcptr_t)obj_ptr)) {
        pageaddr = OBJ_TO_PPADDR (*cp);
        UNPROTECT (pageaddr);
    }

    if (WAS_FORWARDED (*cp)) {
        if (pageaddr != 0)
            PROTECT (pageaddr);
        MAKEOBJ (*cp, tag, (intptr_t)POINTER(*obj_ptr));
        if (konst)
            SETCONST (*cp);
        return 0;
    }

    ffreep = PTR_TO_PPADDR (forward_freep);
    outside = !IN_SCANREGION (forward_freep);
    objwords = HEADER_TO_WORDS (*(obj_ptr - 1));
    if (objwords >= forward_free) {
#ifdef ALIGN_8BYTE
        if (objwords >= PAGEWORDS - 1) {
            objpages = (objwords + PAGEWORDS) / PAGEWORDS;
#else
        if (objwords >= PAGEWORDS) {
            objpages = (objwords + PAGEWORDS - 1) / PAGEWORDS;
#endif
            forwarded_pages += objpages;
            for (pcount = 0; pcount < objpages; pcount++)
                space[page + pcount] = forward_space;
            AddQueue (page);
            if (IN_SCANREGION (PAGE_TO_ADDR (page)))
                RegisterPage (page);
            else
                ProtectCluster (PHYSPAGE (page), 0);

            if (pageaddr != 0)
                PROTECT (pageaddr);

            return 0;
        }

        if (forward_free) {
            if (outside && IS_PROTECTED (ffreep)
                && !SAME_PHYSPAGE ((gcptr_t)obj_ptr, ffreep)) {

                UNPROTECT (ffreep);
                MAKE_HEADER (*forward_freep, forward_free, T_Freespace);
                forward_free = 0;
                PROTECT (ffreep);
            } else {
                MAKE_HEADER (*forward_freep, forward_free, T_Freespace);
                forward_free = 0;
            }
        }

        AllocForwardPage (*cp);
        outside = !IN_SCANREGION (forward_freep);
        ffreep = PTR_TO_PPADDR (forward_freep); /* re-set ffreep ! */
#ifdef ALIGN_8BYTE
        if (outside && IS_PROTECTED (ffreep))
            UNPROTECT (ffreep);
        MAKE_HEADER (*forward_freep, 1, T_Align_8Byte);
        forward_freep++;
        forward_free--;
        goto do_forward;
#endif
    }

    if (outside && IS_PROTECTED (ffreep))
        UNPROTECT (ffreep);

#ifdef ALIGN_8BYTE
do_forward:
#endif
    if (tag == T_Control_Point) {
        CONTROL (*cp)->reloc =
            (char*)(forward_freep + 1) - (char*)obj_ptr;
    }
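
    /* Copy the object into forward space and leave a "broken heart" at
     * the old location: a T_Broken_Heart header whose pointer field holds
     * the new address, so that later visits of other references to this
     * object can be redirected (see the WAS_FORWARDED test above).
     */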

    MAKE_HEADER (*forward_freep, objwords, tag);
    forward_freep++;
    memcpy (forward_freep, obj_ptr, (objwords-1)*sizeof(Object));
    SET (*obj_ptr, T_Broken_Heart, (intptr_t)forward_freep);
    MAKEOBJ (*cp, tag, (intptr_t)forward_freep);
    if (konst)
        SETCONST (*cp);
    forward_freep += (objwords - 1);
    forward_free -= objwords;

#ifdef ALIGN_8BYTE
    if (!((gcptr_t)forward_freep & 7) && forward_free) {
        MAKE_HEADER (*forward_freep, 1, T_Align_8Byte);
        forward_freep++;
        forward_free--;
    }
#endif

    if (outside)
        PROTECT (ffreep);

    if (pageaddr != 0)
        PROTECT (pageaddr);

    return 0;
}


/* Scan a page and visit all objects referenced by objects lying on the
 * page. This will possibly forward the referenced objects.
 */

static void ScanPage (Object *currentp, Object *nextcp) {
    Object *cp = currentp, obj;
    addrarith_t len, m, n;
    int t;

    while (cp < nextcp && (cp != forward_freep || forward_free == 0)) {
        t = HEADER_TO_TYPE (*cp);
        len = HEADER_TO_WORDS (*cp);
        cp++;

        /* cp now points to the real Scheme object in the heap. t denotes
         * the type of the object, len its length in words, including the
         * header.
         */

        SET(obj, t, (intptr_t)cp);

        switch (t) {
        case T_Symbol:
            Visit (&SYMBOL(obj)->next);
            Visit (&SYMBOL(obj)->name);
            Visit (&SYMBOL(obj)->value);
            Visit (&SYMBOL(obj)->plist);
            break;

        case T_Pair:
        case T_Environment:
            Visit (&PAIR(obj)->car);
            Visit (&PAIR(obj)->cdr);
            break;

        case T_Vector:
            for (n = 0, m = VECTOR(obj)->size; n < m; n++)
                Visit (&VECTOR(obj)->data[n]);
            break;

        case T_Compound:
            Visit (&COMPOUND(obj)->closure);
            Visit (&COMPOUND(obj)->env);
            Visit (&COMPOUND(obj)->name);
            break;

        case T_Control_Point:
            (CONTROL(obj)->delta) += CONTROL(obj)->reloc;

#ifdef HAVE_ALLOCA
            Visit_GC_List (CONTROL(obj)->gclist, CONTROL(obj)->delta);
#else
            Visit (&CONTROL(obj)->gcsave);
#endif
            Visit_Wind (CONTROL(obj)->firstwind,
                        (CONTROL(obj)->delta) );

            Visit (&CONTROL(obj)->env);
            break;

        case T_Promise:
            Visit (&PROMISE(obj)->env);
            Visit (&PROMISE(obj)->thunk);
            break;

        case T_Port:
            Visit (&PORT(obj)->name);
            break;

        case T_Autoload:
            Visit (&AUTOLOAD(obj)->files);
            Visit (&AUTOLOAD(obj)->env);
            break;

        case T_Macro:
            Visit (&MACRO(obj)->body);
            Visit (&MACRO(obj)->name);
            break;

        default:
            if (Types[t].visit)
                (Types[t].visit) (&obj, Visit);
        }
        cp += (len - 1);
    }
}


/* rescan all pages remembered by the RegisterPage function. */

static void RescanPages () {
    register Object *cp;
    register int i;
    int pages = rescanpages;

    rescanpages = 0;
    for (i = 0; i < pages; i++) {
        cp = PAGE_TO_OBJ (rescan[i]);
#ifdef ALIGN_8BYTE
        ScanPage (cp + 1, cp + PAGEWORDS);
#else
        ScanPage (cp, cp + PAGEWORDS);
#endif
    }
}

static int ScanCluster (gcptr_t addr) {
    register pageno_t page, last;
    pageno_t npages;
    int n = 0;

    scanning = 1;
    DetermineCluster (&addr, &n);
    npages = n;
    scanfirst = (Object *)addr;
    scanlast = (Object *)(addr + (npages << pp_shift) - sizeof (Object));
    UnprotectCluster ((gcptr_t)scanfirst, (int)npages);

 rescan_cluster:
    last = ADDR_TO_PAGE ((gcptr_t)scanlast);
    for (page = ADDR_TO_PAGE ((gcptr_t)scanfirst); page <= last; page++) {
        if (STABLE (page) && types[page] == OBJECTPAGE) {
            scanpointer = PAGE_TO_OBJ (page);
#ifdef ALIGN_8BYTE
            ScanPage (scanpointer + 1, scanpointer + PAGEWORDS);
#else
            ScanPage (scanpointer, scanpointer + PAGEWORDS);
#endif
        }
    }

    while (rescanpages) {
        if (allscan) {
            allscan = 0;
            goto rescan_cluster;
        } else
            RescanPages ();
    }

    scanfirst = (Object *)0;
    scanlast = (Object *)0;
    scanning = 0;
    ReprotectDirty ();

    return (npages); /* return number of scanned pages */
}


static int Scanner (pageno_t npages) {
    register gcptr_t addr, lastaddr;
    pageno_t spages;
    pageno_t scanned = 0;

    while (npages > 0 && protected_pages) {
        lastaddr = PAGE_TO_ADDR (lastpage);
        for (addr = PAGE_TO_ADDR(firstpage); addr < lastaddr && npages > 0;
             addr += bytes_per_pp) {

            if (IS_PROTECTED (addr)) {
                if (space[ADDR_TO_PAGE (addr)] == UNALLOCATED_PAGE)
                    Panic ("Scanner: found incorrect heap page");
                spages = ScanCluster (addr);
                scanned += spages;
                npages -= spages;
            }
        }
    }

    scanfirst = (Object *)0;
    scanlast = scanfirst;

    return (scanned);
}

#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
/* the following function handles a page fault. If the fault was caused
 * by the mutator and incremental collection is enabled, this will result
 * in scanning the physical page the fault occurred on.
 */

#ifdef SIGSEGV_SIGCONTEXT

static void PagefaultHandler (int sig, int code, struct sigcontext *scp) {
    char *addr = (char *)(scp->sc_badvaddr);

#else
#ifdef SIGSEGV_AIX

static void PagefaultHandler (int sig, int code, struct sigcontext *scp) {
    char *addr = (char *)scp->sc_jmpbuf.jmp_context.except[3];
    /*
     * Or should that be .jmp_context.o_vaddr?
     */

#else
#ifdef SIGSEGV_SIGINFO

static void PagefaultHandler (int sig, siginfo_t *sip, ucontext_t *ucp) {
    char *addr;

#else
#ifdef SIGSEGV_ARG4

static void PagefaultHandler (int sig, int code, struct sigcontext *scp,
    char *addr) {

#else
#ifdef SIGSEGV_HPUX

static void PagefaultHandler (int sig, int code, struct sigcontext *scp) {

#else
#  include "HAVE_MPROTECT defined, but missing SIGSEGV_xxx"
#endif
#endif
#endif
#endif
#endif

    pageno_t page;
    gcptr_t ppage;
    char *errmsg = 0;

#ifdef SIGSEGV_AIX
    if ((char *)scp->sc_jmpbuf.jmp_context.except[0] != addr)
        Panic ("except");
#endif

#ifdef SIGSEGV_SIGINFO
    if (sip == 0)
        Fatal_Error ("SIGSEGV handler got called with zero siginfo_t");
    addr = sip->si_addr;
#endif

#ifdef SIGSEGV_HPUX
    char *addr;

    if (scp == 0)
        Fatal_Error ("SIGSEGV handler got called with zero sigcontext");
    addr = (char *)scp->sc_sl.sl_ss.ss_cr21;
#endif

    ppage = PTR_TO_PPADDR(addr);
    page = ADDR_TO_PAGE((gcptr_t)addr);

    if (!inc_collection)
        errmsg = "SIGSEGV signal received";
    else if (current_space == forward_space)
        errmsg = "SIGSEGV signal received while not garbage collecting";
    else if (page < firstpage || page > lastpage)
        errmsg = "SIGSEGV signal received; address outside of heap";
    if (errmsg) {
        fprintf (stderr, "\n[%s]\n", errmsg);
        abort ();
    }

    GC_In_Progress = 1;
    (void)ScanCluster (ppage);
    GC_In_Progress = 0;
#ifdef SIGSEGV_AIX
    InstallHandler ();
#endif
    return;
}

void InstallHandler () {
#ifdef SIGSEGV_SIGINFO
    struct sigaction sact;
    sigset_t mask;

    sact.sa_handler = (void (*)())PagefaultHandler;
    sigemptyset (&mask);
    sact.sa_mask = mask;
    sact.sa_flags = SA_SIGINFO;
    if (sigaction (SIGSEGV, &sact, 0) == -1) {
        perror ("sigaction"); exit (1);
    }
#else
    (void)signal (SIGSEGV, (void (*)())PagefaultHandler);
#endif
}
#endif

static void TerminateGC () {
    int save_force_total;

    forward_space = current_space;
    previous_space = current_space;

    if (protected_pages)
        Panic ("TerminateGC: protected pages after collection");

    allocated_pages = current_pages + forwarded_pages;
    current_pages = 0;

    if (forward_free) {
        MAKE_HEADER (*forward_freep, forward_free, T_Freespace);
        forward_free = 0;
    }
    forward_freep = (Object *)0;

    Call_After_GC();
    GC_In_Progress = 0;
    Enable_Interrupts;

    if (Var_Is_True (V_Garbage_Collect_Notifyp) && !GC_Debug) {
        int foo = percent - HEAPPERCENT (allocated_pages);
        Object bar;

        bar = Make_Integer (foo);
        if (!incomplete_msg)
            Format (Standard_Output_Port, "[", 1, 0, (Object *)0);

        if (foo >= 0)
            Format (Standard_Output_Port, "~s% reclaimed]~%", 16, 1, &bar);
        else
            Format (Standard_Output_Port, "finished]~%", 11, 0, (Object *)0);
        (void)fflush (stdout);
        incomplete_msg = 0;
    }

    if (PERCENT (allocated_pages, old_logical_pages) >= tuneable_force_total) {
        PromoteStableQueue ();
        save_force_total = tuneable_force_total;
        tuneable_force_total = 100;
        if (inc_collection)
            P_Collect_Incremental ();
        else
            P_Collect ();
        tuneable_force_total = save_force_total;
        if (HEAPPERCENT (allocated_pages) >= tuneable_newly_expand)
            /* the return value should not be ignored here: */
            (void)ExpandHeap ("after full collection");
    }
}


static void Finish_Collection () {
    register gcptr_t addr;

    do {
        for (addr = PAGE_TO_ADDR(firstpage);
             addr < PAGE_TO_ADDR(lastpage);
             addr += bytes_per_pp) {

            if (IS_PROTECTED (addr)) {
                (void)ScanCluster (addr);
                if (protected_pages == 0) TerminateGC ();
            }
        }
    } while (protected_pages);

    return;
}


static void General_Collect (int initiate) {
    pageno_t fpage, free_fpages, i;
    pageno_t page;
    pageno_t fregion_pages;
    Object obj;

    if (!Interpreter_Initialized)
        Fatal_Error ("Out of heap space (increase heap size)");

    if (current_space != forward_space && !inc_collection) {
        Format (Standard_Output_Port, "GC while GC in progress~%",
                25, 0, (Object*)0);
        return;
    }

    /* Call all user-registered functions to be executed just before GC. */

    Disable_Interrupts;
    GC_In_Progress = 1;
    Call_Before_GC();
    percent = HEAPPERCENT (allocated_pages);
    old_logical_pages = logical_pages;

    if (Var_Is_True (V_Garbage_Collect_Notifyp) && !GC_Debug) {
        if (initiate) {
            Format (Standard_Output_Port, "[Garbage collecting...]~%",
                    25, 0, (Object *)0);
            incomplete_msg = 0;
        } else {
            Format (Standard_Output_Port, "[Garbage collecting... ",
                    23, 0, (Object *)0);
            incomplete_msg = 1;
        }
        (void)fflush (stdout);
    }

    if (GC_Debug) {
        printf ("."); (void)fflush (stdout);
    }

    /* discard any remaining portion of the current heap page */

    if (current_free) {
        MAKE_HEADER (*current_freep, current_free, T_Freespace);
        current_free = 0;
    }

    /* partition regions for forwarded and newly-allocated objects. Then
     * advance the current free pointer so that - if possible - there will
     * be about logical_pages / tuneable_forward_region free heap pages in
     * the forward region.
     */

    forward_freepage = current_freepage;
    last_forward_freepage = forward_freepage;

    current_freep = PAGE_TO_OBJ (current_freepage);
    forward_freep = current_freep;

    fpage = forward_freepage;
    free_fpages = 0;
    fregion_pages = logical_pages / tuneable_forward_region;

    for (i = 0; free_fpages <= fregion_pages && i < spanning_pages; i++) {
        if (space[fpage] != current_space && !STABLE (fpage))
            free_fpages++;
        fpage = next (fpage);
    }
    current_freep = (Object *)PHYSPAGE (fpage);
    SET(obj, 0, (intptr_t)current_freep);
    current_freepage = OBJ_TO_PAGE (obj);

    /* advance spaces. Then forward all objects directly accessible
     * via the global GC lists and the WIND list.
     */

    current_pages = 0;
    forward_space = current_space + 1;
    current_space = current_space + 2;

    Visit_GC_List (Global_GC_Obj, 0);
    Visit_GC_List (GC_List, 0);
    Visit_Wind (First_Wind, 0);

    /* If collecting in a non-incremental manner, scan all heap pages which
     * have been protected, else check whether to expand the heap because
     * the stable set has grown too big.
     */

    page = stable_queue;
    while (page != (pageno_t)-1) {
        ProtectCluster (PHYSPAGE (page), 0);
        page = linked[page];
    }

    if (!initiate) {
        Finish_Collection ();
    } else
        if (HEAPPERCENT (forwarded_pages) > tuneable_force_expand)
            /* the return value should not be ignored here: */
            (void)ExpandHeap ("large stable set");

    GC_In_Progress = 0;
    return;
}


Object P_Collect_Incremental () {
    /* if already collecting, scan a few pages and return */

    if (!inc_collection) {
        if (current_space == forward_space)
            Primitive_Error ("incremental garbage collection not enabled");
        else {
            inc_collection = 1;
            Finish_Collection ();
            inc_collection = 0;
            return (True);
        }
    } else {
        if (current_space != forward_space) {
            (void)Scanner ((pageno_t)1);
            GC_In_Progress = 0;
            if (protected_pages == 0)
                TerminateGC ();
            return (protected_pages ? False : True);
        } else {
            General_Collect (1);
            return (False);
        }
    }
    /*NOTREACHED*/
}

Object P_Collect () {
    /* Check the inc_collection flag. If an incremental GC is in
     * progress and the flag has been changed to false, finish
     * the collection.
     */

    if (!inc_collection && current_space != forward_space) {
        inc_collection = 1;
        Finish_Collection ();
        inc_collection = 0;
        return (Void);
    }

    if (current_space != forward_space) {
        Finish_Collection ();
        return (Void);
    } else {
        General_Collect (0);
        return (Void);
    }
}

void Generational_GC_Finalize () {
    if (current_space != forward_space)
        Finish_Collection ();
}

void Generational_GC_Reinitialize () {
#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
    InstallHandler ();
#endif
}


Object Internal_GC_Status (int strat, int flags) {
    Object list;
#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
    Object cell;
#endif
    GC_Node;

    list = Cons (Sym_Generational_GC, Null);
    GC_Link (list);
    switch (strat) {
    default:            /* query or stop-and-copy */
#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
        if (inc_collection) {
            cell = Cons (Sym_Incremental_GC, Null);
            (void)P_Set_Cdr (list, cell);
        }
#endif
        break;
    case GC_STRAT_GEN:
        if (flags == GC_FLAGS_INCR) {
#if defined(MPROTECT_SIG) || defined(MPROTECT_MMAP)
            inc_collection = 1;
            cell = Cons (Sym_Incremental_GC, Null);
            (void)P_Set_Cdr (list, cell);
#endif
        } else inc_collection = 0;
        break;
    }
    GC_Unlink;
    return (list);
}