1 /*
2  * Part of Scheme 48 1.9.  See file COPYING for notices and license.
3  *
4  * Authors: David Frese, Marcus Crestani, Artem Mironov, Robert Ransom,
5  * Mike Sperber, Martin Gasbichler
6  */
7 
8 
9 #include "generation_gc.h"
10 
11 #include <stdlib.h>
12 #include <string.h> /* memcpy */
13 
14 #include "scheme48heap.h"
15 
16 #include "areas.h"
17 #include "data.h"
18 #include "memory.h"
19 #include "page_alloc.h"
20 #include "utils.h"
21 #include "memory_map.h"
22 #include "page_constants.h"
23 #include "area_roots.h"
24 #include <assert.h>
25 #include "gc_config.h"
26 #include "measure.h"
27 #include "remset.h"
28 
29 #include <event.h>   // s48_run_time
30 
31 #define FOR_ALL_AREAS(areas, command) \
32 do { \
33   Area* area = areas; \
34   while (area != NULL) { \
35     Area* next = area->next; \
36     command; \
37     area = next; \
38   } \
39 } while(0)
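/* Illustrative usage sketch of FOR_ALL_AREAS (not part of the original file;
   the helper name and the hypothetical BIBOP_EXAMPLES guard are ours).
   Because the macro saves area->next before running the command, the command
   may free or relink the current area, which clear_space and
   delete_large_area below rely on. */
#ifdef BIBOP_EXAMPLES
static unsigned long example_count_areas(Area* list) {
  unsigned long n = 0;
  FOR_ALL_AREAS(list, n++);   /* `area' is bound inside the macro body */
  return n;
}
#endif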
40 
41 
42 typedef struct Generation {
43   int index;
44   Space* current_space;
45   Space* other_space;
46 
47   /* size of the generation after its last collection */
48   unsigned long last_size;
49 
  /* Number of collections started from this generation */
  unsigned long self_count;

  /* Number of collections this generation has taken part in (age_count) */
  unsigned long age_count;
55 
56 } Generation;
57 
58 
static Space* make_space(int generation_index) {
60   Space* space = (Space*)calloc(1, sizeof(Space));
61   if (space == NULL) {
62     s48_gc_error("make_space: out of memory");
63   }
64 
65   space->generation_index = generation_index;
66   return space;
67 }
68 
69 typedef struct {
70   /* a list of areas */
71   Area* large;
72 
  /* the area currently used for allocation: normally one of the two
     areas below, or in some cases one of the small areas of the
     first generation */
  Area* small;
76 
77   Area* small_below;
78   Area* small_above;
79 
80   /* only one area */
81   Area* weaks;
82 } CreationSpace;
83 
84 static CreationSpace creation_space;
85 
86 static unsigned long current_water_mark; /* pages */
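/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the water mark splits the
   S48_CREATION_SPACE_SIZE pages of the creation space into small_below (the
   first current_water_mark pages) and small_above (the rest); either pointer
   may be NULL when its share is zero, so the free-space sum below checks
   both. */
#ifdef BIBOP_EXAMPLES
static unsigned long example_creation_space_free_bytes(void) {
  unsigned long free_bytes = 0;
  if (creation_space.small_below != NULL)
    free_bytes += AREA_REMAINING(creation_space.small_below);
  if (creation_space.small_above != NULL)
    free_bytes += AREA_REMAINING(creation_space.small_above);
  return free_bytes;
}
#endif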
87 
88 /* from young to old */
89 static Generation generations[S48_GENERATIONS_COUNT];
90 
91 static char heap_is_initialized = 0;
92 static char gc_forbid_count = 0;
93 static unsigned long gc_count = 0;
94 static long gc_seconds = 0;
95 static long gc_mseconds = 0;
96 
static void recreate_creation_space() {
98   unsigned long s_below;
99   unsigned long s_above;
100 
101   /* free current areas */
102   if (creation_space.small_below != NULL)
103     s48_free_area(creation_space.small_below);
104   if (creation_space.small_above != NULL)
105     s48_free_area(creation_space.small_above);
106   if (creation_space.large != NULL)
107     s48_free_areas(creation_space.large);
108   creation_space.large = NULL;
109   if (creation_space.weaks != NULL)
110     s48_free_area(creation_space.weaks);
111 
112   /* create some new ones */
113   s_below = current_water_mark;
114   if (s_below != 0)
115     creation_space.small_below = s48_allocate_area(s_below, s_below, 0, AREA_TYPE_SIZE_SMALL);
116   else
117     creation_space.small_below = NULL;
118 
119   s_above = S48_CREATION_SPACE_SIZE - current_water_mark;
120   if (s_above != 0)
121     creation_space.small_above = s48_allocate_area(s_above, s_above, 0, AREA_TYPE_SIZE_SMALL);
122   else
123     creation_space.small_above = NULL;
124 
125   if (creation_space.small_below == NULL) {
126     creation_space.small = creation_space.small_above;
127   }
128   else {
129     creation_space.small = creation_space.small_below;
130   }
131 
132   creation_space.weaks = s48_allocate_area(S48_MINIMUM_WEAK_AREA_SIZE,
133 					   S48_MAXIMUM_WEAK_AREA_SIZE,
134 					   0,
135 					   AREA_TYPE_SIZE_WEAKS);
136 }
137 
138 /* FPage 1 */
void s48_initialize_bibop_heap() {
140 
141   int i;
142 
143   if (heap_is_initialized == 1) return;
144 
145   s48_initialize_page_allocation();
146 
147   current_water_mark = S48_DEFAULT_WATER_MARK;
148 
149   creation_space.small_below = NULL;
150   creation_space.small_above = NULL;
151   creation_space.large = NULL;
152   creation_space.weaks = NULL;
153 
154   recreate_creation_space();
155 
156   for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
157     generations[i].index = i;
158     generations[i].current_space = make_space(i);
159     generations[i].other_space = make_space(i);
160     generations[i].last_size = 0;
161     generations[i].self_count = 0;
162     generations[i].age_count = 0;
163   }
164 
165   heap_is_initialized++;
166 }
167 
168 /* this adds AREA to the "known" heap. Used by find_all for example. */
void s48_integrate_area(Area* area) {
170   Space* space = generations[0].current_space;
171   Area** a;
172   /* put it behind the first area of the large or small list of the
173      first generation */
174   if ((area->frontier - area->start) < S48_SMALL_OBJECT_LIMIT) {
175     if (space->small_area == NULL)
176       a = &space->small_area;
177     else
178       a = &space->small_area->next;
179   } else {
180     if (space->large_area == NULL)
181       a = &space->large_area;
182     else
183       a = &space->large_area->next;
184   }
185   area->generation_index = 0;
186   area->next = *a;
187   *a = area;
188 }
189 
190 #if (S48_ADJUST_WATER_MARK)
191 
static unsigned long aging_space_survival;
/* assumed to be missing here: collect() below zeroes and accumulates
   aging_space_before under the same S48_ADJUST_WATER_MARK guard, but no
   declaration appears in this file, so we add one */
static unsigned long aging_space_before;
static float last_aging_space_survival = 0; /* initial value does not
					       matter */
195 
static void adjust_water_mark(float aging_space_survival) {
  /* the easiest solution would be to set the water-mark according to
     this survival-percent value, but let's try to get some
     convergence, and to dampen extreme values */
200   last_aging_space_survival = ((aging_space_survival
201 			       + (gc_count * last_aging_space_survival)) /
202 			       (gc_count + 1));
203   /* maybe take int_max(gc_count, 1000) or something... */
204 
205   current_water_mark =
206     BYTES_TO_PAGES((unsigned long)
207 		   (PAGES_TO_BYTES_I_KNOW_THIS_CAN_OVERFLOW(S48_CREATION_SPACE_SIZE)
208 		    * last_aging_space_survival));
209   /* if the water-mark would be at the top, then nothing would be
210      copied into the aging_space, and we could not adjust the
211      water-mark in the future. */
212   if (current_water_mark == S48_CREATION_SPACE_SIZE)
213     current_water_mark--;
214 }
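/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the update above is an incremental
   mean, avg' = (x + n*avg) / (n + 1), so to a first approximation every
   collection's survival ratio gets equal weight.  E.g. with gc_count = 3,
   an old average of 0.25 and a new ratio of 0.45, the new average is
   (0.45 + 3*0.25) / 4 = 0.30. */
#ifdef BIBOP_EXAMPLES
static float example_running_mean(float old_mean, unsigned long n, float x) {
  return (x + (n * old_mean)) / (n + 1);
}
#endif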
215 #endif
216 
217 /********************************************************************
218  Starting a Collection
219  ********************************************************************/
220 
221 #define BROKEN_HEART_P S48_STOB_P
222 
223 static unsigned long calc_generation_area_size(Generation* g);
224 static unsigned long calc_creation_space_size(CreationSpace* c);
225 static long heap_size_count;
226 
static void heap_size_step(s48_address start, s48_address end) {
228   heap_size_count += end - start;
229 }
230 
long s48_heap_size() {
232   int i;
233   unsigned long size = 0;
234   for (i=0; i < S48_GENERATIONS_COUNT; i++){
235     size += calc_generation_area_size(&generations[i]);
236   }
237   size += calc_creation_space_size(&creation_space);
238   return size;
239 }
240 
static unsigned long calc_creation_space_size(CreationSpace* c) {
242   unsigned long size = 0;
243   FOR_ALL_AREAS(c->large,
244 		size += (area->end - area->start));
245   FOR_ALL_AREAS(c->small_below,
246 		size += (area->end - area->start));
247   FOR_ALL_AREAS(c->small_above,
248 		size += (area->end - area->start));
249   FOR_ALL_AREAS(c->weaks,
250 		size += (area->end - area->start));
251   return size;
252 }
253 
254 /* calc the WHOLE allocated heap (Semispace) */
static unsigned long calc_generation_area_size(Generation* g) {
256   unsigned long size = 0;
257   FOR_ALL_AREAS(g->current_space->small_area,
258 		size += (area->end - area->start));
259   FOR_ALL_AREAS(g->current_space->large_area,
260 		size += (area->end - area->start));
261   FOR_ALL_AREAS(g->current_space->weaks_area,
262 		size += (area->end - area->start));
263   return size;
264 }
265 
long s48_heap_live_size() {
267   heap_size_count = 0;
268   s48_walk_heap(&heap_size_step);
269   return heap_size_count;
270 }
271 
272 
273 /* An extant is either not a stored object, or it has a forwarding
274    pointer, or it is in an area that is not currently being collected.
275 */
276 
char s48_extantP(s48_value thing) {
278   if ((!S48_STOB_P(thing)) || BROKEN_HEART_P(S48_STOB_HEADER(thing))) {
279     return (0 == 0);
280   } else {
281     Area* area = s48_memory_map_ref(S48_ADDRESS_AT_HEADER(thing));
282     return ( (area == NULL) || (GC_ACTION_IGNORE == area->action) );
283   }
284 }
285 
static void clean_weak_pointers(Area* areas) {
287   while (areas != NULL) {
288     s48_address end = areas->frontier;
289     s48_address addr = S48_ADDRESS_INC(areas->start);
290     for (; addr < end; addr += S48_CELLS_TO_A_UNITS(2)) {
291       s48_value stob = *((s48_value*)addr);
292       if (!s48_extantP(stob))
293 	*((s48_value*)addr) = S48_FALSE;
294       else {
295 	/* maybe the object location has changed */
296 	if (S48_STOB_P(stob) && BROKEN_HEART_P(S48_STOB_HEADER(stob)))
297 	  *((s48_value*)addr) = S48_STOB_HEADER(stob);
298       }
299     }
300     areas = areas->next;
301   }
302 }
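/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the loop above assumes that a weaks
   area is a sequence of two-cell weak pointers, a header cell followed by
   one value cell, which is why it starts one cell past area->start and
   advances two cells at a time. */
#ifdef BIBOP_EXAMPLES
static unsigned long example_weak_pointers_in_area(Area* area) {
  return (area->frontier - area->start) / S48_CELLS_TO_A_UNITS(2);
}
#endif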
303 
static void set_targets(Space* space, Space* target) {
305   FOR_ALL_AREAS(space->small_area, area->target_space = target);
306   FOR_ALL_AREAS(space->large_area, area->target_space = target);
307   FOR_ALL_AREAS(space->weaks_area, area->target_space = target);
308 }
309 
310 /* forward declaration */
311 static unsigned long calc_areas_number(Area* next_area);
312 
313 
314 #define FOR_YOUNG_AREAS(areas, command) \
315 do { \
316   Area* area = areas; \
317   unsigned long count; \
318   unsigned long areas_number; \
319   unsigned long old_areas; \
320   count = 0; \
321   areas_number = calc_areas_number(areas); \
322   old_areas = areas_number / S48_PART_OF_OLD_AREAS ; \
323   while (area != NULL) { \
324     Area* next = area->next; \
325     if (count > old_areas) command; \
326     count++; \
327     area = next; \
328   } \
329 } while(0)
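/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the split above keeps the first
   old_areas + 1 list entries out of the command.  E.g. with 12 areas and
   S48_PART_OF_OLD_AREAS == 4, old_areas is 3 and the command runs for the
   8 areas at list positions 4..11 (count starts at 0). */
#ifdef BIBOP_EXAMPLES
static unsigned long example_young_area_count(unsigned long areas_number) {
  unsigned long old_areas = areas_number / S48_PART_OF_OLD_AREAS;
  return (areas_number > old_areas + 1) ? (areas_number - old_areas - 1) : 0;
}
#endif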
330 
/* The youngest areas will be recycled in the same generation */
static void reset_young_targets(Space* space, Space* target) {
  FOR_YOUNG_AREAS(space->small_area, area->target_space = target);
  /* Large objects should always be promoted as they are */
  /* FOR_YOUNG_AREAS(space->large_area, area->target_space = target); */
  FOR_YOUNG_AREAS(space->weaks_area, area->target_space = target);
}
338 
static void set_gc_actions(Space* space, gc_action_t small,
340 			   gc_action_t large, gc_action_t weaks) {
341   FOR_ALL_AREAS(space->small_area, area->action = small);
342   FOR_ALL_AREAS(space->large_area, area->action = large);
343   FOR_ALL_AREAS(space->weaks_area, area->action = weaks);
344 }
345 
/* How many Area(s) are in the linked list of areas */
static unsigned long calc_areas_number(Area* next_area){
348   unsigned long the_areas_number=0;
349 
350   while (next_area){
351     the_areas_number++;
352     next_area = next_area->next;
353   }
354 
355 #if (BIBOP_LOG)
356   s48_bibop_log("Areas Number: %i", the_areas_number);
357 #endif
358 
359   return the_areas_number;
360 }
361 
362 /* calc only the used heap (Semispace) */
static unsigned long calc_generation_size(Generation* g) {
364   unsigned long size = 0;
365   FOR_ALL_AREAS(g->current_space->small_area,
366 		size += (area->frontier - area->start));
367   FOR_ALL_AREAS(g->current_space->large_area,
368 		size += (area->frontier - area->start));
369   FOR_ALL_AREAS(g->current_space->weaks_area,
370 		size += (area->frontier - area->start));
371   return size;
372 }
373 
static unsigned long calc_generation_other_space_size(Generation* g) {
375   unsigned long size = 0;
376   FOR_ALL_AREAS(g->other_space->small_area,
377 		size += (area->frontier - area->start));
378   FOR_ALL_AREAS(g->other_space->large_area,
379 		size += (area->frontier - area->start));
380   FOR_ALL_AREAS(g->other_space->weaks_area,
381 		size += (area->frontier - area->start));
382   return size;
383 }
384 
385 
386 
387 /* FPage 6 - 7 - 8 */
inline static void init_areas(int count) {
389   int i, current_target,
390     creation_space_target_small_below_generation_index,
391     creation_space_target_generation_index;
392   unsigned long current_size;
393 
394   /* Generation indices for the creation_space */
395 #if (S48_GENERATIONS_COUNT > 1)
396   creation_space_target_small_below_generation_index = 1;
397 #else
398   creation_space_target_small_below_generation_index = 0;
399 #endif
400 
401   creation_space_target_generation_index = 0;
402 
  /* REMARK: At the very first collection the image has just been
     loaded; it consists mostly of compiled code that rarely changes,
     and the main program has not run yet. We can therefore keep the
     survivors of the first collection (of the creation_space, of the
     first generation - or both) in a protected generation (preferably
     the oldest one) which is never collected again (or, alternatively,
     only after a large number of collections). For this purpose we
     need at least 3 generations!  The option to activate this is
     S48_USE_STATIC_SPACE */
412 
413 #if (S48_USE_STATIC_SPACE)
414   if (s48_gc_count() == 0) {
415     creation_space_target_small_below_generation_index = S48_GENERATIONS_COUNT - 1;
416     creation_space_target_generation_index = creation_space_target_small_below_generation_index;
417   }
418 #endif
419 
420   /* FPage 6 */
421   /* initialize the creation_space */
422   /* the objects of the small_below area that will survive the
423      collection will be moved into an older generation */
424   if (creation_space.small_below != NULL) {
425     assert(creation_space.small_below->next == NULL);
426     creation_space.small_below->target_space =
427       generations[creation_space_target_small_below_generation_index].current_space;
428     creation_space.small_below->action = GC_ACTION_COPY_SMALL;
429   }
  /* the objects of the small_above area, the large areas and the weaks
     area that survive the collection will be moved (or marked) into
     the youngest (first) generation, to be collected again soon */
433   if (creation_space.small_above != NULL) {
434     assert(creation_space.small_above->next == NULL);
435     creation_space.small_above->target_space =
436       generations[creation_space_target_generation_index].current_space;
437     creation_space.small_above->action = GC_ACTION_COPY_SMALL;
438   }
439 
440   assert(creation_space.weaks->next == NULL);
441   creation_space.weaks->target_space = generations[creation_space_target_generation_index].current_space;
442   creation_space.weaks->action = GC_ACTION_COPY_WEAK;
443 
444   FOR_ALL_AREAS(creation_space.large,
445 		area->target_space = generations[creation_space_target_generation_index].current_space;
446 		area->action = GC_ACTION_MARK_LARGE );
447 
448   /* FPage 7 */
449   /* Promotion policy - Which generation should the live objects be
450      copied to? */
451 #if (BIBOP_LOG)
452 
453   /* all current sizes */
454 #if (S48_PROMOTION_THRESHOLD)
455   s48_bibop_log("S48_PROMOTION_THRESHOLD: %i", S48_PROMOTION_THRESHOLD);
456 #endif
457 
458 #if (S48_PROMOTION_HEAP_LIMIT)
459   s48_bibop_log("S48_PROMOTION_HEAP_LIMIT: %i", S48_PROMOTION_HEAP_LIMIT);
460 #endif
461 
462 #if (S48_PROMOTION_AGE_LIMIT)
463   s48_bibop_log("S48_PROMOTION_AGE_LIMIT: %i", S48_PROMOTION_AGE_LIMIT);
464 #endif
465 
466   for (i = S48_GENERATIONS_COUNT - 1; i > -1; i--) {
    /* Look out! Spaces are already swapped !!! */
468     current_size = calc_generation_other_space_size(&generations[i]);
469 
470 #if (S48_PROMOTION_THRESHOLD)
471     s48_bibop_log("Generation %i : %i ", i, (current_size - generations[i].last_size));
472 #endif
473 
474 #if (S48_PROMOTION_HEAP_LIMIT)
475     s48_bibop_log("Generation %i : %i ", i, current_size);
476 
477 #endif
478 
479 #if (S48_PROMOTION_AGE_LIMIT)
480     s48_bibop_log("Generation %i : Age Count  %i ", i, generations[i].age_count);
481     s48_bibop_log("Generation %i : Self Count %i ", i, generations[i].self_count);
482 #endif
483   }
484 #endif /* #if (BIBOP_LOG) */
485 
486 
487   /* initialize the areas that will be collected. */
488   for (i = 0; i < count; i++) {
489 
490     /* trace everything */
491     FOR_ALL_AREAS(generations[i].current_space->small_area,
492 		  area->trace = area->start);
493     FOR_ALL_AREAS(generations[i].current_space->large_area,
494 		  area->trace = area->start);
495 
    /* targets of the other_spaces are the current_space of the chosen
       generation according to the promotion option */
498 
    /* Look out! Spaces are already swapped !!! */
500     current_size = calc_generation_other_space_size(&generations[i]);
501 
502 #if (S48_PROMOTION_THRESHOLD)
503 
504     current_target = ( (current_size - generations[i].last_size)
505 	  > S48_PROMOTION_THRESHOLD)
506       ? i + 1
507       : i;
508 
509 #elif (S48_PROMOTION_HEAP_LIMIT)
    /* Look out! Spaces are already swapped !!! */
511     current_target = (current_size > S48_PROMOTION_HEAP_LIMIT)
512       ? i + 1
513       : i;
514 
515 #elif (S48_PROMOTION_AGE_LIMIT)
516     current_target = (generations[i].self_count > 0 &&
517 	 generations[i].self_count % S48_PROMOTION_AGE_LIMIT == 0)
518       ? i + 1
519       : i;
520 
521 #else
522 #error "BIBOP GC configuration error: no promotion policy defined"
523 #endif
524 
525 #if (S48_USE_STATIC_SPACE)
526     current_target = (s48_gc_count() == 0) ? creation_space_target_small_below_generation_index : current_target ;
527 #endif
528 
    /* Clamp the promotion target so it does not point past the last usable generation */
530 #if (S48_USE_STATIC_SPACE)
531     current_target = (current_target >= S48_GENERATIONS_COUNT - 1) ? S48_GENERATIONS_COUNT - 2 : current_target ;
532 #else
533     current_target = (current_target >= S48_GENERATIONS_COUNT) ? S48_GENERATIONS_COUNT - 1 : current_target ;
534 #endif
535 
536     /* promotion targets */
537     set_targets(generations[i].other_space, generations[current_target].current_space);
538 
539     /* Wilson's opportunistic object promotion targets */
540     if ( i != current_target ) {
541     reset_young_targets(generations[i].other_space,
542 			generations[i].current_space);
543     }
544 
545 #if (BIBOP_LOG)
546     s48_bibop_log("generations[%i].other_space -> generations[%i].current_space",
547 		  i, current_target);
548 #endif
549 
550     /* actions: the ones that will be evacuated now */
551     set_gc_actions(generations[i].other_space, GC_ACTION_COPY_SMALL,
552 		   GC_ACTION_MARK_LARGE, GC_ACTION_COPY_WEAK);
553     /* ignore the ones that will be filled now */
554     set_gc_actions(generations[i].current_space, GC_ACTION_IGNORE,
555 		   GC_ACTION_IGNORE, GC_ACTION_IGNORE);
556   }
557 
558   /* FPage 8 */
559   /* initialize the areas that are not collected this time */
560   for (i = count; i < S48_GENERATIONS_COUNT; i++) {
561     /* trace only what will be added to these */
562     /* maybe only the first "old" one will have to be traced ?? */
563     FOR_ALL_AREAS(generations[i].current_space->small_area,
564 		  area->trace = area->frontier);
565     FOR_ALL_AREAS(generations[i].current_space->large_area,
566 		  area->trace = area->frontier);
567 
568     /* the other spaces should be empty anyway */
569     set_gc_actions(generations[i].other_space, GC_ACTION_ERROR,
570 		   GC_ACTION_ERROR, GC_ACTION_ERROR);
571 
572     set_gc_actions(generations[i].current_space, GC_ACTION_IGNORE,
573 	 	   GC_ACTION_IGNORE, GC_ACTION_IGNORE);
574   }
575 }
576 
inline static void clear_space(Space* space) {
578   FOR_ALL_AREAS(space->small_area, s48_free_area(area)); space->small_area = NULL;
579   FOR_ALL_AREAS(space->large_area, s48_free_area(area)); space->large_area = NULL;
580   FOR_ALL_AREAS(space->weaks_area, s48_free_area(area)); space->weaks_area = NULL;
581 }
582 
static char trace_areas(Area* areas) {
584   char hit = 0;
585 
586   while (areas != NULL) {
587     while (1) {
588       s48_address start = areas->trace;
589       s48_address end = areas->frontier;
590       if (start != end) {
591 	s48_internal_trace_locationsB(areas, TRUE, start, end, "trace_areas");
592 	areas->trace = end;
593 	hit = 1;
594       } else
595 	break;
596     }
597     areas = areas->next;
598   }
599 
600   return hit;
601 }
602 
static void do_gc() {
604   char hit;
605   do {
606     int i;
607     char hit0, hit1;
608     hit = FALSE;
609 
610     /* maybe it's enough to trace up to max_gen+1 */
611     for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
612       hit0 = trace_areas(generations[i].current_space->small_area);
613       hit1 = trace_areas(generations[i].current_space->large_area);
614       hit = hit0 || hit1 || hit;
615     }
616 
617   } while ( hit );
618 }
619 
inline static void swap(Generation* g) {
621   Space* tmp = g->current_space;
622   g->current_space = g->other_space;
623   g->other_space = tmp;
624 }
625 
626 #if (MEASURE_GC)
627 /* We include this here, because the measurement code uses static
628    variables from here a lot */
629 #include "measure.ci"
630 #endif
631 
long s48_gc_count() {
633   return gc_count;
634 }
635 
long s48_gc_run_time(long* mseconds) {
637   *mseconds = gc_mseconds;
638   return gc_seconds;
639 }
640 
641 /* collect the first COUNT generations */
642 /* FPage 5 ... */
static void collect(int count, psbool emergency) {
644   int i;
645 #if (S48_USE_STATIC_SPACE)
646   psbool major_p = (count == (S48_GENERATIONS_COUNT - 1));
647 #else
648   psbool major_p = (count == S48_GENERATIONS_COUNT);
649 #endif
650 
  /* this is for the water-mark adjustment at the end of the collection */
652 #if (S48_ADJUST_WATER_MARK)
653   aging_space_survival = 0;
654   aging_space_before = 0;
655   FOR_ALL_AREAS(generations[0].current_space->small_area,
656 		aging_space_before += (area->frontier - area->start));
657 #endif
658 
659   /* swap spaces and update age_count first */
660   /* FPage 5 */
661   for (i = 0; i < count; i++) {
662     swap(&generations[i]);
663     generations[i].age_count++;
664 
665 #if (BIBOP_LOG)
666     s48_bibop_log("swapped current <-> other generation %i", i);
667 #endif
668   }
669 
  /* update self_count for the generation from which the collection
     starts */
672   generations[count-1].self_count++;
673 
674   /* Initialize actions, targets and trace pointers */
675   /* FPage 6 - 7 - 8 */
676   init_areas(count);
677 
678   /* trace all roots to the younger generations */
679 #if (S48_USE_REMEMBERED_SETS)
680   for (i = 0; i < count; i++) {
681     RemSet* rs; RemSet* nrs;
682     FOR_ALL_AREAS(generations[i].other_space->small_area,
683 		  s48_trace_remset(area->remset));
684     FOR_ALL_AREAS(generations[i].other_space->weaks_area,
685 		  s48_trace_remset(area->remset));
    /* because large areas are "reused", the remembered set has to be
       freed explicitly */
688     FOR_ALL_AREAS(generations[i].other_space->large_area,
689 		  nrs = s48_make_remset();
690 		  rs = area->remset;
691 		  area->remset = nrs;
692 		  s48_trace_remset(rs);
693 		  s48_free_remset(rs);
694 		  );
695   }
696 #endif
697 
698   /* FPage 9 ... */
699   for (i = count; i < S48_GENERATIONS_COUNT; i++) {
700 
701 #if (BIBOP_LOG)
702     s48_bibop_log("Tracing roots from current-space of generation %i\n", i);
703 #endif
704 
705     /* FPage 9 ... area_roots.c */
706     s48_trace_areas_roots(generations[i].current_space->small_area);
707     s48_trace_areas_roots(generations[i].current_space->large_area);
708   }
709 
710   s48_gc_root();
711 
712   /* do the tracing until everything is done */
713   do_gc();
714 
  /* clean up */
716   for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
717     clean_weak_pointers(generations[i].current_space->weaks_area);
718   }
719 
720   s48_post_gc_cleanup(major_p, emergency);
721 
722   /* for objects resurrected in some post-gc-cleanup, trace again */
723   do_gc();
724 
725   for (i = 0; i < count; i++) {
726     clear_space(generations[i].other_space);
727     generations[i].last_size = calc_generation_size(&generations[i]);
728   }
729 
730   /* reset creation space */
731 #if (S48_ADJUST_WATER_MARK)
732   if (aging_space_before != 0)
733     adjust_water_mark((float)aging_space_survival / aging_space_before);
734 #endif
735   recreate_creation_space();
736 
737   gc_count++;
738 }
739 
740 static psbool do_collect(psbool force_major, psbool emergency);
741 
742 /* FPage 4 - 5  */
void s48_collect(psbool force_major) {
744   /*
745     The BIBOP GC's heap gets an absolute maximal size with the -h flag
746     of scheme48.
747 
748     -h <heap_size> : means <heap_size> cells (0 means no size limit).
749 
750     Without the -h flag, the heap size gets a default value
751     (init.c).  We have to calculate a minimal heap size, set by the
752     special configuration of BIBOP (gc_config.h), to decide during the
753     initialization (s48_initialize_bibop_heap()) if the given
754     <heap_size> is reasonable or not. This is done after the
755     allocation of the image_areas (s48_initialize_image_areas()). If
756     the maximal heap size is too small we increase it to a reasonable
757     value (the user is informed about that).
758 
759     The variable 'actual_heap_size' (in cells) is updated before each
760     collection and represents the total size of all used areas (but
761     without allocated free areas, and without the memory used for the
762     various structures like Area, Metapage, Generation etc.). If this
763     actual heap size rises above the user defined (or default) maximal
764     heap size, we halt the program.
765 
766 */
767   unsigned long user_defined_hsize, heap_live_size;
768   psbool was_major;
769   long start_seconds, start_mseconds, end_seconds, end_mseconds;
770 
771   start_seconds = s48_run_time(&start_mseconds);
772 
773   was_major = do_collect(force_major, FALSE);
774 
775   heap_live_size = S48_BYTES_TO_CELLS(s48_heap_live_size());
776   user_defined_hsize = s48_max_heap_size();
777 
778   if ((user_defined_hsize > 0) &&
779       (heap_live_size > (user_defined_hsize *
780 			 ((100.0 - S48_EMERGENCY_PERCENTAGE)/100.0)))) {
781     /* try again with a major collection. If it's still too much
782        afterwards, quit. */
783     if (! was_major)
784       do_collect(TRUE, TRUE);
785     heap_live_size = S48_BYTES_TO_CELLS(s48_heap_live_size());
786     if (heap_live_size > user_defined_hsize)
787       s48_gc_error("Scheme 48 heap overflow (max heap size %i cells)\n",
788 		   user_defined_hsize);
789   }
790   end_seconds = s48_run_time(&end_mseconds);
791   if (end_mseconds >= start_mseconds) {
792     gc_seconds = gc_seconds + (end_seconds - start_seconds);
793     gc_mseconds = gc_mseconds + (end_mseconds - start_mseconds);
794   }
795   else {
796     gc_seconds = gc_seconds + ((end_seconds - start_seconds) - 1);
797     gc_mseconds = gc_mseconds + ((1000 + end_mseconds) - start_mseconds);
798   }
799 }
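/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the emergency check in s48_collect
   above fires before the hard limit is reached.  Assuming, say,
   S48_EMERGENCY_PERCENTAGE == 20 and a user limit of 1,000,000 cells, a
   live size above 800,000 cells already forces a major collection, while
   only a live size above the full 1,000,000 cells is a fatal heap
   overflow. */
#ifdef BIBOP_EXAMPLES
static double example_emergency_limit_cells(unsigned long user_defined_hsize) {
  return user_defined_hsize * ((100.0 - S48_EMERGENCY_PERCENTAGE) / 100.0);
}
#endif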
800 
static psbool do_collect(psbool force_major, psbool emergency) {
802   /* Which generations should be collected? */
803 
804   /* collect up to the oldest generation that has grown enough since
805      its last collection. */
  /* The youngest generation is always collected */
807   /* FPage 5 */
808   int c;  /* generation number */
809 
810 #if (BIBOP_LOG)
811 
812   /* all current sizes */
813 #if (S48_COLLECTION_THRESHOLD)
814   s48_bibop_log("S48_COLLECTION_THRESHOLD: %i", S48_COLLECTION_THRESHOLD);
815 #endif
816 
817 #if (S48_COLLECTION_HEAP_LIMIT)
818   s48_bibop_log("S48_COLLECTION_HEAP_LIMIT: %i", S48_COLLECTION_HEAP_LIMIT);
819 #endif
820 
821 #if (S48_COLLECTION_AGE_LIMIT)
822   s48_bibop_log("S48_COLLECTION_AGE_LIMIT: %i", S48_COLLECTION_AGE_LIMIT);
823 #endif
824 
825  { int i;
826   for (i = S48_GENERATIONS_COUNT - 1; i > -1; i--) {
827     unsigned long current_size;
828     current_size = calc_generation_size(&generations[i]);
829 
830 #if (S48_COLLECTION_THRESHOLD)
831     s48_bibop_log("Generation %i : %i ", i, (current_size - generations[i].last_size));
832 #endif
833 
834 #if (S48_COLLECTION_HEAP_LIMIT)
835   s48_bibop_log("Generation %i : %i ", i, current_size);
836 
837 #endif
838 
839 #if (S48_COLLECTION_AGE_LIMIT)
840   s48_bibop_log("Generation %i : %i ", i, generations[i].age_count);
841 #endif
842   }}
843 #endif /* #if (BIBOP_LOG) */
844 
845   /* Which generation should be collected? */
846 #if (S48_USE_STATIC_SPACE)
847   c = S48_GENERATIONS_COUNT - 1;
848 #else
849   c = S48_GENERATIONS_COUNT;
850 #endif
851 
852   if (! force_major) {
853    for (; c > 1; c--) {
854     unsigned long current_size;
855     current_size = calc_generation_size(&generations[c-1]);
856 
857 #if (S48_COLLECTION_THRESHOLD)
858     if ((current_size - generations[c-1].last_size) > S48_COLLECTION_THRESHOLD)
859       break;
860 #endif
861 
862 #if (S48_COLLECTION_HEAP_LIMIT)
863     if (current_size > S48_COLLECTION_HEAP_LIMIT)
864       break;
865 #endif
866 
867 #if (S48_COLLECTION_AGE_LIMIT)
868     if (generations[c-1].self_count > 0 &&
869 	generations[c-1].self_count % S48_COLLECTION_AGE_LIMIT == 0)
870       break;
871 #endif
872 
873    }
874   }
875 
876 #if (BIBOP_LOG)
  s48_bibop_log("Generation chosen: %i", c-1);
878   s48_bibop_log("Starting Collection...");
879 #endif
880 
881 #if (MEASURE_GC)
882   measure_before_collection(c);
883 #endif
884 
885   /*************************************/
  /* Now the actual collection happens */
887   /* FPage 5 ... */
888   collect(c, emergency);
889   /*************************************/
890 
891 #if (MEASURE_GC)
892   measure_after_collection(c);
893 #endif
894 
895 #if (BIBOP_LOG)
896   s48_bibop_log("Collection done!");
897 #endif
898 
899 #if (S48_USE_STATIC_SPACE)
900   return (c == S48_GENERATIONS_COUNT-1);
901 #else
902   return (c == S48_GENERATIONS_COUNT);
903 #endif
904 }
905 
906 /*********************************************************************
907  Tracing
908  ********************************************************************/
909 
910 /* forward declarations */
911 
912 inline static void mark_large(Area* area, Space* to_space);
913 inline static Area* allocate_small_area(Space* space,
914 					unsigned long size_in_bytes);
915 inline static Area* allocate_weak_area(Space* space);
916 inline static Area* allocate_large_area(Space* space,
917 					unsigned long size_in_bytes);
918 
919 /* the value STOB has been written to location ADDR */
inline static void call_internal_write_barrier(Area* maybe_area, char area_looked_up, s48_address addr,
921 					       s48_value stob, Area* to_area) {
922   if (!area_looked_up) maybe_area = s48_memory_map_ref(addr);
923   /* if maybe_area is still NULL, it must have been a write to a
924      location outside of the heap, e.g. a temporary pointer or
925      something in the root-set; we can ignore it. */
926   if ((maybe_area != NULL) && (maybe_area->generation_index > 0))
927     s48_internal_write_barrier(maybe_area, addr, stob, to_area);
928 }
929 
inline static void call_internal_write_barrier2(Area* maybe_area, char area_looked_up, s48_address addr,
931 						s48_value stob) {
932   call_internal_write_barrier(maybe_area, area_looked_up, addr, stob,
933 			      s48_memory_map_ref(S48_ADDRESS_AT_HEADER(stob)));
934 }
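/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the condition wrapped by the two
   helpers above.  Only writes into areas that belong to generation 1 or
   older are recorded in a remembered set; writes into generation 0
   (which includes the creation-space areas) and writes to addresses
   outside the heap (roots, temporaries) are ignored. */
#ifdef BIBOP_EXAMPLES
static char example_needs_write_barrier_p(s48_address addr) {
  Area* a = s48_memory_map_ref(addr);
  return (a != NULL) && (a->generation_index > 0);
}
#endif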
935 
936 #if (S48_HAVE_TRANSPORT_LINK_CELLS)
937 
static Area* make_small_available_in_no_gc(Space* space,
939 					   unsigned long size_in_bytes) {
940   Area* area = space->small_area;
941   if (size_in_bytes > AREA_REMAINING(area)) {
942     area = allocate_small_area(space, size_in_bytes);
943   }
944   return area;
945 }
946 
static s48_address allocate_small_in_no_gc(Space* space,
948 					   unsigned long size_in_bytes) {
949   Area* area = make_small_available_in_no_gc(space, size_in_bytes);
950   s48_address addr = area->frontier;
951   area->frontier += S48_BYTES_TO_A_UNITS(size_in_bytes);
952   return addr;
953 }
954 
static s48_value make_stob(long type, unsigned long size_in_cells) {
956   /* Must work during a collection! */
957 
958   unsigned long size_in_bytes = S48_CELLS_TO_BYTES(size_in_cells);
959 
960   /* Allocate a place for it */
961   s48_address addr = allocate_small_in_no_gc(
962       generations[0].current_space,
963       S48_STOB_OVERHEAD_IN_BYTES + size_in_bytes);
964 
965   /* Initialize */
966   assert(S48_STOB_OVERHEAD_IN_BYTES == sizeof(s48_value));
967   *((s48_value*)addr) = S48_MAKE_HEADER(type, size_in_bytes);
968   memset(addr + S48_STOB_OVERHEAD_IN_A_UNITS, 0, size_in_bytes);
969 
970   return S48_ADDRESS_TO_STOB_DESCRIPTOR(addr + S48_STOB_OVERHEAD_IN_A_UNITS);
971 }
972 
static s48_value make_pair(s48_value car, s48_value cdr) {
974   s48_value result = make_stob(S48_STOBTYPE_PAIR, 2);
975   S48_UNSAFE_SET_CAR(result, car);
976   S48_UNSAFE_SET_CDR(result, cdr);
977   return result;
978 }
979 
static void append_tconcB(s48_value tconc, s48_value elem) {
981   /* A tconc is a pair, whose car points to the first pair of a list
982      and whose cdr points to the last pair of this list. */
983 
984   /* elem must already be in the "to space"! */
985 
986   s48_value tconc_tail = S48_UNSAFE_CDR(tconc);
987 
988   assert(S48_PAIR_P(tconc));
989 
  /* Though the tconc must already be in the "to space", its cdr (and
     car) could still point to the "from space". But that does not
     matter here, because if it still has to be copied, its (already
     correct) contents will be ignored in the tracing. And because we
     only write pointers to objects in the "to space", nothing has to
     be traced additionally here. */
996 
997   if (S48_PAIR_P(tconc_tail)) {
998     /* create a new pair */
999     s48_value newpair = make_pair(S48_FALSE, S48_FALSE);
1000 
1001     /* enqueue the tlc (=elem) in the tconc queue */
1002     S48_UNSAFE_SET_CAR(tconc_tail, elem);
1003     S48_UNSAFE_SET_CDR(tconc_tail, newpair);
1004     S48_UNSAFE_SET_CDR(tconc, newpair); /* new tail */
1005   }
1006   /* else: silently ignoring malformed tconc */
1007 }
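/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the tconc invariant append_tconcB
   relies on.  An empty tconc is a pair whose car and cdr both point at the
   same dummy pair; appending stores the element into the dummy pair and
   chains a fresh dummy behind it as the new tail. */
#ifdef BIBOP_EXAMPLES
static s48_value example_make_empty_tconc(void) {
  s48_value dummy = make_pair(S48_FALSE, S48_FALSE);
  return make_pair(dummy, dummy);
}
#endif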
1008 
static void trace_transport_link_cell(Area* maybe_area, char area_looked_up,
1010                                       s48_address contents_pointer,
1011 				      unsigned long size_in_a_units) {
1012   s48_value tlc = S48_ADDRESS_TO_STOB_DESCRIPTOR(contents_pointer);
1013   s48_value old_key;
1014   char key_moved_p;
1015   assert(S48_TRANSPORT_LINK_CELL_P(tlc));
1016   old_key = S48_UNSAFE_TRANSPORT_LINK_CELL_KEY(tlc);
1017 
1018   /* ...trace the current tlc to make sure that every pointer is up-to-date. */
1019   s48_internal_trace_locationsB(
1020     maybe_area, area_looked_up, contents_pointer,
1021     contents_pointer + size_in_a_units,
1022     "trace_transport_link_cell");
1023 
1024   /* Hint: We will not recognize large keys "moving" into an older
1025      generation; but the tlc-logic is only interested in keys changing
1026      their address anyway. So that does not matter */
1027   key_moved_p = (S48_UNSAFE_TRANSPORT_LINK_CELL_KEY(tlc) != old_key);
1028 
1029   if (key_moved_p) {
1030     s48_value tconc = S48_UNSAFE_TRANSPORT_LINK_CELL_TCONC(tlc);
1031     /* If the tconc field is a pair... */
1032     if (S48_FALSE_P(tconc))
1033       {} /* ignore */
1034     else if (S48_PAIR_P(tconc)
1035 	     && S48_PAIR_P(S48_UNSAFE_CAR(tconc))
1036 	     && S48_PAIR_P(S48_UNSAFE_CDR(tconc))) {
1037       /* ...then add the tlc to the end of the tconc queue. */
1038       append_tconcB(tconc, tlc);
1039       /* ...and set the tconc field to null (false). */
1040       S48_UNSAFE_SET_TRANSPORT_LINK_CELL_TCONC(tlc, S48_FALSE);
1041     }
1042     else
1043       {} /*printf("Warning: malformed tlc at %p.\n", S48_ADDRESS_AT_HEADER(tlc));*/
1044   }
1045   assert(S48_TRANSPORT_LINK_CELL_P(tlc));
1046 }
1047 #endif /* S48_HAVE_TRANSPORT_LINK_CELLS */
1048 
1049 /* EKG checks for broken hearts - only used internally in
1050    s48_trace_locationsB */
1051 
1052 #define EKG(label) {\
1053     long header = S48_STOB_HEADER(trace_stob_stob);\
1054     if (BROKEN_HEART_P((s48_value)header)) {\
1055       *((s48_value*)addr) = header;\
1056       call_internal_write_barrier2(maybe_area, area_looked_up, addr,	\
1057 				  (s48_value)header);\
1058       addr = next;\
1059       goto loop;\
1060     } else {\
1061       copy_header = header;\
1062       copy_thing = trace_stob_stob;\
1063       goto label;\
1064     }\
1065   }
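/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the forwarding convention EKG tests
   for.  Once do_copy_object (below) has copied an object it overwrites the
   old header word with the new stob descriptor; since a real header is
   never a stob, BROKEN_HEART_P (an alias for S48_STOB_P) on the header
   word tells whether the object has already been moved. */
#ifdef BIBOP_EXAMPLES
static char example_forwarded_p(s48_value stob) {
  return BROKEN_HEART_P(S48_STOB_HEADER(stob)) ? 1 : 0;
}
#endif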
1066 
1067 
void do_copy_object(s48_address addr, /* addr of pointer */
1069 		    Area * maybe_area, /* laying in area, if known */
1070 		    char area_looked_up, /* area known? */
1071 		    Area * from_area, /* pointing in area */
1072 		    s48_value copy_thing, /* stob descriptor */
1073 		    s48_value copy_header, /* stob header */
1074 		    Area * copy_area /* target area */
1075 		    ) {
1076    /* we start writing at the frontier location */
1077    s48_address frontier = copy_area->frontier;
1078 
1079    /* the data, means after the header, will be written at this location */
1080    s48_address data_addr = frontier + S48_STOB_OVERHEAD_IN_A_UNITS;
1081 
   /* Since an s48_address is always 4-byte aligned, the lower 2 bits are always 00 */
1083    /* We use these 2 bits for the STOB-TAG: 11 to make a scheme-stob */
1084    s48_value new = S48_ADDRESS_TO_STOB_DESCRIPTOR(data_addr);
1085    assert(s48_memory_map_ref(S48_ADDRESS_AT_HEADER(new)) == copy_area);
1086 
1087 #if (S48_ADJUST_WATER_MARK)
   /* count the sizes of small objects of the first generation that survive */
1089    if ((from_area->generation_index == 0) &&
1090        (from_area != creation_space.small_below) &&
1091        (from_area != creation_space.small_above) &&
1092        (from_area->action == GC_ACTION_COPY_SMALL))
1093      aging_space_survival += S48_HEADER_LENGTH_IN_A_UNITS(copy_header) +
1094        S48_STOB_OVERHEAD_IN_BYTES;
1095 #endif
1096 
1097    /* count every surviving obj */
1098 #if (MEASURE_GC)
1099    all_surviving_obj += S48_HEADER_LENGTH_IN_A_UNITS(copy_header) +
1100      S48_STOB_OVERHEAD_IN_BYTES;
1101 #endif
1102 
1103    /* copy the object to the new location */
1104    /* first the header at the frontier location */
1105    *((s48_value*)frontier) = copy_header;
1106 
   /* and then the data (the thing's contents, which live right after the
      header) at the data_addr location */
1109    assert(AREA_REMAINING(copy_area) >= (S48_HEADER_LENGTH_IN_BYTES(copy_header)
1110 					+ S48_STOB_OVERHEAD_IN_BYTES));
1111 
1112    memcpy((void*)data_addr, S48_ADDRESS_AFTER_HEADER(copy_thing, void),
1113 	  S48_HEADER_LENGTH_IN_BYTES(copy_header));
1114 
1115    /* break heart */
1116    /* alternative: S48_STOB_HEADER(copy_thing) = new;*/
1117    *((s48_value*)S48_ADDRESS_AT_HEADER(copy_thing)) = new;
1118 
1119    /* advance the allocation pointer */
1120    copy_area->frontier = data_addr + S48_HEADER_LENGTH_IN_A_UNITS(copy_header);
1121 
1122    /* overwrite the old stob with the new one */
1123    *((s48_value*)addr) = new;
1124 
1125    /* if we are tracing an area of an older generation call write_barrier */
1126    call_internal_write_barrier(maybe_area, area_looked_up, addr, new, copy_area);
1127 }
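/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): the space a copy consumes in the
   target area, i.e. the amount do_copy_object above advances the frontier:
   one header word plus the object's data, measured in A-units. */
#ifdef BIBOP_EXAMPLES
static unsigned long example_copied_size_in_a_units(s48_value header) {
  return S48_STOB_OVERHEAD_IN_A_UNITS + S48_HEADER_LENGTH_IN_A_UNITS(header);
}
#endif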
1128 
1129 /*  Copy everything pointed to from somewhere between START (inclusive)
1130     and END (exclusive).
1131 */
1132 
void s48_internal_trace_locationsB(Area* maybe_area, char area_looked_up, s48_address start,
1134 				   s48_address end, char* called_from) {
1135   s48_address addr = start;
1136   s48_address next;
1137   s48_value thing;
1138 
1139   s48_value trace_stob_stob;
1140   long copy_header;
1141   s48_value copy_thing;
1142   Area* copy_area;
1143   Area* from_area;
1144   Space* copy_to_space;
1145 
1146  loop: {
1147     if (addr < end) {
1148       thing = *((s48_value*) addr);
1149       next = S48_ADDRESS_INC(addr);
1150       if (S48_HEADER_P(thing)) {
1151 	if (S48_B_VECTOR_HEADER_P(thing)) {
1152 	  addr = next + S48_HEADER_LENGTH_IN_A_UNITS(thing);
1153 	}
1154 	else if (S48_HEADER_TYPE(thing) == S48_STOBTYPE_CONTINUATION) {
1155 	  long size = S48_HEADER_LENGTH_IN_A_UNITS(thing);
1156 	  extern void s48_trace_continuation(char *, long); /* BIBOP-specific */
1157 	  s48_trace_continuation(next, size);
1158 	  addr = next + size;
1159 	}
1160 #if (S48_HAVE_TRANSPORT_LINK_CELLS)
1161 	else if (S48_HEADER_TYPE(thing) == S48_STOBTYPE_TRANSPORT_LINK_CELL) {
1162 	  long size = S48_HEADER_LENGTH_IN_A_UNITS(thing);
1163 	  trace_transport_link_cell(maybe_area, area_looked_up, next, size);
1164 	  addr = next + size;
1165 	}
1166 #endif
1167 	else {
1168 	  addr = next;
1169 	}
1170 	goto loop;
1171       } else if (! S48_STOB_P(thing)) {
1172 	addr = next;
1173 	goto loop;
1174       } else {
1175 	/* it's a stob */
1176 	trace_stob_stob = thing;
1177 	goto trace_stob;
1178       }
1179     }
1180     return;
1181   }
1182 
1183  trace_stob: { /* parameter: trace_stob_stob */
1184    from_area = s48_memory_map_ref(S48_ADDRESS_AT_HEADER(trace_stob_stob));
1185    if (from_area != NULL) {
1186      switch (from_area->action) {
1187      case GC_ACTION_COPY_SMALL: {
1188        copy_to_space = from_area->target_space;
1189        EKG(copy_small);
1190      } break;
1191      case GC_ACTION_COPY_MIXED: {
1192        copy_to_space = from_area->target_space;
1193        EKG(copy_mixed);
1194      } break;
1195      case GC_ACTION_COPY_WEAK: {
1196        copy_to_space = from_area->target_space;
1197        EKG(copy_weak_pointer);
1198      } break;
1199      case GC_ACTION_IGNORE: {
1200        call_internal_write_barrier(maybe_area, area_looked_up, addr, trace_stob_stob, from_area);
1201        addr = next;
1202        goto loop;
1203      } break;
1204      case GC_ACTION_MARK_LARGE: {
1205        copy_to_space = from_area->target_space;
1206        mark_large(from_area, copy_to_space);
1207        /* a large object has been "copied" */
1208        call_internal_write_barrier(maybe_area, area_looked_up, addr, trace_stob_stob, from_area);
1209        addr = next;
1210        goto loop;
1211      } break;
1212      case GC_ACTION_ERROR: {
1213        s48_gc_error("got error gc-action in the %i generation", from_area->generation_index + 1);
1214        return; /* Never reached */
1215      } break;
1216      default: {
1217        s48_gc_error("got unexpected gc-action %d in the %i generation", from_area->action, from_area->generation_index + 1);
1218        return; /* Never reached */
1219      }
1220      }
1221    }
1222    else {
1223      s48_gc_error("illegal stob descriptor found while tracing address %p called from %s",
1224 		  addr, called_from);
1225      return; /* Never reached */
1226    }
1227  }
1228 
1229   assert(FALSE); /* we should never get here */
1230 
1231  /* Find out which is the actual copy_area for small, large, etc. object */
1232  copy_small: { /* parameter: copy_to_space, copy_header, copy_thing */
1233    /* get the current Area of the copy_space, means target_space */
1234    Area* area = copy_to_space->small_area;
1235    unsigned long size_in_bytes = (S48_HEADER_LENGTH_IN_A_UNITS(copy_header)
1236 				  + S48_STOB_OVERHEAD_IN_A_UNITS);
1237    if (size_in_bytes <= AREA_REMAINING(area))
1238 
1239      /* If the object fits then this is the copy_area ...*/
1240      copy_area = area;
1241    else
1242      /*  otherwise, allocate a small area in this space */
1243      copy_area = allocate_small_area(copy_to_space, size_in_bytes);
1244    goto copy_object;
1245  }
1246 
1247  copy_large: { /* parameter: copy_to_space, copy_header, copy_thing */
1248    copy_area = allocate_large_area( copy_to_space,
1249 				    S48_HEADER_LENGTH_IN_BYTES(copy_header) +
1250 				    S48_STOB_OVERHEAD_IN_BYTES );
1251    goto copy_object;
1252  }
1253 
1254  copy_mixed: { /* parameter: copy_to_space, copy_header, copy_thing */
1255    if (S48_STOBTYPE_WEAK_POINTER == S48_HEADER_TYPE(copy_header))
1256      goto copy_weak_pointer; /* uses copy_to_space, copy_thing! */
1257    else if (S48_HEADER_LENGTH_IN_BYTES(copy_header) < S48_SMALL_OBJECT_LIMIT)
1258      goto copy_small; /* uses copy_to_space, copy_thing, copy_header! */
1259    else
     goto copy_large; /* ditto */
1261  }
1262 
1263  copy_weak_pointer: { /* parameter: copy_to_space, copy_thing */
1264    Area* area = copy_to_space->weaks_area;
1265    /*copy_header = WEAK_POINTER_HEADER;*/
1266    if ((unsigned long) (S48_HEADER_LENGTH_IN_A_UNITS(copy_header)
1267 			+ S48_STOB_OVERHEAD_IN_A_UNITS)
1268        < AREA_REMAINING(area))
1269      copy_area = area;
1270    else
1271      copy_area = allocate_weak_area(copy_to_space);
1272    goto copy_object;
1273  }
1274 
1275  copy_object: { /* parameter: from_area, copy_thing, copy_header, copy_area */
1276    do_copy_object(addr, maybe_area, area_looked_up, from_area, copy_thing, copy_header, copy_area);
1277    /* continue behind that stob */
1278    addr = next;
1279    goto loop;
1280  }
1281 } /* end: trace_locationsB */
1282 
1283 /* Traces between START (inclusive) and END (exclusive). */
void s48_trace_locationsB(s48_address start, s48_address end) {
1285   s48_internal_trace_locationsB(NULL, FALSE, start, end, "s48_trace_locationsB");
1286 }
1287 
1288 /* s48_trace_value passes the location of STOB to
1289    s48_trace_locationsB. */
1290 
s48_value s48_trace_value(s48_value stob) {
1292   s48_address addr = (s48_address)&stob;
1293   (void)s48_trace_locationsB(addr, S48_ADDRESS_INC(addr));
1294   /* stob now holds the new location of the value... */
1295   return stob;
1296 }
1297 
1298 /* s48_trace_stob_contentsB passes the contents of a d-vector stob to
1299    s48_trace_locations. Never call this with b-vectors! */
1300 
void s48_trace_stob_contentsB(s48_value stob) {
1302   s48_address start = (s48_address)S48_ADDRESS_AFTER_HEADER(stob, void);
1303   unsigned long size = S48_BYTES_TO_A_UNITS(S48_STOB_BYTE_LENGTH(stob));
1304   s48_trace_locationsB(start, (start + size));
1305 }
1306 
1307 /* creating new areas during gc */
1308 
inline static Area* allocate_small_area(Space* space,
1310 					unsigned long size_in_bytes) {
1311   Area* area = s48_allocate_area(ulong_max(S48_MINIMUM_SMALL_AREA_SIZE,
1312 					   BYTES_TO_PAGES(size_in_bytes)),
1313 				 ulong_max(S48_MAXIMUM_SMALL_AREA_SIZE,
1314 					   BYTES_TO_PAGES(size_in_bytes)),
1315 				 (unsigned char)space->generation_index,
1316 				 AREA_TYPE_SIZE_SMALL);
1317   area->action = GC_ACTION_IGNORE;
1318   area->next = space->small_area;
1319   space->small_area = area;
1320 
1321   return area;
1322 }
1323 
inline static Area* allocate_large_area(Space* space,
1325 					unsigned long size_in_bytes) {
1326   unsigned long pages = BYTES_TO_PAGES(size_in_bytes);
1327   Area* area = s48_allocate_area(pages,
1328 				 pages,
1329 				 (unsigned char)space->generation_index,
1330 				 AREA_TYPE_SIZE_LARGE);
1331   area->action = GC_ACTION_IGNORE;
1332   area->next = space->large_area;
1333   space->large_area = area;
1334 
1335   return area;
1336 }
1337 
inline static Area* allocate_weak_area(Space* space) {
1339   Area* area = s48_allocate_area(S48_MINIMUM_WEAK_AREA_SIZE,
1340 				 S48_MAXIMUM_WEAK_AREA_SIZE,
1341 				 (unsigned char)space->generation_index,
1342 				 AREA_TYPE_SIZE_WEAKS);
1343   area->action = GC_ACTION_IGNORE;
1344   area->next = space->weaks_area;
1345   space->weaks_area = area;
1346 
1347   return area;
1348 }
1349 
1350 /* Remove AREA from from-space's list and put it on to-space's.  Ignore
1351    AREA from now on.
1352 */
1353 
inline static void delete_large_area(Area* large_area) {
1355   char hit = FALSE;
1356   FOR_ALL_AREAS(creation_space.large,
1357 		if (area == large_area) hit = TRUE);
1358   if (hit)
1359     creation_space.large = s48_delete_area(creation_space.large, large_area);
1360   else {
1361     Space* from_space = generations[large_area->generation_index].other_space;
1362     from_space->large_area = s48_delete_area(from_space->large_area, large_area);
1363   }
1364 }
1365 
inline static void mark_large(Area* area, Space* to_space) {
1367   delete_large_area(area);
1368 
1369   area->next = to_space->large_area;
1370   to_space->large_area = area;
1371   area->action = GC_ACTION_IGNORE;
1372   area->generation_index = to_space->generation_index;
1373   area->trace = area->start;
1374 }
1375 
1376 /*********************************************************************
1377  Allocation
1378 *********************************************************************/
1379 
long s48_available() {
  /* it is not 100% certain that all these cells can be allocated,
     because if an object does not fit into area_below, the remaining
     space is discarded. Is this a bad thing ?? */
  /* If the heap can grow, the remaining memory in the creation space
     is available. If it can't, we have to consider that the actually
     allocated heap (s48_heap_size) cannot grow above
     s48_max_heap_size(), so less space is really available. */
1388 
1389   long max_heap_size = s48_max_heap_size();
1390   long available_creation_space =
1391     S48_BYTES_TO_CELLS(AREA_REMAINING(creation_space.small_below)+
1392 		       AREA_REMAINING(creation_space.small_above));
1393   if (max_heap_size == 0)
1394     return available_creation_space;
1395   else {
1396     long virtually_available =
1397       max_heap_size - S48_BYTES_TO_CELLS(s48_heap_live_size());
1398     if (virtually_available < available_creation_space)
1399       return virtually_available;
1400     else
1401       return available_creation_space;
1402   }
1403 }
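/* Illustrative sketch (not part of the original file; guarded by the
   hypothetical BIBOP_EXAMPLES flag): a worked example of the limit logic
   above.  With a 100,000-cell maximum heap, 90,000 cells live and 8,000
   free cells in the creation space, s48_available reports
   min(100,000 - 90,000, 8,000) = 8,000 cells; if 95,000 cells were live it
   would shrink to 5,000. */
#ifdef BIBOP_EXAMPLES
static long example_min_available(long virtually_available,
				  long available_creation_space) {
  return (virtually_available < available_creation_space)
    ? virtually_available : available_creation_space;
}
#endif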
1404 
void s48_forbid_gcB() {
1406   gc_forbid_count++;
1407 }
1408 
void s48_allow_gcB() {
1410   /*assert(gc_forbid_count > 0);*/
1411   gc_forbid_count--;
1412 }
1413 
1414 /* Small Objects */
1415 
1416 /* FPage 2 - 3 - 4 */
void s48_make_availableAgc(long len_in_bytes) {
1418 
1419 #if (BIBOP_LOG)
1420   int i; /* for the generations-loop */
1421   int before_size[S48_GENERATIONS_COUNT];
1422   int after_size[S48_GENERATIONS_COUNT];
1423 #endif
1424 
1425   /* let's see if we run out of space in the current area... */
1426   if (AREA_REMAINING(creation_space.small) < len_in_bytes) {
1427     /* if we are under the water-mark, then continue above it... */
1428     if ((creation_space.small == creation_space.small_below) &&
1429 	(len_in_bytes <= AREA_REMAINING(creation_space.small_above))) {
1430 
1431       /* FPage 2 */
1432       creation_space.small = creation_space.small_above;
1433     }
1434 
    /* While the image is still loading and the creation_space is
       full, creation_space.small points to the small area created in
       the current_space of the first generation. In this case the
       allocation goes on in this small area without a memory
       limit. Once the image has been read, a gc is allowed and the
       very next allocation of an object triggers the first collection.

       Just for the history: with an
       S48_CREATION_SPACE_SIZE of 512 KB (128 pages as default), after
       the image has been loaded there are 89,133 objects in the heap:

       - 14,177 objects in the small_above area
       - 16,579 objects in the small_below area
       - 58,377 objects(!) in 19 areas in the current_space
                of the first generation.

       That is, only 1/3 goes into the creation_space and the remaining
       2/3 into the first generation, which causes a big delay at the
       first collection. I'll change this by increasing the
       creation_space_size 3 times.

       From now on, the creation_space alone is responsible for the
       next collections: if we are above it already, and are allowed
       to collect some garbage, then do it. */
1460 
1461     else {
1462 
1463       if (gc_forbid_count == 0) {
1464       /* FPage 4 */
1465 
1466 #if (BIBOP_LOG)
1467       s48_bibop_log("CREATION SPACE WATER MARK");
1468       s48_bibop_log("creation_space.small_above: %i pages",
1469 		    S48_CREATION_SPACE_SIZE - current_water_mark);
1470       s48_bibop_log("creation_space.small_below: %i pages\n",
1471 		    current_water_mark);
1472       /*s48_bibop_log("NEW OBJECTS");
1473       s48_bibop_log("Bytes small_above = %i", get_creation_space_small_above());
1474       s48_bibop_log("Bytes small_below = %i", get_creation_space_small_below());*/
1475 
1476       if (s48_gc_count() == 0) {
1477 	s48_bibop_log("Bytes small_gen   = %i",
1478 		      calc_generation_size(&generations[0]));
1479       }
1480       s48_bibop_log("");
1481 
1482       /* save the current size before the collection */
1483       for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
1484 	before_size[i] = calc_generation_size(&generations[i]);
1485       }
1486 
1487 #endif
1488 
1489         s48_collect(FALSE);
1490 
1491 #if (BIBOP_LOG)
1492       /* save the current size after the collection */
1493       for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
1494 	after_size[i] = calc_generation_size(&generations[i]);
1495       }
1496 
1497       s48_bibop_log("AFTER COLLECTION");
1498       for (i = S48_GENERATIONS_COUNT - 1; i > -1; i--) {
1499 	s48_bibop_log("gen: %i, last size %i, current size %i",
1500 		      i,
1501 		      before_size[i],
1502 		      after_size[i]);
1503       }
1504 
1505       s48_bibop_log("");
1506 #endif
1507       }
1508 
      /* if a gc is not allowed, or if the creation-space is still too
	 small after the collection, just use the first generation to
	 allocate space, and allocate a new area if needed. */
1513       if ((gc_forbid_count != 0) ||
1514 	  (AREA_REMAINING(creation_space.small) < len_in_bytes)) {
1515 	/* FPage 3 */
1516 	creation_space.small = generations[0].current_space->small_area;
1517 	if (AREA_REMAINING(creation_space.small) < len_in_bytes) {
1518 	  Area* new_area =
1519 	    s48_allocate_area(ulong_max(S48_MINIMUM_SMALL_AREA_SIZE,
1520 					BYTES_TO_PAGES(len_in_bytes)),
1521 			      ulong_max(S48_MAXIMUM_SMALL_AREA_SIZE,
1522 					BYTES_TO_PAGES(len_in_bytes)),
1523 			      0,
1524 			      AREA_TYPE_SIZE_SMALL);
1525 	  new_area->next = generations[0].current_space->small_area;
1526 	  generations[0].current_space->small_area = new_area;
1527 	  creation_space.small = new_area;
1528 	}
1529       }
1530     }
1531   }
1532 
1533   if (AREA_REMAINING(creation_space.small) < len_in_bytes)
1534     s48_gc_error("out of memory in s48_make_availableAgc(%ld)", len_in_bytes);
1535 }
1536 
1537 
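/* Bump-allocate len_in_bytes in the current small creation area and
   return the old frontier.  Callers (see allocate_mixedAgc below) make
   enough room via s48_make_availableAgc first; the assert only catches
   frontier overruns in debug builds. */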
1538 s48_address s48_allocate_small(long len_in_bytes) {
1539   s48_address result;
1540   /* catch misuse of this function */
1541   /*assert(len_in_bytes <= S48_SMALL_OBJECT_LIMIT);*/
1542 
1543   result = creation_space.small->frontier;
1544   creation_space.small->frontier += S48_BYTES_TO_A_UNITS(len_in_bytes);
1545   assert(creation_space.small->frontier <= creation_space.small->end);
1546 
1547   return result;
1548 }
1549 
1550 /* Large Objects */
1551 
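/* Make room for a large object: if the large objects allocated
   directly in the creation space already exceed
   S48_MAXIMUM_LARGE_CREATION_SPACE_SIZE and collecting is currently
   permitted, trigger a collection.  The len_in_bytes argument is not
   needed here, because large objects always get an area of their own
   in s48_allocate_large below. */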
1552 static void s48_make_large_availableAgc(long len_in_bytes) {
1553   unsigned long current_size = 0;
1554   /* maybe keep tracking the size while allocating... */
1555   FOR_ALL_AREAS(creation_space.large,
1556 		current_size += (area->frontier - area->start));
1557   if ((gc_forbid_count == 0) &&
1558       (current_size > S48_MAXIMUM_LARGE_CREATION_SPACE_SIZE)) {
1559     s48_collect(FALSE);
1560   }
1564 }
1565 
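/* Allocate a large object in a fresh area of its own and chain that
   area onto creation_space.large.  Returns NULL if the size does not
   fit the page arithmetic or if the underlying area allocation fails,
   so the caller can report an out-of-memory condition. */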
1566 static s48_address s48_allocate_large(long len_in_bytes) {
1567   unsigned long len_in_pages = BYTES_TO_PAGES(len_in_bytes);
1568   Area *area;
1569   if (PAGES_TO_BYTES_LOSES_P(len_in_pages)) {
1570     /* pretend we're just out of memory */
1571     return NULL;
1572   }
1573   area = s48_allocate_area_without_crashing(len_in_pages,
1574 					    len_in_pages,
1575 					    0,
1576 					    AREA_TYPE_SIZE_LARGE);
1577   if (area == NULL) {
1578     /* out of memory */
1579     return NULL;
1580   }
1581   area->frontier = area->start + len_in_bytes;
1582   area->next = creation_space.large;
1583   creation_space.large = area;
1584   return area->start;
1585 }
1586 
1587 /* "Mixed" Objects */
1588 
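/* Dispatch on the requested size: objects up to S48_SMALL_OBJECT_LIMIT
   go through the small part of the creation space, larger ones get a
   large area of their own. */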
1589 inline static s48_address allocate_mixedAgc(long len_in_bytes) {
1590   if (len_in_bytes <= S48_SMALL_OBJECT_LIMIT) {
1591     s48_make_availableAgc(len_in_bytes);
1592     return s48_allocate_small(len_in_bytes);
1593   } else {
1594     s48_make_large_availableAgc(len_in_bytes);
1595     return s48_allocate_large(len_in_bytes);
1596   }
1597 }
1598 
1599 /*
1600 
1601 The Allocator (s48_allocate_tracedAgc)
1602 
1603 - If called from the portable Dumper: len_in_bytes is the size of a
1604 single object. All objects are allocated one after the other.
1605 
1606 - If called from the TSC-Dumper: len_in_bytes is the size of the whole
1607 image. At this stage a gc is forbidden, so if the creation_space is
1608 not big enough we have a problem. We could increase the size of the
1609 creation_space so that it holds the whole image, but we cannot copy
1610 the image blindly, because the static_space is divided into 3 areas:
1611 small, large and weak objects. We have to find these 3 kinds of
1612 objects separately! */
1613 
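/* Both entry points below go through the same mixed allocation path;
   at allocation time this collector makes no difference between traced
   and untraced objects.  A minimal usage sketch (n is a hypothetical
   byte count, not a name from this file):
 
       s48_address addr = s48_allocate_tracedAgc(n);
 
   yields a small-area address for n <= S48_SMALL_OBJECT_LIMIT and a
   large-area address otherwise. */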
1614 s48_address s48_allocate_tracedAgc(long len_in_bytes) {
1615   return allocate_mixedAgc(len_in_bytes);
1616 }
1617 
1618 s48_address s48_allocate_untracedAgc(long len_in_bytes) {
1619   return allocate_mixedAgc(len_in_bytes);
1620 }
1621 
1622 /* Unmovable objects are allocated directly in a fresh large area;
1623    large areas are never moved during a collection. */
1624 psbool s48_gc_can_allocate_unmovableP() { return PSTRUE; }
1625 
1626 s48_address s48_allocate_traced_unmovableAgc(long len_in_bytes) {
1627     s48_make_large_availableAgc(len_in_bytes);
1628     return s48_allocate_large(len_in_bytes);
1629 }
1630 
1631 s48_address s48_allocate_untraced_unmovableAgc(long len_in_bytes) {
1632     s48_make_large_availableAgc(len_in_bytes);
1633     return s48_allocate_large(len_in_bytes);
1634 }
1635 
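/* An object is unmovable exactly when the area it lives in is a large
   area (see the allocation functions above). */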
1636 psbool s48_unmovableP(s48_value stob) {
1637   Area* area = s48_memory_map_ref(S48_ADDRESS_AT_HEADER(stob));
1638   return ((area != NULL) &&
1639 	  (area->area_type_size == AREA_TYPE_SIZE_LARGE)) ? PSTRUE : PSFALSE;
1640 }
1641 
1642 /* Weak Pointers */
1643 
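/* Allocate storage for a weak-pointer object.  Weak pointers live in
   dedicated weak areas so the collector can treat them specially.  If
   the creation space's weak area is too small, we either collect or,
   when collecting is forbidden, grow the weak-area chain of the first
   generation's current space instead. */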
1644 s48_address s48_allocate_weakAgc(long len_in_bytes) {
1645   Area* area = creation_space.weaks;
1646   s48_address result;
1647 
1648   if (AREA_REMAINING(area) < len_in_bytes) {
1649     if (gc_forbid_count == 0) {
1650       s48_collect(FALSE);
1651       area = creation_space.weaks;
1652     }
1653     else {
1654       Area** areap = &generations[0].current_space->weaks_area;
1655       if (AREA_REMAINING(*areap) < len_in_bytes) {
1656 	Area* new_area = s48_allocate_area(S48_MINIMUM_WEAK_AREA_SIZE,
1657 					   S48_MAXIMUM_WEAK_AREA_SIZE,
1658 					   0, AREA_TYPE_SIZE_WEAKS);
1659 	new_area->next = *areap;
1660 	*areap = new_area;
1661       }
1662       area = *areap;
1663     }
1664   }
1665   if (AREA_REMAINING(area) < len_in_bytes)
1666     /* this should be impossible */
1667     s48_gc_error("out of memory in s48_allocate_weakAgc(%ld).", len_in_bytes);
1668 
1669   result = area->frontier;
1670   area->frontier += S48_BYTES_TO_A_UNITS(len_in_bytes);
1671   return (result);
1672 }
1673 
1674 /*********************************************************************
1675  Walking down the heap
1676 *********************************************************************/
1677 
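/* Apply do_part to the used portion [start, frontier) of every area in
   the given list. */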
1678 inline static void walk_areas(void (*do_part)(s48_address, s48_address),
1679 			      Area* areas) {
1680   while (areas != NULL) {
1681     do_part(areas->start, areas->frontier);
1682     areas = areas->next;
1683   }
1684 }
1685 
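/* Walk the complete heap: first the creation space, then the current
   space of every generation. */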
1686 void s48_walk_heap(void (*do_part)(s48_address, s48_address)) {
1687   int i;
1688   walk_areas(do_part, creation_space.small_below);
1689   walk_areas(do_part, creation_space.small_above);
1690   walk_areas(do_part, creation_space.large);
1691   walk_areas(do_part, creation_space.weaks);
1692 
1693   for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
1694     walk_areas(do_part, generations[i].current_space->small_area);
1695     walk_areas(do_part, generations[i].current_space->large_area);
1696     walk_areas(do_part, generations[i].current_space->weaks_area);
1697   }
1698 }
1699 
1700 
1701 /* Special area initialization for the BIBOP undumper in the last
1702    generation */
1703 
1704 
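/* The undumper hands us one contiguous block laid out as
   [ small | large | weaks ].  Judging from the code below, each *_hp_d
   argument is the offset of the corresponding area's frontier from its
   start, converted with S48_BYTES_TO_A_UNITS when the area is made. */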
1705 /*****************************************************************************/
1706 void s48_initialize_image_areas(long small_bytes, long small_hp_d,
1707 				long large_bytes, long large_hp_d,
1708 				long weaks_bytes, long weaks_hp_d) {
1709    int image_generation = S48_GENERATIONS_COUNT - 1;
1710    s48_address start;
1711    s48_address small_end;
1712    s48_address large_end;
1713    s48_address end;
1714    long img_bytes;
1715    long i;
1716 
1717    /* Wrong image format? */
1718    if ((small_bytes < 0) || (large_bytes < 0) || (weaks_bytes < 0))  return;
1719 
1720    /* Get a block */
1721    img_bytes = small_bytes + large_bytes + weaks_bytes;
1722 
1723    s48_allocate_image_area(img_bytes, &start, &end);
1724 
1725    if (img_bytes != (end - start)) {
1726      s48_gc_error("s48_initialize_image_areas: image block has the wrong size");
1727    }
1728 
1729    small_end = start + small_bytes;
1730    large_end = small_end + large_bytes;
1731 
1732 
1733    /* Set the start addresses */
1734    s48_set_new_small_start_addrB(start);
1735    s48_set_new_large_start_addrB(small_end);
1736    s48_set_new_weaks_start_addrB(large_end);
1737 
1738 
1739    /* Split this block and assign it to the last generation's areas */
1740    if (small_bytes > 0) {
1741      Area* small_img;
1742      small_img = s48_make_area(start, small_end,
1743 			       start + S48_BYTES_TO_A_UNITS(small_hp_d),
1744 			       image_generation, AREA_TYPE_SIZE_SMALL);
1745      small_img->action = GC_ACTION_IGNORE;
1746      generations[image_generation].current_space->small_area = small_img;
1747    }
1748 
1749    if (large_bytes > 0) {
1750      Area* large_img;
1751      large_img = s48_make_area(small_end, large_end,
1752 			       small_end + S48_BYTES_TO_A_UNITS(large_hp_d),
1753 			       image_generation, AREA_TYPE_SIZE_LARGE);
1754      large_img->action = GC_ACTION_IGNORE;
1755      generations[image_generation].current_space->large_area = large_img;
1756    }
1757 
1758    if (weaks_bytes > 0) {
1759      Area* weaks_img;
1760      weaks_img = s48_make_area(large_end, end,
1761 			       large_end + S48_BYTES_TO_A_UNITS(weaks_hp_d),
1762 			       image_generation, AREA_TYPE_SIZE_WEAKS);
1763      weaks_img->action = GC_ACTION_IGNORE;
1764      generations[image_generation].current_space->weaks_area = weaks_img;
1765    }
1766 
1767    return;
1768 
1769  }
1770 
1771 
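/* Accessors for the image areas installed above in the last
   generation; presumably they are consulted when an image is written
   or resumed. */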
1772 /*********************************************************************/
1773  long s48_get_new_small_size(void) {
1774 /*********************************************************************/
1775    s48_address start;
1776    s48_address end;
1777 
1778    start = generations[S48_GENERATIONS_COUNT - 1].current_space->small_area->start;
1779    end = generations[S48_GENERATIONS_COUNT - 1].current_space->small_area->end;
1780 
1781    return end - start;
1782  }
1783 
1784 /*********************************************************************/
1785  long s48_get_new_large_size(void) {
1786 /*********************************************************************/
1787    s48_address start;
1788    s48_address end;
1789 
1790    start = generations[S48_GENERATIONS_COUNT - 1].current_space->large_area->start;
1791    end = generations[S48_GENERATIONS_COUNT - 1].current_space->large_area->end;
1792 
1793    return end - start;
1794  }
1795 
1796 /*********************************************************************/
1797  long s48_get_new_weaks_size(void) {
1798 /*********************************************************************/
1799    s48_address start;
1800    s48_address end;
1801 
1802    start = generations[S48_GENERATIONS_COUNT - 1].current_space->weaks_area->start;
1803    end = generations[S48_GENERATIONS_COUNT - 1].current_space->weaks_area->end;
1804 
1805    return end - start;
1806  }
1807 
1808 /*********************************************************************/
1809  char *   s48_get_new_small_end_addr(void) {
1810 /*********************************************************************/
1811    return generations[S48_GENERATIONS_COUNT - 1].current_space->small_area->end;
1812  }
1813 
1814 /*********************************************************************/
1815  char *   s48_get_new_large_end_addr(void) {
1816 /*********************************************************************/
1817    return generations[S48_GENERATIONS_COUNT - 1].current_space->large_area->end;
1818  }
1819 
1820 /*********************************************************************/
1821  char *   s48_get_new_weaks_end_addr(void) {
1822 /*********************************************************************/
1823    return generations[S48_GENERATIONS_COUNT - 1].current_space->weaks_area->end;
1824  }
1825 
1826 
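/* Reconcile the configured heap-size limits: if a maximum heap size
   was given (e.g. via the -h flag) that is smaller than the required
   minimum, raise it to the minimum and print a warning. */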
1827 /*********************************************************************/
1828  void s48_check_heap_sizeB() {
1829 /*********************************************************************/
1830 
1831    unsigned long max_size = s48_max_heap_size(); /* cells */
1832    extern long s48_min_heap_size(void);
1833    unsigned long min_size = s48_min_heap_size(); /* cells */
1834 
1835    /* Check the given heap size (flag -h) and the actual one */
1836    if ((max_size != 0) && (min_size > max_size)) {
1837      s48_set_max_heap_sizeB( min_size );
1838      fprintf(stderr,
1839 	     "Maximum heap size %ld is too small, using %ld cells instead.\n", max_size,
1840 	     s48_max_heap_size());
1841    }
1842  }
1843 
1844