1 /******************************** -*- C -*- ****************************
2 *
3 * Object Table maintenance module.
4 *
5 *
6 ***********************************************************************/
7
8 /***********************************************************************
9 *
10 * Copyright 1988,89,90,91,92,94,95,99,2000,2001,2002,2006,2007,2008,2009
11 * Free Software Foundation, Inc.
12 * Written by Steve Byrne and Paolo Bonzini.
13 *
14 * This file is part of GNU Smalltalk.
15 *
16 * GNU Smalltalk is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the Free
18 * Software Foundation; either version 2, or (at your option) any later
19 * version.
20 *
21 * Linking GNU Smalltalk statically or dynamically with other modules is
22 * making a combined work based on GNU Smalltalk. Thus, the terms and
23 * conditions of the GNU General Public License cover the whole
24 * combination.
25 *
26 * In addition, as a special exception, the Free Software Foundation
27 * give you permission to combine GNU Smalltalk with free software
28 * programs or libraries that are released under the GNU LGPL and with
29 * independent programs running under the GNU Smalltalk virtual machine.
30 *
31 * You may copy and distribute such a system following the terms of the
32 * GNU GPL for GNU Smalltalk and the licenses of the other code
33 * concerned, provided that you include the source code of that other
34 * code when and as the GNU GPL requires distribution of source code.
35 *
36 * Note that people who make modified versions of GNU Smalltalk are not
37 * obligated to grant this special exception for their modified
38 * versions; it is their choice whether to do so. The GNU General
39 * Public License gives permission to release a modified version without
40 * this exception; this exception also makes it possible to release a
41 * modified version which carries forward this exception.
42 *
43 * GNU Smalltalk is distributed in the hope that it will be useful, but WITHOUT
44 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
45 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
46 * more details.
47 *
48 * You should have received a copy of the GNU General Public License along with
49 * GNU Smalltalk; see the file COPYING. If not, write to the Free Software
50 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
51 *
52 ***********************************************************************/
53
54
55 #include "gstpriv.h"
56
57 #define K 1024
58 #define INIT_NUM_INCUBATOR_OOPS 50
59 #define INCUBATOR_CHUNK_SIZE 20
60
61 /* The number of OOPs that are swept on each incremental GC step. */
62 #define INCREMENTAL_SWEEP_STEP 100
63
64 /* Define this flag to turn on debugging dumps for garbage collection */
65 /* #define GC_DEBUG_OUTPUT */
66
67 /* Define this flag to turn on debugging code for OOP table management */
68 /* #define GC_DEBUGGING */
69
70 /* Define this flag to disable incremental garbage collection */
71 /* #define NO_INCREMENTAL_GC */
72
73 /* Define this flag to turn on debugging code for oldspace management */
74 /* #define MMAN_DEBUG_OUTPUT */
75
76 #if defined(GC_DEBUG_OUTPUT)
77 #define GC_DEBUGGING
78 #endif
79
80 #if !defined(OPTIMIZE)
81 #define GC_DEBUGGING
82 #endif
83
84
85
86
87 /* These are the real OOPS for nil, true, and false */
88 OOP _gst_nil_oop = NULL;
89 OOP _gst_true_oop = NULL;
90 OOP _gst_false_oop = NULL;
91
92 /* This is true to show a message whenever a GC happens. */
93 int _gst_gc_message = true;
94
95 /* This is != 0 in the middle of a GC. */
96 int _gst_gc_running = 0;
97
98 /* This is the memory area which holds the object table. */
99 static heap oop_heap;
100
101 /* This vector holds the storage for all the Character objects in the
102 system. Since all character objects are unique, we pre-allocate
103 space for 256 of them, and treat them as special built-ins when
104 doing garbage collection. */
105 static struct gst_character _gst_char_object_table[NUM_CHAR_OBJECTS];
106
107 /* This is the "nil" object in the system.  That is, the single instance
108 of the UndefinedObject class, which is called "nil". */
109 static struct gst_undefined_object _gst_nil_object;
110
111 /* These represent the two boolean objects in the system, true and
112 false. This is the object storage for those two objects.
113    true == &_gst_boolean_objects[0],
114    false == &_gst_boolean_objects[1] */
115 static struct gst_boolean _gst_boolean_objects[2];
116
117 /* This variable represents information about the memory space.
118 _gst_mem holds the required information: basically the
119 pointer to the base and top of the space, and the pointers into it
120 for allocation and copying. */
121 struct memory_space _gst_mem;
122
123 /* Data to compute the statistics in _gst_mem. */
124 struct statistical_data
125 {
126 int reclaimedOldSpaceBytesSinceLastGlobalGC;
127 unsigned long timeOfLastScavenge, timeOfLastGlobalGC, timeOfLastGrowth,
128 timeOfLastCompaction;
129 } stats;
130
131
132
133 /* Allocates a table of SIZE OOPs, and stores pointers to the
134    builtin OOPs into _gst_nil_oop et al. */
135 static void alloc_oop_table (size_t);
136
137 /* Free N slots from the beginning of the queue Q and return a pointer
138 to their base. */
139 static OOP *queue_get (surv_space *q, int n);
140
141 /* Allocate N slots at the end of the queue Q, copy SRC into them,
142    and return a pointer to their base. */
143 static OOP *queue_put (surv_space *q, OOP *src, int n);
144
145 /* Move an object from survivor space to oldspace. */
146 static void tenure_one_object ();
147
148 /* Initialize an allocation heap with the oldspace hooks set. */
149 static heap_data *init_old_space (size_t size);
150
151 /* Initialize a surv_space structure. */
152 static void init_survivor_space (struct surv_space *space, size_t size);
153
154 /* Raise the oldspace size limit to SIZE bytes without compacting it. */
155 static void grow_memory_no_compact (size_t size);
156
157 /* Reset a surv_space structure (same as init, but without allocating
158    memory). */
159 static void reset_survivor_space (struct surv_space *space);
160
161 /* Return whether the incremental collector is running. */
162 static inline mst_Boolean incremental_gc_running (void);
163
164 /* Restart the incremental collector. Objects before FIRSTOOP
165 are assumed to be alive (currently the base of the OOP table is
166 always passed, but you never know). */
167 static void reset_incremental_gc (OOP firstOOP);
168
169 /* Compact the old objects. Grow oldspace to NEWSIZE bytes. */
170 static void compact (size_t new_heap_limit);
171
172 /* Allocate and return space for an oldspace object of SIZE bytes.
173 The pointer to the object data is returned, the OOP is
174 stored in P_OOP. */
175 static gst_object alloc_old_obj (size_t size,
176 OOP *p_oop);
177
178 /* Gather statistics. */
179 static void update_stats (unsigned long *last, double *between, double *duration);
180
181 /* The copying collector. */
182 static inline void copy_oops (void);
183
184 /* Grey ranges are generated in two cases. The standard one is when we
185 write to oldspace; another one is when we copy objects to the destination
186 semispace faster than the scanner can go past them. When this happens,
187 tenure_one_object puts the object onto a special list of old objects
188 that are to be scanned. What this function does is to consume this last
189 list. (It also completes the special treatment of ephemeron objects). */
190 static void scan_grey_objects ();
191
192 /* The treatment of grey pages is different from grey objects. Since some
193 new objects might not be tenured, grey pages might still hold some
194 pointers to new objects. For this reason, and to avoid the cost of
195 delivering two signals, a grey page is *not* removed from the tree
196 until no new object is found in it. */
197 static void scan_grey_pages ();
198
199 /* Greys a page worth of pointers starting at BASE. */
200 static void add_to_grey_list (OOP *base, int n);
201
202 /* Greys the object OOP. */
203 static void add_grey_object (OOP oop);
204
205 /* Do the breadth-first scanning of copied objects. */
206 static void cheney_scan (void);
207
208 /* Hook that allows pages to be created grey. */
209 static void oldspace_after_allocating (heap_data *h, heap_block *blk, size_t sz);
210
211 /* Hook that discards freed pages from the remembered table. */
212 static void oldspace_before_freeing (heap_data *h, heap_block *blk, size_t sz);
213
214 #ifndef NO_SIGSEGV_HANDLING
215 /* The global SIGSEGV handler. */
216 static int oldspace_sigsegv_handler (void* fault_address, int serious);
217 #endif
218
219 /* Hook that triggers garbage collection. */
220 static heap_data *oldspace_nomemory (heap_data *h, size_t sz);
221
222 /* Answer the number of fields to be scanned in the object starting
223 at OBJ, with the given FLAGS on its OOP. */
224 static int scanned_fields_in (gst_object obj, int flags) ATTRIBUTE_PURE;
225
226 /* The mark phase of oldspace GC. */
227 static inline void mark_oops (void);
228
229 /* Communicate to the finalization thread which objects have to be sent
230 the #mourn message.
231
232    When one of the objects pointed to by a weak object has no other
233 references, the slot of the weak object is replaced by a zero and
234 the #mourn message is sent to it. Ephemerons' keys are checked for
235 reachability after non-ephemerons are marked, and if no objects outside
236 the ephemeron refer to it, the ephemeron is sent #mourn as well. */
237 static inline void mourn_objects (void);
238
239 /* Mark the ephemeron objects. This is done after other objects
240 are marked. */
241 static inline void mark_ephemeron_oops (void);
242
243 /* Walks the instance variables of weak objects and zeroes out those that are
244    not surviving the garbage collection.  Called by prepare_for_sweep. */
245 static inline void check_weak_refs ();
246
247
248 void
249 init_survivor_space (struct surv_space *space, size_t size)
250 {
251 space->totalSize = size;
252 space->minPtr = (OOP *) xmalloc (size);
253 space->maxPtr = (OOP *) ((char *)space->minPtr + size);
254
255 reset_survivor_space (space);
256 }
257
258 heap_data *
259 init_old_space (size_t size)
260 {
261 heap_data *h = _gst_mem_new_heap (0, size);
262 h->after_prim_allocating = oldspace_after_allocating;
263 h->before_prim_freeing = oldspace_before_freeing;
264 h->nomemory = oldspace_nomemory;
265
266 return h;
267 }
268
269 void
270 _gst_init_mem_default ()
271 {
272 _gst_init_mem (0, 0, 0, 0, 0, 0);
273 }
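/* Illustrative sketch (not part of the VM proper): an embedder that wants
   a bigger new generation could call _gst_init_mem directly instead of
   _gst_init_mem_default.  A zero argument keeps the corresponding default
   chosen below on the first call (3 MB eden, 128 KB survivor spaces, 4 MB
   oldspace, 4 KB big-object threshold, 80% grow threshold, 30% grow rate):

     _gst_init_mem (8 * K * K, 512 * K, 0, 0, 0, 0);

   would ask for an 8 MB eden and 512 KB survivor spaces while leaving the
   other parameters at their defaults.  */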
274
275 void
276 _gst_init_mem (size_t eden, size_t survivor, size_t old,
277 size_t big_object_threshold, int grow_threshold_percent,
278 int space_grow_rate)
279 {
280 if (!_gst_mem.old)
281 {
282 #ifndef NO_SIGSEGV_HANDLING
283 sigsegv_install_handler (oldspace_sigsegv_handler);
284 #endif
285 if (!eden)
286 eden = 3 * K * K;
287 if (!survivor)
288 survivor = 128 * K;
289 if (!old)
290 old = 4 * K * K;
291 if (!big_object_threshold)
292 big_object_threshold = 4 * K;
293 if (!grow_threshold_percent)
294 grow_threshold_percent = 80;
295 if (!space_grow_rate)
296 space_grow_rate = 30;
297 }
298 else
299 {
300 if (eden || survivor)
301 _gst_scavenge ();
302
303 if (survivor)
304 _gst_tenure_all_survivors ();
305
306 if (old && old != _gst_mem.old->heap_total)
307 _gst_grow_memory_to (old);
308 }
309
310 if (eden)
311 {
312 _gst_mem.eden.totalSize = eden;
313 _gst_mem.eden.minPtr = (OOP *) xmalloc (eden);
314 _gst_mem.eden.allocPtr = _gst_mem.eden.minPtr;
315 _gst_mem.eden.maxPtr = (OOP *)
316 ((char *)_gst_mem.eden.minPtr + eden);
317 }
318
319 if (survivor)
320 {
321 init_survivor_space (&_gst_mem.surv[0], survivor);
322 init_survivor_space (&_gst_mem.surv[1], survivor);
323 init_survivor_space (&_gst_mem.tenuring_queue,
324 survivor / OBJ_HEADER_SIZE_WORDS);
325 }
326
327 if (big_object_threshold)
328 _gst_mem.big_object_threshold = big_object_threshold;
329
330 if (_gst_mem.eden.totalSize < _gst_mem.big_object_threshold)
331 _gst_mem.big_object_threshold = _gst_mem.eden.totalSize;
332
333 if (grow_threshold_percent)
334 _gst_mem.grow_threshold_percent = grow_threshold_percent;
335
336 if (space_grow_rate)
337 _gst_mem.space_grow_rate = space_grow_rate;
338
339 if (!_gst_mem.old)
340 {
341 if (old)
342 {
343 _gst_mem.old = init_old_space (old);
344 _gst_mem.fixed = init_old_space (old);
345 }
346
347 _gst_mem.active_half = &_gst_mem.surv[0];
348 _gst_mem.active_flag = F_EVEN;
349 _gst_mem.live_flags = F_EVEN | F_OLD;
350
351 stats.timeOfLastScavenge = stats.timeOfLastGlobalGC =
352 stats.timeOfLastGrowth = stats.timeOfLastCompaction =
353 _gst_get_milli_time ();
354
355 _gst_mem.factor = 0.4;
356
357 _gst_inc_init_registry ();
358 }
359
360 _gst_mem.markQueue = (struct mark_queue *)
361 xcalloc (8 * K, sizeof (struct mark_queue));
362 _gst_mem.lastMarkQueue = &_gst_mem.markQueue[8 * K];
363 }
364
365 void _gst_update_object_memory_oop (OOP oop)
366 {
367 gst_object_memory data;
368
369 /* Ensure the statistics are coherent. */
370 for (;;) {
371 OOP floatOOP;
372
373 data = (gst_object_memory) OOP_TO_OBJ (oop);
374 data->bytesPerOOP = FROM_INT (sizeof (PTR));
375 data->bytesPerOTE = FROM_INT (sizeof (struct oop_s) +
376 sizeof (gst_object_header));
377
378 data->edenSize = FROM_INT (_gst_mem.eden.totalSize);
379 data->survSpaceSize = FROM_INT (_gst_mem.active_half->totalSize);
380 data->oldSpaceSize = FROM_INT (_gst_mem.old->heap_limit);
381 data->fixedSpaceSize = FROM_INT (_gst_mem.fixed->heap_limit);
382 data->edenUsedBytes = FROM_INT ((char *)_gst_mem.eden.allocPtr -
383 (char *)_gst_mem.eden.minPtr);
384 data->survSpaceUsedBytes = FROM_INT (_gst_mem.active_half->filled);
385 data->oldSpaceUsedBytes = FROM_INT (_gst_mem.old->heap_total);
386 data->fixedSpaceUsedBytes = FROM_INT (_gst_mem.fixed->heap_total);
387 data->rememberedTableEntries = FROM_INT (_gst_mem.rememberedTableEntries);
388 data->numScavenges = FROM_INT (_gst_mem.numScavenges);
389 data->numGlobalGCs = FROM_INT (_gst_mem.numGlobalGCs);
390 data->numCompactions = FROM_INT (_gst_mem.numCompactions);
391 data->numGrowths = FROM_INT (_gst_mem.numGrowths);
392 data->numOldOOPs = FROM_INT (_gst_mem.numOldOOPs);
393 data->numFixedOOPs = FROM_INT (_gst_mem.numFixedOOPs);
394 data->numWeakOOPs = FROM_INT (_gst_mem.numWeakOOPs);
395 data->numOTEs = FROM_INT (_gst_mem.ot_size);
396 data->numFreeOTEs = FROM_INT (_gst_mem.num_free_oops);
397 data->allocFailures = FROM_INT (_gst_mem.old->failures + _gst_mem.fixed->failures);
398 data->allocMatches = FROM_INT (_gst_mem.old->matches + _gst_mem.fixed->matches);
399 data->allocSplits = FROM_INT (_gst_mem.old->splits + _gst_mem.fixed->splits);
400 data->allocProbes = FROM_INT (_gst_mem.old->probes + _gst_mem.fixed->probes);
401
402 /* Every allocation of a FloatD might cause a garbage
403 collection! */
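      /* If a collection did happen, the object body may have moved and
	 DATA would be stale: the `continue' inside SET_FIELD restarts the
	 enclosing for (;;) loop so that every field is recomputed from
	 the new body returned by OOP_TO_OBJ.  */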
404 #define SET_FIELD(x) \
405 floatOOP = floatd_new (_gst_mem.x); \
406 if (data != (gst_object_memory) OOP_TO_OBJ (oop)) continue; \
407 data->x = floatOOP;
408
409 SET_FIELD (timeBetweenScavenges);
410 SET_FIELD (timeBetweenGlobalGCs);
411 SET_FIELD (timeBetweenGrowths);
412 SET_FIELD (timeToScavenge);
413 SET_FIELD (timeToCollect);
414 SET_FIELD (timeToCompact);
415 SET_FIELD (reclaimedBytesPerScavenge);
416 SET_FIELD (tenuredBytesPerScavenge);
417 SET_FIELD (reclaimedBytesPerGlobalGC);
418 SET_FIELD (reclaimedPercentPerScavenge);
419
420 #undef SET_FIELD
421
422 break;
423 }
424 }
425
426 void
427 _gst_init_oop_table (PTR address, size_t size)
428 {
429 int i;
430
431 oop_heap = NULL;
432 for (i = MAX_OOP_TABLE_SIZE; i && !oop_heap; i >>= 1)
433 oop_heap = _gst_heap_create (address, i * sizeof (struct oop_s));
434
435 if (!oop_heap)
436 nomemory (true);
437
438 alloc_oop_table (size);
439
440 _gst_nil_oop->flags = F_READONLY | F_OLD | F_REACHABLE;
441 _gst_nil_oop->object = (gst_object) & _gst_nil_object;
442 _gst_nil_object.objSize =
443 FROM_INT (ROUNDED_WORDS (sizeof (struct gst_undefined_object)));
444
445 _gst_true_oop->flags = F_READONLY | F_OLD | F_REACHABLE;
446 _gst_true_oop->object = (gst_object) & _gst_boolean_objects[0];
447 _gst_false_oop->flags = F_READONLY | F_OLD | F_REACHABLE;
448 _gst_false_oop->object = (gst_object) & _gst_boolean_objects[1];
449 _gst_boolean_objects[0].objSize =
450 FROM_INT (ROUNDED_WORDS (sizeof (struct gst_boolean)));
451 _gst_boolean_objects[1].objSize =
452 FROM_INT (ROUNDED_WORDS (sizeof (struct gst_boolean)));
453 _gst_boolean_objects[0].booleanValue = _gst_true_oop;
454 _gst_boolean_objects[1].booleanValue = _gst_false_oop;
455
456 for (i = 0; i < NUM_CHAR_OBJECTS; i++)
457 {
458 _gst_char_object_table[i].objSize =
459 FROM_INT (ROUNDED_WORDS (sizeof (struct gst_character)));
460 _gst_char_object_table[i].charVal = FROM_INT (i);
461 _gst_mem.ot[i + CHAR_OBJECT_BASE].object =
462 (gst_object) & _gst_char_object_table[i];
463 _gst_mem.ot[i + CHAR_OBJECT_BASE].flags =
464 F_READONLY | F_OLD | F_REACHABLE;
465 }
466 }
467
468
469 void
470 alloc_oop_table (size_t size)
471 {
472 size_t bytes;
473
474 _gst_mem.ot_size = size;
475 bytes = (size - FIRST_OOP_INDEX) * sizeof (struct oop_s);
476 _gst_mem.ot_base =
477 (struct oop_s *) _gst_heap_sbrk (oop_heap, bytes);
478 if (!_gst_mem.ot_base)
479 nomemory (true);
480
481 _gst_mem.ot = &_gst_mem.ot_base[-FIRST_OOP_INDEX];
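  /* FIRST_OOP_INDEX is negative, so the table is over-allocated and
     _gst_mem.ot points a few entries past its start: the predefined OOPs
     (nil, true, false and the Characters) live at the negative indices of
     ot, while ordinary objects start at index 0.  */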
482 _gst_nil_oop = &_gst_mem.ot[NIL_OOP_INDEX];
483 _gst_true_oop = &_gst_mem.ot[TRUE_OOP_INDEX];
484 _gst_false_oop = &_gst_mem.ot[FALSE_OOP_INDEX];
485
486 _gst_mem.num_free_oops = size;
487 _gst_mem.last_allocated_oop = _gst_mem.last_swept_oop = _gst_mem.ot - 1;
488 _gst_mem.next_oop_to_sweep = _gst_mem.ot - 1;
489 }
490
491 mst_Boolean
492 _gst_realloc_oop_table (size_t newSize)
493 {
494 size_t bytes;
495
496 bytes = (newSize - _gst_mem.ot_size) * sizeof (struct oop_s);
497 if (bytes < 0)
498 return (true);
499
500 if (!_gst_heap_sbrk (oop_heap, bytes))
501 {
502 /* try to recover. Note that we cannot move the OOP table like
503 we do with the object data. */
504 nomemory (false);
505 return (false);
506 }
507
508 _gst_mem.num_free_oops += newSize - _gst_mem.ot_size;
509 _gst_mem.ot_size = newSize;
510 return (true);
511 }
512
513 void
514 _gst_dump_oop_table()
515 {
516 OOP oop;
517
518 for (oop = _gst_mem.ot; oop <= _gst_mem.last_allocated_oop; oop++)
519 if (!IS_OOP_FREE (oop))
520 {
521 if (IS_OOP_VALID (oop))
522 _gst_display_oop (oop);
523 else
524 _gst_display_oop_short (oop);
525 }
526 }
527
528 void
529 _gst_dump_owners (OOP oop)
530 {
531 OOP oop2, lastOOP;
532
533 for (oop2 = _gst_mem.ot_base, lastOOP = &_gst_mem.ot[_gst_mem.ot_size];
534 oop2 < lastOOP; oop2++)
535 if UNCOMMON (IS_OOP_VALID (oop2) && is_owner(oop2, oop))
536 _gst_display_oop (oop2);
537 }
538
539 void
540 _gst_check_oop_table ()
541 {
542 OOP oop, lastOOP;
543
544 for (oop = _gst_mem.ot_base, lastOOP = &_gst_mem.ot[_gst_mem.ot_size];
545 oop < lastOOP; oop++)
546 {
547 gst_object object;
548 OOP *scanPtr;
549 int n;
550
551 if (!IS_OOP_VALID_GC (oop))
552 continue;
553
554 object = OOP_TO_OBJ (oop);
555 scanPtr = &object->objClass;
556 if (oop->flags & F_CONTEXT)
557 {
558 gst_method_context ctx;
559 intptr_t methodSP;
560 ctx = (gst_method_context) object;
561 methodSP = TO_INT (ctx->spOffset);
562 n = ctx->contextStack + methodSP + 1 - object->data;
563 }
564 else
565 n = NUM_OOPS (object) + 1;
566
567 while (n--)
568 {
569 OOP pointedOOP = *scanPtr++;
570 if (IS_OOP (pointedOOP)
571 && (!IS_OOP_ADDR (pointedOOP) || !IS_OOP_VALID_GC (pointedOOP)))
572 abort ();
573 }
574 }
575 }
576
577
578 void
579 _gst_init_builtin_objects_classes (void)
580 {
581 int i;
582
583 _gst_nil_object.objClass = _gst_undefined_object_class;
584 _gst_boolean_objects[0].objClass = _gst_true_class;
585 _gst_boolean_objects[1].objClass = _gst_false_class;
586
587 for (i = 0; i < NUM_CHAR_OBJECTS; i++)
588 _gst_char_object_table[i].objClass = _gst_char_class;
589 }
590
591 OOP
592 _gst_find_an_instance (OOP class_oop)
593 {
594 OOP oop;
595
596 PREFETCH_START (_gst_mem.ot, PREF_READ | PREF_NTA);
597 for (oop = _gst_mem.ot;
598 oop <= _gst_mem.last_allocated_oop; oop++)
599 {
600 PREFETCH_LOOP (oop, PREF_READ | PREF_NTA);
601 if (IS_OOP_VALID (oop) && (OOP_CLASS (oop) == class_oop))
602 return (oop);
603 }
604
605 return (_gst_nil_oop);
606 }
607
608
609 void
610 _gst_make_oop_non_weak (OOP oop)
611 {
612 weak_area_tree *entry = _gst_mem.weak_areas;
613
614 oop->flags &= ~F_WEAK;
615 _gst_mem.numWeakOOPs--;
616 while (entry)
617 {
618 if (entry->oop == oop)
619 {
620 rb_erase (&entry->rb, (rb_node_t **) &_gst_mem.weak_areas);
621 xfree (entry);
622 break;
623 }
624
625 entry = (weak_area_tree *)
626 (oop < entry->oop ? entry->rb.rb_left : entry->rb.rb_right);
627 }
628 }
629
630 void
631 _gst_make_oop_weak (OOP oop)
632 {
633 weak_area_tree *entry;
634 weak_area_tree *node = NULL;
635 rb_node_t **p = (rb_node_t **) &_gst_mem.weak_areas;
636
637 oop->flags |= F_WEAK;
638 _gst_mem.numWeakOOPs++;
639
640 while (*p)
641 {
642 node = (weak_area_tree *) *p;
643
644 if (oop < node->oop)
645 p = &(*p)->rb_left;
646 else if (oop > node->oop)
647 p = &(*p)->rb_right;
648 else
649 return;
650 }
651
652 entry = (weak_area_tree *) xmalloc (sizeof (weak_area_tree));
653 entry->oop = oop;
654 entry->rb.rb_parent = &node->rb;
655 entry->rb.rb_left = entry->rb.rb_right = NULL;
656 *p = &(entry->rb);
657
658 rb_rebalance (&entry->rb, (rb_node_t **) &_gst_mem.weak_areas);
659 }
660
661 void
662 _gst_swap_objects (OOP oop1,
663 OOP oop2)
664 {
665 struct oop_s tempOOP;
666 if (oop2->flags & F_WEAK)
667 _gst_make_oop_non_weak (oop2);
668
669 if (oop1->flags & F_WEAK)
670 _gst_make_oop_non_weak (oop1);
671
672 /* Put the two objects in the same generation. FIXME: this can be
673 a cause of early tenuring, especially since one of them is often
674 garbage! */
675 if ((oop1->flags & F_OLD) ^ (oop2->flags & F_OLD))
676 _gst_tenure_oop ((oop1->flags & F_OLD) ? oop2 : oop1);
677
678 #ifdef ENABLE_JIT_TRANSLATION
679 /* We may exchange the translations, but it is very likely that
680 one of the objects does not have one yet, and the other one
681 will never be needed anymore (the object becomes garbage). */
682 if (oop1->flags & F_XLAT)
683 _gst_discard_native_code (oop1);
684
685 if (oop2->flags & F_XLAT)
686 _gst_discard_native_code (oop2);
687 #endif
688
689 tempOOP = *oop2; /* note structure assignment going on here */
690 *oop2 = *oop1;
691 *oop1 = tempOOP;
692
693 /* If the incremental GC has reached oop1 but not oop2 (or vice versa),
694 this flag will end up in the wrong OOP, i.e. in the one that has already
695 been scanned by the incremental GC. Restore things. */
696 if ((oop1->flags & F_REACHABLE) ^ (oop2->flags & F_REACHABLE))
697 {
698 oop1->flags ^= F_REACHABLE;
699 oop2->flags ^= F_REACHABLE;
700 }
701
702 if (oop2->flags & F_WEAK)
703 _gst_make_oop_weak (oop2);
704
705 if (oop1->flags & F_WEAK)
706 _gst_make_oop_weak (oop1);
707 }
708
709
710 void
711 _gst_make_oop_fixed (OOP oop)
712 {
713 gst_object newObj;
714 int size;
715 if (oop->flags & F_FIXED)
716 return;
717
718 if ((oop->flags & F_LOADED) == 0)
719 {
720 size = SIZE_TO_BYTES (TO_INT(oop->object->objSize));
721 newObj = (gst_object) _gst_mem_alloc (_gst_mem.fixed, size);
722 if (!newObj)
723 abort ();
724
725 memcpy (newObj, oop->object, size);
726 if ((oop->flags & F_OLD) == 0)
727 _gst_mem.numOldOOPs++;
728 else
729 _gst_mem_free (_gst_mem.old, oop->object);
730
731 oop->object = newObj;
732 }
733
734 oop->flags &= ~(F_SPACES | F_POOLED);
735 oop->flags |= F_OLD | F_FIXED;
736 }
737
738 void
739 _gst_tenure_oop (OOP oop)
740 {
741 gst_object newObj;
742 if (oop->flags & F_OLD)
743 return;
744
745 if (!(oop->flags & F_FIXED))
746 {
747 int size = SIZE_TO_BYTES (TO_INT(oop->object->objSize));
748 newObj = (gst_object) _gst_mem_alloc (_gst_mem.old, size);
749 if (!newObj)
750 abort ();
751
752 memcpy (newObj, oop->object, size);
753 _gst_mem.numOldOOPs++;
754
755 oop->object = newObj;
756 }
757
758 oop->flags &= ~(F_SPACES | F_POOLED);
759 oop->flags |= F_OLD;
760 }
761
762
763 gst_object
764 _gst_alloc_obj (size_t size,
765 OOP *p_oop)
766 {
767 OOP *newAllocPtr;
768 gst_object p_instance;
769
770 size = ROUNDED_BYTES (size);
771
772 /* We don't want to have allocPtr pointing to the wrong thing during
773 GC, so we use a local var to hold its new value */
774 newAllocPtr = _gst_mem.eden.allocPtr + BYTES_TO_SIZE (size);
775
776 if UNCOMMON (size >= _gst_mem.big_object_threshold)
777 return alloc_old_obj (size, p_oop);
778
779 if UNCOMMON (newAllocPtr >= _gst_mem.eden.maxPtr)
780 {
781 _gst_scavenge ();
782       newAllocPtr = _gst_mem.eden.allocPtr + BYTES_TO_SIZE (size);
783 }
784
785 p_instance = (gst_object) _gst_mem.eden.allocPtr;
786 _gst_mem.eden.allocPtr = newAllocPtr;
787 *p_oop = alloc_oop (p_instance, _gst_mem.active_flag);
788 p_instance->objSize = FROM_INT (BYTES_TO_SIZE (size));
789
790 return p_instance;
791 }
792
793 gst_object
794 alloc_old_obj (size_t size,
795 OOP *p_oop)
796 {
797 gst_object p_instance;
798
799 size = ROUNDED_BYTES (size);
800
801 /* If the object is big enough, we put it directly in oldspace. */
802 p_instance = (gst_object) _gst_mem_alloc (_gst_mem.old, size);
803 if COMMON (p_instance)
804 goto ok;
805
806 _gst_global_gc (size);
807 p_instance = (gst_object) _gst_mem_alloc (_gst_mem.old, size);
808 if COMMON (p_instance)
809 goto ok;
810
811 compact (0);
812 p_instance = (gst_object) _gst_mem_alloc (_gst_mem.old, size);
813 if UNCOMMON (!p_instance)
814 {
815 /* !!! do something more reasonable in the future */
816 _gst_errorf ("Cannot recover, exiting...");
817 exit (1);
818 }
819
820 ok:
821 *p_oop = alloc_oop (p_instance, F_OLD);
822 p_instance->objSize = FROM_INT (BYTES_TO_SIZE (size));
823 return p_instance;
824 }
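/* Illustrative sketch (not taken from this file): a typical instantiation
   site asks for the header plus the indexed slots and then fills in the
   class, relying on _gst_alloc_obj to choose between eden and oldspace and
   to set objSize:

     OOP oop;
     gst_object obj = _gst_alloc_obj (sizeof (gst_object_header)
                                      + numSlots * sizeof (OOP), &oop);
     obj->objClass = classOOP;    <- hypothetical class OOP and slot count

   Objects of at least big_object_threshold bytes skip eden entirely and go
   through alloc_old_obj above.  */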
825
826 gst_object _gst_alloc_words (size_t size)
827 {
828 OOP *newAllocPtr;
829 gst_object p_instance;
830
831 /* We don't want to have allocPtr pointing to the wrong thing during
832 GC, so we use a local var to hold its new value */
833 newAllocPtr = _gst_mem.eden.allocPtr + size;
834
835 if UNCOMMON (newAllocPtr >= _gst_mem.eden.maxPtr)
836 {
837 nomemory (0);
838 abort ();
839 }
840
841 if UNCOMMON (size >= _gst_mem.big_object_threshold)
842 abort ();
843
844 p_instance = (gst_object) _gst_mem.eden.allocPtr;
845 _gst_mem.eden.allocPtr = newAllocPtr;
846 p_instance->objSize = FROM_INT (size);
847 return p_instance;
848 }
849
850
851 void
852 reset_survivor_space (surv_space *space)
853 {
854 space->allocated = space->filled = 0;
855 space->tenurePtr = space->allocPtr = space->topPtr = space->minPtr;
856 }
857
858 void
859 oldspace_after_allocating (heap_data *h, heap_block *blk, size_t sz)
860 {
861 #ifdef MMAN_DEBUG_OUTPUT
862 printf ("Allocating oldspace page at %p (%d)\n", blk, sz);
863 #endif
864
865 add_to_grey_list ((OOP *) blk, sz / sizeof (PTR));
866 _gst_mem.rememberedTableEntries++;
867 }
868
869 void
870 oldspace_before_freeing (heap_data *h, heap_block *blk, size_t sz)
871 {
872 grey_area_node *node, *last, **next;
873
874 #ifdef MMAN_DEBUG_OUTPUT
875 printf ("Freeing oldspace page at %p (%d)\n", blk, sz);
876 #endif
877
878 /* Remove related entries from the remembered table. */
879 for (last = NULL, next = &_gst_mem.grey_pages.head; (node = *next); )
880 if (node->base >= (OOP *)blk
881 && node->base + node->n <= (OOP *)( ((char *)blk) + sz))
882 {
883 #ifdef MMAN_DEBUG_OUTPUT
884 printf (" Remembered table entry removed %p..%p\n",
885 node->base, node->base+node->n);
886 #endif
887
888 _gst_mem.rememberedTableEntries--;
889 *next = node->next;
890 xfree (node);
891 }
892 else
893 {
894 last = node;
895 next = &(node->next);
896 }
897
898 _gst_mem.grey_pages.tail = last;
899 _gst_mem_protect ((PTR) blk, sz, PROT_READ | PROT_WRITE);
900 }
901
902 heap_data *
903 oldspace_nomemory (heap_data *h, size_t sz)
904 {
905 heap_data **p_heap;
906
907 assert (h == _gst_mem.old || h == _gst_mem.fixed);
908 p_heap = (h == _gst_mem.old ? &_gst_mem.old : &_gst_mem.fixed);
909
910 if (!_gst_gc_running)
911 _gst_global_gc (sz);
912 else
913 {
914 /* Already garbage collecting, emergency growth just to satisfy
915 tenuring necessities. */
916 int grow_amount_to_satisfy_rate = h->heap_limit
917 * (100.0 + _gst_mem.space_grow_rate) / 100;
918 int grow_amount_to_satisfy_threshold = (sz + h->heap_total)
919 * 100.0 /_gst_mem.grow_threshold_percent;
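      /* For example, with the default tuning (space_grow_rate 30,
	 grow_threshold_percent 80), a 4 MB heap that needs 512 KB more
	 while 3.8 MB are already in use grows to
	 MAX (4 MB * 1.3, (0.5 MB + 3.8 MB) / 0.8) = MAX (5.2 MB, 5.375 MB),
	 i.e. about 5.4 MB.  */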
920
921 h->heap_limit = MAX (grow_amount_to_satisfy_rate,
922 grow_amount_to_satisfy_threshold);
923 }
924
925 return *p_heap;
926 }
927
928 #ifndef NO_SIGSEGV_HANDLING
929 int oldspace_sigsegv_handler (void* fault_address, int serious)
930 {
931 static int reentering, reentered;
932 void *page;
933 if UNCOMMON (reentering)
934 {
935 reentered = 1;
936 abort();
937 }
938 else
939 {
940 reentered = 0;
941 reentering = 1;
942 }
943
944 page = (char *) fault_address - ((intptr_t) fault_address & (getpagesize() - 1));
945 errno = 0;
946 if (_gst_mem_protect (page, getpagesize(), PROT_READ | PROT_WRITE) == -1 &&
947 (errno == ENOMEM || errno == EFAULT || errno == EACCES || errno == EINVAL))
948 {
949 #if defined (MMAN_DEBUG_OUTPUT)
950 printf ("Plain old segmentation violation -- address = %p\n", page);
951 #endif
952 reentering = 0;
953 abort();
954 }
955
956 /* Try accessing the page */
957 (void) *(volatile char *) fault_address;
958 reentering = 0;
959
960 #if defined (MMAN_DEBUG_OUTPUT)
961 printf ("Unprotected %p (SIGSEGV at %p)\n", page, fault_address);
962 #endif
963
964 _gst_mem.rememberedTableEntries++;
965 add_to_grey_list ((PTR) page, getpagesize() / sizeof (PTR));
966 return !reentered;
967 }
968 #endif
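
/* Together, oldspace_after_allocating, scan_grey_pages and the handler
   above implement the write barrier for old-to-new pointers: freshly
   allocated oldspace pages start out grey (writable and remembered); when
   a scavenge finds no new-space pointers in a page, the page is made
   read-only and dropped from the remembered table; the first write into
   it afterwards faults, and the handler unprotects the page and puts it
   back on the grey list.  */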
969
970
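/* The running averages kept in _gst_mem are exponentially-weighted moving
   averages, with weight _gst_mem.factor (0.4, set in _gst_init_mem) on the
   old value: for instance a previous timeBetweenScavenges of 20 ms and a
   new sample of 10 ms give 0.4 * 20 + 0.6 * 10 = 14 ms.  */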
971 void
972 update_stats (unsigned long *last, double *between, double *duration)
973 {
974 unsigned long now = _gst_get_milli_time ();
975 unsigned long since = now - *last;
976 if (between)
977 *between = _gst_mem.factor * *between
978 + (1 - _gst_mem.factor) * since;
979
980 if (duration)
981 *duration = _gst_mem.factor * *duration
982 + (1 - _gst_mem.factor) * since;
983 else
984 *last = now;
985 }
986
987 void
988 _gst_grow_memory_to (size_t spaceSize)
989 {
990 compact (spaceSize);
991 }
992
993 void
994 grow_memory_no_compact (size_t new_heap_limit)
995 {
996 _gst_mem.old->heap_limit = new_heap_limit;
997 _gst_mem.fixed->heap_limit = new_heap_limit;
998 _gst_mem.numGrowths++;
999 update_stats (&stats.timeOfLastGrowth,
1000 &_gst_mem.timeBetweenGrowths, NULL);
1001 }
1002
1003 void
1004 compact (size_t new_heap_limit)
1005 {
1006 OOP oop;
1007 heap_data *new_heap = init_old_space (
1008 new_heap_limit ? new_heap_limit : _gst_mem.old->heap_limit);
1009
1010 if (new_heap_limit)
1011 {
1012 _gst_mem.fixed->heap_limit = new_heap_limit;
1013 _gst_mem.numGrowths++;
1014 update_stats (&stats.timeOfLastGrowth,
1015 &_gst_mem.timeBetweenGrowths, NULL);
1016 stats.timeOfLastCompaction = stats.timeOfLastGrowth;
1017 }
1018 else
1019 {
1020 /* Do not copy garbage. */
1021 _gst_finish_incremental_gc ();
1022 _gst_mem.numCompactions++;
1023 update_stats (&stats.timeOfLastCompaction, NULL, NULL);
1024 }
1025
1026 _gst_fixup_object_pointers ();
1027
1028 /* Now do the copying loop which will compact oldspace. */
1029 PREFETCH_START (_gst_mem.ot, PREF_READ | PREF_NTA);
1030 for (oop = _gst_mem.ot;
1031 oop < &_gst_mem.ot[_gst_mem.ot_size]; oop++)
1032 {
1033 PREFETCH_LOOP (oop, PREF_READ | PREF_NTA);
1034 if ((oop->flags & (F_OLD | F_FIXED | F_LOADED)) == F_OLD)
1035 {
1036 gst_object new;
1037 size_t size = SIZE_TO_BYTES (TO_INT (oop->object->objSize));
1038 new = _gst_mem_alloc (new_heap, size);
1039 memcpy (new, oop->object, size);
1040 _gst_mem_free (_gst_mem.old, oop->object);
1041 oop->object = new;
1042 }
1043 }
1044
1045 xfree (_gst_mem.old);
1046 _gst_mem.old = new_heap;
1047 new_heap->nomemory = oldspace_nomemory;
1048
1049 _gst_restore_object_pointers ();
1050
1051 update_stats (&stats.timeOfLastCompaction, NULL, &_gst_mem.timeToCompact);
1052 }
1053
1054
1055 void
1056 _gst_global_compact ()
1057 {
1058 _gst_global_gc (0);
1059 compact (0);
1060 }
1061
1062 void
1063 _gst_global_gc (int next_allocation)
1064 {
1065 const char *s;
1066 int old_limit;
1067
1068 _gst_mem.numGlobalGCs++;
1069
1070 old_limit = _gst_mem.old->heap_limit;
1071 _gst_mem.old->heap_limit = 0;
1072
1073 if (!_gst_gc_running++
1074 && _gst_gc_message
1075 && _gst_verbosity >= 2
1076 && !_gst_regression_testing)
1077 {
1078 /* print the first part of this message before we finish
1079 scanning oop table for live ones, so that the delay caused by
1080 this scanning is apparent. Note the use of stderr for the
1081 printed message. The idea here was that generated output
1082 could be treated as Smalltalk code, HTML or whatever else you
1083 want without harm. */
1084 fflush (stdout);
1085 fprintf (stderr, "\"Global garbage collection... ");
1086 fflush (stderr);
1087 }
1088
1089 update_stats (&stats.timeOfLastGlobalGC,
1090 &_gst_mem.timeBetweenGlobalGCs, NULL);
1091
1092 _gst_finish_incremental_gc ();
1093 _gst_fixup_object_pointers ();
1094 copy_oops ();
1095 _gst_tenure_all_survivors ();
1096 mark_oops ();
1097 _gst_mem.live_flags &= ~F_OLD;
1098 _gst_mem.live_flags |= F_REACHABLE;
1099 check_weak_refs ();
1100 _gst_restore_object_pointers ();
1101 #if defined (GC_DEBUGGING)
1102 _gst_check_oop_table ();
1103 #endif
1104 reset_incremental_gc (_gst_mem.ot);
1105
1106 update_stats (&stats.timeOfLastGlobalGC,
1107 NULL, &_gst_mem.timeToCollect);
1108
1109 s = "done";
1110
1111 /* Compaction and growth tests are only done during the outermost GC (well
1112 I am not sure that GC's can nest...) */
1113 if (old_limit)
1114 {
1115 old_limit = MAX (old_limit, _gst_mem.old->heap_total);
1116
1117 /* if memory is still low, go all the way on sweeping */
1118 if UNCOMMON ((next_allocation + _gst_mem.old->heap_total)
1119 * 100.0 / old_limit > _gst_mem.grow_threshold_percent)
1120 {
1121 int target_limit;
1122 _gst_finish_incremental_gc ();
1123
1124	  /* Check if it's time to compact the heap.  Compaction makes the most
1125	     sense if there was lots of garbage, and the heap limit is shrunk
1126	     to avoid excessive garbage accumulation in the next round. */
1127 target_limit = MAX(_gst_mem.eden.totalSize,
1128 ((next_allocation + _gst_mem.old->heap_total)
1129 * (100.0 + _gst_mem.space_grow_rate)
1130 / _gst_mem.grow_threshold_percent));
1131 if (target_limit < old_limit)
1132 {
1133 s = "done, heap compacted";
1134 compact (0);
1135 grow_memory_no_compact (target_limit);
1136 }
1137 }
1138
1139 /* Check if it's time to grow the heap. */
1140 if UNCOMMON ((next_allocation + _gst_mem.old->heap_total)
1141 * 100.0 / old_limit > _gst_mem.grow_threshold_percent
1142 || (next_allocation + _gst_mem.fixed->heap_total)
1143 * 100.0 / _gst_mem.fixed->heap_limit > _gst_mem.grow_threshold_percent)
1144 {
1145 int grow_amount_to_satisfy_rate = old_limit
1146 * (100.0 + _gst_mem.space_grow_rate) / 100;
1147 int grow_amount_to_satisfy_threshold =
1148 (next_allocation + _gst_mem.old->heap_total)
1149 * 100.0 /_gst_mem.grow_threshold_percent;
1150
1151 s = "done, heap grown";
1152 grow_memory_no_compact (MAX (grow_amount_to_satisfy_rate,
1153 grow_amount_to_satisfy_threshold));
1154 }
1155 }
1156
1157 if (!--_gst_gc_running
1158 && _gst_gc_message
1159 && _gst_verbosity >= 2
1160 && !_gst_regression_testing)
1161 {
1162 fprintf (stderr, "%s\"\n", s);
1163 fflush (stderr);
1164 }
1165
1166 /* If the heap was grown, don't reset the old limit! */
1167 if (!_gst_mem.old->heap_limit)
1168 _gst_mem.old->heap_limit = old_limit;
1169
1170 _gst_invalidate_croutine_cache ();
1171 mourn_objects ();
1172 }
1173
1174 void
1175 _gst_scavenge (void)
1176 {
1177 int oldBytes, reclaimedBytes, tenuredBytes, reclaimedPercent;
1178
1179 /* Check if oldspace had to be grown in emergency. */
1180 size_t prev_heap_limit = _gst_mem.old->heap_limit;
1181
1182 /* Force a GC as soon as possible if we're low on OOPs or memory. */
1183 if UNCOMMON (_gst_mem.num_free_oops < LOW_WATER_OOP_THRESHOLD
1184 || _gst_mem.old->heap_total * 100.0 / _gst_mem.old->heap_limit >
1185 _gst_mem.grow_threshold_percent
1186 || _gst_mem.fixed->heap_total * 100.0 / _gst_mem.fixed->heap_limit >
1187 _gst_mem.grow_threshold_percent)
1188 {
1189 _gst_global_gc (0);
1190 _gst_incremental_gc_step ();
1191 return;
1192 }
1193
1194 if (!_gst_gc_running++
1195 && _gst_gc_message
1196 && _gst_verbosity > 2
1197 && !_gst_regression_testing)
1198 {
1199 /* print the first part of this message before we finish
1200 scanning oop table for live ones, so that the delay caused by
1201 this scanning is apparent. Note the use of stderr for the
1202 printed message. The idea here was that generated output
1203 could be treated as Smalltalk code, HTML or whatever else you
1204 want without harm. */
1205 fflush (stdout);
1206 fprintf (stderr, "\"Scavenging... ");
1207 fflush (stderr);
1208 }
1209
1210 oldBytes = (char *) _gst_mem.eden.allocPtr - (char *) _gst_mem.eden.minPtr +
1211 _gst_mem.active_half->filled;
1212
1213 _gst_mem.numScavenges++;
1214 update_stats (&stats.timeOfLastScavenge,
1215 &_gst_mem.timeBetweenScavenges, NULL);
1216
1217 _gst_finish_incremental_gc ();
1218 _gst_fixup_object_pointers ();
1219 copy_oops ();
1220 check_weak_refs ();
1221 _gst_restore_object_pointers ();
1222 reset_incremental_gc (_gst_mem.ot);
1223
1224 update_stats (&stats.timeOfLastScavenge,
1225 NULL, &_gst_mem.timeToScavenge);
1226
1227 reclaimedBytes = oldBytes - _gst_mem.active_half->allocated;
1228 if (reclaimedBytes < 0)
1229 reclaimedBytes = 0;
1230
1231 tenuredBytes = _gst_mem.active_half->allocated - _gst_mem.active_half->filled;
1232 reclaimedPercent = 100.0 * reclaimedBytes / oldBytes;
1233
1234 if (!--_gst_gc_running
1235 && _gst_gc_message
1236 && _gst_verbosity > 2
1237 && !_gst_regression_testing)
1238 {
1239 fprintf (stderr, "%d%% reclaimed, done\"\n", reclaimedPercent);
1240 fflush (stderr);
1241 }
1242
1243 _gst_mem.reclaimedBytesPerScavenge =
1244 _gst_mem.factor * reclaimedBytes +
1245 (1 - _gst_mem.factor) * _gst_mem.reclaimedBytesPerScavenge;
1246
1247 _gst_mem.reclaimedPercentPerScavenge =
1248 _gst_mem.factor * reclaimedPercent +
1249 (1 - _gst_mem.factor) * _gst_mem.reclaimedPercentPerScavenge;
1250
1251 _gst_mem.tenuredBytesPerScavenge =
1252 _gst_mem.factor * tenuredBytes +
1253 (1 - _gst_mem.factor) * _gst_mem.tenuredBytesPerScavenge;
1254
1255 _gst_invalidate_croutine_cache ();
1256 mourn_objects ();
1257
1258 /* If tenuring had to grow oldspace, do a global garbage collection
1259 now. */
1260 if (_gst_mem.old->heap_limit > prev_heap_limit)
1261 {
1262 _gst_global_gc (0);
1263 _gst_incremental_gc_step ();
1264 return;
1265 }
1266 }
1267
1268
1269 mst_Boolean
1270 incremental_gc_running ()
1271 {
1272 return (_gst_mem.next_oop_to_sweep > _gst_mem.last_swept_oop);
1273 }
1274
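/* The incremental sweep walks the OOP table downwards: next_oop_to_sweep
   is the next OOP to be examined going top to bottom, and last_swept_oop
   the highest OOP already swept bottom to top, so the collector is running
   as long as the former lies above the latter.  _gst_finish_incremental_gc
   does the whole remaining walk at once, while _gst_incremental_gc_step
   below frees at most INCREMENTAL_SWEEP_STEP dead OOPs per call.  */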
1275 void
1276 _gst_finish_incremental_gc ()
1277 {
1278 OOP oop, firstOOP;
1279
1280 #if defined (GC_DEBUG_OUTPUT)
1281 printf ("Completing sweep (%p...%p), validity flags %x\n", _gst_mem.last_swept_oop,
1282 _gst_mem.next_oop_to_sweep, _gst_mem.live_flags);
1283 #endif
1284
1285 PREFETCH_START (_gst_mem.next_oop_to_sweep, PREF_BACKWARDS | PREF_READ | PREF_NTA);
1286 for (oop = _gst_mem.next_oop_to_sweep, firstOOP = _gst_mem.last_swept_oop;
1287 oop > firstOOP; oop--)
1288 {
1289 PREFETCH_LOOP (oop, PREF_BACKWARDS | PREF_READ | PREF_NTA);
1290 if (IS_OOP_VALID_GC (oop))
1291 {
1292 maybe_release_xlat (oop);
1293 oop->flags &= ~F_REACHABLE;
1294 }
1295 else
1296 {
1297 _gst_sweep_oop (oop);
1298 _gst_mem.num_free_oops++;
1299 if (oop == _gst_mem.last_allocated_oop)
1300 _gst_mem.last_allocated_oop--;
1301 }
1302 }
1303
1304 _gst_mem.next_oop_to_sweep = oop;
1305 _gst_finished_incremental_gc ();
1306 }
1307
1308 void
1309 _gst_finished_incremental_gc (void)
1310 {
1311 if (_gst_mem.live_flags & F_OLD)
1312 return;
1313
1314 _gst_mem.live_flags &= ~F_REACHABLE;
1315 _gst_mem.live_flags |= F_OLD;
1316
1317 if (stats.reclaimedOldSpaceBytesSinceLastGlobalGC)
1318 {
1319 _gst_mem.reclaimedBytesPerGlobalGC =
1320 _gst_mem.factor * stats.reclaimedOldSpaceBytesSinceLastGlobalGC +
1321 (1 - _gst_mem.factor) * _gst_mem.reclaimedBytesPerGlobalGC;
1322 stats.reclaimedOldSpaceBytesSinceLastGlobalGC = 0;
1323 }
1324 #ifdef ENABLE_JIT_TRANSLATION
1325 /* Go and really free the blocks associated to garbage collected
1326 native code. */
1327 _gst_free_released_native_code ();
1328 #endif
1329 }
1330
1331 mst_Boolean
1332 _gst_incremental_gc_step ()
1333 {
1334 OOP oop, firstOOP;
1335 int i;
1336
1337 if (!incremental_gc_running ())
1338 return true;
1339
1340 i = 0;
1341 firstOOP = _gst_mem.last_swept_oop;
1342 for (oop = _gst_mem.next_oop_to_sweep; oop > firstOOP; oop--)
1343 {
1344 if (IS_OOP_VALID_GC (oop))
1345 {
1346 maybe_release_xlat (oop);
1347 oop->flags &= ~F_REACHABLE;
1348 }
1349 else
1350 {
1351 _gst_sweep_oop (oop);
1352 _gst_mem.num_free_oops++;
1353 if (oop == _gst_mem.last_allocated_oop)
1354 _gst_mem.last_allocated_oop--;
1355 if (++i == INCREMENTAL_SWEEP_STEP)
1356 {
1357 _gst_mem.next_oop_to_sweep = oop - 1;
1358 return false;
1359 }
1360 }
1361 }
1362
1363 _gst_mem.next_oop_to_sweep = oop;
1364 _gst_finished_incremental_gc ();
1365 return true;
1366 }
1367
1368 void
1369 reset_incremental_gc (OOP firstOOP)
1370 {
1371 OOP oop;
1372
1373 /* This loop is the same as that in alloc_oop. Skip low OOPs
1374 that are allocated */
1375 for (oop = firstOOP; IS_OOP_VALID_GC (oop); oop->flags &= ~F_REACHABLE, oop++)
1376 #if defined(ENABLE_JIT_TRANSLATION)
1377 if (oop->flags & F_XLAT)
1378 {
1379 if (oop->flags & F_XLAT_REACHABLE)
1380 /* Reachable, and referenced by active contexts. Keep it
1381 around. */
1382 oop->flags &= ~F_XLAT_2NDCHANCE;
1383 else
1384 {
1385 /* Reachable, but not referenced by active contexts. We
1386 give it a second chance... */
1387 if (oop->flags & F_XLAT_2NDCHANCE)
1388 _gst_release_native_code (oop);
1389
1390 oop->flags ^= F_XLAT_2NDCHANCE;
1391 }
1392 }
1393 #else
1394 ;
1395 #endif
1396
1397 /* Initialize these here so that IS_OOP_VALID works correctly. */
1398 _gst_mem.next_oop_to_sweep = _gst_mem.last_allocated_oop;
1399 _gst_mem.last_swept_oop = oop - 1;
1400
1401 #ifdef NO_INCREMENTAL_GC
1402 _gst_finish_incremental_gc ();
1403 #else
1404 /* Skip high OOPs that are unallocated. */
1405 for (oop = _gst_mem.last_allocated_oop; !IS_OOP_VALID (oop); oop--)
1406 _gst_sweep_oop (oop);
1407
1408 _gst_mem.last_allocated_oop = oop;
1409 _gst_mem.next_oop_to_sweep = oop;
1410 #endif
1411
1412 _gst_mem.num_free_oops = _gst_mem.ot_size -
1413 (_gst_mem.last_allocated_oop - _gst_mem.ot);
1414
1415 /* Check if it's time to grow the OOP table. */
1416 if (_gst_mem.num_free_oops * 100.0 / _gst_mem.ot_size <
1417 100 - _gst_mem.grow_threshold_percent)
1418 _gst_realloc_oop_table (_gst_mem.ot_size
1419 * (100 + _gst_mem.space_grow_rate) / 100);
1420
1421 #if defined (GC_DEBUG_OUTPUT)
1422 printf ("Last allocated OOP %p\n"
1423 "Next OOP swept top to bottom %p, highest swept bottom to top %p\n",
1424 _gst_mem.last_allocated_oop,
1425 _gst_mem.next_oop_to_sweep, _gst_mem.last_swept_oop);
1426 #endif
1427 }
1428
1429
1430 void
1431 _gst_sweep_oop (OOP oop)
1432 {
1433 if (IS_OOP_FREE (oop))
1434 return;
1435
1436 #ifdef ENABLE_JIT_TRANSLATION
1437 if (oop->flags & F_XLAT)
1438 /* Unreachable, always free the native code. It is *not* optional
1439 to free the code in this case -- and I'm not talking about memory
1440 leaks: a different method could use the same OOP as this one and
1441 the old method would be executed instead of the new one! */
1442 _gst_release_native_code (oop);
1443 #endif
1444
1445 if UNCOMMON (oop->flags & F_WEAK)
1446 _gst_make_oop_non_weak (oop);
1447
1448 /* Free unreachable oldspace objects. */
1449 if UNCOMMON (oop->flags & F_FIXED)
1450 {
1451 _gst_mem.numOldOOPs--;
1452 stats.reclaimedOldSpaceBytesSinceLastGlobalGC +=
1453 SIZE_TO_BYTES (TO_INT (OOP_TO_OBJ (oop)->objSize));
1454 if ((oop->flags & F_LOADED) == 0)
1455 _gst_mem_free (_gst_mem.fixed, oop->object);
1456 }
1457 else if UNCOMMON (oop->flags & F_OLD)
1458 {
1459 _gst_mem.numOldOOPs--;
1460 stats.reclaimedOldSpaceBytesSinceLastGlobalGC +=
1461 SIZE_TO_BYTES (TO_INT (OOP_TO_OBJ (oop)->objSize));
1462 if ((oop->flags & F_LOADED) == 0)
1463 _gst_mem_free (_gst_mem.old, oop->object);
1464 }
1465
1466 oop->flags = 0;
1467 }
1468
1469 void
1470 mourn_objects (void)
1471 {
1472 gst_object array;
1473 long size;
1474 gst_processor_scheduler processor;
1475
1476 size = _gst_buffer_size () / sizeof (OOP);
1477 if (!size)
1478 return;
1479
1480 processor = (gst_processor_scheduler) OOP_TO_OBJ (_gst_processor_oop);
1481 if (!IS_NIL (processor->gcArray))
1482 {
1483 _gst_errorf ("Too many garbage collections, finalizers missed!");
1484 _gst_errorf ("This is a bug, please report.");
1485 }
1486 else
1487 {
1488 /* Copy the buffer into an Array */
1489 array = new_instance_with (_gst_array_class, size, &processor->gcArray);
1490 _gst_copy_buffer (array->data);
1491 if (!IS_NIL (processor->gcSemaphore))
1492 {
1493 static async_queue_entry e;
1494 e.func = _gst_do_async_signal;
1495 e.data = processor->gcSemaphore;
1496 _gst_async_call_internal (&e);
1497 }
1498 else
1499 {
1500 _gst_errorf ("Running finalizers before initialization.");
1501 abort ();
1502 }
1503 }
1504 }
1505
1506
1507 #define IS_QUEUE_SPLIT(q) ((q)->topPtr != (q)->allocPtr)
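
/* The survivor spaces and the tenuring queue are circular buffers:
   allocPtr is the write pointer, tenurePtr the read pointer, and topPtr
   marks where the data wraps.  While the queue is not split the live data
   is tenurePtr...allocPtr (and topPtr == allocPtr); once queue_put wraps,
   it is tenurePtr...topPtr followed by minPtr...allocPtr, and queue_get
   moves tenurePtr back to minPtr when it reaches topPtr.  */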
1508
1509 OOP *
1510 queue_get (surv_space *q, int n)
1511 {
1512 OOP *result = q->tenurePtr;
1513 q->filled -= n * sizeof (PTR);
1514 q->tenurePtr += n;
1515
1516 /* Check if the read pointer has to wrap. */
1517 if (q->tenurePtr == q->topPtr)
1518 {
1519 q->tenurePtr = q->minPtr;
1520 q->topPtr = q->allocPtr;
1521 }
1522
1523 return result;
1524 }
1525
1526 OOP *
1527 queue_put (surv_space *q, OOP *src, int n)
1528 {
1529 OOP *result, *newAlloc;
1530 for (;;)
1531 {
1532 result = q->allocPtr;
1533 newAlloc = q->allocPtr + n;
1534
1535 #if defined(GC_DEBUG_OUTPUT)
1536 printf ("Top %p alloc %p tenure %p\n", q->topPtr, q->allocPtr, q->tenurePtr);
1537 #endif
1538
1539 if (IS_QUEUE_SPLIT (q) && UNCOMMON (newAlloc > q->tenurePtr))
1540 /* We tenure old objects as we copy more objects into
1541 the circular survivor space. */
1542 {
1543 #if defined(GC_DEBUG_OUTPUT)
1544 printf ("Tenure: current max %p, needed %p\n", q->tenurePtr, newAlloc);
1545 #endif
1546 tenure_one_object();
1547 continue;
1548 }
1549
1550 if UNCOMMON (newAlloc > q->maxPtr)
1551 {
1552 #if defined(GC_DEBUG_OUTPUT)
1553 printf ("Wrap: survivor space ends at %p, needed %p\n", q->maxPtr, newAlloc);
1554 #endif
1555 q->topPtr = q->allocPtr;
1556 q->allocPtr = q->minPtr;
1557 continue;
1558 }
1559
1560 break;
1561 }
1562
1563 if (!IS_QUEUE_SPLIT (q))
1564     /* We are still extending towards the top.  Push the valid area
1565        further (the valid area is space...topPtr plus minPtr...allocPtr
1566        if topPtr != allocPtr, i.e. not circular yet, and space...allocPtr
1567        if topPtr == allocPtr, i.e. circular). */
1568 q->topPtr = newAlloc;
1569
1570 q->filled += n * sizeof (PTR);
1571 q->allocated += n * sizeof (PTR);
1572 q->allocPtr = newAlloc;
1573 memcpy (result, src, n * sizeof (PTR));
1574 return result;
1575 }
1576
1577 void
1578 tenure_one_object ()
1579 {
1580 OOP oop;
1581
1582 oop = *_gst_mem.tenuring_queue.tenurePtr;
1583 #if defined(GC_DEBUG_OUTPUT)
1584 printf (" ");
1585 _gst_display_oop (oop);
1586 #endif
1587
1588 if (_gst_mem.scan.current == oop)
1589 {
1590 #if defined(GC_DEBUG_OUTPUT)
1591 printf ("Tenured OOP %p was being scanned\n", oop);
1592 #endif
1593
1594 _gst_tenure_oop (oop);
1595 _gst_mem.scan.at = (OOP *) oop->object;
1596 }
1597
1598 else if (_gst_mem.scan.queue_at == _gst_mem.tenuring_queue.tenurePtr)
1599 {
1600 #if defined(GC_DEBUG_OUTPUT)
1601 printf ("Tenured OOP %p had not been scanned yet\n", oop);
1602 #endif
1603
1604 /* Since tenurePtr is going to advance by a place, we must
1605 keep the Cheney scan pointer up to date. Check if it has
1606 to wrap! */
1607 _gst_mem.scan.queue_at++;
1608 if (_gst_mem.scan.queue_at >= _gst_mem.tenuring_queue.topPtr
1609 && IS_QUEUE_SPLIT (&_gst_mem.tenuring_queue))
1610 _gst_mem.scan.queue_at = _gst_mem.tenuring_queue.minPtr;
1611
1612 _gst_tenure_oop (oop);
1613 add_grey_object (oop);
1614 }
1615 else
1616 _gst_tenure_oop (oop);
1617
1618 queue_get (&_gst_mem.tenuring_queue, 1);
1619 queue_get (_gst_mem.active_half, TO_INT (oop->object->objSize));
1620 }
1621
1622 void
1623 _gst_grey_oop_range (PTR from, size_t size)
1624 {
1625 volatile char *last, *page;
1626
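  /* The self-assignment below is not a no-op: PAGE is a pointer to
     volatile, so the store really happens, and if the page is currently
     write-protected it raises SIGSEGV.  oldspace_sigsegv_handler then
     unprotects the page and puts it back on the grey list, which is
     exactly the greying effect we want here.  */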
1627 for (last = ((char *)from) + size,
1628 page = ((char *)from) - ((intptr_t) from & (getpagesize() - 1));
1629 page < last;
1630 page += getpagesize())
1631 *page = *page;
1632 }
1633
1634
1635 void
1636 add_grey_object (OOP oop)
1637 {
1638 grey_area_node *entry;
1639 gst_object obj = OOP_TO_OBJ (oop);
1640 int numFields = scanned_fields_in (obj, oop->flags);
1641 OOP *base = &(obj->objClass);
1642
1643 if (!numFields)
1644 return;
1645
1646 /* For ephemeron, skip the first field and the class. */
1647 if (oop->flags & F_EPHEMERON)
1648 {
1649 numFields -= &(obj->data[1]) - base;
1650 base = &(obj->data[1]);
1651 }
1652
1653 entry = (grey_area_node *) xmalloc (sizeof (grey_area_node));
1654 entry->base = base;
1655 entry->n = numFields;
1656 entry->oop = oop;
1657 entry->next = NULL;
1658 if (_gst_mem.grey_areas.tail)
1659 _gst_mem.grey_areas.tail->next = entry;
1660 else
1661 _gst_mem.grey_areas.head = entry;
1662
1663 _gst_mem.grey_areas.tail = entry;
1664 }
1665
1666 void
1667 add_to_grey_list (OOP *base, int n)
1668 {
1669 grey_area_node *entry = (grey_area_node *) xmalloc (sizeof (grey_area_node));
1670 entry->base = base;
1671 entry->n = n;
1672 entry->oop = NULL;
1673 entry->next = NULL;
1674 if (_gst_mem.grey_pages.tail)
1675 _gst_mem.grey_pages.tail->next = entry;
1676 else
1677 _gst_mem.grey_pages.head = entry;
1678
1679 _gst_mem.grey_pages.tail = entry;
1680 }
1681
1682 void
1683 _gst_tenure_all_survivors ()
1684 {
1685 OOP oop;
1686 while (_gst_mem.tenuring_queue.filled)
1687 {
1688 oop = *queue_get (&_gst_mem.tenuring_queue, 1);
1689 _gst_tenure_oop (oop);
1690 }
1691 }
1692
1693 void
1694 check_weak_refs ()
1695 {
1696 rb_node_t *node;
1697 rb_traverse_t t;
1698
1699 for (node = rb_first(&(_gst_mem.weak_areas->rb), &t);
1700 node; node = rb_next (&t))
1701 {
1702 weak_area_tree *area = (weak_area_tree *) node;
1703 mst_Boolean mourn = false;
1704 OOP *field, oop;
1705 int n;
1706
1707 oop = area->oop;
1708 if (!IS_OOP_VALID_GC (oop))
1709 continue;
1710
1711 for (field = (OOP *) oop->object + OBJ_HEADER_SIZE_WORDS,
1712 n = NUM_OOPS (oop->object); n--; field++)
1713 {
1714 OOP oop = *field;
1715 if (IS_INT (oop))
1716 continue;
1717
1718 if (!IS_OOP_VALID_GC (oop))
1719 {
1720 mourn = true;
1721 *field = _gst_nil_oop;
1722 }
1723 }
1724
1725 if (mourn)
1726 _gst_add_buf_pointer (area->oop);
1727 }
1728 }
1729
1730 void
1731 copy_oops (void)
1732 {
1733 _gst_reset_buffer ();
1734
1735 /* Do the flip! */
1736 _gst_mem.live_flags ^= F_SPACES;
1737 _gst_mem.active_flag ^= F_SPACES;
1738 _gst_mem.active_half = &_gst_mem.surv[_gst_mem.active_flag == F_ODD];
1739
1740 reset_survivor_space (_gst_mem.active_half);
1741 reset_survivor_space (&_gst_mem.tenuring_queue);
1742
1743 /* And the pointer for Cheney scanning. */
1744 _gst_mem.scan.queue_at = _gst_mem.tenuring_queue.tenurePtr;
1745
1746 /* Do these first, they are more likely to stay around for long,
1747 so it makes sense to make their tenuring more likely (the first
1748 copied objects are also tenured first). */
1749 scan_grey_pages ();
1750
1751 _gst_copy_registered_oops ();
1752 cheney_scan ();
1753
1754 /* Do these last since they are often alive only till the next
1755 scavenge. */
1756 _gst_copy_processor_registers ();
1757 cheney_scan ();
1758
1759 scan_grey_objects ();
1760
1761 /* Reset the new-space pointers */
1762 _gst_empty_context_pool ();
1763 _gst_mem.eden.allocPtr = _gst_mem.eden.minPtr;
1764 }
1765
1766 void
1767 _gst_print_grey_list (mst_Boolean check_pointers)
1768 {
1769 grey_area_node *node;
1770 OOP *pOOP, oop;
1771 int i, n;
1772
1773 for (n = 0, node = _gst_mem.grey_pages.head; node; node = node->next, n++)
1774 {
1775 int new_pointers = 0;
1776 if (check_pointers)
1777 for (new_pointers = 0, pOOP = node->base, i = node->n; i--; pOOP++)
1778 {
1779 PREFETCH_LOOP (pOOP, PREF_READ | PREF_NTA);
1780 oop = *pOOP;
1781
1782 /* Not all addresses are known to contain valid OOPs! */
1783 if (!IS_OOP_ADDR (oop))
1784 continue;
1785
1786 if (!IS_OOP_NEW (oop))
1787 continue;
1788
1789 new_pointers++;
1790 }
1791
1792 printf ("%11p%c ", node->base, new_pointers == 0 ? ' ' : '*');
1793 if ((n & 3) == 3)
1794 putchar ('\n');
1795 }
1796
1797 if (_gst_mem.grey_pages.tail)
1798 printf (" (tail = %12p)", _gst_mem.grey_pages.tail->base);
1799
1800 printf ("\n");
1801 }
1802
1803 void
1804 scan_grey_pages ()
1805 {
1806 grey_area_node *node, **next, *last;
1807 OOP *pOOP, oop;
1808 int i, n;
1809
1810 #if defined (MMAN_DEBUG_OUTPUT)
1811 printf ("Pages on the grey list:\n");
1812 _gst_print_grey_list (true);
1813 #endif
1814
1815 for (last = NULL, next = &_gst_mem.grey_pages.head; (node = *next); )
1816 {
1817 #if defined(GC_DEBUG_OUTPUT) || defined(MMAN_DEBUG_OUTPUT)
1818 printf ("Scanning grey page %p...%p ", node->base, node->base + node->n);
1819 #if defined(GC_DEBUG_OUTPUT)
1820 putchar ('\n');
1821 #else
1822 fflush (stdout);
1823 #endif
1824 #endif
1825
1826 PREFETCH_START (node->base, PREF_READ | PREF_NTA);
1827 for (n = 0, pOOP = node->base, i = node->n; i--; pOOP++)
1828 {
1829 PREFETCH_LOOP (pOOP, PREF_READ | PREF_NTA);
1830 oop = *pOOP;
1831
1832 /* Not all addresses are known to contain valid OOPs! */
1833 if (!IS_OOP_ADDR (oop))
1834 continue;
1835
1836 if (!IS_OOP_NEW (oop))
1837 continue;
1838
1839 n++;
1840 if (!IS_OOP_COPIED (oop))
1841 _gst_copy_an_oop (oop);
1842 }
1843
1844 #if !defined (NO_SIGSEGV_HANDLING)
1845 if (!n)
1846 {
1847 /* The entry was temporary, or we found no new-space
1848 pointers in it. Delete it and make the page read-only. */
1849 #if defined (MMAN_DEBUG_OUTPUT)
1850 printf ("Protecting %p\n", node->base);
1851 #endif
1852 _gst_mem.rememberedTableEntries--;
1853 _gst_mem_protect ((PTR) node->base, node->n * sizeof(OOP), PROT_READ);
1854 *next = node->next;
1855 xfree (node);
1856 }
1857 else
1858 #endif
1859 {
1860 #if defined (MMAN_DEBUG_OUTPUT)
1861 printf ("Found %d pointers\n", n);
1862 #endif
1863 last = node;
1864 next = &(node->next);
1865 }
1866
1867 cheney_scan ();
1868 }
1869
1870 _gst_mem.grey_pages.tail = last;
1871
1872 #if defined (MMAN_DEBUG_OUTPUT)
1873 printf ("Pages left on the grey list:\n");
1874 _gst_print_grey_list (false);
1875 #endif
1876 }
1877
1878 void
1879 scan_grey_objects()
1880 {
1881 grey_area_node *node, *next;
1882 OOP oop;
1883 gst_object obj;
1884
1885 for (next = _gst_mem.grey_areas.head; (node = next); )
1886 {
1887 oop = node->oop;
1888 obj = OOP_TO_OBJ (oop);
1889
1890 if (oop->flags & F_EPHEMERON)
1891 /* Objects might have moved, so update node->base. */
1892 node->base = (OOP *) &obj->data[1];
1893
1894 #if defined(GC_DEBUG_OUTPUT)
1895 printf ("Scanning grey range %p...%p (%p)\n", node->base, node->base + node->n, oop);
1896 #endif
1897
1898 _gst_copy_oop_range (node->base, node->base + node->n);
1899
1900 if (oop->flags & F_EPHEMERON)
1901 {
1902 OOP key = obj->data[0];
1903
1904 /* Copy the key, mourn the object if it was not reachable. */
1905 if (!IS_OOP_COPIED (key))
1906 {
1907 _gst_copy_an_oop (key);
1908 _gst_add_buf_pointer (oop);
1909 }
1910 }
1911
1912 _gst_mem.grey_areas.head = next = node->next;
1913 xfree (node);
1914 if (!next)
1915 _gst_mem.grey_areas.tail = NULL;
1916
1917 cheney_scan ();
1918
1919 /* The scan might have greyed more areas. */
1920 if (!next)
1921 next = _gst_mem.grey_areas.head;
1922 }
1923
1924 }
1925
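/* Return how many fields of OBJECT the collector must scan, counting
   from the objClass field.  Contexts are scanned only up to their stack
   pointer, and weak objects contribute just the class.  */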
int
scanned_fields_in (gst_object object,
                   int flags)
{
  if COMMON (!(flags & (F_WEAK | F_CONTEXT)))
    {
      int size = NUM_OOPS (object);
      return object->data + size - &object->objClass;
    }

  if COMMON (flags & F_CONTEXT)
    {
      gst_method_context ctx;
      intptr_t methodSP;
      ctx = (gst_method_context) object;
      methodSP = TO_INT (ctx->spOffset);
      return ctx->contextStack + methodSP + 1 - &ctx->objClass;
    }

  /* Weak object, only mark the class.  */
  return 1;
}

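/* Breadth-first (Cheney-style) scan of the tenuring queue: keep
   processing entries until the scan pointer catches up with the
   allocation pointer, copying every new-space object reachable from the
   objects copied so far.  Ephemerons are skipped here and handled later
   by scan_grey_objects.  */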
void
cheney_scan (void)
{
#if defined(GC_DEBUG_OUTPUT)
  printf ("Starting Cheney scan\n");
#endif

  while (_gst_mem.scan.queue_at != _gst_mem.tenuring_queue.allocPtr)
    {
      OOP oop;
      int i, numFields;

      if (_gst_mem.scan.queue_at >= _gst_mem.tenuring_queue.topPtr)
        _gst_mem.scan.queue_at = _gst_mem.tenuring_queue.minPtr;

      if (_gst_mem.scan.queue_at == _gst_mem.tenuring_queue.allocPtr)
        break;

      oop = *_gst_mem.scan.queue_at;

#if defined(GC_DEBUGGING)
      if (!IS_OOP_ADDR (oop))
        abort ();
#endif

#if defined(GC_DEBUG_OUTPUT)
      printf (">Scan ");
      _gst_display_oop (oop);
#endif

      _gst_mem.scan.current = oop;
      _gst_mem.scan.queue_at++;

      if (oop->flags & F_EPHEMERON)
        continue;

      _gst_mem.scan.at = (OOP *) OOP_TO_OBJ (oop);
      numFields = scanned_fields_in (OOP_TO_OBJ (oop), oop->flags);

      /* The +1 below is to skip the size field.  */
      for (i = 0; i < numFields; i++)
        MAYBE_COPY_OOP (_gst_mem.scan.at[i+1]);
    }

#if defined(GC_DEBUG_OUTPUT)
  printf ("Ending Cheney scan\n");
#endif
}

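/* Copy every new-space object referenced by the OOPs between CUROOP
   (included) and ATENDOOP (excluded).  */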
void
_gst_copy_oop_range (OOP *curOOP, OOP *atEndOOP)
{
  OOP *pOOP;
  for (pOOP = curOOP; pOOP < atEndOOP; pOOP++)
    MAYBE_COPY_OOP (*pOOP);
}

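/* Copy the object pointed to by OOP into the active survivor semispace
   and put OOP on the tenuring queue.  The loop then follows the chain
   of first not-yet-copied children, so that each child is copied close
   to its parent and locality of reference improves.  Ephemerons are
   only registered as grey objects and processed later.  */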
void
_gst_copy_an_oop (OOP oop)
{
  int i, n;
  do
    {
      gst_object obj;
      OOP *pData;

      obj = OOP_TO_OBJ (oop);
      pData = (OOP *) obj;

#if defined(GC_DEBUG_OUTPUT)
      printf (">Copy ");
      _gst_display_oop (oop);
#endif

#if defined (GC_DEBUGGING)
      if UNCOMMON (!IS_INT (obj->objSize))
        {
          printf ("Size not an integer in OOP %p (%p)\n", oop, obj);
          abort ();
        }

      if UNCOMMON (TO_INT (obj->objSize) < 2)
        {
          printf ("Invalid size for OOP %p (%p)\n", oop, obj);
          abort ();
        }

      if UNCOMMON (oop->flags == 0)
        {
          printf ("Free OOP %p was referenced\n", oop);
          abort ();
        }

      if UNCOMMON ((oop->flags & F_OLD) ||
          IS_SURVIVOR_ADDR (obj, _gst_mem.active_half == &_gst_mem.surv[1]))
        {
          printf ("Copying an already copied object\n");
          abort ();
          return;
        }
#endif

      queue_put (&_gst_mem.tenuring_queue, &oop, 1);
      obj = oop->object = (gst_object)
        queue_put (_gst_mem.active_half, pData, TO_INT (obj->objSize));

      oop->flags &= ~(F_SPACES | F_POOLED);
      oop->flags |= _gst_mem.active_flag;

      /* Look for a child that has not been copied and move it
         near the object.  This improves the locality of reference.
         We do not copy the class (that's the reason for the -1
         here).  */
      n = scanned_fields_in (obj, oop->flags) - 1;
      if (oop->flags & F_EPHEMERON)
        {
          /* For ephemerons, do the work later.  */
          add_grey_object (oop);
          return;
        }

      for (i = 0; i < n; i++)
        {
          OOP newOOP = obj->data[i];
          if (!IS_OOP_COPIED (newOOP))
            {
              oop = newOOP;
              break;
            }
        }
    }
  while (i < n);
}


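/* Entry point of the mark phase: mark everything reachable from the
   root set (registered OOPs and the processor registers), then process
   the ephemerons that marking accumulated in the buffer.  */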
void
mark_oops (void)
{
  _gst_reset_buffer ();
  _gst_mark_registered_oops ();
  _gst_mark_processor_registers ();
  mark_ephemeron_oops ();
}

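/* Process the ephemerons gathered by the mark phase.  The first pass
   clears F_EPHEMERON on ephemerons whose key is already reachable from
   outside; the second pass marks the remaining slots and then the key,
   collecting the ephemerons whose key was reachable only through them
   so that they can be mourned.  The function recurses if marking
   uncovered further ephemerons.  */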
void
mark_ephemeron_oops (void)
{
  OOP *pOOP, *pDeadOOP, *base;
  int i, size;

  /* Make a local copy of the buffer */
  size = _gst_buffer_size ();
  base = alloca (size);
  _gst_copy_buffer (base);
  _gst_reset_buffer ();
  size /= sizeof (PTR);

  /* First pass: distinguish objects whose key was reachable from
     the outside by clearing their F_EPHEMERON bit.  */
  for (pOOP = base, i = size; i--; pOOP++)
    {
      OOP oop = *pOOP;
      gst_object obj = OOP_TO_OBJ (oop);
      OOP key = obj->data[0];

      if (key->flags & F_REACHABLE)
        oop->flags &= ~F_EPHEMERON;

      key->flags |= F_REACHABLE;
    }

  for (pOOP = pDeadOOP = base, i = size; i--; )
    {
      OOP oop = *pOOP++;
      gst_object obj = OOP_TO_OBJ (oop);
      OOP key = obj->data[0];
      int num = NUM_OOPS (obj);
      int j;

      /* Find if the key is reachable from the objects (so that
         we can mourn the ephemeron if this is not so).  */
      key->flags &= ~F_REACHABLE;

      for (j = 1; j < num; j++)
        MAYBE_MARK_OOP (obj->data[j]);

      /* Remember that above we cleared F_EPHEMERON if the key
         is alive.  */
      if (!IS_OOP_MARKED (key) && (oop->flags & F_EPHEMERON))
        *pDeadOOP++ = oop;

      /* Ok, now mark the key.  */
      MAYBE_MARK_OOP (key);

      /* Restore the flag in case it was cleared.  */
      oop->flags |= F_EPHEMERON;
    }

  /* If more ephemerons were reachable from the object, go on...  */
  if (_gst_buffer_size ())
    mark_ephemeron_oops ();

  _gst_add_buf_data (base, (char *) pDeadOOP - (char *) base);
}

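/* Helpers for _gst_mark_an_oop_internal: instead of recursing, these
   macros set up the next OOP (or range of OOPs) to be marked and jump
   back into the function's main loop, turning recursion into
   iteration.  */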
#define TAIL_MARK_OOP(newOOP) do { \
  PREFETCH_ADDR ((newOOP)->object, PREF_READ | PREF_NTA); \
  oop = (newOOP); \
  goto markOne; /* tail recurse!!! */ \
} while(0)

#define TAIL_MARK_OOPRANGE(firstOOP, oopAtEnd) do { \
  PREFETCH_START (firstOOP, PREF_READ | PREF_NTA); \
  curOOP = (OOP *)(firstOOP); \
  atEndOOP = (OOP *)(oopAtEnd); \
  oop = NULL; \
  goto markRange; \
} while(0)

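/* Mark OOP and everything reachable from it.  The function is written
   as an explicit state machine: single objects are handled at the
   markOne label, ranges of slots at the markRange label, and pending
   ranges are pushed onto _gst_mem.markQueue (grown on demand) rather
   than onto the C stack.  */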
void
_gst_mark_an_oop_internal (OOP oop)
{
  OOP *curOOP, *atEndOOP;
  struct mark_queue *markQueue = _gst_mem.markQueue;
  struct mark_queue *lastMarkQueue = _gst_mem.lastMarkQueue;
  struct mark_queue *currentMarkQueue = markQueue;
  goto markOne;

 markRange:
  {
    OOP firstOOP = NULL;

    /* The first unmarked OOP is used for tail recursion.  */
    while (curOOP < atEndOOP)
      {
        oop = *curOOP++;
        if (IS_OOP (oop) && !IS_OOP_MARKED (oop))
          {
            oop->flags |= F_REACHABLE;
            firstOOP = oop;
            break;
          }
      }

    /* The second unmarked OOP is the first that is placed on the mark
       queue.  TODO: split REACHABLE and VISITED flags.  An object is
       marked REACHABLE here, and REACHABLE|VISITED in the markOne label.
       At the end of GC, all REACHABLE objects are also VISITED.
       The above loop should seek an object that is not VISITED so
       that it can be marked.  For the loop below, however, REACHABLE
       objects are known to be somewhere else on the mark stack, and can
       be skipped.

       Skipping objects after the first unmarked OOP is still useful,
       because it keeps the stack size a bit lower in the relatively common
       case of many integer or nil instance variables.  */
    while (curOOP < atEndOOP)
      {
        oop = *curOOP;

        if (IS_OOP (oop) && !IS_OOP_MARKED (oop))
          {
            if (currentMarkQueue == lastMarkQueue)
              {
                const size_t size = lastMarkQueue - markQueue;

                _gst_mem.markQueue = (struct mark_queue *)
                  xrealloc (_gst_mem.markQueue,
                            2 * size * sizeof (struct mark_queue));
                _gst_mem.lastMarkQueue = &_gst_mem.markQueue[2 * size];

                markQueue = _gst_mem.markQueue;
                lastMarkQueue = _gst_mem.lastMarkQueue;
                currentMarkQueue = &_gst_mem.markQueue[size];
              }
            currentMarkQueue->firstOOP = curOOP;
            currentMarkQueue->endOOP = atEndOOP;
            currentMarkQueue++;
            break;
          }

        curOOP++;
      }

    if (!firstOOP)
      goto pop;

    TAIL_MARK_OOP (firstOOP);
  }

 markOne:
  {
    OOP objClass;
    gst_object object;
    uintptr_t size;

#if defined (GC_DEBUGGING)
    if UNCOMMON (IS_OOP_FREE (oop))
      {
        printf ("Error! Free OOP %p is being marked!\n", oop);
        abort ();
        return;
      }
#endif

#if defined(GC_DEBUG_OUTPUT)
    printf (">Mark ");
    _gst_display_oop (oop);
#endif

    /* See if the object has pointers, set up to copy them if so.  */
    oop->flags |= F_REACHABLE;
    object = OOP_TO_OBJ (oop);
    objClass = object->objClass;
    if UNCOMMON (oop->flags & F_CONTEXT)
      {
        gst_method_context ctx;
        intptr_t methodSP;
        ctx = (gst_method_context) object;
        methodSP = TO_INT (ctx->spOffset);
        /* printf ("setting up for loop on context %x, sp = %d\n",
           ctx, methodSP); */
        TAIL_MARK_OOPRANGE (&ctx->objClass,
                            ctx->contextStack + methodSP + 1);
      }
    else if UNCOMMON (oop->flags & (F_EPHEMERON | F_WEAK))
      {
        if (oop->flags & F_EPHEMERON)
          _gst_add_buf_pointer (oop);

        /* In general, there will be many instances of a class,
           but only the first time will it be unmarked.  So I'm
           marking this as uncommon.  */
        if UNCOMMON (!IS_OOP_MARKED (objClass))
          TAIL_MARK_OOP (objClass);
      }
    else
      {
        size = NUM_OOPS (object);
        if COMMON (size)
          TAIL_MARK_OOPRANGE (&object->objClass,
                              object->data + size);

        else if UNCOMMON (!IS_OOP_MARKED (objClass))
          TAIL_MARK_OOP (objClass);
      }
  }

 pop:
  {
    if (currentMarkQueue > markQueue)
      {
        currentMarkQueue--;
        TAIL_MARK_OOPRANGE (currentMarkQueue->firstOOP,
                            currentMarkQueue->endOOP);
      }
  }
}

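/* Mark every object referenced by the OOPs between CUROOP (included)
   and ATENDOOP (excluded).  */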
void
_gst_mark_oop_range (OOP *curOOP, OOP *atEndOOP)
{
  OOP *pOOP;
  for (pOOP = curOOP; pOOP < atEndOOP; pOOP++)
    MAYBE_MARK_OOP (*pOOP);
}


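/* Allocate the initial buffer used by the incubator to record OOPs, and
   register it as part of the root set so that incubated objects survive
   garbage collections even before they are attached to the Smalltalk
   object graph.  */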
void
_gst_inc_init_registry (void)
{
  _gst_mem.inc_base =
    (OOP *) xmalloc (INIT_NUM_INCUBATOR_OOPS * sizeof (OOP *));
  _gst_mem.inc_ptr = _gst_mem.inc_base;
  _gst_mem.inc_end = _gst_mem.inc_base + INIT_NUM_INCUBATOR_OOPS;

  /* Make the incubated objects part of the root set */
  _gst_register_oop_array (&_gst_mem.inc_base, &_gst_mem.inc_ptr);
}

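/* Grow the incubator registry by INCUBATOR_CHUNK_SIZE slots, preserving
   its current contents and updating the base, current and end
   pointers.  */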
void
_gst_inc_grow_registry (void)
{
  unsigned oldPtrOffset;
  unsigned oldRegistrySize, newRegistrySize;

  oldPtrOffset = _gst_mem.inc_ptr - _gst_mem.inc_base;
  oldRegistrySize = _gst_mem.inc_end - _gst_mem.inc_base;
  newRegistrySize = oldRegistrySize + INCUBATOR_CHUNK_SIZE;

  _gst_mem.inc_base =
    (OOP *) xrealloc (_gst_mem.inc_base,
                      newRegistrySize * sizeof (OOP *));
  _gst_mem.inc_ptr = _gst_mem.inc_base + oldPtrOffset;
  _gst_mem.inc_end = _gst_mem.inc_base + newRegistrySize;
}