1 /*
2 ** gc.c - garbage collector for mruby
3 **
4 ** See Copyright Notice in mruby.h
5 */
6 
7 #include <string.h>
8 #include <stdlib.h>
9 #include <mruby.h>
10 #include <mruby/array.h>
11 #include <mruby/class.h>
12 #include <mruby/data.h>
13 #include <mruby/hash.h>
14 #include <mruby/proc.h>
15 #include <mruby/range.h>
16 #include <mruby/string.h>
17 #include <mruby/variable.h>
18 #include <mruby/gc.h>
19 #include <mruby/error.h>
20 #include <mruby/throw.h>
21 
22 /*
23   = Tri-color Incremental Garbage Collection
24 
25   mruby's GC is Tri-color Incremental GC with Mark & Sweep.
26   Algorithm details are omitted.
27   Instead, the implementation part is described below.
28 
29   == Object's Color
30 
31   Each object can be painted in three colors:
32 
33     * White - Unmarked.
34     * Gray - Marked, But the child objects are unmarked.
35     * Black - Marked, the child objects are also marked.
36 
37   == Two White Types
38 
39   There're two white color types in a flip-flop fashion: White-A and White-B,
40   which respectively represent the Current White color (the newly allocated
41   objects in the current GC cycle) and the Sweep Target White color (the
42   dead objects to be swept).
43 
44   A and B will be switched just at the beginning of the next GC cycle. At
45   that time, all the dead objects have been swept, while the newly created
  objects in the current GC cycle which finally remain White are now
47   regarded as dead objects. Instead of traversing all the White-A objects and
48   painting them as White-B, just switch the meaning of White-A and White-B as
49   this will be much cheaper.
50 
51   As a result, the objects we sweep in the current GC cycle are always
52   left from the previous GC cycle. This allows us to sweep objects
53   incrementally, without the disturbance of the newly created objects.
54 
55   == Execution Timing
56 
57   GC Execution Time and Each step interval are decided by live objects count.
58   List of Adjustment API:
59 
60     * gc_interval_ratio_set
61     * gc_step_ratio_set
62 
63   For details, see the comments for each function.
64 
65   == Write Barrier
66 
67   mruby implementer and C extension library writer must insert a write
68   barrier when updating a reference from a field of an object.
69   When updating a reference from a field of object A to object B,
70   two different types of write barrier are available:
71 
72     * mrb_field_write_barrier - target B object for a mark.
73     * mrb_write_barrier       - target A object for a mark.
74 
75   == Generational Mode
76 
  mruby's GC offers a Generational Mode while re-using the tri-color GC
78   infrastructure. It will treat the Black objects as Old objects after each
79   sweep phase, instead of painting them White. The key ideas are still the same
80   as traditional generational GC:
81 
82     * Minor GC - just traverse the Young objects (Gray objects) in the mark
83                  phase, then only sweep the newly created objects, and leave
84                  the Old objects live.
85 
86     * Major GC - same as a full regular GC cycle.
87 
  The difference from "traditional" generational GC is that the major GC
89   in mruby is triggered incrementally in a tri-color manner.
90 
91 
92   For details, see the comments for each function.
93 
94 */
95 
/* Free-slot header: overlays a swept object so that unused slots can be
   chained into a per-page singly linked free list. */
struct free_obj {
  MRB_OBJECT_HEADER;
  struct RBasic *next;  /* next free slot in the page's freelist */
};
100 
/* One GC heap slot: a union large enough for any object representation,
   so every mruby object occupies exactly one RVALUE on a heap page. */
typedef struct {
  union {
    struct free_obj free;    /* slot is unused (tt == MRB_TT_FREE) */
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RProc proc;
    struct REnv env;
    struct RException exc;
    struct RBreak brk;
#ifdef MRB_WORD_BOXING
    /* with word boxing, floats and C pointers are heap-allocated too */
#ifndef MRB_WITHOUT_FLOAT
    struct RFloat floatv;
#endif
    struct RCptr cptr;
#endif
  } as;
} RVALUE;
124 
125 #ifdef GC_PROFILE
126 #include <stdio.h>
127 #include <sys/time.h>
128 
129 static double program_invoke_time = 0;
130 static double gc_time = 0;
131 static double gc_total_time = 0;
132 
/* Current wall-clock time in seconds with microsecond resolution. */
static double
gettimeofday_time(void)
{
  struct timeval now;

  gettimeofday(&now, NULL);
  return now.tv_sec + now.tv_usec * 1e-6;
}
140 
141 #define GC_INVOKE_TIME_REPORT(with) do {\
142   fprintf(stderr, "%s\n", with);\
143   fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
144   fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
145   fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
146 } while(0)
147 
148 #define GC_TIME_START do {\
149   gc_time = gettimeofday_time();\
150 } while(0)
151 
152 #define GC_TIME_STOP_AND_REPORT do {\
153   gc_time = gettimeofday_time() - gc_time;\
154   gc_total_time += gc_time;\
155   fprintf(stderr, "gc_state: %d\n", gc->state);\
156   fprintf(stderr, "live: %zu\n", gc->live);\
157   fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
158   fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
159   fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
160   fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
161 } while(0)
162 #else
163 #define GC_INVOKE_TIME_REPORT(s)
164 #define GC_TIME_START
165 #define GC_TIME_STOP_AND_REPORT
166 #endif
167 
168 #ifdef GC_DEBUG
169 #define DEBUG(x) (x)
170 #else
171 #define DEBUG(x)
172 #endif
173 
174 #ifndef MRB_HEAP_PAGE_SIZE
175 #define MRB_HEAP_PAGE_SIZE 1024
176 #endif
177 
178 #define GC_STEP_SIZE 1024
179 
180 /* white: 001 or 010, black: 100, gray: 000 */
181 #define GC_GRAY 0
182 #define GC_WHITE_A 1
183 #define GC_WHITE_B (1 << 1)
184 #define GC_BLACK (1 << 2)
185 #define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
186 #define GC_COLOR_MASK 7
187 
188 #define paint_gray(o) ((o)->color = GC_GRAY)
189 #define paint_black(o) ((o)->color = GC_BLACK)
190 #define paint_white(o) ((o)->color = GC_WHITES)
191 #define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
192 #define is_gray(o) ((o)->color == GC_GRAY)
193 #define is_white(o) ((o)->color & GC_WHITES)
194 #define is_black(o) ((o)->color & GC_BLACK)
195 #define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
196 #define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
197 #define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)
198 
199 #define objects(p) ((RVALUE *)p->objects)
200 
201 MRB_API void*
mrb_realloc_simple(mrb_state * mrb,void * p,size_t len)202 mrb_realloc_simple(mrb_state *mrb, void *p,  size_t len)
203 {
204   void *p2;
205 
206   p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
207   if (!p2 && len > 0 && mrb->gc.heaps) {
208     mrb_full_gc(mrb);
209     p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
210   }
211 
212   return p2;
213 }
214 
215 MRB_API void*
mrb_realloc(mrb_state * mrb,void * p,size_t len)216 mrb_realloc(mrb_state *mrb, void *p, size_t len)
217 {
218   void *p2;
219 
220   p2 = mrb_realloc_simple(mrb, p, len);
221   if (len == 0) return p2;
222   if (p2 == NULL) {
223     if (mrb->gc.out_of_memory) {
224       mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
225       /* mrb_panic(mrb); */
226     }
227     else {
228       mrb->gc.out_of_memory = TRUE;
229       mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
230     }
231   }
232   else {
233     mrb->gc.out_of_memory = FALSE;
234   }
235 
236   return p2;
237 }
238 
239 MRB_API void*
mrb_malloc(mrb_state * mrb,size_t len)240 mrb_malloc(mrb_state *mrb, size_t len)
241 {
242   return mrb_realloc(mrb, 0, len);
243 }
244 
245 MRB_API void*
mrb_malloc_simple(mrb_state * mrb,size_t len)246 mrb_malloc_simple(mrb_state *mrb, size_t len)
247 {
248   return mrb_realloc_simple(mrb, 0, len);
249 }
250 
251 MRB_API void*
mrb_calloc(mrb_state * mrb,size_t nelem,size_t len)252 mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
253 {
254   void *p;
255 
256   if (nelem > 0 && len > 0 &&
257       nelem <= SIZE_MAX / len) {
258     size_t size;
259     size = nelem * len;
260     p = mrb_malloc(mrb, size);
261 
262     memset(p, 0, size);
263   }
264   else {
265     p = NULL;
266   }
267 
268   return p;
269 }
270 
/* Release memory obtained from mrb_malloc/mrb_realloc by invoking the
   user-supplied allocator with len == 0 (its free convention). */
MRB_API void
mrb_free(mrb_state *mrb, void *p)
{
  (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}
276 
277 static mrb_bool
heap_p(mrb_gc * gc,struct RBasic * object)278 heap_p(mrb_gc *gc, struct RBasic *object)
279 {
280   mrb_heap_page* page;
281 
282   page = gc->heaps;
283   while (page) {
284     RVALUE *p;
285 
286     p = objects(page);
287     if (&p[0].as.basic <= object && object <= &p[MRB_HEAP_PAGE_SIZE].as.basic) {
288       return TRUE;
289     }
290     page = page->next;
291   }
292   return FALSE;
293 }
294 
295 MRB_API mrb_bool
mrb_object_dead_p(mrb_state * mrb,struct RBasic * object)296 mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
297   mrb_gc *gc = &mrb->gc;
298   if (!heap_p(gc, object)) return TRUE;
299   return is_dead(gc, object);
300 }
301 
302 static void
link_heap_page(mrb_gc * gc,mrb_heap_page * page)303 link_heap_page(mrb_gc *gc, mrb_heap_page *page)
304 {
305   page->next = gc->heaps;
306   if (gc->heaps)
307     gc->heaps->prev = page;
308   gc->heaps = page;
309 }
310 
311 static void
unlink_heap_page(mrb_gc * gc,mrb_heap_page * page)312 unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
313 {
314   if (page->prev)
315     page->prev->next = page->next;
316   if (page->next)
317     page->next->prev = page->prev;
318   if (gc->heaps == page)
319     gc->heaps = page->next;
320   page->prev = NULL;
321   page->next = NULL;
322 }
323 
324 static void
link_free_heap_page(mrb_gc * gc,mrb_heap_page * page)325 link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
326 {
327   page->free_next = gc->free_heaps;
328   if (gc->free_heaps) {
329     gc->free_heaps->free_prev = page;
330   }
331   gc->free_heaps = page;
332 }
333 
334 static void
unlink_free_heap_page(mrb_gc * gc,mrb_heap_page * page)335 unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
336 {
337   if (page->free_prev)
338     page->free_prev->free_next = page->free_next;
339   if (page->free_next)
340     page->free_next->free_prev = page->free_prev;
341   if (gc->free_heaps == page)
342     gc->free_heaps = page->free_next;
343   page->free_prev = NULL;
344   page->free_next = NULL;
345 }
346 
347 static void
add_heap(mrb_state * mrb,mrb_gc * gc)348 add_heap(mrb_state *mrb, mrb_gc *gc)
349 {
350   mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
351   RVALUE *p, *e;
352   struct RBasic *prev = NULL;
353 
354   for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
355     p->as.free.tt = MRB_TT_FREE;
356     p->as.free.next = prev;
357     prev = &p->as.basic;
358   }
359   page->freelist = prev;
360 
361   link_heap_page(gc, page);
362   link_free_heap_page(gc, page);
363 }
364 
365 #define DEFAULT_GC_INTERVAL_RATIO 200
366 #define DEFAULT_GC_STEP_RATIO 200
367 #define MAJOR_GC_INC_RATIO 120
368 #define MAJOR_GC_TOOMANY 10000
369 #define is_generational(gc) ((gc)->generational)
370 #define is_major_gc(gc) (is_generational(gc) && (gc)->full)
371 #define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
372 
/* Initialize the GC state embedded in mrb: allocate the (growable)
   arena, create one initial heap page, set default pacing ratios, and
   enable generational mode unless compiled out. */
void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  gc->generational = TRUE;
  gc->full = TRUE;  /* start with a full (major) cycle */
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
396 
397 static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);
398 
399 void
free_heap(mrb_state * mrb,mrb_gc * gc)400 free_heap(mrb_state *mrb, mrb_gc *gc)
401 {
402   mrb_heap_page *page = gc->heaps;
403   mrb_heap_page *tmp;
404   RVALUE *p, *e;
405 
406   while (page) {
407     tmp = page;
408     page = page->next;
409     for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
410       if (p->as.free.tt != MRB_TT_FREE)
411         obj_free(mrb, &p->as.basic, TRUE);
412     }
413     mrb_free(mrb, tmp);
414   }
415 }
416 
/* Tear down all GC-managed memory: finalize and free every heap page,
   then release the dynamically allocated arena (if not fixed-size). */
void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
  free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
  mrb_free(mrb, gc->arena);
#endif
}
425 
/* Append p to the GC arena so it stays reachable until the caller
   restores the arena index.  With a fixed arena an overflow raises
   (after forcing a little room so the raise itself can allocate);
   otherwise the arena grows by 3/2. */
static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
  }
#else
  if (gc->arena_idx >= gc->arena_capa) {
    /* extend arena */
    gc->arena_capa = (int)(gc->arena_capa * 3 / 2);
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
  }
#endif
  gc->arena[gc->arena_idx++] = p;
}
444 
445 /* mrb_gc_protect() leaves the object in the arena */
446 MRB_API void
mrb_gc_protect(mrb_state * mrb,mrb_value obj)447 mrb_gc_protect(mrb_state *mrb, mrb_value obj)
448 {
449   if (mrb_immediate_p(obj)) return;
450   gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
451 }
452 
453 #define GC_ROOT_NAME "_gc_root_"
454 
455 /* mrb_gc_register() keeps the object from GC.
456 
457    Register your object when it's exported to C world,
458    without reference from Ruby world, e.g. callback
459    arguments.  Don't forget to remove the object using
460    mrb_gc_unregister, otherwise your object will leak.
461 */
462 
463 MRB_API void
mrb_gc_register(mrb_state * mrb,mrb_value obj)464 mrb_gc_register(mrb_state *mrb, mrb_value obj)
465 {
466   mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
467   mrb_value table = mrb_gv_get(mrb, root);
468 
469   if (mrb_nil_p(table) || mrb_type(table) != MRB_TT_ARRAY) {
470     table = mrb_ary_new(mrb);
471     mrb_gv_set(mrb, root, table);
472   }
473   mrb_ary_push(mrb, table, obj);
474 }
475 
/* mrb_gc_unregister() removes the object from GC root.
   Only the first matching entry is removed; the tail of the table is
   shifted down to fill the gap. */
MRB_API void
mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
  mrb_value table = mrb_gv_get(mrb, root);
  struct RArray *a;
  mrb_int i;

  if (mrb_nil_p(table)) return;
  if (mrb_type(table) != MRB_TT_ARRAY) {
    /* root was clobbered with a non-array; reset it */
    mrb_gv_set(mrb, root, mrb_nil_value());
    return;
  }
  a = mrb_ary_ptr(table);
  mrb_ary_modify(mrb, a);  /* un-share before writing */
  for (i = 0; i < ARY_LEN(a); i++) {
    if (mrb_obj_eq(mrb, ARY_PTR(a)[i], obj)) {
      mrb_int len = ARY_LEN(a)-1;
      mrb_value *ptr = ARY_PTR(a);

      /* shrink first, then move the (len - i) trailing elements down */
      ARY_SET_LEN(a, len);
      memmove(&ptr[i], &ptr[i + 1], (len - i) * sizeof(mrb_value));
      break;
    }
  }
}
503 
/* Allocate a new object of type ttype whose class is cls.
   May trigger an incremental GC step (or a full GC under MRB_GC_STRESS)
   and grows the heap when no free slot exists.  The new object is
   zeroed, painted current-white, and left in the GC arena so it
   survives until the caller's arena restore.  Raises TypeError when the
   class/type combination is invalid. */
MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

  if (cls) {
    enum mrb_vtype tt;

    switch (cls->tt) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
    case MRB_TT_MODULE:
    case MRB_TT_ENV:
      break;
    default:
      /* cls is not a class-like object */
      mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
    }
    tt = MRB_INSTANCE_TT(cls);
    /* the instance type recorded on the class must agree with the
       requested type; special internal types are exempt */
    if (tt != MRB_TT_FALSE &&
        ttype != MRB_TT_SCLASS &&
        ttype != MRB_TT_ICLASS &&
        ttype != MRB_TT_ENV &&
        ttype != tt) {
      mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %S", mrb_obj_value(cls));
    }
  }

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (gc->threshold < gc->live) {
    /* live count crossed the pacing threshold: advance the GC */
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);
  }

  /* pop one slot from the first page that has free slots */
  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    /* page exhausted: drop it from the free-page list */
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);  /* keep the fresh object reachable */
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);  /* newborns are current-white */
  return p;
}
557 
/* Paint obj gray and push it onto the gray list for later child
   marking by gc_mark_children(). */
static inline void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
  if (obj->tt > MRB_TT_MAXDEFINE) {
    /* corrupted object header */
    abort();
  }
#endif
  paint_gray(obj);
  obj->gcnext = gc->gray_list;
  gc->gray_list = obj;
}
570 
/* Number of VM registers (stack slots) in use by call frame ci.
   Falls back to argc-derived estimates when no proc/irep is attached.
   A negative argc marks a packed-argument call (self + args + blk). */
static int
ci_nregs(mrb_callinfo *ci)
{
  struct RProc *p = ci->proc;
  int n = 0;

  if (!p) {
    if (ci->argc < 0) return 3;
    return ci->argc+2;
  }
  if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
    n = p->body.irep->nregs;  /* compiled procs record their register count */
  }
  if (ci->argc < 0) {
    if (n < 3) n = 3; /* self + args + blk */
  }
  if (ci->argc > n) {
    n = ci->argc + 2; /* self + blk */
  }
  return n;
}
592 
/* Mark every live value on a context's VM stack, then overwrite the
   unused area above it with nil so stale references cannot pin dead
   objects. */
static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;
  mrb_value nil;

  if (c->stack == NULL) return;
  /* live portion: everything below the stack pointer, plus the
     registers of the current call frame */
  e = c->stack - c->stbase;
  if (c->ci) {
    e += ci_nregs(c->ci);
  }
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;  /* clamp to stack end */
  for (i=0; i<e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      mrb_gc_mark(mrb, mrb_basic_ptr(v));
    }
  }
  /* wipe the dead region above the live part */
  e = c->stend - c->stbase;
  nil = mrb_nil_value();
  for (; i<e; i++) {
    c->stbase[i] = nil;
  }
}
619 
/* Mark one execution context: its VM stack, call frames, ensure stack
   and fiber object, then iterate into parent contexts (via goto, to
   avoid deep recursion along the fiber chain). */
static void
mark_context(mrb_state *mrb, struct mrb_context *c)
{
  int i;
  mrb_callinfo *ci;

 start:
  if (c->status == MRB_FIBER_TERMINATED) return;

  /* mark VM stack */
  mark_context_stack(mrb, c);

  /* mark call stack */
  if (c->cibase) {
    for (ci = c->cibase; ci <= c->ci; ci++) {
      mrb_gc_mark(mrb, (struct RBasic*)ci->env);
      mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
      mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
    }
  }
  /* mark ensure stack */
  for (i=0; i<c->eidx; i++) {
    mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
  }
  /* mark fibers */
  mrb_gc_mark(mrb, (struct RBasic*)c->fib);
  if (c->prev) {
    /* continue with the parent context iteratively */
    c = c->prev;
    goto start;
  }
}
651 
/* Pop obj from the gray list, paint it black, and mark everything it
   references: its class, then type-specific children (ivars, method
   tables, array/hash elements, env stacks, fiber contexts, ...). */
static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  gc->gray_list = obj->gcnext;  /* pop obj off the gray list */
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);  /* every object references its class */
  switch (obj->tt) {
  case MRB_TT_ICLASS:
    {
      struct RClass *c = (struct RClass*)obj;
      if (MRB_FLAG_TEST(c, MRB_FL_CLASS_IS_ORIGIN))
        mrb_gc_mark_mt(mrb, c);  /* origin iclass owns the method table */
      mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    }
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)c->super);
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->upper);
      mrb_gc_mark(mrb, (struct RBasic*)p->e.env);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;
      mrb_int i, len;

      if (MRB_ENV_STACK_SHARED_P(e) && e->cxt && e->cxt->fib) {
        /* the env's stack lives on a fiber; keep that fiber alive */
        mrb_gc_mark(mrb, (struct RBasic*)e->cxt->fib);
      }
      len = MRB_ENV_STACK_LEN(e);
      for (i=0; i<len; i++) {
        mrb_gc_mark_value(mrb, e->stack[i]);
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e;

      for (i=0,e=ARY_LEN(a); i<e; i++) {
        mrb_gc_mark_value(mrb, ARY_PTR(a)[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    if (RSTR_FSHARED_P(obj) && !RSTR_NOFREE_P(obj)) {
      /* keep the string this one shares its buffer with */
      struct RString *s = (struct RString*)obj;
      mrb_gc_mark(mrb, (struct RBasic*)s->as.heap.aux.fshared);
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_mark_range(mrb, (struct RRange*)obj);
    break;

  default:
    break;
  }
}
749 
750 MRB_API void
mrb_gc_mark(mrb_state * mrb,struct RBasic * obj)751 mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
752 {
753   if (obj == 0) return;
754   if (!is_white(obj)) return;
755   mrb_assert((obj)->tt != MRB_TT_FREE);
756   add_gray_list(mrb, &mrb->gc, obj);
757 }
758 
/* Release all resources owned by obj and turn its slot into a free
   object (tt = MRB_TT_FREE).  `end` is true only during interpreter
   teardown, where extra cleanup (irep cycle cutting) is performed and
   fiber unwinding is skipped. */
static void
obj_free(mrb_state *mrb, struct RBasic *obj, int end)
{
  DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt));
  switch (obj->tt) {
    /* immediate - no mark */
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
    /* cannot happen */
    return;

#ifndef MRB_WITHOUT_FLOAT
  case MRB_TT_FLOAT:
#ifdef MRB_WORD_BOXING
    break;  /* boxed floats are heap slots; just free the slot */
#else
    return;
#endif
#endif

  case MRB_TT_OBJECT:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;
  case MRB_TT_ICLASS:
    /* only the origin iclass owns its method table */
    if (MRB_FLAG_TEST(obj, MRB_FL_CLASS_IS_ORIGIN))
      mrb_gc_free_mt(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (MRB_ENV_STACK_SHARED_P(e)) {
        /* cannot be freed: the stack belongs to a live context */
        e->stack = NULL;
        break;
      }
      mrb_free(mrb, e->stack);
      e->stack = NULL;
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c && c != mrb->root_c) {
        if (!end && c->status != MRB_FIBER_TERMINATED) {
          mrb_callinfo *ci = c->ci;
          mrb_callinfo *ce = c->cibase;

          /* detach envs still sharing the dying fiber's stack so they
             remain usable after the context is freed */
          while (ce <= ci) {
            struct REnv *e = ci->env;
            if (e && !mrb_object_dead_p(mrb, (struct RBasic*)e) &&
                e->tt == MRB_TT_ENV && MRB_ENV_STACK_SHARED_P(e)) {
              mrb_env_unshare(mrb, e);
            }
            ci--;
          }
        }
        mrb_free_context(mrb, c);
      }
    }
    break;

  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->as.heap.aux.shared);
    else if (!ARY_EMBED_P(obj))
      mrb_free(mrb, ((struct RArray*)obj)->as.heap.ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep *irep = p->body.irep;
        if (end) {
          /* at shutdown, break reference cycles inside the irep first */
          mrb_irep_cutref(mrb, irep);
        }
        mrb_irep_decref(mrb, irep);
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_free_range(mrb, ((struct RRange*)obj));
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);  /* user-supplied finalizer */
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;  /* slot is now reusable */
}
884 
/* Mark every GC root: global variables, the arena, all built-in
   classes, pre-allocated exceptions, top_self and every live context.
   In minor GC the gray lists from the previous cycle are kept; a
   full GC starts them fresh. */
static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  if (!is_minor_gc(gc)) {
    gc->gray_list = NULL;
    gc->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);

  /* mark built-in classes */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->range_class);

#ifndef MRB_WITHOUT_FLOAT
  mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
#endif
  mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);

  mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);

  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
  mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif

  mark_context(mrb, mrb->c);
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->root_c);
  }
}
941 
/* Mark obj's children via gc_mark_children() and return an estimate of
   how many child slots were visited.  The count is only used to meter
   incremental marking work, so it need not be exact. */
static size_t
gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  size_t children = 0;

  gc_mark_children(mrb, gc, obj);

  switch (obj->tt) {
  case MRB_TT_ICLASS:
    children++;  /* super */
    break;

  case MRB_TT_CLASS:
  case MRB_TT_SCLASS:
  case MRB_TT_MODULE:
    {
      struct RClass *c = (struct RClass*)obj;

      children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
      children += mrb_gc_mark_mt_size(mrb, c);
      children++;  /* super */
    }
    break;

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_ENV:
    children += MRB_ENV_STACK_LEN(obj);
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;
      size_t i;
      mrb_callinfo *ci;

      if (!c || c->status == MRB_FIBER_TERMINATED) break;

      /* mark stack */
      i = c->stack - c->stbase;

      if (c->ci) {
        i += ci_nregs(c->ci);
      }
      if (c->stbase + i > c->stend) i = c->stend - c->stbase;  /* clamp */
      children += i;

      /* mark ensure stack */
      children += c->eidx;

      /* mark closure: count the call frames */
      if (c->cibase) {
        for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
          ;
      }
      children += i;
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      children += ARY_LEN(a);
    }
    break;

  case MRB_TT_HASH:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_PROC:
  case MRB_TT_RANGE:
    children+=2;
    break;

  default:
    break;
  }
  return children;
}
1027 
1028 
/* Drain the gray list completely.  gc_mark_children() pops each gray
   head itself; entries that are no longer gray are skipped here. */
static void
gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
  while (gc->gray_list) {
    if (is_gray(gc->gray_list))
      gc_mark_children(mrb, gc, gc->gray_list);
    else
      gc->gray_list = gc->gray_list->gcnext;
  }
}
1038 
1039 
/* Mark gray objects until roughly `limit` child slots have been
   visited.  Returns the amount of marking work performed. */
static size_t
incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  size_t tried_marks = 0;

  while (gc->gray_list && tried_marks < limit) {
    /* gc_gray_mark pops the list head via gc_mark_children */
    tried_marks += gc_gray_mark(mrb, gc, gc->gray_list);
  }

  return tried_marks;
}
1051 
/* Atomic finish of the mark phase: re-mark roots that may have changed
   since the incremental scan (arena, globals, contexts, current
   exception), then drain both the normal and the atomic gray lists. */
static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  mrb_gc_mark_gv(mrb);
  mark_context(mrb, mrb->c);
  mark_context(mrb, mrb->root_c);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
  /* objects deferred by write barriers are processed last */
  gc->gray_list = gc->atomic_gray_list;
  gc->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
}
1072 
/* Transition to the sweep state: start sweeping from the first heap
   page and snapshot the live count for later threshold computation. */
static void
prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
{
  gc->state = MRB_GC_STATE_SWEEP;
  gc->sweeps = gc->heaps;
  gc->live_after_mark = gc->live;
}
1080 
/* Sweep heap pages until roughly `limit` slots have been examined.
   Dead objects are freed and chained back onto their page's freelist;
   pages that end up entirely free are unlinked and released.  In minor
   GC, pages containing only old objects are skipped, and pages that
   fill up are marked old.  Returns the number of slots examined. */
static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  mrb_heap_page *page = gc->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;                 /* page holds no survivors? */
    mrb_bool full = (page->freelist == NULL);  /* page had no free slots */

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic, FALSE);
          if (p->as.basic.tt == MRB_TT_FREE) {
            /* return the slot to this page's freelist */
            p->as.free.next = page->freelist;
            page->freelist = (struct RBasic*)p;
            freed++;
          }
          else {
            /* finalizer kept the object alive */
            dead_slot = FALSE;
          }
        }
      }
      else {
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = FALSE;
      }
      p++;
    }

    /* free dead slot */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      mrb_heap_page *next = page->next;

      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      if (full && freed > 0) {
        /* page regained free slots; put it back on the free list */
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;   /* fully occupied page: promote to old */
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;  /* remember where to resume next step */
  return tried_sweep;
}
1147 
1148 static size_t
incremental_gc(mrb_state * mrb,mrb_gc * gc,size_t limit)1149 incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
1150 {
1151   switch (gc->state) {
1152   case MRB_GC_STATE_ROOT:
1153     root_scan_phase(mrb, gc);
1154     gc->state = MRB_GC_STATE_MARK;
1155     flip_white_part(gc);
1156     return 0;
1157   case MRB_GC_STATE_MARK:
1158     if (gc->gray_list) {
1159       return incremental_marking_phase(mrb, gc, limit);
1160     }
1161     else {
1162       final_marking_phase(mrb, gc);
1163       prepare_incremental_sweep(mrb, gc);
1164       return 0;
1165     }
1166   case MRB_GC_STATE_SWEEP: {
1167      size_t tried_sweep = 0;
1168      tried_sweep = incremental_sweep_phase(mrb, gc, limit);
1169      if (tried_sweep == 0)
1170        gc->state = MRB_GC_STATE_ROOT;
1171      return tried_sweep;
1172   }
1173   default:
1174     /* unknown state */
1175     mrb_assert(0);
1176     return 0;
1177   }
1178 }
1179 
1180 static void
incremental_gc_until(mrb_state * mrb,mrb_gc * gc,mrb_gc_state to_state)1181 incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
1182 {
1183   do {
1184     incremental_gc(mrb, gc, SIZE_MAX);
1185   } while (gc->state != to_state);
1186 }
1187 
1188 static void
incremental_gc_step(mrb_state * mrb,mrb_gc * gc)1189 incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
1190 {
1191   size_t limit = 0, result = 0;
1192   limit = (GC_STEP_SIZE/100) * gc->step_ratio;
1193   while (result < limit) {
1194     result += incremental_gc(mrb, gc, limit);
1195     if (gc->state == MRB_GC_STATE_ROOT)
1196       break;
1197   }
1198 
1199   gc->threshold = gc->live + GC_STEP_SIZE;
1200 }
1201 
1202 static void
clear_all_old(mrb_state * mrb,mrb_gc * gc)1203 clear_all_old(mrb_state *mrb, mrb_gc *gc)
1204 {
1205   mrb_bool origin_mode = gc->generational;
1206 
1207   mrb_assert(is_generational(gc));
1208   if (is_major_gc(gc)) {
1209     /* finish the half baked GC */
1210     incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1211   }
1212 
1213   /* Sweep the dead objects, then reset all the live objects
1214    * (including all the old objects, of course) to white. */
1215   gc->generational = FALSE;
1216   prepare_incremental_sweep(mrb, gc);
1217   incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
1218   gc->generational = origin_mode;
1219 
1220   /* The gray objects have already been painted as white */
1221   gc->atomic_gray_list = gc->gray_list = NULL;
1222 }
1223 
/* Public entry point for incremental collection.  In minor (generational)
 * mode the cycle is run to completion; otherwise a single bounded step is
 * executed.  When a cycle finishes, the allocation threshold and the
 * major-GC trigger are recomputed. */
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  /* no collection while disabled or while object-space iteration runs */
  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    /* a full cycle just completed: recompute the next-GC threshold */
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      size_t threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;

      gc->full = FALSE;
      if (threshold < MAJOR_GC_TOOMANY) {
        gc->majorgc_old_threshold = threshold;
      }
      else {
        /* too many objects allocated during incremental GC, */
        /* instead of increasing threshold, invoke full GC. */
        mrb_full_gc(mrb);
      }
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        /* old generation outgrew its budget: schedule a major GC */
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}
1271 
/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  /* no collection while disabled or while object-space iteration runs */
  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;  /* force the next cycle to be a major GC */
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish half baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* run one complete mark/sweep cycle from a clean ROOT state */
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;

  if (is_generational(gc)) {
    /* reset the major-GC trigger based on the survivors */
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

  GC_TIME_STOP_AND_REPORT;
}
1303 
/* Public alias for a full garbage collection cycle. */
MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}
1309 
1310 /*
1311  * Field write barrier
1312  *   Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
1313  */
1314 
1315 MRB_API void
mrb_field_write_barrier(mrb_state * mrb,struct RBasic * obj,struct RBasic * value)1316 mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
1317 {
1318   mrb_gc *gc = &mrb->gc;
1319 
1320   if (!is_black(obj)) return;
1321   if (!is_white(value)) return;
1322 
1323   mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
1324   mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
1325 
1326   if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
1327     add_gray_list(mrb, gc, value);
1328   }
1329   else {
1330     mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1331     paint_partial_white(gc, obj); /* for never write barriers */
1332   }
1333 }
1334 
1335 /*
1336  * Write barrier
1337  *   Paint obj(Black) to obj(Gray).
1338  *
 *   The object that is painted gray will be traversed atomically in the
 *   final mark phase, so use this write barrier for frequently written
 *   spots, e.g. setting an element of an Array.
1342  */
1343 
1344 MRB_API void
mrb_write_barrier(mrb_state * mrb,struct RBasic * obj)1345 mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
1346 {
1347   mrb_gc *gc = &mrb->gc;
1348 
1349   if (!is_black(obj)) return;
1350 
1351   mrb_assert(!is_dead(gc, obj));
1352   mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
1353   paint_gray(obj);
1354   obj->gcnext = gc->atomic_gray_list;
1355   gc->atomic_gray_list = obj;
1356 }
1357 
1358 /*
1359  *  call-seq:
1360  *     GC.start                     -> nil
1361  *
1362  *  Initiates full garbage collection.
1363  *
1364  */
1365 
/* GC.start — run a full collection cycle; always returns nil. */
static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}
1372 
1373 /*
1374  *  call-seq:
1375  *     GC.enable    -> true or false
1376  *
1377  *  Enables garbage collection, returning <code>true</code> if garbage
1378  *  collection was previously disabled.
1379  *
1380  *     GC.disable   #=> false
1381  *     GC.enable    #=> true
1382  *     GC.enable    #=> false
1383  *
1384  */
1385 
1386 static mrb_value
gc_enable(mrb_state * mrb,mrb_value obj)1387 gc_enable(mrb_state *mrb, mrb_value obj)
1388 {
1389   mrb_bool old = mrb->gc.disabled;
1390 
1391   mrb->gc.disabled = FALSE;
1392 
1393   return mrb_bool_value(old);
1394 }
1395 
1396 /*
1397  *  call-seq:
1398  *     GC.disable    -> true or false
1399  *
1400  *  Disables garbage collection, returning <code>true</code> if garbage
1401  *  collection was already disabled.
1402  *
1403  *     GC.disable   #=> false
1404  *     GC.disable   #=> true
1405  *
1406  */
1407 
1408 static mrb_value
gc_disable(mrb_state * mrb,mrb_value obj)1409 gc_disable(mrb_state *mrb, mrb_value obj)
1410 {
1411   mrb_bool old = mrb->gc.disabled;
1412 
1413   mrb->gc.disabled = TRUE;
1414 
1415   return mrb_bool_value(old);
1416 }
1417 
1418 /*
1419  *  call-seq:
1420  *     GC.interval_ratio      -> fixnum
1421  *
1422  *  Returns ratio of GC interval. Default value is 200(%).
1423  *
1424  */
1425 
/* GC.interval_ratio — return the GC interval ratio as a Fixnum. */
static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc.interval_ratio);
}
1431 
1432 /*
1433  *  call-seq:
1434  *     GC.interval_ratio = fixnum    -> nil
1435  *
1436  *  Updates ratio of GC interval. Default value is 200(%).
 *  With a value of 100(%), the next GC starts as soon as the current GC cycle ends.
1438  *
1439  */
1440 
1441 static mrb_value
gc_interval_ratio_set(mrb_state * mrb,mrb_value obj)1442 gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
1443 {
1444   mrb_int ratio;
1445 
1446   mrb_get_args(mrb, "i", &ratio);
1447   mrb->gc.interval_ratio = (int)ratio;
1448   return mrb_nil_value();
1449 }
1450 
1451 /*
1452  *  call-seq:
1453  *     GC.step_ratio    -> fixnum
1454  *
1455  *  Returns step span ratio of Incremental GC. Default value is 200(%).
1456  *
1457  */
1458 
/* GC.step_ratio — return the incremental GC step ratio as a Fixnum. */
static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc.step_ratio);
}
1464 
1465 /*
1466  *  call-seq:
1467  *     GC.step_ratio = fixnum   -> nil
1468  *
1469  *  Updates step span ratio of Incremental GC. Default value is 200(%).
 *  A larger ratio makes each step of the incremental GC longer.
1471  *
1472  */
1473 
1474 static mrb_value
gc_step_ratio_set(mrb_state * mrb,mrb_value obj)1475 gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
1476 {
1477   mrb_int ratio;
1478 
1479   mrb_get_args(mrb, "i", &ratio);
1480   mrb->gc.step_ratio = (int)ratio;
1481   return mrb_nil_value();
1482 }
1483 
/* Switch the collector between generational and non-generational mode.
 * Raises E_RUNTIME_ERROR while the GC is disabled or iterating, since the
 * switch needs to run collection cycles. */
static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (gc->disabled || gc->iterating) {
    mrb_raise(mrb, E_RUNTIME_ERROR, "generational mode changed when GC disabled");
    return;  /* NOTE(review): presumably unreachable if mrb_raise longjmps — confirm */
  }
  if (is_generational(gc) && !enable) {
    /* leaving generational mode: demote every old object back to young */
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    /* entering generational mode: finish the current cycle, then size
     * the major-GC trigger from the survivor count */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }
  gc->generational = enable;
}
1503 
1504 /*
1505  *  call-seq:
1506  *     GC.generational_mode -> true or false
1507  *
1508  *  Returns generational or normal gc mode.
1509  *
1510  */
1511 
/* GC.generational_mode — return whether generational mode is active. */
static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
  return mrb_bool_value(mrb->gc.generational);
}
1517 
1518 /*
1519  *  call-seq:
1520  *     GC.generational_mode = true or false -> true or false
1521  *
1522  *  Changes to generational or normal gc mode.
1523  *
1524  */
1525 
1526 static mrb_value
gc_generational_mode_set(mrb_state * mrb,mrb_value self)1527 gc_generational_mode_set(mrb_state *mrb, mrb_value self)
1528 {
1529   mrb_bool enable;
1530 
1531   mrb_get_args(mrb, "b", &enable);
1532   if (mrb->gc.generational != enable)
1533     change_gen_gc_mode(mrb, &mrb->gc, enable);
1534 
1535   return mrb_bool_value(enable);
1536 }
1537 
1538 
1539 static void
gc_each_objects(mrb_state * mrb,mrb_gc * gc,mrb_each_object_callback * callback,void * data)1540 gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
1541 {
1542   mrb_heap_page* page;
1543 
1544   page = gc->heaps;
1545   while (page != NULL) {
1546     RVALUE *p;
1547     int i;
1548 
1549     p = objects(page);
1550     for (i=0; i < MRB_HEAP_PAGE_SIZE; i++) {
1551       if ((*callback)(mrb, &p[i].as.basic, data) == MRB_EACH_OBJ_BREAK)
1552         return;
1553     }
1554     page = page->next;
1555   }
1556 }
1557 
/* Iterate over every heap object, calling `callback` for each.  A full GC
 * runs first so the heap is in a consistent state, and gc.iterating is set
 * to suppress collection during iteration.  For the outermost (non-nested)
 * call, a jump buffer is installed so the flag is restored even if the
 * callback raises. */
void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
  /* remember the previous flag so nested calls restore it correctly */
  mrb_bool iterating = mrb->gc.iterating;

  mrb_full_gc(mrb);
  mrb->gc.iterating = TRUE;
  if (iterating) {
    /* already inside an iteration: the outer call owns the jump buffer */
    gc_each_objects(mrb, &mrb->gc, callback, data);
  }
  else {
    struct mrb_jmpbuf *prev_jmp = mrb->jmp;
    struct mrb_jmpbuf c_jmp;

    MRB_TRY(&c_jmp) {
      mrb->jmp = &c_jmp;
      gc_each_objects(mrb, &mrb->gc, callback, data);
      mrb->jmp = prev_jmp;
      mrb->gc.iterating = iterating;
   } MRB_CATCH(&c_jmp) {
      /* restore state, then re-raise to the previous handler */
      mrb->gc.iterating = iterating;
      mrb->jmp = prev_jmp;
      MRB_THROW(prev_jmp);
    } MRB_END_EXC(&c_jmp);
  }
}
1584 
1585 #ifdef GC_TEST
1586 #ifdef GC_DEBUG
1587 static mrb_value gc_test(mrb_state *, mrb_value);
1588 #endif
1589 #endif
1590 
/* Define the Ruby-level GC module and register its class methods. */
void
mrb_init_gc(mrb_state *mrb)
{
  struct RClass *gc;

  gc = mrb_define_module(mrb, "GC");

  mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
#ifdef GC_TEST
#ifdef GC_DEBUG
  /* self-test entry point, only in debug builds */
  mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
#endif
#endif
}
1613 
1614 #ifdef GC_TEST
1615 #ifdef GC_DEBUG
/* Self-test for mrb_field_write_barrier: exercises the mark-phase path,
 * the sweep-phase path, and both early-return guards (non-black obj,
 * non-white value), plus the mrb_value wrapper. */
void
test_mrb_field_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj, *value;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_field_write_barrier");
  gc->generational = FALSE;
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
  paint_black(obj);
  paint_partial_white(gc, value);


  /* Black -> White in mark phase: value must be grayed */
  puts("  in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  /* Black -> White in sweep phase: obj is repainted white instead */
  puts("  in MRB_GC_STATE_SWEEP");
  paint_partial_white(gc, value);
  gc->state = MRB_GC_STATE_SWEEP;
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & gc->current_white_part);
  mrb_assert(value->color & gc->current_white_part);


  /* guard: a non-black obj must make the barrier a no-op */
  puts("  fail with black");
  gc->state = MRB_GC_STATE_MARK;
  paint_white(obj);
  paint_partial_white(gc, value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & gc->current_white_part);


  /* guard: a non-white value must make the barrier a no-op */
  puts("  fail with gray");
  gc->state = MRB_GC_STATE_MARK;
  paint_black(obj);
  paint_gray(value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  {
    /* same check through the mrb_value-taking wrapper */
    puts("test_mrb_field_write_barrier_value");
    obj = mrb_basic_ptr(mrb_ary_new(mrb));
    mrb_value value = mrb_str_new_lit(mrb, "value");
    paint_black(obj);
    paint_partial_white(gc, mrb_basic_ptr(value));

    gc->state = MRB_GC_STATE_MARK;
    mrb_field_write_barrier_value(mrb, obj, value);

    mrb_assert(is_gray(mrb_basic_ptr(value)));
  }

  mrb_close(mrb);
}
1680 
/* Self-test for mrb_write_barrier: a black object must be grayed and
 * pushed onto the atomic gray list; a gray object must be left alone. */
void
test_mrb_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_write_barrier");
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  paint_black(obj);

  puts("  in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));
  mrb_assert(gc->atomic_gray_list == obj);


  /* guard: barrier is a no-op for non-black objects */
  puts("  fail with gray");
  paint_gray(obj);
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));

  mrb_close(mrb);
}
1708 
/* Self-test for add_gray_list: verifies LIFO linking via gcnext and that
 * each added object is painted gray. */
void
test_add_gray_list(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj1, *obj2;
  mrb_gc *gc = &mrb->gc;

  puts("test_add_gray_list");
  change_gen_gc_mode(mrb, gc, FALSE);
  mrb_assert(gc->gray_list == NULL);
  obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj1);
  mrb_assert(gc->gray_list == obj1);
  mrb_assert(is_gray(obj1));

  /* second add must push in front of the first */
  obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj2);
  mrb_assert(gc->gray_list == obj2);
  mrb_assert(gc->gray_list->gcnext == obj1);
  mrb_assert(is_gray(obj2));

  mrb_close(mrb);
}
1732 
/* Self-test for gc_gray_mark: marking a class blackens it and grays many
 * children; marking a one-element array blackens it and grays exactly
 * that element. */
void
test_gc_gray_mark(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value obj_v, value_v;
  struct RBasic *obj;
  size_t gray_num = 0;
  mrb_gc *gc = &mrb->gc;

  puts("test_gc_gray_mark");

  puts("  in MRB_TT_CLASS");
  obj = (struct RBasic*)mrb->object_class;
  paint_gray(obj);
  gray_num = gc_gray_mark(mrb, gc, obj);
  mrb_assert(is_black(obj));
  /* Object has multiple reachable children (superclass, methods, ...) */
  mrb_assert(gray_num > 1);

  puts("  in MRB_TT_ARRAY");
  obj_v = mrb_ary_new(mrb);
  value_v = mrb_str_new_lit(mrb, "test");
  paint_gray(mrb_basic_ptr(obj_v));
  paint_partial_white(gc, mrb_basic_ptr(value_v));
  mrb_ary_push(mrb, obj_v, value_v);
  gray_num = gc_gray_mark(mrb, gc, mrb_basic_ptr(obj_v));
  mrb_assert(is_black(mrb_basic_ptr(obj_v)));
  mrb_assert(is_gray(mrb_basic_ptr(value_v)));
  mrb_assert(gray_num == 1);

  mrb_close(mrb);
}
1764 
1765 void
test_incremental_gc(void)1766 test_incremental_gc(void)
1767 {
1768   mrb_state *mrb = mrb_open();
1769   size_t max = ~0, live = 0, total = 0, freed = 0;
1770   RVALUE *free;
1771   mrb_heap_page *page;
1772   mrb_gc *gc = &mrb->gc;
1773 
1774   puts("test_incremental_gc");
1775   change_gen_gc_mode(mrb, gc, FALSE);
1776 
1777   puts("  in mrb_full_gc");
1778   mrb_full_gc(mrb);
1779 
1780   mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1781   puts("  in MRB_GC_STATE_ROOT");
1782   incremental_gc(mrb, gc, max);
1783   mrb_assert(gc->state == MRB_GC_STATE_MARK);
1784   puts("  in MRB_GC_STATE_MARK");
1785   incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
1786   mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1787 
1788   puts("  in MRB_GC_STATE_SWEEP");
1789   page = gc->heaps;
1790   while (page) {
1791     RVALUE *p = objects(page);
1792     RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
1793     while (p<e) {
1794       if (is_black(&p->as.basic)) {
1795         live++;
1796       }
1797       if (is_gray(&p->as.basic) && !is_dead(gc, &p->as.basic)) {
1798         printf("%p\n", &p->as.basic);
1799       }
1800       p++;
1801     }
1802     page = page->next;
1803     total += MRB_HEAP_PAGE_SIZE;
1804   }
1805 
1806   mrb_assert(gc->gray_list == NULL);
1807 
1808   incremental_gc(mrb, gc, max);
1809   mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1810 
1811   incremental_gc(mrb, gc, max);
1812   mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1813 
1814   free = (RVALUE*)gc->heaps->freelist;
1815   while (free) {
1816    freed++;
1817    free = (RVALUE*)free->as.free.next;
1818   }
1819 
1820   mrb_assert(gc->live == live);
1821   mrb_assert(gc->live == total-freed);
1822 
1823   puts("test_incremental_gc(gen)");
1824   incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
1825   change_gen_gc_mode(mrb, gc, TRUE);
1826 
1827   mrb_assert(gc->full == FALSE);
1828   mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1829 
1830   puts("  in minor");
1831   mrb_assert(is_minor_gc(gc));
1832   mrb_assert(gc->majorgc_old_threshold > 0);
1833   gc->majorgc_old_threshold = 0;
1834   mrb_incremental_gc(mrb);
1835   mrb_assert(gc->full == TRUE);
1836   mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1837 
1838   puts("  in major");
1839   mrb_assert(is_major_gc(gc));
1840   do {
1841     mrb_incremental_gc(mrb);
1842   } while (gc->state != MRB_GC_STATE_ROOT);
1843   mrb_assert(gc->full == FALSE);
1844 
1845   mrb_close(mrb);
1846 }
1847 
/* Self-test for incremental_sweep_phase: after adding an extra (empty)
 * heap page, a sweep with a large budget must release the fully-dead page
 * and leave a single page that is also the sole free page. */
void
test_incremental_sweep_phase(void)
{
  mrb_state *mrb = mrb_open();
  mrb_gc *gc = &mrb->gc;

  puts("test_incremental_sweep_phase");

  add_heap(mrb, gc);
  gc->sweeps = gc->heaps;

  /* exactly two pages on both lists before sweeping */
  mrb_assert(gc->heaps->next->next == NULL);
  mrb_assert(gc->free_heaps->next->next == NULL);
  incremental_sweep_phase(mrb, gc, MRB_HEAP_PAGE_SIZE * 3);

  mrb_assert(gc->heaps->next == NULL);
  mrb_assert(gc->heaps == gc->free_heaps);

  mrb_close(mrb);
}
1868 
/* GC.test — run every GC self-test in sequence (debug builds only);
 * returns nil. */
static mrb_value
gc_test(mrb_state *mrb, mrb_value self)
{
  test_mrb_field_write_barrier();
  test_mrb_write_barrier();
  test_add_gray_list();
  test_gc_gray_mark();
  test_incremental_gc();
  test_incremental_sweep_phase();
  return mrb_nil_value();
}
1880 #endif  /* GC_DEBUG */
1881 #endif  /* GC_TEST */
1882