/*
** gc.c - garbage collector for mruby
**
** See Copyright Notice in mruby.h
*/

#include <string.h>
#include <stdlib.h>
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/class.h>
#include <mruby/data.h>
#include <mruby/istruct.h>
#include <mruby/hash.h>
#include <mruby/proc.h>
#include <mruby/range.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/gc.h>
#include <mruby/error.h>
#include <mruby/throw.h>

/*
  = Tri-color Incremental Garbage Collection

  mruby's GC is a tri-color incremental GC with mark & sweep.
  The details of the algorithm are omitted here;
  instead, the implementation is described below.

  == Object Colors

  Each object is painted one of three colors:

    * White - Unmarked.
    * Gray  - Marked, but its child objects are not yet marked.
    * Black - Marked, and its child objects are marked as well.

  == Two White Types

  There are two white colors, used in a flip-flop fashion: White-A and
  White-B, which respectively represent the Current White color (the newly
  allocated objects in the current GC cycle) and the Sweep Target White
  color (the dead objects to be swept).

  A and B are switched at the beginning of the next GC cycle. At that
  point, all the dead objects have been swept, while the objects newly
  created during the current GC cycle that still remain White are now
  regarded as dead. Instead of traversing all the White-A objects and
  repainting them White-B, we just swap the meaning of White-A and
  White-B, which is much cheaper.

  As a result, the objects we sweep in the current GC cycle are always
  left over from the previous GC cycle. This allows us to sweep objects
  incrementally, without interference from newly created objects.

  == Execution Timing

  The GC execution time and the interval between steps are decided by the
  live object count. List of adjustment APIs:

    * gc_interval_ratio_set
    * gc_step_ratio_set

  For details, see the comments for each function.

  == Write Barrier

  mruby implementers and C extension library writers must insert a write
  barrier when updating a reference held in a field of an object.
  When updating a reference from a field of object A to object B,
  two different types of write barrier are available (a usage sketch
  follows this list):

    * mrb_field_write_barrier - queues object B for a mark.
    * mrb_write_barrier       - queues object A for a mark.

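  A minimal usage sketch from a hypothetical C extension (the `holder'
  struct and its `target' field below are illustrative, not part of
  mruby):

    struct holder {
      MRB_OBJECT_HEADER;
      struct RBasic *target;
    };

    void holder_set_target(mrb_state *mrb, struct holder *h, struct RBasic *b)
    {
      h->target = b;                                      /* update the field */
      mrb_field_write_barrier(mrb, (struct RBasic*)h, b); /* then notify the GC */
    }
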
  == Generational Mode

  mruby's GC offers a Generational Mode while re-using the tri-color GC
  infrastructure. After each sweep phase it treats the Black objects as
  Old objects instead of painting them White. The key ideas are the same
  as in traditional generational GC:

    * Minor GC - only traverses the Young objects (Gray objects) in the
                 mark phase, then only sweeps the newly created objects,
                 leaving the Old objects alive.

    * Major GC - same as a full regular GC cycle.

  The difference from "traditional" generational GC is that the major GC
  in mruby is triggered incrementally, in a tri-color manner.


  For details, see the comments for each function.

*/

struct free_obj {
  MRB_OBJECT_HEADER;
  struct RBasic *next;
};

typedef struct {
  union {
    struct free_obj free;
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RIStruct istruct;
    struct RProc proc;
    struct REnv env;
    struct RFiber fiber;
    struct RException exc;
    struct RBreak brk;
#ifdef MRB_WORD_BOXING
#ifndef MRB_WITHOUT_FLOAT
    struct RFloat floatv;
#endif
    struct RCptr cptr;
#endif
  } as;
} RVALUE;

#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
} while(0)

#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", gc->state);\
  fprintf(stderr, "live: %zu\n", gc->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif

#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif

#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif

#define GC_STEP_SIZE 1024

/* white: 001 or 010, black: 100, gray: 000 */
#define GC_GRAY 0
#define GC_WHITE_A 1
#define GC_WHITE_B (1 << 1)
#define GC_BLACK (1 << 2)
#define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
#define GC_COLOR_MASK 7

#define paint_gray(o) ((o)->color = GC_GRAY)
#define paint_black(o) ((o)->color = GC_BLACK)
#define paint_white(o) ((o)->color = GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == GC_GRAY)
#define is_white(o) ((o)->color & GC_WHITES)
#define is_black(o) ((o)->color & GC_BLACK)
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
#define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)
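
/* A worked example of the white flip-flop (a sketch, values per the macros
 * above): suppose current_white_part == GC_WHITE_A (001). Then
 * other_white_part(s) == 001 ^ 011 == GC_WHITE_B (010), so is_dead() is
 * true exactly for objects still carrying the White-B bit, i.e. the
 * leftovers of the previous cycle's white. Objects freshly painted by
 * paint_partial_white() carry 001 and are not swept this cycle. */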

#define objects(p) ((RVALUE *)p->objects)

mrb_noreturn void mrb_raise_nomemory(mrb_state *mrb);

MRB_API void*
mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  if (!p2 && len > 0 && mrb->gc.heaps) {
    mrb_full_gc(mrb);
    p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
  }

  return p2;
}

MRB_API void*
mrb_realloc(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = mrb_realloc_simple(mrb, p, len);
  if (len == 0) return p2;
  if (p2 == NULL) {
    if (mrb->gc.out_of_memory) {
      mrb_raise_nomemory(mrb);
      /* mrb_panic(mrb); */
    }
    else {
      mrb->gc.out_of_memory = TRUE;
      mrb_raise_nomemory(mrb);
    }
  }
  else {
    mrb->gc.out_of_memory = FALSE;
  }

  return p2;
}

MRB_API void*
mrb_malloc(mrb_state *mrb, size_t len)
{
  return mrb_realloc(mrb, 0, len);
}

MRB_API void*
mrb_malloc_simple(mrb_state *mrb, size_t len)
{
  return mrb_realloc_simple(mrb, 0, len);
}
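
/* Usage sketch (illustrative, not part of this file): the "_simple"
 * variants return NULL on failure, while the plain variants raise
 * NoMemoryError, so only the former need an explicit check:
 *
 *   char *buf = (char*)mrb_malloc_simple(mrb, n);
 *   if (buf == NULL) {
 *     // recover gracefully; no exception has been raised
 *   }
 *   char *buf2 = (char*)mrb_malloc(mrb, n);  // raises on failure
 */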

MRB_API void*
mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
{
  void *p;

  if (nelem > 0 && len > 0 &&
      nelem <= SIZE_MAX / len) {
    size_t size;
    size = nelem * len;
    p = mrb_malloc(mrb, size);

    memset(p, 0, size);
  }
  else {
    p = NULL;
  }

  return p;
}

MRB_API void
mrb_free(mrb_state *mrb, void *p)
{
  (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}

MRB_API void*
mrb_alloca(mrb_state *mrb, size_t size)
{
  struct RString *s;
  s = (struct RString*)mrb_obj_alloc(mrb, MRB_TT_STRING, mrb->string_class);
  return s->as.heap.ptr = (char*)mrb_malloc(mrb, size);
}
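
/* Note (editor's sketch of the mechanism above): mrb_alloca() parks the
 * buffer in a throwaway string object, so the memory is reclaimed
 * automatically when that string is swept; the caller must not hold the
 * pointer past the point where the string can be collected. */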

static mrb_bool
heap_p(mrb_gc *gc, struct RBasic *object)
{
  mrb_heap_page* page;

  page = gc->heaps;
  while (page) {
    RVALUE *p;

    p = objects(page);
    if (&p[0].as.basic <= object && object <= &p[MRB_HEAP_PAGE_SIZE].as.basic) {
      return TRUE;
    }
    page = page->next;
  }
  return FALSE;
}

MRB_API mrb_bool
mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
  mrb_gc *gc = &mrb->gc;
  if (!heap_p(gc, object)) return TRUE;
  return is_dead(gc, object);
}

static void
link_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  page->next = gc->heaps;
  if (gc->heaps)
    gc->heaps->prev = page;
  gc->heaps = page;
}

static void
unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  if (page->prev)
    page->prev->next = page->next;
  if (page->next)
    page->next->prev = page->prev;
  if (gc->heaps == page)
    gc->heaps = page->next;
  page->prev = NULL;
  page->next = NULL;
}

static void
link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  page->free_next = gc->free_heaps;
  if (gc->free_heaps) {
    gc->free_heaps->free_prev = page;
  }
  gc->free_heaps = page;
}

static void
unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
  if (page->free_prev)
    page->free_prev->free_next = page->free_next;
  if (page->free_next)
    page->free_next->free_prev = page->free_prev;
  if (gc->free_heaps == page)
    gc->free_heaps = page->free_next;
  page->free_prev = NULL;
  page->free_next = NULL;
}

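/* add_heap() below allocates one page of MRB_HEAP_PAGE_SIZE cells and
 * threads them into a freelist. A sketch of the result (cells are linked
 * in reverse, so freelist points at the highest cell):
 *
 *   page->freelist -> cell[N-1] -> cell[N-2] -> ... -> cell[0] -> NULL
 */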
static void
add_heap(mrb_state *mrb, mrb_gc *gc)
{
  mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
  RVALUE *p, *e;
  struct RBasic *prev = NULL;

  for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
    p->as.free.tt = MRB_TT_FREE;
    p->as.free.next = prev;
    prev = &p->as.basic;
  }
  page->freelist = prev;

  link_heap_page(gc, page);
  link_free_heap_page(gc, page);
}

#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
#define MAJOR_GC_INC_RATIO 120
#define MAJOR_GC_TOOMANY 10000
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)

void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  gc->generational = TRUE;
  gc->full = TRUE;
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}

static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);

static void
free_heap(mrb_state *mrb, mrb_gc *gc)
{
  mrb_heap_page *page = gc->heaps;
  mrb_heap_page *tmp;
  RVALUE *p, *e;

  while (page) {
    tmp = page;
    page = page->next;
    for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
      if (p->as.free.tt != MRB_TT_FREE)
        obj_free(mrb, &p->as.basic, TRUE);
    }
    mrb_free(mrb, tmp);
  }
}

void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
  free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
  mrb_free(mrb, gc->arena);
#endif
}

static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
  }
#else
  if (gc->arena_idx >= gc->arena_capa) {
    /* extend arena */
    gc->arena_capa = (int)(gc->arena_capa * 3 / 2);
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
  }
#endif
  gc->arena[gc->arena_idx++] = p;
}

/* mrb_gc_protect() leaves the object in the arena */
MRB_API void
mrb_gc_protect(mrb_state *mrb, mrb_value obj)
{
  if (mrb_immediate_p(obj)) return;
  gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
}
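
/* Usage sketch (illustrative): because every allocated object lands in the
 * arena via gc_protect(), long-running C code should bracket temporary
 * allocations with mruby's arena save/restore API:
 *
 *   int ai = mrb_gc_arena_save(mrb);
 *   mrb_value tmp = mrb_str_new_lit(mrb, "scratch");
 *   // ...use tmp...
 *   mrb_gc_arena_restore(mrb, ai);   // tmp may be collected after this
 */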

#define GC_ROOT_NAME "_gc_root_"

/* mrb_gc_register() protects the object from GC.

   Register your object when it is exported to the C world
   without any reference from the Ruby world, e.g. callback
   arguments. Don't forget to remove the object using
   mrb_gc_unregister, otherwise your object will leak.
*/

MRB_API void
mrb_gc_register(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root;
  mrb_value table;

  if (mrb_immediate_p(obj)) return;
  root = mrb_intern_lit(mrb, GC_ROOT_NAME);
  table = mrb_gv_get(mrb, root);
  if (mrb_nil_p(table) || !mrb_array_p(table)) {
    table = mrb_ary_new(mrb);
    mrb_gv_set(mrb, root, table);
  }
  mrb_ary_push(mrb, table, obj);
}

/* mrb_gc_unregister() removes the object from the GC root. */
MRB_API void
mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root;
  mrb_value table;
  struct RArray *a;
  mrb_int i;

  if (mrb_immediate_p(obj)) return;
  root = mrb_intern_lit(mrb, GC_ROOT_NAME);
  table = mrb_gv_get(mrb, root);
  if (mrb_nil_p(table)) return;
  if (!mrb_array_p(table)) {
    mrb_gv_set(mrb, root, mrb_nil_value());
    return;
  }
  a = mrb_ary_ptr(table);
  mrb_ary_modify(mrb, a);
  for (i = 0; i < ARY_LEN(a); i++) {
    if (mrb_ptr(ARY_PTR(a)[i]) == mrb_ptr(obj)) {
      mrb_int len = ARY_LEN(a)-1;
      mrb_value *ptr = ARY_PTR(a);

      ARY_SET_LEN(a, len);
      memmove(&ptr[i], &ptr[i + 1], (len - i) * sizeof(mrb_value));
      break;
    }
  }
}
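
/* Usage sketch (illustrative): keeping a callback alive while only C
 * holds a reference to it:
 *
 *   mrb_value cb = mrb_funcall(mrb, owner, "callback", 0);  // owner is hypothetical
 *   mrb_gc_register(mrb, cb);      // now safe to stash in a C struct
 *   // ...
 *   mrb_gc_unregister(mrb, cb);    // done; allow collection again
 */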

MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { NULL, NULL, MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

  if (cls) {
    enum mrb_vtype tt;

    switch (cls->tt) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
    case MRB_TT_MODULE:
    case MRB_TT_ENV:
      break;
    default:
      mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
    }
    tt = MRB_INSTANCE_TT(cls);
    if (tt != MRB_TT_FALSE &&
        ttype != MRB_TT_SCLASS &&
        ttype != MRB_TT_ICLASS &&
        ttype != MRB_TT_ENV &&
        ttype != tt) {
      mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %C", cls);
    }
  }

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (gc->threshold < gc->live) {
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);
  }

  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);
  return p;
}

static inline void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
  if (obj->tt > MRB_TT_MAXDEFINE) {
    abort();
  }
#endif
  paint_gray(obj);
  obj->gcnext = gc->gray_list;
  gc->gray_list = obj;
}

static int
ci_nregs(mrb_callinfo *ci)
{
  struct RProc *p = ci->proc;
  int n = 0;

  if (!p) {
    if (ci->argc < 0) return 3;
    return ci->argc+2;
  }
  if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
    n = p->body.irep->nregs;
  }
  if (ci->argc < 0) {
    if (n < 3) n = 3; /* self + args + blk */
  }
  if (ci->argc > n) {
    n = ci->argc + 2; /* self + blk */
  }
  return n;
}

static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;
  mrb_value nil;

  if (c->stack == NULL) return;
  e = c->stack - c->stbase;
  if (c->ci) {
    e += ci_nregs(c->ci);
  }
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;
  for (i=0; i<e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      mrb_gc_mark(mrb, mrb_basic_ptr(v));
    }
  }
  e = c->stend - c->stbase;
  nil = mrb_nil_value();
  for (; i<e; i++) {
    c->stbase[i] = nil;
  }
}

static void
mark_context(mrb_state *mrb, struct mrb_context *c)
{
  int i;
  mrb_callinfo *ci;

 start:
  if (c->status == MRB_FIBER_TERMINATED) return;

  /* mark VM stack */
  mark_context_stack(mrb, c);

  /* mark call stack */
  if (c->cibase) {
    for (ci = c->cibase; ci <= c->ci; ci++) {
      mrb_gc_mark(mrb, (struct RBasic*)ci->env);
      mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
      mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
    }
  }
  /* mark ensure stack */
  for (i=0; i<c->eidx; i++) {
    mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
  }
  /* mark fibers */
  mrb_gc_mark(mrb, (struct RBasic*)c->fib);
  if (c->prev) {
    c = c->prev;
    goto start;
  }
}

static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  gc->gray_list = obj->gcnext;
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);
  switch (obj->tt) {
  case MRB_TT_ICLASS:
    {
      struct RClass *c = (struct RClass*)obj;
      if (MRB_FLAG_TEST(c, MRB_FL_CLASS_IS_ORIGIN))
        mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    }
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)c->super);
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->upper);
      mrb_gc_mark(mrb, (struct RBasic*)p->e.env);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;
      mrb_int i, len;

      if (MRB_ENV_ONSTACK_P(e) && e->cxt && e->cxt->fib) {
        mrb_gc_mark(mrb, (struct RBasic*)e->cxt->fib);
      }
      len = MRB_ENV_LEN(e);
      for (i=0; i<len; i++) {
        mrb_gc_mark_value(mrb, e->stack[i]);
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e;

      for (i=0,e=ARY_LEN(a); i<e; i++) {
        mrb_gc_mark_value(mrb, ARY_PTR(a)[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    if (RSTR_FSHARED_P(obj)) {
      struct RString *s = (struct RString*)obj;
      mrb_gc_mark(mrb, (struct RBasic*)s->as.heap.aux.fshared);
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_mark_range(mrb, (struct RRange*)obj);
    break;

  default:
    break;
  }
}

MRB_API void
mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
{
  if (obj == 0) return;
  if (!is_white(obj)) return;
  mrb_assert((obj)->tt != MRB_TT_FREE);
  add_gray_list(mrb, &mrb->gc, obj);
}

static void
obj_free(mrb_state *mrb, struct RBasic *obj, int end)
{
  DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt));
  switch (obj->tt) {
    /* immediate - no mark */
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
    /* cannot happen */
    return;

#ifndef MRB_WITHOUT_FLOAT
  case MRB_TT_FLOAT:
#ifdef MRB_WORD_BOXING
    break;
#else
    return;
#endif
#endif

  case MRB_TT_OBJECT:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ICLASS:
    if (MRB_FLAG_TEST(obj, MRB_FL_CLASS_IS_ORIGIN))
      mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (MRB_ENV_ONSTACK_P(e)) {
        /* cannot be freed */
        e->stack = NULL;
        break;
      }
      mrb_free(mrb, e->stack);
      e->stack = NULL;
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c && c != mrb->root_c) {
        if (!end && c->status != MRB_FIBER_TERMINATED) {
          mrb_callinfo *ci = c->ci;
          mrb_callinfo *ce = c->cibase;

          while (ce <= ci) {
            struct REnv *e = ci->env;
            if (e && !mrb_object_dead_p(mrb, (struct RBasic*)e) &&
                e->tt == MRB_TT_ENV && MRB_ENV_ONSTACK_P(e)) {
              mrb_env_unshare(mrb, e);
            }
            ci--;
          }
        }
        mrb_free_context(mrb, c);
      }
    }
    break;

  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->as.heap.aux.shared);
    else if (!ARY_EMBED_P(obj))
      mrb_free(mrb, ((struct RArray*)obj)->as.heap.ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep *irep = p->body.irep;
        if (end) {
          mrb_irep_cutref(mrb, irep);
        }
        mrb_irep_decref(mrb, irep);
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_free_range(mrb, ((struct RRange*)obj));
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;
}

static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  if (!is_minor_gc(gc)) {
    gc->gray_list = NULL;
    gc->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);

  /* mark built-in classes */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->range_class);

#ifndef MRB_WITHOUT_FLOAT
  mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
#endif
  mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);

  mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);

  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
  mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif

  mark_context(mrb, mrb->c);
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->root_c);
  }
}

/* rough estimation of the number of GC marks (non-recursive) */
static size_t
gc_gray_counts(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  size_t children = 0;

  switch (obj->tt) {
  case MRB_TT_ICLASS:
    children++;
    break;

  case MRB_TT_CLASS:
  case MRB_TT_SCLASS:
  case MRB_TT_MODULE:
    {
      struct RClass *c = (struct RClass*)obj;

      children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
      children += mrb_gc_mark_mt_size(mrb, c);
      children++;
    }
    break;

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_ENV:
    children += MRB_ENV_LEN(obj);
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;
      size_t i;
      mrb_callinfo *ci;

      if (!c || c->status == MRB_FIBER_TERMINATED) break;

      /* count stack slots */
      i = c->stack - c->stbase;

      if (c->ci) {
        i += ci_nregs(c->ci);
      }
      if (c->stbase + i > c->stend) i = c->stend - c->stbase;
      children += i;

      /* count ensure stack */
      children += c->eidx;

      /* count call stack entries */
      if (c->cibase) {
        for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
          ;
      }
      children += i;
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      children += ARY_LEN(a);
    }
    break;

  case MRB_TT_HASH:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_PROC:
  case MRB_TT_RANGE:
    children+=2;
    break;

  default:
    break;
  }
  return children;
}


static void
gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
  while (gc->gray_list) {
    if (is_gray(gc->gray_list))
      gc_mark_children(mrb, gc, gc->gray_list);
    else
      gc->gray_list = gc->gray_list->gcnext;
  }
}


static size_t
incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  size_t tried_marks = 0;

  while (gc->gray_list && tried_marks < limit) {
    struct RBasic *obj = gc->gray_list;
    gc_mark_children(mrb, gc, obj);
    tried_marks += gc_gray_counts(mrb, gc, obj);
  }

  return tried_marks;
}

static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  mrb_gc_mark_gv(mrb);
  mark_context(mrb, mrb->c);
  mark_context(mrb, mrb->root_c);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
  gc->gray_list = gc->atomic_gray_list;
  gc->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
}

static void
prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
{
  gc->state = MRB_GC_STATE_SWEEP;
  gc->sweeps = gc->heaps;
  gc->live_after_mark = gc->live;
}

static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  mrb_heap_page *page = gc->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;
    mrb_bool full = (page->freelist == NULL);

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic, FALSE);
          if (p->as.basic.tt == MRB_TT_FREE) {
            p->as.free.next = page->freelist;
            page->freelist = (struct RBasic*)p;
            freed++;
          }
          else {
            dead_slot = FALSE;
          }
        }
      }
      else {
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = FALSE;
      }
      p++;
    }

    /* free dead slot */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      mrb_heap_page *next = page->next;

      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      if (full && freed > 0) {
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;
  return tried_sweep;
}

static size_t
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  switch (gc->state) {
  case MRB_GC_STATE_ROOT:
    root_scan_phase(mrb, gc);
    gc->state = MRB_GC_STATE_MARK;
    flip_white_part(gc);
    return 0;
  case MRB_GC_STATE_MARK:
    if (gc->gray_list) {
      return incremental_marking_phase(mrb, gc, limit);
    }
    else {
      final_marking_phase(mrb, gc);
      prepare_incremental_sweep(mrb, gc);
      return 0;
    }
  case MRB_GC_STATE_SWEEP: {
     size_t tried_sweep = 0;
     tried_sweep = incremental_sweep_phase(mrb, gc, limit);
     if (tried_sweep == 0)
       gc->state = MRB_GC_STATE_ROOT;
     return tried_sweep;
  }
  default:
    /* unknown state */
    mrb_assert(0);
    return 0;
  }
}

static void
incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
{
  do {
    incremental_gc(mrb, gc, SIZE_MAX);
  } while (gc->state != to_state);
}

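/* A worked example of the step budget below (a sketch, with the default
 * step_ratio of 200): limit = (GC_STEP_SIZE/100) * 200 = 10 * 200 = 2000
 * (note the integer division), so one call performs roughly 2000 objects'
 * worth of mark/sweep work, then re-arms the threshold GC_STEP_SIZE live
 * objects ahead of the current count. */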
static void
incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
{
  size_t limit = 0, result = 0;
  limit = (GC_STEP_SIZE/100) * gc->step_ratio;
  while (result < limit) {
    result += incremental_gc(mrb, gc, limit);
    if (gc->state == MRB_GC_STATE_ROOT)
      break;
  }

  gc->threshold = gc->live + GC_STEP_SIZE;
}

static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
  mrb_bool origin_mode = gc->generational;

  mrb_assert(is_generational(gc));
  if (is_major_gc(gc)) {
    /* finish the half-baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  gc->generational = FALSE;
  prepare_incremental_sweep(mrb, gc);
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->generational = origin_mode;

  /* The gray objects have already been painted white */
  gc->atomic_gray_list = gc->gray_list = NULL;
}

MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      size_t threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;

      gc->full = FALSE;
      if (threshold < MAJOR_GC_TOOMANY) {
        gc->majorgc_old_threshold = threshold;
      }
      else {
        /* too many objects allocated during incremental GC; */
        /* instead of increasing the threshold, invoke a full GC. */
        mrb_full_gc(mrb);
      }
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}

/* Perform a full GC cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (!mrb->c) return;
  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish the half-baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;

  if (is_generational(gc)) {
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

  GC_TIME_STOP_AND_REPORT;
}

MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}

/*
 * Field write barrier
 *   Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
 */

MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
  mrb_gc *gc = &mrb->gc;

  if (!is_black(obj)) return;
  if (!is_white(value)) return;

  mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);

  if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
    add_gray_list(mrb, gc, value);
  }
  else {
    mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
    paint_partial_white(gc, obj); /* repaint obj white so no further barrier is needed */
  }
}

/*
 * Write barrier
 *   Paint obj(Black) to obj(Gray).
 *
 *   The object painted gray will be traversed atomically in the final
 *   mark phase, so use this write barrier for frequently written fields,
 *   e.g. setting an element of an Array.
 */

MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
  mrb_gc *gc = &mrb->gc;

  if (!is_black(obj)) return;

  mrb_assert(!is_dead(gc, obj));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
  paint_gray(obj);
  obj->gcnext = gc->atomic_gray_list;
  gc->atomic_gray_list = obj;
}
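
/* Usage sketch (illustrative): after bulk-mutating a container it is
 * cheaper to re-gray the container once than to barrier every element:
 *
 *   struct RArray *a = mrb_ary_ptr(ary);
 *   // ...overwrite many slots of a directly...
 *   mrb_write_barrier(mrb, (struct RBasic*)a);  // one barrier covers all writes
 */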

/*
 *  call-seq:
 *     GC.start                     -> nil
 *
 *  Initiates full garbage collection.
 *
 */

static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}

/*
 *  call-seq:
 *     GC.enable    -> true or false
 *
 *  Enables garbage collection, returning <code>true</code> if garbage
 *  collection was previously disabled.
 *
 *     GC.disable   #=> false
 *     GC.enable    #=> true
 *     GC.enable    #=> false
 *
 */

static mrb_value
gc_enable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = FALSE;

  return mrb_bool_value(old);
}

/*
 *  call-seq:
 *     GC.disable    -> true or false
 *
 *  Disables garbage collection, returning <code>true</code> if garbage
 *  collection was already disabled.
 *
 *     GC.disable   #=> false
 *     GC.disable   #=> true
 *
 */

static mrb_value
gc_disable(mrb_state *mrb, mrb_value obj)
{
  mrb_bool old = mrb->gc.disabled;

  mrb->gc.disabled = TRUE;

  return mrb_bool_value(old);
}

/*
 *  call-seq:
 *     GC.interval_ratio      -> fixnum
 *
 *  Returns the ratio of the GC interval. The default value is 200(%).
 *
 */

static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc.interval_ratio);
}

/*
 *  call-seq:
 *     GC.interval_ratio = fixnum    -> nil
 *
 *  Updates the ratio of the GC interval. The default value is 200(%).
 *  With 100(%), the next GC starts as soon as all steps of the current
 *  GC have finished.
 *
 */

static mrb_value
gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.interval_ratio = (int)ratio;
  return mrb_nil_value();
}

/*
 *  call-seq:
 *     GC.step_ratio    -> fixnum
 *
 *  Returns the step span ratio of the incremental GC. The default value
 *  is 200(%).
 *
 */

static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
  return mrb_fixnum_value(mrb->gc.step_ratio);
}

/*
 *  call-seq:
 *     GC.step_ratio = fixnum   -> nil
 *
 *  Updates the step span ratio of the incremental GC. The default value
 *  is 200(%). A larger ratio makes each incremental GC step longer.
 *
 */

static mrb_value
gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
{
  mrb_int ratio;

  mrb_get_args(mrb, "i", &ratio);
  mrb->gc.step_ratio = (int)ratio;
  return mrb_nil_value();
}

static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (gc->disabled || gc->iterating) {
    mrb_raise(mrb, E_RUNTIME_ERROR, "generational mode changed when GC disabled");
    return;
  }
  if (is_generational(gc) && !enable) {
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }
  gc->generational = enable;
}

/*
 *  call-seq:
 *     GC.generational_mode -> true or false
 *
 *  Returns whether the GC is in generational mode (as opposed to
 *  normal mode).
 *
 */

static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
  return mrb_bool_value(mrb->gc.generational);
}

/*
 *  call-seq:
 *     GC.generational_mode = true or false -> true or false
 *
 *  Switches between generational and normal GC mode.
 *
 */

static mrb_value
gc_generational_mode_set(mrb_state *mrb, mrb_value self)
{
  mrb_bool enable;

  mrb_get_args(mrb, "b", &enable);
  if (mrb->gc.generational != enable)
    change_gen_gc_mode(mrb, &mrb->gc, enable);

  return mrb_bool_value(enable);
}


static void
gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
{
  mrb_heap_page* page;

  page = gc->heaps;
  while (page != NULL) {
    RVALUE *p;
    int i;

    p = objects(page);
    for (i=0; i < MRB_HEAP_PAGE_SIZE; i++) {
      if ((*callback)(mrb, &p[i].as.basic, data) == MRB_EACH_OBJ_BREAK)
        return;
    }
    page = page->next;
  }
}

void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
  mrb_bool iterating = mrb->gc.iterating;

  mrb_full_gc(mrb);
  mrb->gc.iterating = TRUE;
  if (iterating) {
    gc_each_objects(mrb, &mrb->gc, callback, data);
  }
  else {
    struct mrb_jmpbuf *prev_jmp = mrb->jmp;
    struct mrb_jmpbuf c_jmp;

    MRB_TRY(&c_jmp) {
      mrb->jmp = &c_jmp;
      gc_each_objects(mrb, &mrb->gc, callback, data);
      mrb->jmp = prev_jmp;
      mrb->gc.iterating = iterating;
    } MRB_CATCH(&c_jmp) {
      mrb->gc.iterating = iterating;
      mrb->jmp = prev_jmp;
      MRB_THROW(prev_jmp);
    } MRB_END_EXC(&c_jmp);
  }
}
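
/* Usage sketch (illustrative): counting live objects with the iterator
 * above, using the mrb_each_object_callback type:
 *
 *   static int
 *   count_cb(mrb_state *mrb, struct RBasic *obj, void *data)
 *   {
 *     if (obj->tt != MRB_TT_FREE) (*(size_t*)data)++;
 *     return MRB_EACH_OBJ_OK;
 *   }
 *
 *   size_t n = 0;
 *   mrb_objspace_each_objects(mrb, count_cb, &n);
 */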

#ifdef GC_TEST
#ifdef GC_DEBUG
static mrb_value gc_test(mrb_state *, mrb_value);
#endif
#endif

void
mrb_init_gc(mrb_state *mrb)
{
  struct RClass *gc;

  mrb_static_assert(sizeof(RVALUE) <= sizeof(void*) * 6,
                    "RVALUE size must be within 6 words");

  gc = mrb_define_module(mrb, "GC");

  mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
#ifdef GC_TEST
#ifdef GC_DEBUG
  mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
#endif
#endif
}