1 /**********************************************************************
2
3 gc.c -
4
5 $Author: nagachika $
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12 **********************************************************************/
13
14 #define rb_data_object_alloc rb_data_object_alloc
15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17 #include "ruby/encoding.h"
18 #include "ruby/io.h"
19 #include "ruby/st.h"
20 #include "ruby/re.h"
21 #include "ruby/thread.h"
22 #include "ruby/util.h"
23 #include "ruby/debug.h"
24 #include "internal.h"
25 #include "eval_intern.h"
26 #include "vm_core.h"
27 #include "gc.h"
28 #include "constant.h"
29 #include "ruby_atomic.h"
30 #include "probes.h"
31 #include "id_table.h"
32 #include <stdio.h>
33 #include <stdarg.h>
34 #include <setjmp.h>
35 #include <sys/types.h>
36 #include "ruby_assert.h"
37 #include "debug_counter.h"
38 #include "transient_heap.h"
39 #include "mjit.h"
40
41 #undef rb_data_object_wrap
42
43 #ifndef HAVE_MALLOC_USABLE_SIZE
44 # ifdef _WIN32
45 # define HAVE_MALLOC_USABLE_SIZE
46 # define malloc_usable_size(a) _msize(a)
47 # elif defined HAVE_MALLOC_SIZE
48 # define HAVE_MALLOC_USABLE_SIZE
49 # define malloc_usable_size(a) malloc_size(a)
50 # endif
51 #endif
52 #ifdef HAVE_MALLOC_USABLE_SIZE
53 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
54 # include RUBY_ALTERNATIVE_MALLOC_HEADER
55 # elif HAVE_MALLOC_H
56 # include <malloc.h>
57 # elif defined(HAVE_MALLOC_NP_H)
58 # include <malloc_np.h>
59 # elif defined(HAVE_MALLOC_MALLOC_H)
60 # include <malloc/malloc.h>
61 # endif
62 #endif
63
64 #ifdef HAVE_SYS_TIME_H
65 #include <sys/time.h>
66 #endif
67
68 #ifdef HAVE_SYS_RESOURCE_H
69 #include <sys/resource.h>
70 #endif
71
72 #if defined _WIN32 || defined __CYGWIN__
73 #include <windows.h>
74 #elif defined(HAVE_POSIX_MEMALIGN)
75 #elif defined(HAVE_MEMALIGN)
76 #include <malloc.h>
77 #endif
78
79 #define rb_setjmp(env) RUBY_SETJMP(env)
80 #define rb_jmp_buf rb_jmpbuf_t
81
82 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
83 /* trick the compiler into thinking an external signal handler uses this */
84 volatile VALUE rb_gc_guarded_val;
85 volatile VALUE *
86 rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
87 {
88 rb_gc_guarded_val = val;
89
90 return ptr;
91 }
92 #endif
93
94 #ifndef GC_HEAP_INIT_SLOTS
95 #define GC_HEAP_INIT_SLOTS 10000
96 #endif
97 #ifndef GC_HEAP_FREE_SLOTS
98 #define GC_HEAP_FREE_SLOTS 4096
99 #endif
100 #ifndef GC_HEAP_GROWTH_FACTOR
101 #define GC_HEAP_GROWTH_FACTOR 1.8
102 #endif
103 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
104 #define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means disabled */
105 #endif
106 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
107 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
108 #endif
109
110 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
111 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
112 #endif
113 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
114 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
115 #endif
116 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
117 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
118 #endif
119
120 #ifndef GC_MALLOC_LIMIT_MIN
121 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
122 #endif
123 #ifndef GC_MALLOC_LIMIT_MAX
124 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
125 #endif
126 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
127 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
128 #endif
129
130 #ifndef GC_OLDMALLOC_LIMIT_MIN
131 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
132 #endif
133 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
134 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
135 #endif
136 #ifndef GC_OLDMALLOC_LIMIT_MAX
137 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
138 #endif
139
140 #ifndef PRINT_MEASURE_LINE
141 #define PRINT_MEASURE_LINE 0
142 #endif
143 #ifndef PRINT_ENTER_EXIT_TICK
144 #define PRINT_ENTER_EXIT_TICK 0
145 #endif
146 #ifndef PRINT_ROOT_TICKS
147 #define PRINT_ROOT_TICKS 0
148 #endif
149
150 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
151 #define TICK_TYPE 1
152
153 typedef struct {
154 size_t heap_init_slots;
155 size_t heap_free_slots;
156 double growth_factor;
157 size_t growth_max_slots;
158
159 double heap_free_slots_min_ratio;
160 double heap_free_slots_goal_ratio;
161 double heap_free_slots_max_ratio;
162 double oldobject_limit_factor;
163
164 size_t malloc_limit_min;
165 size_t malloc_limit_max;
166 double malloc_limit_growth_factor;
167
168 size_t oldmalloc_limit_min;
169 size_t oldmalloc_limit_max;
170 double oldmalloc_limit_growth_factor;
171
172 VALUE gc_stress;
173 } ruby_gc_params_t;
174
175 static ruby_gc_params_t gc_params = {
176 GC_HEAP_INIT_SLOTS,
177 GC_HEAP_FREE_SLOTS,
178 GC_HEAP_GROWTH_FACTOR,
179 GC_HEAP_GROWTH_MAX_SLOTS,
180
181 GC_HEAP_FREE_SLOTS_MIN_RATIO,
182 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
183 GC_HEAP_FREE_SLOTS_MAX_RATIO,
184 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
185
186 GC_MALLOC_LIMIT_MIN,
187 GC_MALLOC_LIMIT_MAX,
188 GC_MALLOC_LIMIT_GROWTH_FACTOR,
189
190 GC_OLDMALLOC_LIMIT_MIN,
191 GC_OLDMALLOC_LIMIT_MAX,
192 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
193
194 FALSE,
195 };
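
/* Note: these defaults mirror the GC_* macros above; in a typical build they
 * can be overridden at startup through environment variables such as
 * RUBY_GC_HEAP_INIT_SLOTS, RUBY_GC_HEAP_GROWTH_FACTOR and RUBY_GC_MALLOC_LIMIT
 * (e.g. `RUBY_GC_HEAP_GROWTH_FACTOR=1.2 ruby app.rb` for gentler heap growth). */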
196
197 /* GC_DEBUG:
198 * enable to embed GC debugging information.
199 */
200 #ifndef GC_DEBUG
201 #define GC_DEBUG 0
202 #endif
203
204 #if USE_RGENGC
205 /* RGENGC_DEBUG:
206 * 1: basic information
207 * 2: remember set operation
208 * 3: mark
209 * 4:
210 * 5: sweep
211 */
212 #ifndef RGENGC_DEBUG
213 #ifdef RUBY_DEVEL
214 #define RGENGC_DEBUG -1
215 #else
216 #define RGENGC_DEBUG 0
217 #endif
218 #endif
219 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
220 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
221 #else
222 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
223 #endif
224 int ruby_rgengc_debug;
225
226 /* RGENGC_CHECK_MODE
227 * 0: disable all assertions
228 * 1: enable assertions (to debug RGenGC)
229 * 2: enable internal consistency check at each GC (for debugging)
230 * 3: enable internal consistency check at each GC step (for debugging)
231 * 4: enable liveness check
232 * 5: show all references
233 */
234 #ifndef RGENGC_CHECK_MODE
235 #define RGENGC_CHECK_MODE 0
236 #endif
237
238 #if RGENGC_CHECK_MODE > 0
239 #define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
240 #else
241 #define GC_ASSERT(expr) ((void)0)
242 #endif
243
244 /* RGENGC_OLD_NEWOBJ_CHECK
245 * 0: disable this check.
246 * >0: promote an object to OLD at creation time, for debugging.
247 *
248 * One object is made OLD for every RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
249 */
250 #ifndef RGENGC_OLD_NEWOBJ_CHECK
251 #define RGENGC_OLD_NEWOBJ_CHECK 0
252 #endif
253
254 /* RGENGC_PROFILE
255 * 0: disable RGenGC profiling
256 * 1: enable profiling for basic information
257 * 2: enable profiling for each type
258 */
259 #ifndef RGENGC_PROFILE
260 #define RGENGC_PROFILE 0
261 #endif
262
263 /* RGENGC_ESTIMATE_OLDMALLOC
264 * Enable/disable estimation of the increase in memory malloc'ed by old objects.
265 * If the estimate exceeds a threshold, a full GC is invoked.
266 * 0: disable estimation.
267 * 1: enable estimation.
268 */
269 #ifndef RGENGC_ESTIMATE_OLDMALLOC
270 #define RGENGC_ESTIMATE_OLDMALLOC 1
271 #endif
272
273 /* RGENGC_FORCE_MAJOR_GC
274 * Force major/full GC if this macro is not 0.
275 */
276 #ifndef RGENGC_FORCE_MAJOR_GC
277 #define RGENGC_FORCE_MAJOR_GC 0
278 #endif
279
280 #else /* USE_RGENGC */
281
282 #ifdef RGENGC_DEBUG
283 #undef RGENGC_DEBUG
284 #endif
285 #define RGENGC_DEBUG 0
286 #ifdef RGENGC_CHECK_MODE
287 #undef RGENGC_CHECK_MODE
288 #endif
289 #define RGENGC_CHECK_MODE 0
290 #define RGENGC_PROFILE 0
291 #define RGENGC_ESTIMATE_OLDMALLOC 0
292 #define RGENGC_FORCE_MAJOR_GC 0
293
294 #endif /* USE_RGENGC */
295
296 #ifndef GC_PROFILE_MORE_DETAIL
297 #define GC_PROFILE_MORE_DETAIL 0
298 #endif
299 #ifndef GC_PROFILE_DETAIL_MEMORY
300 #define GC_PROFILE_DETAIL_MEMORY 0
301 #endif
302 #ifndef GC_ENABLE_INCREMENTAL_MARK
303 #define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
304 #endif
305 #ifndef GC_ENABLE_LAZY_SWEEP
306 #define GC_ENABLE_LAZY_SWEEP 1
307 #endif
308 #ifndef CALC_EXACT_MALLOC_SIZE
309 #define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
310 #endif
311 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
312 #ifndef MALLOC_ALLOCATED_SIZE
313 #define MALLOC_ALLOCATED_SIZE 0
314 #endif
315 #else
316 #define MALLOC_ALLOCATED_SIZE 0
317 #endif
318 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
319 #define MALLOC_ALLOCATED_SIZE_CHECK 0
320 #endif
321
322 #ifndef GC_DEBUG_STRESS_TO_CLASS
323 #define GC_DEBUG_STRESS_TO_CLASS 0
324 #endif
325
326 #ifndef RGENGC_OBJ_INFO
327 #define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
328 #endif
329
330 typedef enum {
331 GPR_FLAG_NONE = 0x000,
332 /* major reason */
333 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
334 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
335 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
336 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
337 #if RGENGC_ESTIMATE_OLDMALLOC
338 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
339 #endif
340 GPR_FLAG_MAJOR_MASK = 0x0ff,
341
342 /* gc reason */
343 GPR_FLAG_NEWOBJ = 0x100,
344 GPR_FLAG_MALLOC = 0x200,
345 GPR_FLAG_METHOD = 0x400,
346 GPR_FLAG_CAPI = 0x800,
347 GPR_FLAG_STRESS = 0x1000,
348
349 /* others */
350 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
351 GPR_FLAG_HAVE_FINALIZE = 0x4000,
352 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
353 GPR_FLAG_FULL_MARK = 0x10000
354 } gc_profile_record_flag;
355
356 typedef struct gc_profile_record {
357 int flags;
358
359 double gc_time;
360 double gc_invoke_time;
361
362 size_t heap_total_objects;
363 size_t heap_use_size;
364 size_t heap_total_size;
365
366 #if GC_PROFILE_MORE_DETAIL
367 double gc_mark_time;
368 double gc_sweep_time;
369
370 size_t heap_use_pages;
371 size_t heap_live_objects;
372 size_t heap_free_objects;
373
374 size_t allocate_increase;
375 size_t allocate_limit;
376
377 double prepare_time;
378 size_t removing_objects;
379 size_t empty_objects;
380 #if GC_PROFILE_DETAIL_MEMORY
381 long maxrss;
382 long minflt;
383 long majflt;
384 #endif
385 #endif
386 #if MALLOC_ALLOCATED_SIZE
387 size_t allocated_size;
388 #endif
389
390 #if RGENGC_PROFILE > 0
391 size_t old_objects;
392 size_t remembered_normal_objects;
393 size_t remembered_shady_objects;
394 #endif
395 } gc_profile_record;
396
397 #if defined(_MSC_VER) || defined(__CYGWIN__)
398 #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
399 #endif
400
401 typedef struct RVALUE {
402 union {
403 struct {
404 VALUE flags; /* always 0 for freed obj */
405 struct RVALUE *next;
406 } free;
407 struct RBasic basic;
408 struct RObject object;
409 struct RClass klass;
410 struct RFloat flonum;
411 struct RString string;
412 struct RArray array;
413 struct RRegexp regexp;
414 struct RHash hash;
415 struct RData data;
416 struct RTypedData typeddata;
417 struct RStruct rstruct;
418 struct RBignum bignum;
419 struct RFile file;
420 struct RMatch match;
421 struct RRational rational;
422 struct RComplex complex;
423 union {
424 rb_cref_t cref;
425 struct vm_svar svar;
426 struct vm_throw_data throw_data;
427 struct vm_ifunc ifunc;
428 struct MEMO memo;
429 struct rb_method_entry_struct ment;
430 const rb_iseq_t iseq;
431 rb_env_t env;
432 struct rb_imemo_tmpbuf_struct alloc;
433 rb_ast_t ast;
434 } imemo;
435 struct {
436 struct RBasic basic;
437 VALUE v1;
438 VALUE v2;
439 VALUE v3;
440 } values;
441 } as;
442 #if GC_DEBUG
443 const char *file;
444 int line;
445 #endif
446 } RVALUE;
447
448 #if defined(_MSC_VER) || defined(__CYGWIN__)
449 #pragma pack(pop)
450 #endif
451
452 typedef uintptr_t bits_t;
453 enum {
454 BITS_SIZE = sizeof(bits_t),
455 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
456 };
457
458 struct heap_page_header {
459 struct heap_page *page;
460 };
461
462 struct heap_page_body {
463 struct heap_page_header header;
464 /* char gap[]; */
465 /* RVALUE values[]; */
466 };
467
468 struct gc_list {
469 VALUE *varptr;
470 struct gc_list *next;
471 };
472
473 #define STACK_CHUNK_SIZE 500
474
475 typedef struct stack_chunk {
476 VALUE data[STACK_CHUNK_SIZE];
477 struct stack_chunk *next;
478 } stack_chunk_t;
479
480 typedef struct mark_stack {
481 stack_chunk_t *chunk;
482 stack_chunk_t *cache;
483 int index;
484 int limit;
485 size_t cache_size;
486 size_t unused_cache_size;
487 } mark_stack_t;
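
/* The mark stack above is a linked list of fixed-size chunks of
 * STACK_CHUNK_SIZE slots each: `chunk` is the chunk currently being pushed
 * to/popped from (filled up to `index`, capacity `limit`), while `cache`
 * keeps recently emptied chunks around so marking can reuse them instead of
 * calling malloc/free repeatedly. */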
488
489 typedef struct rb_heap_struct {
490 RVALUE *freelist;
491
492 struct heap_page *free_pages;
493 struct heap_page *using_page;
494 struct list_head pages;
495 struct heap_page *sweeping_page; /* iterator for .pages */
496 #if GC_ENABLE_INCREMENTAL_MARK
497 struct heap_page *pooled_pages;
498 #endif
499 size_t total_pages; /* total page count in a heap */
500 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
501 } rb_heap_t;
502
503 enum gc_mode {
504 gc_mode_none,
505 gc_mode_marking,
506 gc_mode_sweeping
507 };
508
509 typedef struct rb_objspace {
510 struct {
511 size_t limit;
512 size_t increase;
513 #if MALLOC_ALLOCATED_SIZE
514 size_t allocated_size;
515 size_t allocations;
516 #endif
517 } malloc_params;
518
519 struct {
520 unsigned int mode : 2;
521 unsigned int immediate_sweep : 1;
522 unsigned int dont_gc : 1;
523 unsigned int dont_incremental : 1;
524 unsigned int during_gc : 1;
525 unsigned int gc_stressful: 1;
526 unsigned int has_hook: 1;
527 #if USE_RGENGC
528 unsigned int during_minor_gc : 1;
529 #endif
530 #if GC_ENABLE_INCREMENTAL_MARK
531 unsigned int during_incremental_marking : 1;
532 #endif
533 } flags;
534
535 rb_event_flag_t hook_events;
536 size_t total_allocated_objects;
537
538 rb_heap_t eden_heap;
539 rb_heap_t tomb_heap; /* heap for zombies and ghosts */
540
541 struct {
542 rb_atomic_t finalizing;
543 } atomic_flags;
544
545 struct mark_func_data_struct {
546 void *data;
547 void (*mark_func)(VALUE v, void *data);
548 } *mark_func_data;
549
550 mark_stack_t mark_stack;
551 size_t marked_slots;
552
553 struct {
554 struct heap_page **sorted;
555 size_t allocated_pages;
556 size_t allocatable_pages;
557 size_t sorted_length;
558 RVALUE *range[2];
559 size_t freeable_pages;
560
561 /* final */
562 size_t final_slots;
563 VALUE deferred_final;
564 } heap_pages;
565
566 st_table *finalizer_table;
567
568 struct {
569 int run;
570 int latest_gc_info;
571 gc_profile_record *records;
572 gc_profile_record *current_record;
573 size_t next_index;
574 size_t size;
575
576 #if GC_PROFILE_MORE_DETAIL
577 double prepare_time;
578 #endif
579 double invoke_time;
580
581 #if USE_RGENGC
582 size_t minor_gc_count;
583 size_t major_gc_count;
584 #if RGENGC_PROFILE > 0
585 size_t total_generated_normal_object_count;
586 size_t total_generated_shady_object_count;
587 size_t total_shade_operation_count;
588 size_t total_promoted_count;
589 size_t total_remembered_normal_object_count;
590 size_t total_remembered_shady_object_count;
591
592 #if RGENGC_PROFILE >= 2
593 size_t generated_normal_object_count_types[RUBY_T_MASK];
594 size_t generated_shady_object_count_types[RUBY_T_MASK];
595 size_t shade_operation_count_types[RUBY_T_MASK];
596 size_t promoted_types[RUBY_T_MASK];
597 size_t remembered_normal_object_count_types[RUBY_T_MASK];
598 size_t remembered_shady_object_count_types[RUBY_T_MASK];
599 #endif
600 #endif /* RGENGC_PROFILE */
601 #endif /* USE_RGENGC */
602
603 /* temporary profiling space */
604 double gc_sweep_start_time;
605 size_t total_allocated_objects_at_gc_start;
606 size_t heap_used_at_gc_start;
607
608 /* basic statistics */
609 size_t count;
610 size_t total_freed_objects;
611 size_t total_allocated_pages;
612 size_t total_freed_pages;
613 } profile;
614 struct gc_list *global_list;
615
616 VALUE gc_stress_mode;
617
618 #if USE_RGENGC
619 struct {
620 VALUE parent_object;
621 int need_major_gc;
622 size_t last_major_gc;
623 size_t uncollectible_wb_unprotected_objects;
624 size_t uncollectible_wb_unprotected_objects_limit;
625 size_t old_objects;
626 size_t old_objects_limit;
627
628 #if RGENGC_ESTIMATE_OLDMALLOC
629 size_t oldmalloc_increase;
630 size_t oldmalloc_increase_limit;
631 #endif
632
633 #if RGENGC_CHECK_MODE >= 2
634 struct st_table *allrefs_table;
635 size_t error_count;
636 #endif
637 } rgengc;
638 #if GC_ENABLE_INCREMENTAL_MARK
639 struct {
640 size_t pooled_slots;
641 size_t step_slots;
642 } rincgc;
643 #endif
644 #endif /* USE_RGENGC */
645
646 #if GC_DEBUG_STRESS_TO_CLASS
647 VALUE stress_to_class;
648 #endif
649 } rb_objspace_t;
650
651
652 /* default tiny heap size: 16KB */
653 #define HEAP_PAGE_ALIGN_LOG 14
654 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
655 enum {
656 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
657 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
658 REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
659 HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC),
660 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
661 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
662 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
663 HEAP_PAGE_BITMAP_PLANES = USE_RGENGC ? 4 : 1 /* RGENGC: mark, unprotected, uncollectible, marking */
664 };
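
/* Illustrative sizing, assuming a 64-bit build where sizeof(RVALUE) == 40,
 * sizeof(size_t) == 8 and bits_t is 64 bits wide:
 *   HEAP_PAGE_SIZE         = 16384 - 40 = 16344 bytes
 *   HEAP_PAGE_OBJ_LIMIT    = (16344 - 8) / 40 = 408 object slots per page
 *   HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(16344, 40), 64) = 7
 *   HEAP_PAGE_BITMAP_SIZE  = 8 * 7 = 56 bytes per bitmap plane
 * Actual values depend on the platform and build options. */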
665
666 struct heap_page {
667 short total_slots;
668 short free_slots;
669 short final_slots;
670 struct {
671 unsigned int before_sweep : 1;
672 unsigned int has_remembered_objects : 1;
673 unsigned int has_uncollectible_shady_objects : 1;
674 unsigned int in_tomb : 1;
675 } flags;
676
677 struct heap_page *free_next;
678 RVALUE *start;
679 RVALUE *freelist;
680 struct list_node page_node;
681
682 #if USE_RGENGC
683 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
684 #endif
685 /* the following three bitmaps are cleared at the beginning of full GC */
686 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
687 #if USE_RGENGC
688 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
689 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
690 #endif
691 };
692
693 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
694 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
695 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
696
697 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
698 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
699 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
700 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
701
702 /* Bitmap Operations */
703 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
704 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
705 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
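
/* Worked example (illustrative, 64-bit, sizeof(RVALUE) == 40): every page
 * body is allocated on a HEAP_PAGE_ALIGN (16KB) boundary, so masking an
 * object pointer with ~HEAP_PAGE_ALIGN_MASK recovers its page body and the
 * low bits give the slot offset. For a slot at body + 5120 bytes:
 *   NUM_IN_PAGE   = 5120 / 40 = 128
 *   BITMAP_INDEX  = 128 / 64  = 2
 *   BITMAP_OFFSET = 128 % 64  = 0, so BITMAP_BIT = (bits_t)1 << 0
 * i.e. the object maps to bit 0 of the third bits_t word of each bitmap. */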
706
707 /* getting bitmap */
708 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
709 #if USE_RGENGC
710 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
711 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
712 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
713 #endif
714
715 #ifndef ENABLE_VM_OBJSPACE
716 # define ENABLE_VM_OBJSPACE 1
717 #endif
718
719 /* Aliases */
720 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
721 #define rb_objspace (*rb_objspace_of(GET_VM()))
722 #define rb_objspace_of(vm) ((vm)->objspace)
723 #else
724 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT_MIN}};
725 #define rb_objspace_of(vm) (&rb_objspace)
726 #endif
727
728 #define ruby_initial_gc_stress gc_params.gc_stress
729
730 VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
731
732 #define malloc_limit objspace->malloc_params.limit
733 #define malloc_increase objspace->malloc_params.increase
734 #define malloc_allocated_size objspace->malloc_params.allocated_size
735 #define heap_pages_sorted objspace->heap_pages.sorted
736 #define heap_allocated_pages objspace->heap_pages.allocated_pages
737 #define heap_pages_sorted_length objspace->heap_pages.sorted_length
738 #define heap_pages_lomem objspace->heap_pages.range[0]
739 #define heap_pages_himem objspace->heap_pages.range[1]
740 #define heap_allocatable_pages objspace->heap_pages.allocatable_pages
741 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
742 #define heap_pages_final_slots objspace->heap_pages.final_slots
743 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
744 #define heap_eden (&objspace->eden_heap)
745 #define heap_tomb (&objspace->tomb_heap)
746 #define dont_gc objspace->flags.dont_gc
747 #define during_gc objspace->flags.during_gc
748 #define finalizing objspace->atomic_flags.finalizing
749 #define finalizer_table objspace->finalizer_table
750 #define global_list objspace->global_list
751 #define ruby_gc_stressful objspace->flags.gc_stressful
752 #define ruby_gc_stress_mode objspace->gc_stress_mode
753 #if GC_DEBUG_STRESS_TO_CLASS
754 #define stress_to_class objspace->stress_to_class
755 #else
756 #define stress_to_class 0
757 #endif
758
759 static inline enum gc_mode
760 gc_mode_verify(enum gc_mode mode)
761 {
762 #if RGENGC_CHECK_MODE > 0
763 switch (mode) {
764 case gc_mode_none:
765 case gc_mode_marking:
766 case gc_mode_sweeping:
767 break;
768 default:
769 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
770 }
771 #endif
772 return mode;
773 }
774
775 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
776 #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
777
778 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
779 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
780 #if USE_RGENGC
781 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
782 #else
783 #define is_full_marking(objspace) TRUE
784 #endif
785 #if GC_ENABLE_INCREMENTAL_MARK
786 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
787 #else
788 #define is_incremental_marking(objspace) FALSE
789 #endif
790 #if GC_ENABLE_INCREMENTAL_MARK
791 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
792 #else
793 #define will_be_incremental_marking(objspace) FALSE
794 #endif
795 #define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
796 #define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
797
798 #if SIZEOF_LONG == SIZEOF_VOIDP
799 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
800 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
801 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
802 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
803 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
804 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
805 #else
806 # error not supported
807 #endif
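
/* Sketch of the id scheme above: when `long` is pointer-sized, a heap
 * object's id is simply its address with FIXNUM_FLAG set (heap slots are
 * aligned, so that low bit is otherwise always 0), and obj_id_to_ref()
 * recovers the address by clearing the flag again. */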
808
809 #define RANY(o) ((RVALUE*)(o))
810
811 struct RZombie {
812 struct RBasic basic;
813 VALUE next;
814 void (*dfree)(void *);
815 void *data;
816 };
817
818 #define RZOMBIE(o) ((struct RZombie *)(o))
819
820 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
821
822 #if RUBY_MARK_FREE_DEBUG
823 int ruby_gc_debug_indent = 0;
824 #endif
825 VALUE rb_mGC;
826 int ruby_disable_gc = 0;
827
828 void rb_iseq_mark(const rb_iseq_t *iseq);
829 void rb_iseq_free(const rb_iseq_t *iseq);
830
831 void rb_gcdebug_print_obj_condition(VALUE obj);
832
833 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
834 static VALUE define_final0(VALUE obj, VALUE block);
835
836 static void negative_size_allocation_error(const char *);
837
838 static void init_mark_stack(mark_stack_t *stack);
839
840 static int ready_to_gc(rb_objspace_t *objspace);
841
842 static int garbage_collect(rb_objspace_t *, int reason);
843
844 static int gc_start(rb_objspace_t *objspace, int reason);
845 static void gc_rest(rb_objspace_t *objspace);
846 static inline void gc_enter(rb_objspace_t *objspace, const char *event);
847 static inline void gc_exit(rb_objspace_t *objspace, const char *event);
848
849 static void gc_marks(rb_objspace_t *objspace, int full_mark);
850 static void gc_marks_start(rb_objspace_t *objspace, int full);
851 static int gc_marks_finish(rb_objspace_t *objspace);
852 static void gc_marks_rest(rb_objspace_t *objspace);
853 static void gc_marks_step(rb_objspace_t *objspace, int slots);
854 static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
855
856 static void gc_sweep(rb_objspace_t *objspace);
857 static void gc_sweep_start(rb_objspace_t *objspace);
858 static void gc_sweep_finish(rb_objspace_t *objspace);
859 static int gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap);
860 static void gc_sweep_rest(rb_objspace_t *objspace);
861 static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
862
863 static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
864 static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
865 NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
866 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
867
868 static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
869 static int gc_mark_stacked_objects_all(rb_objspace_t *);
870 static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
871
872 static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
873 NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
874
875 static void push_mark_stack(mark_stack_t *, VALUE);
876 static int pop_mark_stack(mark_stack_t *, VALUE *);
877 static size_t mark_stack_size(mark_stack_t *stack);
878 static void shrink_stack_chunk_cache(mark_stack_t *stack);
879
880 static size_t obj_memsize_of(VALUE obj, int use_all_types);
881 static VALUE gc_verify_internal_consistency(VALUE self);
882 static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
883 static int gc_verify_heap_pages(rb_objspace_t *objspace);
884
885 static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
886
887 static double getrusage_time(void);
888 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
889 static inline void gc_prof_timer_start(rb_objspace_t *);
890 static inline void gc_prof_timer_stop(rb_objspace_t *);
891 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
892 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
893 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
894 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
895 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
896 static inline void gc_prof_set_heap_info(rb_objspace_t *);
897
898 #define gc_prof_record(objspace) (objspace)->profile.current_record
899 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
900
901 #ifdef HAVE_VA_ARGS_MACRO
902 # define gc_report(level, objspace, ...) \
903 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
904 #else
905 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
906 #endif
907 PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
908 static const char *obj_info(VALUE obj);
909
910 #define PUSH_MARK_FUNC_DATA(v) do { \
911 struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
912 objspace->mark_func_data = (v);
913
914 #define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
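
/* Usage sketch: the two macros above must appear as a pair within one block,
 * since PUSH_MARK_FUNC_DATA() opens a `do {` that POP_MARK_FUNC_DATA() closes:
 *
 *   struct mark_func_data_struct mfd = { some_data, some_mark_cb }; // hypothetical callback
 *   PUSH_MARK_FUNC_DATA(&mfd);
 *   ... mark roots; marking calls mfd.mark_func instead of really marking ...
 *   POP_MARK_FUNC_DATA();
 */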
915
916 /*
917 * TICK_TYPE 1 - TSC (H/W Time Stamp Counter)
918 * TICK_TYPE 2 - getrusage
919 */
920 #ifndef TICK_TYPE
921 #define TICK_TYPE 1
922 #endif
923
924 #if USE_TICK_T
925
926 #if TICK_TYPE == 1
927 /* the following code is only for internal tuning. */
928
929 /* Source code to use RDTSC is quoted and modified from
930 * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
931 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
932 */
933
934 #if defined(__GNUC__) && defined(__i386__)
935 typedef unsigned long long tick_t;
936 #define PRItick "llu"
937 static inline tick_t
938 tick(void)
939 {
940 unsigned long long int x;
941 __asm__ __volatile__ ("rdtsc" : "=A" (x));
942 return x;
943 }
944
945 #elif defined(__GNUC__) && defined(__x86_64__)
946 typedef unsigned long long tick_t;
947 #define PRItick "llu"
948
949 static __inline__ tick_t
950 tick(void)
951 {
952 unsigned long hi, lo;
953 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
954 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
955 }
956
957 #elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
958 typedef unsigned long long tick_t;
959 #define PRItick "llu"
960
961 static __inline__ tick_t
962 tick(void)
963 {
964 unsigned long long val = __builtin_ppc_get_timebase();
965 return val;
966 }
967
968 #elif defined(_WIN32) && defined(_MSC_VER)
969 #include <intrin.h>
970 typedef unsigned __int64 tick_t;
971 #define PRItick "llu"
972
973 static inline tick_t
974 tick(void)
975 {
976 return __rdtsc();
977 }
978
979 #else /* use clock */
980 typedef clock_t tick_t;
981 #define PRItick "llu"
982
983 static inline tick_t
984 tick(void)
985 {
986 return clock();
987 }
988 #endif /* TSC */
989
990 #elif TICK_TYPE == 2
991 typedef double tick_t;
992 #define PRItick "4.9f"
993
994 static inline tick_t
995 tick(void)
996 {
997 return getrusage_time();
998 }
999 #else /* TICK_TYPE */
1000 #error "choose tick type"
1001 #endif /* TICK_TYPE */
1002
1003 #define MEASURE_LINE(expr) do { \
1004 volatile tick_t start_time = tick(); \
1005 volatile tick_t end_time; \
1006 expr; \
1007 end_time = tick(); \
1008 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1009 } while (0)
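
/* Example (illustrative): wrapping a statement as
 *   MEASURE_LINE(gc_sweep_step(objspace, heap));
 * runs the expression and prints the elapsed tick count together with the
 * expression text to stderr when tick measurement is compiled in. */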
1010
1011 #else /* USE_TICK_T */
1012 #define MEASURE_LINE(expr) expr
1013 #endif /* USE_TICK_T */
1014
1015 #define FL_CHECK2(name, x, pred) \
1016 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1017 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1018 #define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1019 #define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1020 #define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1021
1022 #define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1023 #define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1024
1025 #if USE_RGENGC
1026 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1027 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1028 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1029
1030 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1031 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1032 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1033
1034 #define RVALUE_OLD_AGE 3
1035 #define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1036
1037 static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1038 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1039 static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1040 static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1041
1042 static inline int
1043 RVALUE_FLAGS_AGE(VALUE flags)
1044 {
1045 return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1046 }
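
/* The object age (0..RVALUE_OLD_AGE) is packed into the two flag bits
 * FL_PROMOTED0/FL_PROMOTED1 starting at RVALUE_AGE_SHIFT, so flags with both
 * bits set decode to age 3, i.e. an OLD (promoted) object. */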
1047
1048 #endif /* USE_RGENGC */
1049
1050
1051 #if RGENGC_CHECK_MODE == 0
1052 static inline VALUE
1053 check_rvalue_consistency(const VALUE obj)
1054 {
1055 return obj;
1056 }
1057 #else
1058 static VALUE
1059 check_rvalue_consistency(const VALUE obj)
1060 {
1061 rb_objspace_t *objspace = &rb_objspace;
1062
1063 if (SPECIAL_CONST_P(obj)) {
1064 rb_bug("check_rvalue_consistency: %p is a special const.", (void *)obj);
1065 }
1066 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1067 rb_bug("check_rvalue_consistency: %p is not a Ruby object.", (void *)obj);
1068 }
1069 else {
1070 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1071 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1072 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1073 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1074 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1075
1076 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("check_rvalue_consistency: %s is T_NONE", obj_info(obj));
1077 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("check_rvalue_consistency: %s is T_ZOMBIE", obj_info(obj));
1078 obj_memsize_of((VALUE)obj, FALSE);
1079
1080 /* check generation
1081 *
1082 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1083 */
1084 if (age > 0 && wb_unprotected_bit) {
1085 rb_bug("check_rvalue_consistency: %s is not WB protected, but age is %d > 0.", obj_info(obj), age);
1086 }
1087
1088 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1089 rb_bug("check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.", obj_info(obj));
1090 }
1091
1092 if (!is_full_marking(objspace)) {
1093 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1094 rb_bug("check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.", obj_info(obj), age);
1095 }
1096 if (remembered_bit && age != RVALUE_OLD_AGE) {
1097 rb_bug("check_rvalue_consistency: %s is remembered, but not old (age: %d).", obj_info(obj), age);
1098 }
1099 }
1100
1101 /*
1102 * check coloring
1103 *
1104 * marking:false marking:true
1105 * marked:false white *invalid*
1106 * marked:true black grey
1107 */
1108 if (is_incremental_marking(objspace) && marking_bit) {
1109 if (!is_marking(objspace) && !mark_bit) rb_bug("check_rvalue_consistency: %s is marking, but not marked.", obj_info(obj));
1110 }
1111 }
1112 return obj;
1113 }
1114 #endif
1115
1116 static inline int
1117 RVALUE_MARKED(VALUE obj)
1118 {
1119 check_rvalue_consistency(obj);
1120 return RVALUE_MARK_BITMAP(obj) != 0;
1121 }
1122
1123 #if USE_RGENGC
1124 static inline int
1125 RVALUE_WB_UNPROTECTED(VALUE obj)
1126 {
1127 check_rvalue_consistency(obj);
1128 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1129 }
1130
1131 static inline int
1132 RVALUE_MARKING(VALUE obj)
1133 {
1134 check_rvalue_consistency(obj);
1135 return RVALUE_MARKING_BITMAP(obj) != 0;
1136 }
1137
1138 static inline int
1139 RVALUE_REMEMBERED(VALUE obj)
1140 {
1141 check_rvalue_consistency(obj);
1142 return RVALUE_MARKING_BITMAP(obj) != 0;
1143 }
1144
1145 static inline int
1146 RVALUE_UNCOLLECTIBLE(VALUE obj)
1147 {
1148 check_rvalue_consistency(obj);
1149 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1150 }
1151
1152 static inline int
1153 RVALUE_OLD_P_RAW(VALUE obj)
1154 {
1155 const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1156 return (RBASIC(obj)->flags & promoted) == promoted;
1157 }
1158
1159 static inline int
1160 RVALUE_OLD_P(VALUE obj)
1161 {
1162 check_rvalue_consistency(obj);
1163 return RVALUE_OLD_P_RAW(obj);
1164 }
1165
1166 #if RGENGC_CHECK_MODE || GC_DEBUG
1167 static inline int
1168 RVALUE_AGE(VALUE obj)
1169 {
1170 check_rvalue_consistency(obj);
1171 return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1172 }
1173 #endif
1174
1175 static inline void
1176 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1177 {
1178 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1179 objspace->rgengc.old_objects++;
1180 rb_transient_heap_promote(obj);
1181
1182 #if RGENGC_PROFILE >= 2
1183 objspace->profile.total_promoted_count++;
1184 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1185 #endif
1186 }
1187
1188 static inline void
1189 RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1190 {
1191 RB_DEBUG_COUNTER_INC(obj_promote);
1192 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1193 }
1194
1195 static inline VALUE
1196 RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1197 {
1198 flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
1199 flags |= (age << RVALUE_AGE_SHIFT);
1200 return flags;
1201 }
1202
1203 /* set age to age+1 */
1204 static inline void
1205 RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1206 {
1207 VALUE flags = RBASIC(obj)->flags;
1208 int age = RVALUE_FLAGS_AGE(flags);
1209
1210 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1211 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1212 }
1213
1214 age++;
1215 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1216
1217 if (age == RVALUE_OLD_AGE) {
1218 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1219 }
1220 check_rvalue_consistency(obj);
1221 }
1222
1223 /* set age to RVALUE_OLD_AGE */
1224 static inline void
1225 RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
1226 {
1227 check_rvalue_consistency(obj);
1228 GC_ASSERT(!RVALUE_OLD_P(obj));
1229
1230 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1231 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1232
1233 check_rvalue_consistency(obj);
1234 }
1235
1236 /* set age to RVALUE_OLD_AGE - 1 */
1237 static inline void
1238 RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1239 {
1240 check_rvalue_consistency(obj);
1241 GC_ASSERT(!RVALUE_OLD_P(obj));
1242
1243 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1244
1245 check_rvalue_consistency(obj);
1246 }
1247
1248 static inline void
1249 RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
1250 {
1251 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1252 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1253 }
1254
1255 static inline void
1256 RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1257 {
1258 check_rvalue_consistency(obj);
1259 GC_ASSERT(RVALUE_OLD_P(obj));
1260
1261 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1262 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1263 }
1264
1265 RVALUE_DEMOTE_RAW(objspace, obj);
1266
1267 if (RVALUE_MARKED(obj)) {
1268 objspace->rgengc.old_objects--;
1269 }
1270
1271 check_rvalue_consistency(obj);
1272 }
1273
1274 static inline void
1275 RVALUE_AGE_RESET_RAW(VALUE obj)
1276 {
1277 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1278 }
1279
1280 static inline void
1281 RVALUE_AGE_RESET(VALUE obj)
1282 {
1283 check_rvalue_consistency(obj);
1284 GC_ASSERT(!RVALUE_OLD_P(obj));
1285
1286 RVALUE_AGE_RESET_RAW(obj);
1287 check_rvalue_consistency(obj);
1288 }
1289
1290 static inline int
1291 RVALUE_BLACK_P(VALUE obj)
1292 {
1293 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1294 }
1295
1296 #if 0
1297 static inline int
1298 RVALUE_GREY_P(VALUE obj)
1299 {
1300 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1301 }
1302 #endif
1303
1304 static inline int
1305 RVALUE_WHITE_P(VALUE obj)
1306 {
1307 return RVALUE_MARKED(obj) == FALSE;
1308 }
1309
1310 #endif /* USE_RGENGC */
1311
1312 /*
1313 --------------------------- ObjectSpace -----------------------------
1314 */
1315
1316 rb_objspace_t *
1317 rb_objspace_alloc(void)
1318 {
1319 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
1320 rb_objspace_t *objspace = calloc(1, sizeof(rb_objspace_t));
1321 #else
1322 rb_objspace_t *objspace = &rb_objspace;
1323 #endif
1324 malloc_limit = gc_params.malloc_limit_min;
1325 list_head_init(&objspace->eden_heap.pages);
1326 list_head_init(&objspace->tomb_heap.pages);
1327
1328 return objspace;
1329 }
1330
1331 static void free_stack_chunks(mark_stack_t *);
1332 static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1333
1334 void
1335 rb_objspace_free(rb_objspace_t *objspace)
1336 {
1337 if (is_lazy_sweeping(heap_eden))
1338 rb_bug("lazy sweeping underway when freeing object space");
1339
1340 if (objspace->profile.records) {
1341 free(objspace->profile.records);
1342 objspace->profile.records = 0;
1343 }
1344
1345 if (global_list) {
1346 struct gc_list *list, *next;
1347 for (list = global_list; list; list = next) {
1348 next = list->next;
1349 xfree(list);
1350 }
1351 }
1352 if (heap_pages_sorted) {
1353 size_t i;
1354 for (i = 0; i < heap_allocated_pages; ++i) {
1355 heap_page_free(objspace, heap_pages_sorted[i]);
1356 }
1357 free(heap_pages_sorted);
1358 heap_allocated_pages = 0;
1359 heap_pages_sorted_length = 0;
1360 heap_pages_lomem = 0;
1361 heap_pages_himem = 0;
1362
1363 objspace->eden_heap.total_pages = 0;
1364 objspace->eden_heap.total_slots = 0;
1365 }
1366 free_stack_chunks(&objspace->mark_stack);
1367 #if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE)
1368 if (objspace == &rb_objspace) return;
1369 #endif
1370 free(objspace);
1371 }
1372
1373 static void
1374 heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1375 {
1376 struct heap_page **sorted;
1377 size_t size = next_length * sizeof(struct heap_page *);
1378
1379 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
1380
1381 if (heap_pages_sorted_length > 0) {
1382 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1383 if (sorted) heap_pages_sorted = sorted;
1384 }
1385 else {
1386 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1387 }
1388
1389 if (sorted == 0) {
1390 rb_memerror();
1391 }
1392
1393 heap_pages_sorted_length = next_length;
1394 }
1395
1396 static void
1397 heap_pages_expand_sorted(rb_objspace_t *objspace)
1398 {
1399 /* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
1400 * because heap_allocatable_pages already counts heap_tomb->total_pages (tomb pages are recycled).
1401 * however, if some pages have no empty slots, new pages may have to be created, so the
1402 * additional allocatable pages (heap_tomb->total_pages) are added in as well.
1403 */
1404 size_t next_length = heap_allocatable_pages;
1405 next_length += heap_eden->total_pages;
1406 next_length += heap_tomb->total_pages;
1407
1408 if (next_length > heap_pages_sorted_length) {
1409 heap_pages_expand_sorted_to(objspace, next_length);
1410 }
1411
1412 GC_ASSERT(heap_allocatable_pages + heap_eden->total_pages <= heap_pages_sorted_length);
1413 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1414 }
1415
1416 static void
1417 heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
1418 {
1419 heap_allocatable_pages = s;
1420 heap_pages_expand_sorted(objspace);
1421 }
1422
1423
1424 static inline void
1425 heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1426 {
1427 RVALUE *p = (RVALUE *)obj;
1428 p->as.free.flags = 0;
1429 p->as.free.next = page->freelist;
1430 page->freelist = p;
1431
1432 if (RGENGC_CHECK_MODE && !is_pointer_to_heap(objspace, p)) {
1433 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1434 }
1435 poison_object(obj);
1436
1437 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1438 }
1439
1440 static inline void
1441 heap_add_freepage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1442 {
1443 if (page->freelist) {
1444 page->free_next = heap->free_pages;
1445 heap->free_pages = page;
1446 }
1447 }
1448
1449 #if GC_ENABLE_INCREMENTAL_MARK
1450 static inline int
1451 heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1452 {
1453 if (page->freelist) {
1454 page->free_next = heap->pooled_pages;
1455 heap->pooled_pages = page;
1456 objspace->rincgc.pooled_slots += page->free_slots;
1457 return TRUE;
1458 }
1459 else {
1460 return FALSE;
1461 }
1462 }
1463 #endif
1464
1465 static void
1466 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1467 {
1468 list_del(&page->page_node);
1469 heap->total_pages--;
1470 heap->total_slots -= page->total_slots;
1471 }
1472
1473 static void
1474 heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1475 {
1476 heap_allocated_pages--;
1477 objspace->profile.total_freed_pages++;
1478 rb_aligned_free(GET_PAGE_BODY(page->start));
1479 free(page);
1480 }
1481
1482 static void
1483 heap_pages_free_unused_pages(rb_objspace_t *objspace)
1484 {
1485 size_t i, j;
1486
1487 if (!list_empty(&heap_tomb->pages)) {
1488 for (i = j = 1; j < heap_allocated_pages; i++) {
1489 struct heap_page *page = heap_pages_sorted[i];
1490
1491 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
1492 heap_unlink_page(objspace, heap_tomb, page);
1493 heap_page_free(objspace, page);
1494 }
1495 else {
1496 if (i != j) {
1497 heap_pages_sorted[j] = page;
1498 }
1499 j++;
1500 }
1501 }
1502 GC_ASSERT(j == heap_allocated_pages);
1503 }
1504 }
1505
1506 static struct heap_page *
1507 heap_page_allocate(rb_objspace_t *objspace)
1508 {
1509 RVALUE *start, *end, *p;
1510 struct heap_page *page;
1511 struct heap_page_body *page_body = 0;
1512 size_t hi, lo, mid;
1513 int limit = HEAP_PAGE_OBJ_LIMIT;
1514
1515 /* assign heap_page body (contains heap_page_header and RVALUEs) */
1516 page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1517 if (page_body == 0) {
1518 rb_memerror();
1519 }
1520
1521 /* assign heap_page entry */
1522 page = (struct heap_page *)calloc(1, sizeof(struct heap_page));
1523 if (page == 0) {
1524 rb_aligned_free(page_body);
1525 rb_memerror();
1526 }
1527
1528 /* adjust obj_limit (the number of objects available in this page) */
1529 start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
1530 if ((VALUE)start % sizeof(RVALUE) != 0) {
1531 int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
1532 start = (RVALUE*)((VALUE)start + delta);
1533 limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
1534 }
1535 end = start + limit;
1536
1537 /* setup heap_pages_sorted */
1538 lo = 0;
1539 hi = heap_allocated_pages;
1540 while (lo < hi) {
1541 struct heap_page *mid_page;
1542
1543 mid = (lo + hi) / 2;
1544 mid_page = heap_pages_sorted[mid];
1545 if (mid_page->start < start) {
1546 lo = mid + 1;
1547 }
1548 else if (mid_page->start > start) {
1549 hi = mid;
1550 }
1551 else {
1552 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
1553 }
1554 }
1555
1556 if (hi < heap_allocated_pages) {
1557 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
1558 }
1559
1560 heap_pages_sorted[hi] = page;
1561
1562 heap_allocated_pages++;
1563
1564 GC_ASSERT(heap_eden->total_pages + heap_allocatable_pages <= heap_pages_sorted_length);
1565 GC_ASSERT(heap_eden->total_pages + heap_tomb->total_pages == heap_allocated_pages - 1);
1566 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1567
1568 objspace->profile.total_allocated_pages++;
1569
1570 if (heap_allocated_pages > heap_pages_sorted_length) {
1571 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
1572 heap_allocated_pages, heap_pages_sorted_length);
1573 }
1574
1575 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
1576 if (heap_pages_himem < end) heap_pages_himem = end;
1577
1578 page->start = start;
1579 page->total_slots = limit;
1580 page_body->header.page = page;
1581
1582 for (p = start; p != end; p++) {
1583 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
1584 heap_page_add_freeobj(objspace, page, (VALUE)p);
1585 }
1586 page->free_slots = limit;
1587
1588 return page;
1589 }
1590
1591 static struct heap_page *
1592 heap_page_resurrect(rb_objspace_t *objspace)
1593 {
1594 struct heap_page *page = 0, *next;
1595
1596 list_for_each_safe(&heap_tomb->pages, page, next, page_node) {
1597 if (page->freelist != NULL) {
1598 heap_unlink_page(objspace, heap_tomb, page);
1599 return page;
1600 }
1601 }
1602
1603 return NULL;
1604 }
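
/* Tomb pages that still have a freelist are recycled here before
 * heap_page_create() below falls back to allocating a brand-new page. */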
1605
1606 static struct heap_page *
1607 heap_page_create(rb_objspace_t *objspace)
1608 {
1609 struct heap_page *page;
1610 const char *method = "recycle";
1611
1612 heap_allocatable_pages--;
1613
1614 page = heap_page_resurrect(objspace);
1615
1616 if (page == NULL) {
1617 page = heap_page_allocate(objspace);
1618 method = "allocate";
1619 }
1620 if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_pages_sorted_length: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
1621 method, (void *)page, (int)heap_pages_sorted_length, (int)heap_allocated_pages, (int)heap_tomb->total_pages);
1622 return page;
1623 }
1624
1625 static void
1626 heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1627 {
1628 page->flags.in_tomb = (heap == heap_tomb);
1629 list_add(&heap->pages, &page->page_node);
1630 heap->total_pages++;
1631 heap->total_slots += page->total_slots;
1632 }
1633
1634 static void
1635 heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
1636 {
1637 struct heap_page *page = heap_page_create(objspace);
1638 heap_add_page(objspace, heap, page);
1639 heap_add_freepage(objspace, heap, page);
1640 }
1641
1642 static void
1643 heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
1644 {
1645 size_t i;
1646
1647 heap_allocatable_pages_set(objspace, add);
1648
1649 for (i = 0; i < add; i++) {
1650 heap_assign_page(objspace, heap);
1651 }
1652
1653 GC_ASSERT(heap_allocatable_pages == 0);
1654 }
1655
1656 static size_t
1657 heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots)
1658 {
1659 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
1660 size_t used = heap_allocated_pages + heap_allocatable_pages;
1661 size_t next_used;
1662
1663 if (goal_ratio == 0.0) {
1664 next_used = (size_t)(used * gc_params.growth_factor);
1665 }
1666 else {
1667 /* Find `f' where free_slots = f * total_slots * goal_ratio
1668 * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
1669 */
1670 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1671
1672 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
1673 if (f < 1.0) f = 1.1;
1674
1675 next_used = (size_t)(f * used);
1676
1677 if (0) {
1678 fprintf(stderr,
1679 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
1680 " G(%1.2f), f(%1.2f),"
1681 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
1682 free_slots, total_slots, free_slots/(double)total_slots,
1683 goal_ratio, f, used, next_used);
1684 }
1685 }
1686
1687 if (gc_params.growth_max_slots > 0) {
1688 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
1689 if (next_used > max_used) next_used = max_used;
1690 }
1691
1692 return next_used - used;
1693 }
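
/* Worked example (hypothetical numbers): with goal_ratio = 0.40,
 * total_slots = 10000 and free_slots = 2000,
 *   f = (10000 - 2000) / ((1 - 0.40) * 10000) = 8000 / 6000 = 1.33
 * so next_used = 1.33 * used, clamped by growth_factor and, when
 * growth_max_slots is set, by growth_max_slots / HEAP_PAGE_OBJ_LIMIT pages. */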
1694
1695 static void
1696 heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
1697 {
1698 size_t used = heap_eden->total_pages;
1699 size_t next_used_limit = used + additional_pages;
1700
1701 if (next_used_limit == heap_allocated_pages) next_used_limit++;
1702
1703 heap_allocatable_pages_set(objspace, next_used_limit - used);
1704
1705 gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %d\n", (int)heap_allocatable_pages);
1706 }
1707
1708 static int
1709 heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
1710 {
1711 if (heap_allocatable_pages > 0) {
1712 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
1713 (int)heap_pages_sorted_length, (int)heap_allocatable_pages, (int)heap->total_pages);
1714
1715 GC_ASSERT(heap_allocatable_pages + heap_eden->total_pages <= heap_pages_sorted_length);
1716 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1717
1718 heap_assign_page(objspace, heap);
1719 return TRUE;
1720 }
1721 return FALSE;
1722 }
1723
1724 static void
1725 heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
1726 {
1727 GC_ASSERT(heap->free_pages == NULL);
1728
1729 if (is_lazy_sweeping(heap)) {
1730 gc_sweep_continue(objspace, heap);
1731 }
1732 else if (is_incremental_marking(objspace)) {
1733 gc_marks_continue(objspace, heap);
1734 }
1735
1736 if (heap->free_pages == NULL &&
1737 (will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
1738 gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
1739 rb_memerror();
1740 }
1741 }
1742
1743 static RVALUE *
1744 heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
1745 {
1746 struct heap_page *page;
1747 RVALUE *p;
1748
1749 while (heap->free_pages == NULL) {
1750 heap_prepare(objspace, heap);
1751 }
1752 page = heap->free_pages;
1753 heap->free_pages = page->free_next;
1754 heap->using_page = page;
1755
1756 GC_ASSERT(page->free_slots != 0);
1757 p = page->freelist;
1758 page->freelist = NULL;
1759 page->free_slots = 0;
1760 unpoison_object((VALUE)p, true);
1761 return p;
1762 }
1763
1764 static inline VALUE
1765 heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
1766 {
1767 RVALUE *p = heap->freelist;
1768 if (LIKELY(p != NULL)) {
1769 heap->freelist = p->as.free.next;
1770 }
1771 unpoison_object((VALUE)p, true);
1772 return (VALUE)p;
1773 }
1774
1775 static inline VALUE
1776 heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
1777 {
1778 RVALUE *p = heap->freelist;
1779
1780 while (1) {
1781 if (LIKELY(p != NULL)) {
1782 unpoison_object((VALUE)p, true);
1783 heap->freelist = p->as.free.next;
1784 return (VALUE)p;
1785 }
1786 else {
1787 p = heap_get_freeobj_from_next_freepage(objspace, heap);
1788 }
1789 }
1790 }
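
/* Allocation fast path: pop the head of heap->freelist; once it runs dry,
 * heap_get_freeobj_from_next_freepage() installs the freelist of the next
 * free page, with heap_prepare() running lazy sweeping or starting a GC
 * first if no free page is available. */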
1791
1792 void
1793 rb_objspace_set_event_hook(const rb_event_flag_t event)
1794 {
1795 rb_objspace_t *objspace = &rb_objspace;
1796 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
1797 objspace->flags.has_hook = (objspace->hook_events != 0);
1798 }
1799
1800 static void
1801 gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
1802 {
1803 const VALUE *pc = ec->cfp->pc;
1804 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
1805 /* increment PC because source line is calculated with PC-1 */
1806 ec->cfp->pc++;
1807 }
1808 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
1809 ec->cfp->pc = pc;
1810 }
1811
1812 #define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
1813 #define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
1814
1815 #define gc_event_hook(objspace, event, data) do { \
1816 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
1817 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
1818 } \
1819 } while (0)
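/* gc_event_hook() is the form used on hot paths (NEWOBJ/FREEOBJ): it tests
 * the objspace's registered event mask first, so when no internal GC event
 * hook is installed, allocation and freeing only pay for a single branch. */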
1820
1821 static inline VALUE
1822 newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, rb_objspace_t *objspace, VALUE obj)
1823 {
1824 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1825 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1826
1827 /* OBJSETUP */
1828 RBASIC(obj)->flags = flags;
1829 RBASIC_SET_CLASS_RAW(obj, klass);
1830 RANY(obj)->as.values.v1 = v1;
1831 RANY(obj)->as.values.v2 = v2;
1832 RANY(obj)->as.values.v3 = v3;
1833
1834 #if RGENGC_CHECK_MODE
1835 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
1836 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
1837 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
1838 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
1839
1840 if (flags & FL_PROMOTED1) {
1841 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
1842 }
1843 else {
1844 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
1845 }
1846 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
1847 #endif
1848
1849 #if USE_RGENGC
1850 if (UNLIKELY(wb_protected == FALSE)) {
1851 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
1852 }
1853 #endif
1854
1855 #if RGENGC_PROFILE
1856 if (wb_protected) {
1857 objspace->profile.total_generated_normal_object_count++;
1858 #if RGENGC_PROFILE >= 2
1859 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
1860 #endif
1861 }
1862 else {
1863 objspace->profile.total_generated_shady_object_count++;
1864 #if RGENGC_PROFILE >= 2
1865 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
1866 #endif
1867 }
1868 #endif
1869
1870 #if GC_DEBUG
1871 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
1872 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
1873 #endif
1874
1875 objspace->total_allocated_objects++;
1876
1877 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
1878
1879 #if RGENGC_OLD_NEWOBJ_CHECK > 0
1880 {
1881 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
1882
1883 if (!is_incremental_marking(objspace) &&
1884 flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
1885 ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
1886 if (--newobj_cnt == 0) {
1887 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
1888
1889 gc_mark_set(objspace, obj);
1890 RVALUE_AGE_SET_OLD(objspace, obj);
1891
1892 rb_gc_writebarrier_remember(obj);
1893 }
1894 }
1895 }
1896 #endif
1897 check_rvalue_consistency(obj);
1898 return obj;
1899 }
1900
1901 static inline VALUE
1902 newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected)
1903 {
1904 VALUE obj;
1905
1906 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
1907 if (during_gc) {
1908 dont_gc = 1;
1909 during_gc = 0;
1910 rb_bug("object allocation during garbage collection phase");
1911 }
1912
1913 if (ruby_gc_stressful) {
1914 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
1915 rb_memerror();
1916 }
1917 }
1918 }
1919
1920 obj = heap_get_freeobj(objspace, heap_eden);
1921 newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
1922 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
1923 return obj;
1924 }
1925
1926 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
1927 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
1928
1929 static VALUE
1930 newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
1931 {
1932 return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE);
1933 }
1934
1935 static VALUE
1936 newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
1937 {
1938 return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE);
1939 }
1940
1941 static inline VALUE
1942 newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
1943 {
1944 rb_objspace_t *objspace = &rb_objspace;
1945 VALUE obj;
1946
1947 RB_DEBUG_COUNTER_INC(obj_newobj);
1948 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
1949
1950 #if GC_DEBUG_STRESS_TO_CLASS
1951 if (UNLIKELY(stress_to_class)) {
1952 long i, cnt = RARRAY_LEN(stress_to_class);
1953 for (i = 0; i < cnt; ++i) {
1954 if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
1955 }
1956 }
1957 #endif
1958 if (!(during_gc ||
1959 ruby_gc_stressful ||
1960 gc_event_hook_available_p(objspace)) &&
1961 (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
1962 return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
1963 }
1964 else {
1965 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
1966
1967 return wb_protected ?
1968 newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
1969 newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
1970 }
1971 }
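/* Allocation summary: newobj_of() first tries heap_get_freeobj_head(), which
 * pops from the eden heap's freelist with no further bookkeeping.  If that
 * freelist is empty, or GC stress / internal event hooks / an in-progress GC
 * require extra care, it falls back to newobj_slowpath(), whose
 * heap_get_freeobj() may lazily sweep, continue incremental marking, grow the
 * heap, or start a new GC (see heap_prepare()) before an empty slot is
 * returned. */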
1972
1973 VALUE
1974 rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
1975 {
1976 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1977 return newobj_of(klass, flags, 0, 0, 0, FALSE);
1978 }
1979
1980 VALUE
1981 rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
1982 {
1983 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1984 return newobj_of(klass, flags, 0, 0, 0, TRUE);
1985 }
1986
1987 /* for compatibility */
1988
1989 VALUE
1990 rb_newobj(void)
1991 {
1992 return newobj_of(0, T_NONE, 0, 0, 0, FALSE);
1993 }
1994
1995 VALUE
1996 rb_newobj_of(VALUE klass, VALUE flags)
1997 {
1998 return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
1999 }
2000
2001 #define UNEXPECTED_NODE(func) \
2002 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2003 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2004
2005 #undef rb_imemo_new
2006
2007 VALUE
2008 rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2009 {
2010 VALUE flags = T_IMEMO | (type << FL_USHIFT);
2011 return newobj_of(v0, flags, v1, v2, v3, TRUE);
2012 }
2013
2014 static VALUE
2015 rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2016 {
2017 VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
2018 return newobj_of(v0, flags, v1, v2, v3, FALSE);
2019 }
2020
2021 VALUE
2022 rb_imemo_tmpbuf_auto_free_pointer(void *buf)
2023 {
2024 return rb_imemo_new(imemo_tmpbuf, (VALUE)buf, 0, 0, 0);
2025 }
2026
2027 VALUE
2028 rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2029 {
2030 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2031 }
2032
2033 rb_imemo_tmpbuf_t *
2034 rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
2035 {
2036 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
2037 }
2038
2039 #if IMEMO_DEBUG
2040 VALUE
2041 rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
2042 {
2043 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2044 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2045 return memo;
2046 }
2047 #endif
2048
2049 VALUE
2050 rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2051 {
2052 if (klass) Check_Type(klass, T_CLASS);
2053 return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE);
2054 }
2055
2056 #undef rb_data_object_alloc
2057 RUBY_ALIAS_FUNCTION(rb_data_object_alloc(VALUE klass, void *datap,
2058 RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree),
2059 rb_data_object_wrap, (klass, datap, dmark, dfree))
2060
2061
2062 VALUE
2063 rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2064 {
2065 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
2066 DATA_PTR(obj) = xcalloc(1, size);
2067 return obj;
2068 }
2069
2070 VALUE
2071 rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
2072 {
2073 if (klass) Check_Type(klass, T_CLASS);
2074 return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED);
2075 }
2076
2077 #undef rb_data_typed_object_alloc
2078 RUBY_ALIAS_FUNCTION(rb_data_typed_object_alloc(VALUE klass, void *datap,
2079 const rb_data_type_t *type),
2080 rb_data_typed_object_wrap, (klass, datap, type))
2081
2082 VALUE
2083 rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
2084 {
2085 VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
2086 DATA_PTR(obj) = xcalloc(1, size);
2087 return obj;
2088 }
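/* Illustrative sketch (not part of this file): a C extension would typically
 * pair the helpers above with a static rb_data_type_t to wrap a hypothetical
 * `struct foo`:
 *
 *     static void foo_free(void *p) { xfree(p); }
 *     static size_t foo_memsize(const void *p) { return sizeof(struct foo); }
 *
 *     static const rb_data_type_t foo_type = {
 *         "foo",
 *         {0, foo_free, foo_memsize,},
 *         0, 0,
 *         RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 *
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct foo), &foo_type);
 *
 * RUBY_TYPED_FREE_IMMEDIATELY allows obj_free() to call foo_free() directly
 * during sweep; without it the free is deferred through a zombie object
 * (see the T_DATA case in obj_free()). */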
2089
2090 size_t
2091 rb_objspace_data_type_memsize(VALUE obj)
2092 {
2093 if (RTYPEDDATA_P(obj)) {
2094 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
2095 const void *ptr = RTYPEDDATA_DATA(obj);
2096 if (ptr && type->function.dsize) {
2097 return type->function.dsize(ptr);
2098 }
2099 }
2100 return 0;
2101 }
2102
2103 const char *
2104 rb_objspace_data_type_name(VALUE obj)
2105 {
2106 if (RTYPEDDATA_P(obj)) {
2107 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
2108 }
2109 else {
2110 return 0;
2111 }
2112 }
2113
2114 PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2115 static inline int
2116 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
2117 {
2118 register RVALUE *p = RANY(ptr);
2119 register struct heap_page *page;
2120 register size_t hi, lo, mid;
2121
2122 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2123 if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2124
2125     /* check whether p looks like a pointer into the heap, using binary search */
2126 lo = 0;
2127 hi = heap_allocated_pages;
2128 while (lo < hi) {
2129 mid = (lo + hi) / 2;
2130 page = heap_pages_sorted[mid];
2131 if (page->start <= p) {
2132 if (p < page->start + page->total_slots) {
2133 return TRUE;
2134 }
2135 lo = mid + 1;
2136 }
2137 else {
2138 hi = mid;
2139 }
2140 }
2141 return FALSE;
2142 }
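/* is_pointer_to_heap() answers "could ptr be a pointer to a live RVALUE
 * slot?" for conservative (stack/register) marking: it first requires RVALUE
 * alignment inside [heap_pages_lomem, heap_pages_himem], then binary-searches
 * heap_pages_sorted, which is kept sorted by page start address, for a page
 * whose slot range contains ptr. */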
2143
2144 static enum rb_id_table_iterator_result
2145 free_const_entry_i(VALUE value, void *data)
2146 {
2147 rb_const_entry_t *ce = (rb_const_entry_t *)value;
2148 xfree(ce);
2149 return ID_TABLE_CONTINUE;
2150 }
2151
2152 void
2153 rb_free_const_table(struct rb_id_table *tbl)
2154 {
2155 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2156 rb_id_table_free(tbl);
2157 }
2158
2159 static inline void
2160 make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
2161 {
2162 struct RZombie *zombie = RZOMBIE(obj);
2163 zombie->basic.flags = T_ZOMBIE;
2164 zombie->dfree = dfree;
2165 zombie->data = data;
2166 zombie->next = heap_pages_deferred_final;
2167 heap_pages_deferred_final = (VALUE)zombie;
2168 }
2169
2170 static inline void
2171 make_io_zombie(rb_objspace_t *objspace, VALUE obj)
2172 {
2173 rb_io_t *fptr = RANY(obj)->as.file.fptr;
2174 make_zombie(objspace, obj, (void (*)(void*))rb_io_fptr_finalize, fptr);
2175 }
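/* Objects whose cleanup cannot run inside the sweep step (non-immediate
 * T_DATA frees, T_FILE fptr finalization, and objects with Ruby-level
 * finalizers) are converted into T_ZOMBIE slots and chained onto
 * heap_pages_deferred_final by make_zombie().  finalize_deferred() and
 * finalize_list() drain that chain later and return each slot to its page's
 * freelist. */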
2176
2177 static int
2178 obj_free(rb_objspace_t *objspace, VALUE obj)
2179 {
2180 RB_DEBUG_COUNTER_INC(obj_free);
2181
2182 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
2183
2184 switch (BUILTIN_TYPE(obj)) {
2185 case T_NIL:
2186 case T_FIXNUM:
2187 case T_TRUE:
2188 case T_FALSE:
2189 rb_bug("obj_free() called for broken object");
2190 break;
2191 }
2192
2193 if (FL_TEST(obj, FL_EXIVAR)) {
2194 rb_free_generic_ivar((VALUE)obj);
2195 FL_UNSET(obj, FL_EXIVAR);
2196 }
2197
2198 #if USE_RGENGC
2199 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2200
2201 #if RGENGC_CHECK_MODE
2202 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
2203 CHECK(RVALUE_WB_UNPROTECTED);
2204 CHECK(RVALUE_MARKED);
2205 CHECK(RVALUE_MARKING);
2206 CHECK(RVALUE_UNCOLLECTIBLE);
2207 #undef CHECK
2208 #endif
2209 #endif
2210
2211 switch (BUILTIN_TYPE(obj)) {
2212 case T_OBJECT:
2213 if ((RANY(obj)->as.basic.flags & ROBJECT_EMBED) ||
2214 RANY(obj)->as.object.as.heap.ivptr == NULL) {
2215 RB_DEBUG_COUNTER_INC(obj_obj_embed);
2216 }
2217 else if (ROBJ_TRANSIENT_P(obj)) {
2218 RB_DEBUG_COUNTER_INC(obj_obj_transient);
2219 }
2220 else {
2221 xfree(RANY(obj)->as.object.as.heap.ivptr);
2222 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
2223 }
2224 break;
2225 case T_MODULE:
2226 case T_CLASS:
2227 mjit_remove_class_serial(RCLASS_SERIAL(obj));
2228 rb_id_table_free(RCLASS_M_TBL(obj));
2229 if (RCLASS_IV_TBL(obj)) {
2230 st_free_table(RCLASS_IV_TBL(obj));
2231 }
2232 if (RCLASS_CONST_TBL(obj)) {
2233 rb_free_const_table(RCLASS_CONST_TBL(obj));
2234 }
2235 if (RCLASS_IV_INDEX_TBL(obj)) {
2236 st_free_table(RCLASS_IV_INDEX_TBL(obj));
2237 }
2238 if (RCLASS_EXT(obj)->subclasses) {
2239 if (BUILTIN_TYPE(obj) == T_MODULE) {
2240 rb_class_detach_module_subclasses(obj);
2241 }
2242 else {
2243 rb_class_detach_subclasses(obj);
2244 }
2245 RCLASS_EXT(obj)->subclasses = NULL;
2246 }
2247 rb_class_remove_from_module_subclasses(obj);
2248 rb_class_remove_from_super_subclasses(obj);
2249 if (RANY(obj)->as.klass.ptr)
2250 xfree(RANY(obj)->as.klass.ptr);
2251 RANY(obj)->as.klass.ptr = NULL;
2252
2253 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
2254 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
2255 break;
2256 case T_STRING:
2257 rb_str_free(obj);
2258 break;
2259 case T_ARRAY:
2260 rb_ary_free(obj);
2261 break;
2262 case T_HASH:
2263 #if USE_DEBUG_COUNTER
2264 if (RHASH_SIZE(obj) >= 8) {
2265 RB_DEBUG_COUNTER_INC(obj_hash_ge8);
2266 }
2267 else if (RHASH_SIZE(obj) >= 4) {
2268 RB_DEBUG_COUNTER_INC(obj_hash_ge4);
2269 }
2270 else if (RHASH_SIZE(obj) >= 1) {
2271 RB_DEBUG_COUNTER_INC(obj_hash_under4);
2272 }
2273 else {
2274 RB_DEBUG_COUNTER_INC(obj_hash_empty);
2275 }
2276
2277 if (RHASH_AR_TABLE_P(obj)) {
2278 RB_DEBUG_COUNTER_INC(obj_hash_ar);
2279 }
2280 else {
2281 RB_DEBUG_COUNTER_INC(obj_hash_st);
2282 }
2283 #endif
2284 if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
2285 ar_table *tab = RHASH(obj)->as.ar;
2286
2287 if (tab) {
2288 if (RHASH_TRANSIENT_P(obj)) {
2289 RB_DEBUG_COUNTER_INC(obj_hash_transient);
2290 }
2291 else {
2292 ruby_xfree(tab);
2293 }
2294 }
2295 }
2296 else {
2297 GC_ASSERT(RHASH_ST_TABLE_P(obj));
2298 st_free_table(RHASH(obj)->as.st);
2299 }
2300 break;
2301 case T_REGEXP:
2302 if (RANY(obj)->as.regexp.ptr) {
2303 onig_free(RANY(obj)->as.regexp.ptr);
2304 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
2305 }
2306 break;
2307 case T_DATA:
2308 if (DATA_PTR(obj)) {
2309 int free_immediately = FALSE;
2310 void (*dfree)(void *);
2311 void *data = DATA_PTR(obj);
2312
2313 if (RTYPEDDATA_P(obj)) {
2314 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
2315 dfree = RANY(obj)->as.typeddata.type->function.dfree;
2316 if (0 && free_immediately == 0) {
2317 /* to expose non-free-immediate T_DATA */
2318 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
2319 }
2320 }
2321 else {
2322 dfree = RANY(obj)->as.data.dfree;
2323 }
2324
2325 if (dfree) {
2326 if (dfree == RUBY_DEFAULT_FREE) {
2327 xfree(data);
2328 RB_DEBUG_COUNTER_INC(obj_data_xfree);
2329 }
2330 else if (free_immediately) {
2331 (*dfree)(data);
2332 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
2333 }
2334 else {
2335 make_zombie(objspace, obj, dfree, data);
2336 RB_DEBUG_COUNTER_INC(obj_data_zombie);
2337 return 1;
2338 }
2339 }
2340 else {
2341 RB_DEBUG_COUNTER_INC(obj_data_empty);
2342 }
2343 }
2344 break;
2345 case T_MATCH:
2346 if (RANY(obj)->as.match.rmatch) {
2347 struct rmatch *rm = RANY(obj)->as.match.rmatch;
2348 onig_region_free(&rm->regs, 0);
2349 if (rm->char_offset)
2350 xfree(rm->char_offset);
2351 xfree(rm);
2352
2353 RB_DEBUG_COUNTER_INC(obj_match_ptr);
2354 }
2355 break;
2356 case T_FILE:
2357 if (RANY(obj)->as.file.fptr) {
2358 make_io_zombie(objspace, obj);
2359 RB_DEBUG_COUNTER_INC(obj_file_ptr);
2360 return 1;
2361 }
2362 break;
2363 case T_RATIONAL:
2364 case T_COMPLEX:
2365 break;
2366 case T_ICLASS:
2367 	/* Basically, a T_ICLASS shares its method table with the module */
2368 if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
2369 rb_id_table_free(RCLASS_M_TBL(obj));
2370 }
2371 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
2372 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
2373 }
2374 if (RCLASS_EXT(obj)->subclasses) {
2375 rb_class_detach_subclasses(obj);
2376 RCLASS_EXT(obj)->subclasses = NULL;
2377 }
2378 rb_class_remove_from_module_subclasses(obj);
2379 rb_class_remove_from_super_subclasses(obj);
2380 xfree(RANY(obj)->as.klass.ptr);
2381 RANY(obj)->as.klass.ptr = NULL;
2382
2383 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
2384 break;
2385
2386 case T_FLOAT:
2387 break;
2388
2389 case T_BIGNUM:
2390 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2391 xfree(BIGNUM_DIGITS(obj));
2392 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
2393 }
2394 break;
2395
2396 case T_NODE:
2397 UNEXPECTED_NODE(obj_free);
2398 break;
2399
2400 case T_STRUCT:
2401 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
2402 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
2403 RB_DEBUG_COUNTER_INC(obj_struct_embed);
2404 }
2405 else if (RSTRUCT_TRANSIENT_P(obj)) {
2406 RB_DEBUG_COUNTER_INC(obj_struct_transient);
2407 }
2408 else {
2409 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
2410 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
2411 }
2412 break;
2413
2414 case T_SYMBOL:
2415 {
2416 rb_gc_free_dsymbol(obj);
2417 RB_DEBUG_COUNTER_INC(obj_symbol);
2418 }
2419 break;
2420
2421 case T_IMEMO:
2422 switch (imemo_type(obj)) {
2423 case imemo_ment:
2424 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
2425 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
2426 break;
2427 case imemo_iseq:
2428 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
2429 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
2430 break;
2431 case imemo_env:
2432 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
2433 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
2434 RB_DEBUG_COUNTER_INC(obj_imemo_env);
2435 break;
2436 case imemo_tmpbuf:
2437 xfree(RANY(obj)->as.imemo.alloc.ptr);
2438 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
2439 break;
2440 case imemo_ast:
2441 rb_ast_free(&RANY(obj)->as.imemo.ast);
2442 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
2443 break;
2444 case imemo_cref:
2445 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
2446 break;
2447 case imemo_svar:
2448 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
2449 break;
2450 case imemo_throw_data:
2451 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
2452 break;
2453 case imemo_ifunc:
2454 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
2455 break;
2456 case imemo_memo:
2457 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
2458 break;
2459 case imemo_parser_strterm:
2460 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
2461 break;
2462 default:
2463 /* unreachable */
2464 break;
2465 }
2466 return 0;
2467
2468 default:
2469 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
2470 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
2471 }
2472
2473 if (FL_TEST(obj, FL_FINALIZE)) {
2474 make_zombie(objspace, obj, 0, 0);
2475 return 1;
2476 }
2477 else {
2478 return 0;
2479 }
2480 }
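/* obj_free() returns 0 when the slot can be reused immediately, and 1 when
 * the object became a zombie (deferred dfree and/or Ruby-level finalizers);
 * gc_page_sweep() counts the latter as final slots rather than free slots. */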
2481
2482 void
2483 Init_heap(void)
2484 {
2485 rb_objspace_t *objspace = &rb_objspace;
2486
2487 gc_stress_set(objspace, ruby_initial_gc_stress);
2488
2489 #if RGENGC_ESTIMATE_OLDMALLOC
2490 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
2491 #endif
2492
2493 heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
2494 init_mark_stack(&objspace->mark_stack);
2495
2496 objspace->profile.invoke_time = getrusage_time();
2497 finalizer_table = st_init_numtable();
2498 }
2499
2500 typedef int each_obj_callback(void *, void *, size_t, void *);
2501
2502 struct each_obj_args {
2503 each_obj_callback *callback;
2504 void *data;
2505 };
2506
2507 static VALUE
2508 objspace_each_objects(VALUE arg)
2509 {
2510 size_t i;
2511 struct heap_page *page;
2512 RVALUE *pstart = NULL, *pend;
2513 rb_objspace_t *objspace = &rb_objspace;
2514 struct each_obj_args *args = (struct each_obj_args *)arg;
2515
2516 i = 0;
2517 while (i < heap_allocated_pages) {
2518 while (0 < i && pstart < heap_pages_sorted[i-1]->start) i--;
2519 while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
2520 if (heap_allocated_pages <= i) break;
2521
2522 page = heap_pages_sorted[i];
2523
2524 pstart = page->start;
2525 pend = pstart + page->total_slots;
2526
2527 if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
2528 break;
2529 }
2530 }
2531
2532 return Qnil;
2533 }
2534
2535 static VALUE
2536 incremental_enable(void)
2537 {
2538 rb_objspace_t *objspace = &rb_objspace;
2539
2540 objspace->flags.dont_incremental = FALSE;
2541 return Qnil;
2542 }
2543
2544 /*
2545  * rb_objspace_each_objects() is a special C API to walk through the
2546  * Ruby object space.  This C API is difficult to use correctly; to be
2547  * frank, you should not use it.  If you do, read the source code of
2548  * this function first and understand exactly what it does.
2549 *
2550  * 'callback' will be called several times (once per heap page in the
2551  * current implementation) with:
2552  *   vstart: a pointer to the first living object of the heap page.
2553  *   vend:   a pointer just past the end of the valid heap page area.
2554  *   stride: the distance to the next VALUE.
2555 *
2556 * If callback() returns non-zero, the iteration will be stopped.
2557 *
2558  * This is sample callback code that iterates over live objects:
2559 *
2560  *    int
2561  *    sample_callback(void *vstart, void *vend, int stride, void *data) {
2562  *        VALUE v = (VALUE)vstart;
2563  *        for (; v != (VALUE)vend; v += stride) {
2564  *            if (RBASIC(v)->flags) { // liveness check
2565  *                // do something with live object 'v'
2566  *            }
2567  *        }
2568  *        return 0; // continue the iteration
2569  *    }
 *
2570  * Note: 'vstart' is not the top of the heap_page.  It points to the first
2571  *       living object, in order to grasp at least one object and avoid GC
2572  *       issues.  This means you cannot walk through all Ruby object pages,
2573  *       including freed object pages.
2574 *
2575  * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
2576  *       However, a different value may be passed for 'stride' in the
2577  *       future, so you must use 'stride' rather than a hard-coded
2578  *       constant in the iteration.
2579 */
2580 void
2581 rb_objspace_each_objects(each_obj_callback *callback, void *data)
2582 {
2583 struct each_obj_args args;
2584 rb_objspace_t *objspace = &rb_objspace;
2585 int prev_dont_incremental = objspace->flags.dont_incremental;
2586
2587 gc_rest(objspace);
2588 objspace->flags.dont_incremental = TRUE;
2589
2590 args.callback = callback;
2591 args.data = data;
2592
2593 if (prev_dont_incremental) {
2594 objspace_each_objects((VALUE)&args);
2595 }
2596 else {
2597 rb_ensure(objspace_each_objects, (VALUE)&args, incremental_enable, Qnil);
2598 }
2599 }
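/* Illustrative sketch (not part of this file): counting live strings with the
 * callback API above.  `count_strings_i' is a hypothetical helper name.
 *
 *     static int
 *     count_strings_i(void *vstart, void *vend, size_t stride, void *data)
 *     {
 *         size_t *count = (size_t *)data;
 *         VALUE v = (VALUE)vstart;
 *         for (; v != (VALUE)vend; v += stride) {
 *             if (RBASIC(v)->flags && BUILTIN_TYPE(v) == T_STRING) (*count)++;
 *         }
 *         return 0;
 *     }
 *
 *     size_t count = 0;
 *     rb_objspace_each_objects(count_strings_i, &count);
 */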
2600
2601 void
2602 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
2603 {
2604 struct each_obj_args args;
2605 args.callback = callback;
2606 args.data = data;
2607
2608 objspace_each_objects((VALUE)&args);
2609 }
2610
2611 struct os_each_struct {
2612 size_t num;
2613 VALUE of;
2614 };
2615
2616 static int
2617 internal_object_p(VALUE obj)
2618 {
2619 RVALUE *p = (RVALUE *)obj;
2620 void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
2621 bool used_p = p->as.basic.flags;
2622 unpoison_object(obj, false);
2623
2624 if (used_p) {
2625 switch (BUILTIN_TYPE(p)) {
2626 case T_NODE:
2627 UNEXPECTED_NODE(internal_object_p);
2628 break;
2629 case T_NONE:
2630 case T_IMEMO:
2631 case T_ICLASS:
2632 case T_ZOMBIE:
2633 break;
2634 case T_CLASS:
2635 if (!p->as.basic.klass) break;
2636 if (FL_TEST(obj, FL_SINGLETON)) {
2637 return rb_singleton_class_internal_p(obj);
2638 }
2639 return 0;
2640 default:
2641 if (!p->as.basic.klass) break;
2642 return 0;
2643 }
2644 }
2645 if (ptr || ! used_p) {
2646 poison_object(obj);
2647 }
2648 return 1;
2649 }
2650
2651 int
2652 rb_objspace_internal_object_p(VALUE obj)
2653 {
2654 return internal_object_p(obj);
2655 }
2656
2657 static int
2658 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
2659 {
2660 struct os_each_struct *oes = (struct os_each_struct *)data;
2661 RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
2662
2663 for (; p != pend; p++) {
2664 volatile VALUE v = (VALUE)p;
2665 if (!internal_object_p(v)) {
2666 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
2667 rb_yield(v);
2668 oes->num++;
2669 }
2670 }
2671 }
2672
2673 return 0;
2674 }
2675
2676 static VALUE
2677 os_obj_of(VALUE of)
2678 {
2679 struct os_each_struct oes;
2680
2681 oes.num = 0;
2682 oes.of = of;
2683 rb_objspace_each_objects(os_obj_of_i, &oes);
2684 return SIZET2NUM(oes.num);
2685 }
2686
2687 /*
2688 * call-seq:
2689 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
2690 * ObjectSpace.each_object([module]) -> an_enumerator
2691 *
2692 * Calls the block once for each living, nonimmediate object in this
2693 * Ruby process. If <i>module</i> is specified, calls the block
2694 * for only those classes or modules that match (or are a subclass of)
2695 * <i>module</i>. Returns the number of objects found. Immediate
2696  * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
2697 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
2698 * never returned. In the example below, <code>each_object</code>
2699 * returns both the numbers we defined and several constants defined in
2700 * the <code>Math</code> module.
2701 *
2702 * If no block is given, an enumerator is returned instead.
2703 *
2704 * a = 102.7
2705 * b = 95 # Won't be returned
2706 * c = 12345678987654321
2707 * count = ObjectSpace.each_object(Numeric) {|x| p x }
2708 * puts "Total count: #{count}"
2709 *
2710 * <em>produces:</em>
2711 *
2712 * 12345678987654321
2713 * 102.7
2714 * 2.71828182845905
2715 * 3.14159265358979
2716 * 2.22044604925031e-16
2717 * 1.7976931348623157e+308
2718 * 2.2250738585072e-308
2719 * Total count: 7
2720 *
2721 */
2722
2723 static VALUE
2724 os_each_obj(int argc, VALUE *argv, VALUE os)
2725 {
2726 VALUE of;
2727
2728 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
2729 RETURN_ENUMERATOR(os, 1, &of);
2730 return os_obj_of(of);
2731 }
2732
2733 /*
2734 * call-seq:
2735 * ObjectSpace.undefine_finalizer(obj)
2736 *
2737 * Removes all finalizers for <i>obj</i>.
2738 *
2739 */
2740
2741 static VALUE
2742 undefine_final(VALUE os, VALUE obj)
2743 {
2744 return rb_undefine_finalizer(obj);
2745 }
2746
2747 VALUE
2748 rb_undefine_finalizer(VALUE obj)
2749 {
2750 rb_objspace_t *objspace = &rb_objspace;
2751 st_data_t data = obj;
2752 rb_check_frozen(obj);
2753 st_delete(finalizer_table, &data, 0);
2754 FL_UNSET(obj, FL_FINALIZE);
2755 return obj;
2756 }
2757
2758 static void
2759 should_be_callable(VALUE block)
2760 {
2761 if (!rb_obj_respond_to(block, idCall, TRUE)) {
2762 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
2763 rb_obj_class(block));
2764 }
2765 }
2766 static void
2767 should_be_finalizable(VALUE obj)
2768 {
2769 if (!FL_ABLE(obj)) {
2770 rb_raise(rb_eArgError, "cannot define finalizer for %s",
2771 rb_obj_classname(obj));
2772 }
2773 rb_check_frozen(obj);
2774 }
2775
2776 /*
2777 * call-seq:
2778 * ObjectSpace.define_finalizer(obj, aProc=proc())
2779 *
2780 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
2781  * is destroyed. The object ID of <i>obj</i> will be passed
2782 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
2783 * method, make sure it can be called with a single argument.
2784 *
2785 */
2786
2787 static VALUE
2788 define_final(int argc, VALUE *argv, VALUE os)
2789 {
2790 VALUE obj, block;
2791
2792 rb_scan_args(argc, argv, "11", &obj, &block);
2793 should_be_finalizable(obj);
2794 if (argc == 1) {
2795 block = rb_block_proc();
2796 }
2797 else {
2798 should_be_callable(block);
2799 }
2800
2801 return define_final0(obj, block);
2802 }
2803
2804 static VALUE
2805 define_final0(VALUE obj, VALUE block)
2806 {
2807 rb_objspace_t *objspace = &rb_objspace;
2808 VALUE table;
2809 st_data_t data;
2810
2811 RBASIC(obj)->flags |= FL_FINALIZE;
2812
2813 block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
2814 OBJ_FREEZE(block);
2815
2816 if (st_lookup(finalizer_table, obj, &data)) {
2817 table = (VALUE)data;
2818
2819 	/* avoid duplicate blocks; the table is usually small */
2820 {
2821 long len = RARRAY_LEN(table);
2822 long i;
2823
2824 for (i = 0; i < len; i++) {
2825 VALUE recv = RARRAY_AREF(table, i);
2826 if (rb_funcall(recv, idEq, 1, block)) {
2827 return recv;
2828 }
2829 }
2830 }
2831
2832 rb_ary_push(table, block);
2833 }
2834 else {
2835 table = rb_ary_new3(1, block);
2836 RBASIC_CLEAR_CLASS(table);
2837 st_add_direct(finalizer_table, obj, table);
2838 }
2839 return block;
2840 }
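/* finalizer_table maps an object to a hidden Array; each element of that
 * Array is a frozen pair of [safe level at registration, callable].
 * run_single_final() restores that safe level (or RUBY_SAFE_LEVEL_MAX for a
 * tainted callable) before invoking the callable with the object id. */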
2841
2842 VALUE
2843 rb_define_finalizer(VALUE obj, VALUE block)
2844 {
2845 should_be_finalizable(obj);
2846 should_be_callable(block);
2847 return define_final0(obj, block);
2848 }
2849
2850 void
2851 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
2852 {
2853 rb_objspace_t *objspace = &rb_objspace;
2854 VALUE table;
2855 st_data_t data;
2856
2857 if (!FL_TEST(obj, FL_FINALIZE)) return;
2858 if (st_lookup(finalizer_table, obj, &data)) {
2859 table = (VALUE)data;
2860 st_insert(finalizer_table, dest, table);
2861 }
2862 FL_SET(dest, FL_FINALIZE);
2863 }
2864
2865 static VALUE
2866 run_single_final(VALUE final, VALUE objid)
2867 {
2868 const VALUE cmd = RARRAY_AREF(final, 1);
2869 const int level = OBJ_TAINTED(cmd) ?
2870 RUBY_SAFE_LEVEL_MAX : FIX2INT(RARRAY_AREF(final, 0));
2871
2872 rb_set_safe_level_force(level);
2873 return rb_check_funcall(cmd, idCall, 1, &objid);
2874 }
2875
2876 static void
2877 run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
2878 {
2879 long i;
2880 enum ruby_tag_type state;
2881 volatile struct {
2882 VALUE errinfo;
2883 VALUE objid;
2884 rb_control_frame_t *cfp;
2885 long finished;
2886 int safe;
2887 } saved;
2888 rb_execution_context_t * volatile ec = GET_EC();
2889 #define RESTORE_FINALIZER() (\
2890 ec->cfp = saved.cfp, \
2891 rb_set_safe_level_force(saved.safe), \
2892 rb_set_errinfo(saved.errinfo))
2893
2894 saved.safe = rb_safe_level();
2895 saved.errinfo = rb_errinfo();
2896 saved.objid = nonspecial_obj_id(obj);
2897 saved.cfp = ec->cfp;
2898 saved.finished = 0;
2899
2900 EC_PUSH_TAG(ec);
2901 state = EC_EXEC_TAG();
2902 if (state != TAG_NONE) {
2903 ++saved.finished; /* skip failed finalizer */
2904 }
2905 for (i = saved.finished;
2906 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
2907 saved.finished = ++i) {
2908 run_single_final(RARRAY_AREF(table, i), saved.objid);
2909 }
2910 EC_POP_TAG();
2911 #undef RESTORE_FINALIZER
2912 }
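/* Finalizers for one object run inside a single EC_PUSH_TAG() scope: if one
 * of them raises, the non-local jump lands back at EC_EXEC_TAG() with
 * state != TAG_NONE, the failed entry is skipped via saved.finished, and the
 * loop resumes with the next entry.  RESTORE_FINALIZER() re-establishes the
 * saved control frame, safe level and error info on every iteration. */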
2913
2914 static void
2915 run_final(rb_objspace_t *objspace, VALUE zombie)
2916 {
2917 st_data_t key, table;
2918
2919 if (RZOMBIE(zombie)->dfree) {
2920 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
2921 }
2922
2923 key = (st_data_t)zombie;
2924 if (st_delete(finalizer_table, &key, &table)) {
2925 run_finalizer(objspace, zombie, (VALUE)table);
2926 }
2927 }
2928
2929 static void
2930 finalize_list(rb_objspace_t *objspace, VALUE zombie)
2931 {
2932 while (zombie) {
2933 VALUE next_zombie;
2934 struct heap_page *page;
2935 unpoison_object(zombie, false);
2936 next_zombie = RZOMBIE(zombie)->next;
2937 page = GET_HEAP_PAGE(zombie);
2938
2939 run_final(objspace, zombie);
2940
2941 RZOMBIE(zombie)->basic.flags = 0;
2942 if (LIKELY(heap_pages_final_slots)) heap_pages_final_slots--;
2943 page->final_slots--;
2944 page->free_slots++;
2945 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
2946
2947 objspace->profile.total_freed_objects++;
2948
2949 zombie = next_zombie;
2950 }
2951 }
2952
2953 static void
2954 finalize_deferred(rb_objspace_t *objspace)
2955 {
2956 VALUE zombie;
2957
2958 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
2959 finalize_list(objspace, zombie);
2960 }
2961 }
2962
2963 static void
2964 gc_finalize_deferred(void *dmy)
2965 {
2966 rb_objspace_t *objspace = dmy;
2967 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
2968 finalize_deferred(objspace);
2969 ATOMIC_SET(finalizing, 0);
2970 }
2971
2972 /* TODO: kept for compatibility; maybe unused. */
2973 void
2974 rb_gc_finalize_deferred(void)
2975 {
2976 gc_finalize_deferred(0);
2977 }
2978
2979 static void
2980 gc_finalize_deferred_register(rb_objspace_t *objspace)
2981 {
2982 if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
2983 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
2984 }
2985 }
2986
2987 struct force_finalize_list {
2988 VALUE obj;
2989 VALUE table;
2990 struct force_finalize_list *next;
2991 };
2992
2993 static int
2994 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
2995 {
2996 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
2997 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
2998 curr->obj = key;
2999 curr->table = val;
3000 curr->next = *prev;
3001 *prev = curr;
3002 return ST_CONTINUE;
3003 }
3004
3005 void
3006 rb_gc_call_finalizer_at_exit(void)
3007 {
3008 #if RGENGC_CHECK_MODE >= 2
3009 gc_verify_internal_consistency(Qnil);
3010 #endif
3011 rb_objspace_call_finalizer(&rb_objspace);
3012 }
3013
3014 static void
3015 rb_objspace_call_finalizer(rb_objspace_t *objspace)
3016 {
3017 RVALUE *p, *pend;
3018 size_t i;
3019
3020 gc_rest(objspace);
3021
3022 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3023
3024 /* run finalizers */
3025 finalize_deferred(objspace);
3026 GC_ASSERT(heap_pages_deferred_final == 0);
3027
3028 gc_rest(objspace);
3029 /* prohibit incremental GC */
3030 objspace->flags.dont_incremental = 1;
3031
3032     /* force finalizers to run */
3033 while (finalizer_table->num_entries) {
3034 struct force_finalize_list *list = 0;
3035 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
3036 while (list) {
3037 struct force_finalize_list *curr = list;
3038 st_data_t obj = (st_data_t)curr->obj;
3039 run_finalizer(objspace, curr->obj, curr->table);
3040 st_delete(finalizer_table, &obj, 0);
3041 list = curr->next;
3042 xfree(curr);
3043 }
3044 }
3045
3046     /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
3047 dont_gc = 1;
3048
3049     /* running data/file finalizers is part of garbage collection */
3050 gc_enter(objspace, "rb_objspace_call_finalizer");
3051
3052 /* run data/file object's finalizers */
3053 for (i = 0; i < heap_allocated_pages; i++) {
3054 p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
3055 while (p < pend) {
3056 unpoison_object((VALUE)p, false);
3057 switch (BUILTIN_TYPE(p)) {
3058 case T_DATA:
3059 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
3060 if (rb_obj_is_thread((VALUE)p)) break;
3061 if (rb_obj_is_mutex((VALUE)p)) break;
3062 if (rb_obj_is_fiber((VALUE)p)) break;
3063 p->as.free.flags = 0;
3064 if (RTYPEDDATA_P(p)) {
3065 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
3066 }
3067 if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
3068 xfree(DATA_PTR(p));
3069 }
3070 else if (RANY(p)->as.data.dfree) {
3071 make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
3072 }
3073 break;
3074 case T_FILE:
3075 if (RANY(p)->as.file.fptr) {
3076 make_io_zombie(objspace, (VALUE)p);
3077 }
3078 break;
3079 }
3080 poison_object((VALUE)p);
3081 p++;
3082 }
3083 }
3084
3085 gc_exit(objspace, "rb_objspace_call_finalizer");
3086
3087 if (heap_pages_deferred_final) {
3088 finalize_list(objspace, heap_pages_deferred_final);
3089 }
3090
3091 st_free_table(finalizer_table);
3092 finalizer_table = 0;
3093 ATOMIC_SET(finalizing, 0);
3094 }
3095
3096 PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr));
3097 static inline int
3098 is_id_value(rb_objspace_t *objspace, VALUE ptr)
3099 {
3100 if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
3101 if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
3102 if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
3103 return TRUE;
3104 }
3105
3106 static inline int
3107 heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
3108 {
3109 struct heap_page *page = GET_HEAP_PAGE(ptr);
3110 return page->flags.before_sweep ? FALSE : TRUE;
3111 }
3112
3113 static inline int
3114 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
3115 {
3116 if (heap_is_swept_object(objspace, heap_eden, ptr)) {
3117 return TRUE;
3118 }
3119 else {
3120 return FALSE;
3121 }
3122 }
3123
3124 /* garbage objects will be collected soon. */
3125 static inline int
3126 is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
3127 {
3128 if (!is_lazy_sweeping(heap_eden) ||
3129 is_swept_object(objspace, ptr) ||
3130 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
3131
3132 return FALSE;
3133 }
3134 else {
3135 return TRUE;
3136 }
3137 }
3138
3139 static inline int
3140 is_live_object(rb_objspace_t *objspace, VALUE ptr)
3141 {
3142 switch (BUILTIN_TYPE(ptr)) {
3143 case T_NONE:
3144 case T_ZOMBIE:
3145 return FALSE;
3146 }
3147
3148 if (!is_garbage_object(objspace, ptr)) {
3149 return TRUE;
3150 }
3151 else {
3152 return FALSE;
3153 }
3154 }
3155
3156 static inline int
3157 is_markable_object(rb_objspace_t *objspace, VALUE obj)
3158 {
3159 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
3160 check_rvalue_consistency(obj);
3161 return TRUE;
3162 }
3163
3164 int
3165 rb_objspace_markable_object_p(VALUE obj)
3166 {
3167 rb_objspace_t *objspace = &rb_objspace;
3168 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
3169 }
3170
3171 int
3172 rb_objspace_garbage_object_p(VALUE obj)
3173 {
3174 rb_objspace_t *objspace = &rb_objspace;
3175 return is_garbage_object(objspace, obj);
3176 }
3177
3178 /*
3179 * call-seq:
3180 * ObjectSpace._id2ref(object_id) -> an_object
3181 *
3182 * Converts an object id to a reference to the object. May not be
3183 * called on an object id passed as a parameter to a finalizer.
3184 *
3185 * s = "I am a string" #=> "I am a string"
3186 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
3187 * r == s #=> true
3188 *
3189 */
3190
3191 static VALUE
3192 id2ref(VALUE obj, VALUE objid)
3193 {
3194 #if SIZEOF_LONG == SIZEOF_VOIDP
3195 #define NUM2PTR(x) NUM2ULONG(x)
3196 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3197 #define NUM2PTR(x) NUM2ULL(x)
3198 #endif
3199 rb_objspace_t *objspace = &rb_objspace;
3200 VALUE ptr;
3201 void *p0;
3202
3203 ptr = NUM2PTR(objid);
3204 p0 = (void *)ptr;
3205
3206 if (ptr == Qtrue) return Qtrue;
3207 if (ptr == Qfalse) return Qfalse;
3208 if (ptr == Qnil) return Qnil;
3209 if (FIXNUM_P(ptr)) return (VALUE)ptr;
3210 if (FLONUM_P(ptr)) return (VALUE)ptr;
3211 ptr = obj_id_to_ref(objid);
3212
3213 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
3214 ID symid = ptr / sizeof(RVALUE);
3215 if (rb_id2str(symid) == 0)
3216 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
3217 return ID2SYM(symid);
3218 }
3219
3220 if (!is_id_value(objspace, ptr)) {
3221 rb_raise(rb_eRangeError, "%p is not id value", p0);
3222 }
3223 if (!is_live_object(objspace, ptr)) {
3224 rb_raise(rb_eRangeError, "%p is recycled object", p0);
3225 }
3226 if (RBASIC(ptr)->klass == 0) {
3227 rb_raise(rb_eRangeError, "%p is internal object", p0);
3228 }
3229 return (VALUE)ptr;
3230 }
3231
3232 /*
3233 * Document-method: __id__
3234 * Document-method: object_id
3235 *
3236 * call-seq:
3237 * obj.__id__ -> integer
3238 * obj.object_id -> integer
3239 *
3240 * Returns an integer identifier for +obj+.
3241 *
3242 * The same number will be returned on all calls to +object_id+ for a given
3243 * object, and no two active objects will share an id.
3244 *
3245  * Note that some objects of builtin classes are reused for optimization.
3246 * This is the case for immediate values and frozen string literals.
3247 *
3248 * Immediate values are not passed by reference but are passed by value:
3249 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
3250 *
3251 * Object.new.object_id == Object.new.object_id # => false
3252 * (21 * 2).object_id == (21 * 2).object_id # => true
3253 * "hello".object_id == "hello".object_id # => false
3254 * "hi".freeze.object_id == "hi".freeze.object_id # => true
3255 */
3256
3257 VALUE
3258 rb_obj_id(VALUE obj)
3259 {
3260 /*
3261 * 32-bit VALUE space
3262 * MSB ------------------------ LSB
3263 * false 00000000000000000000000000000000
3264 * true 00000000000000000000000000000010
3265 * nil 00000000000000000000000000000100
3266 * undef 00000000000000000000000000000110
3267 * symbol ssssssssssssssssssssssss00001110
3268 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
3269 * fixnum fffffffffffffffffffffffffffffff1
3270 *
3271 * object_id space
3272 * LSB
3273 * false 00000000000000000000000000000000
3274 * true 00000000000000000000000000000010
3275 * nil 00000000000000000000000000000100
3276 * undef 00000000000000000000000000000110
3277 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
3278 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
3279 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
3280 *
3281 * where A = sizeof(RVALUE)/4
3282 *
3283 * sizeof(RVALUE) is
3284 * 20 if 32-bit, double is 4-byte aligned
3285 * 24 if 32-bit, double is 8-byte aligned
3286 * 40 if 64-bit
3287 */
3288 if (STATIC_SYM_P(obj)) {
3289 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
3290 }
3291 else if (FLONUM_P(obj)) {
3292 #if SIZEOF_LONG == SIZEOF_VOIDP
3293 return LONG2NUM((SIGNED_VALUE)obj);
3294 #else
3295 return LL2NUM((SIGNED_VALUE)obj);
3296 #endif
3297 }
3298 else if (SPECIAL_CONST_P(obj)) {
3299 return LONG2NUM((SIGNED_VALUE)obj);
3300 }
3301 return nonspecial_obj_id(obj);
3302 }
3303
3304 #include "regint.h"
3305
3306 static size_t
3307 obj_memsize_of(VALUE obj, int use_all_types)
3308 {
3309 size_t size = 0;
3310
3311 if (SPECIAL_CONST_P(obj)) {
3312 return 0;
3313 }
3314
3315 if (FL_TEST(obj, FL_EXIVAR)) {
3316 size += rb_generic_ivar_memsize(obj);
3317 }
3318
3319 switch (BUILTIN_TYPE(obj)) {
3320 case T_OBJECT:
3321 if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
3322 ROBJECT(obj)->as.heap.ivptr) {
3323 size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
3324 }
3325 break;
3326 case T_MODULE:
3327 case T_CLASS:
3328 if (RCLASS_M_TBL(obj)) {
3329 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
3330 }
3331 if (RCLASS_EXT(obj)) {
3332 if (RCLASS_IV_TBL(obj)) {
3333 size += st_memsize(RCLASS_IV_TBL(obj));
3334 }
3335 if (RCLASS_IV_INDEX_TBL(obj)) {
3336 size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
3337 }
3338 if (RCLASS(obj)->ptr->iv_tbl) {
3339 size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
3340 }
3341 if (RCLASS(obj)->ptr->const_tbl) {
3342 size += rb_id_table_memsize(RCLASS(obj)->ptr->const_tbl);
3343 }
3344 size += sizeof(rb_classext_t);
3345 }
3346 break;
3347 case T_ICLASS:
3348 if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
3349 if (RCLASS_M_TBL(obj)) {
3350 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
3351 }
3352 }
3353 break;
3354 case T_STRING:
3355 size += rb_str_memsize(obj);
3356 break;
3357 case T_ARRAY:
3358 size += rb_ary_memsize(obj);
3359 break;
3360 case T_HASH:
3361 if (RHASH_AR_TABLE_P(obj)) {
3362 size += sizeof(ar_table);
3363 }
3364 else {
3365 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
3366 size += st_memsize(RHASH_ST_TABLE(obj));
3367 }
3368 break;
3369 case T_REGEXP:
3370 if (RREGEXP_PTR(obj)) {
3371 size += onig_memsize(RREGEXP_PTR(obj));
3372 }
3373 break;
3374 case T_DATA:
3375 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
3376 break;
3377 case T_MATCH:
3378 if (RMATCH(obj)->rmatch) {
3379 struct rmatch *rm = RMATCH(obj)->rmatch;
3380 size += onig_region_memsize(&rm->regs);
3381 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
3382 size += sizeof(struct rmatch);
3383 }
3384 break;
3385 case T_FILE:
3386 if (RFILE(obj)->fptr) {
3387 size += rb_io_memsize(RFILE(obj)->fptr);
3388 }
3389 break;
3390 case T_RATIONAL:
3391 case T_COMPLEX:
3392 case T_IMEMO:
3393 if (imemo_type_p(obj, imemo_tmpbuf)) {
3394 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
3395 }
3396 break;
3397
3398 case T_FLOAT:
3399 case T_SYMBOL:
3400 break;
3401
3402 case T_BIGNUM:
3403 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
3404 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
3405 }
3406 break;
3407
3408 case T_NODE:
3409 UNEXPECTED_NODE(obj_memsize_of);
3410 break;
3411
3412 case T_STRUCT:
3413 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
3414 RSTRUCT(obj)->as.heap.ptr) {
3415 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
3416 }
3417 break;
3418
3419 case T_ZOMBIE:
3420 break;
3421
3422 default:
3423 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
3424 BUILTIN_TYPE(obj), (void*)obj);
3425 }
3426
3427 return size + sizeof(RVALUE);
3428 }
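/* Usage note (a hedged sketch): the bundled "objspace" extension exposes this
 * as ObjectSpace.memsize_of, e.g.
 *
 *     require "objspace"
 *     ObjectSpace.memsize_of("x" * 1000)   # roughly 1000 + sizeof(RVALUE)
 *
 * The result is the RVALUE slot plus whatever external allocations this
 * function knows how to measure; treat it as a lower bound, not an exact
 * malloc footprint. */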
3429
3430 size_t
3431 rb_obj_memsize_of(VALUE obj)
3432 {
3433 return obj_memsize_of(obj, TRUE);
3434 }
3435
3436 static int
3437 set_zero(st_data_t key, st_data_t val, st_data_t arg)
3438 {
3439 VALUE k = (VALUE)key;
3440 VALUE hash = (VALUE)arg;
3441 rb_hash_aset(hash, k, INT2FIX(0));
3442 return ST_CONTINUE;
3443 }
3444
3445 /*
3446 * call-seq:
3447 * ObjectSpace.count_objects([result_hash]) -> hash
3448 *
3449 * Counts all objects grouped by type.
3450 *
3451 * It returns a hash, such as:
3452 * {
3453 * :TOTAL=>10000,
3454 * :FREE=>3011,
3455 * :T_OBJECT=>6,
3456 * :T_CLASS=>404,
3457 * # ...
3458 * }
3459 *
3460 * The contents of the returned hash are implementation specific.
3461  * They may change in the future.
3462  *
3463  * Keys starting with +:T_+ count live objects.
3464  * For example, +:T_ARRAY+ is the number of arrays.
3465  * +:FREE+ is the number of object slots that are not currently in use.
3466  * +:TOTAL+ is the sum of the above.
3467 *
3468 * If the optional argument +result_hash+ is given,
3469 * it is overwritten and returned. This is intended to avoid probe effect.
3470 *
3471 * h = {}
3472 * ObjectSpace.count_objects(h)
3473 * puts h
3474 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
3475 *
3476 * This method is only expected to work on C Ruby.
3477 *
3478 */
3479
3480 static VALUE
3481 count_objects(int argc, VALUE *argv, VALUE os)
3482 {
3483 rb_objspace_t *objspace = &rb_objspace;
3484 size_t counts[T_MASK+1];
3485 size_t freed = 0;
3486 size_t total = 0;
3487 size_t i;
3488 VALUE hash = Qnil;
3489
3490 if (rb_check_arity(argc, 0, 1) == 1) {
3491 hash = argv[0];
3492 if (!RB_TYPE_P(hash, T_HASH))
3493 rb_raise(rb_eTypeError, "non-hash given");
3494 }
3495
3496 for (i = 0; i <= T_MASK; i++) {
3497 counts[i] = 0;
3498 }
3499
3500 for (i = 0; i < heap_allocated_pages; i++) {
3501 struct heap_page *page = heap_pages_sorted[i];
3502 RVALUE *p, *pend;
3503
3504 p = page->start; pend = p + page->total_slots;
3505 for (;p < pend; p++) {
3506 if (p->as.basic.flags) {
3507 counts[BUILTIN_TYPE(p)]++;
3508 }
3509 else {
3510 freed++;
3511 }
3512 }
3513 total += page->total_slots;
3514 }
3515
3516 if (hash == Qnil) {
3517 hash = rb_hash_new();
3518 }
3519 else if (!RHASH_EMPTY_P(hash)) {
3520 rb_hash_stlike_foreach(hash, set_zero, hash);
3521 }
3522 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
3523 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
3524
3525 for (i = 0; i <= T_MASK; i++) {
3526 VALUE type;
3527 switch (i) {
3528 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
3529 COUNT_TYPE(T_NONE);
3530 COUNT_TYPE(T_OBJECT);
3531 COUNT_TYPE(T_CLASS);
3532 COUNT_TYPE(T_MODULE);
3533 COUNT_TYPE(T_FLOAT);
3534 COUNT_TYPE(T_STRING);
3535 COUNT_TYPE(T_REGEXP);
3536 COUNT_TYPE(T_ARRAY);
3537 COUNT_TYPE(T_HASH);
3538 COUNT_TYPE(T_STRUCT);
3539 COUNT_TYPE(T_BIGNUM);
3540 COUNT_TYPE(T_FILE);
3541 COUNT_TYPE(T_DATA);
3542 COUNT_TYPE(T_MATCH);
3543 COUNT_TYPE(T_COMPLEX);
3544 COUNT_TYPE(T_RATIONAL);
3545 COUNT_TYPE(T_NIL);
3546 COUNT_TYPE(T_TRUE);
3547 COUNT_TYPE(T_FALSE);
3548 COUNT_TYPE(T_SYMBOL);
3549 COUNT_TYPE(T_FIXNUM);
3550 COUNT_TYPE(T_IMEMO);
3551 COUNT_TYPE(T_UNDEF);
3552 COUNT_TYPE(T_ICLASS);
3553 COUNT_TYPE(T_ZOMBIE);
3554 #undef COUNT_TYPE
3555 default: type = INT2NUM(i); break;
3556 }
3557 if (counts[i])
3558 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
3559 }
3560
3561 return hash;
3562 }
3563
3564 /*
3565 ------------------------ Garbage Collection ------------------------
3566 */
3567
3568 /* Sweeping */
3569
3570 static size_t
3571 objspace_available_slots(rb_objspace_t *objspace)
3572 {
3573 return heap_eden->total_slots + heap_tomb->total_slots;
3574 }
3575
3576 static size_t
3577 objspace_live_slots(rb_objspace_t *objspace)
3578 {
3579 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
3580 }
3581
3582 static size_t
3583 objspace_free_slots(rb_objspace_t *objspace)
3584 {
3585 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
3586 }
3587
3588 static void
3589 gc_setup_mark_bits(struct heap_page *page)
3590 {
3591 #if USE_RGENGC
3592 /* copy oldgen bitmap to mark bitmap */
3593 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
3594 #else
3595 /* clear mark bitmap */
3596 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
3597 #endif
3598 }
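/* With RGenGC, a minor GC does not re-mark old-generation objects, so sweep
 * must not reclaim them: copying the page's uncollectible (oldgen) bitmap
 * into the mark bitmap makes every old object appear marked to
 * gc_page_sweep().  Without RGenGC the mark bitmap is simply cleared. */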
3599
3600 static inline int
3601 gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
3602 {
3603 int i;
3604 int empty_slots = 0, freed_slots = 0, final_slots = 0;
3605 RVALUE *p, *pend,*offset;
3606 bits_t *bits, bitset;
3607
3608 gc_report(2, objspace, "page_sweep: start.\n");
3609
3610 sweep_page->flags.before_sweep = FALSE;
3611
3612 p = sweep_page->start; pend = p + sweep_page->total_slots;
3613 offset = p - NUM_IN_PAGE(p);
3614 bits = sweep_page->mark_bits;
3615
3616     /* create a guard: set the out-of-range bits to 1 */
3617 bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
3618 bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
3619
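    /* Walk the mark bitmap one word at a time.  In the inverted word a set
     * bit denotes an unmarked slot: either already empty (T_NONE), already a
     * zombie, or garbage to be released through obj_free(). */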
3620 for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
3621 bitset = ~bits[i];
3622 if (bitset) {
3623 p = offset + i * BITS_BITLENGTH;
3624 do {
3625 unpoison_object((VALUE)p, false);
3626 if (bitset & 1) {
3627 switch (BUILTIN_TYPE(p)) {
3628 default: { /* majority case */
3629 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
3630 #if USE_RGENGC && RGENGC_CHECK_MODE
3631 if (!is_full_marking(objspace)) {
3632 if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
3633 if (rgengc_remembered(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);
3634 }
3635 #endif
3636 if (obj_free(objspace, (VALUE)p)) {
3637 final_slots++;
3638 }
3639 else {
3640 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
3641 heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
3642 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
3643 freed_slots++;
3644 poison_object((VALUE)p);
3645 }
3646 break;
3647 }
3648
3649 /* minor cases */
3650 case T_ZOMBIE:
3651 /* already counted */
3652 break;
3653 case T_NONE:
3654 empty_slots++; /* already freed */
3655 break;
3656 }
3657 }
3658 p++;
3659 bitset >>= 1;
3660 } while (bitset);
3661 }
3662 }
3663
3664 gc_setup_mark_bits(sweep_page);
3665
3666 #if GC_PROFILE_MORE_DETAIL
3667 if (gc_prof_enabled(objspace)) {
3668 gc_profile_record *record = gc_prof_record(objspace);
3669 record->removing_objects += final_slots + freed_slots;
3670 record->empty_objects += empty_slots;
3671 }
3672 #endif
3673 if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
3674 (int)rb_gc_count(),
3675 (int)sweep_page->total_slots,
3676 freed_slots, empty_slots, final_slots);
3677
3678 sweep_page->free_slots = freed_slots + empty_slots;
3679 objspace->profile.total_freed_objects += freed_slots;
3680 heap_pages_final_slots += final_slots;
3681 sweep_page->final_slots += final_slots;
3682
3683 if (heap_pages_deferred_final && !finalizing) {
3684 rb_thread_t *th = GET_THREAD();
3685 if (th) {
3686 gc_finalize_deferred_register(objspace);
3687 }
3688 }
3689
3690 gc_report(2, objspace, "page_sweep: end.\n");
3691
3692 return freed_slots + empty_slots;
3693 }
3694
3695 /* allocate additional minimum page to work */
3696 static void
3697 gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
3698 {
3699 if (!heap->free_pages && heap_increment(objspace, heap) == FALSE) {
3700 /* there are no free pages left after page_sweep() */
3701 heap_set_increment(objspace, 1);
3702 if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
3703 rb_memerror();
3704 }
3705 }
3706 }
3707
3708 static const char *
3709 gc_mode_name(enum gc_mode mode)
3710 {
3711 switch (mode) {
3712 case gc_mode_none: return "none";
3713 case gc_mode_marking: return "marking";
3714 case gc_mode_sweeping: return "sweeping";
3715 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
3716 }
3717 }
3718
3719 static void
3720 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
3721 {
3722 #if RGENGC_CHECK_MODE
3723 enum gc_mode prev_mode = gc_mode(objspace);
3724 switch (prev_mode) {
3725 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
3726 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
3727 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
3728 }
3729 #endif
3730 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
3731 gc_mode_set(objspace, mode);
3732 }
3733
3734 static void
3735 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
3736 {
3737 heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
3738 heap->free_pages = NULL;
3739 #if GC_ENABLE_INCREMENTAL_MARK
3740 heap->pooled_pages = NULL;
3741 objspace->rincgc.pooled_slots = 0;
3742 #endif
3743 if (heap->using_page) {
3744 RVALUE **p = &heap->using_page->freelist;
3745 while (*p) {
3746 p = &(*p)->as.free.next;
3747 }
3748 *p = heap->freelist;
3749 heap->using_page = NULL;
3750 }
3751 heap->freelist = NULL;
3752 }
3753
3754 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
3755 __attribute__((noinline))
3756 #endif
3757 static void
3758 gc_sweep_start(rb_objspace_t *objspace)
3759 {
3760 gc_mode_transition(objspace, gc_mode_sweeping);
3761 gc_sweep_start_heap(objspace, heap_eden);
3762 }
3763
3764 static void
3765 gc_sweep_finish(rb_objspace_t *objspace)
3766 {
3767 gc_report(1, objspace, "gc_sweep_finish\n");
3768
3769 gc_prof_set_heap_info(objspace);
3770 heap_pages_free_unused_pages(objspace);
3771
3772 /* if the tomb heap has unused pages, make them allocatable again */
3773 if (heap_allocatable_pages < heap_tomb->total_pages) {
3774 heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
3775 }
3776
3777 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
3778 gc_mode_transition(objspace, gc_mode_none);
3779
3780 #if RGENGC_CHECK_MODE >= 2
3781 gc_verify_internal_consistency(Qnil);
3782 #endif
3783 }
3784
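/* One lazy-sweep step: sweep pages starting at heap->sweeping_page until a
 * page with free slots can be handed out (or pooled, under incremental
 * marking).  Pages with no living objects may be moved to the tomb heap.
 * Returns TRUE while heap->free_pages is non-empty. */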
3785 static int
3786 gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
3787 {
3788 struct heap_page *sweep_page = heap->sweeping_page;
3789 int unlink_limit = 3;
3790 #if GC_ENABLE_INCREMENTAL_MARK
3791 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
3792
3793 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
3794 #else
3795 gc_report(2, objspace, "gc_sweep_step\n");
3796 #endif
3797
3798 if (sweep_page == NULL) return FALSE;
3799
3800 #if GC_ENABLE_LAZY_SWEEP
3801 gc_prof_sweep_timer_start(objspace);
3802 #endif
3803
3804 do {
3805 int free_slots = gc_page_sweep(objspace, heap, sweep_page);
3806 heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
3807
3808 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
3809 heap_pages_freeable_pages > 0 &&
3810 unlink_limit > 0) {
3811 heap_pages_freeable_pages--;
3812 unlink_limit--;
3813 /* there are no living objects -> move this page to tomb heap */
3814 heap_unlink_page(objspace, heap, sweep_page);
3815 heap_add_page(objspace, heap_tomb, sweep_page);
3816 }
3817 else if (free_slots > 0) {
3818 #if GC_ENABLE_INCREMENTAL_MARK
3819 if (need_pool) {
3820 if (heap_add_poolpage(objspace, heap, sweep_page)) {
3821 need_pool = FALSE;
3822 }
3823 }
3824 else {
3825 heap_add_freepage(objspace, heap, sweep_page);
3826 break;
3827 }
3828 #else
3829 heap_add_freepage(objspace, heap, sweep_page);
3830 break;
3831 #endif
3832 }
3833 else {
3834 sweep_page->free_next = NULL;
3835 }
3836 } while ((sweep_page = heap->sweeping_page));
3837
3838 if (!heap->sweeping_page) {
3839 gc_sweep_finish(objspace);
3840 }
3841
3842 #if GC_ENABLE_LAZY_SWEEP
3843 gc_prof_sweep_timer_stop(objspace);
3844 #endif
3845
3846 return heap->free_pages != NULL;
3847 }
3848
3849 static void
3850 gc_sweep_rest(rb_objspace_t *objspace)
3851 {
3852 rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */
3853
3854 while (has_sweeping_pages(heap)) {
3855 gc_sweep_step(objspace, heap);
3856 }
3857 }
3858
3859 static void
3860 gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
3861 {
3862 GC_ASSERT(dont_gc == FALSE);
3863 if (!GC_ENABLE_LAZY_SWEEP) return;
3864
3865 gc_enter(objspace, "sweep_continue");
3866 #if USE_RGENGC
3867 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
3868 gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
3869 }
3870 #endif
3871 gc_sweep_step(objspace, heap);
3872 gc_exit(objspace, "sweep_continue");
3873 }
3874
3875 static void
3876 gc_sweep(rb_objspace_t *objspace)
3877 {
3878 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
3879
3880 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
3881
3882 if (immediate_sweep) {
3883 #if !GC_ENABLE_LAZY_SWEEP
3884 gc_prof_sweep_timer_start(objspace);
3885 #endif
3886 gc_sweep_start(objspace);
3887 gc_sweep_rest(objspace);
3888 #if !GC_ENABLE_LAZY_SWEEP
3889 gc_prof_sweep_timer_stop(objspace);
3890 #endif
3891 }
3892 else {
3893 struct heap_page *page = NULL;
3894 gc_sweep_start(objspace);
3895
3896 list_for_each(&heap_eden->pages, page, page_node) {
3897 page->flags.before_sweep = TRUE;
3898 }
3899 gc_sweep_step(objspace, heap_eden);
3900 }
3901
3902 gc_heap_prepare_minimum_pages(objspace, heap_eden);
3903 }
3904
3905 /* Marking - Marking stack */
3906
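/* The mark stack is a singly linked list of fixed-size chunks
 * (STACK_CHUNK_SIZE slots each); a small cache of spare chunks is kept to
 * avoid repeated malloc/free while marking. */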
3907 static stack_chunk_t *
3908 stack_chunk_alloc(void)
3909 {
3910 stack_chunk_t *res;
3911
3912 res = malloc(sizeof(stack_chunk_t));
3913 if (!res)
3914 rb_memerror();
3915
3916 return res;
3917 }
3918
3919 static inline int
3920 is_mark_stack_empty(mark_stack_t *stack)
3921 {
3922 return stack->chunk == NULL;
3923 }
3924
3925 static size_t
3926 mark_stack_size(mark_stack_t *stack)
3927 {
3928 size_t size = stack->index;
3929 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
3930
3931 while (chunk) {
3932 size += stack->limit;
3933 chunk = chunk->next;
3934 }
3935 return size;
3936 }
3937
3938 static void
3939 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
3940 {
3941 chunk->next = stack->cache;
3942 stack->cache = chunk;
3943 stack->cache_size++;
3944 }
3945
3946 static void
3947 shrink_stack_chunk_cache(mark_stack_t *stack)
3948 {
3949 stack_chunk_t *chunk;
3950
3951 if (stack->unused_cache_size > (stack->cache_size/2)) {
3952 chunk = stack->cache;
3953 stack->cache = stack->cache->next;
3954 stack->cache_size--;
3955 free(chunk);
3956 }
3957 stack->unused_cache_size = stack->cache_size;
3958 }
3959
3960 static void
3961 push_mark_stack_chunk(mark_stack_t *stack)
3962 {
3963 stack_chunk_t *next;
3964
3965 GC_ASSERT(stack->index == stack->limit);
3966
3967 if (stack->cache_size > 0) {
3968 next = stack->cache;
3969 stack->cache = stack->cache->next;
3970 stack->cache_size--;
3971 if (stack->unused_cache_size > stack->cache_size)
3972 stack->unused_cache_size = stack->cache_size;
3973 }
3974 else {
3975 next = stack_chunk_alloc();
3976 }
3977 next->next = stack->chunk;
3978 stack->chunk = next;
3979 stack->index = 0;
3980 }
3981
3982 static void
3983 pop_mark_stack_chunk(mark_stack_t *stack)
3984 {
3985 stack_chunk_t *prev;
3986
3987 prev = stack->chunk->next;
3988 GC_ASSERT(stack->index == 0);
3989 add_stack_chunk_cache(stack, stack->chunk);
3990 stack->chunk = prev;
3991 stack->index = stack->limit;
3992 }
3993
3994 static void
3995 free_stack_chunks(mark_stack_t *stack)
3996 {
3997 stack_chunk_t *chunk = stack->chunk;
3998 stack_chunk_t *next = NULL;
3999
4000 while (chunk != NULL) {
4001 next = chunk->next;
4002 free(chunk);
4003 chunk = next;
4004 }
4005 }
4006
4007 static void
4008 push_mark_stack(mark_stack_t *stack, VALUE data)
4009 {
4010 if (stack->index == stack->limit) {
4011 push_mark_stack_chunk(stack);
4012 }
4013 stack->chunk->data[stack->index++] = data;
4014 }
4015
4016 static int
4017 pop_mark_stack(mark_stack_t *stack, VALUE *data)
4018 {
4019 if (is_mark_stack_empty(stack)) {
4020 return FALSE;
4021 }
4022 if (stack->index == 1) {
4023 *data = stack->chunk->data[--stack->index];
4024 pop_mark_stack_chunk(stack);
4025 }
4026 else {
4027 *data = stack->chunk->data[--stack->index];
4028 }
4029 return TRUE;
4030 }
4031
4032 #if GC_ENABLE_INCREMENTAL_MARK
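/* Replace a pending entry for obj on the mark stack with Qundef so that it
 * is simply skipped when popped (see gc_mark_stacked_objects). */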
4033 static int
4034 invalidate_mark_stack_chunk(stack_chunk_t *chunk, int limit, VALUE obj)
4035 {
4036 int i;
4037 for (i=0; i<limit; i++) {
4038 if (chunk->data[i] == obj) {
4039 chunk->data[i] = Qundef;
4040 return TRUE;
4041 }
4042 }
4043 return FALSE;
4044 }
4045
4046 static void
4047 invalidate_mark_stack(mark_stack_t *stack, VALUE obj)
4048 {
4049 stack_chunk_t *chunk = stack->chunk;
4050 int limit = stack->index;
4051
4052 while (chunk) {
4053 if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
4054 chunk = chunk->next;
4055 limit = stack->limit;
4056 }
4057 rb_bug("invalid_mark_stack: unreachable");
4058 }
4059 #endif
4060
4061 static void
4062 init_mark_stack(mark_stack_t *stack)
4063 {
4064 int i;
4065
4066 MEMZERO(stack, mark_stack_t, 1);
4067 stack->index = stack->limit = STACK_CHUNK_SIZE;
4068 stack->cache_size = 0;
4069
4070 for (i=0; i < 4; i++) {
4071 add_stack_chunk_cache(stack, stack_chunk_alloc());
4072 }
4073 stack->unused_cache_size = stack->cache_size;
4074 }
4075
4076 /* Marking */
4077
4078 #ifdef __ia64
4079 #define SET_STACK_END (SET_MACHINE_STACK_END(&ec->machine.stack_end), ec->machine.register_stack_end = rb_ia64_bsp())
4080 #else
4081 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
4082 #endif
4083
4084 #define STACK_START (ec->machine.stack_start)
4085 #define STACK_END (ec->machine.stack_end)
4086 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
4087
4088 #ifdef __EMSCRIPTEN__
4089 #undef STACK_GROW_DIRECTION
4090 #define STACK_GROW_DIRECTION 1
4091 #endif
4092
4093 #if STACK_GROW_DIRECTION < 0
4094 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
4095 #elif STACK_GROW_DIRECTION > 0
4096 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
4097 #else
4098 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
4099 : (size_t)(STACK_END - STACK_START + 1))
4100 #endif
4101 #if !STACK_GROW_DIRECTION
4102 int ruby_stack_grow_direction;
4103 int
4104 ruby_get_stack_grow_direction(volatile VALUE *addr)
4105 {
4106 VALUE *end;
4107 SET_MACHINE_STACK_END(&end);
4108
4109 if (end > addr) return ruby_stack_grow_direction = 1;
4110 return ruby_stack_grow_direction = -1;
4111 }
4112 #endif
4113
4114 size_t
4115 ruby_stack_length(VALUE **p)
4116 {
4117 rb_execution_context_t *ec = GET_EC();
4118 SET_STACK_END;
4119 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
4120 return STACK_LENGTH;
4121 }
4122
4123 #define PREVENT_STACK_OVERFLOW 1
4124 #ifndef PREVENT_STACK_OVERFLOW
4125 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
4126 # define PREVENT_STACK_OVERFLOW 1
4127 #else
4128 # define PREVENT_STACK_OVERFLOW 0
4129 #endif
4130 #endif
4131 #if PREVENT_STACK_OVERFLOW
4132 static int
4133 stack_check(rb_execution_context_t *ec, int water_mark)
4134 {
4135 int ret;
4136 SET_STACK_END;
4137 ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
4138 #ifdef __ia64
4139 if (!ret) {
4140 ret = (VALUE*)rb_ia64_bsp() - ec->machine.register_stack_start >
4141 ec->machine.register_stack_maxsize/sizeof(VALUE) - water_mark;
4142 }
4143 #endif
4144 return ret;
4145 }
4146 #else
4147 #define stack_check(ec, water_mark) FALSE
4148 #endif
4149
4150 #define STACKFRAME_FOR_CALL_CFUNC 838
4151
4152 MJIT_FUNC_EXPORTED int
4153 rb_ec_stack_check(rb_execution_context_t *ec)
4154 {
4155 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
4156 }
4157
4158 int
4159 ruby_stack_check(void)
4160 {
4161 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
4162 }
4163
4164 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n));
4165 static void
4166 mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
4167 {
4168 VALUE v;
4169 while (n--) {
4170 v = *x;
4171 gc_mark_maybe(objspace, v);
4172 x++;
4173 }
4174 }
4175
4176 static void
4177 gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end)
4178 {
4179 long n;
4180
4181 if (end <= start) return;
4182 n = end - start;
4183 mark_locations_array(objspace, start, n);
4184 }
4185
4186 void
4187 rb_gc_mark_locations(const VALUE *start, const VALUE *end)
4188 {
4189 gc_mark_locations(&rb_objspace, start, end);
4190 }
4191
4192 static void
4193 gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
4194 {
4195 long i;
4196
4197 for (i=0; i<n; i++) {
4198 gc_mark(objspace, values[i]);
4199 }
4200 }
4201
4202 void
4203 rb_gc_mark_values(long n, const VALUE *values)
4204 {
4205 rb_objspace_t *objspace = &rb_objspace;
4206 gc_mark_values(objspace, n, values);
4207 }
4208
4209 static int
4210 mark_entry(st_data_t key, st_data_t value, st_data_t data)
4211 {
4212 rb_objspace_t *objspace = (rb_objspace_t *)data;
4213 gc_mark(objspace, (VALUE)value);
4214 return ST_CONTINUE;
4215 }
4216
4217 static void
4218 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
4219 {
4220 if (!tbl || tbl->num_entries == 0) return;
4221 st_foreach(tbl, mark_entry, (st_data_t)objspace);
4222 }
4223
4224 static int
4225 mark_key(st_data_t key, st_data_t value, st_data_t data)
4226 {
4227 rb_objspace_t *objspace = (rb_objspace_t *)data;
4228 gc_mark(objspace, (VALUE)key);
4229 return ST_CONTINUE;
4230 }
4231
4232 static void
4233 mark_set(rb_objspace_t *objspace, st_table *tbl)
4234 {
4235 if (!tbl) return;
4236 st_foreach(tbl, mark_key, (st_data_t)objspace);
4237 }
4238
4239 void
4240 rb_mark_set(st_table *tbl)
4241 {
4242 mark_set(&rb_objspace, tbl);
4243 }
4244
4245 static int
4246 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
4247 {
4248 rb_objspace_t *objspace = (rb_objspace_t *)data;
4249
4250 gc_mark(objspace, (VALUE)key);
4251 gc_mark(objspace, (VALUE)value);
4252 return ST_CONTINUE;
4253 }
4254
4255 static void
4256 mark_hash(rb_objspace_t *objspace, VALUE hash)
4257 {
4258 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
4259
4260 if (RHASH_AR_TABLE_P(hash)) {
4261 if (objspace->mark_func_data == NULL && RHASH_TRANSIENT_P(hash)) {
4262 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
4263 }
4264 }
4265 else {
4266 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
4267 }
4268 gc_mark(objspace, RHASH(hash)->ifnone);
4269 }
4270
4271 static void
4272 mark_st(rb_objspace_t *objspace, st_table *tbl)
4273 {
4274 if (!tbl) return;
4275 st_foreach(tbl, mark_keyvalue, (st_data_t)objspace);
4276 }
4277
4278 void
4279 rb_mark_hash(st_table *tbl)
4280 {
4281 mark_st(&rb_objspace, tbl);
4282 }
4283
4284 static void
4285 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
4286 {
4287 const rb_method_definition_t *def = me->def;
4288
4289 gc_mark(objspace, me->owner);
4290 gc_mark(objspace, me->defined_class);
4291
4292 if (def) {
4293 switch (def->type) {
4294 case VM_METHOD_TYPE_ISEQ:
4295 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
4296 gc_mark(objspace, (VALUE)def->body.iseq.cref);
4297 break;
4298 case VM_METHOD_TYPE_ATTRSET:
4299 case VM_METHOD_TYPE_IVAR:
4300 gc_mark(objspace, def->body.attr.location);
4301 break;
4302 case VM_METHOD_TYPE_BMETHOD:
4303 gc_mark(objspace, def->body.bmethod.proc);
4304 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
4305 break;
4306 case VM_METHOD_TYPE_ALIAS:
4307 gc_mark(objspace, (VALUE)def->body.alias.original_me);
4308 return;
4309 case VM_METHOD_TYPE_REFINED:
4310 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
4311 gc_mark(objspace, (VALUE)def->body.refined.owner);
4312 break;
4313 case VM_METHOD_TYPE_CFUNC:
4314 case VM_METHOD_TYPE_ZSUPER:
4315 case VM_METHOD_TYPE_MISSING:
4316 case VM_METHOD_TYPE_OPTIMIZED:
4317 case VM_METHOD_TYPE_UNDEF:
4318 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4319 break;
4320 }
4321 }
4322 }
4323
4324 static enum rb_id_table_iterator_result
4325 mark_method_entry_i(VALUE me, void *data)
4326 {
4327 rb_objspace_t *objspace = (rb_objspace_t *)data;
4328
4329 gc_mark(objspace, me);
4330 return ID_TABLE_CONTINUE;
4331 }
4332
4333 static void
4334 mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4335 {
4336 if (tbl) {
4337 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
4338 }
4339 }
4340
4341 static enum rb_id_table_iterator_result
4342 mark_const_entry_i(VALUE value, void *data)
4343 {
4344 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
4345 rb_objspace_t *objspace = data;
4346
4347 gc_mark(objspace, ce->value);
4348 gc_mark(objspace, ce->file);
4349 return ID_TABLE_CONTINUE;
4350 }
4351
4352 static void
4353 mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4354 {
4355 if (!tbl) return;
4356 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
4357 }
4358
4359 #if STACK_GROW_DIRECTION < 0
4360 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
4361 #elif STACK_GROW_DIRECTION > 0
4362 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
4363 #else
4364 #define GET_STACK_BOUNDS(start, end, appendix) \
4365 ((STACK_END < STACK_START) ? \
4366 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
4367 #endif
4368
4369 static void mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
4370 const VALUE *stack_start, const VALUE *stack_end);
4371
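/* Conservatively mark the current machine context: spill the registers into
 * a jmp_buf via setjmp and scan them together with the machine stack. */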
4372 static void
4373 mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
4374 {
4375 union {
4376 rb_jmp_buf j;
4377 VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
4378 } save_regs_gc_mark;
4379 VALUE *stack_start, *stack_end;
4380
4381 FLUSH_REGISTER_WINDOWS;
4382 /* This assumes that all registers are saved into the jmp_buf (and stack) */
4383 rb_setjmp(save_regs_gc_mark.j);
4384
4385 /* SET_STACK_END must be called in this function because
4386 * the stack frame of this function may contain
4387 * callee save registers and they should be marked. */
4388 SET_STACK_END;
4389 GET_STACK_BOUNDS(stack_start, stack_end, 1);
4390
4391 mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
4392
4393 mark_stack_locations(objspace, ec, stack_start, stack_end);
4394 }
4395
4396 void
4397 rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
4398 {
4399 rb_objspace_t *objspace = &rb_objspace;
4400 VALUE *stack_start, *stack_end;
4401
4402 GET_STACK_BOUNDS(stack_start, stack_end, 0);
4403 mark_stack_locations(objspace, ec, stack_start, stack_end);
4404 }
4405
4406 static void
4407 mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
4408 const VALUE *stack_start, const VALUE *stack_end)
4409 {
4410
4411 gc_mark_locations(objspace, stack_start, stack_end);
4412 #ifdef __ia64
4413 gc_mark_locations(objspace,
4414 ec->machine.register_stack_start,
4415 ec->machine.register_stack_end);
4416 #endif
4417 #if defined(__mc68000__)
4418 gc_mark_locations(objspace,
4419 (VALUE*)((char*)stack_start + 2),
4420 (VALUE*)((char*)stack_end - 2));
4421 #endif
4422 }
4423
4424 void
4425 rb_mark_tbl(st_table *tbl)
4426 {
4427 mark_tbl(&rb_objspace, tbl);
4428 }
4429
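/* Conservative marking: mark obj only if it looks like a pointer into the
 * object heap and does not refer to a T_NONE/T_ZOMBIE slot.  Used for words
 * that may or may not be VALUEs (machine stack, registers, ifunc data). */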
4430 static void
4431 gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
4432 {
4433 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
4434 if (is_pointer_to_heap(objspace, (void *)obj)) {
4435 int type;
4436 void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
4437
4438 unpoison_object(obj, false);
4439 type = BUILTIN_TYPE(obj);
4440 if (type != T_ZOMBIE && type != T_NONE) {
4441 gc_mark_ptr(objspace, obj);
4442 }
4443 if (ptr) {
4444 poison_object(obj);
4445 }
4446 }
4447 }
4448
4449 void
4450 rb_gc_mark_maybe(VALUE obj)
4451 {
4452 gc_mark_maybe(&rb_objspace, obj);
4453 }
4454
4455 static inline int
4456 gc_mark_set(rb_objspace_t *objspace, VALUE obj)
4457 {
4458 if (RVALUE_MARKED(obj)) return 0;
4459 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
4460 return 1;
4461 }
4462
4463 #if USE_RGENGC
4464 static int
4465 gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
4466 {
4467 struct heap_page *page = GET_HEAP_PAGE(obj);
4468 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
4469
4470 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
4471 page->flags.has_uncollectible_shady_objects = TRUE;
4472 MARK_IN_BITMAP(uncollectible_bits, obj);
4473 objspace->rgengc.uncollectible_wb_unprotected_objects++;
4474
4475 #if RGENGC_PROFILE > 0
4476 objspace->profile.total_remembered_shady_object_count++;
4477 #if RGENGC_PROFILE >= 2
4478 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
4479 #endif
4480 #endif
4481 return TRUE;
4482 }
4483 else {
4484 return FALSE;
4485 }
4486 }
4487 #endif
4488
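/* Maintain the generational invariant while marking obj from
 * objspace->rgengc.parent_object: when the parent is old, a WB-unprotected
 * child is remembered, an already-marked young child is promoted to old,
 * and an unmarked young child is flagged as a promotion candidate. */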
4489 static void
4490 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
4491 {
4492 #if USE_RGENGC
4493 const VALUE old_parent = objspace->rgengc.parent_object;
4494
4495 if (old_parent) { /* parent object is old */
4496 if (RVALUE_WB_UNPROTECTED(obj)) {
4497 if (gc_remember_unprotected(objspace, obj)) {
4498 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
4499 }
4500 }
4501 else {
4502 if (!RVALUE_OLD_P(obj)) {
4503 if (RVALUE_MARKED(obj)) {
4504 /* An object pointed to from an OLD object should be OLD. */
4505 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
4506 RVALUE_AGE_SET_OLD(objspace, obj);
4507 if (is_incremental_marking(objspace)) {
4508 if (!RVALUE_MARKING(obj)) {
4509 gc_grey(objspace, obj);
4510 }
4511 }
4512 else {
4513 rgengc_remember(objspace, obj);
4514 }
4515 }
4516 else {
4517 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
4518 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
4519 }
4520 }
4521 }
4522 }
4523
4524 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
4525 #endif
4526 }
4527
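/* Push obj onto the mark stack; under incremental marking its marking bit
 * is also set (the object becomes grey). */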
4528 static void
4529 gc_grey(rb_objspace_t *objspace, VALUE obj)
4530 {
4531 #if RGENGC_CHECK_MODE
4532 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
4533 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
4534 #endif
4535
4536 #if GC_ENABLE_INCREMENTAL_MARK
4537 if (is_incremental_marking(objspace)) {
4538 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4539 }
4540 #endif
4541
4542 push_mark_stack(&objspace->mark_stack, obj);
4543 }
4544
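/* Age bookkeeping for a freshly marked object: WB-protected young objects
 * get their age incremented, and during a full mark old objects are flagged
 * as uncollectible on their page.  Also bumps objspace->marked_slots. */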
4545 static void
4546 gc_aging(rb_objspace_t *objspace, VALUE obj)
4547 {
4548 #if USE_RGENGC
4549 struct heap_page *page = GET_HEAP_PAGE(obj);
4550
4551 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
4552 check_rvalue_consistency(obj);
4553
4554 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
4555 if (!RVALUE_OLD_P(obj)) {
4556 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
4557 RVALUE_AGE_INC(objspace, obj);
4558 }
4559 else if (is_full_marking(objspace)) {
4560 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
4561 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
4562 }
4563 }
4564 check_rvalue_consistency(obj);
4565 #endif /* USE_RGENGC */
4566
4567 objspace->marked_slots++;
4568 }
4569
4570 NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
4571
4572 static void
4573 gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
4574 {
4575 if (LIKELY(objspace->mark_func_data == NULL)) {
4576 rgengc_check_relation(objspace, obj);
4577 if (!gc_mark_set(objspace, obj)) return; /* already marked */
4578 if (RB_TYPE_P(obj, T_NONE)) rb_bug("try to mark T_NONE object"); /* check here will help debugging */
4579 gc_aging(objspace, obj);
4580 gc_grey(objspace, obj);
4581 }
4582 else {
4583 objspace->mark_func_data->mark_func(obj, objspace->mark_func_data->data);
4584 }
4585 }
4586
4587 static inline void
4588 gc_mark(rb_objspace_t *objspace, VALUE obj)
4589 {
4590 if (!is_markable_object(objspace, obj)) return;
4591 gc_mark_ptr(objspace, obj);
4592 }
4593
4594 void
4595 rb_gc_mark(VALUE ptr)
4596 {
4597 gc_mark(&rb_objspace, ptr);
4598 }
4599
4600 /* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
4601  * It is intended solely for the GC_END_MARK timing.
4602 */
4603
4604 int
4605 rb_objspace_marked_object_p(VALUE obj)
4606 {
4607 return RVALUE_MARKED(obj) ? TRUE : FALSE;
4608 }
4609
4610 static inline void
4611 gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
4612 {
4613 #if USE_RGENGC
4614 if (RVALUE_OLD_P(obj)) {
4615 objspace->rgengc.parent_object = obj;
4616 }
4617 else {
4618 objspace->rgengc.parent_object = Qfalse;
4619 }
4620 #endif
4621 }
4622
4623 static void
4624 gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
4625 {
4626 switch (imemo_type(obj)) {
4627 case imemo_env:
4628 {
4629 const rb_env_t *env = (const rb_env_t *)obj;
4630 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
4631 gc_mark_values(objspace, (long)env->env_size, env->env);
4632 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
4633 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
4634 gc_mark(objspace, (VALUE)env->iseq);
4635 }
4636 return;
4637 case imemo_cref:
4638 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
4639 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
4640 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
4641 return;
4642 case imemo_svar:
4643 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
4644 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
4645 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
4646 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
4647 return;
4648 case imemo_throw_data:
4649 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
4650 return;
4651 case imemo_ifunc:
4652 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
4653 return;
4654 case imemo_memo:
4655 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
4656 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
4657 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
4658 return;
4659 case imemo_ment:
4660 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
4661 return;
4662 case imemo_iseq:
4663 rb_iseq_mark((rb_iseq_t *)obj);
4664 return;
4665 case imemo_tmpbuf:
4666 {
4667 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
4668 do {
4669 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
4670 } while ((m = m->next) != NULL);
4671 }
4672 return;
4673 case imemo_ast:
4674 rb_ast_mark(&RANY(obj)->as.imemo.ast);
4675 return;
4676 case imemo_parser_strterm:
4677 rb_strterm_mark(obj);
4678 return;
4679 #if VM_CHECK_MODE > 0
4680 default:
4681 VM_UNREACHABLE(gc_mark_imemo);
4682 #endif
4683 }
4684 }
4685
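/* Mark everything directly reachable from obj, dispatching on its built-in
 * type (class tables, array/hash/struct elements, typed data dmark
 * callbacks, and so on). */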
4686 static void
4687 gc_mark_children(rb_objspace_t *objspace, VALUE obj)
4688 {
4689 register RVALUE *any = RANY(obj);
4690 gc_mark_set_parent(objspace, obj);
4691
4692 if (FL_TEST(obj, FL_EXIVAR)) {
4693 rb_mark_generic_ivar(obj);
4694 }
4695
4696 switch (BUILTIN_TYPE(obj)) {
4697 case T_NIL:
4698 case T_FIXNUM:
4699 rb_bug("rb_gc_mark() called for broken object");
4700 break;
4701
4702 case T_NODE:
4703 UNEXPECTED_NODE(rb_gc_mark);
4704 break;
4705
4706 case T_IMEMO:
4707 gc_mark_imemo(objspace, obj);
4708 return;
4709 }
4710
4711 gc_mark(objspace, any->as.basic.klass);
4712
4713 switch (BUILTIN_TYPE(obj)) {
4714 case T_CLASS:
4715 case T_MODULE:
4716 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
4717 if (!RCLASS_EXT(obj)) break;
4718 mark_tbl(objspace, RCLASS_IV_TBL(obj));
4719 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
4720 gc_mark(objspace, RCLASS_SUPER((VALUE)obj));
4721 break;
4722
4723 case T_ICLASS:
4724 if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
4725 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
4726 }
4727 if (!RCLASS_EXT(obj)) break;
4728 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
4729 gc_mark(objspace, RCLASS_SUPER((VALUE)obj));
4730 break;
4731
4732 case T_ARRAY:
4733 if (FL_TEST(obj, ELTS_SHARED)) {
4734 VALUE root = any->as.array.as.heap.aux.shared;
4735 gc_mark(objspace, root);
4736 }
4737 else {
4738 long i, len = RARRAY_LEN(obj);
4739 const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
4740 for (i=0; i < len; i++) {
4741 gc_mark(objspace, ptr[i]);
4742 }
4743
4744 if (objspace->mark_func_data == NULL) {
4745 if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
4746 RARRAY_TRANSIENT_P(obj)) {
4747 rb_transient_heap_mark(obj, ptr);
4748 }
4749 }
4750 }
4751 break;
4752
4753 case T_HASH:
4754 mark_hash(objspace, obj);
4755 break;
4756
4757 case T_STRING:
4758 if (STR_SHARED_P(obj)) {
4759 gc_mark(objspace, any->as.string.as.heap.aux.shared);
4760 }
4761 break;
4762
4763 case T_DATA:
4764 {
4765 void *const ptr = DATA_PTR(obj);
4766 if (ptr) {
4767 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
4768 any->as.typeddata.type->function.dmark :
4769 any->as.data.dmark;
4770 if (mark_func) (*mark_func)(ptr);
4771 }
4772 }
4773 break;
4774
4775 case T_OBJECT:
4776 {
4777 const VALUE * const ptr = ROBJECT_IVPTR(obj);
4778
4779 if (ptr) {
4780 uint32_t i, len = ROBJECT_NUMIV(obj);
4781 for (i = 0; i < len; i++) {
4782 gc_mark(objspace, ptr[i]);
4783 }
4784
4785 if (objspace->mark_func_data == NULL &&
4786 ROBJ_TRANSIENT_P(obj)) {
4787 rb_transient_heap_mark(obj, ptr);
4788 }
4789 }
4790 }
4791 break;
4792
4793 case T_FILE:
4794 if (any->as.file.fptr) {
4795 gc_mark(objspace, any->as.file.fptr->pathv);
4796 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
4797 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
4798 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
4799 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
4800 gc_mark(objspace, any->as.file.fptr->write_lock);
4801 }
4802 break;
4803
4804 case T_REGEXP:
4805 gc_mark(objspace, any->as.regexp.src);
4806 break;
4807
4808 case T_FLOAT:
4809 case T_BIGNUM:
4810 case T_SYMBOL:
4811 break;
4812
4813 case T_MATCH:
4814 gc_mark(objspace, any->as.match.regexp);
4815 if (any->as.match.str) {
4816 gc_mark(objspace, any->as.match.str);
4817 }
4818 break;
4819
4820 case T_RATIONAL:
4821 gc_mark(objspace, any->as.rational.num);
4822 gc_mark(objspace, any->as.rational.den);
4823 break;
4824
4825 case T_COMPLEX:
4826 gc_mark(objspace, any->as.complex.real);
4827 gc_mark(objspace, any->as.complex.imag);
4828 break;
4829
4830 case T_STRUCT:
4831 {
4832 long i;
4833 const long len = RSTRUCT_LEN(obj);
4834 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
4835
4836 for (i=0; i<len; i++) {
4837 gc_mark(objspace, ptr[i]);
4838 }
4839
4840 if (objspace->mark_func_data == NULL &&
4841 RSTRUCT_TRANSIENT_P(obj)) {
4842 rb_transient_heap_mark(obj, ptr);
4843 }
4844 }
4845 break;
4846
4847 default:
4848 #if GC_DEBUG
4849 rb_gcdebug_print_obj_condition((VALUE)obj);
4850 #endif
4851 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
4852 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
4853 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
4854 BUILTIN_TYPE(obj), (void *)any,
4855 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
4856 }
4857 }
4858
4859 /**
4860 * incremental: 0 -> not incremental (do all)
4861 * incremental: n -> mark at most `n' objects
4862 */
4863 static inline int
4864 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
4865 {
4866 mark_stack_t *mstack = &objspace->mark_stack;
4867 VALUE obj;
4868 #if GC_ENABLE_INCREMENTAL_MARK
4869 size_t marked_slots_at_the_beginning = objspace->marked_slots;
4870 size_t popped_count = 0;
4871 #endif
4872
4873 while (pop_mark_stack(mstack, &obj)) {
4874 if (obj == Qundef) continue; /* skip */
4875
4876 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
4877 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
4878 }
4879 gc_mark_children(objspace, obj);
4880
4881 #if GC_ENABLE_INCREMENTAL_MARK
4882 if (incremental) {
4883 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
4884 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
4885 }
4886 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4887 popped_count++;
4888
4889 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
4890 break;
4891 }
4892 }
4893 else {
4894 /* just ignore marking bits */
4895 }
4896 #endif
4897 }
4898
4899 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(Qnil);
4900
4901 if (is_mark_stack_empty(mstack)) {
4902 shrink_stack_chunk_cache(mstack);
4903 return TRUE;
4904 }
4905 else {
4906 return FALSE;
4907 }
4908 }
4909
4910 static int
4911 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
4912 {
4913 return gc_mark_stacked_objects(objspace, TRUE, count);
4914 }
4915
4916 static int
4917 gc_mark_stacked_objects_all(rb_objspace_t *objspace)
4918 {
4919 return gc_mark_stacked_objects(objspace, FALSE, 0);
4920 }
4921
4922 #if PRINT_ROOT_TICKS
4923 #define MAX_TICKS 0x100
4924 static tick_t mark_ticks[MAX_TICKS];
4925 static const char *mark_ticks_categories[MAX_TICKS];
4926
4927 static void
4928 show_mark_ticks(void)
4929 {
4930 int i;
4931 fprintf(stderr, "mark ticks result:\n");
4932 for (i=0; i<MAX_TICKS; i++) {
4933 const char *category = mark_ticks_categories[i];
4934 if (category) {
4935 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
4936 }
4937 else {
4938 break;
4939 }
4940 }
4941 }
4942
4943 #endif /* PRINT_ROOT_TICKS */
4944
4945 static void
4946 gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
4947 {
4948 struct gc_list *list;
4949 rb_execution_context_t *ec = GET_EC();
4950 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4951
4952 #if PRINT_ROOT_TICKS
4953 tick_t start_tick = tick();
4954 int tick_count = 0;
4955 const char *prev_category = 0;
4956
4957 if (mark_ticks_categories[0] == 0) {
4958 atexit(show_mark_ticks);
4959 }
4960 #endif
4961
4962 if (categoryp) *categoryp = "xxx";
4963
4964 #if USE_RGENGC
4965 objspace->rgengc.parent_object = Qfalse;
4966 #endif
4967
4968 #if PRINT_ROOT_TICKS
4969 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \
4970 if (prev_category) { \
4971 tick_t t = tick(); \
4972 mark_ticks[tick_count] = t - start_tick; \
4973 mark_ticks_categories[tick_count] = prev_category; \
4974 tick_count++; \
4975 } \
4976 prev_category = category; \
4977 start_tick = tick(); \
4978 } while (0)
4979 #else /* PRINT_ROOT_TICKS */
4980 #define MARK_CHECKPOINT_PRINT_TICK(category)
4981 #endif
4982
4983 #define MARK_CHECKPOINT(category) do { \
4984 if (categoryp) *categoryp = category; \
4985 MARK_CHECKPOINT_PRINT_TICK(category); \
4986 } while (0)
4987
4988 MARK_CHECKPOINT("vm");
4989 SET_STACK_END;
4990 rb_vm_mark(vm);
4991 if (vm->self) gc_mark(objspace, vm->self);
4992
4993 MARK_CHECKPOINT("finalizers");
4994 mark_tbl(objspace, finalizer_table);
4995
4996 MARK_CHECKPOINT("machine_context");
4997 mark_current_machine_context(objspace, ec);
4998
4999 /* mark protected global variables */
5000 MARK_CHECKPOINT("global_list");
5001 for (list = global_list; list; list = list->next) {
5002 rb_gc_mark_maybe(*list->varptr);
5003 }
5004
5005 MARK_CHECKPOINT("end_proc");
5006 rb_mark_end_proc();
5007
5008 MARK_CHECKPOINT("global_tbl");
5009 rb_gc_mark_global_tbl();
5010
5011 if (stress_to_class) rb_gc_mark(stress_to_class);
5012
5013 MARK_CHECKPOINT("finish");
5014 #undef MARK_CHECKPOINT
5015 }
5016
5017 #if RGENGC_CHECK_MODE >= 4
5018
5019 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
5020 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
5021 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
5022
5023 struct reflist {
5024 VALUE *list;
5025 int pos;
5026 int size;
5027 };
5028
5029 static struct reflist *
5030 reflist_create(VALUE obj)
5031 {
5032 struct reflist *refs = xmalloc(sizeof(struct reflist));
5033 refs->size = 1;
5034 refs->list = ALLOC_N(VALUE, refs->size);
5035 refs->list[0] = obj;
5036 refs->pos = 1;
5037 return refs;
5038 }
5039
5040 static void
5041 reflist_destruct(struct reflist *refs)
5042 {
5043 xfree(refs->list);
5044 xfree(refs);
5045 }
5046
5047 static void
5048 reflist_add(struct reflist *refs, VALUE obj)
5049 {
5050 if (refs->pos == refs->size) {
5051 refs->size *= 2;
5052 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
5053 }
5054
5055 refs->list[refs->pos++] = obj;
5056 }
5057
5058 static void
5059 reflist_dump(struct reflist *refs)
5060 {
5061 int i;
5062 for (i=0; i<refs->pos; i++) {
5063 VALUE obj = refs->list[i];
5064 if (IS_ROOTSIG(obj)) { /* root */
5065 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
5066 }
5067 else {
5068 fprintf(stderr, "<%s>", obj_info(obj));
5069 }
5070 if (i+1 < refs->pos) fprintf(stderr, ", ");
5071 }
5072 }
5073
5074 static int
5075 reflist_referred_from_machine_context(struct reflist *refs)
5076 {
5077 int i;
5078 for (i=0; i<refs->pos; i++) {
5079 VALUE obj = refs->list[i];
5080 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
5081 }
5082 return 0;
5083 }
5084
5085 struct allrefs {
5086 rb_objspace_t *objspace;
5087 /* a -> obj1
5088 * b -> obj1
5089 * c -> obj1
5090 * c -> obj2
5091 * d -> obj3
5092 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
5093 */
5094 struct st_table *references;
5095 const char *category;
5096 VALUE root_obj;
5097 mark_stack_t mark_stack;
5098 };
5099
5100 static int
5101 allrefs_add(struct allrefs *data, VALUE obj)
5102 {
5103 struct reflist *refs;
5104
5105 if (st_lookup(data->references, obj, (st_data_t *)&refs)) {
5106 reflist_add(refs, data->root_obj);
5107 return 0;
5108 }
5109 else {
5110 refs = reflist_create(data->root_obj);
5111 st_insert(data->references, obj, (st_data_t)refs);
5112 return 1;
5113 }
5114 }
5115
5116 static void
5117 allrefs_i(VALUE obj, void *ptr)
5118 {
5119 struct allrefs *data = (struct allrefs *)ptr;
5120
5121 if (allrefs_add(data, obj)) {
5122 push_mark_stack(&data->mark_stack, obj);
5123 }
5124 }
5125
5126 static void
5127 allrefs_roots_i(VALUE obj, void *ptr)
5128 {
5129 struct allrefs *data = (struct allrefs *)ptr;
5130 if (strlen(data->category) == 0) rb_bug("!!!");
5131 data->root_obj = MAKE_ROOTSIG(data->category);
5132
5133 if (allrefs_add(data, obj)) {
5134 push_mark_stack(&data->mark_stack, obj);
5135 }
5136 }
5137
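/* Build a table mapping every reachable object to the list of roots and
 * objects that refer to it (see struct allrefs above).  GC stays disabled
 * while the table is built. */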
5138 static st_table *
5139 objspace_allrefs(rb_objspace_t *objspace)
5140 {
5141 struct allrefs data;
5142 struct mark_func_data_struct mfd;
5143 VALUE obj;
5144 int prev_dont_gc = dont_gc;
5145 dont_gc = TRUE;
5146
5147 data.objspace = objspace;
5148 data.references = st_init_numtable();
5149 init_mark_stack(&data.mark_stack);
5150
5151 mfd.mark_func = allrefs_roots_i;
5152 mfd.data = &data;
5153
5154 /* traverse root objects */
5155 PUSH_MARK_FUNC_DATA(&mfd);
5156 objspace->mark_func_data = &mfd;
5157 gc_mark_roots(objspace, &data.category);
5158 POP_MARK_FUNC_DATA();
5159
5160 /* traverse rest objects reachable from root objects */
5161 while (pop_mark_stack(&data.mark_stack, &obj)) {
5162 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
5163 }
5164 free_stack_chunks(&data.mark_stack);
5165
5166 dont_gc = prev_dont_gc;
5167 return data.references;
5168 }
5169
5170 static int
5171 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, void *ptr)
5172 {
5173 struct reflist *refs = (struct reflist *)value;
5174 reflist_destruct(refs);
5175 return ST_CONTINUE;
5176 }
5177
5178 static void
5179 objspace_allrefs_destruct(struct st_table *refs)
5180 {
5181 st_foreach(refs, objspace_allrefs_destruct_i, 0);
5182 st_free_table(refs);
5183 }
5184
5185 #if RGENGC_CHECK_MODE >= 5
5186 static int
5187 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
5188 {
5189 VALUE obj = (VALUE)k;
5190 struct reflist *refs = (struct reflist *)v;
5191 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
5192 reflist_dump(refs);
5193 fprintf(stderr, "\n");
5194 return ST_CONTINUE;
5195 }
5196
5197 static void
5198 allrefs_dump(rb_objspace_t *objspace)
5199 {
5200 fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
5201 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
5202 }
5203 #endif
5204
5205 static int
5206 gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
5207 {
5208 VALUE obj = k;
5209 struct reflist *refs = (struct reflist *)v;
5210 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
5211
5212 /* object should be marked or oldgen */
5213 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
5214 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
5215 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
5216 reflist_dump(refs);
5217
5218 if (reflist_referred_from_machine_context(refs)) {
5219 fprintf(stderr, " (marked from machine stack).\n");
5220 /* references found via the machine context can be false positives */
5221 }
5222 else {
5223 objspace->rgengc.error_count++;
5224 fprintf(stderr, "\n");
5225 }
5226 }
5227 return ST_CONTINUE;
5228 }
5229
5230 static void
5231 gc_marks_check(rb_objspace_t *objspace, int (*checker_func)(ANYARGS), const char *checker_name)
5232 {
5233 size_t saved_malloc_increase = objspace->malloc_params.increase;
5234 #if RGENGC_ESTIMATE_OLDMALLOC
5235 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
5236 #endif
5237 VALUE already_disabled = rb_gc_disable();
5238
5239 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
5240
5241 if (checker_func) {
5242 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
5243 }
5244
5245 if (objspace->rgengc.error_count > 0) {
5246 #if RGENGC_CHECK_MODE >= 5
5247 allrefs_dump(objspace);
5248 #endif
5249 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
5250 }
5251
5252 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
5253 objspace->rgengc.allrefs_table = 0;
5254
5255 if (already_disabled == Qfalse) rb_gc_enable();
5256 objspace->malloc_params.increase = saved_malloc_increase;
5257 #if RGENGC_ESTIMATE_OLDMALLOC
5258 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
5259 #endif
5260 }
5261 #endif /* RGENGC_CHECK_MODE >= 4 */
5262
5263 struct verify_internal_consistency_struct {
5264 rb_objspace_t *objspace;
5265 int err_count;
5266 size_t live_object_count;
5267 size_t zombie_object_count;
5268
5269 #if USE_RGENGC
5270 VALUE parent;
5271 size_t old_object_count;
5272 size_t remembered_shady_count;
5273 #endif
5274 };
5275
5276 #if USE_RGENGC
5277 static void
5278 check_generation_i(const VALUE child, void *ptr)
5279 {
5280 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5281 const VALUE parent = data->parent;
5282
5283 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
5284
5285 if (!RVALUE_OLD_P(child)) {
5286 if (!RVALUE_REMEMBERED(parent) &&
5287 !RVALUE_REMEMBERED(child) &&
5288 !RVALUE_UNCOLLECTIBLE(child)) {
5289 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
5290 data->err_count++;
5291 }
5292 }
5293 }
5294
5295 static void
5296 check_color_i(const VALUE child, void *ptr)
5297 {
5298 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5299 const VALUE parent = data->parent;
5300
5301 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
5302 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
5303 obj_info(parent), obj_info(child));
5304 data->err_count++;
5305 }
5306 }
5307 #endif
5308
5309 static void
5310 check_children_i(const VALUE child, void *ptr)
5311 {
5312 check_rvalue_consistency(child);
5313 }
5314
5315 static int
5316 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
5317 {
5318 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5319 VALUE obj;
5320 rb_objspace_t *objspace = data->objspace;
5321
5322 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
5323 if (is_live_object(objspace, obj)) {
5324 /* count objects */
5325 data->live_object_count++;
5326
5327 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
5328
5329 #if USE_RGENGC
5330 /* check health of children */
5331 data->parent = obj;
5332
5333 if (RVALUE_OLD_P(obj)) data->old_object_count++;
5334 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
5335
5336 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
5337 /* objects reachable from an oldgen object should be old or (young and remembered) */
5338 data->parent = obj;
5339 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
5340 }
5341
5342 if (is_incremental_marking(objspace)) {
5343 if (RVALUE_BLACK_P(obj)) {
5344 /* reachable objects from black objects should be black or grey objects */
5345 data->parent = obj;
5346 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
5347 }
5348 }
5349 #endif
5350 }
5351 else {
5352 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
5353 GC_ASSERT(RBASIC(obj)->flags == T_ZOMBIE);
5354 data->zombie_object_count++;
5355 }
5356 }
5357 }
5358
5359 return 0;
5360 }
5361
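/* Cross-check a page's summary flags (has_remembered_objects,
 * has_uncollectible_shady_objects, final_slots) against the per-object
 * bitmaps; returns the number of remembered old objects on the page. */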
5362 static int
5363 gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
5364 {
5365 #if USE_RGENGC
5366 int i;
5367 unsigned int has_remembered_shady = FALSE;
5368 unsigned int has_remembered_old = FALSE;
5369 int remembered_old_objects = 0;
5370 int free_objects = 0;
5371 int zombie_objects = 0;
5372
5373 for (i=0; i<page->total_slots; i++) {
5374 VALUE val = (VALUE)&page->start[i];
5375 if (RBASIC(val) == 0) free_objects++;
5376 if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
5377 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
5378 has_remembered_shady = TRUE;
5379 }
5380 if (RVALUE_PAGE_MARKING(page, val)) {
5381 has_remembered_old = TRUE;
5382 remembered_old_objects++;
5383 }
5384 }
5385
5386 if (!is_incremental_marking(objspace) &&
5387 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
5388
5389 for (i=0; i<page->total_slots; i++) {
5390 VALUE val = (VALUE)&page->start[i];
5391 if (RVALUE_PAGE_MARKING(page, val)) {
5392 fprintf(stderr, "marking -> %s\n", obj_info(val));
5393 }
5394 }
5395 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
5396 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
5397 }
5398
5399 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
5400 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
5401 (void *)page, obj ? obj_info(obj) : "");
5402 }
5403
5404 if (0) {
5405 /* free_slots may not be equal to free_objects */
5406 if (page->free_slots != free_objects) {
5407 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
5408 }
5409 }
5410 if (page->final_slots != zombie_objects) {
5411 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
5412 }
5413
5414 return remembered_old_objects;
5415 #else
5416 return 0;
5417 #endif
5418 }
5419
5420 static int
5421 gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
5422 {
5423 int remembered_old_objects = 0;
5424 struct heap_page *page = 0;
5425
5426 list_for_each(head, page, page_node) {
5427 if (page->flags.has_remembered_objects == FALSE) {
5428 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
5429 }
5430 }
5431
5432 return remembered_old_objects;
5433 }
5434
5435 static int
5436 gc_verify_heap_pages(rb_objspace_t *objspace)
5437 {
5438 int remembered_old_objects = 0;
5439 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
5440 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
5441 return remembered_old_objects;
5442 }
5443
5444 /*
5445 * call-seq:
5446 * GC.verify_internal_consistency -> nil
5447 *
5448 * Verify internal consistency.
5449 *
5450 * This method is implementation-specific.
5451 * Currently, it checks generational consistency
5452 * when RGenGC is supported.
5453 */
5454 static VALUE
5455 gc_verify_internal_consistency(VALUE dummy)
5456 {
5457 rb_objspace_t *objspace = &rb_objspace;
5458 struct verify_internal_consistency_struct data = {0};
5459 struct each_obj_args eo_args;
5460
5461 data.objspace = objspace;
5462 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
5463
5464 /* check relations */
5465
5466 eo_args.callback = verify_internal_consistency_i;
5467 eo_args.data = (void *)&data;
5468 objspace_each_objects((VALUE)&eo_args);
5469
5470 if (data.err_count != 0) {
5471 #if RGENGC_CHECK_MODE >= 5
5472 objspace->rgengc.error_count = data.err_count;
5473 gc_marks_check(objspace, NULL, NULL);
5474 allrefs_dump(objspace);
5475 #endif
5476 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
5477 }
5478
5479 /* check heap_page status */
5480 gc_verify_heap_pages(objspace);
5481
5482 /* check counters */
5483
5484 if (!is_lazy_sweeping(heap_eden) && !finalizing) {
5485 if (objspace_live_slots(objspace) != data.live_object_count) {
5486 fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
5487 (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects);
5488 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace_live_slots(objspace), data.live_object_count);
5489 }
5490 }
5491
5492 #if USE_RGENGC
5493 if (!is_marking(objspace)) {
5494 if (objspace->rgengc.old_objects != data.old_object_count) {
5495 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.old_objects, data.old_object_count);
5496 }
5497 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
5498 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
5499 }
5500 }
5501 #endif
5502
5503 if (!finalizing) {
5504 size_t list_count = 0;
5505
5506 {
5507 VALUE z = heap_pages_deferred_final;
5508 while (z) {
5509 list_count++;
5510 z = RZOMBIE(z)->next;
5511 }
5512 }
5513
5514 if (heap_pages_final_slots != data.zombie_object_count ||
5515 heap_pages_final_slots != list_count) {
5516
5517 rb_bug("inconsistent finalizing object count:\n"
5518 " expect %"PRIuSIZE"\n"
5519 " but %"PRIuSIZE" zombies\n"
5520 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
5521 heap_pages_final_slots,
5522 data.zombie_object_count,
5523 list_count);
5524 }
5525 }
5526
5527 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
5528
5529 return Qnil;
5530 }
5531
5532 void
5533 rb_gc_verify_internal_consistency(void)
5534 {
5535 gc_verify_internal_consistency(Qnil);
5536 }
5537
5538 static VALUE
5539 gc_verify_transient_heap_internal_consistency(VALUE dmy)
5540 {
5541 rb_transient_heap_verify();
5542 return Qnil;
5543 }
5544
5545 /* marks */
5546
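/* Begin a mark phase.  A full mark resets the old/uncollectible counters
 * and clears the mark and remember bitmaps; a minor mark starts from the
 * remembered set.  Both then mark the VM roots. */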
5547 static void
5548 gc_marks_start(rb_objspace_t *objspace, int full_mark)
5549 {
5550 /* start marking */
5551 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
5552 gc_mode_transition(objspace, gc_mode_marking);
5553
5554 #if USE_RGENGC
5555 if (full_mark) {
5556 #if GC_ENABLE_INCREMENTAL_MARK
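/* step_slots: rough number of slots to scan per incremental marking step,
 * derived from the slots marked in the previous cycle and the number of
 * pooled pages (consumed by gc_marks_step()/gc_marks_continue() below). */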
5557 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
5558
5559 if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
5560 (int)objspace->marked_slots, (int)objspace->rincgc.pooled_slots, (int)objspace->rincgc.step_slots);
5561 #endif
5562 objspace->flags.during_minor_gc = FALSE;
5563 objspace->profile.major_gc_count++;
5564 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
5565 objspace->rgengc.old_objects = 0;
5566 objspace->rgengc.last_major_gc = objspace->profile.count;
5567 objspace->marked_slots = 0;
5568 rgengc_mark_and_rememberset_clear(objspace, heap_eden);
5569 }
5570 else {
5571 objspace->flags.during_minor_gc = TRUE;
5572 objspace->marked_slots =
5573 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
5574 objspace->profile.minor_gc_count++;
5575 rgengc_rememberset_mark(objspace, heap_eden);
5576 }
5577 #endif
5578
5579 gc_mark_roots(objspace, NULL);
5580
5581 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %d\n", full_mark ? "full" : "minor", (int)mark_stack_size(&objspace->mark_stack));
5582 }
5583
5584 #if GC_ENABLE_INCREMENTAL_MARK
5585 static void
5586 gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
5587 {
5588 struct heap_page *page = 0;
5589
5590 list_for_each(&heap_eden->pages, page, page_node) {
5591 bits_t *mark_bits = page->mark_bits;
5592 bits_t *wbun_bits = page->wb_unprotected_bits;
5593 RVALUE *p = page->start;
5594 RVALUE *offset = p - NUM_IN_PAGE(p);
5595 size_t j;
5596
5597 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
5598 bits_t bits = mark_bits[j] & wbun_bits[j];
5599
5600 if (bits) {
5601 p = offset + j * BITS_BITLENGTH;
5602
5603 do {
5604 if (bits & 1) {
5605 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
5606 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
5607 GC_ASSERT(RVALUE_MARKED((VALUE)p));
5608 gc_mark_children(objspace, (VALUE)p);
5609 }
5610 p++;
5611 bits >>= 1;
5612 } while (bits);
5613 }
5614 }
5615 }
5616
5617 gc_mark_stacked_objects_all(objspace);
5618 }
5619
5620 static struct heap_page *
5621 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
5622 {
5623 struct heap_page *page = heap->pooled_pages;
5624
5625 if (page) {
5626 heap->pooled_pages = page->free_next;
5627 page->free_next = heap->free_pages;
5628 heap->free_pages = page;
5629 }
5630
5631 return page;
5632 }
5633 #endif
5634
5635 static int
5636 gc_marks_finish(rb_objspace_t *objspace)
5637 {
5638 #if GC_ENABLE_INCREMENTAL_MARK
5639 /* finish incremental GC */
5640 if (is_incremental_marking(objspace)) {
5641 if (heap_eden->pooled_pages) {
5642 heap_move_pooled_pages_to_free_pages(heap_eden);
5643 gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");
5644 return FALSE; /* continue marking phase */
5645 }
5646
5647 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
5648 rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
5649 }
5650
5651 gc_mark_roots(objspace, 0);
5652
5653 if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
5654 gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
5655 return FALSE;
5656 }
5657
5658 #if RGENGC_CHECK_MODE >= 2
5659 if (gc_verify_heap_pages(objspace) != 0) {
5660 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
5661 }
5662 #endif
5663
5664 objspace->flags.during_incremental_marking = FALSE;
5665 /* check children of all marked wb-unprotected objects */
5666 gc_marks_wb_unprotected_objects(objspace);
5667 }
5668 #endif /* GC_ENABLE_INCREMENTAL_MARK */
5669
5670 #if RGENGC_CHECK_MODE >= 2
5671 gc_verify_internal_consistency(Qnil);
5672 #endif
5673
5674 #if USE_RGENGC
5675 if (is_full_marking(objspace)) {
5676 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
5677 const double r = gc_params.oldobject_limit_factor;
5678 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
5679 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
5680 }
5681 #endif
5682
5683 #if RGENGC_CHECK_MODE >= 4
5684 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
5685 #endif
5686
5687 {
5688 /* decide whether a full GC is needed */
5689 rb_heap_t *heap = heap_eden;
5690 size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots;
5691 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
5692 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
5693 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5694 int full_marking = is_full_marking(objspace);
5695
5696 GC_ASSERT(heap->total_slots >= objspace->marked_slots);
5697
5698 /* setup free-able page counts */
5699 if (max_free_slots < gc_params.heap_init_slots) max_free_slots = gc_params.heap_init_slots;
5700
5701 if (sweep_slots > max_free_slots) {
5702 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
5703 }
5704 else {
5705 heap_pages_freeable_pages = 0;
5706 }
5707
5708 /* check free_min */
5709 if (min_free_slots < gc_params.heap_free_slots) min_free_slots = gc_params.heap_free_slots;
5710
5711 #if USE_RGENGC
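/* Too few slots will be reclaimed by the coming sweep: either extend the
 * heap now or, when this minor GC follows an old enough major GC, request
 * a major GC next time (GPR_FLAG_MAJOR_BY_NOFREE). */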
5712 if (sweep_slots < min_free_slots) {
5713 if (!full_marking) {
5714 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5715 full_marking = TRUE;
5716 /* do not update last_major_gc, because full marking is not done. */
5717 goto increment;
5718 }
5719 else {
5720 gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
5721 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5722 }
5723 }
5724 else {
5725 increment:
5726 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
5727 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
5728 heap_increment(objspace, heap);
5729 }
5730 }
5731
5732 if (full_marking) {
5733 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
5734 const double r = gc_params.oldobject_limit_factor;
5735 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
5736 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
5737 }
5738
5739 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
5740 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
5741 }
5742 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
5743 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
5744 }
5745 if (RGENGC_FORCE_MAJOR_GC) {
5746 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
5747 }
5748
5749 gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
5750 (int)objspace->marked_slots, (int)objspace->rgengc.old_objects, (int)heap->total_slots, (int)sweep_slots, (int)heap_allocatable_pages,
5751 objspace->rgengc.need_major_gc ? "major" : "minor");
5752 #else /* USE_RGENGC */
5753 if (sweep_slots < min_free_slots) {
5754 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
5755 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
5756 heap_increment(objspace, heap);
5757 }
5758 #endif
5759 }
5760
5761 rb_transient_heap_finish_marking();
5762
5763 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
5764
5765 return TRUE;
5766 }
5767
5768 static void
5769 gc_marks_step(rb_objspace_t *objspace, int slots)
5770 {
5771 #if GC_ENABLE_INCREMENTAL_MARK
5772 GC_ASSERT(is_marking(objspace));
5773
5774 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
5775 if (gc_marks_finish(objspace)) {
5776 /* finish */
5777 gc_sweep(objspace);
5778 }
5779 }
5780 if (0) fprintf(stderr, "objspace->marked_slots: %d\n", (int)objspace->marked_slots);
5781 #endif
5782 }
5783
5784 static void
5785 gc_marks_rest(rb_objspace_t *objspace)
5786 {
5787 gc_report(1, objspace, "gc_marks_rest\n");
5788
5789 #if GC_ENABLE_INCREMENTAL_MARK
5790 heap_eden->pooled_pages = NULL;
5791 #endif
5792
5793 if (is_incremental_marking(objspace)) {
5794 do {
5795 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
5796 } while (gc_marks_finish(objspace) == FALSE);
5797 }
5798 else {
5799 gc_mark_stacked_objects_all(objspace);
5800 gc_marks_finish(objspace);
5801 }
5802
5803 /* move to sweep */
5804 gc_sweep(objspace);
5805 }
5806
5807 static void
5808 gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
5809 {
5810 GC_ASSERT(dont_gc == FALSE);
5811 #if GC_ENABLE_INCREMENTAL_MARK
5812
5813 gc_enter(objspace, "marks_continue");
5814
5815 PUSH_MARK_FUNC_DATA(NULL);
5816 {
5817 int slots = 0;
5818 const char *from;
5819
5820 if (heap->pooled_pages) {
5821 while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
5822 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
5823 slots += page->free_slots;
5824 }
5825 from = "pooled-pages";
5826 }
5827 else if (heap_increment(objspace, heap)) {
5828 slots = heap->free_pages->free_slots;
5829 from = "incremented-pages";
5830 }
5831
5832 if (slots > 0) {
5833 gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
5834 gc_marks_step(objspace, (int)objspace->rincgc.step_slots);
5835 }
5836 else {
5837 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
5838 gc_marks_rest(objspace);
5839 }
5840 }
5841 POP_MARK_FUNC_DATA();
5842
5843 gc_exit(objspace, "marks_continue");
5844 #endif
5845 }
5846
5847 static void
5848 gc_marks(rb_objspace_t *objspace, int full_mark)
5849 {
5850 gc_prof_mark_timer_start(objspace);
5851
5852 PUSH_MARK_FUNC_DATA(NULL);
5853 {
5854 /* setup marking */
5855
5856 #if USE_RGENGC
5857 gc_marks_start(objspace, full_mark);
5858 if (!is_incremental_marking(objspace)) {
5859 gc_marks_rest(objspace);
5860 }
5861
5862 #if RGENGC_PROFILE > 0
5863 if (gc_prof_record(objspace)) {
5864 gc_profile_record *record = gc_prof_record(objspace);
5865 record->old_objects = objspace->rgengc.old_objects;
5866 }
5867 #endif
5868
5869 #else /* USE_RGENGC */
5870 gc_marks_start(objspace, TRUE);
5871 gc_marks_rest(objspace);
5872 #endif
5873 }
5874 POP_MARK_FUNC_DATA();
5875 gc_prof_mark_timer_stop(objspace);
5876 }
5877
5878 /* RGENGC */
5879
5880 static void
5881 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
5882 {
5883 if (level <= RGENGC_DEBUG) {
5884 char buf[1024];
5885 FILE *out = stderr;
5886 va_list args;
5887 const char *status = " ";
5888
5889 #if USE_RGENGC
5890 if (during_gc) {
5891 status = is_full_marking(objspace) ? "+" : "-";
5892 }
5893 else {
5894 if (is_lazy_sweeping(heap_eden)) {
5895 status = "S";
5896 }
5897 if (is_incremental_marking(objspace)) {
5898 status = "M";
5899 }
5900 }
5901 #endif
5902
5903 va_start(args, fmt);
5904 vsnprintf(buf, 1024, fmt, args);
5905 va_end(args);
5906
5907 fprintf(out, "%s|", status);
5908 fputs(buf, out);
5909 }
5910 }
5911
5912 #if USE_RGENGC
5913
5914 /* bit operations */
5915
5916 static int
5917 rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
5918 {
5919 return RVALUE_REMEMBERED(obj);
5920 }
5921
5922 static int
5923 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
5924 {
5925 struct heap_page *page = GET_HEAP_PAGE(obj);
5926 bits_t *bits = &page->marking_bits[0];
5927
5928 GC_ASSERT(!is_incremental_marking(objspace));
5929
5930 if (MARKED_IN_BITMAP(bits, obj)) {
5931 return FALSE;
5932 }
5933 else {
5934 page->flags.has_remembered_objects = TRUE;
5935 MARK_IN_BITMAP(bits, obj);
5936 return TRUE;
5937 }
5938 }
5939
5940 /* wb, etc */
5941
5942 /* return FALSE if already remembered */
5943 static int
5944 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
5945 {
5946 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
5947 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
5948
5949 check_rvalue_consistency(obj);
5950
5951 if (RGENGC_CHECK_MODE) {
5952 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
5953 }
5954
5955 #if RGENGC_PROFILE > 0
5956 if (!rgengc_remembered(objspace, obj)) {
5957 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
5958 objspace->profile.total_remembered_normal_object_count++;
5959 #if RGENGC_PROFILE >= 2
5960 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
5961 #endif
5962 }
5963 }
5964 #endif /* RGENGC_PROFILE > 0 */
5965
5966 return rgengc_remembersetbits_set(objspace, obj);
5967 }
5968
5969 static int
5970 rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
5971 {
5972 int result = rgengc_remembersetbits_get(objspace, obj);
5973 check_rvalue_consistency(obj);
5974 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
5975 return result;
5976 }
5977
5978 #ifndef PROFILE_REMEMBERSET_MARK
5979 #define PROFILE_REMEMBERSET_MARK 0
5980 #endif
5981
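/* At the start of a minor GC, scan each page holding remembered or
 * uncollectible WB-unprotected objects and mark their children, so that
 * references from old/uncollectible objects into the young generation are
 * found without a full mark.  The per-page marking (remembered) bits are
 * cleared as they are consumed. */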
5982 static void
5983 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
5984 {
5985 size_t j;
5986 struct heap_page *page = 0;
5987 #if PROFILE_REMEMBERSET_MARK
5988 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
5989 #endif
5990 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
5991
5992 list_for_each(&heap->pages, page, page_node) {
5993 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
5994 RVALUE *p = page->start;
5995 RVALUE *offset = p - NUM_IN_PAGE(p);
5996 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
5997 bits_t *marking_bits = page->marking_bits;
5998 bits_t *uncollectible_bits = page->uncollectible_bits;
5999 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
6000 #if PROFILE_REMEMBERSET_MARK
6001 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
6002 else if (page->flags.has_remembered_objects) has_old++;
6003 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
6004 #endif
6005 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
6006 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
6007 marking_bits[j] = 0;
6008 }
6009 page->flags.has_remembered_objects = FALSE;
6010
6011 for (j=0; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
6012 bitset = bits[j];
6013
6014 if (bitset) {
6015 p = offset + j * BITS_BITLENGTH;
6016
6017 do {
6018 if (bitset & 1) {
6019 VALUE obj = (VALUE)p;
6020 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
6021 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
6022 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
6023
6024 gc_mark_children(objspace, obj);
6025 }
6026 p++;
6027 bitset >>= 1;
6028 } while (bitset);
6029 }
6030 }
6031 }
6032 #if PROFILE_REMEMBERSET_MARK
6033 else {
6034 skip++;
6035 }
6036 #endif
6037 }
6038
6039 #if PROFILE_REMEMBERSET_MARK
6040 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
6041 #endif
6042 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
6043 }
6044
6045 static void
6046 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
6047 {
6048 struct heap_page *page = 0;
6049
6050 list_for_each(&heap->pages, page, page_node) {
6051 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6052 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6053 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6054 page->flags.has_uncollectible_shady_objects = FALSE;
6055 page->flags.has_remembered_objects = FALSE;
6056 }
6057 }
6058
6059 /* RGENGC: APIs */
6060
6061 NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
6062
6063 static void
6064 gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
6065 {
6066 if (RGENGC_CHECK_MODE) {
6067 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
6068 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
6069 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
6070 }
6071
6072 #if 1
6073 /* mark `a' and remember (default behavior) */
6074 if (!rgengc_remembered(objspace, a)) {
6075 rgengc_remember(objspace, a);
6076 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
6077 }
6078 #else
6079 /* mark `b' and remember */
6080 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
6081 if (RVALUE_WB_UNPROTECTED(b)) {
6082 gc_remember_unprotected(objspace, b);
6083 }
6084 else {
6085 RVALUE_AGE_SET_OLD(objspace, b);
6086 rgengc_remember(objspace, b);
6087 }
6088
6089 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
6090 #endif
6091
6092 check_rvalue_consistency(a);
6093 check_rvalue_consistency(b);
6094 }
6095
6096 #if GC_ENABLE_INCREMENTAL_MARK
6097 static void
6098 gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
6099 {
6100 gc_mark_set_parent(objspace, parent);
6101 rgengc_check_relation(objspace, obj);
6102 if (gc_mark_set(objspace, obj) == FALSE) return;
6103 gc_aging(objspace, obj);
6104 gc_grey(objspace, obj);
6105 }
6106
6107 NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
6108
6109 static void
6110 gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
6111 {
6112 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
6113
6114 if (RVALUE_BLACK_P(a)) {
6115 if (RVALUE_WHITE_P(b)) {
6116 if (!RVALUE_WB_UNPROTECTED(a)) {
6117 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
6118 gc_mark_from(objspace, b, a);
6119 }
6120 }
6121 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
6122 if (!RVALUE_WB_UNPROTECTED(b)) {
6123 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
6124 RVALUE_AGE_SET_OLD(objspace, b);
6125
6126 if (RVALUE_BLACK_P(b)) {
6127 gc_grey(objspace, b);
6128 }
6129 }
6130 else {
6131 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
6132 gc_remember_unprotected(objspace, b);
6133 }
6134 }
6135 }
6136 }
6137 #else
6138 #define gc_writebarrier_incremental(a, b, objspace)
6139 #endif
6140
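/* Write barrier: called when a reference from `a' to `b' is created.
 * Outside incremental marking only an old -> young edge matters, so `a' is
 * simply remembered; during incremental marking the slow path keeps the
 * tri-color invariant (see gc_writebarrier_incremental()). */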
6141 void
6142 rb_gc_writebarrier(VALUE a, VALUE b)
6143 {
6144 rb_objspace_t *objspace = &rb_objspace;
6145
6146 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
6147 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
6148
6149 if (!is_incremental_marking(objspace)) {
6150 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
6151 return;
6152 }
6153 else {
6154 gc_writebarrier_generational(a, b, objspace);
6155 }
6156 }
6157 else { /* slow path */
6158 gc_writebarrier_incremental(a, b, objspace);
6159 }
6160 }
6161
6162 void
6163 rb_gc_writebarrier_unprotect(VALUE obj)
6164 {
6165 if (RVALUE_WB_UNPROTECTED(obj)) {
6166 return;
6167 }
6168 else {
6169 rb_objspace_t *objspace = &rb_objspace;
6170
6171 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
6172 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
6173
6174 if (RVALUE_OLD_P(obj)) {
6175 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
6176 RVALUE_DEMOTE(objspace, obj);
6177 gc_mark_set(objspace, obj);
6178 gc_remember_unprotected(objspace, obj);
6179
6180 #if RGENGC_PROFILE
6181 objspace->profile.total_shade_operation_count++;
6182 #if RGENGC_PROFILE >= 2
6183 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
6184 #endif /* RGENGC_PROFILE >= 2 */
6185 #endif /* RGENGC_PROFILE */
6186 }
6187 else {
6188 RVALUE_AGE_RESET(obj);
6189 }
6190
6191 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
6192 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
6193 }
6194 }
6195
6196 /*
6197 * remember `obj' if needed.
6198 */
6199 MJIT_FUNC_EXPORTED void
6200 rb_gc_writebarrier_remember(VALUE obj)
6201 {
6202 rb_objspace_t *objspace = &rb_objspace;
6203
6204 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
6205
6206 if (is_incremental_marking(objspace)) {
6207 if (RVALUE_BLACK_P(obj)) {
6208 gc_grey(objspace, obj);
6209 }
6210 }
6211 else {
6212 if (RVALUE_OLD_P(obj)) {
6213 rgengc_remember(objspace, obj);
6214 }
6215 }
6216 }
6217
6218 static st_table *rgengc_unprotect_logging_table;
6219
6220 static int
6221 rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
6222 {
6223 fprintf(stderr, "%s\t%d\n", (char *)key, (int)val);
6224 return ST_CONTINUE;
6225 }
6226
6227 static void
6228 rgengc_unprotect_logging_exit_func(void)
6229 {
6230 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
6231 }
6232
6233 void
6234 rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
6235 {
6236 VALUE obj = (VALUE)objptr;
6237
6238 if (rgengc_unprotect_logging_table == 0) {
6239 rgengc_unprotect_logging_table = st_init_strtable();
6240 atexit(rgengc_unprotect_logging_exit_func);
6241 }
6242
6243 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6244 char buff[0x100];
6245 st_data_t cnt = 1;
6246 char *ptr = buff;
6247
6248 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
6249
6250 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
6251 cnt++;
6252 }
6253 else {
6254 ptr = (strdup)(buff);
6255 if (!ptr) rb_memerror();
6256 }
6257 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
6258 }
6259 }
6260 #endif /* USE_RGENGC */
6261
6262 void
6263 rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
6264 {
6265 #if USE_RGENGC
6266 rb_objspace_t *objspace = &rb_objspace;
6267
6268 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
6269 if (!RVALUE_OLD_P(dest)) {
6270 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
6271 RVALUE_AGE_RESET_RAW(dest);
6272 }
6273 else {
6274 RVALUE_DEMOTE(objspace, dest);
6275 }
6276 }
6277
6278 check_rvalue_consistency(dest);
6279 #endif
6280 }
6281
6282 /* RGENGC analysis information */
6283
6284 VALUE
6285 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
6286 {
6287 #if USE_RGENGC
6288 return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
6289 #else
6290 return Qfalse;
6291 #endif
6292 }
6293
6294 VALUE
6295 rb_obj_rgengc_promoted_p(VALUE obj)
6296 {
6297 return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
6298 }
6299
6300 size_t
6301 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
6302 {
6303 size_t n = 0;
6304 static ID ID_marked;
6305 #if USE_RGENGC
6306 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible;
6307 #endif
6308
6309 if (!ID_marked) {
6310 #define I(s) ID_##s = rb_intern(#s);
6311 I(marked);
6312 #if USE_RGENGC
6313 I(wb_protected);
6314 I(old);
6315 I(marking);
6316 I(uncollectible);
6317 #endif
6318 #undef I
6319 }
6320
6321 #if USE_RGENGC
6322 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
6323 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
6324 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
6325 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
6326 #endif
6327 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
6328 return n;
6329 }
6330
6331 /* GC */
6332
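/* Return `obj' to its page's freelist immediately, without waiting for a
 * GC cycle.  The slot's GC bitmaps are cleared first so it is no longer
 * treated as live, old, or remembered.  Any remaining reference to `obj'
 * becomes dangling, so callers must guarantee the object is unreachable. */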
6333 void
6334 rb_gc_force_recycle(VALUE obj)
6335 {
6336 rb_objspace_t *objspace = &rb_objspace;
6337
6338 #if USE_RGENGC
6339 int is_old = RVALUE_OLD_P(obj);
6340
6341 gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
6342
6343 if (is_old) {
6344 if (RVALUE_MARKED(obj)) {
6345 objspace->rgengc.old_objects--;
6346 }
6347 }
6348 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
6349 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
6350
6351 #if GC_ENABLE_INCREMENTAL_MARK
6352 if (is_incremental_marking(objspace)) {
6353 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
6354 invalidate_mark_stack(&objspace->mark_stack, obj);
6355 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6356 }
6357 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6358 }
6359 else {
6360 #endif
6361 if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
6362 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6363 }
6364 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6365 #if GC_ENABLE_INCREMENTAL_MARK
6366 }
6367 #endif
6368 #endif
6369
6370 objspace->profile.total_freed_objects++;
6371
6372 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
6373
6374 /* Disable counting swept_slots because it has no meaning.
6375 * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
6376 * objspace->heap.swept_slots++;
6377 * }
6378 */
6379 }
6380
6381 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE
6382 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024
6383 #endif
6384
6385 void
6386 rb_gc_register_mark_object(VALUE obj)
6387 {
6388 VALUE ary_ary = GET_VM()->mark_object_ary;
6389 VALUE ary = rb_ary_last(0, 0, ary_ary);
6390
6391 if (ary == Qnil || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
6392 ary = rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE);
6393 rb_ary_push(ary_ary, ary);
6394 }
6395
6396 rb_ary_push(ary, obj);
6397 }
6398
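/* Register the address of a C global (or otherwise long-lived) VALUE so the
 * object it refers to is treated as a GC root; pair it with
 * rb_gc_unregister_address() when the variable goes away.
 * rb_global_variable() below is a thin alias for registration. */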
6399 void
6400 rb_gc_register_address(VALUE *addr)
6401 {
6402 rb_objspace_t *objspace = &rb_objspace;
6403 struct gc_list *tmp;
6404
6405 tmp = ALLOC(struct gc_list);
6406 tmp->next = global_list;
6407 tmp->varptr = addr;
6408 global_list = tmp;
6409 }
6410
6411 void
6412 rb_gc_unregister_address(VALUE *addr)
6413 {
6414 rb_objspace_t *objspace = &rb_objspace;
6415 struct gc_list *tmp = global_list;
6416
6417 if (tmp->varptr == addr) {
6418 global_list = tmp->next;
6419 xfree(tmp);
6420 return;
6421 }
6422 while (tmp->next) {
6423 if (tmp->next->varptr == addr) {
6424 struct gc_list *t = tmp->next;
6425
6426 tmp->next = tmp->next->next;
6427 xfree(t);
6428 break;
6429 }
6430 tmp = tmp->next;
6431 }
6432 }
6433
6434 void
6435 rb_global_variable(VALUE *var)
6436 {
6437 rb_gc_register_address(var);
6438 }
6439
6440 #define GC_NOTIFY 0
6441
6442 enum {
6443 gc_stress_no_major,
6444 gc_stress_no_immediate_sweep,
6445 gc_stress_full_mark_after_malloc,
6446 gc_stress_max
6447 };
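/* Bits of an Integer GC.stress value (implementation detail): setting
 * (1 << gc_stress_no_major) keeps stress-triggered GCs from being forced
 * into full marks, (1 << gc_stress_no_immediate_sweep) disables immediate
 * sweep, and (1 << gc_stress_full_mark_after_malloc) is tested by
 * gc_stress_full_mark_after_malloc_p() below. */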
6448
6449 #define gc_stress_full_mark_after_malloc_p() \
6450 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
6451
6452 static void
6453 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
6454 {
6455 if (!heap->freelist && !heap->free_pages) {
6456 if (!heap_increment(objspace, heap)) {
6457 heap_set_increment(objspace, 1);
6458 heap_increment(objspace, heap);
6459 }
6460 }
6461 }
6462
6463 static int
6464 ready_to_gc(rb_objspace_t *objspace)
6465 {
6466 if (dont_gc || during_gc || ruby_disable_gc) {
6467 heap_ready_to_gc(objspace, heap_eden);
6468 return FALSE;
6469 }
6470 else {
6471 return TRUE;
6472 }
6473 }
6474
6475 static void
6476 gc_reset_malloc_info(rb_objspace_t *objspace)
6477 {
6478 gc_prof_set_malloc_info(objspace);
6479 {
6480 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
6481 size_t old_limit = malloc_limit;
6482
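/* Adaptive malloc_limit: when more than the current limit was malloc'ed
 * since the last GC, grow the limit by malloc_limit_growth_factor (capped
 * at malloc_limit_max); otherwise decay it slowly toward malloc_limit_min. */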
6483 if (inc > malloc_limit) {
6484 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
6485 if (malloc_limit > gc_params.malloc_limit_max) {
6486 malloc_limit = gc_params.malloc_limit_max;
6487 }
6488 }
6489 else {
6490 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
6491 if (malloc_limit < gc_params.malloc_limit_min) {
6492 malloc_limit = gc_params.malloc_limit_min;
6493 }
6494 }
6495
6496 if (0) {
6497 if (old_limit != malloc_limit) {
6498 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
6499 rb_gc_count(), old_limit, malloc_limit);
6500 }
6501 else {
6502 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
6503 rb_gc_count(), malloc_limit);
6504 }
6505 }
6506 }
6507
6508 /* reset oldmalloc info */
6509 #if RGENGC_ESTIMATE_OLDMALLOC
6510 if (!is_full_marking(objspace)) {
6511 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
6512 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
6513 objspace->rgengc.oldmalloc_increase_limit =
6514 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
6515
6516 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
6517 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
6518 }
6519 }
6520
6521 if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%u\n",
6522 (int)rb_gc_count(),
6523 (int)objspace->rgengc.need_major_gc,
6524 (unsigned int)objspace->rgengc.oldmalloc_increase,
6525 (unsigned int)objspace->rgengc.oldmalloc_increase_limit,
6526 (unsigned int)gc_params.oldmalloc_limit_max);
6527 }
6528 else {
6529 /* major GC */
6530 objspace->rgengc.oldmalloc_increase = 0;
6531
6532 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
6533 objspace->rgengc.oldmalloc_increase_limit =
6534 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
6535 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
6536 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
6537 }
6538 }
6539 }
6540 #endif
6541 }
6542
6543 static int
6544 garbage_collect(rb_objspace_t *objspace, int reason)
6545 {
6546 #if GC_PROFILE_MORE_DETAIL
6547 objspace->profile.prepare_time = getrusage_time();
6548 #endif
6549
6550 gc_rest(objspace);
6551
6552 #if GC_PROFILE_MORE_DETAIL
6553 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
6554 #endif
6555
6556 return gc_start(objspace, reason);
6557 }
6558
6559 static int
6560 gc_start(rb_objspace_t *objspace, int reason)
6561 {
6562 unsigned int do_full_mark = !!((unsigned)reason & GPR_FLAG_FULL_MARK);
6563 unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;
6564
6565 /* reason may be clobbered later, so record immediate_sweep here */
6566 objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
6567
6568 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
6569 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
6570
6571 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
6572 GC_ASSERT(!is_lazy_sweeping(heap_eden));
6573 GC_ASSERT(!is_incremental_marking(objspace));
6574 #if RGENGC_CHECK_MODE >= 2
6575 gc_verify_internal_consistency(Qnil);
6576 #endif
6577
6578 gc_enter(objspace, "gc_start");
6579
6580 if (ruby_gc_stressful) {
6581 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
6582
6583 if ((flag & (1<<gc_stress_no_major)) == 0) {
6584 do_full_mark = TRUE;
6585 }
6586
6587 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
6588 }
6589 else {
6590 #if USE_RGENGC
6591 if (objspace->rgengc.need_major_gc) {
6592 reason |= objspace->rgengc.need_major_gc;
6593 do_full_mark = TRUE;
6594 }
6595 else if (RGENGC_FORCE_MAJOR_GC) {
6596 reason = GPR_FLAG_MAJOR_BY_FORCE;
6597 do_full_mark = TRUE;
6598 }
6599
6600 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
6601 #endif
6602 }
6603
6604 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
6605 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
6606 }
6607
6608 #if GC_ENABLE_INCREMENTAL_MARK
6609 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
6610 objspace->flags.during_incremental_marking = FALSE;
6611 }
6612 else {
6613 objspace->flags.during_incremental_marking = do_full_mark;
6614 }
6615 #endif
6616
6617 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
6618 objspace->flags.immediate_sweep = TRUE;
6619 }
6620
6621 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
6622
6623 gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
6624 reason,
6625 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
6626
6627 #if USE_DEBUG_COUNTER
6628 RB_DEBUG_COUNTER_INC(gc_count);
6629
6630 if (reason & GPR_FLAG_MAJOR_MASK) {
6631 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
6632 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
6633 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
6634 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
6635 #if RGENGC_ESTIMATE_OLDMALLOC
6636 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
6637 #endif
6638 }
6639 else {
6640 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
6641 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
6642 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
6643 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
6644 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
6645 }
6646 #endif
6647
6648 objspace->profile.count++;
6649 objspace->profile.latest_gc_info = reason;
6650 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
6651 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
6652 gc_prof_setup_new_record(objspace, reason);
6653 gc_reset_malloc_info(objspace);
6654 rb_transient_heap_start_marking(do_full_mark);
6655
6656 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
6657 GC_ASSERT(during_gc);
6658
6659 gc_prof_timer_start(objspace);
6660 {
6661 gc_marks(objspace, do_full_mark);
6662 }
6663 gc_prof_timer_stop(objspace);
6664
6665 gc_exit(objspace, "gc_start");
6666 return TRUE;
6667 }
6668
6669 static void
6670 gc_rest(rb_objspace_t *objspace)
6671 {
6672 int marking = is_incremental_marking(objspace);
6673 int sweeping = is_lazy_sweeping(heap_eden);
6674
6675 if (marking || sweeping) {
6676 gc_enter(objspace, "gc_rest");
6677
6678 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(Qnil);
6679
6680 if (is_incremental_marking(objspace)) {
6681 PUSH_MARK_FUNC_DATA(NULL);
6682 gc_marks_rest(objspace);
6683 POP_MARK_FUNC_DATA();
6684 }
6685 if (is_lazy_sweeping(heap_eden)) {
6686 gc_sweep_rest(objspace);
6687 }
6688 gc_exit(objspace, "gc_rest");
6689 }
6690 }
6691
6692 struct objspace_and_reason {
6693 rb_objspace_t *objspace;
6694 int reason;
6695 };
6696
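/* Fill `buff' with a short status code used in logs: M = marking (F = full,
 * I = incremental), S = sweeping (L = lazy sweeping), N = none. */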
6697 static void
6698 gc_current_status_fill(rb_objspace_t *objspace, char *buff)
6699 {
6700 int i = 0;
6701 if (is_marking(objspace)) {
6702 buff[i++] = 'M';
6703 #if USE_RGENGC
6704 if (is_full_marking(objspace)) buff[i++] = 'F';
6705 #if GC_ENABLE_INCREMENTAL_MARK
6706 if (is_incremental_marking(objspace)) buff[i++] = 'I';
6707 #endif
6708 #endif
6709 }
6710 else if (is_sweeping(objspace)) {
6711 buff[i++] = 'S';
6712 if (is_lazy_sweeping(heap_eden)) buff[i++] = 'L';
6713 }
6714 else {
6715 buff[i++] = 'N';
6716 }
6717 buff[i] = '\0';
6718 }
6719
6720 static const char *
6721 gc_current_status(rb_objspace_t *objspace)
6722 {
6723 static char buff[0x10];
6724 gc_current_status_fill(objspace, buff);
6725 return buff;
6726 }
6727
6728 #if PRINT_ENTER_EXIT_TICK
6729
6730 static tick_t last_exit_tick;
6731 static tick_t enter_tick;
6732 static int enter_count = 0;
6733 static char last_gc_status[0x10];
6734
6735 static inline void
6736 gc_record(rb_objspace_t *objspace, int direction, const char *event)
6737 {
6738 if (direction == 0) { /* enter */
6739 enter_count++;
6740 enter_tick = tick();
6741 gc_current_status_fill(objspace, last_gc_status);
6742 }
6743 else { /* exit */
6744 tick_t exit_tick = tick();
6745 char current_gc_status[0x10];
6746 gc_current_status_fill(objspace, current_gc_status);
6747 #if 1
6748 /* [last mutator time] [gc time] [event] */
6749 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6750 enter_tick - last_exit_tick,
6751 exit_tick - enter_tick,
6752 event,
6753 last_gc_status, current_gc_status,
6754 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6755 last_exit_tick = exit_tick;
6756 #else
6757 /* [enter_tick] [gc time] [event] */
6758 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6759 enter_tick,
6760 exit_tick - enter_tick,
6761 event,
6762 last_gc_status, current_gc_status,
6763 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6764 #endif
6765 }
6766 }
6767 #else /* PRINT_ENTER_EXIT_TICK */
6768 static inline void
6769 gc_record(rb_objspace_t *objspace, int direction, const char *event)
6770 {
6771 /* null */
6772 }
6773 #endif /* PRINT_ENTER_EXIT_TICK */
6774
6775 static inline void
6776 gc_enter(rb_objspace_t *objspace, const char *event)
6777 {
6778 GC_ASSERT(during_gc == 0);
6779 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(Qnil);
6780
6781 mjit_gc_start_hook();
6782
6783 during_gc = TRUE;
6784 gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
6785 gc_record(objspace, 0, event);
6786 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
6787 }
6788
6789 static inline void
6790 gc_exit(rb_objspace_t *objspace, const char *event)
6791 {
6792 GC_ASSERT(during_gc != 0);
6793
6794 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
6795 gc_record(objspace, 1, event);
6796 gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
6797 during_gc = FALSE;
6798
6799 mjit_gc_finish_hook();
6800 }
6801
6802 static void *
6803 gc_with_gvl(void *ptr)
6804 {
6805 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
6806 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
6807 }
6808
6809 static int
6810 garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
6811 {
6812 if (dont_gc) return TRUE;
6813 if (ruby_thread_has_gvl_p()) {
6814 return garbage_collect(objspace, reason);
6815 }
6816 else {
6817 if (ruby_native_thread_p()) {
6818 struct objspace_and_reason oar;
6819 oar.objspace = objspace;
6820 oar.reason = reason;
6821 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
6822 }
6823 else {
6824 /* no ruby thread */
6825 fprintf(stderr, "[FATAL] failed to allocate memory\n");
6826 exit(EXIT_FAILURE);
6827 }
6828 }
6829 }
6830
6831 #undef Init_stack
6832
6833 void
6834 Init_stack(volatile VALUE *addr)
6835 {
6836 ruby_init_stack(addr);
6837 }
6838
6839 /*
6840 * call-seq:
6841 * GC.start -> nil
6842 * ObjectSpace.garbage_collect -> nil
6843 * include GC; garbage_collect -> nil
6844 * GC.start(full_mark: true, immediate_sweep: true) -> nil
6845 * ObjectSpace.garbage_collect(full_mark: true, immediate_sweep: true) -> nil
6846 * include GC; garbage_collect(full_mark: true, immediate_sweep: true) -> nil
6847 *
6848 * Initiates garbage collection, unless manually disabled.
6849 *
6850 * This method is defined with keyword arguments that default to true:
6851 *
6852 * def GC.start(full_mark: true, immediate_sweep: true); end
6853 *
6854 * Use full_mark: false to perform a minor GC.
6855 * Use immediate_sweep: false to defer sweeping (use lazy sweep).
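 *
 * Illustrative calls (keyword support is implementation dependent):
 *
 *    GC.start                          # full mark, immediate sweep
 *    GC.start(full_mark: false)        # minor GC
 *    GC.start(immediate_sweep: false)  # defer sweeping (lazy sweep)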
6856 *
6857 * Note: These keyword arguments are implementation and version dependent. They
6858 * are not guaranteed to be future-compatible, and may be ignored if the
6859 * underlying implementation does not support them.
6860 */
6861
6862 static VALUE
6863 gc_start_internal(int argc, VALUE *argv, VALUE self)
6864 {
6865 rb_objspace_t *objspace = &rb_objspace;
6866 int reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
6867 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_METHOD;
6868 VALUE opt = Qnil;
6869 static ID keyword_ids[3];
6870
6871 rb_scan_args(argc, argv, "0:", &opt);
6872
6873 if (!NIL_P(opt)) {
6874 VALUE kwvals[3];
6875
6876 if (!keyword_ids[0]) {
6877 keyword_ids[0] = rb_intern("full_mark");
6878 keyword_ids[1] = rb_intern("immediate_mark");
6879 keyword_ids[2] = rb_intern("immediate_sweep");
6880 }
6881
6882 rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);
6883
6884 if (kwvals[0] != Qundef && !RTEST(kwvals[0])) {
6885 reason &= ~GPR_FLAG_FULL_MARK;
6886 }
6887 if (kwvals[1] != Qundef && !RTEST(kwvals[1])) {
6888 reason &= ~GPR_FLAG_IMMEDIATE_MARK;
6889 }
6890 if (kwvals[2] != Qundef && !RTEST(kwvals[2])) {
6891 reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
6892 }
6893 }
6894
6895 garbage_collect(objspace, reason);
6896 gc_finalize_deferred(objspace);
6897
6898 return Qnil;
6899 }
6900
6901 VALUE
6902 rb_gc_start(void)
6903 {
6904 rb_gc();
6905 return Qnil;
6906 }
6907
6908 void
6909 rb_gc(void)
6910 {
6911 rb_objspace_t *objspace = &rb_objspace;
6912 int reason = GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
6913 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI;
6914 garbage_collect(objspace, reason);
6915 gc_finalize_deferred(objspace);
6916 }
6917
6918 int
6919 rb_during_gc(void)
6920 {
6921 rb_objspace_t *objspace = &rb_objspace;
6922 return during_gc;
6923 }
6924
6925 #if RGENGC_PROFILE >= 2
6926
6927 static const char *type_name(int type, VALUE obj);
6928
6929 static void
6930 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
6931 {
6932 VALUE result = rb_hash_new_with_size(T_MASK);
6933 int i;
6934 for (i=0; i<T_MASK; i++) {
6935 const char *type = type_name(i, 0);
6936 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
6937 }
6938 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
6939 }
6940 #endif
6941
6942 size_t
6943 rb_gc_count(void)
6944 {
6945 return rb_objspace.profile.count;
6946 }
6947
6948 /*
6949 * call-seq:
6950 * GC.count -> Integer
6951 *
6952 * The number of times GC occurred.
6953 *
6954 * It returns the number of times GC occurred since the process started.
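 *
 * For example (the value depends on the program's allocation history):
 *
 *    GC.count   #=> 17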
6955 *
6956 */
6957
6958 static VALUE
6959 gc_count(VALUE self)
6960 {
6961 return SIZET2NUM(rb_gc_count());
6962 }
6963
6964 static VALUE
6965 gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
6966 {
6967 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
6968 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
6969 #if RGENGC_ESTIMATE_OLDMALLOC
6970 static VALUE sym_oldmalloc;
6971 #endif
6972 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
6973 static VALUE sym_none, sym_marking, sym_sweeping;
6974 VALUE hash = Qnil, key = Qnil;
6975 VALUE major_by;
6976 VALUE flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
6977
6978 if (SYMBOL_P(hash_or_key)) {
6979 key = hash_or_key;
6980 }
6981 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
6982 hash = hash_or_key;
6983 }
6984 else {
6985 rb_raise(rb_eTypeError, "non-hash or symbol given");
6986 }
6987
6988 if (sym_major_by == Qnil) {
6989 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
6990 S(major_by);
6991 S(gc_by);
6992 S(immediate_sweep);
6993 S(have_finalizer);
6994 S(state);
6995
6996 S(stress);
6997 S(nofree);
6998 S(oldgen);
6999 S(shady);
7000 S(force);
7001 #if RGENGC_ESTIMATE_OLDMALLOC
7002 S(oldmalloc);
7003 #endif
7004 S(newobj);
7005 S(malloc);
7006 S(method);
7007 S(capi);
7008
7009 S(none);
7010 S(marking);
7011 S(sweeping);
7012 #undef S
7013 }
7014
7015 #define SET(name, attr) \
7016 if (key == sym_##name) \
7017 return (attr); \
7018 else if (hash != Qnil) \
7019 rb_hash_aset(hash, sym_##name, (attr));
7020
7021 major_by =
7022 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
7023 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
7024 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
7025 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
7026 #if RGENGC_ESTIMATE_OLDMALLOC
7027 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
7028 #endif
7029 Qnil;
7030 SET(major_by, major_by);
7031
7032 SET(gc_by,
7033 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
7034 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
7035 (flags & GPR_FLAG_METHOD) ? sym_method :
7036 (flags & GPR_FLAG_CAPI) ? sym_capi :
7037 (flags & GPR_FLAG_STRESS) ? sym_stress :
7038 Qnil
7039 );
7040
7041 SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
7042 SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
7043
7044 if (orig_flags == 0) {
7045 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
7046 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
7047 }
7048 #undef SET
7049
7050 if (!NIL_P(key)) {/* matched key should return above */
7051 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
7052 }
7053
7054 return hash;
7055 }
7056
7057 VALUE
7058 rb_gc_latest_gc_info(VALUE key)
7059 {
7060 rb_objspace_t *objspace = &rb_objspace;
7061 return gc_info_decode(objspace, key, 0);
7062 }
7063
7064 /*
7065 * call-seq:
7066 * GC.latest_gc_info -> {:gc_by=>:newobj}
7067 * GC.latest_gc_info(hash) -> hash
7068 * GC.latest_gc_info(:major_by) -> :malloc
7069 *
7070 * Returns information about the most recent garbage collection.
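 *
 * Illustrative result (the exact keys and values are implementation
 * dependent):
 *
 *    GC.latest_gc_info
 *    #=> {:major_by=>nil, :gc_by=>:newobj, :have_finalizer=>false,
 *    #    :immediate_sweep=>false, :state=>:none}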
7071 */
7072
7073 static VALUE
7074 gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
7075 {
7076 rb_objspace_t *objspace = &rb_objspace;
7077 VALUE arg = Qnil;
7078
7079 if (rb_check_arity(argc, 0, 1) == 1) {
7080 arg = argv[0];
7081 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
7082 rb_raise(rb_eTypeError, "non-hash or symbol given");
7083 }
7084 }
7085 else {
7086 arg = rb_hash_new();
7087 }
7088
7089 return gc_info_decode(objspace, arg, 0);
7090 }
7091
7092 enum gc_stat_sym {
7093 gc_stat_sym_count,
7094 gc_stat_sym_heap_allocated_pages,
7095 gc_stat_sym_heap_sorted_length,
7096 gc_stat_sym_heap_allocatable_pages,
7097 gc_stat_sym_heap_available_slots,
7098 gc_stat_sym_heap_live_slots,
7099 gc_stat_sym_heap_free_slots,
7100 gc_stat_sym_heap_final_slots,
7101 gc_stat_sym_heap_marked_slots,
7102 gc_stat_sym_heap_eden_pages,
7103 gc_stat_sym_heap_tomb_pages,
7104 gc_stat_sym_total_allocated_pages,
7105 gc_stat_sym_total_freed_pages,
7106 gc_stat_sym_total_allocated_objects,
7107 gc_stat_sym_total_freed_objects,
7108 gc_stat_sym_malloc_increase_bytes,
7109 gc_stat_sym_malloc_increase_bytes_limit,
7110 #if USE_RGENGC
7111 gc_stat_sym_minor_gc_count,
7112 gc_stat_sym_major_gc_count,
7113 gc_stat_sym_remembered_wb_unprotected_objects,
7114 gc_stat_sym_remembered_wb_unprotected_objects_limit,
7115 gc_stat_sym_old_objects,
7116 gc_stat_sym_old_objects_limit,
7117 #if RGENGC_ESTIMATE_OLDMALLOC
7118 gc_stat_sym_oldmalloc_increase_bytes,
7119 gc_stat_sym_oldmalloc_increase_bytes_limit,
7120 #endif
7121 #if RGENGC_PROFILE
7122 gc_stat_sym_total_generated_normal_object_count,
7123 gc_stat_sym_total_generated_shady_object_count,
7124 gc_stat_sym_total_shade_operation_count,
7125 gc_stat_sym_total_promoted_count,
7126 gc_stat_sym_total_remembered_normal_object_count,
7127 gc_stat_sym_total_remembered_shady_object_count,
7128 #endif
7129 #endif
7130 gc_stat_sym_last
7131 };
7132
7133 enum gc_stat_compat_sym {
7134 gc_stat_compat_sym_gc_stat_heap_used,
7135 gc_stat_compat_sym_heap_eden_page_length,
7136 gc_stat_compat_sym_heap_tomb_page_length,
7137 gc_stat_compat_sym_heap_increment,
7138 gc_stat_compat_sym_heap_length,
7139 gc_stat_compat_sym_heap_live_slot,
7140 gc_stat_compat_sym_heap_free_slot,
7141 gc_stat_compat_sym_heap_final_slot,
7142 gc_stat_compat_sym_heap_swept_slot,
7143 #if USE_RGENGC
7144 gc_stat_compat_sym_remembered_shady_object,
7145 gc_stat_compat_sym_remembered_shady_object_limit,
7146 gc_stat_compat_sym_old_object,
7147 gc_stat_compat_sym_old_object_limit,
7148 #endif
7149 gc_stat_compat_sym_total_allocated_object,
7150 gc_stat_compat_sym_total_freed_object,
7151 gc_stat_compat_sym_malloc_increase,
7152 gc_stat_compat_sym_malloc_limit,
7153 #if RGENGC_ESTIMATE_OLDMALLOC
7154 gc_stat_compat_sym_oldmalloc_increase,
7155 gc_stat_compat_sym_oldmalloc_limit,
7156 #endif
7157 gc_stat_compat_sym_last
7158 };
7159
7160 static VALUE gc_stat_symbols[gc_stat_sym_last];
7161 static VALUE gc_stat_compat_symbols[gc_stat_compat_sym_last];
7162 static VALUE gc_stat_compat_table;
7163
7164 static void
7165 setup_gc_stat_symbols(void)
7166 {
7167 if (gc_stat_symbols[0] == 0) {
7168 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
7169 S(count);
7170 S(heap_allocated_pages);
7171 S(heap_sorted_length);
7172 S(heap_allocatable_pages);
7173 S(heap_available_slots);
7174 S(heap_live_slots);
7175 S(heap_free_slots);
7176 S(heap_final_slots);
7177 S(heap_marked_slots);
7178 S(heap_eden_pages);
7179 S(heap_tomb_pages);
7180 S(total_allocated_pages);
7181 S(total_freed_pages);
7182 S(total_allocated_objects);
7183 S(total_freed_objects);
7184 S(malloc_increase_bytes);
7185 S(malloc_increase_bytes_limit);
7186 #if USE_RGENGC
7187 S(minor_gc_count);
7188 S(major_gc_count);
7189 S(remembered_wb_unprotected_objects);
7190 S(remembered_wb_unprotected_objects_limit);
7191 S(old_objects);
7192 S(old_objects_limit);
7193 #if RGENGC_ESTIMATE_OLDMALLOC
7194 S(oldmalloc_increase_bytes);
7195 S(oldmalloc_increase_bytes_limit);
7196 #endif
7197 #if RGENGC_PROFILE
7198 S(total_generated_normal_object_count);
7199 S(total_generated_shady_object_count);
7200 S(total_shade_operation_count);
7201 S(total_promoted_count);
7202 S(total_remembered_normal_object_count);
7203 S(total_remembered_shady_object_count);
7204 #endif /* RGENGC_PROFILE */
7205 #endif /* USE_RGENGC */
7206 #undef S
7207 #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
7208 S(gc_stat_heap_used);
7209 S(heap_eden_page_length);
7210 S(heap_tomb_page_length);
7211 S(heap_increment);
7212 S(heap_length);
7213 S(heap_live_slot);
7214 S(heap_free_slot);
7215 S(heap_final_slot);
7216 S(heap_swept_slot);
7217 #if USE_RGENGC
7218 S(remembered_shady_object);
7219 S(remembered_shady_object_limit);
7220 S(old_object);
7221 S(old_object_limit);
7222 #endif
7223 S(total_allocated_object);
7224 S(total_freed_object);
7225 S(malloc_increase);
7226 S(malloc_limit);
7227 #if RGENGC_ESTIMATE_OLDMALLOC
7228 S(oldmalloc_increase);
7229 S(oldmalloc_limit);
7230 #endif
7231 #undef S
7232
7233 {
7234 VALUE table = gc_stat_compat_table = rb_hash_new();
7235 rb_obj_hide(table);
7236 rb_gc_register_mark_object(table);
7237
7238 /* compatibility layer for Ruby 2.1 */
7239 #define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
7240 #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
7241 rb_hash_aset(table, OLD_SYM(gc_stat_heap_used), NEW_SYM(heap_allocated_pages));
7242 rb_hash_aset(table, OLD_SYM(heap_eden_page_length), NEW_SYM(heap_eden_pages));
7243 rb_hash_aset(table, OLD_SYM(heap_tomb_page_length), NEW_SYM(heap_tomb_pages));
7244 rb_hash_aset(table, OLD_SYM(heap_increment), NEW_SYM(heap_allocatable_pages));
7245 rb_hash_aset(table, OLD_SYM(heap_length), NEW_SYM(heap_sorted_length));
7246 rb_hash_aset(table, OLD_SYM(heap_live_slot), NEW_SYM(heap_live_slots));
7247 rb_hash_aset(table, OLD_SYM(heap_free_slot), NEW_SYM(heap_free_slots));
7248 rb_hash_aset(table, OLD_SYM(heap_final_slot), NEW_SYM(heap_final_slots));
7249 #if USE_RGENGC
7250 rb_hash_aset(table, OLD_SYM(remembered_shady_object), NEW_SYM(remembered_wb_unprotected_objects));
7251 rb_hash_aset(table, OLD_SYM(remembered_shady_object_limit), NEW_SYM(remembered_wb_unprotected_objects_limit));
7252 rb_hash_aset(table, OLD_SYM(old_object), NEW_SYM(old_objects));
7253 rb_hash_aset(table, OLD_SYM(old_object_limit), NEW_SYM(old_objects_limit));
7254 #endif
7255 rb_hash_aset(table, OLD_SYM(total_allocated_object), NEW_SYM(total_allocated_objects));
7256 rb_hash_aset(table, OLD_SYM(total_freed_object), NEW_SYM(total_freed_objects));
7257 rb_hash_aset(table, OLD_SYM(malloc_increase), NEW_SYM(malloc_increase_bytes));
7258 rb_hash_aset(table, OLD_SYM(malloc_limit), NEW_SYM(malloc_increase_bytes_limit));
7259 #if RGENGC_ESTIMATE_OLDMALLOC
7260 rb_hash_aset(table, OLD_SYM(oldmalloc_increase), NEW_SYM(oldmalloc_increase_bytes));
7261 rb_hash_aset(table, OLD_SYM(oldmalloc_limit), NEW_SYM(oldmalloc_increase_bytes_limit));
7262 #endif
7263 #undef OLD_SYM
7264 #undef NEW_SYM
7265 rb_obj_freeze(table);
7266 }
7267 }
7268 }
7269
7270 static VALUE
7271 compat_key(VALUE key)
7272 {
7273 VALUE new_key = rb_hash_lookup(gc_stat_compat_table, key);
7274
7275 if (!NIL_P(new_key)) {
7276 static int warned = 0;
7277 if (warned == 0) {
7278 rb_warn("GC.stat keys were changed from Ruby 2.1. "
7279 "In this case, you refer to obsolete `%"PRIsVALUE"' (new key is `%"PRIsVALUE"'). "
7280 "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
7281 key, new_key);
7282 warned = 1;
7283 }
7284 }
7285
7286 return new_key;
7287 }
7288
7289 static VALUE
7290 default_proc_for_compat_func(VALUE hash, VALUE dmy, int argc, VALUE *argv)
7291 {
7292 VALUE key, new_key;
7293
7294 Check_Type(hash, T_HASH);
7295 rb_check_arity(argc, 2, 2);
7296 key = argv[1];
7297
7298 if ((new_key = compat_key(key)) != Qnil) {
7299 return rb_hash_lookup(hash, new_key);
7300 }
7301
7302 return Qnil;
7303 }
7304
7305 static size_t
7306 gc_stat_internal(VALUE hash_or_sym)
7307 {
7308 rb_objspace_t *objspace = &rb_objspace;
7309 VALUE hash = Qnil, key = Qnil;
7310
7311 setup_gc_stat_symbols();
7312
7313 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
7314 hash = hash_or_sym;
7315
7316 if (NIL_P(RHASH_IFNONE(hash))) {
7317 static VALUE default_proc_for_compat = 0;
7318 if (default_proc_for_compat == 0) { /* TODO: it should be */
7319 default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
7320 rb_gc_register_mark_object(default_proc_for_compat);
7321 }
7322 rb_hash_set_default_proc(hash, default_proc_for_compat);
7323 }
7324 }
7325 else if (SYMBOL_P(hash_or_sym)) {
7326 key = hash_or_sym;
7327 }
7328 else {
7329 rb_raise(rb_eTypeError, "non-hash or symbol argument");
7330 }
7331
7332 #define SET(name, attr) \
7333 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
7334 return attr; \
7335 else if (hash != Qnil) \
7336 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
7337
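/* If a Symbol key was given but matches none of the SET() entries below,
 * compat_key() maps the obsolete Ruby 2.1 name to the current one and
 * control jumps back to this label exactly once. */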
7338 again:
7339 SET(count, objspace->profile.count);
7340
7341 /* implementation dependent counters */
7342 SET(heap_allocated_pages, heap_allocated_pages);
7343 SET(heap_sorted_length, heap_pages_sorted_length);
7344 SET(heap_allocatable_pages, heap_allocatable_pages);
7345 SET(heap_available_slots, objspace_available_slots(objspace));
7346 SET(heap_live_slots, objspace_live_slots(objspace));
7347 SET(heap_free_slots, objspace_free_slots(objspace));
7348 SET(heap_final_slots, heap_pages_final_slots);
7349 SET(heap_marked_slots, objspace->marked_slots);
7350 SET(heap_eden_pages, heap_eden->total_pages);
7351 SET(heap_tomb_pages, heap_tomb->total_pages);
7352 SET(total_allocated_pages, objspace->profile.total_allocated_pages);
7353 SET(total_freed_pages, objspace->profile.total_freed_pages);
7354 SET(total_allocated_objects, objspace->total_allocated_objects);
7355 SET(total_freed_objects, objspace->profile.total_freed_objects);
7356 SET(malloc_increase_bytes, malloc_increase);
7357 SET(malloc_increase_bytes_limit, malloc_limit);
7358 #if USE_RGENGC
7359 SET(minor_gc_count, objspace->profile.minor_gc_count);
7360 SET(major_gc_count, objspace->profile.major_gc_count);
7361 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
7362 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
7363 SET(old_objects, objspace->rgengc.old_objects);
7364 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
7365 #if RGENGC_ESTIMATE_OLDMALLOC
7366 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
7367 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
7368 #endif
7369
7370 #if RGENGC_PROFILE
7371 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
7372 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
7373 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
7374 SET(total_promoted_count, objspace->profile.total_promoted_count);
7375 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
7376 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
7377 #endif /* RGENGC_PROFILE */
7378 #endif /* USE_RGENGC */
7379 #undef SET
7380
7381 if (!NIL_P(key)) { /* a matched key should have returned above */
7382 VALUE new_key;
7383 if ((new_key = compat_key(key)) != Qnil) {
7384 key = new_key;
7385 goto again;
7386 }
7387 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
7388 }
7389
7390 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
7391 if (hash != Qnil) {
7392 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
7393 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
7394 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
7395 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
7396 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
7397 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
7398 }
7399 #endif
7400
7401 return 0;
7402 }
7403
7404 /*
7405 * call-seq:
7406 * GC.stat -> Hash
7407 * GC.stat(hash) -> hash
7408 * GC.stat(:key) -> Numeric
7409 *
7410 * Returns a Hash containing information about the GC.
7411 *
7412 * The hash includes internal statistics about the GC, such as:
7413 *
7414 * {
7415 * :count=>0,
7416 * :heap_allocated_pages=>24,
7417 * :heap_sorted_length=>24,
7418 * :heap_allocatable_pages=>0,
7419 * :heap_available_slots=>9783,
7420 * :heap_live_slots=>7713,
7421 * :heap_free_slots=>2070,
7422 * :heap_final_slots=>0,
7423 * :heap_marked_slots=>0,
7424 * :heap_eden_pages=>24,
7425 * :heap_tomb_pages=>0,
7426 * :total_allocated_pages=>24,
7427 * :total_freed_pages=>0,
7428 * :total_allocated_objects=>7796,
7429 * :total_freed_objects=>83,
7430 * :malloc_increase_bytes=>2389312,
7431 * :malloc_increase_bytes_limit=>16777216,
7432 * :minor_gc_count=>0,
7433 * :major_gc_count=>0,
7434 * :remembered_wb_unprotected_objects=>0,
7435 * :remembered_wb_unprotected_objects_limit=>0,
7436 * :old_objects=>0,
7437 * :old_objects_limit=>0,
7438 * :oldmalloc_increase_bytes=>2389760,
7439 * :oldmalloc_increase_bytes_limit=>16777216
7440 * }
7441 *
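* A single statistic can also be fetched by passing its key as a Symbol:
*
*     GC.stat(:heap_live_slots) #=> 7713
*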
7442 * The contents of the hash are implementation specific and may be changed in
7443 * the future.
7444 *
7445 * This method is only expected to work on C Ruby.
7446 *
7447 */
7448
7449 static VALUE
7450 gc_stat(int argc, VALUE *argv, VALUE self)
7451 {
7452 VALUE arg = Qnil;
7453
7454 if (rb_check_arity(argc, 0, 1) == 1) {
7455 arg = argv[0];
7456 if (SYMBOL_P(arg)) {
7457 size_t value = gc_stat_internal(arg);
7458 return SIZET2NUM(value);
7459 }
7460 else if (!RB_TYPE_P(arg, T_HASH)) {
7461 rb_raise(rb_eTypeError, "non-hash or symbol given");
7462 }
7463 }
7464 else {
7465 arg = rb_hash_new();
7466 }
7467 gc_stat_internal(arg);
7468 return arg;
7469 }
7470
7471 size_t
7472 rb_gc_stat(VALUE key)
7473 {
7474 if (SYMBOL_P(key)) {
7475 size_t value = gc_stat_internal(key);
7476 return value;
7477 }
7478 else {
7479 gc_stat_internal(key);
7480 return 0;
7481 }
7482 }
7483
7484 /*
7485 * call-seq:
7486 * GC.stress -> integer, true or false
7487 *
7488 * Returns the current status of GC stress mode.
7489 */
7490
7491 static VALUE
7492 gc_stress_get(VALUE self)
7493 {
7494 rb_objspace_t *objspace = &rb_objspace;
7495 return ruby_gc_stress_mode;
7496 }
7497
7498 static void
7499 gc_stress_set(rb_objspace_t *objspace, VALUE flag)
7500 {
7501 objspace->flags.gc_stressful = RTEST(flag);
7502 objspace->gc_stress_mode = flag;
7503 }
7504
7505 /*
7506 * call-seq:
7507 * GC.stress = flag -> flag
7508 *
7509 * Updates the GC stress mode.
7510 *
7511 * When stress mode is enabled, the GC is invoked at every GC opportunity:
7512 * all memory and object allocations.
7513 *
7514 * Enabling stress mode will degrade performance; it is only intended for debugging.
7515 *
7516 * flag can be true, false, or an integer bit-ORed from the following flags.
7517 * 0x01:: no major GC
7518 * 0x02:: no immediate sweep
7519 * 0x04:: full mark after malloc/calloc/realloc
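*
* For example:
*
*     GC.stress = 0x01 | 0x02 # stress mode, but without major GC or immediate sweep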
7520 */
7521
7522 static VALUE
7523 gc_stress_set_m(VALUE self, VALUE flag)
7524 {
7525 rb_objspace_t *objspace = &rb_objspace;
7526 gc_stress_set(objspace, flag);
7527 return flag;
7528 }
7529
7530 /*
7531 * call-seq:
7532 * GC.enable -> true or false
7533 *
7534 * Enables garbage collection, returning +true+ if garbage
7535 * collection was previously disabled.
7536 *
7537 * GC.disable #=> false
7538 * GC.enable #=> true
7539 * GC.enable #=> false
7540 *
7541 */
7542
7543 VALUE
7544 rb_gc_enable(void)
7545 {
7546 rb_objspace_t *objspace = &rb_objspace;
7547 int old = dont_gc;
7548
7549 dont_gc = FALSE;
7550 return old ? Qtrue : Qfalse;
7551 }
7552
7553 /*
7554 * call-seq:
7555 * GC.disable -> true or false
7556 *
7557 * Disables garbage collection, returning +true+ if garbage
7558 * collection was already disabled.
7559 *
7560 * GC.disable #=> false
7561 * GC.disable #=> true
7562 *
7563 */
7564
7565 VALUE
7566 rb_gc_disable(void)
7567 {
7568 rb_objspace_t *objspace = &rb_objspace;
7569 int old = dont_gc;
7570
7571 gc_rest(objspace);
7572
7573 dont_gc = TRUE;
7574 return old ? Qtrue : Qfalse;
7575 }
7576
7577 static int
7578 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
7579 {
7580 char *ptr = getenv(name);
7581 ssize_t val;
7582
7583 if (ptr != NULL && *ptr) {
7584 size_t unit = 0;
7585 char *end;
7586 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
7587 val = strtoll(ptr, &end, 0);
7588 #else
7589 val = strtol(ptr, &end, 0);
7590 #endif
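/* Accept an optional unit suffix: k/K, m/M and g/G multiply the value by
 * 1024, 1024**2 and 1024**3 respectively. */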
7591 switch (*end) {
7592 case 'k': case 'K':
7593 unit = 1024;
7594 ++end;
7595 break;
7596 case 'm': case 'M':
7597 unit = 1024*1024;
7598 ++end;
7599 break;
7600 case 'g': case 'G':
7601 unit = 1024*1024*1024;
7602 ++end;
7603 break;
7604 }
7605 while (*end && isspace((unsigned char)*end)) end++;
7606 if (*end) {
7607 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7608 return 0;
7609 }
7610 if (unit > 0) {
7611 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
7612 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
7613 return 0;
7614 }
7615 val *= unit;
7616 }
7617 if (val > 0 && (size_t)val > lower_bound) {
7618 if (RTEST(ruby_verbose)) {
7619 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
7620 }
7621 *default_value = (size_t)val;
7622 return 1;
7623 }
7624 else {
7625 if (RTEST(ruby_verbose)) {
7626 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
7627 name, val, *default_value, lower_bound);
7628 }
7629 return 0;
7630 }
7631 }
7632 return 0;
7633 }
7634
7635 static int
7636 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
7637 {
7638 char *ptr = getenv(name);
7639 double val;
7640
7641 if (ptr != NULL && *ptr) {
7642 char *end;
7643 val = strtod(ptr, &end);
7644 if (!*ptr || *end) {
7645 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7646 return 0;
7647 }
7648
7649 if (accept_zero && val == 0.0) {
7650 goto accept;
7651 }
7652 else if (val <= lower_bound) {
7653 if (RTEST(ruby_verbose)) {
7654 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
7655 name, val, *default_value, lower_bound);
7656 }
7657 }
7658 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
7659 val > upper_bound) {
7660 if (RTEST(ruby_verbose)) {
7661 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
7662 name, val, *default_value, upper_bound);
7663 }
7664 }
7665 else {
7666 accept:
7667 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
7668 *default_value = val;
7669 return 1;
7670 }
7671 }
7672 return 0;
7673 }
7674
7675 static void
7676 gc_set_initial_pages(void)
7677 {
7678 size_t min_pages;
7679 rb_objspace_t *objspace = &rb_objspace;
7680
7681 min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
7682 if (min_pages > heap_eden->total_pages) {
7683 heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
7684 }
7685 }
7686
7687 /*
7688 * GC tuning environment variables
7689 *
7690 * * RUBY_GC_HEAP_INIT_SLOTS
7691 * - Initial allocation slots.
7692 * * RUBY_GC_HEAP_FREE_SLOTS
7693 * - Prepare at least this number of slots after GC.
7694 * - Allocate slots if there are not enough slots.
7695 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
7696 * - Allocate slots by this factor.
7697 * - (next slots number) = (current slots number) * (this factor)
7698 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
7699 * - Allocation rate is limited to this number of slots.
7700 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
7701 * - Allocate additional pages when the number of free slots is
7702 * lower than the value (total_slots * (this ratio)).
7703 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
7704 * - Allocate slots to satisfy this formula:
7705 * free_slots = total_slots * goal_ratio
7706 * - In other words, prepare (total_slots * goal_ratio) free slots.
7707 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
7708 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
7709 * - Allow pages to be freed when the number of free slots is
7710 * greater than the value (total_slots * (this ratio)).
7711 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
7712 * - Do full GC when the number of old objects is more than R * N
7713 * where R is this factor and
7714 * N is the number of old objects just after last full GC.
7715 *
7716 * * obsolete
7717 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
7718 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
7719 *
7720 * * RUBY_GC_MALLOC_LIMIT
7721 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
7722 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7723 *
7724 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
7725 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
7726 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7727 */
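
/* For example (an illustrative invocation; the script name and values are
 * arbitrary):
 *
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.2 ruby app.rb
 *
 * starts the heap with room for roughly 600000 slots and grows it by a factor
 * of 1.2 on each expansion instead of the default growth factor.
 */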
7728
7729 void
7730 ruby_gc_set_params(int safe_level)
7731 {
7732 if (safe_level > 0) return;
7733
7734 /* RUBY_GC_HEAP_FREE_SLOTS */
7735 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
7736 /* ok */
7737 }
7738 else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
7739 rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
7740 }
7741
7742 /* RUBY_GC_HEAP_INIT_SLOTS */
7743 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
7744 gc_set_initial_pages();
7745 }
7746 else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
7747 rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
7748 gc_set_initial_pages();
7749 }
7750
7751 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
7752 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
7753 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
7754 0.0, 1.0, FALSE);
7755 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
7756 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
7757 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
7758 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
7759 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
7760
7761 get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
7762 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
7763 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
7764 gc_params.malloc_limit_max = SIZE_MAX;
7765 }
7766 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
7767
7768 #if RGENGC_ESTIMATE_OLDMALLOC
7769 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
7770 rb_objspace_t *objspace = &rb_objspace;
7771 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
7772 }
7773 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
7774 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
7775 #endif
7776 }
7777
7778 void
7779 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
7780 {
7781 rb_objspace_t *objspace = &rb_objspace;
7782
7783 if (is_markable_object(objspace, obj)) {
7784 struct mark_func_data_struct mfd;
7785 mfd.mark_func = func;
7786 mfd.data = data;
7787 PUSH_MARK_FUNC_DATA(&mfd);
7788 gc_mark_children(objspace, obj);
7789 POP_MARK_FUNC_DATA();
7790 }
7791 }
7792
7793 struct root_objects_data {
7794 const char *category;
7795 void (*func)(const char *category, VALUE, void *);
7796 void *data;
7797 };
7798
7799 static void
7800 root_objects_from(VALUE obj, void *ptr)
7801 {
7802 const struct root_objects_data *data = (struct root_objects_data *)ptr;
7803 (*data->func)(data->category, obj, data->data);
7804 }
7805
7806 void
7807 rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
7808 {
7809 rb_objspace_t *objspace = &rb_objspace;
7810 struct root_objects_data data;
7811 struct mark_func_data_struct mfd;
7812
7813 data.func = func;
7814 data.data = passing_data;
7815
7816 mfd.mark_func = root_objects_from;
7817 mfd.data = &data;
7818
7819 PUSH_MARK_FUNC_DATA(&mfd);
7820 gc_mark_roots(objspace, &data.category);
7821 POP_MARK_FUNC_DATA();
7822 }
7823
7824 /*
7825 ------------------------ Extended allocator ------------------------
7826 */
7827
7828 static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
7829
7830 static void *
7831 negative_size_allocation_error_with_gvl(void *ptr)
7832 {
7833 rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
7834 return 0; /* should not be reached */
7835 }
7836
7837 static void
7838 negative_size_allocation_error(const char *msg)
7839 {
7840 if (ruby_thread_has_gvl_p()) {
7841 rb_raise(rb_eNoMemError, "%s", msg);
7842 }
7843 else {
7844 if (ruby_native_thread_p()) {
7845 rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
7846 }
7847 else {
7848 fprintf(stderr, "[FATAL] %s\n", msg);
7849 exit(EXIT_FAILURE);
7850 }
7851 }
7852 }
7853
7854 static void *
7855 ruby_memerror_body(void *dummy)
7856 {
7857 rb_memerror();
7858 return 0;
7859 }
7860
7861 static void
7862 ruby_memerror(void)
7863 {
7864 if (ruby_thread_has_gvl_p()) {
7865 rb_memerror();
7866 }
7867 else {
7868 if (ruby_native_thread_p()) {
7869 rb_thread_call_with_gvl(ruby_memerror_body, 0);
7870 }
7871 else {
7872 /* no ruby thread */
7873 fprintf(stderr, "[FATAL] failed to allocate memory\n");
7874 exit(EXIT_FAILURE);
7875 }
7876 }
7877 }
7878
7879 void
7880 rb_memerror(void)
7881 {
7882 rb_execution_context_t *ec = GET_EC();
7883 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
7884 VALUE exc;
7885
7886 if (during_gc) gc_exit(objspace, "rb_memerror");
7887
7888 exc = nomem_error;
7889 if (!exc ||
7890 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
7891 fprintf(stderr, "[FATAL] failed to allocate memory\n");
7892 exit(EXIT_FAILURE);
7893 }
7894 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
7895 rb_ec_raised_clear(ec);
7896 }
7897 else {
7898 rb_ec_raised_set(ec, RAISED_NOMEMORY);
7899 exc = ruby_vm_special_exception_copy(exc);
7900 }
7901 ec->errinfo = exc;
7902 EC_JUMP_TAG(ec, TAG_RAISE);
7903 }
7904
7905 void *
7906 rb_aligned_malloc(size_t alignment, size_t size)
7907 {
7908 void *res;
7909
7910 #if defined __MINGW32__
7911 res = __mingw_aligned_malloc(size, alignment);
7912 #elif defined _WIN32
7913 void *_aligned_malloc(size_t, size_t);
7914 res = _aligned_malloc(size, alignment);
7915 #elif defined(HAVE_POSIX_MEMALIGN)
7916 if (posix_memalign(&res, alignment, size) == 0) {
7917 return res;
7918 }
7919 else {
7920 return NULL;
7921 }
7922 #elif defined(HAVE_MEMALIGN)
7923 res = memalign(alignment, size);
7924 #else
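/* Portable fallback: over-allocate, round the address up to the requested
 * alignment, and store the original malloc() pointer in the word just before
 * the aligned block so that rb_aligned_free() can recover and free it. */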
7925 char* aligned;
7926 res = malloc(alignment + size + sizeof(void*));
7927 aligned = (char*)res + alignment + sizeof(void*);
7928 aligned -= ((VALUE)aligned & (alignment - 1));
7929 ((void**)aligned)[-1] = res;
7930 res = (void*)aligned;
7931 #endif
7932
7933 /* alignment must be a power of 2 */
7934 GC_ASSERT(((alignment - 1) & alignment) == 0);
7935 GC_ASSERT(alignment % sizeof(void*) == 0);
7936 return res;
7937 }
7938
7939 void
7940 rb_aligned_free(void *ptr)
7941 {
7942 #if defined __MINGW32__
7943 __mingw_aligned_free(ptr);
7944 #elif defined _WIN32
7945 _aligned_free(ptr);
7946 #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
7947 free(ptr);
7948 #else
7949 free(((void**)ptr)[-1]);
7950 #endif
7951 }
7952
7953 static inline size_t
7954 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
7955 {
7956 #ifdef HAVE_MALLOC_USABLE_SIZE
7957 return malloc_usable_size(ptr);
7958 #else
7959 return hint;
7960 #endif
7961 }
7962
7963 enum memop_type {
7964 MEMOP_TYPE_MALLOC = 0,
7965 MEMOP_TYPE_FREE,
7966 MEMOP_TYPE_REALLOC
7967 };
7968
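/* Subtracts `sub` from `*var` with a compare-and-swap loop, clamping the
 * amount so the counter never wraps below zero under concurrent updates. */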
7969 static inline void
7970 atomic_sub_nounderflow(size_t *var, size_t sub)
7971 {
7972 if (sub == 0) return;
7973
7974 while (1) {
7975 size_t val = *var;
7976 if (val < sub) sub = val;
7977 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
7978 }
7979 }
7980
7981 static void
7982 objspace_malloc_gc_stress(rb_objspace_t *objspace)
7983 {
7984 if (ruby_gc_stressful && ruby_native_thread_p()) {
7985 int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
7986 GPR_FLAG_STRESS | GPR_FLAG_MALLOC;
7987
7988 if (gc_stress_full_mark_after_malloc_p()) {
7989 reason |= GPR_FLAG_FULL_MARK;
7990 }
7991 garbage_collect_with_gvl(objspace, reason);
7992 }
7993 }
7994
7995 static void
7996 objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
7997 {
7998 if (new_size > old_size) {
7999 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
8000 #if RGENGC_ESTIMATE_OLDMALLOC
8001 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
8002 #endif
8003 }
8004 else {
8005 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
8006 #if RGENGC_ESTIMATE_OLDMALLOC
8007 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
8008 #endif
8009 }
8010
8011 if (type == MEMOP_TYPE_MALLOC) {
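/* Once malloc_increase exceeds malloc_limit, first finish any lazy sweep in
 * progress (gc_rest() may return memory and lower the counter); only if the
 * limit is still exceeded is a malloc-driven GC triggered. */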
8012 retry:
8013 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
8014 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
8015 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
8016 goto retry;
8017 }
8018 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
8019 }
8020 }
8021
8022 #if MALLOC_ALLOCATED_SIZE
8023 if (new_size >= old_size) {
8024 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
8025 }
8026 else {
8027 size_t dec_size = old_size - new_size;
8028 size_t allocated_size = objspace->malloc_params.allocated_size;
8029
8030 #if MALLOC_ALLOCATED_SIZE_CHECK
8031 if (allocated_size < dec_size) {
8032 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
8033 }
8034 #endif
8035 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
8036 }
8037
8038 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
8039 mem,
8040 type == MEMOP_TYPE_MALLOC ? "malloc" :
8041 type == MEMOP_TYPE_FREE ? "free " :
8042 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
8043 (int)new_size, (int)old_size);
8044
8045 switch (type) {
8046 case MEMOP_TYPE_MALLOC:
8047 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
8048 break;
8049 case MEMOP_TYPE_FREE:
8050 {
8051 size_t allocations = objspace->malloc_params.allocations;
8052 if (allocations > 0) {
8053 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
8054 }
8055 #if MALLOC_ALLOCATED_SIZE_CHECK
8056 else {
8057 GC_ASSERT(objspace->malloc_params.allocations > 0);
8058 }
8059 #endif
8060 }
8061 break;
8062 case MEMOP_TYPE_REALLOC: /* ignore */ break;
8063 }
8064 #endif
8065 }
8066
8067 struct malloc_obj_info { /* 4 words */
8068 size_t size;
8069 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
8070 size_t gen;
8071 const char *file;
8072 size_t line;
8073 #endif
8074 };
8075
8076 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
8077 const char *ruby_malloc_info_file;
8078 int ruby_malloc_info_line;
8079 #endif
8080
8081 static inline size_t
8082 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
8083 {
8084 if (size == 0) size = 1;
8085
8086 #if CALC_EXACT_MALLOC_SIZE
8087 size += sizeof(struct malloc_obj_info);
8088 #endif
8089
8090 return size;
8091 }
8092
8093 static inline void *
8094 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
8095 {
8096 size = objspace_malloc_size(objspace, mem, size);
8097 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
8098
8099 #if CALC_EXACT_MALLOC_SIZE
8100 {
8101 struct malloc_obj_info *info = mem;
8102 info->size = size;
8103 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
8104 info->gen = objspace->profile.count;
8105 info->file = ruby_malloc_info_file;
8106 info->line = info->file ? ruby_malloc_info_line : 0;
8107 #else
8108 info->file = NULL;
8109 #endif
8110 mem = info + 1;
8111 }
8112 #endif
8113
8114 return mem;
8115 }
8116
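/* Run the malloc stress hook, evaluate `alloc`, and if it fails perform a
 * full immediate GC (with the GVL) and retry once; if the retry also fails,
 * or GC could not run, report out-of-memory via ruby_memerror(). */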
8117 #define TRY_WITH_GC(alloc) do { \
8118 objspace_malloc_gc_stress(objspace); \
8119 if (!(alloc) && \
8120 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
8121 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
8122 GPR_FLAG_MALLOC) || \
8123 !(alloc))) { \
8124 ruby_memerror(); \
8125 } \
8126 } while (0)
8127
8128 /* These functions shouldn't be called directly.
8129 * objspace_* functions do not check allocation size.
8130 */
8131 static void *
8132 objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
8133 {
8134 void *mem;
8135
8136 size = objspace_malloc_prepare(objspace, size);
8137 TRY_WITH_GC(mem = malloc(size));
8138 RB_DEBUG_COUNTER_INC(heap_xmalloc);
8139 return objspace_malloc_fixup(objspace, mem, size);
8140 }
8141
8142 static inline size_t
8143 xmalloc2_size(const size_t count, const size_t elsize)
8144 {
8145 size_t ret;
8146 if (rb_mul_size_overflow(count, elsize, SSIZE_MAX, &ret)) {
8147 ruby_malloc_size_overflow(count, elsize);
8148 }
8149 return ret;
8150 }
8151
8152 static void *
8153 objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
8154 {
8155 void *mem;
8156
8157 if (!ptr) return objspace_xmalloc0(objspace, new_size);
8158
8159 /*
8160 * The behavior of realloc(ptr, 0) is implementation defined.
8161 * Therefore we don't use realloc(ptr, 0) for portability reasons.
8162 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
8163 */
8164 if (new_size == 0) {
8165 objspace_xfree(objspace, ptr, old_size);
8166 return 0;
8167 }
8168
8169 #if CALC_EXACT_MALLOC_SIZE
8170 {
8171 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8172 new_size += sizeof(struct malloc_obj_info);
8173 ptr = info;
8174 old_size = info->size;
8175 }
8176 #endif
8177
8178 old_size = objspace_malloc_size(objspace, ptr, old_size);
8179 TRY_WITH_GC(mem = realloc(ptr, new_size));
8180 new_size = objspace_malloc_size(objspace, mem, new_size);
8181
8182 #if CALC_EXACT_MALLOC_SIZE
8183 {
8184 struct malloc_obj_info *info = mem;
8185 info->size = new_size;
8186 mem = info + 1;
8187 }
8188 #endif
8189
8190 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
8191
8192 RB_DEBUG_COUNTER_INC(heap_xrealloc);
8193 return mem;
8194 }
8195
8196 #if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
8197
8198 #define MALLOC_INFO_GEN_SIZE 100
8199 #define MALLOC_INFO_SIZE_SIZE 10
8200 static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
8201 static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
8202 static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
8203 static st_table *malloc_info_file_table;
8204
8205 static int
8206 mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
8207 {
8208 const char *file = (void *)key;
8209 const size_t *data = (void *)val;
8210
8211 fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);
8212
8213 return ST_CONTINUE;
8214 }
8215
8216 __attribute__((destructor))
8217 void
8218 rb_malloc_info_show_results(void)
8219 {
8220 int i;
8221
8222 fprintf(stderr, "* malloc_info gen statistics\n");
8223 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
8224 if (i == MALLOC_INFO_GEN_SIZE-1) {
8225 fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
8226 }
8227 else {
8228 fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
8229 }
8230 }
8231
8232 fprintf(stderr, "* malloc_info size statistics\n");
8233 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
8234 int s = 16 << i;
8235 fprintf(stderr, "%d\t%d\n", (int)s, (int)malloc_info_size[i]);
8236 }
8237 fprintf(stderr, "more\t%d\n", (int)malloc_info_size[i]);
8238
8239 if (malloc_info_file_table) {
8240 fprintf(stderr, "* malloc_info file statistics\n");
8241 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
8242 }
8243 }
8244 #else
8245 void
8246 rb_malloc_info_show_results(void)
8247 {
8248 }
8249 #endif
8250
8251 static void
8252 objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
8253 {
8254 #if CALC_EXACT_MALLOC_SIZE
8255 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8256 ptr = info;
8257 old_size = info->size;
8258
8259 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
8260 {
8261 int gen = (int)(objspace->profile.count - info->gen);
8262 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
8263 int i;
8264
8265 malloc_info_gen_cnt[gen_index]++;
8266 malloc_info_gen_size[gen_index] += info->size;
8267
8268 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
8269 size_t s = 16 << i;
8270 if (info->size <= s) {
8271 malloc_info_size[i]++;
8272 goto found;
8273 }
8274 }
8275 malloc_info_size[i]++;
8276 found:;
8277
8278 {
8279 st_data_t key = (st_data_t)info->file;
8280 size_t *data;
8281
8282 if (malloc_info_file_table == NULL) {
8283 malloc_info_file_table = st_init_numtable_with_size(1024);
8284 }
8285 if (st_lookup(malloc_info_file_table, key, (st_data_t *)&data)) {
8286 /* hit */
8287 }
8288 else {
8289 data = malloc(sizeof(size_t) * 2);
8290 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
8291 data[0] = data[1] = 0;
8292 st_insert(malloc_info_file_table, key, (st_data_t)data);
8293 }
8294 data[0] ++;
8295 data[1] += info->size;
8296 };
8297 #if 0 /* verbose output */
8298 if (gen >= 2) {
8299 if (info->file) {
8300 fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
8301 }
8302 else {
8303 fprintf(stderr, "free - size:%d, gen:%d\n", (int)info->size, gen);
8304 }
8305 }
8306 #endif
8307 }
8308 #endif
8309 #endif
8310 old_size = objspace_malloc_size(objspace, ptr, old_size);
8311
8312 free(ptr);
8313 RB_DEBUG_COUNTER_INC(heap_xfree);
8314
8315 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
8316 }
8317
8318 static void *
8319 ruby_xmalloc0(size_t size)
8320 {
8321 return objspace_xmalloc0(&rb_objspace, size);
8322 }
8323
8324 void *
8325 ruby_xmalloc_body(size_t size)
8326 {
8327 if ((ssize_t)size < 0) {
8328 negative_size_allocation_error("too large allocation size");
8329 }
8330 return ruby_xmalloc0(size);
8331 }
8332
8333 void
8334 ruby_malloc_size_overflow(size_t count, size_t elsize)
8335 {
8336 rb_raise(rb_eArgError,
8337 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
8338 count, elsize);
8339 }
8340
8341 void *
8342 ruby_xmalloc2_body(size_t n, size_t size)
8343 {
8344 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
8345 }
8346
8347 static void *
8348 objspace_xcalloc(rb_objspace_t *objspace, size_t size)
8349 {
8350 void *mem;
8351
8352 size = objspace_malloc_prepare(objspace, size);
8353 TRY_WITH_GC(mem = calloc(1, size));
8354 return objspace_malloc_fixup(objspace, mem, size);
8355 }
8356
8357 void *
8358 ruby_xcalloc_body(size_t n, size_t size)
8359 {
8360 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
8361 }
8362
8363 #ifdef ruby_sized_xrealloc
8364 #undef ruby_sized_xrealloc
8365 #endif
8366 void *
8367 ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
8368 {
8369 if ((ssize_t)new_size < 0) {
8370 negative_size_allocation_error("too large allocation size");
8371 }
8372
8373 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
8374 }
8375
8376 void *
8377 ruby_xrealloc_body(void *ptr, size_t new_size)
8378 {
8379 return ruby_sized_xrealloc(ptr, new_size, 0);
8380 }
8381
8382 #ifdef ruby_sized_xrealloc2
8383 #undef ruby_sized_xrealloc2
8384 #endif
8385 void *
8386 ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
8387 {
8388 size_t len = size * n;
8389 if (n != 0 && size != len / n) {
8390 rb_raise(rb_eArgError, "realloc: possible integer overflow");
8391 }
8392 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
8393 }
8394
8395 void *
8396 ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
8397 {
8398 return ruby_sized_xrealloc2(ptr, n, size, 0);
8399 }
8400
8401 #ifdef ruby_sized_xfree
8402 #undef ruby_sized_xfree
8403 #endif
8404 void
8405 ruby_sized_xfree(void *x, size_t size)
8406 {
8407 if (x) {
8408 objspace_xfree(&rb_objspace, x, size);
8409 }
8410 }
8411
8412 void
8413 ruby_xfree(void *x)
8414 {
8415 ruby_sized_xfree(x, 0);
8416 }
8417
8418 /* Mimic ruby_xmalloc, but without requiring rb_objspace;
8419 * should return a pointer suitable for ruby_xfree.
8420 */
8421 void *
8422 ruby_mimmalloc(size_t size)
8423 {
8424 void *mem;
8425 #if CALC_EXACT_MALLOC_SIZE
8426 size += sizeof(struct malloc_obj_info);
8427 #endif
8428 mem = malloc(size);
8429 #if CALC_EXACT_MALLOC_SIZE
8430 /* set 0 for consistency of allocated_size/allocations */
8431 {
8432 struct malloc_obj_info *info = mem;
8433 info->size = 0;
8434 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
8435 info->gen = 0;
8436 info->file = NULL;
8437 info->line = 0;
8438 #else
8439 info->file = NULL;
8440 #endif
8441 mem = info + 1;
8442 }
8443 #endif
8444 return mem;
8445 }
8446
8447 void
8448 ruby_mimfree(void *ptr)
8449 {
8450 #if CALC_EXACT_MALLOC_SIZE
8451 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8452 ptr = info;
8453 #endif
8454 free(ptr);
8455 }
8456
8457 void *
8458 rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
8459 {
8460 void *ptr;
8461 VALUE imemo;
8462 rb_imemo_tmpbuf_t *tmpbuf;
8463
8464 /* Keep the order: allocate an empty imemo first, then xmalloc, to
8465 * avoid a potential memory leak */
8466 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
8467 *store = imemo;
8468 ptr = ruby_xmalloc0(size);
8469 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
8470 tmpbuf->ptr = ptr;
8471 tmpbuf->cnt = cnt;
8472 return ptr;
8473 }
8474
8475 void *
8476 rb_alloc_tmp_buffer(volatile VALUE *store, long len)
8477 {
8478 long cnt;
8479
8480 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
8481 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
8482 }
8483
8484 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
8485 }
8486
8487 void
8488 rb_free_tmp_buffer(volatile VALUE *store)
8489 {
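/* Detach the imemo from the store and the buffer from the imemo with atomic
 * exchanges so the buffer is released at most once, even if the imemo is
 * reclaimed elsewhere. */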
8490 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
8491 if (s) {
8492 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
8493 s->cnt = 0;
8494 ruby_xfree(ptr);
8495 }
8496 }
8497
8498 #if MALLOC_ALLOCATED_SIZE
8499 /*
8500 * call-seq:
8501 * GC.malloc_allocated_size -> Integer
8502 *
8503 * Returns the size of memory allocated by malloc().
8504 *
8505 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
8506 */
8507
8508 static VALUE
8509 gc_malloc_allocated_size(VALUE self)
8510 {
8511 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
8512 }
8513
8514 /*
8515 * call-seq:
8516 * GC.malloc_allocations -> Integer
8517 *
8518 * Returns the number of malloc() allocations.
8519 *
8520 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
8521 */
8522
8523 static VALUE
8524 gc_malloc_allocations(VALUE self)
8525 {
8526 return UINT2NUM(rb_objspace.malloc_params.allocations);
8527 }
8528 #endif
8529
8530 void
8531 rb_gc_adjust_memory_usage(ssize_t diff)
8532 {
8533 rb_objspace_t *objspace = &rb_objspace;
8534 if (diff > 0) {
8535 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
8536 }
8537 else if (diff < 0) {
8538 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
8539 }
8540 }
8541
8542 /*
8543 ------------------------------ WeakMap ------------------------------
8544 */
8545
8546 struct weakmap {
8547 st_table *obj2wmap; /* obj -> [ref,...] */
8548 st_table *wmap2obj; /* ref -> obj */
8549 VALUE final;
8550 };
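/* Each value in obj2wmap is a malloc'ed VALUE array whose element 0 holds the
 * number of weak references that follow it (ptr[1] .. ptr[ptr[0]]). */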
8551
8552 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
8553
8554 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
8555 static int
8556 wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
8557 {
8558 rb_objspace_t *objspace = (rb_objspace_t *)arg;
8559 VALUE obj = (VALUE)val;
8560 if (!is_live_object(objspace, obj)) return ST_DELETE;
8561 return ST_CONTINUE;
8562 }
8563 #endif
8564
8565 static void
8566 wmap_mark(void *ptr)
8567 {
8568 struct weakmap *w = ptr;
8569 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
8570 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
8571 #endif
8572 rb_gc_mark(w->final);
8573 }
8574
8575 static int
8576 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
8577 {
8578 VALUE *ptr = (VALUE *)val;
8579 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
8580 return ST_CONTINUE;
8581 }
8582
8583 static void
8584 wmap_free(void *ptr)
8585 {
8586 struct weakmap *w = ptr;
8587 st_foreach(w->obj2wmap, wmap_free_map, 0);
8588 st_free_table(w->obj2wmap);
8589 st_free_table(w->wmap2obj);
8590 }
8591
8592 static int
8593 wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
8594 {
8595 VALUE *ptr = (VALUE *)val;
8596 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
8597 return ST_CONTINUE;
8598 }
8599
8600 static size_t
8601 wmap_memsize(const void *ptr)
8602 {
8603 size_t size;
8604 const struct weakmap *w = ptr;
8605 size = sizeof(*w);
8606 size += st_memsize(w->obj2wmap);
8607 size += st_memsize(w->wmap2obj);
8608 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
8609 return size;
8610 }
8611
8612 static const rb_data_type_t weakmap_type = {
8613 "weakmap",
8614 {
8615 wmap_mark,
8616 wmap_free,
8617 wmap_memsize,
8618 },
8619 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
8620 };
8621
8622 static VALUE
8623 wmap_allocate(VALUE klass)
8624 {
8625 struct weakmap *w;
8626 VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
8627 w->obj2wmap = st_init_numtable();
8628 w->wmap2obj = st_init_numtable();
8629 w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
8630 return obj;
8631 }
8632
8633 static int
8634 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
8635 {
8636 VALUE wmap, *ptr, size, i, j;
8637 if (!existing) return ST_STOP;
8638 wmap = (VALUE)arg, ptr = (VALUE *)*value;
8639 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
8640 if (ptr[i] != wmap) {
8641 ptr[j++] = ptr[i];
8642 }
8643 }
8644 if (j == 1) {
8645 ruby_sized_xfree(ptr, i * sizeof(VALUE));
8646 return ST_DELETE;
8647 }
8648 if (j < i) {
8649 ptr = ruby_sized_xrealloc2(ptr, j + 1, sizeof(VALUE), i);
8650 ptr[0] = j;
8651 *value = (st_data_t)ptr;
8652 }
8653 return ST_CONTINUE;
8654 }
8655
8656 /* :nodoc: */
8657 static VALUE
8658 wmap_finalize(VALUE self, VALUE objid)
8659 {
8660 st_data_t orig, wmap, data;
8661 VALUE obj, *rids, i, size;
8662 struct weakmap *w;
8663
8664 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8665 /* Get reference from object id. */
8666 obj = obj_id_to_ref(objid);
8667
8668 /* obj is the original referenced object and/or a weak reference. */
8669 orig = (st_data_t)obj;
8670 if (st_delete(w->obj2wmap, &orig, &data)) {
8671 rids = (VALUE *)data;
8672 size = *rids++;
8673 for (i = 0; i < size; ++i) {
8674 wmap = (st_data_t)rids[i];
8675 st_delete(w->wmap2obj, &wmap, NULL);
8676 }
8677 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
8678 }
8679
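/* If obj itself was registered as a weak reference, drop it from wmap2obj
 * and remove it from the back-reference array kept for its target. */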
8680 wmap = (st_data_t)obj;
8681 if (st_delete(w->wmap2obj, &wmap, &orig)) {
8682 wmap = (st_data_t)obj;
8683 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
8684 }
8685 return self;
8686 }
8687
8688 struct wmap_iter_arg {
8689 rb_objspace_t *objspace;
8690 VALUE value;
8691 };
8692
8693 static int
8694 wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
8695 {
8696 VALUE str = (VALUE)arg;
8697 VALUE k = (VALUE)key, v = (VALUE)val;
8698
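/* wmap_inspect() seeds the string with '-'; the first entry flips that byte
 * to '#', so later entries know to emit ", " instead of ": ". */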
8699 if (RSTRING_PTR(str)[0] == '#') {
8700 rb_str_cat2(str, ", ");
8701 }
8702 else {
8703 rb_str_cat2(str, ": ");
8704 RSTRING_PTR(str)[0] = '#';
8705 }
8706 k = SPECIAL_CONST_P(k) ? rb_inspect(k) : rb_any_to_s(k);
8707 rb_str_append(str, k);
8708 rb_str_cat2(str, " => ");
8709 v = SPECIAL_CONST_P(v) ? rb_inspect(v) : rb_any_to_s(v);
8710 rb_str_append(str, v);
8711 OBJ_INFECT(str, k);
8712 OBJ_INFECT(str, v);
8713
8714 return ST_CONTINUE;
8715 }
8716
8717 static VALUE
8718 wmap_inspect(VALUE self)
8719 {
8720 VALUE str;
8721 VALUE c = rb_class_name(CLASS_OF(self));
8722 struct weakmap *w;
8723
8724 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8725 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
8726 if (w->wmap2obj) {
8727 st_foreach(w->wmap2obj, wmap_inspect_i, str);
8728 }
8729 RSTRING_PTR(str)[0] = '#';
8730 rb_str_cat2(str, ">");
8731 return str;
8732 }
8733
8734 static int
8735 wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
8736 {
8737 rb_objspace_t *objspace = (rb_objspace_t *)arg;
8738 VALUE obj = (VALUE)val;
8739 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8740 rb_yield_values(2, (VALUE)key, obj);
8741 }
8742 return ST_CONTINUE;
8743 }
8744
8745 /* Iterates over keys and objects in a weakly referenced object */
8746 static VALUE
8747 wmap_each(VALUE self)
8748 {
8749 struct weakmap *w;
8750 rb_objspace_t *objspace = &rb_objspace;
8751
8752 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8753 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
8754 return self;
8755 }
8756
8757 static int
8758 wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
8759 {
8760 rb_objspace_t *objspace = (rb_objspace_t *)arg;
8761 VALUE obj = (VALUE)val;
8762 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8763 rb_yield((VALUE)key);
8764 }
8765 return ST_CONTINUE;
8766 }
8767
8768 /* Iterates over keys in a weakly referenced object */
8769 static VALUE
8770 wmap_each_key(VALUE self)
8771 {
8772 struct weakmap *w;
8773 rb_objspace_t *objspace = &rb_objspace;
8774
8775 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8776 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
8777 return self;
8778 }
8779
8780 static int
8781 wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
8782 {
8783 rb_objspace_t *objspace = (rb_objspace_t *)arg;
8784 VALUE obj = (VALUE)val;
8785 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8786 rb_yield(obj);
8787 }
8788 return ST_CONTINUE;
8789 }
8790
8791 /* Iterates over values in a weakly referenced object */
8792 static VALUE
8793 wmap_each_value(VALUE self)
8794 {
8795 struct weakmap *w;
8796 rb_objspace_t *objspace = &rb_objspace;
8797
8798 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8799 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
8800 return self;
8801 }
8802
8803 static int
8804 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
8805 {
8806 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
8807 rb_objspace_t *objspace = argp->objspace;
8808 VALUE ary = argp->value;
8809 VALUE obj = (VALUE)val;
8810 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8811 rb_ary_push(ary, (VALUE)key);
8812 }
8813 return ST_CONTINUE;
8814 }
8815
8816 /* Returns an array of keys in a weakly referenced object */
8817 static VALUE
8818 wmap_keys(VALUE self)
8819 {
8820 struct weakmap *w;
8821 struct wmap_iter_arg args;
8822
8823 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8824 args.objspace = &rb_objspace;
8825 args.value = rb_ary_new();
8826 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
8827 return args.value;
8828 }
8829
8830 static int
8831 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
8832 {
8833 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
8834 rb_objspace_t *objspace = argp->objspace;
8835 VALUE ary = argp->value;
8836 VALUE obj = (VALUE)val;
8837 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8838 rb_ary_push(ary, obj);
8839 }
8840 return ST_CONTINUE;
8841 }
8842
8843 /* Returns an array of values in a weakly referenced object */
8844 static VALUE
8845 wmap_values(VALUE self)
8846 {
8847 struct weakmap *w;
8848 struct wmap_iter_arg args;
8849
8850 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8851 args.objspace = &rb_objspace;
8852 args.value = rb_ary_new();
8853 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
8854 return args.value;
8855 }
8856
8857 static int
8858 wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
8859 {
8860 VALUE size, *ptr, *optr;
8861 if (existing) {
8862 size = (ptr = optr = (VALUE *)*val)[0];
8863 ++size;
8864 ptr = ruby_sized_xrealloc2(ptr, size + 1, sizeof(VALUE), size);
8865 }
8866 else {
8867 optr = 0;
8868 size = 1;
8869 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
8870 }
8871 ptr[0] = size;
8872 ptr[size] = (VALUE)arg;
8873 if (ptr == optr) return ST_STOP;
8874 *val = (st_data_t)ptr;
8875 return ST_CONTINUE;
8876 }
8877
8878 /* Creates a weak reference from the given key to the given value */
8879 static VALUE
8880 wmap_aset(VALUE self, VALUE wmap, VALUE orig)
8881 {
8882 struct weakmap *w;
8883
8884 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8885 should_be_finalizable(orig);
8886 should_be_finalizable(wmap);
8887 define_final0(orig, w->final);
8888 define_final0(wmap, w->final);
8889 st_update(w->obj2wmap, (st_data_t)orig, wmap_aset_update, wmap);
8890 st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
8891 return nonspecial_obj_id(orig);
8892 }
8893
8894 /* Retrieves a weakly referenced object with the given key */
8895 static VALUE
8896 wmap_aref(VALUE self, VALUE wmap)
8897 {
8898 st_data_t data;
8899 VALUE obj;
8900 struct weakmap *w;
8901 rb_objspace_t *objspace = &rb_objspace;
8902
8903 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8904 if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
8905 obj = (VALUE)data;
8906 if (!is_id_value(objspace, obj)) return Qnil;
8907 if (!is_live_object(objspace, obj)) return Qnil;
8908 return obj;
8909 }
8910
8911 /* Returns +true+ if +key+ is registered */
8912 static VALUE
8913 wmap_has_key(VALUE self, VALUE key)
8914 {
8915 return NIL_P(wmap_aref(self, key)) ? Qfalse : Qtrue;
8916 }
8917
8918 /* Returns the number of referenced objects */
8919 static VALUE
8920 wmap_size(VALUE self)
8921 {
8922 struct weakmap *w;
8923 st_index_t n;
8924
8925 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
8926 n = w->wmap2obj->num_entries;
8927 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
8928 return ULONG2NUM(n);
8929 #else
8930 return ULL2NUM(n);
8931 #endif
8932 }
8933
8934 /*
8935 ------------------------------ GC profiler ------------------------------
8936 */
8937
8938 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
8939
8940 /* returns user CPU time in seconds */
8941 static double
8942 getrusage_time(void)
8943 {
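/* Prefer clock_gettime(CLOCK_PROCESS_CPUTIME_ID); fall back to getrusage()
 * and then to GetProcessTimes() on Windows, returning 0.0 if none of them is
 * available. */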
8944 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
8945 {
8946 static int try_clock_gettime = 1;
8947 struct timespec ts;
8948 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
8949 return ts.tv_sec + ts.tv_nsec * 1e-9;
8950 }
8951 else {
8952 try_clock_gettime = 0;
8953 }
8954 }
8955 #endif
8956
8957 #ifdef RUSAGE_SELF
8958 {
8959 struct rusage usage;
8960 struct timeval time;
8961 if (getrusage(RUSAGE_SELF, &usage) == 0) {
8962 time = usage.ru_utime;
8963 return time.tv_sec + time.tv_usec * 1e-6;
8964 }
8965 }
8966 #endif
8967
8968 #ifdef _WIN32
8969 {
8970 FILETIME creation_time, exit_time, kernel_time, user_time;
8971 ULARGE_INTEGER ui;
8972 LONG_LONG q;
8973 double t;
8974
8975 if (GetProcessTimes(GetCurrentProcess(),
8976 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
8977 memcpy(&ui, &user_time, sizeof(FILETIME));
8978 q = ui.QuadPart / 10L;
8979 t = (DWORD)(q % 1000000L) * 1e-6;
8980 q /= 1000000L;
8981 #ifdef __GNUC__
8982 t += q;
8983 #else
8984 t += (double)(DWORD)(q >> 16) * (1 << 16);
8985 t += (DWORD)q & ~(~0 << 16);
8986 #endif
8987 return t;
8988 }
8989 }
8990 #endif
8991
8992 return 0.0;
8993 }
8994
8995 static inline void
8996 gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
8997 {
8998 if (objspace->profile.run) {
8999 size_t index = objspace->profile.next_index;
9000 gc_profile_record *record;
9001
9002 /* create new record */
9003 objspace->profile.next_index++;
9004
9005 if (!objspace->profile.records) {
9006 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
9007 objspace->profile.records = malloc(sizeof(gc_profile_record) * objspace->profile.size);
9008 }
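/* Grow the record array in chunks of 1000 entries once the initial
 * GC_PROFILE_RECORD_DEFAULT_SIZE records are exhausted. */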
9009 if (index >= objspace->profile.size) {
9010 void *ptr;
9011 objspace->profile.size += 1000;
9012 ptr = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
9013 if (!ptr) rb_memerror();
9014 objspace->profile.records = ptr;
9015 }
9016 if (!objspace->profile.records) {
9017 rb_bug("gc_profile malloc or realloc miss");
9018 }
9019 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
9020 MEMZERO(record, gc_profile_record, 1);
9021
9022 /* setup before-GC parameter */
9023 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
9024 #if MALLOC_ALLOCATED_SIZE
9025 record->allocated_size = malloc_allocated_size;
9026 #endif
9027 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
9028 #ifdef RUSAGE_SELF
9029 {
9030 struct rusage usage;
9031 if (getrusage(RUSAGE_SELF, &usage) == 0) {
9032 record->maxrss = usage.ru_maxrss;
9033 record->minflt = usage.ru_minflt;
9034 record->majflt = usage.ru_majflt;
9035 }
9036 }
9037 #endif
9038 #endif
9039 }
9040 }
9041
9042 static inline void
9043 gc_prof_timer_start(rb_objspace_t *objspace)
9044 {
9045 if (gc_prof_enabled(objspace)) {
9046 gc_profile_record *record = gc_prof_record(objspace);
9047 #if GC_PROFILE_MORE_DETAIL
9048 record->prepare_time = objspace->profile.prepare_time;
9049 #endif
9050 record->gc_time = 0;
9051 record->gc_invoke_time = getrusage_time();
9052 }
9053 }
9054
9055 static double
9056 elapsed_time_from(double time)
9057 {
9058 double now = getrusage_time();
9059 if (now > time) {
9060 return now - time;
9061 }
9062 else {
9063 return 0;
9064 }
9065 }
9066
9067 static inline void
9068 gc_prof_timer_stop(rb_objspace_t *objspace)
9069 {
9070 if (gc_prof_enabled(objspace)) {
9071 gc_profile_record *record = gc_prof_record(objspace);
9072 record->gc_time = elapsed_time_from(record->gc_invoke_time);
9073 record->gc_invoke_time -= objspace->profile.invoke_time;
9074 }
9075 }
9076
9077 #define RUBY_DTRACE_GC_HOOK(name) \
9078 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
9079 static inline void
9080 gc_prof_mark_timer_start(rb_objspace_t *objspace)
9081 {
9082 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
9083 #if GC_PROFILE_MORE_DETAIL
9084 if (gc_prof_enabled(objspace)) {
9085 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
9086 }
9087 #endif
9088 }
9089
9090 static inline void
9091 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
9092 {
9093 RUBY_DTRACE_GC_HOOK(MARK_END);
9094 #if GC_PROFILE_MORE_DETAIL
9095 if (gc_prof_enabled(objspace)) {
9096 gc_profile_record *record = gc_prof_record(objspace);
9097 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
9098 }
9099 #endif
9100 }
9101
9102 static inline void
9103 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
9104 {
9105 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
9106 if (gc_prof_enabled(objspace)) {
9107 gc_profile_record *record = gc_prof_record(objspace);
9108
9109 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
9110 objspace->profile.gc_sweep_start_time = getrusage_time();
9111 }
9112 }
9113 }
9114
9115 static inline void
9116 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
9117 {
9118 RUBY_DTRACE_GC_HOOK(SWEEP_END);
9119
9120 if (gc_prof_enabled(objspace)) {
9121 double sweep_time;
9122 gc_profile_record *record = gc_prof_record(objspace);
9123
9124 if (record->gc_time > 0) {
9125 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
9126 /* need to accumulate GC time for lazy sweep after gc() */
9127 record->gc_time += sweep_time;
9128 }
9129 else if (GC_PROFILE_MORE_DETAIL) {
9130 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
9131 }
9132
9133 #if GC_PROFILE_MORE_DETAIL
9134 record->gc_sweep_time += sweep_time;
9135 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
9136 #endif
9137 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
9138 }
9139 }
9140
9141 static inline void
9142 gc_prof_set_malloc_info(rb_objspace_t *objspace)
9143 {
9144 #if GC_PROFILE_MORE_DETAIL
9145 if (gc_prof_enabled(objspace)) {
9146 gc_profile_record *record = gc_prof_record(objspace);
9147 record->allocate_increase = malloc_increase;
9148 record->allocate_limit = malloc_limit;
9149 }
9150 #endif
9151 }
9152
9153 static inline void
9154 gc_prof_set_heap_info(rb_objspace_t *objspace)
9155 {
9156 if (gc_prof_enabled(objspace)) {
9157 gc_profile_record *record = gc_prof_record(objspace);
9158 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
9159 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
9160
9161 #if GC_PROFILE_MORE_DETAIL
9162 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
9163 record->heap_live_objects = live;
9164 record->heap_free_objects = total - live;
9165 #endif
9166
9167 record->heap_total_objects = total;
9168 record->heap_use_size = live * sizeof(RVALUE);
9169 record->heap_total_size = total * sizeof(RVALUE);
9170 }
9171 }
9172
9173 /*
9174 * call-seq:
9175 * GC::Profiler.clear -> nil
9176 *
9177 * Clears the GC profiler data.
9178 *
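*  A minimal usage sketch (assumes the profiler has already recorded data;
*  +raw_data+ is empty again afterwards):
*
*     GC::Profiler.enable
*     GC.start
*     GC::Profiler.clear
*     GC::Profiler.raw_data  #=> []
*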
9179 */
9180
9181 static VALUE
9182 gc_profile_clear(void)
9183 {
9184 rb_objspace_t *objspace = &rb_objspace;
9185 if (GC_PROFILE_RECORD_DEFAULT_SIZE * 2 < objspace->profile.size) {
9186 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE * 2;
9187 objspace->profile.records = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
9188 if (!objspace->profile.records) {
9189 rb_memerror();
9190 }
9191 }
9192 MEMZERO(objspace->profile.records, gc_profile_record, objspace->profile.size);
9193 objspace->profile.next_index = 0;
9194 objspace->profile.current_record = 0;
9195 return Qnil;
9196 }
9197
9198 /*
9199 * call-seq:
9200 * GC::Profiler.raw_data -> [Hash, ...]
9201 *
9202 * Returns an Array of individual raw profile data Hashes ordered
9203 * from earliest to latest by +:GC_INVOKE_TIME+.
9204 *
9205 * For example:
9206 *
9207 * [
9208 * {
9209 * :GC_TIME=>1.3000000000000858e-05,
9210 * :GC_INVOKE_TIME=>0.010634999999999999,
9211 * :HEAP_USE_SIZE=>289640,
9212 * :HEAP_TOTAL_SIZE=>588960,
9213 * :HEAP_TOTAL_OBJECTS=>14724,
9214 * :GC_IS_MARKED=>false
9215 * },
9216 * # ...
9217 * ]
9218 *
9219 * The keys mean:
9220 *
9221 * +:GC_TIME+::
9222 * Time elapsed in seconds for this GC run
9223 * +:GC_INVOKE_TIME+::
9224 * Time elapsed in seconds from startup to when the GC was invoked
9225 * +:HEAP_USE_SIZE+::
9226 * Total bytes of heap used
9227 * +:HEAP_TOTAL_SIZE+::
9228 * Total size of heap in bytes
9229 * +:HEAP_TOTAL_OBJECTS+::
9230 * Total number of objects
9231 * +:GC_IS_MARKED+::
9232 * Returns +true+ if the GC is in mark phase
9233 *
9234 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
9235 * to the following hash keys:
9236 *
9237 * +:GC_MARK_TIME+::
9238 * +:GC_SWEEP_TIME+::
9239 * +:ALLOCATE_INCREASE+::
9240 * +:ALLOCATE_LIMIT+::
9241 * +:HEAP_USE_PAGES+::
9242 * +:HEAP_LIVE_OBJECTS+::
9243 * +:HEAP_FREE_OBJECTS+::
9244 * +:HAVE_FINALIZE+::
9245 *
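*  A short sketch of collecting and inspecting this data (the values shown
*  in the example above are illustrative only):
*
*     GC::Profiler.enable
*     GC.start
*     GC::Profiler.raw_data.each { |rec| p rec[:GC_TIME] }
*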
9246 */
9247
9248 static VALUE
9249 gc_profile_record_get(void)
9250 {
9251 VALUE prof;
9252 VALUE gc_profile = rb_ary_new();
9253 size_t i;
9254 rb_objspace_t *objspace = (&rb_objspace);
9255
9256 if (!objspace->profile.run) {
9257 return Qnil;
9258 }
9259
9260 for (i = 0; i < objspace->profile.next_index; i++) {
9261 gc_profile_record *record = &objspace->profile.records[i];
9262
9263 prof = rb_hash_new();
9264 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
9265 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
9266 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
9267 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
9268 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
9269 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
9270 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
9271 #if GC_PROFILE_MORE_DETAIL
9272 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
9273 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
9274 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
9275 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
9276 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
9277 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
9278 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
9279
9280 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
9281 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
9282
9283 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
9284 #endif
9285
9286 #if RGENGC_PROFILE > 0
9287 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
9288 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
9289 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
9290 #endif
9291 rb_ary_push(gc_profile, prof);
9292 }
9293
9294 return gc_profile;
9295 }
9296
9297 #if GC_PROFILE_MORE_DETAIL
9298 #define MAJOR_REASON_MAX 0x10
9299
9300 static char *
9301 gc_profile_dump_major_reason(int flags, char *buff)
9302 {
9303 int reason = flags & GPR_FLAG_MAJOR_MASK;
9304 int i = 0;
9305
9306 if (reason == GPR_FLAG_NONE) {
9307 buff[0] = '-';
9308 buff[1] = 0;
9309 }
9310 else {
9311 #define C(x, s) \
9312 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
9313 buff[i++] = #x[0]; \
9314 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
9315 buff[i] = 0; \
9316 }
9317 C(NOFREE, N);
9318 C(OLDGEN, O);
9319 C(SHADY, S);
9320 #if RGENGC_ESTIMATE_OLDMALLOC
9321 C(OLDMALLOC, M);
9322 #endif
9323 #undef C
9324 }
9325 return buff;
9326 }
9327 #endif
9328
9329 static void
9330 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
9331 {
9332 rb_objspace_t *objspace = &rb_objspace;
9333 size_t count = objspace->profile.next_index;
9334 #ifdef MAJOR_REASON_MAX
9335 char reason_str[MAJOR_REASON_MAX];
9336 #endif
9337
9338 if (objspace->profile.run && count /* > 1 */) {
9339 size_t i;
9340 const gc_profile_record *record;
9341
9342 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
9343 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
9344
9345 for (i = 0; i < count; i++) {
9346 record = &objspace->profile.records[i];
9347 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
9348 i+1, record->gc_invoke_time, record->heap_use_size,
9349 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
9350 }
9351
9352 #if GC_PROFILE_MORE_DETAIL
9353 append(out, rb_str_new_cstr("\n\n" \
9354 "More detail.\n" \
9355 "Prepare Time = Previously GC's rest sweep time\n"
9356 "Index Flags Allocate Inc. Allocate Limit"
9357 #if CALC_EXACT_MALLOC_SIZE
9358 " Allocated Size"
9359 #endif
9360 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
9361 #if RGENGC_PROFILE
9362 " OldgenObj RemNormObj RemShadObj"
9363 #endif
9364 #if GC_PROFILE_DETAIL_MEMORY
9365 " MaxRSS(KB) MinorFLT MajorFLT"
9366 #endif
9367 "\n"));
9368
9369 for (i = 0; i < count; i++) {
9370 record = &objspace->profile.records[i];
9371 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
9372 #if CALC_EXACT_MALLOC_SIZE
9373 " %15"PRIuSIZE
9374 #endif
9375 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
9376 #if RGENGC_PROFILE
9377 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
9378 #endif
9379 #if GC_PROFILE_DETAIL_MEMORY
9380 "%11ld %8ld %8ld"
9381 #endif
9382
9383 "\n",
9384 i+1,
9385 gc_profile_dump_major_reason(record->flags, reason_str),
9386 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
9387 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
9388 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
9389 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
9390 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
9391 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
9392 record->allocate_increase, record->allocate_limit,
9393 #if CALC_EXACT_MALLOC_SIZE
9394 record->allocated_size,
9395 #endif
9396 record->heap_use_pages,
9397 record->gc_mark_time*1000,
9398 record->gc_sweep_time*1000,
9399 record->prepare_time*1000,
9400
9401 record->heap_live_objects,
9402 record->heap_free_objects,
9403 record->removing_objects,
9404 record->empty_objects
9405 #if RGENGC_PROFILE
9406 ,
9407 record->old_objects,
9408 record->remembered_normal_objects,
9409 record->remembered_shady_objects
9410 #endif
9411 #if GC_PROFILE_DETAIL_MEMORY
9412 ,
9413 record->maxrss / 1024,
9414 record->minflt,
9415 record->majflt
9416 #endif
9417
9418 ));
9419 }
9420 #endif
9421 }
9422 }
9423
9424 /*
9425 * call-seq:
9426 * GC::Profiler.result -> String
9427 *
9428 * Returns a profile data report such as:
9429 *
9430 * GC 1 invokes.
9431 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
9432 * 1 0.012 159240 212940 10647 0.00000000000001530000
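*
*  The report above can be produced with a sketch like the following
*  (timings and sizes will differ from run to run):
*
*     GC::Profiler.enable
*     GC.start
*     puts GC::Profiler.result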
9433 */
9434
9435 static VALUE
9436 gc_profile_result(void)
9437 {
9438 VALUE str = rb_str_buf_new(0);
9439 gc_profile_dump_on(str, rb_str_buf_append);
9440 return str;
9441 }
9442
9443 /*
9444 * call-seq:
9445 * GC::Profiler.report
9446 * GC::Profiler.report(io)
9447 *
9448 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
9449 *
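*  For example (both calls are sketches; any writable IO object works for
*  the explicit form):
*
*     GC::Profiler.report           # prints to $stdout
*     GC::Profiler.report($stderr)  # prints to the given IO
*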
9450 */
9451
9452 static VALUE
9453 gc_profile_report(int argc, VALUE *argv, VALUE self)
9454 {
9455 VALUE out;
9456
9457 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
9458 gc_profile_dump_on(out, rb_io_write);
9459
9460 return Qnil;
9461 }
9462
9463 /*
9464 * call-seq:
9465 * GC::Profiler.total_time -> float
9466 *
9467 * The total time used for garbage collection in seconds
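*
*  A small sketch (the reported value depends on how much GC work has
*  happened since the profiler was enabled):
*
*     GC::Profiler.enable
*     GC.start
*     GC::Profiler.total_time  #=> e.g. 0.0001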
9468 */
9469
9470 static VALUE
9471 gc_profile_total_time(VALUE self)
9472 {
9473 double time = 0;
9474 rb_objspace_t *objspace = &rb_objspace;
9475
9476 if (objspace->profile.run && objspace->profile.next_index > 0) {
9477 size_t i;
9478 size_t count = objspace->profile.next_index;
9479
9480 for (i = 0; i < count; i++) {
9481 time += objspace->profile.records[i].gc_time;
9482 }
9483 }
9484 return DBL2NUM(time);
9485 }
9486
9487 /*
9488 * call-seq:
9489 * GC::Profiler.enabled? -> true or false
9490 *
9491 * The current status of GC profile mode.
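*
*  For example (assuming the profiler was not already enabled):
*
*     GC::Profiler.enabled?  #=> false
*     GC::Profiler.enable
*     GC::Profiler.enabled?  #=> true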
9492 */
9493
9494 static VALUE
9495 gc_profile_enable_get(VALUE self)
9496 {
9497 rb_objspace_t *objspace = &rb_objspace;
9498 return objspace->profile.run ? Qtrue : Qfalse;
9499 }
9500
9501 /*
9502 * call-seq:
9503 * GC::Profiler.enable -> nil
9504 *
9505 * Starts the GC profiler.
9506 *
9507 */
9508
9509 static VALUE
9510 gc_profile_enable(void)
9511 {
9512 rb_objspace_t *objspace = &rb_objspace;
9513 objspace->profile.run = TRUE;
9514 objspace->profile.current_record = 0;
9515 return Qnil;
9516 }
9517
9518 /*
9519 * call-seq:
9520 * GC::Profiler.disable -> nil
9521 *
9522 * Stops the GC profiler.
9523 *
9524 */
9525
9526 static VALUE
9527 gc_profile_disable(void)
9528 {
9529 rb_objspace_t *objspace = &rb_objspace;
9530
9531 objspace->profile.run = FALSE;
9532 objspace->profile.current_record = 0;
9533 return Qnil;
9534 }
9535
9536 /*
9537 ------------------------------ DEBUG ------------------------------
9538 */
9539
9540 static const char *
9541 type_name(int type, VALUE obj)
9542 {
9543 switch (type) {
9544 #define TYPE_NAME(t) case (t): return #t;
9545 TYPE_NAME(T_NONE);
9546 TYPE_NAME(T_OBJECT);
9547 TYPE_NAME(T_CLASS);
9548 TYPE_NAME(T_MODULE);
9549 TYPE_NAME(T_FLOAT);
9550 TYPE_NAME(T_STRING);
9551 TYPE_NAME(T_REGEXP);
9552 TYPE_NAME(T_ARRAY);
9553 TYPE_NAME(T_HASH);
9554 TYPE_NAME(T_STRUCT);
9555 TYPE_NAME(T_BIGNUM);
9556 TYPE_NAME(T_FILE);
9557 TYPE_NAME(T_MATCH);
9558 TYPE_NAME(T_COMPLEX);
9559 TYPE_NAME(T_RATIONAL);
9560 TYPE_NAME(T_NIL);
9561 TYPE_NAME(T_TRUE);
9562 TYPE_NAME(T_FALSE);
9563 TYPE_NAME(T_SYMBOL);
9564 TYPE_NAME(T_FIXNUM);
9565 TYPE_NAME(T_UNDEF);
9566 TYPE_NAME(T_IMEMO);
9567 TYPE_NAME(T_ICLASS);
9568 TYPE_NAME(T_ZOMBIE);
9569 case T_DATA:
9570 if (obj && rb_objspace_data_type_name(obj)) {
9571 return rb_objspace_data_type_name(obj);
9572 }
9573 return "T_DATA";
9574 #undef TYPE_NAME
9575 }
9576 return "unknown";
9577 }
9578
9579 static const char *
9580 obj_type_name(VALUE obj)
9581 {
9582 return type_name(TYPE(obj), obj);
9583 }
9584
9585 static const char *
9586 method_type_name(rb_method_type_t type)
9587 {
9588 switch (type) {
9589 case VM_METHOD_TYPE_ISEQ: return "iseq";
9590 case VM_METHOD_TYPE_ATTRSET: return "attrset";
9591 case VM_METHOD_TYPE_IVAR: return "ivar";
9592 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
9593 case VM_METHOD_TYPE_ALIAS: return "alias";
9594 case VM_METHOD_TYPE_REFINED: return "refined";
9595 case VM_METHOD_TYPE_CFUNC: return "cfunc";
9596 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
9597 case VM_METHOD_TYPE_MISSING: return "missing";
9598 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
9599 case VM_METHOD_TYPE_UNDEF: return "undef";
9600 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
9601 }
9602 rb_bug("method_type_name: unreachable (type: %d)", type);
9603 }
9604
9605 /* from array.c */
9606 # define ARY_SHARED_P(ary) \
9607 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
9608 FL_TEST((ary),ELTS_SHARED)!=0)
9609 # define ARY_EMBED_P(ary) \
9610 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
9611 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
9612
9613 static void
9614 rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
9615 {
9616 if (iseq->body && iseq->body->location.label) {
9617 VALUE path = rb_iseq_path(iseq);
9618 VALUE n = iseq->body->location.first_lineno;
9619 snprintf(buff, buff_size, "%s %s@%s:%d", buff,
9620 RSTRING_PTR(iseq->body->location.label),
9621 RSTRING_PTR(path),
9622 n ? FIX2INT(n) : 0 );
9623 }
9624 }
9625
9626 const char *
9627 rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
9628 {
9629 if (SPECIAL_CONST_P(obj)) {
9630 snprintf(buff, buff_size, "%s", obj_type_name(obj));
9631
9632 if (FIXNUM_P(obj)) {
9633 snprintf(buff, buff_size, "%s %ld", buff, FIX2LONG(obj));
9634 }
9635 else if (SYMBOL_P(obj)) {
9636 snprintf(buff, buff_size, "%s %s", buff, rb_id2name(SYM2ID(obj)));
9637 }
9638 }
9639 else {
9640 #define TF(c) ((c) != 0 ? "true" : "false")
9641 #define C(c, s) ((c) != 0 ? (s) : " ")
9642 const int type = BUILTIN_TYPE(obj);
9643 #if USE_RGENGC
9644 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
9645
9646 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
9647 snprintf(buff, buff_size, "%p [%d%s%s%s%s] %s",
9648 (void *)obj, age,
9649 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
9650 C(RVALUE_MARK_BITMAP(obj), "M"),
9651 C(RVALUE_MARKING_BITMAP(obj), "R"),
9652 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
9653 obj_type_name(obj));
9654 }
9655 else {
9656 /* fake */
9657 snprintf(buff, buff_size, "%p [%dXXXX] %s",
9658 (void *)obj, age,
9659 obj_type_name(obj));
9660 }
9661 #else
9662 snprintf(buff, buff_size, "%p [%s] %s",
9663 (void *)obj,
9664 C(RVALUE_MARK_BITMAP(obj), "M"),
9665 obj_type_name(obj));
9666 #endif
9667
9668 if (internal_object_p(obj)) {
9669 /* ignore */
9670 }
9671 else if (RBASIC(obj)->klass == 0) {
9672 snprintf(buff, buff_size, "%s (temporary internal)", buff);
9673 }
9674 else {
9675 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
9676 if (!NIL_P(class_path)) {
9677 snprintf(buff, buff_size, "%s (%s)", buff, RSTRING_PTR(class_path));
9678 }
9679 }
9680
9681 #if GC_DEBUG
9682 snprintf(buff, buff_size, "%s @%s:%d", buff, RANY(obj)->file, RANY(obj)->line);
9683 #endif
9684
9685 switch (type) {
9686 case T_NODE:
9687 UNEXPECTED_NODE(rb_raw_obj_info);
9688 break;
9689 case T_ARRAY:
9690 if (FL_TEST(obj, ELTS_SHARED)) {
9691 snprintf(buff, buff_size, "%s shared -> %s", buff,
9692 rb_obj_info(RARRAY(obj)->as.heap.aux.shared));
9693 }
9694 else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
9695 snprintf(buff, buff_size, "%s [%s%s] len: %d (embed)", buff,
9696 C(ARY_EMBED_P(obj), "E"),
9697 C(ARY_SHARED_P(obj), "S"),
9698 (int)RARRAY_LEN(obj));
9699 }
9700 else {
9701 snprintf(buff, buff_size, "%s [%s%s%s] len: %d, capa:%d ptr:%p", buff,
9702 C(ARY_EMBED_P(obj), "E"),
9703 C(ARY_SHARED_P(obj), "S"),
9704 C(RARRAY_TRANSIENT_P(obj), "T"),
9705 (int)RARRAY_LEN(obj),
9706 ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
9707 (void *)RARRAY_CONST_PTR_TRANSIENT(obj));
9708 }
9709 break;
9710 case T_STRING: {
9711 snprintf(buff, buff_size, "%s %s", buff, RSTRING_PTR(obj));
9712 break;
9713 }
9714 case T_HASH: {
9715 snprintf(buff, buff_size, "%s [%c%c] %d", buff,
9716 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
9717 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
9718 (int)RHASH_SIZE(obj));
9719 break;
9720 }
9721 case T_CLASS:
9722 case T_MODULE:
9723 {
9724 VALUE class_path = rb_class_path_cached(obj);
9725 if (!NIL_P(class_path)) {
9726 snprintf(buff, buff_size, "%s %s", buff, RSTRING_PTR(class_path));
9727 }
9728 break;
9729 }
9730 case T_ICLASS:
9731 {
9732 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
9733 if (!NIL_P(class_path)) {
9734 snprintf(buff, buff_size, "%s src:%s", buff, RSTRING_PTR(class_path));
9735 }
9736 break;
9737 }
9738 case T_OBJECT:
9739 {
9740 uint32_t len = ROBJECT_NUMIV(obj);
9741
9742 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
9743 snprintf(buff, buff_size, "%s (embed) len:%d", buff, len);
9744 }
9745 else {
9746 VALUE *ptr = ROBJECT_IVPTR(obj);
9747 snprintf(buff, buff_size, "%s len:%d ptr:%p", buff, len, (void *)ptr);
9748 }
9749 }
9750 break;
9751 case T_DATA: {
9752 const struct rb_block *block;
9753 const rb_iseq_t *iseq;
9754 if (rb_obj_is_proc(obj) &&
9755 (block = vm_proc_block(obj)) != NULL &&
9756 (vm_block_type(block) == block_type_iseq) &&
9757 (iseq = vm_block_iseq(block)) != NULL) {
9758 rb_raw_iseq_info(buff, buff_size, iseq);
9759 }
9760 else {
9761 const char * const type_name = rb_objspace_data_type_name(obj);
9762 if (type_name) {
9763 snprintf(buff, buff_size, "%s %s", buff, type_name);
9764 }
9765 }
9766 break;
9767 }
9768 case T_IMEMO: {
9769 const char *imemo_name = "\0";
9770 switch (imemo_type(obj)) {
9771 #define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
9772 IMEMO_NAME(env);
9773 IMEMO_NAME(cref);
9774 IMEMO_NAME(svar);
9775 IMEMO_NAME(throw_data);
9776 IMEMO_NAME(ifunc);
9777 IMEMO_NAME(memo);
9778 IMEMO_NAME(ment);
9779 IMEMO_NAME(iseq);
9780 IMEMO_NAME(tmpbuf);
9781 IMEMO_NAME(ast);
9782 IMEMO_NAME(parser_strterm);
9783 #undef IMEMO_NAME
9784 default: UNREACHABLE;
9785 }
9786 snprintf(buff, buff_size, "%s %s", buff, imemo_name);
9787
9788 switch (imemo_type(obj)) {
9789 case imemo_ment: {
9790 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
9791 if (me->def) {
9792 snprintf(buff, buff_size, "%s (called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)", buff,
9793 rb_id2name(me->called_id),
9794 method_type_name(me->def->type),
9795 me->def->alias_count,
9796 obj_info(me->owner),
9797 obj_info(me->defined_class));
9798 }
9799 else {
9800 snprintf(buff, buff_size, "%s", rb_id2name(me->called_id));
9801 }
9802 break;
9803 }
9804 case imemo_iseq: {
9805 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
9806 rb_raw_iseq_info(buff, buff_size, iseq);
9807 break;
9808 }
9809 default:
9810 break;
9811 }
9812 }
9813 default:
9814 break;
9815 }
9816 #undef TF
9817 #undef C
9818 }
9819 return buff;
9820 }
9821
9822 #if RGENGC_OBJ_INFO
9823 #define OBJ_INFO_BUFFERS_NUM 10
9824 #define OBJ_INFO_BUFFERS_SIZE 0x100
9825 static int obj_info_buffers_index = 0;
9826 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
9827
9828 static const char *
9829 obj_info(VALUE obj)
9830 {
9831 const int index = obj_info_buffers_index++;
9832 char *const buff = &obj_info_buffers[index][0];
9833
9834 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
9835 obj_info_buffers_index = 0;
9836 }
9837
9838 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
9839 }
9840 #else
9841 static const char *
9842 obj_info(VALUE obj)
9843 {
9844 return obj_type_name(obj);
9845 }
9846 #endif
9847
9848 MJIT_FUNC_EXPORTED const char *
9849 rb_obj_info(VALUE obj)
9850 {
9851 if (!rb_special_const_p(obj)) {
9852 return obj_info(obj);
9853 }
9854 else {
9855 return obj_type_name(obj);
9856 }
9857 }
9858
9859 void
9860 rb_obj_info_dump(VALUE obj)
9861 {
9862 char buff[0x100];
9863 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
9864 }
9865
9866 #if GC_DEBUG
9867
9868 void
9869 rb_gcdebug_print_obj_condition(VALUE obj)
9870 {
9871 rb_objspace_t *objspace = &rb_objspace;
9872
9873 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
9874
9875 if (is_pointer_to_heap(objspace, (void *)obj)) {
9876 fprintf(stderr, "pointer to heap?: true\n");
9877 }
9878 else {
9879 fprintf(stderr, "pointer to heap?: false\n");
9880 return;
9881 }
9882
9883 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
9884 #if USE_RGENGC
9885 fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
9886 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
9887 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
9888 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
9889 #endif
9890
9891 if (is_lazy_sweeping(heap_eden)) {
9892 fprintf(stderr, "lazy sweeping?: true\n");
9893 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
9894 }
9895 else {
9896 fprintf(stderr, "lazy sweeping?: false\n");
9897 }
9898 }
9899
9900 static VALUE
9901 gcdebug_sentinel(VALUE obj, VALUE name)
9902 {
9903 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
9904 return Qnil;
9905 }
9906
9907 void
9908 rb_gcdebug_sentinel(VALUE obj, const char *name)
9909 {
9910 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
9911 }
9912
9913 #endif /* GC_DEBUG */
9914
9915 #if GC_DEBUG_STRESS_TO_CLASS
9916 /*
9917 * call-seq:
9918 * GC.add_stress_to_class(class[, ...])
9919 *
9920 * Raises NoMemoryError when allocating an instance of the given classes.
9921 *
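*  A sketch of use (these methods are only defined when Ruby was built
*  with +GC_DEBUG_STRESS_TO_CLASS+):
*
*     GC.add_stress_to_class(String)
*     GC.remove_stress_to_class(String)
*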
9922 */
9923 static VALUE
9924 rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
9925 {
9926 rb_objspace_t *objspace = &rb_objspace;
9927
9928 if (!stress_to_class) {
9929 stress_to_class = rb_ary_tmp_new(argc);
9930 }
9931 rb_ary_cat(stress_to_class, argv, argc);
9932 return self;
9933 }
9934
9935 /*
9936 * call-seq:
9937 * GC.remove_stress_to_class(class[, ...])
9938 *
9939 * No longer raises NoMemoryError when allocating an instance of the
9940 * given classes.
9941 *
9942 */
9943 static VALUE
9944 rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
9945 {
9946 rb_objspace_t *objspace = &rb_objspace;
9947 int i;
9948
9949 if (stress_to_class) {
9950 for (i = 0; i < argc; ++i) {
9951 rb_ary_delete_same(stress_to_class, argv[i]);
9952 }
9953 if (RARRAY_LEN(stress_to_class) == 0) {
9954 stress_to_class = 0;
9955 }
9956 }
9957 return Qnil;
9958 }
9959 #endif
9960
9961 /*
9962 * Document-module: ObjectSpace
9963 *
9964 * The ObjectSpace module contains a number of routines
9965 * that interact with the garbage collection facility and allow you to
9966 * traverse all living objects with an iterator.
9967 *
9968 * ObjectSpace also provides support for object finalizers, procs that will be
9969 * called when a specific object is about to be destroyed by garbage
9970 * collection.
9971 *
9972 * require 'objspace'
9973 *
9974 * a = "A"
9975 * b = "B"
9976 *
9977 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
9978 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
9979 *
9980 * _produces:_
9981 *
9982 * Finalizer two on 537763470
9983 * Finalizer one on 537763480
9984 */
9985
9986 /*
9987 * Document-class: ObjectSpace::WeakMap
9988 *
9989 * An ObjectSpace::WeakMap object holds references to
9990 * any objects, but those objects can get garbage collected.
9991 *
9992 * This class is mostly used internally by WeakRef; please use
9993 * +lib/weakref.rb+ for the public interface.
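*
*  A minimal sketch of direct use (entries disappear once the referenced
*  objects are collected):
*
*     map = ObjectSpace::WeakMap.new
*     key = Object.new
*     map[key] = "payload"
*     map[key]  #=> "payload", until key or the value is garbage collected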
9994 */
9995
9996 /* Document-class: GC::Profiler
9997 *
9998 * The GC profiler provides access to information on GC runs including time,
9999 * length and object space size.
10000 *
10001 * Example:
10002 *
10003 * GC::Profiler.enable
10004 *
10005 * require 'rdoc/rdoc'
10006 *
10007 * GC::Profiler.report
10008 *
10009 * GC::Profiler.disable
10010 *
10011 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
10012 */
10013
10014 /*
10015 * The GC module provides an interface to Ruby's mark and
10016 * sweep garbage collection mechanism.
10017 *
10018 * Some of the underlying methods are also available via the ObjectSpace
10019 * module.
10020 *
10021 * You may obtain information about the operation of the GC through
10022 * GC::Profiler.
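*
*  For example (a sketch; the exact set of statistics varies between Ruby
*  versions):
*
*     GC.start                     # force a full GC run
*     GC.count                     # number of GC runs so far
*     GC.stat[:heap_live_slots]    # one of the GC.stat entries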
10023 */
10024
10025 void
10026 Init_GC(void)
10027 {
10028 #undef rb_intern
10029 VALUE rb_mObjSpace;
10030 VALUE rb_mProfiler;
10031 VALUE gc_constants;
10032
10033 rb_mGC = rb_define_module("GC");
10034 rb_define_singleton_method(rb_mGC, "start", gc_start_internal, -1);
10035 rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
10036 rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
10037 rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
10038 rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set_m, 1);
10039 rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
10040 rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
10041 rb_define_singleton_method(rb_mGC, "latest_gc_info", gc_latest_gc_info, -1);
10042 rb_define_method(rb_mGC, "garbage_collect", gc_start_internal, -1);
10043
10044 gc_constants = rb_hash_new();
10045 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
10046 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
10047 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
10048 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_PLANES")), SIZET2NUM(HEAP_PAGE_BITMAP_PLANES));
10049 OBJ_FREEZE(gc_constants);
10050 /* internal constants */
10051 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
10052
10053 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
10054 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
10055 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
10056 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
10057 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
10058 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
10059 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
10060 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
10061 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
10062
10063 rb_mObjSpace = rb_define_module("ObjectSpace");
10064 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
10065 rb_define_module_function(rb_mObjSpace, "garbage_collect", gc_start_internal, -1);
10066
10067 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
10068 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
10069
10070 rb_define_module_function(rb_mObjSpace, "_id2ref", id2ref, 1);
10071
10072 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
10073
10074 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
10075 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
10076
10077 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
10078
10079 {
10080 VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
10081 rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
10082 rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
10083 rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
10084 rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
10085 rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
10086 rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
10087 rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
10088 rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
10089 rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
10090 rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
10091 rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
10092 rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
10093 rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
10094 rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
10095 rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
10096 rb_define_private_method(rb_cWeakMap, "finalize", wmap_finalize, 1);
10097 rb_include_module(rb_cWeakMap, rb_mEnumerable);
10098 }
10099
10100 /* internal methods */
10101 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);
10102 rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
10103 #if MALLOC_ALLOCATED_SIZE
10104 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
10105 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
10106 #endif
10107
10108 #if GC_DEBUG_STRESS_TO_CLASS
10109 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
10110 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
10111 #endif
10112
10113 {
10114 VALUE opts;
10115 /* GC build options */
10116 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
10117 #define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
10118 OPT(GC_DEBUG);
10119 OPT(USE_RGENGC);
10120 OPT(RGENGC_DEBUG);
10121 OPT(RGENGC_CHECK_MODE);
10122 OPT(RGENGC_PROFILE);
10123 OPT(RGENGC_ESTIMATE_OLDMALLOC);
10124 OPT(GC_PROFILE_MORE_DETAIL);
10125 OPT(GC_ENABLE_LAZY_SWEEP);
10126 OPT(CALC_EXACT_MALLOC_SIZE);
10127 OPT(MALLOC_ALLOCATED_SIZE);
10128 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
10129 OPT(GC_PROFILE_DETAIL_MEMORY);
10130 #undef OPT
10131 OBJ_FREEZE(opts);
10132 }
10133 }
10134
10135 #ifdef ruby_xmalloc
10136 #undef ruby_xmalloc
10137 #endif
10138 #ifdef ruby_xmalloc2
10139 #undef ruby_xmalloc2
10140 #endif
10141 #ifdef ruby_xcalloc
10142 #undef ruby_xcalloc
10143 #endif
10144 #ifdef ruby_xrealloc
10145 #undef ruby_xrealloc
10146 #endif
10147 #ifdef ruby_xrealloc2
10148 #undef ruby_xrealloc2
10149 #endif
10150
10151 void *
10152 ruby_xmalloc(size_t size)
10153 {
10154 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10155 ruby_malloc_info_file = __FILE__;
10156 ruby_malloc_info_line = __LINE__;
10157 #endif
10158 return ruby_xmalloc_body(size);
10159 }
10160
10161 void *
10162 ruby_xmalloc2(size_t n, size_t size)
10163 {
10164 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10165 ruby_malloc_info_file = __FILE__;
10166 ruby_malloc_info_line = __LINE__;
10167 #endif
10168 return ruby_xmalloc2_body(n, size);
10169 }
10170
10171 void *
10172 ruby_xcalloc(size_t n, size_t size)
10173 {
10174 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10175 ruby_malloc_info_file = __FILE__;
10176 ruby_malloc_info_line = __LINE__;
10177 #endif
10178 return ruby_xcalloc_body(n, size);
10179 }
10180
10181 void *
10182 ruby_xrealloc(void *ptr, size_t new_size)
10183 {
10184 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10185 ruby_malloc_info_file = __FILE__;
10186 ruby_malloc_info_line = __LINE__;
10187 #endif
10188 return ruby_xrealloc_body(ptr, new_size);
10189 }
10190
10191 void *
10192 ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
10193 {
10194 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10195 ruby_malloc_info_file = __FILE__;
10196 ruby_malloc_info_line = __LINE__;
10197 #endif
10198 return ruby_xrealloc2_body(ptr, n, new_size);
10199 }
10200