1 /**********************************************************************
2 
3   vm_core.h -
4 
5   $Author: naruse $
6   created at: 04/01/01 19:41:38 JST
7 
8   Copyright (C) 2004-2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #ifndef RUBY_VM_CORE_H
13 #define RUBY_VM_CORE_H
14 
15 /*
16  * Enable check mode.
17  *   1: enable local assertions.
18  */
19 #ifndef VM_CHECK_MODE
20 #define VM_CHECK_MODE 0
21 #endif
22 
23 /**
24  * VM Debug Level
25  *
26  * debug level:
27  *  0: no debug output
28  *  1: show instruction name
29  *  2: show stack frame when control stack frame is changed
30  *  3: show stack status
31  *  4: show register
32  *  5:
33  * 10: gc check
34  */
35 
36 #ifndef VMDEBUG
37 #define VMDEBUG 0
38 #endif
39 
40 #if 0
41 #undef  VMDEBUG
42 #define VMDEBUG 3
43 #endif
44 
45 #include "ruby_assert.h"
46 
47 #if VM_CHECK_MODE > 0
48 #define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
49 
50 #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
51 
52 #else
53 #define VM_ASSERT(expr) ((void)0)
54 #define VM_UNREACHABLE(func) UNREACHABLE
55 #endif
56 
57 #define RUBY_VM_THREAD_MODEL 2
58 
59 /*
60  * implementation selector of get_insn_info algorithm
61  *   0: linear search
62  *   1: binary search
63  *   2: succinct bitvector
64  */
65 #ifndef VM_INSN_INFO_TABLE_IMPL
66 # define VM_INSN_INFO_TABLE_IMPL 2
67 #endif
68 
69 #include "ruby/ruby.h"
70 #include "ruby/st.h"
71 
72 #include "node.h"
73 #include "vm_opts.h"
74 #include "id.h"
75 #include "method.h"
76 #include "ruby_atomic.h"
77 #include "ccan/list/list.h"
78 
79 #include "ruby/thread_native.h"
80 #if   defined(_WIN32)
81 #include "thread_win32.h"
82 #elif defined(HAVE_PTHREAD_H)
83 #include "thread_pthread.h"
84 #endif
85 
86 #include <setjmp.h>
87 #include <signal.h>
88 
89 #if defined(NSIG_MAX)           /* POSIX issue 8 */
90 # undef NSIG
91 # define NSIG NSIG_MAX
92 #elif defined(_SIG_MAXSIG)      /* FreeBSD */
93 # undef NSIG
94 # define NSIG _SIG_MAXSIG
95 #elif defined(_SIGMAX)          /* QNX */
96 # define NSIG (_SIGMAX + 1)
97 #elif defined(NSIG)             /* 99% of everything else */
98 # /* take it */
99 #else                           /* Last resort */
100 # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
101 #endif
102 
103 #define RUBY_NSIG NSIG
104 
105 #if defined(SIGCLD)
106 #  define RUBY_SIGCHLD    (SIGCLD)
107 #elif defined(SIGCHLD)
108 #  define RUBY_SIGCHLD    (SIGCHLD)
109 #else
110 #  define RUBY_SIGCHLD    (0)
111 #endif
112 
113 /* platforms with broken or non-existent SIGCHLD work by polling */
114 #if defined(__APPLE__)
115 #  define SIGCHLD_LOSSY (1)
116 #else
117 #  define SIGCHLD_LOSSY (0)
118 #endif
119 
120 /* define to 0 to test old code path */
121 #define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
122 
123 #ifdef HAVE_STDARG_PROTOTYPES
124 #include <stdarg.h>
125 #define va_init_list(a,b) va_start((a),(b))
126 #else
127 #include <varargs.h>
128 #define va_init_list(a,b) va_start((a))
129 #endif
130 
131 #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
132 #  define USE_SIGALTSTACK
133 void *rb_register_sigaltstack(void);
134 #  define RB_ALTSTACK_INIT(var) var = rb_register_sigaltstack()
135 #  define RB_ALTSTACK_FREE(var) xfree(var)
136 #  define RB_ALTSTACK(var)  var
137 #else /* noop */
138 #  define RB_ALTSTACK_INIT(var)
139 #  define RB_ALTSTACK_FREE(var)
140 #  define RB_ALTSTACK(var) (0)
141 #endif
142 
143 /*****************/
144 /* configuration */
145 /*****************/
146 
147 /* gcc ver. check */
148 #if defined(__GNUC__) && __GNUC__ >= 2
149 
150 #if OPT_TOKEN_THREADED_CODE
151 #if OPT_DIRECT_THREADED_CODE
152 #undef OPT_DIRECT_THREADED_CODE
153 #endif
154 #endif
155 
156 #else /* defined(__GNUC__) && __GNUC__ >= 2 */
157 
158 /* disable threaded code options */
159 #if OPT_DIRECT_THREADED_CODE
160 #undef OPT_DIRECT_THREADED_CODE
161 #endif
162 #if OPT_TOKEN_THREADED_CODE
163 #undef OPT_TOKEN_THREADED_CODE
164 #endif
165 #endif
166 
167 /* call threaded code */
168 #if    OPT_CALL_THREADED_CODE
169 #if    OPT_DIRECT_THREADED_CODE
170 #undef OPT_DIRECT_THREADED_CODE
171 #endif /* OPT_DIRECT_THREADED_CODE */
172 #if    OPT_STACK_CACHING
173 #undef OPT_STACK_CACHING
174 #endif /* OPT_STACK_CACHING */
175 #endif /* OPT_CALL_THREADED_CODE */
176 
177 void rb_vm_encoded_insn_data_table_init(void);
178 typedef unsigned long rb_num_t;
179 typedef   signed long rb_snum_t;
180 
/* Reason for a non-local jump (longjmp-based control transfer inside the
 * VM); stored in struct rb_vm_tag::state.  TAG_MASK extracts the tag
 * portion of a combined state word. */
enum ruby_tag_type {
    RUBY_TAG_NONE	= 0x0,
    RUBY_TAG_RETURN	= 0x1,
    RUBY_TAG_BREAK	= 0x2,
    RUBY_TAG_NEXT	= 0x3,
    RUBY_TAG_RETRY	= 0x4,
    RUBY_TAG_REDO	= 0x5,
    RUBY_TAG_RAISE	= 0x6,
    RUBY_TAG_THROW	= 0x7,
    RUBY_TAG_FATAL	= 0x8,
    RUBY_TAG_MASK	= 0xf
};
193 
194 #define TAG_NONE	RUBY_TAG_NONE
195 #define TAG_RETURN	RUBY_TAG_RETURN
196 #define TAG_BREAK	RUBY_TAG_BREAK
197 #define TAG_NEXT	RUBY_TAG_NEXT
198 #define TAG_RETRY	RUBY_TAG_RETRY
199 #define TAG_REDO	RUBY_TAG_REDO
200 #define TAG_RAISE	RUBY_TAG_RAISE
201 #define TAG_THROW	RUBY_TAG_THROW
202 #define TAG_FATAL	RUBY_TAG_FATAL
203 #define TAG_MASK	RUBY_TAG_MASK
204 
/* Flags packed into the throw state word: the low byte
 * (VM_THROW_STATE_MASK) carries a ruby_tag_type value.
 * NOTE(review): VM_THROW_NO_ESCAPE_FLAG presumably marks a throw that
 * must not escape the current frame — confirm against the throw insn. */
enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};
209 
210 /* forward declarations */
211 struct rb_thread_struct;
212 struct rb_control_frame_struct;
213 
214 /* iseq data type */
215 typedef struct rb_compile_option_struct rb_compile_option_t;
216 
/* Inline cache entry embedded in an iseq.  ic_serial and ic_cref are the
 * cache keys (validity checks); ic_value holds the cached payload, either
 * an index or a VALUE depending on the using instruction. */
struct iseq_inline_cache_entry {
    rb_serial_t ic_serial;   /* serial number the cache was filled at */
    const rb_cref_t *ic_cref; /* cref the cache was filled under */
    union {
	size_t index;
	VALUE value;
    } ic_value;
};
225 
/* Per-instruction inline storage: either one-shot evaluation state
 * (`once`: the thread currently evaluating plus the memoized value) or a
 * regular inline cache entry. */
union iseq_inline_storage_entry {
    struct {
	struct rb_thread_struct *running_thread;
	VALUE value;
    } once;
    struct iseq_inline_cache_entry cache;
};
233 
/* Bit flags recording why method dispatch failed (visibility, call style,
 * missing entry); consulted when raising NoMethodError / calling
 * method_missing.  Stored in rb_execution_context_t and rb_call_cache. */
enum method_missing_reason {
    MISSING_NOENTRY   = 0x00,
    MISSING_PRIVATE   = 0x01,
    MISSING_PROTECTED = 0x02,
    MISSING_FCALL     = 0x04,
    MISSING_VCALL     = 0x08,
    MISSING_SUPER     = 0x10,
    MISSING_MISSING   = 0x20,
    MISSING_NONE      = 0x40
};
244 
/* Static (compile-time) description of one call site. */
struct rb_call_info {
    /* fixed at compile time */
    ID mid;            /* method name id */
    unsigned int flag; /* VM_CALL_* bits (see enum vm_call_flag_bits) */
    int orig_argc;     /* argc as written at the call site */
};
251 
/* Keyword-argument names at a call site.  `keywords` is declared [1] but
 * is allocated as a variable-length tail of keyword_len entries. */
struct rb_call_info_kw_arg {
    int keyword_len;
    VALUE keywords[1];
};

/* Call-site info extended with keyword-argument data. */
struct rb_call_info_with_kwarg {
    struct rb_call_info ci;
    struct rb_call_info_kw_arg *kw_arg;
};

/* Dynamic per-call data filled in during dispatch. */
struct rb_calling_info {
    VALUE block_handler; /* block passed to this call, if any */
    VALUE recv;          /* receiver */
    int argc;            /* actual argument count */
};
267 
268 struct rb_call_cache;
269 struct rb_execution_context_struct;
270 typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
271 
/* Per-call-site method cache.  The cache is valid only while both serial
 * keys still match the current global/class state; `call` is the handler
 * invoked to perform the dispatch. */
struct rb_call_cache {
    /* inline cache: keys */
    rb_serial_t method_state;
    rb_serial_t class_serial;

    /* inline cache: values */
    const rb_callable_method_entry_t *me; /* resolved method entry */

    vm_call_handler call; /* dispatch function for this cached method */

    union {
	unsigned int index; /* used by ivar */
	enum method_missing_reason method_missing_reason; /* used by method_missing */
	int inc_sp; /* used by cfunc */
    } aux;
};
288 
289 #if 1
290 #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
291 #else
292 #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
293 #endif
294 #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
295 
/* Source-location metadata attached to an iseq. */
typedef struct rb_iseq_location_struct {
    VALUE pathobj;      /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label;   /* String */
    VALUE label;        /* String */
    VALUE first_lineno; /* TODO: may be unsigned short */
    int node_id;
    rb_code_location_t code_location;
} rb_iseq_location_t;
304 
305 #define PATHOBJ_PATH     0
306 #define PATHOBJ_REALPATH 1
307 
308 static inline VALUE
pathobj_path(VALUE pathobj)309 pathobj_path(VALUE pathobj)
310 {
311     if (RB_TYPE_P(pathobj, T_STRING)) {
312 	return pathobj;
313     }
314     else {
315 	VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
316 	return RARRAY_AREF(pathobj, PATHOBJ_PATH);
317     }
318 }
319 
320 static inline VALUE
pathobj_realpath(VALUE pathobj)321 pathobj_realpath(VALUE pathobj)
322 {
323     if (RB_TYPE_P(pathobj, T_STRING)) {
324 	return pathobj;
325     }
326     else {
327 	VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
328 	return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
329     }
330 }
331 
332 /* Forward declarations */
333 struct rb_mjit_unit;
334 
/* The constant core of an instruction sequence: bytecode, parameter
 * layout, local table, catch table, inline caches and (optionally) MJIT
 * state.  Shared by every execution of the iseq. */
struct rb_iseq_constant_body {
    enum iseq_type {
	ISEQ_TYPE_TOP,
	ISEQ_TYPE_METHOD,
	ISEQ_TYPE_BLOCK,
	ISEQ_TYPE_CLASS,
	ISEQ_TYPE_RESCUE,
	ISEQ_TYPE_ENSURE,
	ISEQ_TYPE_EVAL,
	ISEQ_TYPE_MAIN,
	ISEQ_TYPE_PLAIN
    } type;              /* instruction sequence type */

    unsigned int iseq_size; /* number of VALUE words in iseq_encoded */
    const VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    /**
     * parameter information
     *
     *  def m(a1, a2, ..., aM,                    # mandatory
     *        b1=(...), b2=(...), ..., bN=(...),  # optional
     *        *c,                                 # rest
     *        d1, d2, ..., dO,                    # post
     *        e1:(...), e2:(...), ..., eK:(...),  # keyword
     *        **f,                                # keyword_rest
     *        &g)                                 # block
     * =>
     *
     *  lead_num     = M
     *  opt_num      = N
     *  rest_start   = M+N
     *  post_start   = M+N+(*1)
     *  post_num     = O
     *  keyword_num  = K
     *  block_start  = M+N+(*1)+O+K
     *  keyword_bits = M+N+(*1)+O+K+(&1)
     *  size         = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
     */

    struct {
	struct {
	    unsigned int has_lead   : 1;
	    unsigned int has_opt    : 1;
	    unsigned int has_rest   : 1;
	    unsigned int has_post   : 1;
	    unsigned int has_kw     : 1;
	    unsigned int has_kwrest : 1;
	    unsigned int has_block  : 1;

	    unsigned int ambiguous_param0 : 1; /* {|a|} */
	} flags;

	unsigned int size; /* total parameter slot count (see diagram above) */

	int lead_num;
	int opt_num;
	int rest_start;
	int post_start;
	int post_num;
	int block_start;

	const VALUE *opt_table; /* (opt_num + 1) entries. */
	/* opt_num and opt_table:
	 *
	 * def foo o1=e1, o2=e2, ..., oN=eN
	 * #=>
	 *   # prologue code
	 *   A1: e1
	 *   A2: e2
	 *   ...
	 *   AN: eN
	 *   AL: body
	 * opt_num = N
	 * opt_table = [A1, A2, ..., AN, AL]
	 */

	/* keyword parameter layout; NULL when the iseq takes no keywords */
	const struct rb_iseq_param_keyword {
	    int num;
	    int required_num;
	    int bits_start;
	    int rest_start;
	    const ID *table;
	    const VALUE *default_values;
	} *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct iseq_insn_info {
	const struct iseq_insn_info_entry *body;
	unsigned int *positions;
	unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
	struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table;		/* must free */

    /* catch table */
    const struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries;
    struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size];
				      * struct rb_call_info_with_kwarg cikw_entries[ci_kw_size];
				      * So that:
				      * struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size];
				      */
    struct rb_call_cache *cc_entries; /* size is ci_size + ci_kw_size */

    struct {
	rb_snum_t flip_count;
	VALUE coverage;
        VALUE pc2branchindex;
	VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int is_size;
    unsigned int ci_size;
    unsigned int ci_kw_size;
    unsigned int stack_max; /* for stack overflow check */

#if USE_MJIT
    /* The following fields are MJIT related info.  */
    VALUE (*jit_func)(struct rb_execution_context_struct *,
                      struct rb_control_frame_struct *); /* function pointer for loaded native code */
    long unsigned total_calls; /* number of total calls with `mjit_exec()` */
    struct rb_mjit_unit *jit_unit;
#endif
    char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
};
472 
473 /* T_IMEMO/iseq */
474 /* typedef rb_iseq_t is in method.h */
/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
/* The heap-allocated iseq wrapper (5 words).  `body` holds the constant
 * core; `aux` is reused across the iseq's lifetime: compile-time data,
 * loader bookkeeping, or execution-time hook state. */
struct rb_iseq_struct {
    VALUE flags; /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body;  /* 3 */

    union { /* 4, 5 words */
	struct iseq_compile_data *compile_data; /* used at compile time */

	struct {
	    VALUE obj;
	    int index;
	} loader;

        struct {
            struct rb_hook_list_struct *local_hooks;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};
495 
496 #ifndef USE_LAZY_LOAD
497 #define USE_LAZY_LOAD 0
498 #endif
499 
500 #if USE_LAZY_LOAD
501 const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
502 #endif
503 
504 static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t * iseq)505 rb_iseq_check(const rb_iseq_t *iseq)
506 {
507 #if USE_LAZY_LOAD
508     if (iseq->body == NULL) {
509 	rb_iseq_complete((rb_iseq_t *)iseq);
510     }
511 #endif
512     return iseq;
513 }
514 
515 static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t * def)516 def_iseq_ptr(rb_method_definition_t *def)
517 {
518 #if VM_CHECK_MODE > 0
519     if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
520 #endif
521     return rb_iseq_check(def->body.iseq.iseqptr);
522 }
523 
/* Indices into rb_vm_t::special_exceptions: pre-allocated exception
 * objects raised in situations where allocation may be impossible
 * (out of memory, stack overflow, ...). */
enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count /* number of entries, not an exception */
};
532 
/* Basic operators whose redefinition the VM tracks (indices into
 * rb_vm_t::redefined_flag) so specialized instructions can be used while
 * the operator is unredefined (see BASIC_OP_UNREDEFINED_P). */
enum ruby_basic_operators {
    BOP_PLUS,
    BOP_MINUS,
    BOP_MULT,
    BOP_DIV,
    BOP_MOD,
    BOP_EQ,
    BOP_EQQ,
    BOP_LT,
    BOP_LE,
    BOP_LTLT,
    BOP_AREF,
    BOP_ASET,
    BOP_LENGTH,
    BOP_SIZE,
    BOP_EMPTY_P,
    BOP_SUCC,
    BOP_GT,
    BOP_GE,
    BOP_NOT,
    BOP_NEQ,
    BOP_MATCH,
    BOP_FREEZE,
    BOP_UMINUS,
    BOP_MAX,
    BOP_MIN,
    BOP_CALL,
    BOP_AND,
    BOP_OR,

    BOP_LAST_ /* number of tracked operators */
};
565 
566 #define GetVMPtr(obj, ptr) \
567   GetCoreDataFromValue((obj), rb_vm_t, (ptr))
568 
struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

/* Singly linked list of functions run at VM shutdown
 * (rb_vm_t::at_exit). */
typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;
576 
577 struct rb_objspace;
578 struct rb_objspace *rb_objspace_alloc(void);
579 void rb_objspace_free(struct rb_objspace *);
580 
/* List of registered event hooks plus the union of events they listen
 * for.  `need_clean`/`running` coordinate safe removal while hooks are
 * being invoked. */
typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events; /* OR of all hooked event flags */
    unsigned int need_clean;
    unsigned int running;
} rb_hook_list_t;
587 
/* Process-wide VM state: the GVL, the set of living threads, load-path
 * and loaded-feature bookkeeping, signal trap handlers, global hooks and
 * tuning parameters. */
typedef struct rb_vm_struct {
    VALUE self;

    rb_global_vm_lock_t gvl; /* the Global VM Lock */

    struct rb_thread_struct *main_thread;

    /* persists across uncontended GVL release/acquire for time slice */
    const struct rb_thread_struct *running_thread;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen; /* NOTE(review): presumably bumped at fork to invalidate pre-fork state — confirm */
    rb_nativethread_lock_t waitpid_lock;
    struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
    struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
    struct list_head waiting_fds; /* <=> struct waiting_fd */
    struct list_head living_threads; /* all threads; see living_thread_num */
    VALUE thgroup_default;
    int living_thread_num;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;

    unsigned int safe_level_: 1;
    int sleeper;

    /* object management */
    VALUE mark_object_ary; /* array of objects kept alive for the VM */
    const VALUE special_exceptions[ruby_special_error_count];

    /* load */
    VALUE top_self;
    VALUE load_path;
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    struct st_table *loaded_features_index;
    struct st_table *loading_table;

    /* signal */
    struct {
	VALUE cmd[RUBY_NSIG];           /* trap handlers, per signal */
	unsigned char safe[RUBY_NSIG];
    } trap_list;

    /* hook */
    rb_hook_list_t global_hooks;

    /* relation table of ensure - rollback for callcc */
    struct st_table *ensure_rollback_table;

    /* postponed_job (async-signal-safe, NOT thread-safe) */
    struct rb_postponed_job_struct *postponed_job_buffer;
    int postponed_job_index;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE verbose, debug, orig_progname, progname;
    VALUE coverages;
    int coverage_mode;

    VALUE defined_module_hash;

    struct rb_objspace *objspace;

    rb_at_exit_list *at_exit; /* callbacks run at VM teardown */

    VALUE *defined_strings;
    st_table *frozen_strings;

    /* params */
    struct { /* size in byte */
	size_t thread_vm_stack_size;
	size_t thread_machine_stack_size;
	size_t fiber_vm_stack_size;
	size_t fiber_machine_stack_size;
    } default_params;

    short redefined_flag[BOP_LAST_]; /* per-BOP *_REDEFINED_OP_FLAG bits */
} rb_vm_t;
678 
679 /* default values */
680 
681 #define RUBY_VM_SIZE_ALIGN 4096
682 
683 #define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
684 #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
685 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
686 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
687 
688 #define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
689 #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
690 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
691 #if defined(__powerpc64__)
692 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /*   128 KB or  256 KB */
693 #else
694 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
695 #endif
696 
697 /* optimize insn */
698 #define INTEGER_REDEFINED_OP_FLAG (1 << 0)
699 #define FLOAT_REDEFINED_OP_FLAG  (1 << 1)
700 #define STRING_REDEFINED_OP_FLAG (1 << 2)
701 #define ARRAY_REDEFINED_OP_FLAG  (1 << 3)
702 #define HASH_REDEFINED_OP_FLAG   (1 << 4)
703 /* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
704 #define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
705 #define TIME_REDEFINED_OP_FLAG   (1 << 7)
706 #define REGEXP_REDEFINED_OP_FLAG (1 << 8)
707 #define NIL_REDEFINED_OP_FLAG    (1 << 9)
708 #define TRUE_REDEFINED_OP_FLAG   (1 << 10)
709 #define FALSE_REDEFINED_OP_FLAG  (1 << 11)
710 #define PROC_REDEFINED_OP_FLAG   (1 << 12)
711 
712 #define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
713 
714 #ifndef VM_DEBUG_BP_CHECK
715 #define VM_DEBUG_BP_CHECK 0
716 #endif
717 
718 #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
719 #define VM_DEBUG_VERIFY_METHOD_CACHE (VM_DEBUG_MODE != 0)
720 #endif
721 
/* A block captured from a frame: the receiver, the environment pointer,
 * and the code that implements it (iseq, ifunc, or a plain VALUE). */
struct rb_captured_block {
    VALUE self;
    const VALUE *ep; /* environment pointer of the capturing frame */
    union {
	const rb_iseq_t *iseq;
	const struct vm_ifunc *ifunc;
	VALUE val;
    } code;
};
731 
/* Kind of a block-handler VALUE passed at a call site. */
enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

/* Kind of a struct rb_block (which member of its union is active). */
enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};
745 
/* A block value: `type` selects which member of `as` is valid (see
 * enum rb_block_type). */
struct rb_block {
    union {
	struct rb_captured_block captured;
	VALUE symbol;
	VALUE proc;
    } as;
    enum rb_block_type type;
};
754 
/* One frame on the VM control-frame stack.  The cfp[N] comments give the
 * word offsets used by the interpreter; the block[N] comments show the
 * overlap with a captured block's layout. */
typedef struct rb_control_frame_struct {
    const VALUE *pc;		/* cfp[0] */ /* program counter into iseq_encoded */
    VALUE *sp;			/* cfp[1] */ /* value stack pointer */
    const rb_iseq_t *iseq;	/* cfp[2] */
    VALUE self;			/* cfp[3] / block[0] */
    const VALUE *ep;		/* cfp[4] / block[1] */ /* environment pointer */
    const void *block_code;     /* cfp[5] / block[2] */ /* iseq or ifunc */
    const VALUE *bp;		/* cfp[6] */

#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;		/* cfp[7] */
#endif
} rb_control_frame_t;
768 
769 extern const rb_data_type_t ruby_threadptr_data_type;
770 
771 static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)772 rb_thread_ptr(VALUE thval)
773 {
774     return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
775 }
776 
/* Lifecycle state of a Ruby thread (rb_thread_t::status, 2 bits). */
enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};
783 
784 typedef RUBY_JMP_BUF rb_jmpbuf_t;
785 
786 /*
787   the members which are written in EC_PUSH_TAG() should be placed at
788   the beginning and the end, so that entire region is accessible.
789 */
/* One entry in the per-EC tag stack used for setjmp-based non-local
 * jumps.  See the layout note above: members written in EC_PUSH_TAG()
 * sit at the beginning and end so the whole region is accessible. */
struct rb_vm_tag {
    VALUE tag;                /* tag identity matched on jump */
    VALUE retval;             /* value carried by the jump */
    rb_jmpbuf_t buf;
    struct rb_vm_tag *prev;   /* enclosing tag (stack link) */
    enum ruby_tag_type state; /* why the jump happened */
};
797 
798 STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
799 STATIC_ASSERT(rb_vm_tag_buf_end,
800 	      offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
801 	      sizeof(struct rb_vm_tag));
802 
/* Linked stack of protect frames.
 * NOTE(review): presumably pushed by rb_protect-style guards — confirm
 * against eval.c users. */
struct rb_vm_protect_tag {
    struct rb_vm_protect_tag *prev;
};

/* Callback (plus its argument) used to interrupt a thread that is
 * blocked inside a blocking region (rb_thread_t::unblock). */
struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};
811 
812 struct rb_mutex_struct;
813 
/* Singly linked list of threads (e.g. rb_thread_t::join_list holds the
 * threads waiting to join a thread). */
typedef struct rb_thread_list_struct{
    struct rb_thread_list_struct *next;
    struct rb_thread_struct *th;
} rb_thread_list_t;

/* One registered ensure handler: e_proc is called with data2; marker
 * identifies the ensure region. */
typedef struct rb_ensure_entry {
    VALUE marker;
    VALUE (*e_proc)(ANYARGS);
    VALUE data2;
} rb_ensure_entry_t;

/* Stack of active ensure handlers (also used for callcc rollback, see
 * rb_vm_t::ensure_rollback_table). */
typedef struct rb_ensure_list {
    struct rb_ensure_list *next;
    struct rb_ensure_entry entry;
} rb_ensure_list_t;
829 
830 typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
831 
832 typedef struct rb_fiber_struct rb_fiber_t;
833 
/* Per-fiber execution state: the VM value stack and control frames, the
 * tag stack for non-local jumps, fiber-local storage, and the machine
 * stack bounds the GC scans. */
typedef struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;		/* must free, must mark */
    size_t vm_stack_size;       /* size in word (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;    /* current control frame */

    struct rb_vm_tag *tag;              /* innermost tag (EC_PUSH_TAG stack) */
    struct rb_vm_protect_tag *protect_tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */

    rb_fiber_t *fiber_ptr;               /* fiber owning this context */
    struct rb_thread_struct *thread_ptr; /* thread running this context */

    /* storage (ec (fiber) local) */
    st_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* ensure & callcc */
    rb_ensure_list_t *ensure_list;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;              /* current exception ($!) */
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    /* for GC */
    struct {
	VALUE *stack_start;
	VALUE *stack_end;
	size_t stack_maxsize;
#ifdef __ia64
	VALUE *register_stack_start;
	VALUE *register_stack_end;
	size_t register_stack_maxsize;
#endif
	RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs; /* saved registers for GC scan */
    } machine;
} rb_execution_context_t;
889 
890 void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
891 
/* A Ruby thread.  Execution state (stacks, frames, tags) lives in the
 * attached rb_execution_context_t; this struct holds identity, native
 * thread data, interrupt machinery and start-up arguments. */
typedef struct rb_thread_struct {
    struct list_node vmlt_node; /* link in rb_vm_t::living_threads */
    VALUE self;
    rb_vm_t *vm;

    rb_execution_context_t *ec; /* currently attached execution context */

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */
    rb_nativethread_id_t thread_id;
#ifdef NON_SCALAR_THREAD_ID
    rb_thread_id_string_t thread_id_string;
#endif
    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    native_thread_data_t native_thread_data;
    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value; /* thread's return value (Thread#value) */

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock; /* how to wake this thread from a blocking call */
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;

    rb_thread_list_t *join_list; /* threads waiting to join this one */

    /* what to run when the thread starts; selected by invoke_type */
    union {
        struct {
            VALUE proc;
            VALUE args;
        } proc;
        struct {
            VALUE (*func)(ANYARGS);
            void *arg;
        } func;
    } invoke_arg;

    enum {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_func
    } invoke_type;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* fiber */
    rb_fiber_t *root_fiber;
    rb_jmpbuf_t root_jmpbuf;

    /* misc */
    VALUE name; /* Thread#name */

} rb_thread_t;
973 
/* Low bits of the defineclass instruction's flag operand: what kind of
 * module/class body is being opened.  Combined with the
 * VM_DEFINECLASS_FLAG_* bits below. */
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;
981 
982 #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
983 #define VM_DEFINECLASS_FLAG_SCOPED         0x08
984 #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
985 #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
986 #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
987     ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
988 
989 /* iseq.c */
990 RUBY_SYMBOL_EXPORT_BEGIN
991 
992 /* node -> iseq */
993 rb_iseq_t *rb_iseq_new         (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
994 rb_iseq_t *rb_iseq_new_top     (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
995 rb_iseq_t *rb_iseq_new_main    (const rb_ast_body_t *ast,             VALUE path, VALUE realpath, const rb_iseq_t *parent);
996 rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
997 				const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
998 rb_iseq_t *rb_iseq_new_ifunc(const struct vm_ifunc *ifunc, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
999 			     const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1000 
1001 /* src -> iseq */
1002 rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line);
1003 rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block);
1004 rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE realpath, VALUE line, const struct rb_block *base_block, VALUE opt);
1005 
1006 VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1007 int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1008 const char *ruby_node_name(int node);
1009 
1010 VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1011 
1012 RUBY_EXTERN VALUE rb_cISeq;
1013 RUBY_EXTERN VALUE rb_cRubyVM;
1014 RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1015 RUBY_SYMBOL_EXPORT_END
1016 
1017 #define GetProcPtr(obj, ptr) \
1018   GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1019 
/* A Proc object: a block plus how it was created (method-derived,
 * lambda). */
typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1;	/* bool */
    unsigned int is_lambda: 1;		/* bool */
} rb_proc_t;

/* A heap-allocated environment (T_IMEMO): env_size VALUEs plus the ep
 * pointing into them, owned by iseq. */
typedef struct {
    VALUE flags; /* imemo header */
    const rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_binding_t, (ptr))

/* A Binding object: a captured block plus the source location it was
 * created at. */
typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    unsigned short first_lineno;
} rb_binding_t;
1044 
1045 /* used by compile time and send insn */
1046 
/* How the checkmatch instruction compares its operands (when/case
 * dispatch vs rescue clause matching). */
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};
1052 
1053 #define VM_CHECKMATCH_TYPE_MASK   0x03
1054 #define VM_CHECKMATCH_ARRAY       0x04
1055 
/* Bit positions for rb_call_info::flag; the VM_CALL_* macros below turn
 * each position into a mask. */
enum vm_call_flag_bits {
    VM_CALL_ARGS_SPLAT_bit,     /* m(*args) */
    VM_CALL_ARGS_BLOCKARG_bit,  /* m(&block) */
    VM_CALL_FCALL_bit,          /* m(...) */
    VM_CALL_VCALL_bit,          /* m */
    VM_CALL_ARGS_SIMPLE_bit,    /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
    VM_CALL_BLOCKISEQ_bit,      /* has blockiseq */
    VM_CALL_KWARG_bit,          /* has kwarg */
    VM_CALL_KW_SPLAT_bit,       /* m(**opts) */
    VM_CALL_TAILCALL_bit,       /* located at tail position */
    VM_CALL_SUPER_bit,          /* super */
    VM_CALL_ZSUPER_bit,         /* zsuper */
    VM_CALL_OPT_SEND_bit,       /* internal flag */
    VM_CALL__END
};
1071 
1072 #define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
1073 #define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
1074 #define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
1075 #define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
1076 #define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
1077 #define VM_CALL_BLOCKISEQ       (0x01 << VM_CALL_BLOCKISEQ_bit)
1078 #define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
1079 #define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
1080 #define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
1081 #define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
1082 #define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
1083 #define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
1084 
/* Operand of the putspecialobject instruction. */
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

/* Indices into the special-variable (svar) storage of a frame. */
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0,      /* $_ */
    VM_SVAR_BACKREF = 1,       /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
1098 
1099 /* inline cache */
1100 typedef struct iseq_inline_cache_entry *IC;
1101 typedef union iseq_inline_storage_entry *ISE;
1102 typedef struct rb_call_info *CALL_INFO;
1103 typedef struct rb_call_cache *CALL_CACHE;
1104 
1105 void rb_vm_change_state(void);
1106 
1107 typedef VALUE CDHASH;
1108 
1109 #ifndef FUNC_FASTCALL
1110 #define FUNC_FASTCALL(x) x
1111 #endif
1112 
1113 typedef rb_control_frame_t *
1114   (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1115 
/* Tagged pointers: stash small tag values in a pointer's low bits
 * (pointers here are at least 4-byte aligned, leaving the low two bits
 * free).  All macro arguments are fully parenthesized so that compound
 * expressions can be passed safely. */
#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~(mask)))

/* GC-guarded pointer: the 0x01 tag makes the stored word look like a
 * Fixnum (see the env/frame flag layout note below), so the GC will not
 * chase it when scanning the VM stack. */
#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
1122 
enum {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ____ __FF FFFF EEEX (LSB)
     *
     * X   : tag for GC marking (It seems as Fixnum)
     * EEE : 3 bits Env flags
     * FF..: 6 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     *
     * All values below carry the 0x...1 tag bit so that the flag word
     * always looks like a Fixnum; new flags must stay inside the field
     * assigned to them in the layout above.
     */

    /* frame types (the MM.. magic field) */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag (the FF.. field) */
    VM_FRAME_FLAG_PASSED    = 0x0010,
    VM_FRAME_FLAG_FINISH    = 0x0020,
    VM_FRAME_FLAG_BMETHOD   = 0x0040,
    VM_FRAME_FLAG_CFRAME    = 0x0080,
    VM_FRAME_FLAG_LAMBDA    = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,

    /* env flag (the EEE field) */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008
};
1159 
/* Every environment reserves VM_ENV_DATA_SIZE slots ending at ep:
 * ep[-2] = me/cref, ep[-1] = specval (block handler or guarded prev ep),
 * ep[0] = flags.  ep[1] holds the escaped env object; it is only valid
 * once the env has escaped (see VM_ENV_ENVVAL below). */
#define VM_ENV_DATA_SIZE             ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF    (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL    (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS      ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV        ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR              (-VM_ENV_DATA_SIZE)
1168 
/* forward declaration; defined further below, used by the flag setters */
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

/* OR `flag` into the env flag word at ep[VM_ENV_DATA_INDEX_FLAGS].
 * The flag word always looks like a Fixnum (asserted below), so the
 * write goes through the special-const path. */
static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

/* Clear `flag` in the env flag word. */
static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

/* Return the bits of the env flag word selected by `flag` (zero when
 * none of them are set). */
static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}
1194 
/* Frame magic (one of VM_FRAME_MAGIC_*) of the given control frame. */
static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

/* True when the frame was created for a lambda. */
static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

/* True when the frame is a "finish" frame (VM loop exits when popped). */
static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

/* True when the frame was pushed for a method defined by define_method. */
static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}
1218 
/* True when `iseq` is an imemo_iseq object. */
static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq)  rb_obj_is_iseq((VALUE)iseq)
#endif
1228 
/* True when the frame runs C code (no iseq).  In check mode, verify that
 * exactly one of "has an iseq" / "is a C frame" holds. */
static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
    return cframe_p;
}

/* True when the frame runs Ruby (iseq) code. */
static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}
1242 
/* True when the frame's magic marks it as a cfunc frame. */
#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

/* A previous-ep link stored in the specval slot is GC-guarded so the
 * collector does not treat it as an object reference. */
#define VM_GUARDED_PREV_EP(ep)         GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
1248 
/* The specval slot (ep[-1]) is overloaded: for a local (method-level)
 * env it holds the block handler; for a nested block env it holds the
 * GC-guarded pointer to the previous ep.  VM_ENV_FLAG_LOCAL says which. */
static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

/* Previous (outer) ep; only meaningful for a non-local env. */
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

/* Block handler of a local env (VM_BLOCK_HANDLER_NONE if no block). */
static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}
1268 
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

/* True when the env has been moved off the VM stack onto the heap.
 * In check mode, cross-check the flag against the ep's actual location. */
static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

#if VM_CHECK_MODE > 0
/* Assertion helper: always returns 1 so it can sit inside VM_ASSERT. */
static inline int
vm_assert_env(VALUE obj)
{
    VM_ASSERT(imemo_type_p(obj, imemo_env));
    return 1;
}
#endif

/* The imemo_env object of an escaped env (stored at ep[1]). */
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(vm_assert_env(envval));
    return envval;
}

/* Same as VM_ENV_ENVVAL but typed as rb_env_t. */
static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}
1303 
/* Allocate an imemo_env wrapping (env_ep, env_body, iseq) and register
 * it in the env's ep[1] slot.
 * NOTE(review): the rb_imemo_new argument order is assumed to map onto
 * rb_env_t's ep/env/iseq fields -- confirm against rb_imemo_new. */
static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}
1312 
/* Write through a const-qualified slot (ep entries are declared const
 * to discourage ad-hoc stores; all writes funnel through here). */
static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

/* Forced write restricted to special constants, which never need a
 * write barrier. */
static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

/* Write to a stack env slot; only legal while the env still lives on
 * the stack (no write barrier required, as the assert checks). */
static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}
1332 
1333 const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1334 const VALUE *rb_vm_proc_local_ep(VALUE proc);
1335 void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1336 void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1337 
1338 VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1339 
1340 #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1341 #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1342 
1343 #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1344   ((void *)(ecfp) > (void *)(cfp))
1345 
/* One-past-the-end sentinel of the control frame area.  Control frames
 * grow from the top of vm_stack toward lower addresses (PREVIOUS is
 * cfp+1 above), so the end frame sits at vm_stack + vm_stack_size. */
static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

/* True when cfp has run past the end of the control frame area. */
static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}
1357 
1358 static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)1359 VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1360 {
1361     if ((block_handler & 0x03) == 0x01) {
1362 #if VM_CHECK_MODE > 0
1363 	struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1364 	VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1365 #endif
1366 	return 1;
1367     }
1368     else {
1369 	return 0;
1370     }
1371 }
1372 
/* Build an iseq-block handler: captured-block pointer tagged with 0x01. */
static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

/* Recover the captured block from an iseq-block handler. */
static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1388 
1389 static inline int
VM_BH_IFUNC_P(VALUE block_handler)1390 VM_BH_IFUNC_P(VALUE block_handler)
1391 {
1392     if ((block_handler & 0x03) == 0x03) {
1393 #if VM_CHECK_MODE > 0
1394 	struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1395 	VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1396 #endif
1397 	return 1;
1398     }
1399     else {
1400 	return 0;
1401     }
1402 }
1403 
/* Build an ifunc-block handler: captured-block pointer tagged with 0x03. */
static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

/* Recover the captured block from an ifunc-block handler. */
static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

/* Recover the captured block when the handler is either kind (iseq or
 * ifunc); both tags are cleared by the 0x03 mask. */
static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1427 
1428 static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)1429 vm_block_handler_type(VALUE block_handler)
1430 {
1431     if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1432 	return block_handler_type_iseq;
1433     }
1434     else if (VM_BH_IFUNC_P(block_handler)) {
1435 	return block_handler_type_ifunc;
1436     }
1437     else if (SYMBOL_P(block_handler)) {
1438 	return block_handler_type_symbol;
1439     }
1440     else {
1441 	VM_ASSERT(rb_obj_is_proc(block_handler));
1442 	return block_handler_type_proc;
1443     }
1444 }
1445 
/* Check-mode sanity check: a block handler must be NONE or classifiable.
 * The comma expression runs vm_block_handler_type for its internal
 * assertions, then yields 1 so the outer assertion passes. */
static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
	      (vm_block_handler_type(block_handler), 1));
}
1452 
/* Return the block's type tag; in check mode, first verify the payload
 * matches the tag. */
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
	VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
	break;
      case block_type_ifunc:
	VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
	break;
      case block_type_symbol:
	VM_ASSERT(SYMBOL_P(block->as.symbol));
	break;
      case block_type_proc:
	VM_ASSERT(rb_obj_is_proc(block->as.proc));
	break;
    }
#endif
    return block->type;
}

/* Set the block's type tag.  Deliberately casts away const: blocks are
 * declared const in their containers but must be retagged in place. */
static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}
1481 
/* The block wrapped by a Proc object (procval must be a Proc). */
static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

/* forward declarations; definitions follow below */
static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

/* iseq of the block wrapped by a Proc (NULL for ifunc/symbol blocks). */
static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

/* ep of the block wrapped by a Proc (NULL for symbol blocks). */
static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}
1503 
/* iseq of a block, following nested Proc blocks; NULL when the block is
 * an ifunc or symbol and therefore has no iseq. */
static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

/* ep of a block, following nested Proc blocks; NULL for symbol blocks,
 * which capture no environment. */
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}
1529 
/* `self` captured by a block, following nested Proc blocks; Qundef for
 * symbol blocks, which capture no self. */
static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
	return block->as.captured.self;
      case block_type_proc:
	return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
	return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
1545 
/* Symbol and Proc block handlers are stored as the object itself, so
 * these four converters are identity functions plus check-mode asserts. */
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
1573 
1574 /* VM related object allocate functions */
1575 VALUE rb_thread_alloc(VALUE klass);
1576 VALUE rb_binding_alloc(VALUE klass);
1577 VALUE rb_proc_alloc(VALUE klass);
1578 VALUE rb_proc_dup(VALUE self);
1579 
1580 /* for debug */
1581 extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1582 extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
1583 extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1584 
1585 #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
1586 #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
1587 void rb_vm_bugreport(const void *);
1588 NORETURN(void rb_bug_context(const void *, const char *fmt, ...));
1589 
1590 /* functions about thread/vm execution */
1591 RUBY_SYMBOL_EXPORT_BEGIN
1592 VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1593 VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1594 VALUE rb_iseq_path(const rb_iseq_t *iseq);
1595 VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1596 RUBY_SYMBOL_EXPORT_END
1597 
1598 VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1599 void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1600 
1601 int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1602 void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1603 
1604 VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, VALUE block_handler);
1605 
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);

/* Create a Proc (is_lambda = 0) from a captured block. */
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

/* Create a lambda (is_lambda = 1) from a captured block. */
static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}
1618 
1619 VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1620 VALUE rb_vm_env_local_variables(const rb_env_t *env);
1621 const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1622 const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1623 void rb_vm_inc_const_missing_count(void);
1624 void rb_vm_gvl_destroy(rb_vm_t *vm);
1625 VALUE rb_vm_call(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1626 		 const VALUE *argv, const rb_callable_method_entry_t *me);
1627 MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
1628 
1629 void rb_thread_start_timer_thread(void);
1630 void rb_thread_stop_timer_thread(void);
1631 void rb_thread_reset_timer_thread(void);
1632 void rb_thread_wakeup_timer_thread(int);
1633 
/* Initialize the VM's thread bookkeeping lists (ccan/list heads) and
 * reset the living-thread counter. */
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    list_head_init(&vm->waiting_fds);
    list_head_init(&vm->waiting_pids);
    list_head_init(&vm->workqueue);
    list_head_init(&vm->waiting_grps);
    list_head_init(&vm->living_threads);
    vm->living_thread_num = 0;
}

/* Register a newly started thread with the VM. */
static inline void
rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
{
    list_add_tail(&vm->living_threads, &th->vmlt_node);
    vm->living_thread_num++;
}

/* Unregister a dying thread from the VM. */
static inline void
rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
{
    list_del(&th->vmlt_node);
    vm->living_thread_num--;
}
1658 
1659 typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1660 rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1661 rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1662 int rb_vm_get_sourceline(const rb_control_frame_t *);
1663 VALUE rb_name_err_mesg_new(VALUE mesg, VALUE recv, VALUE method);
1664 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1665 void ruby_thread_init_stack(rb_thread_t *th);
1666 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1667 void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1668 MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1669 
1670 void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1671 
1672 #define rb_vm_register_special_exception(sp, e, m) \
1673     rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1674 
1675 void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);
1676 
1677 void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1678 
1679 MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1680 
1681 #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1682 
1683 #define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
1684 #define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
1685     (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
1686      !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
1687      ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
1688 #define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
1689     if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
1690 #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
1691     WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
1692 #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1693     WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
1694 
1695 VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1696 
1697 /* for thread */
1698 
1699 #if RUBY_VM_THREAD_MODEL == 2
1700 RUBY_SYMBOL_EXPORT_BEGIN
1701 
1702 RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1703 RUBY_EXTERN rb_execution_context_t *ruby_current_execution_context_ptr;
1704 RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1705 RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1706 RUBY_EXTERN unsigned int    ruby_vm_event_local_num;
1707 
1708 RUBY_SYMBOL_EXPORT_END
1709 
1710 #define GET_VM()     rb_current_vm()
1711 #define GET_THREAD() rb_current_thread()
1712 #define GET_EC()     rb_current_execution_context()
1713 
/* Thread that owns the given execution context. */
static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}
1719 
1720 static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t * ec)1721 rb_ec_vm_ptr(const rb_execution_context_t *ec)
1722 {
1723     const rb_thread_t *th = rb_ec_thread_ptr(ec);
1724     if (th) {
1725 	return th->vm;
1726     }
1727     else {
1728 	return NULL;
1729     }
1730 }
1731 
/* Currently running execution context (backing of GET_EC()). */
static inline rb_execution_context_t *
rb_current_execution_context(void)
{
    return ruby_current_execution_context_ptr;
}

/* Currently running thread (backing of GET_THREAD()). */
static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

/* Current VM (backing of GET_VM()).  In check mode, verify that the
 * cached VM pointer agrees with the one reachable from the current EC. */
static inline rb_vm_t *
rb_current_vm(void)
{
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
	      ruby_current_execution_context_ptr == NULL ||
	      rb_ec_thread_ptr(GET_EC()) == NULL ||
	      rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
    return ruby_current_vm_ptr;
}
1754 
/* Make th's execution context the current one, without touching the
 * running-thread bookkeeping. */
static inline void
rb_thread_set_current_raw(const rb_thread_t *th)
{
    ruby_current_execution_context_ptr = th->ec;
}

/* Switch the VM's running thread to th, resetting its running-time
 * accounting when it was not already the running thread. */
static inline void
rb_thread_set_current(rb_thread_t *th)
{
    if (th->vm->running_thread != th) {
        th->running_time_us = 0;
    }
    rb_thread_set_current_raw(th);
    th->vm->running_thread = th;
}
1770 
1771 #else
1772 #error "unsupported thread model"
1773 #endif
1774 
/* Bits of ec->interrupt_flag; set atomically from other threads or
 * signal context, consumed by rb_vm_check_ints below.  Bits present in
 * ec->interrupt_mask are temporarily ignored. */
enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK	         = 0x08
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)		ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)		ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec)	ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)		ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec)			((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
						 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
#define RUBY_VM_INTERRUPTED_ANY(ec)		((ec)->interrupt_flag & ~(ec)->interrupt_mask)
1789 
1790 VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
1791 int rb_signal_buff_size(void);
1792 int rb_signal_exec(rb_thread_t *th, int sig);
1793 void rb_threadptr_check_signal(rb_thread_t *mth);
1794 void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
1795 void rb_threadptr_signal_exit(rb_thread_t *th);
1796 int rb_threadptr_execute_interrupts(rb_thread_t *, int);
1797 void rb_threadptr_interrupt(rb_thread_t *th);
1798 void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
1799 void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
1800 void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
1801 void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
1802 void rb_execution_context_mark(const rb_execution_context_t *ec);
1803 void rb_fiber_close(rb_fiber_t *fib);
1804 void Init_native_thread(rb_thread_t *th);
1805 
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)

/* Service any pending, unmasked interrupts on the current EC.  Must be
 * called on the currently running context (asserted). */
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
    VM_ASSERT(ec == GET_EC());
    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
	rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
1815 
1816 /* tracer */
1817 
/* Argument bundle passed to event hooks (TracePoint et al.); filled in
 * by rb_exec_event_hook_orig below. */
struct rb_trace_arg_struct {
    rb_event_flag_t event;          /* which event fired */
    rb_execution_context_t *ec;     /* EC the event occurred on */
    const rb_control_frame_t *cfp;  /* frame at the time of the event */
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;               /* bool: klass/id already resolved */

    /* calc from cfp (computed lazily) */
    int lineno;
    VALUE path;
};
1834 
1835 void rb_hook_list_mark(rb_hook_list_t *hooks);
1836 void rb_hook_list_free(rb_hook_list_t *hooks);
1837 void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
1838 void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
1839 
1840 void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
1841 
/* Fire an event hook only when the hook list is listening for flag_;
 * a macro so that the remaining arguments are not evaluated on the
 * (common) fast path where no hook is registered. */
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
1850 
/* Slow path of EXEC_EVENT_HOOK_ORIG: build the trace-arg bundle and run
 * the registered hooks.  path is set lazily (Qundef until computed). */
static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}
1872 
/* The VM-wide (global) hook list for the EC's VM. */
static inline rb_hook_list_t *
rb_vm_global_hooks(const rb_execution_context_t *ec)
{
    return &rb_ec_vm_ptr(ec)->global_hooks;
}

/* Fire a global event hook; the _AND_POP_FRAME variant tells the hook
 * machinery (pop_p = 1) that the current frame is about to be popped. */
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
1884 
/* Fire the :script_compiled event.  The hook data is the iseq alone for
 * a loaded file, or [eval_script, iseq] for eval'd source. */
static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
1892 
1893 RUBY_SYMBOL_EXPORT_BEGIN
1894 
1895 int rb_thread_check_trap_pending(void);
1896 
1897 /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
1898 #define RUBY_EVENT_COVERAGE_LINE                0x010000
1899 #define RUBY_EVENT_COVERAGE_BRANCH              0x020000
1900 
1901 extern VALUE rb_get_coverages(void);
1902 extern void rb_set_coverages(VALUE, int, VALUE);
1903 extern void rb_clear_coverages(void);
1904 extern void rb_reset_coverages(void);
1905 
1906 void rb_postponed_job_flush(rb_vm_t *vm);
1907 
1908 RUBY_SYMBOL_EXPORT_END
1909 
1910 #endif /* RUBY_VM_CORE_H */
1911