1 /**********************************************************************
2
3 vm.c -
4
5 $Author: usa $
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9 **********************************************************************/
10
11 #include "internal.h"
12 #include "ruby/vm.h"
13 #include "ruby/st.h"
14
15 #define vm_exec rb_vm_exec
16
17 #include "gc.h"
18 #include "vm_core.h"
19 #include "vm_debug.h"
20 #include "iseq.h"
21 #include "eval_intern.h"
22 #ifndef MJIT_HEADER
23 #include "probes.h"
24 #else
25 #include "probes.dmyh"
26 #endif
27 #include "probes_helper.h"
28
29 VALUE rb_str_concat_literals(size_t, const VALUE*);
30
31 PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
32 static inline const VALUE *
VM_EP_LEP(const VALUE * ep)33 VM_EP_LEP(const VALUE *ep)
34 {
35 while (!VM_ENV_LOCAL_P(ep)) {
36 ep = VM_ENV_PREV_EP(ep);
37 }
38 return ep;
39 }
40
41 static inline const rb_control_frame_t *
rb_vm_search_cf_from_ep(const rb_execution_context_t * ec,const rb_control_frame_t * cfp,const VALUE * const ep)42 rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
43 {
44 if (!ep) {
45 return NULL;
46 }
47 else {
48 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
49
50 while (cfp < eocfp) {
51 if (cfp->ep == ep) {
52 return cfp;
53 }
54 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
55 }
56
57 return NULL;
58 }
59 }
60
61 const VALUE *
rb_vm_ep_local_ep(const VALUE * ep)62 rb_vm_ep_local_ep(const VALUE *ep)
63 {
64 return VM_EP_LEP(ep);
65 }
66
67 PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
68 static inline const VALUE *
VM_CF_LEP(const rb_control_frame_t * const cfp)69 VM_CF_LEP(const rb_control_frame_t * const cfp)
70 {
71 return VM_EP_LEP(cfp->ep);
72 }
73
74 static inline const VALUE *
VM_CF_PREV_EP(const rb_control_frame_t * const cfp)75 VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
76 {
77 return VM_ENV_PREV_EP(cfp->ep);
78 }
79
80 PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
81 static inline VALUE
VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)82 VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
83 {
84 const VALUE *ep = VM_CF_LEP(cfp);
85 return VM_ENV_BLOCK_HANDLER(ep);
86 }
87
88 VALUE
rb_vm_frame_block_handler(const rb_control_frame_t * cfp)89 rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
90 {
91 return VM_CF_BLOCK_HANDLER(cfp);
92 }
93
94 #if VM_CHECK_MODE > 0
95 static int
VM_CFP_IN_HEAP_P(const rb_execution_context_t * ec,const rb_control_frame_t * cfp)96 VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
97 {
98 const VALUE *start = ec->vm_stack;
99 const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
100 VM_ASSERT(start != NULL);
101
102 if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
103 return FALSE;
104 }
105 else {
106 return TRUE;
107 }
108 }
109
110 static int
VM_EP_IN_HEAP_P(const rb_execution_context_t * ec,const VALUE * ep)111 VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
112 {
113 const VALUE *start = ec->vm_stack;
114 const VALUE *end = (VALUE *)ec->cfp;
115 VM_ASSERT(start != NULL);
116
117 if (start <= ep && ep < end) {
118 return FALSE;
119 }
120 else {
121 return TRUE;
122 }
123 }
124
/* Check whether `ep` has escaped to the heap; when it has, assert that the
 * associated env object is consistent (valid imemo, ESCAPED flag set, and
 * the env's ep points back at `ep`).  Debug-only (VM_CHECK_MODE > 0). */
int
vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
{
    if (VM_EP_IN_HEAP_P(ec, ep)) {
        VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */

        /* Qundef marks an env slot that has not been materialized yet */
        if (envval != Qundef) {
            const rb_env_t *env = (const rb_env_t *)envval;

            VM_ASSERT(vm_assert_env(envval));
            VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
            VM_ASSERT(env->ep == ep);
        }
        return TRUE;
    }
    else {
        return FALSE;
    }
}
144
145 int
rb_vm_ep_in_heap_p(const VALUE * ep)146 rb_vm_ep_in_heap_p(const VALUE *ep)
147 {
148 const rb_execution_context_t *ec = GET_EC();
149 if (ec->vm_stack == NULL) return TRUE;
150 return vm_ep_in_heap_p_(ec, ep);
151 }
152 #endif
153
154 static struct rb_captured_block *
VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t * cfp)155 VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
156 {
157 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
158 return (struct rb_captured_block *)&cfp->self;
159 }
160
161 static rb_control_frame_t *
VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block * captured)162 VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
163 {
164 rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
165 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
166 VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0);
167 return cfp;
168 }
169
170 static int
VM_BH_FROM_CFP_P(VALUE block_handler,const rb_control_frame_t * cfp)171 VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
172 {
173 const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
174 return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
175 }
176
177 static VALUE
vm_passed_block_handler(rb_execution_context_t * ec)178 vm_passed_block_handler(rb_execution_context_t *ec)
179 {
180 VALUE block_handler = ec->passed_block_handler;
181 ec->passed_block_handler = VM_BLOCK_HANDLER_NONE;
182 vm_block_handler_verify(block_handler);
183 return block_handler;
184 }
185
/* Allocate a new cref (lexical class reference) imemo for `klass`, linked in
 * front of `prev_cref` (or in front of CREF_NEXT(prev_cref) when
 * `use_prev_prev` is set).  `visi` and `module_func` are packed into a single
 * VALUE-sized scope-visibility word; refinements active in `prev_cref` are
 * shared with the new cref. */
static rb_cref_t *
vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev)
{
    VALUE refinements = Qnil;
    int omod_shared = FALSE;
    rb_cref_t *cref;

    /* scope: pack the visibility bits into one VALUE via a union */
    union {
        rb_scope_visibility_t visi;
        VALUE value;
    } scope_visi;

    scope_visi.visi.method_visi = visi;
    scope_visi.visi.module_func = module_func;

    /* refinements: inherit from the previous cref when present; (void *)1 is
     * skipped as a sentinel next-pointer */
    if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
        refinements = CREF_REFINEMENTS(prev_cref);

        if (!NIL_P(refinements)) {
            /* both crefs now share the same refinements hash */
            omod_shared = TRUE;
            CREF_OMOD_SHARED_SET(prev_cref);
        }
    }

    cref = (rb_cref_t *)rb_imemo_new(imemo_cref, klass, (VALUE)(use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref), scope_visi.value, refinements);

    if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
    if (omod_shared) CREF_OMOD_SHARED_SET(cref);

    return cref;
}
219
220 static rb_cref_t *
vm_cref_new(VALUE klass,rb_method_visibility_t visi,int module_func,rb_cref_t * prev_cref,int pushed_by_eval)221 vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
222 {
223 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE);
224 }
225
226 static rb_cref_t *
vm_cref_new_use_prev(VALUE klass,rb_method_visibility_t visi,int module_func,rb_cref_t * prev_cref,int pushed_by_eval)227 vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
228 {
229 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE);
230 }
231
232 static int
ref_delete_symkey(VALUE key,VALUE value,VALUE unused)233 ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
234 {
235 return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
236 }
237
238 static rb_cref_t *
vm_cref_dup(const rb_cref_t * cref)239 vm_cref_dup(const rb_cref_t *cref)
240 {
241 VALUE klass = CREF_CLASS(cref);
242 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
243 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
244 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
245
246 new_cref = vm_cref_new(klass, visi->method_visi, visi->module_func, next_cref, pushed_by_eval);
247
248 if (!NIL_P(CREF_REFINEMENTS(cref))) {
249 VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
250 rb_hash_foreach(ref, ref_delete_symkey, Qnil);
251 CREF_REFINEMENTS_SET(new_cref, ref);
252 CREF_OMOD_SHARED_UNSET(new_cref);
253 }
254
255 return new_cref;
256 }
257
258 static rb_cref_t *
vm_cref_new_toplevel(rb_execution_context_t * ec)259 vm_cref_new_toplevel(rb_execution_context_t *ec)
260 {
261 rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE);
262 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
263
264 if (top_wrapper) {
265 cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE);
266 }
267
268 return cref;
269 }
270
271 rb_cref_t *
rb_vm_cref_new_toplevel(void)272 rb_vm_cref_new_toplevel(void)
273 {
274 return vm_cref_new_toplevel(GET_EC());
275 }
276
277 static void
vm_cref_dump(const char * mesg,const rb_cref_t * cref)278 vm_cref_dump(const char *mesg, const rb_cref_t *cref)
279 {
280 fprintf(stderr, "vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
281
282 while (cref) {
283 fprintf(stderr, "= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
284 cref = CREF_NEXT(cref);
285 }
286 }
287
/* Point the captured block `dst` (owned by object `obj`) at a new
 * environment `ep`, firing the GC write barrier for the env object.
 * The const-cast is deliberate: captured.ep is declared const but must be
 * retargeted here when an env escapes to the heap. */
void
rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
{
    *((const VALUE **)&dst->as.captured.ep) = ep;
    RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
}
294
/* Retarget a binding's captured block at the (heap) env `envval`: update
 * both the iseq (with write barrier) and the ep. */
static void
vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
{
    const rb_env_t *env = (rb_env_t *)envval;
    RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
    rb_vm_block_ep_update(bindval, &bind->block, env->ep);
}
302
303 #if VM_COLLECT_USAGE_DETAILS
304 static void vm_collect_usage_operand(int insn, int n, VALUE op);
305 static void vm_collect_usage_insn(int insn);
306 static void vm_collect_usage_register(int reg, int isset);
307 #endif
308
309 static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
310 extern VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
311 int argc, const VALUE *argv, VALUE block_handler,
312 const rb_callable_method_entry_t *me);
313 static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, VALUE block_handler);
314
315 static VALUE rb_block_param_proxy;
316
317 #include "mjit.h"
318 #include "vm_insnhelper.h"
319 #include "vm_exec.h"
320 #include "vm_insnhelper.c"
321
322 #ifndef MJIT_HEADER
323
324 #include "vm_exec.c"
325
326 #include "vm_method.c"
327 #endif /* #ifndef MJIT_HEADER */
328 #include "vm_eval.c"
329 #ifndef MJIT_HEADER
330
331 #define PROCDEBUG 0
332
333 rb_serial_t
rb_next_class_serial(void)334 rb_next_class_serial(void)
335 {
336 rb_serial_t class_serial = NEXT_CLASS_SERIAL();
337 mjit_add_class_serial(class_serial);
338 return class_serial;
339 }
340
341 VALUE rb_cRubyVM;
342 VALUE rb_cThread;
343 VALUE rb_mRubyVMFrozenCore;
344
345 #define ruby_vm_redefined_flag GET_VM()->redefined_flag
346 VALUE ruby_vm_const_missing_count = 0;
347 rb_vm_t *ruby_current_vm_ptr = NULL;
348 rb_execution_context_t *ruby_current_execution_context_ptr = NULL;
349
350 rb_event_flag_t ruby_vm_event_flags;
351 rb_event_flag_t ruby_vm_event_enabled_global_flags;
352 unsigned int ruby_vm_event_local_num;
353
354 rb_serial_t ruby_vm_global_method_state = 1;
355 rb_serial_t ruby_vm_global_constant_state = 1;
356 rb_serial_t ruby_vm_class_serial = 1;
357
358 static void thread_free(void *ptr);
359
360 void
rb_vm_inc_const_missing_count(void)361 rb_vm_inc_const_missing_count(void)
362 {
363 ruby_vm_const_missing_count +=1;
364 }
365
366 VALUE rb_class_path_no_cache(VALUE _klass);
367
/* Fill `args` with class name, method name, file and line for a dtrace
 * method hook.  When `klass` is 0 it is looked up from the current frame;
 * iclasses and singleton classes are normalized to the underlying class.
 * Returns TRUE when all fields were resolved, FALSE otherwise. */
MJIT_FUNC_EXPORTED int
rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
                struct ruby_dtrace_method_hook_args *args)
{
    enum ruby_value_type type;
    if (!klass) {
        if (!ec) ec = GET_EC();
        /* derive method id and class from the current frame */
        if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
            return FALSE;
    }
    if (RB_TYPE_P(klass, T_ICLASS)) {
        /* iclass: report the included module instead */
        klass = RBASIC(klass)->klass;
    }
    else if (FL_TEST(klass, FL_SINGLETON)) {
        /* singleton class: report the attached object's class */
        klass = rb_attr_get(klass, id__attached__);
        if (NIL_P(klass)) return FALSE;
    }
    type = BUILTIN_TYPE(klass);
    if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
        VALUE name = rb_class_path_no_cache(klass);
        const char *classname, *filename;
        const char *methodname = rb_id2name(id);
        /* only succeed when both a method name and a source location exist */
        if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
            if (NIL_P(name) || !(classname = StringValuePtr(name)))
                classname = "<unknown>";
            args->classname = classname;
            args->methodname = methodname;
            args->filename = filename;
            args->klass = klass;
            args->name = name; /* keep the name VALUE alive alongside classname */
            return TRUE;
        }
    }
    return FALSE;
}
403
404 /*
405 * call-seq:
406 * RubyVM.stat -> Hash
407 * RubyVM.stat(hsh) -> hsh
408 * RubyVM.stat(Symbol) -> Numeric
409 *
410 * Returns a Hash containing implementation-dependent counters inside the VM.
411 *
412 * This hash includes information about method/constant cache serials:
413 *
414 * {
415 * :global_method_state=>251,
416 * :global_constant_state=>481,
417 * :class_serial=>9029
418 * }
419 *
420 * The contents of the hash are implementation specific and may be changed in
421 * the future.
422 *
423 * This method is only expected to work on C Ruby.
424 */
425
/* Implementation of RubyVM.stat: with no argument returns a new Hash of
 * all counters; with a Symbol returns that single counter; with a Hash
 * fills it in place.  See the call-seq comment above. */
static VALUE
vm_stat(int argc, VALUE *argv, VALUE self)
{
    /* interned once on first call and reused for every later call */
    static VALUE sym_global_method_state, sym_global_constant_state, sym_class_serial;
    VALUE arg = Qnil;
    VALUE hash = Qnil, key = Qnil;

    if (rb_check_arity(argc, 0, 1) == 1) {
        arg = argv[0];
        if (SYMBOL_P(arg))
            key = arg;
        else if (RB_TYPE_P(arg, T_HASH))
            hash = arg;
        else
            rb_raise(rb_eTypeError, "non-hash or symbol given");
    }
    else {
        hash = rb_hash_new();
    }

    /* lazy one-time initialization of the symbol cache */
    if (sym_global_method_state == 0) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
        S(global_method_state);
        S(global_constant_state);
        S(class_serial);
#undef S
    }

    /* either return the single requested counter or record it in the hash */
#define SET(name, attr) \
    if (key == sym_##name) \
        return SERIALT2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));

    SET(global_method_state, ruby_vm_global_method_state);
    SET(global_constant_state, ruby_vm_global_constant_state);
    SET(class_serial, ruby_vm_class_serial);
#undef SET

    if (!NIL_P(key)) { /* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

    return hash;
}
471
472 /* control stack frame */
473
/* Push a TOP frame for a toplevel iseq onto the execution context's
 * control-frame stack.  Raises TypeError for any other iseq type. */
static void
vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
{
    if (iseq->body->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* for return */
    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
                  VM_BLOCK_HANDLER_NONE,
                  (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
                  iseq->body->iseq_encoded, ec->cfp->sp,
                  iseq->body->local_table_size, iseq->body->stack_max);
}
488
/* Push an EVAL frame that runs `iseq` with the self and environment of
 * `base_block`, using `cref` for lexical scope (may be 0). */
static void
vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
{
    vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
                  vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
                  (VALUE)cref, /* cref or me */
                  iseq->body->iseq_encoded,
                  ec->cfp->sp, iseq->body->local_table_size,
                  iseq->body->stack_max);
}
499
/* Push the frame for the main script, evaluated in the context of
 * TOPLEVEL_BINDING, and record the resulting env back into that binding
 * when the script defines local variables. */
static void
vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;

    GetBindingPtr(toplevel_binding, bind);
    RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");

    vm_set_eval_stack(ec, iseq, 0, &bind->block);

    /* save binding */
    if (iseq->body->local_table_size > 0) {
        vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
    }
}
516
517 rb_control_frame_t *
rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t * ec,const rb_control_frame_t * cfp)518 rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
519 {
520 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
521 if (cfp->iseq) {
522 return (rb_control_frame_t *)cfp;
523 }
524 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
525 }
526 return 0;
527 }
528
/* Starting at `cfp` and walking toward older frames, return the first
 * Ruby-level (iseq) frame, or 0 when the walk runs off the stack. */
MJIT_FUNC_EXPORTED rb_control_frame_t *
rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) bp(); /* debugger breakpoint hook */
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
        if (VM_FRAME_RUBYFRAME_P(cfp)) {
            return (rb_control_frame_t *)cfp;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
541
542 #endif /* #ifndef MJIT_HEADER */
543
/* Find the Ruby-level caller frame of `cfp`.  If `cfp` itself is a Ruby
 * frame it is returned directly; otherwise the walk continues toward older
 * frames, but only across frames whose env carries VM_FRAME_FLAG_PASSED —
 * stopping at the first frame without it.  Returns 0 when no caller is
 * found. */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    if (VM_FRAME_RUBYFRAME_P(cfp)) {
        return (rb_control_frame_t *)cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
        if (VM_FRAME_RUBYFRAME_P(cfp)) {
            return (rb_control_frame_t *)cfp;
        }

        /* stop unless this frame was flagged as "passed" */
        if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
565
/* Pop the current C-function frame, firing the C_RETURN event hook and the
 * dtrace return hook first — exactly what normal cfunc return does, for use
 * when a frame is unwound outside the usual return path. */
MJIT_STATIC void
rb_vm_pop_cfunc_frame(void)
{
    rb_execution_context_t *ec = GET_EC();
    rb_control_frame_t *cfp = ec->cfp;
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);

    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
    vm_pop_frame(ec, cfp, cfp->ep);
}
577
578 #ifndef MJIT_HEADER
579
/* Pop frames until ec->cfp reaches `cfp`.  Used after a non-local jump may
 * have left skipped frames on the stack; cfunc frames take the slow path so
 * their C_RETURN hooks still fire. */
void
rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
{
    /* check skipped frame */
    while (ec->cfp != cfp) {
#if VMDEBUG
        printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
#endif
        if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
            rb_vm_pop_frame(ec);
        }
        else { /* unlikely path: keep the C_RETURN event hook semantics */
            rb_vm_pop_cfunc_frame();
        }
    }
}
596
597 /* at exit */
598
599 void
ruby_vm_at_exit(void (* func)(rb_vm_t *))600 ruby_vm_at_exit(void (*func)(rb_vm_t *))
601 {
602 rb_vm_t *vm = GET_VM();
603 rb_at_exit_list *nl = ALLOC(rb_at_exit_list);
604 nl->func = func;
605 nl->next = vm->at_exit;
606 vm->at_exit = nl;
607 }
608
609 static void
ruby_vm_run_at_exit_hooks(rb_vm_t * vm)610 ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
611 {
612 rb_at_exit_list *l = vm->at_exit;
613
614 while (l) {
615 rb_at_exit_list* t = l->next;
616 rb_vm_at_exit_func *func = l->func;
617 ruby_xfree(l);
618 l = t;
619 (*func)(vm);
620 }
621 }
622
623 /* Env */
624
625 static VALUE check_env_value(const rb_env_t *env);
626
/* Debug helper: dump one env (and, recursively, its outer envs) to stderr.
 * Always returns 1 ("valid"); check_env_value turns a 0 into rb_bug. */
static int
check_env(const rb_env_t *env)
{
    fprintf(stderr, "---\n");
    fprintf(stderr, "envptr: %p\n", (void *)&env->ep[0]);
    fprintf(stderr, "envval: %10p ", (void *)env->ep[1]);
    dp(env->ep[1]);
    fprintf(stderr, "ep: %10p\n", (void *)env->ep);
    /* recurse into the outer env, if any */
    if (rb_vm_env_prev_env(env)) {
        fprintf(stderr, ">>\n");
        check_env_value(rb_vm_env_prev_env(env));
        fprintf(stderr, "<<\n");
    }
    return 1;
}
642
643 static VALUE
check_env_value(const rb_env_t * env)644 check_env_value(const rb_env_t *env)
645 {
646 if (check_env(env)) {
647 return (VALUE)env;
648 }
649 rb_bug("invalid env");
650 return Qnil; /* unreachable */
651 }
652
/* Convert a block handler into a form that can outlive its stack frame:
 * iseq/ifunc handlers are wrapped in a new Proc; symbol and proc handlers
 * are already heap objects and pass through unchanged. */
static VALUE
vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
{
    switch (vm_block_handler_type(block_handler)) {
      case block_handler_type_ifunc:
      case block_handler_type_iseq:
        return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);

      case block_handler_type_symbol:
      case block_handler_type_proc:
        return block_handler;
    }
    VM_UNREACHABLE(vm_block_handler_escape);
    return Qnil;
}
668
/* Move the environment of `cfp` from the machine stack to the GC heap
 * ("escape" it) and return the resulting rb_env_t as a VALUE.  Outer envs
 * are escaped first (recursively); an already-escaped env is returned
 * unchanged.  After this, cfp->ep points into the heap copy. */
static VALUE
vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
{
    const VALUE * const ep = cfp->ep;
    const rb_env_t *env;
    const rb_iseq_t *env_iseq;
    VALUE *env_body, *env_ep;
    int local_size, env_size;

    if (VM_ENV_ESCAPED_P(ep)) {
        /* already on the heap: nothing to do */
        return VM_ENV_ENVVAL(ep);
    }

    if (!VM_ENV_LOCAL_P(ep)) {
        /* block frame: escape the outer env first, then retarget our
         * specval link at its heap copy */
        const VALUE *prev_ep = VM_ENV_PREV_EP(ep);

        if (!VM_ENV_ESCAPED_P(prev_ep)) {
            rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

            while (prev_cfp->ep != prev_ep) {
                prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
                VM_ASSERT(prev_cfp->ep != NULL);
            }

            vm_make_env_each(ec, prev_cfp);
            VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
        }
    }
    else {
        /* local (method-level) frame: a live block handler must itself
         * escape (become a Proc) before the env moves */
        VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);

        if (block_handler != VM_BLOCK_HANDLER_NONE) {
            VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
            VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
        }
    }

    if (!VM_FRAME_RUBYFRAME_P(cfp)) {
        local_size = VM_ENV_DATA_SIZE;
    }
    else {
        local_size = cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
    }

    /*
     * # local variables on a stack frame (N == local_size)
     * [lvar1, lvar2, ..., lvarN, SPECVAL]
     *                            ^
     *                            ep[0]
     *
     * # moved local variables
     * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
     *  ^                         ^
     *  env->env[0]               ep[0]
     */

    env_size = local_size +
               1 /* envval */;
    env_body = ALLOC_N(VALUE, env_size);
    MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);

#if 0
    for (i = 0; i < local_size; i++) {
        if (VM_FRAME_RUBYFRAME_P(cfp)) {
            /* clear value stack for GC */
            ep[-local_size + i] = 0;
        }
    }
#endif

    env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL;
    env_ep = &env_body[local_size - 1 /* specval */];

    env = vm_env_new(env_ep, env_body, env_size, env_iseq);

    /* switch the frame to the heap copy and remember the env on the old
     * stack slot so GC can still find it through the stack */
    cfp->ep = env_ep;
    VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
    VM_STACK_ENV_WRITE(ep, 0, (VALUE)env);      /* GC mark */
    return (VALUE)env;
}
749
750 static VALUE
vm_make_env_object(const rb_execution_context_t * ec,rb_control_frame_t * cfp)751 vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
752 {
753 VALUE envval = vm_make_env_each(ec, cfp);
754
755 if (PROCDEBUG) {
756 check_env_value((const rb_env_t *)envval);
757 }
758
759 return envval;
760 }
761
762 void
rb_vm_stack_to_heap(rb_execution_context_t * ec)763 rb_vm_stack_to_heap(rb_execution_context_t *ec)
764 {
765 rb_control_frame_t *cfp = ec->cfp;
766 while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
767 vm_make_env_object(ec, cfp);
768 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
769 }
770 }
771
772 const rb_env_t *
rb_vm_env_prev_env(const rb_env_t * env)773 rb_vm_env_prev_env(const rb_env_t *env)
774 {
775 const VALUE *ep = env->ep;
776
777 if (VM_ENV_LOCAL_P(ep)) {
778 return NULL;
779 }
780 else {
781 return VM_ENV_ENVVAL_PTR(VM_ENV_PREV_EP(ep));
782 }
783 }
784
785 static int
collect_local_variables_in_iseq(const rb_iseq_t * iseq,const struct local_var_list * vars)786 collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
787 {
788 unsigned int i;
789 if (!iseq) return 0;
790 for (i = 0; i < iseq->body->local_table_size; i++) {
791 local_var_list_add(vars, iseq->body->local_table[i]);
792 }
793 return 1;
794 }
795
796 static void
collect_local_variables_in_env(const rb_env_t * env,const struct local_var_list * vars)797 collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
798 {
799 do {
800 collect_local_variables_in_iseq(env->iseq, vars);
801 } while ((env = rb_vm_env_prev_env(env)) != NULL);
802 }
803
804 static int
vm_collect_local_variables_in_heap(const VALUE * ep,const struct local_var_list * vars)805 vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
806 {
807 if (VM_ENV_ESCAPED_P(ep)) {
808 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
809 return 1;
810 }
811 else {
812 return 0;
813 }
814 }
815
816 VALUE
rb_vm_env_local_variables(const rb_env_t * env)817 rb_vm_env_local_variables(const rb_env_t *env)
818 {
819 struct local_var_list vars;
820 local_var_list_init(&vars);
821 collect_local_variables_in_env(env, &vars);
822 return local_var_list_finish(&vars);
823 }
824
825 VALUE
rb_iseq_local_variables(const rb_iseq_t * iseq)826 rb_iseq_local_variables(const rb_iseq_t *iseq)
827 {
828 struct local_var_list vars;
829 local_var_list_init(&vars);
830 while (collect_local_variables_in_iseq(iseq, &vars)) {
831 iseq = iseq->body->parent_iseq;
832 }
833 return local_var_list_finish(&vars);
834 }
835
836 /* Proc */
837
/* Allocate a Proc of class `klass` wrapping `captured`, which must already
 * reference a heap env.  The captured self/code/ep are copied into the new
 * proc's block with GC write barriers. */
static VALUE
vm_proc_create_from_captured(VALUE klass,
                             const struct rb_captured_block *captured,
                             enum rb_block_type block_type,
                             int8_t is_from_method, int8_t is_lambda)
{
    VALUE procval = rb_proc_alloc(klass);
    rb_proc_t *proc = RTYPEDDATA_DATA(procval);

    VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));

    /* copy block */
    RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
    RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
    rb_vm_block_ep_update(procval, &proc->block, captured->ep);

    vm_block_type_set(&proc->block, block_type);
    proc->is_from_method = is_from_method;
    proc->is_lambda = is_lambda;

    return procval;
}
860
/* Copy block `src` into `dst` (owned by object `obj`), dispatching on the
 * block's representation and firing GC write barriers for every reference. */
void
rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
{
    /* copy block */
    switch (vm_block_type(src)) {
      case block_type_iseq:
      case block_type_ifunc:
        RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
        RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
        rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
        break;
      case block_type_symbol:
        RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
        break;
      case block_type_proc:
        RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
        break;
    }
}
880
/* Allocate a Proc of class `klass` that copies the (heap-env) block
 * `block`, preserving the block's representation type. */
static VALUE
proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
{
    VALUE procval = rb_proc_alloc(klass);
    rb_proc_t *proc = RTYPEDDATA_DATA(procval);

    VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
    rb_vm_block_copy(procval, &proc->block, block);
    vm_block_type_set(&proc->block, block->type);
    proc->is_from_method = is_from_method;
    proc->is_lambda = is_lambda;

    return procval;
}
895
896 VALUE
rb_proc_dup(VALUE self)897 rb_proc_dup(VALUE self)
898 {
899 VALUE procval;
900 rb_proc_t *src;
901
902 GetProcPtr(self, src);
903 procval = proc_create(rb_cProc, &src->block, src->is_from_method, src->is_lambda);
904 RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
905 return procval;
906 }
907
908
/* Create a Proc (or lambda, per `is_lambda`) of class `klass` from
 * `captured`.  If the captured env still lives on the VM stack, it is
 * escaped to the heap first so the proc can outlive its frame. */
MJIT_FUNC_EXPORTED VALUE
rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
{
    VALUE procval;

    if (!VM_ENV_ESCAPED_P(captured->ep)) {
        /* still stack-resident: materialize the env on the heap */
        rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
        vm_make_env_object(ec, cfp);
    }
    VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
    VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) ||
              imemo_type_p(captured->code.val, imemo_ifunc));

    procval = vm_proc_create_from_captured(klass, captured,
                                           imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda);
    return procval;
}
926
927 /* Binding */
928
/* Create a Binding for the frame `src_cfp`: escape the envs of every
 * binding-creatable frame down to the nearest Ruby-level frame, then wire
 * the new binding's block, source path and line to that frame.  Raises
 * when no suitable frame exists (e.g. at the root of a Fiber). */
VALUE
rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
{
    rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(ec, src_cfp);
    rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
    VALUE bindval, envval;
    rb_binding_t *bind;

    if (cfp == 0 || ruby_level_cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
    }

    /* escape envs from src_cfp down to the Ruby-level frame; the last
     * envval produced belongs to ruby_level_cfp */
    while (1) {
        envval = vm_make_env_object(ec, cfp);
        if (cfp == ruby_level_cfp) {
            break;
        }
        cfp = rb_vm_get_binding_creatable_next_cfp(ec, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
    }

    bindval = rb_binding_alloc(rb_cBinding);
    GetBindingPtr(bindval, bind);
    vm_bind_update_env(bindval, bind, envval);
    RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
    RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
    RB_OBJ_WRITE(bindval, &bind->pathobj, ruby_level_cfp->iseq->body->location.pathobj);
    bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);

    return bindval;
}
959
/* Add `dyncount` new local variables (`dynvars`) to the binding `bind` by
 * compiling a throwaway NODE_SCOPE iseq that declares them, evaluating it
 * on a temporary frame, and swapping the binding's env for the resulting
 * one.  Returns the new env's variable array, or 0 when dyncount < 0. */
const VALUE *
rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
{
    VALUE envval, pathobj = bind->pathobj;
    VALUE path = pathobj_path(pathobj);
    VALUE realpath = pathobj_realpath(pathobj);
    const struct rb_block *base_block;
    const rb_env_t *env;
    rb_execution_context_t *ec = GET_EC();
    const rb_iseq_t *base_iseq, *iseq;
    rb_ast_body_t ast;
    NODE tmp_node;
    ID minibuf[4], *dyns = minibuf; /* heap-allocate only for >3 vars */
    VALUE idtmp = 0;

    if (dyncount < 0) return 0;

    base_block = &bind->block;
    base_iseq = vm_block_iseq(base_block);

    if (dyncount >= numberof(minibuf)) dyns = ALLOCV_N(ID, idtmp, dyncount + 1);

    /* build a local table: [count, id0, id1, ...] for the scope node */
    dyns[0] = dyncount;
    MEMCPY(dyns + 1, dynvars, ID, dyncount);
    rb_node_init(&tmp_node, NODE_SCOPE, (VALUE)dyns, 0, 0);
    ast.root = &tmp_node;
    ast.compile_option = 0;
    ast.line_count = -1;

    if (base_iseq) {
        iseq = rb_iseq_new(&ast, base_iseq->body->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
    }
    else {
        VALUE tempstr = rb_fstring_lit("<temp>");
        iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL);
    }
    tmp_node.nd_tbl = 0; /* reset table */
    ALLOCV_END(idtmp);

    /* push the temp frame, capture its env into the binding, pop it */
    vm_set_eval_stack(ec, iseq, 0, base_block);
    vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
    rb_vm_pop_frame(ec);

    env = (const rb_env_t *)envval;
    return env->env;
}
1006
1007 /* C -> Ruby: block */
1008
/* Push a FINISH-flagged block frame for `iseq` and run it to completion
 * with vm_exec().  `opt_pc` is the entry offset selected by argument
 * setup (non-zero when optional parameters were filled). */
static inline VALUE
invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
{
    int arg_size = iseq->body->param.size;

    /* the captured block's ep becomes the (guarded) previous EP of the
     * new frame, linking it to its defining environment */
    vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
                  VM_GUARDED_PREV_EP(captured->ep),
                  (VALUE)cref, /* cref or method */
                  iseq->body->iseq_encoded + opt_pc,
                  ec->cfp->sp + arg_size,
                  iseq->body->local_table_size - arg_size,
                  iseq->body->stack_max);
    return vm_exec(ec, TRUE);
}
1023
/* Invoke the iseq behind a method defined with define_method (bmethod).
 * Unlike invoke_block(), this fires method-level :call/:return events
 * (global, targeted, and DTrace) around execution. */
static VALUE
invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
{
    /* bmethod */
    int arg_size = iseq->body->param.size;
    VALUE ret;
    rb_hook_list_t *hooks;

    VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);

    vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
                  VM_GUARDED_PREV_EP(captured->ep),
                  (VALUE)me,
                  iseq->body->iseq_encoded + opt_pc,
                  ec->cfp->sp + arg_size,
                  iseq->body->local_table_size - arg_size,
                  iseq->body->stack_max);

    RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil);

    /* per-bmethod (targeted TracePoint) hooks, in addition to global ones */
    if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
        hooks->events & RUBY_EVENT_CALL) {
        rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
                                me->def->original_id, me->called_id, me->owner, Qnil, FALSE);
    }
    /* FINISH is set only now, after the frame was pushed and :call fired */
    VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
    ret = vm_exec(ec, TRUE);

    EXEC_EVENT_HOOK(ec, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
    /* hooks are re-read: the list may have changed during execution */
    if ((hooks = me->def->body.bmethod.hooks) != NULL &&
        hooks->events & RUBY_EVENT_RETURN) {
        rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
                                me->def->original_id, me->called_id, me->owner, ret, FALSE);
    }
    RUBY_DTRACE_METHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
    return ret;
}
1062
1063 ALWAYS_INLINE(static VALUE
1064 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1065 VALUE self, int argc, const VALUE *argv, VALUE passed_block_handler,
1066 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1067
/* Common path for invoking an iseq block (or bmethod) from C: copy the C
 * argument vector onto the VM stack, perform block/lambda argument setup,
 * then dispatch to invoke_block() or invoke_bmethod(). */
static inline VALUE
invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
                         VALUE self, int argc, const VALUE *argv, VALUE passed_block_handler,
                         const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
{
    const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
    int i, opt_pc;
    VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
    rb_control_frame_t *cfp = ec->cfp;
    VALUE *sp = cfp->sp;

    stack_check(ec);

    CHECK_VM_STACK_OVERFLOW(cfp, argc);
    /* bump sp over the args while they are being set up (presumably so
     * the copied values stay inside the live stack region -- confirm) */
    cfp->sp = sp + argc;
    for (i=0; i<argc; i++) {
        sp[i] = argv[i];
    }

    /* lambdas get strict, method-style argument checking */
    opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, passed_block_handler,
                                 (is_lambda ? arg_setup_method : arg_setup_block));
    cfp->sp = sp; /* restore; the pushed frame takes over the args region */

    if (me == NULL) {
        return invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
    }
    else {
        return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
    }
}
1098
/* Invoke whatever kind of block `block_handler` designates (iseq block,
 * ifunc, symbol, or Proc).  Proc handlers are unwrapped and re-dispatched
 * via `goto again` as their underlying kind. */
static inline VALUE
invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
                       int argc, const VALUE *argv,
                       VALUE passed_block_handler, const rb_cref_t *cref,
                       int is_lambda, int force_blockarg)
{
  again:
    switch (vm_block_handler_type(block_handler)) {
      case block_handler_type_iseq:
        {
            const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
            return invoke_iseq_block_from_c(ec, captured, captured->self,
                                            argc, argv, passed_block_handler,
                                            cref, is_lambda, NULL);
        }
      case block_handler_type_ifunc:
        return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
                                   VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
                                   argc, argv, passed_block_handler, NULL);
      case block_handler_type_symbol:
        return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
                                    argc, argv, passed_block_handler);
      case block_handler_type_proc:
        /* lambda-ness follows the Proc itself, unless blockarg is forced */
        if (force_blockarg == FALSE) {
            is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
        }
        block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
        goto again;
    }
    VM_UNREACHABLE(invoke_block_from_c_splattable);
    return Qundef;
}
1131
1132 static inline VALUE
check_block_handler(rb_execution_context_t * ec)1133 check_block_handler(rb_execution_context_t *ec)
1134 {
1135 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1136 vm_block_handler_verify(block_handler);
1137 if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
1138 rb_vm_localjump_error("no block given", Qnil, 0);
1139 }
1140
1141 return block_handler;
1142 }
1143
1144 static VALUE
vm_yield_with_cref(rb_execution_context_t * ec,int argc,const VALUE * argv,const rb_cref_t * cref,int is_lambda)1145 vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, const rb_cref_t *cref, int is_lambda)
1146 {
1147 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1148 argc, argv, VM_BLOCK_HANDLER_NONE,
1149 cref, is_lambda, FALSE);
1150 }
1151
1152 static VALUE
vm_yield(rb_execution_context_t * ec,int argc,const VALUE * argv)1153 vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv)
1154 {
1155 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1156 argc, argv, VM_BLOCK_HANDLER_NONE,
1157 NULL, FALSE, FALSE);
1158 }
1159
1160 static VALUE
vm_yield_with_block(rb_execution_context_t * ec,int argc,const VALUE * argv,VALUE block_handler)1161 vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler)
1162 {
1163 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1164 argc, argv, block_handler,
1165 NULL, FALSE, FALSE);
1166 }
1167
1168 static VALUE
vm_yield_force_blockarg(rb_execution_context_t * ec,VALUE args)1169 vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1170 {
1171 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1172 VM_BLOCK_HANDLER_NONE, NULL, FALSE, TRUE);
1173 }
1174
1175 ALWAYS_INLINE(static VALUE
1176 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1177 VALUE self, int argc, const VALUE *argv,
1178 VALUE passed_block_handler, int is_lambda,
1179 const rb_callable_method_entry_t *me));
1180
/* Invoke a Proc's underlying block from C.  Nested Procs are unwrapped
 * (`goto again`), inheriting their own lambda-ness each step. */
static inline VALUE
invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
                         VALUE self, int argc, const VALUE *argv,
                         VALUE passed_block_handler, int is_lambda,
                         const rb_callable_method_entry_t *me)
{
    const struct rb_block *block = &proc->block;

  again:
    switch (vm_block_type(block)) {
      case block_type_iseq:
        return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, passed_block_handler, NULL, is_lambda, me);
      case block_type_ifunc:
        return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, passed_block_handler, me);
      case block_type_symbol:
        return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, passed_block_handler);
      case block_type_proc:
        is_lambda = block_proc_is_lambda(block->as.proc);
        block = vm_proc_block(block->as.proc);
        goto again;
    }
    VM_UNREACHABLE(invoke_block_from_c_proc);
    return Qundef;
}
1205
1206 static VALUE
vm_invoke_proc(rb_execution_context_t * ec,rb_proc_t * proc,VALUE self,int argc,const VALUE * argv,VALUE passed_block_handler)1207 vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1208 int argc, const VALUE *argv, VALUE passed_block_handler)
1209 {
1210 return invoke_block_from_c_proc(ec, proc, self, argc, argv, passed_block_handler, proc->is_lambda, NULL);
1211 }
1212
1213 MJIT_FUNC_EXPORTED VALUE
rb_vm_invoke_bmethod(rb_execution_context_t * ec,rb_proc_t * proc,VALUE self,int argc,const VALUE * argv,VALUE block_handler,const rb_callable_method_entry_t * me)1214 rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1215 int argc, const VALUE *argv, VALUE block_handler, const rb_callable_method_entry_t *me)
1216 {
1217 return invoke_block_from_c_proc(ec, proc, self, argc, argv, block_handler, TRUE, me);
1218 }
1219
1220 MJIT_FUNC_EXPORTED VALUE
rb_vm_invoke_proc(rb_execution_context_t * ec,rb_proc_t * proc,int argc,const VALUE * argv,VALUE passed_block_handler)1221 rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
1222 int argc, const VALUE *argv, VALUE passed_block_handler)
1223 {
1224 VALUE self = vm_block_self(&proc->block);
1225 vm_block_handler_verify(passed_block_handler);
1226
1227 if (proc->is_from_method) {
1228 return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, passed_block_handler, NULL);
1229 }
1230 else {
1231 return vm_invoke_proc(ec, proc, self, argc, argv, passed_block_handler);
1232 }
1233 }
1234
1235 /* special variable */
1236
1237 static rb_control_frame_t *
vm_normal_frame(const rb_execution_context_t * ec,rb_control_frame_t * cfp)1238 vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1239 {
1240 while (cfp->pc == 0) {
1241 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1242 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1243 return 0;
1244 }
1245 }
1246 return cfp;
1247 }
1248
1249 static VALUE
vm_cfp_svar_get(const rb_execution_context_t * ec,rb_control_frame_t * cfp,VALUE key)1250 vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
1251 {
1252 cfp = vm_normal_frame(ec, cfp);
1253 return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key);
1254 }
1255
1256 static void
vm_cfp_svar_set(const rb_execution_context_t * ec,rb_control_frame_t * cfp,VALUE key,const VALUE val)1257 vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
1258 {
1259 cfp = vm_normal_frame(ec, cfp);
1260 lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0, key, val);
1261 }
1262
1263 static VALUE
vm_svar_get(const rb_execution_context_t * ec,VALUE key)1264 vm_svar_get(const rb_execution_context_t *ec, VALUE key)
1265 {
1266 return vm_cfp_svar_get(ec, ec->cfp, key);
1267 }
1268
1269 static void
vm_svar_set(const rb_execution_context_t * ec,VALUE key,VALUE val)1270 vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
1271 {
1272 vm_cfp_svar_set(ec, ec->cfp, key, val);
1273 }
1274
1275 VALUE
rb_backref_get(void)1276 rb_backref_get(void)
1277 {
1278 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
1279 }
1280
1281 void
rb_backref_set(VALUE val)1282 rb_backref_set(VALUE val)
1283 {
1284 vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
1285 }
1286
1287 VALUE
rb_lastline_get(void)1288 rb_lastline_get(void)
1289 {
1290 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
1291 }
1292
1293 void
rb_lastline_set(VALUE val)1294 rb_lastline_set(VALUE val)
1295 {
1296 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
1297 }
1298
1299 /* misc */
1300
1301 /* in intern.h */
1302 const char *
rb_sourcefile(void)1303 rb_sourcefile(void)
1304 {
1305 const rb_execution_context_t *ec = GET_EC();
1306 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1307
1308 if (cfp) {
1309 return RSTRING_PTR(rb_iseq_path(cfp->iseq));
1310 }
1311 else {
1312 return 0;
1313 }
1314 }
1315
1316 /* in intern.h */
1317 int
rb_sourceline(void)1318 rb_sourceline(void)
1319 {
1320 const rb_execution_context_t *ec = GET_EC();
1321 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1322
1323 if (cfp) {
1324 return rb_vm_get_sourceline(cfp);
1325 }
1326 else {
1327 return 0;
1328 }
1329 }
1330
1331 VALUE
rb_source_location(int * pline)1332 rb_source_location(int *pline)
1333 {
1334 const rb_execution_context_t *ec = GET_EC();
1335 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1336
1337 if (cfp && cfp->iseq) {
1338 if (pline) *pline = rb_vm_get_sourceline(cfp);
1339 return rb_iseq_path(cfp->iseq);
1340 }
1341 else {
1342 if (pline) *pline = 0;
1343 return Qnil;
1344 }
1345 }
1346
1347 MJIT_FUNC_EXPORTED const char *
rb_source_location_cstr(int * pline)1348 rb_source_location_cstr(int *pline)
1349 {
1350 VALUE path = rb_source_location(pline);
1351 if (NIL_P(path)) return NULL;
1352 return RSTRING_PTR(path);
1353 }
1354
1355 rb_cref_t *
rb_vm_cref(void)1356 rb_vm_cref(void)
1357 {
1358 const rb_execution_context_t *ec = GET_EC();
1359 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1360
1361 if (cfp == NULL) {
1362 return NULL;
1363 }
1364 return rb_vm_get_cref(cfp->ep);
1365 }
1366
/* Replace the cref of the nearest Ruby-level frame with a duplicated copy
 * and return it.
 * NOTE(review): unlike rb_vm_cref(), `cfp` is dereferenced without a NULL
 * check -- this assumes a Ruby-level frame always exists when this is
 * called; confirm against callers. */
rb_cref_t *
rb_vm_cref_replace_with_duplicated_cref(void)
{
    const rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
    rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
    return cref;
}
1375
1376 const rb_cref_t *
rb_vm_cref_in_context(VALUE self,VALUE cbase)1377 rb_vm_cref_in_context(VALUE self, VALUE cbase)
1378 {
1379 const rb_execution_context_t *ec = GET_EC();
1380 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1381 const rb_cref_t *cref;
1382 if (cfp->self != self) return NULL;
1383 if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
1384 cref = rb_vm_get_cref(cfp->ep);
1385 if (CREF_CLASS(cref) != cbase) return NULL;
1386 return cref;
1387 }
1388
1389 #if 0
1390 void
1391 debug_cref(rb_cref_t *cref)
1392 {
1393 while (cref) {
1394 dp(CREF_CLASS(cref));
1395 printf("%ld\n", CREF_VISI(cref));
1396 cref = CREF_NEXT(cref);
1397 }
1398 }
1399 #endif
1400
1401 VALUE
rb_vm_cbase(void)1402 rb_vm_cbase(void)
1403 {
1404 const rb_execution_context_t *ec = GET_EC();
1405 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1406
1407 if (cfp == 0) {
1408 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
1409 }
1410 return vm_get_cbase(cfp->ep);
1411 }
1412
1413 /* jump */
1414
/* Build a LocalJumpError whose @exit_value and @reason ivars reflect the
 * given jump tag.  CONST_ID caches each reason symbol per call site, so
 * the switch cannot be collapsed into a lookup table. */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
1446
1447 MJIT_FUNC_EXPORTED void
rb_vm_localjump_error(const char * mesg,VALUE value,int reason)1448 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
1449 {
1450 VALUE exc = make_localjump_error(mesg, value, reason);
1451 rb_exc_raise(exc);
1452 }
1453
1454 VALUE
rb_vm_make_jump_tag_but_local_jump(int state,VALUE val)1455 rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
1456 {
1457 const char *mesg;
1458
1459 switch (state) {
1460 case TAG_RETURN:
1461 mesg = "unexpected return";
1462 break;
1463 case TAG_BREAK:
1464 mesg = "unexpected break";
1465 break;
1466 case TAG_NEXT:
1467 mesg = "unexpected next";
1468 break;
1469 case TAG_REDO:
1470 mesg = "unexpected redo";
1471 val = Qnil;
1472 break;
1473 case TAG_RETRY:
1474 mesg = "retry outside of rescue clause";
1475 val = Qnil;
1476 break;
1477 default:
1478 return Qnil;
1479 }
1480 if (val == Qundef) {
1481 val = GET_EC()->tag->retval;
1482 }
1483 return make_localjump_error(mesg, val, state);
1484 }
1485
1486 #if 0
1487 void
1488 rb_vm_jump_tag_but_local_jump(int state)
1489 {
1490 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
1491 if (!NIL_P(exc)) rb_exc_raise(exc);
1492 EC_JUMP_TAG(GET_EC(), state);
1493 }
1494 #endif
1495
1496 static rb_control_frame_t *
next_not_local_frame(rb_control_frame_t * cfp)1497 next_not_local_frame(rb_control_frame_t *cfp)
1498 {
1499 while (VM_ENV_LOCAL_P(cfp->ep)) {
1500 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1501 }
1502 return cfp;
1503 }
1504
1505 NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
1506
/* Implement `break` from C iterator code: find the frame that owns the
 * enclosing block's environment and throw TAG_BREAK towards it. */
static void
vm_iter_break(rb_execution_context_t *ec, VALUE val)
{
    rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
    const VALUE *ep = VM_CF_PREV_EP(cfp);
    /* target_cfp may be NULL when the owning frame is gone (see #if 0) */
    const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);

#if 0				/* raise LocalJumpError */
    if (!target_cfp) {
	rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
    }
#endif

    ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
    EC_JUMP_TAG(ec, TAG_BREAK);
}
1523
1524 void
rb_iter_break(void)1525 rb_iter_break(void)
1526 {
1527 vm_iter_break(GET_EC(), Qnil);
1528 }
1529
1530 void
rb_iter_break_value(VALUE val)1531 rb_iter_break_value(VALUE val)
1532 {
1533 vm_iter_break(GET_EC(), val);
1534 }
1535
1536 /* optimization: redefine management */
1537
1538 static st_table *vm_opt_method_table = 0;
1539
/* Map a built-in class to its basic-operation redefinition flag bit;
 * 0 when the class is not tracked for redefinition. */
static int
vm_redefinition_check_flag(VALUE klass)
{
    if (klass == rb_cInteger) return INTEGER_REDEFINED_OP_FLAG;
    if (klass == rb_cFloat)  return FLOAT_REDEFINED_OP_FLAG;
    if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
    if (klass == rb_cArray)  return ARRAY_REDEFINED_OP_FLAG;
    if (klass == rb_cHash)   return HASH_REDEFINED_OP_FLAG;
    if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
    if (klass == rb_cTime)   return TIME_REDEFINED_OP_FLAG;
    if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
    if (klass == rb_cNilClass) return NIL_REDEFINED_OP_FLAG;
    if (klass == rb_cTrueClass) return TRUE_REDEFINED_OP_FLAG;
    if (klass == rb_cFalseClass) return FALSE_REDEFINED_OP_FLAG;
    if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
    return 0;
}
1557
1558 static int
vm_redefinition_check_method_type(const rb_method_definition_t * def)1559 vm_redefinition_check_method_type(const rb_method_definition_t *def)
1560 {
1561 switch (def->type) {
1562 case VM_METHOD_TYPE_CFUNC:
1563 case VM_METHOD_TYPE_OPTIMIZED:
1564 return TRUE;
1565 default:
1566 return FALSE;
1567 }
1568 }
1569
1570 static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t * me,VALUE klass)1571 rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
1572 {
1573 st_data_t bop;
1574 if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN)) {
1575 klass = RBASIC_CLASS(klass);
1576 }
1577 if (vm_redefinition_check_method_type(me->def)) {
1578 if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
1579 int flag = vm_redefinition_check_flag(klass);
1580
1581 ruby_vm_redefined_flag[bop] |= flag;
1582 }
1583 }
1584 }
1585
1586 static enum rb_id_table_iterator_result
check_redefined_method(ID mid,VALUE value,void * data)1587 check_redefined_method(ID mid, VALUE value, void *data)
1588 {
1589 VALUE klass = (VALUE)data;
1590 const rb_method_entry_t *me = (rb_method_entry_t *)value;
1591 const rb_method_entry_t *newme = rb_method_entry(klass, mid);
1592
1593 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
1594
1595 return ID_TABLE_CONTINUE;
1596 }
1597
1598 void
rb_vm_check_redefinition_by_prepend(VALUE klass)1599 rb_vm_check_redefinition_by_prepend(VALUE klass)
1600 {
1601 if (!vm_redefinition_check_flag(klass)) return;
1602 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
1603 }
1604
1605 static void
add_opt_method(VALUE klass,ID mid,VALUE bop)1606 add_opt_method(VALUE klass, ID mid, VALUE bop)
1607 {
1608 const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
1609
1610 if (me && vm_redefinition_check_method_type(me->def)) {
1611 st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
1612 }
1613 else {
1614 rb_bug("undefined optimized method: %s", rb_id2name(mid));
1615 }
1616 }
1617
/* Build vm_opt_method_table and clear ruby_vm_redefined_flag[] for every
 * basic operation (BOP) the VM specializes.  OP(mid, bop) selects the
 * method id and BOP index; C(Klass) registers that method on the class so
 * later redefinitions are detected by rb_vm_check_redefinition_opt_method. */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Integer), C(Float));
    OP(MULT, MULT), (C(Integer), C(Float));
    OP(DIV, DIV), (C(Integer), C(Float));
    OP(MOD, MOD), (C(Integer), C(Float));
    OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
    OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
		   C(NilClass), C(TrueClass), C(FalseClass));
    OP(LT, LT), (C(Integer), C(Float));
    OP(LE, LE), (C(Integer), C(Float));
    OP(GT, GT), (C(Integer), C(Float));
    OP(GE, GE), (C(Integer), C(Float));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Integer), C(String), C(Time));
    OP(EqTilde, MATCH), (C(Regexp), C(String));
    OP(Freeze, FREEZE), (C(String));
    OP(UMinus, UMINUS), (C(String));
    OP(Max, MAX), (C(Array));
    OP(Min, MIN), (C(Array));
    OP(Call, CALL), (C(Proc));
    OP(And, AND), (C(Integer));
    OP(Or, OR), (C(Integer));
#undef C
#undef OP
}
1658
1659 /* for vm development */
1660
#if VMDEBUG
/* Human-readable name of a control frame's type (debug builds only). */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK:  return "block";
      case VM_FRAME_MAGIC_CLASS:  return "class";
      case VM_FRAME_MAGIC_TOP:    return "top";
      case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
      case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
      case VM_FRAME_MAGIC_EVAL:   return "eval";
      case VM_FRAME_MAGIC_RESCUE: return "rescue";
      default:
	rb_bug("unknown frame");
    }
}
#endif
1679
1680 static VALUE
frame_return_value(const struct vm_throw_data * err)1681 frame_return_value(const struct vm_throw_data *err)
1682 {
1683 if (THROW_DATA_P(err) &&
1684 THROW_DATA_STATE(err) == TAG_BREAK &&
1685 THROW_DATA_CONSUMED_P(err) == FALSE) {
1686 return THROW_DATA_VAL(err);
1687 }
1688 else {
1689 return Qnil;
1690 }
1691 }
1692
1693 #if 0
1694 /* for debug */
1695 static const char *
1696 frame_name(const rb_control_frame_t *cfp)
1697 {
1698 unsigned long type = VM_FRAME_TYPE(cfp);
1699 #define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
1700 C(METHOD);
1701 C(BLOCK);
1702 C(CLASS);
1703 C(TOP);
1704 C(CFUNC);
1705 C(PROC);
1706 C(IFUNC);
1707 C(EVAL);
1708 C(LAMBDA);
1709 C(RESCUE);
1710 C(DUMMY);
1711 #undef C
1712 return "unknown";
1713 }
1714 #endif
1715
/* Fire the trace events (:return / :b_return / :end) that correspond to
 * abnormally leaving the current frame, just before exception/jump
 * handling rewinds past it.  Skipped for SystemStackError raises --
 * presumably to avoid running hooks with an exhausted machine stack;
 * confirm. */
static void
hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
		   int will_finish_vm_exec, int state, struct vm_throw_data *err)
{
    if (state == TAG_RAISE && RBASIC_CLASS(err) == rb_eSysStackError) {
	return;
    }
    else {
	const rb_iseq_t *iseq = cfp->iseq;
	rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;

	switch (VM_FRAME_TYPE(ec->cfp)) {
	  case VM_FRAME_MAGIC_METHOD:
	    RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
	    EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));

	    /* targeted (per-iseq) hooks fire in addition to global ones */
	    if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
		rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
					ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
	    }

	    THROW_DATA_CONSUMED_SET(err);
	    break;
	  case VM_FRAME_MAGIC_BLOCK:
	    if (VM_FRAME_BMETHOD_P(ec->cfp)) {
		/* bmethod frames emit both :b_return and :return */
		EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
		if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
		    rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
					    ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
		}

		if (!will_finish_vm_exec) {
		    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);

		    /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
		    EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
						  rb_vm_frame_method_entry(ec->cfp)->def->original_id,
						  rb_vm_frame_method_entry(ec->cfp)->called_id,
						  rb_vm_frame_method_entry(ec->cfp)->owner,
						  frame_return_value(err));

		    VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
		    local_hooks = me->def->body.bmethod.hooks;

		    if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
			rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
						rb_vm_frame_method_entry(ec->cfp)->def->original_id,
						rb_vm_frame_method_entry(ec->cfp)->called_id,
						rb_vm_frame_method_entry(ec->cfp)->owner,
						frame_return_value(err), TRUE);
		    }
		}
		THROW_DATA_CONSUMED_SET(err);
	    }
	    else {
		EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
		if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
		    rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
					    ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
		}
		THROW_DATA_CONSUMED_SET(err);
	    }
	    break;
	  case VM_FRAME_MAGIC_CLASS:
	    EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
	    break;
	}
    }
}
1785
1786 /* evaluator body */
1787
1788 /* finish
1789 VMe (h1) finish
1790 VM finish F1 F2
1791 cfunc finish F1 F2 C1
1792 rb_funcall finish F1 F2 C1
1793 VMe finish F1 F2 C1
1794 VM finish F1 F2 C1 F3
1795
1796 F1 - F3 : pushed by VM
1797 C1 : pushed by send insn (CFUNC)
1798
1799 struct CONTROL_FRAME {
1800 VALUE *pc; // cfp[0], program counter
1801 VALUE *sp; // cfp[1], stack pointer
1802 rb_iseq_t *iseq; // cfp[2], iseq
1803 VALUE self; // cfp[3], self
1804 const VALUE *ep; // cfp[4], env pointer
1805 const void *block_code; // cfp[5], block code
1806 };
1807
1808 struct rb_captured_block {
1809 VALUE self;
1810 VALUE *ep;
1811 union code;
1812 };
1813
1814 struct METHOD_ENV {
1815 VALUE param0;
1816 ...
1817 VALUE paramN;
1818 VALUE lvar1;
1819 ...
1820 VALUE lvarM;
1821 VALUE cref; // ep[-2]
1822 VALUE special; // ep[-1]
1823 VALUE flags; // ep[ 0] == lep[0]
1824 };
1825
1826 struct BLOCK_ENV {
1827 VALUE block_param0;
1828 ...
1829 VALUE block_paramN;
1830 VALUE block_lvar1;
1831 ...
1832 VALUE block_lvarM;
1833 VALUE cref; // ep[-2]
1834 VALUE special; // ep[-1]
1835 VALUE flags; // ep[ 0]
1836 };
1837
1838 struct CLASS_ENV {
1839 VALUE class_lvar0;
1840 ...
1841 VALUE class_lvarN;
1842 VALUE cref;
1843 VALUE prev_ep; // for frame jump
1844 VALUE flags;
1845 };
1846
1847 struct C_METHOD_CONTROL_FRAME {
1848 VALUE *pc; // 0
1849 VALUE *sp; // stack pointer
1850 rb_iseq_t *iseq; // cmi
1851 VALUE self; // ?
1852 VALUE *ep; // ep == lep
1853 void *code; //
1854 };
1855
1856 struct C_BLOCK_CONTROL_FRAME {
1857 VALUE *pc; // point only "finish" insn
1858 VALUE *sp; // sp
1859 rb_iseq_t *iseq; // ?
1860 VALUE self; //
1861 VALUE *ep; // ep
1862 void *code; //
1863 };
1864
1865 If mjit_exec is already called before calling vm_exec, `mjit_enable_p` should
1866 be FALSE to avoid calling `mjit_exec` twice.
1867 */
1868
1869 static inline VALUE
1870 vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
1871 VALUE errinfo, VALUE *initial);
1872
/* Run the VM until the current frame finishes or an unhandled non-local
 * jump escapes.  When `mjit_enable_p` is true, JIT-compiled code is tried
 * first via mjit_exec() (see the comment above about avoiding a second
 * mjit_exec call).  Thrown tags are routed through
 * vm_exec_handle_exception(); a Qundef result there means "handler found,
 * resume the interpreter loop". */
MJIT_FUNC_EXPORTED VALUE
vm_exec(rb_execution_context_t *ec, int mjit_enable_p)
{
    enum ruby_tag_type state;
    VALUE result = Qundef;
    VALUE initial = 0;

    EC_PUSH_TAG(ec);

    _tag.retval = Qnil;
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        if (!mjit_enable_p || (result = mjit_exec(ec)) == Qundef) {
            result = vm_exec_core(ec, initial);
        }
        /* jumps into the loop below to re-check _tag.state for `throw` */
        goto vm_loop_start; /* fallback to the VM */
    }
    else {
        result = ec->errinfo;
        rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW);
        while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) {
            /* caught a jump, exec the handler */
            result = vm_exec_core(ec, initial);
          vm_loop_start:
            VM_ASSERT(ec->tag == &_tag);
            /* when caught `throw`, `tag.state` is set. */
            if ((state = _tag.state) == TAG_NONE) break;
            _tag.state = TAG_NONE;
        }
    }
    EC_POP_TAG();
    return result;
}
1905
/*
 * Handle a non-local transfer (`state`, payload `errinfo`) that unwound
 * into vm_exec().  Pops frames until a frame whose catch table covers
 * the current pc can handle the jump.
 *
 * Returns Qundef after rewinding ec->cfp/pc so the interpreter loop can
 * resume (either inside a pushed rescue/ensure block frame, or at a
 * break/next/redo/retry continuation point).  Returns the final value
 * for a TAG_RETURN that reached its FINISH frame.  Otherwise re-raises
 * by jumping to the previous tag.
 */
static inline VALUE
vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
                         VALUE errinfo, VALUE *initial)
{
    struct vm_throw_data *err = (struct vm_throw_data *)errinfo;

    for (;;) {
        unsigned int i;
        const struct iseq_catch_table_entry *entry;
        const struct iseq_catch_table *ct;
        unsigned long epc, cont_pc, cont_sp;
        const rb_iseq_t *catch_iseq;
        rb_control_frame_t *cfp;
        VALUE type;
        const rb_control_frame_t *escape_cfp;

        cont_pc = cont_sp = 0;
        catch_iseq = NULL;

        /* pop frames with no pc or no iseq (dummy/C frames) first;
         * fire c-return events for cfunc frames being unwound */
        while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
                EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
                                              rb_vm_frame_method_entry(ec->cfp)->def->original_id,
                                              rb_vm_frame_method_entry(ec->cfp)->called_id,
                                              rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
                RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
                                                rb_vm_frame_method_entry(ec->cfp)->owner,
                                                rb_vm_frame_method_entry(ec->cfp)->def->original_id);
            }
            rb_vm_pop_frame(ec);
        }

        cfp = ec->cfp;
        /* exception pc, as an offset into this iseq's encoded body */
        epc = cfp->pc - cfp->iseq->body->iseq_encoded;

        escape_cfp = NULL;
        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_cfp = THROW_DATA_CATCH_FRAME(err);

            if (cfp == escape_cfp) {
                if (state == TAG_RETURN) {
                    if (!VM_FRAME_FINISHED_P(cfp)) {
                        /* not yet at the FINISH frame: keep unwinding,
                         * retargeted as a break to the next outer frame */
                        THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
                        THROW_DATA_STATE_SET(err, state = TAG_BREAK);
                    }
                    else {
                        /* run any ensure clause covering epc before returning */
                        ct = cfp->iseq->body->catch_table;
                        if (ct) for (i = 0; i < ct->size; i++) {
                            entry = &ct->entries[i];
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseq = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (catch_iseq == NULL) {
                            /* no ensure: the return value is final */
                            ec->errinfo = Qnil;
                            THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
                            hook_before_rewind(ec, ec->cfp, TRUE, state, err);
                            rb_vm_pop_frame(ec);
                            return THROW_DATA_VAL(err);
                        }
                    }
                    /* through */
                }
                else {
                    /* TAG_BREAK */
                    /* push the break value so the resumed code sees it */
#if OPT_STACK_CACHING
                    *initial = THROW_DATA_VAL(err);
#else
                    *ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif
                    ec->errinfo = Qnil;
                    return Qundef;
                }
            }
        }

        if (state == TAG_RAISE) {
            /* both rescue and ensure clauses can handle a raise */
            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = &ct->entries[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            /* ensure clauses still run; a RETRY entry in the target frame
             * restarts execution directly at its continuation pc */
            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = &ct->entries[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        const rb_control_frame_t *escape_cfp;
                        escape_cfp = THROW_DATA_CATCH_FRAME(err);
                        if (cfp == escape_cfp) {
                            cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
                            ec->errinfo = Qnil;
                            return Qundef;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && !escape_cfp) {
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            /* shared search for break/redo/next restart entries */
            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = &ct->entries[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        /* restart in this frame at the entry's continuation */
                        cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
                        cfp->sp = vm_base_ptr(cfp) + entry->sp;

                        if (state != TAG_REDO) {
                            /* break/next carry a value onto the stack; redo does not */
#if OPT_STACK_CACHING
                            *initial = THROW_DATA_VAL(err);
#else
                            *ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif
                        }
                        ec->errinfo = Qnil;
                        VM_ASSERT(ec->tag->state == TAG_NONE);
                        return Qundef;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            /* other jumps (e.g. throw): only ensure clauses run here */
            ct = cfp->iseq->body->catch_table;
            if (ct) for (i = 0; i < ct->size; i++) {
                entry = &ct->entries[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseq = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseq != NULL) { /* found catch table */
            /* enter catch scope */
            const int arg_size = 1;

            rb_iseq_check(catch_iseq);
            cfp->sp = vm_base_ptr(cfp) + cont_sp;
            cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;

            /* push block frame */
            cfp->sp[0] = (VALUE)err;
            vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
                          cfp->self,
                          VM_GUARDED_PREV_EP(cfp->ep),
                          0, /* cref or me */
                          catch_iseq->body->iseq_encoded,
                          cfp->sp + arg_size /* push value */,
                          catch_iseq->body->local_table_size - arg_size,
                          catch_iseq->body->stack_max);

            state = 0;
            ec->tag->state = TAG_NONE;
            ec->errinfo = Qnil;

            return Qundef;
        }
        else {
            hook_before_rewind(ec, ec->cfp, FALSE, state, err);

            if (VM_FRAME_FINISHED_P(ec->cfp)) {
                /* ran out of frames belonging to this vm_exec invocation:
                 * propagate to the previous tag */
                rb_vm_pop_frame(ec);
                ec->errinfo = (VALUE)err;
                ec->tag = ec->tag->prev;
                EC_JUMP_TAG(ec, state);
            }
            else {
                /* pop this frame and retry with the caller's catch table */
                rb_vm_pop_frame(ec);
            }
        }
    }
}
2124
2125 /* misc */
2126
2127 VALUE
rb_iseq_eval(const rb_iseq_t * iseq)2128 rb_iseq_eval(const rb_iseq_t *iseq)
2129 {
2130 rb_execution_context_t *ec = GET_EC();
2131 VALUE val;
2132 vm_set_top_stack(ec, iseq);
2133 val = vm_exec(ec, TRUE);
2134 return val;
2135 }
2136
2137 VALUE
rb_iseq_eval_main(const rb_iseq_t * iseq)2138 rb_iseq_eval_main(const rb_iseq_t *iseq)
2139 {
2140 rb_execution_context_t *ec = GET_EC();
2141 VALUE val;
2142
2143 vm_set_main_stack(ec, iseq);
2144 val = vm_exec(ec, TRUE);
2145 return val;
2146 }
2147
2148 int
rb_vm_control_frame_id_and_class(const rb_control_frame_t * cfp,ID * idp,ID * called_idp,VALUE * klassp)2149 rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
2150 {
2151 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
2152
2153 if (me) {
2154 if (idp) *idp = me->def->original_id;
2155 if (called_idp) *called_idp = me->called_id;
2156 if (klassp) *klassp = me->owner;
2157 return TRUE;
2158 }
2159 else {
2160 return FALSE;
2161 }
2162 }
2163
/* Like rb_vm_control_frame_id_and_class(), but for `ec`'s current frame. */
int
rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
{
    return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
}
2169
/* Public wrapper: report method id and class for the current thread's
 * current frame (the called id is not reported). */
int
rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
{
    return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
}
2175
2176 VALUE
rb_vm_call_cfunc(VALUE recv,VALUE (* func)(VALUE),VALUE arg,VALUE block_handler,VALUE filename)2177 rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
2178 VALUE block_handler, VALUE filename)
2179 {
2180 rb_execution_context_t *ec = GET_EC();
2181 const rb_control_frame_t *reg_cfp = ec->cfp;
2182 const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
2183 VALUE val;
2184
2185 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
2186 recv, block_handler,
2187 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
2188 0, reg_cfp->sp, 0, 0);
2189
2190 val = (*func)(arg);
2191
2192 rb_vm_pop_frame(ec);
2193 return val;
2194 }
2195
2196 /* vm */
2197
/*
 * GC mark function for the VM itself: marks every living thread plus all
 * VM-global object references (load paths, loaded features, trap handler
 * commands, global trace hooks, ...).
 */
void
rb_vm_mark(void *ptr)
{
    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
        rb_vm_t *vm = ptr;
        rb_thread_t *th = 0;

        /* keep every living thread's Thread object alive */
        list_for_each(&vm->living_threads, th, vmlt_node) {
            rb_gc_mark(th->self);
        }
        rb_gc_mark(vm->thgroup_default);
        rb_gc_mark(vm->mark_object_ary);
        rb_gc_mark(vm->load_path);
        rb_gc_mark(vm->load_path_snapshot);
        RUBY_MARK_UNLESS_NULL(vm->load_path_check_cache);
        rb_gc_mark(vm->expanded_load_path);
        rb_gc_mark(vm->loaded_features);
        rb_gc_mark(vm->loaded_features_snapshot);
        rb_gc_mark(vm->top_self);
        rb_gc_mark(vm->orig_progname);
        RUBY_MARK_UNLESS_NULL(vm->coverages);
        rb_gc_mark(vm->defined_module_hash);

        if (vm->loading_table) {
            rb_mark_tbl(vm->loading_table);
        }

        rb_hook_list_mark(&vm->global_hooks);

        /* per-signal trap handler commands */
        rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);

        mjit_mark();
    }

    RUBY_MARK_LEAVE("vm");
}
2236
2237 #undef rb_vm_register_special_exception
2238 void
rb_vm_register_special_exception_str(enum ruby_special_exceptions sp,VALUE cls,VALUE mesg)2239 rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
2240 {
2241 rb_vm_t *vm = GET_VM();
2242 VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
2243 OBJ_TAINT(exc);
2244 OBJ_FREEZE(exc);
2245 ((VALUE *)vm->special_exceptions)[sp] = exc;
2246 rb_gc_register_mark_object(exc);
2247 }
2248
2249 int
rb_vm_add_root_module(ID id,VALUE module)2250 rb_vm_add_root_module(ID id, VALUE module)
2251 {
2252 rb_vm_t *vm = GET_VM();
2253
2254 rb_hash_aset(vm->defined_module_hash, ID2SYM(id), module);
2255
2256 return TRUE;
2257 }
2258
/* st_foreach callback: free one loading-table key (a malloc'ed feature
 * path string) and delete the entry.  `value` and `arg` are unused. */
static int
free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
{
    xfree((char *)key);
    return ST_DELETE;
}
2265
2266 extern void rb_native_mutex_destroy(rb_nativethread_lock_t *lock);
2267
/*
 * Tear down the whole VM at process exit: free the main thread, run
 * at_exit hooks, release VM-owned tables, the object space, the locks,
 * and finally the VM structure itself.  Order matters throughout.
 * Always returns 0.
 */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");

    if (vm) {
        rb_thread_t *th = vm->main_thread;
        struct rb_objspace *objspace = vm->objspace;
        /* detach the main thread before freeing it so thread_free()
         * does not treat it as the live main thread */
        vm->main_thread = 0;
        if (th) {
            rb_fiber_reset_root_local_storage(th);
            thread_free(th);
        }
        rb_vm_living_threads_init(vm);
        ruby_vm_run_at_exit_hooks(vm);
        if (vm->loading_table) {
            /* keys are malloc'ed strings; free them explicitly */
            st_foreach(vm->loading_table, free_loading_table_entry, 0);
            st_free_table(vm->loading_table);
            vm->loading_table = 0;
        }
        if (vm->frozen_strings) {
            st_free_table(vm->frozen_strings);
            vm->frozen_strings = 0;
        }
        rb_vm_gvl_destroy(vm);
        RB_ALTSTACK_FREE(vm->main_altstack);
        if (objspace) {
            rb_objspace_free(objspace);
        }
        rb_native_mutex_destroy(&vm->waitpid_lock);
        rb_native_mutex_destroy(&vm->workqueue_lock);
        /* after freeing objspace, you *can't* use ruby_xfree() */
        ruby_mimfree(vm);
        ruby_current_vm_ptr = NULL;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
2306
2307 static size_t
vm_memsize(const void * ptr)2308 vm_memsize(const void *ptr)
2309 {
2310 const rb_vm_t *vmobj = ptr;
2311 size_t size = sizeof(rb_vm_t);
2312
2313 size += vmobj->living_thread_num * sizeof(rb_thread_t);
2314
2315 if (vmobj->defined_strings) {
2316 size += DEFINED_EXPR * sizeof(VALUE);
2317 }
2318 return size;
2319 }
2320
/* TypedData type for the VM wrapper object: no mark/free callbacks
 * (the VM's lifetime is managed manually), only memsize for
 * ObjectSpace statistics. */
static const rb_data_type_t vm_data_type = {
    "VM",
    {NULL, NULL, vm_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
2326
2327
2328 static VALUE
vm_default_params(void)2329 vm_default_params(void)
2330 {
2331 rb_vm_t *vm = GET_VM();
2332 VALUE result = rb_hash_new();
2333 #define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
2334 SET(thread_vm_stack_size);
2335 SET(thread_machine_stack_size);
2336 SET(fiber_vm_stack_size);
2337 SET(fiber_machine_stack_size);
2338 #undef SET
2339 rb_obj_freeze(result);
2340 return result;
2341 }
2342
2343 static size_t
get_param(const char * name,size_t default_value,size_t min_value)2344 get_param(const char *name, size_t default_value, size_t min_value)
2345 {
2346 const char *envval;
2347 size_t result = default_value;
2348 if ((envval = getenv(name)) != 0) {
2349 long val = atol(envval);
2350 if (val < (long)min_value) {
2351 val = (long)min_value;
2352 }
2353 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
2354 }
2355 if (0) fprintf(stderr, "%s: %"PRIuSIZE"\n", name, result); /* debug print */
2356
2357 return result;
2358 }
2359
/*
 * Clamp a requested machine stack size up to a platform-safe minimum.
 * On platforms exposing PTHREAD_STACK_MIN, undersized requests are
 * raised to twice that minimum; elsewhere this is a no-op.
 */
static void
check_machine_stack_size(size_t *sizep)
{
#ifdef PTHREAD_STACK_MIN
    /* merged the two previously separate, identical #ifdef regions */
    if (*sizep < PTHREAD_STACK_MIN) {
        *sizep = PTHREAD_STACK_MIN * 2;
    }
#endif
}
2373
2374 static void
vm_default_params_setup(rb_vm_t * vm)2375 vm_default_params_setup(rb_vm_t *vm)
2376 {
2377 vm->default_params.thread_vm_stack_size =
2378 get_param("RUBY_THREAD_VM_STACK_SIZE",
2379 RUBY_VM_THREAD_VM_STACK_SIZE,
2380 RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
2381
2382 vm->default_params.thread_machine_stack_size =
2383 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
2384 RUBY_VM_THREAD_MACHINE_STACK_SIZE,
2385 RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
2386
2387 vm->default_params.fiber_vm_stack_size =
2388 get_param("RUBY_FIBER_VM_STACK_SIZE",
2389 RUBY_VM_FIBER_VM_STACK_SIZE,
2390 RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
2391
2392 vm->default_params.fiber_machine_stack_size =
2393 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
2394 RUBY_VM_FIBER_MACHINE_STACK_SIZE,
2395 RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
2396
2397 /* environment dependent check */
2398 check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
2399 check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
2400 }
2401
2402 static void
vm_init2(rb_vm_t * vm)2403 vm_init2(rb_vm_t *vm)
2404 {
2405 MEMZERO(vm, rb_vm_t, 1);
2406 rb_vm_living_threads_init(vm);
2407 vm->thread_report_on_exception = 1;
2408 vm->src_encoding_index = -1;
2409
2410 vm_default_params_setup(vm);
2411 }
2412
2413 /* Thread */
2414
#define USE_THREAD_DATA_RECYCLE 1

#if USE_THREAD_DATA_RECYCLE
/* Small LIFO cache of freed thread VM stacks, reused to avoid repeated
 * malloc/free of large stack buffers. */
#define RECYCLE_MAX 64
static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
static int thread_recycle_stack_count = 0; /* number of cached stacks */
#endif /* USE_THREAD_DATA_RECYCLE */
2422
/* Return a VM stack of `size` VALUEs, popping one from the recycle
 * cache when available, otherwise allocating fresh. */
VALUE *
rb_thread_recycle_stack(size_t size)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count > 0) {
        /* TODO: check stack size if stack sizes are variable */
        return thread_recycle_stack_slot[--thread_recycle_stack_count];
    }
#endif /* USE_THREAD_DATA_RECYCLE */
    return ALLOC_N(VALUE, size);
}
2434
/* Return a VM stack to the recycle cache, or free it outright when the
 * cache is already full. */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
    VM_ASSERT(stack != NULL);

#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}
2448
/*
 * GC mark function for an execution context: marks everything reachable
 * from the VM stack, from each live control frame, and (for non-current
 * contexts) from the saved machine stack and registers.
 */
void
rb_execution_context_mark(const rb_execution_context_t *ec)
{
#if VM_CHECK_MODE > 0
    void rb_ec_verify(const rb_execution_context_t *ec); /* cont.c */
    rb_ec_verify(ec);
#endif

    /* mark VM stack */
    if (ec->vm_stack) {
        VALUE *p = ec->vm_stack;
        VALUE *sp = ec->cfp->sp;
        rb_control_frame_t *cfp = ec->cfp;
        rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);

        /* everything between stack bottom and the live sp */
        rb_gc_mark_values((long)(sp - p), p);

        /* walk every live control frame up to the stack's end */
        while (cfp != limit_cfp) {
            const VALUE *ep = cfp->ep;
            VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
            rb_gc_mark(cfp->self);
            rb_gc_mark((VALUE)cfp->iseq);
            rb_gc_mark((VALUE)cfp->block_code);

            if (!VM_ENV_LOCAL_P(ep)) {
                const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
                if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
                    /* heap-escaped env: keep its wrapper object alive */
                    rb_gc_mark(prev_ep[VM_ENV_DATA_INDEX_ENV]);
                }
            }

            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        }
    }

    /* mark machine stack */
    if (ec->machine.stack_start && ec->machine.stack_end &&
        ec != GET_EC() /* marked for current ec at the first stage of marking */
        ) {
        rb_gc_mark_machine_stack(ec);
        /* saved register contents may also hold object references */
        rb_gc_mark_locations((VALUE *)&ec->machine.regs,
                             (VALUE *)(&ec->machine.regs) +
                             sizeof(ec->machine.regs) / (sizeof(VALUE)));
    }

    RUBY_MARK_UNLESS_NULL(ec->errinfo);
    RUBY_MARK_UNLESS_NULL(ec->root_svar);
    rb_mark_tbl(ec->local_storage);
    RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash);
    RUBY_MARK_UNLESS_NULL(ec->local_storage_recursive_hash_for_trace);
    RUBY_MARK_UNLESS_NULL(ec->private_const_reference);
}
2501
2502 void rb_fiber_mark_self(rb_fiber_t *fib);
2503 void rb_threadptr_root_fiber_setup(rb_thread_t *th);
2504 void rb_threadptr_root_fiber_release(rb_thread_t *th);
2505
/* GC mark function for rb_thread_t: marks the thread's current fiber,
 * its invocation arguments, and every VALUE-typed field. */
static void
thread_mark(void *ptr)
{
    rb_thread_t *th = ptr;
    RUBY_MARK_ENTER("thread");
    rb_fiber_mark_self(th->ec->fiber_ptr);

    /* mark ruby objects */
    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.proc);
        RUBY_MARK_UNLESS_NULL(th->invoke_arg.proc.args);
        break;
      case thread_invoke_type_func:
        /* func arg is an opaque pointer; mark only if it looks like an object */
        rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
        break;
      default:
        break;
    }

    RUBY_MARK_UNLESS_NULL(th->thgroup);
    RUBY_MARK_UNLESS_NULL(th->value);
    RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
    RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack);
    RUBY_MARK_UNLESS_NULL(th->top_self);
    RUBY_MARK_UNLESS_NULL(th->top_wrapper);
    if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
    RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
    RUBY_MARK_UNLESS_NULL(th->last_status);
    RUBY_MARK_UNLESS_NULL(th->locking_mutex);
    RUBY_MARK_UNLESS_NULL(th->name);

    RUBY_MARK_LEAVE("thread");
}
2540
/* TypedData free function for rb_thread_t.  All mutexes must already be
 * released (rb_bug otherwise).  The main thread's struct is owned by the
 * VM, so it is not freed here. */
static void
thread_free(void *ptr)
{
    rb_thread_t *th = ptr;
    RUBY_FREE_ENTER("thread");

    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
    }
    if (th->keeping_mutexes != NULL) {
        rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
    }

    rb_threadptr_root_fiber_release(th);

    if (th->vm && th->vm->main_thread == th) {
        /* main thread is freed together with the VM */
        RUBY_GC_INFO("main thread\n");
    }
    else {
        ruby_xfree(ptr);
    }

    RUBY_FREE_LEAVE("thread");
}
2565
2566 static size_t
thread_memsize(const void * ptr)2567 thread_memsize(const void *ptr)
2568 {
2569 const rb_thread_t *th = ptr;
2570 size_t size = sizeof(rb_thread_t);
2571
2572 if (!th->root_fiber) {
2573 size += th->ec->vm_stack_size * sizeof(VALUE);
2574 }
2575 if (th->ec->local_storage) {
2576 size += st_memsize(th->ec->local_storage);
2577 }
2578 return size;
2579 }
2580
/* Local alias for the public thread TypedData type. */
#define thread_data_type ruby_threadptr_data_type
/* TypedData callbacks for rb_thread_t wrappers (Thread objects). */
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
        thread_mark,
        thread_free,
        thread_memsize,
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
2591
2592 VALUE
rb_obj_is_thread(VALUE obj)2593 rb_obj_is_thread(VALUE obj)
2594 {
2595 if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
2596 return Qtrue;
2597 }
2598 else {
2599 return Qfalse;
2600 }
2601 }
2602
2603 static VALUE
thread_alloc(VALUE klass)2604 thread_alloc(VALUE klass)
2605 {
2606 VALUE obj;
2607 rb_thread_t *th;
2608 obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
2609
2610 return obj;
2611 }
2612
/*
 * Initialize a freshly allocated rb_thread_t bound to Thread object
 * `self`: set up the root fiber, allocate a VM stack (th->vm must
 * already be set by the caller), push the initial dummy frame, and set
 * default field values.
 */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;
    rb_threadptr_root_fiber_setup(th);

    {
        /* vm_stack_size is word number.
         * th->vm->default_params.thread_vm_stack_size is byte size. */
        size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
        rb_ec_set_vm_stack(th->ec, rb_thread_recycle_stack(size), size);
    }

    /* cfp starts at the top (high end) of the VM stack and grows down */
    th->ec->cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);

    vm_push_frame(th->ec, 0 /* dummy iseq */, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
                  Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
                  0 /* dummy cref/me */,
                  0 /* dummy pc */, th->ec->vm_stack, 0, 0);

    th->status = THREAD_RUNNABLE;
    th->last_status = Qnil;
    th->ec->errinfo = Qnil;
    th->ec->root_svar = Qfalse;
    th->ec->local_storage_recursive_hash = Qnil;
    th->ec->local_storage_recursive_hash_for_trace = Qnil;
#ifdef NON_SCALAR_THREAD_ID
    th->thread_id_string[0] = '\0';
#endif

#if OPT_CALL_THREADED_CODE
    th->retval = Qundef;
#endif
    th->name = Qnil;
    /* inherit the VM-wide report_on_exception default */
    th->report_on_exception = th->vm->thread_report_on_exception;
}
2649
2650 static VALUE
ruby_thread_init(VALUE self)2651 ruby_thread_init(VALUE self)
2652 {
2653 rb_thread_t *th = rb_thread_ptr(self);
2654 rb_vm_t *vm = GET_THREAD()->vm;
2655
2656 th->vm = vm;
2657 th_init(th, self);
2658 rb_ivar_set(self, rb_intern("locals"), rb_hash_new());
2659
2660 th->top_wrapper = 0;
2661 th->top_self = rb_vm_top_self();
2662 th->ec->root_svar = Qfalse;
2663 return self;
2664 }
2665
2666 VALUE
rb_thread_alloc(VALUE klass)2667 rb_thread_alloc(VALUE klass)
2668 {
2669 VALUE self = thread_alloc(klass);
2670 ruby_thread_init(self);
2671 return self;
2672 }
2673
2674 static void
vm_define_method(VALUE obj,ID id,VALUE iseqval,int is_singleton)2675 vm_define_method(VALUE obj, ID id, VALUE iseqval, int is_singleton)
2676 {
2677 VALUE klass;
2678 rb_method_visibility_t visi;
2679 rb_cref_t *cref = rb_vm_cref();
2680
2681 if (!is_singleton) {
2682 klass = CREF_CLASS(cref);
2683 visi = rb_scope_visibility_get();
2684 }
2685 else { /* singleton */
2686 klass = rb_singleton_class(obj); /* class and frozen checked in this API */
2687 visi = METHOD_VISI_PUBLIC;
2688 }
2689
2690 if (NIL_P(klass)) {
2691 rb_raise(rb_eTypeError, "no class/module to add method");
2692 }
2693
2694 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, visi);
2695
2696 if (!is_singleton && rb_scope_module_func_check()) {
2697 klass = rb_singleton_class(klass);
2698 rb_add_method_iseq(klass, id, (const rb_iseq_t *)iseqval, cref, METHOD_VISI_PUBLIC);
2699 }
2700 }
2701
/* Evaluate `expr` as if in the caller's frame: temporarily pop the
 * current control frame (lending its sp to the caller's frame) so that
 * cref/visibility lookups inside `expr` see the calling scope, then
 * restore the frame and its sp afterwards. */
#define REWIND_CFP(expr) do { \
    rb_execution_context_t *ec__ = GET_EC(); \
    VALUE *const curr_sp = (ec__->cfp++)->sp; \
    VALUE *const saved_sp = ec__->cfp->sp; \
    ec__->cfp->sp = curr_sp; \
    expr; \
    (ec__->cfp--)->sp = saved_sp; \
} while (0)
2710
2711 static VALUE
m_core_define_method(VALUE self,VALUE sym,VALUE iseqval)2712 m_core_define_method(VALUE self, VALUE sym, VALUE iseqval)
2713 {
2714 REWIND_CFP({
2715 vm_define_method(Qnil, SYM2ID(sym), iseqval, FALSE);
2716 });
2717 return sym;
2718 }
2719
2720 static VALUE
m_core_define_singleton_method(VALUE self,VALUE cbase,VALUE sym,VALUE iseqval)2721 m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
2722 {
2723 REWIND_CFP({
2724 vm_define_method(cbase, SYM2ID(sym), iseqval, TRUE);
2725 });
2726 return sym;
2727 }
2728
2729 static VALUE
m_core_set_method_alias(VALUE self,VALUE cbase,VALUE sym1,VALUE sym2)2730 m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
2731 {
2732 REWIND_CFP({
2733 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
2734 });
2735 return Qnil;
2736 }
2737
2738 static VALUE
m_core_set_variable_alias(VALUE self,VALUE sym1,VALUE sym2)2739 m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
2740 {
2741 REWIND_CFP({
2742 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
2743 });
2744 return Qnil;
2745 }
2746
/* FrozenCore.undef_method: undefine `sym` on `cbase` and flush the
 * method cache (frame rewound).  Returns nil. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
        rb_undef(cbase, SYM2ID(sym));
        rb_clear_method_cache_by_class(self);
    });
    return Qnil;
}
2756
/* FrozenCore.set_postexe: register the given block as an END proc,
 * to be run at interpreter shutdown.  Returns nil. */
static VALUE
m_core_set_postexe(VALUE self)
{
    rb_set_end_proc(rb_call_end_proc, rb_block_proc());
    return Qnil;
}
2763
2764 static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
2765
/* Bulk-insert `argc` VALUEs (a flat key,value,key,value,... array) into
 * `hash`.  `argc` must be even; `hash` must be a Hash. */
static VALUE
core_hash_merge(VALUE hash, long argc, const VALUE *argv)
{
    Check_Type(hash, T_HASH);
    VM_ASSERT(argc % 2 == 0);
    rb_hash_bulk_insert(argc, argv, hash);
    return hash;
}
2774
/* FrozenCore.hash_merge_ptr: argv[0] is the target hash; the remaining
 * argc-1 values are flat key/value pairs merged with the frame rewound. */
static VALUE
m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
{
    VALUE hash = argv[0];

    REWIND_CFP(core_hash_merge(hash, argc-1, argv+1));

    return hash;
}
2784
2785 static void
kw_check_symbol(VALUE key)2786 kw_check_symbol(VALUE key)
2787 {
2788 if (!SYMBOL_P(key)) {
2789 rb_raise(rb_eTypeError, "hash key %+"PRIsVALUE" is not a Symbol",
2790 key);
2791 }
2792 }
/* rb_hash_foreach callback: copy one keyword entry into `hash`, raising
 * TypeError unless the key is a Symbol. */
static int
kwmerge_i(VALUE key, VALUE value, VALUE hash)
{
    kw_check_symbol(key);
    rb_hash_aset(hash, key, value);
    return ST_CONTINUE;
}
2800
/* FrozenCore.hash_merge_kwd: merge keyword hash `kw` into `hash` with
 * the caller's frame rewound. */
static VALUE
m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
{
    REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
    return hash;
}
2807
/* Merge `kw` (coerced to a Hash) into `hash`, enforcing Symbol keys. */
static VALUE
core_hash_merge_kwd(VALUE hash, VALUE kw)
{
    rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
    return hash;
}
2814
2815 /* Returns true if JIT is enabled */
2816 static VALUE
mjit_enabled_p(void)2817 mjit_enabled_p(void)
2818 {
2819 return mjit_enabled ? Qtrue : Qfalse;
2820 }
2821
2822 static VALUE
mjit_pause_m(int argc,VALUE * argv,RB_UNUSED_VAR (VALUE self))2823 mjit_pause_m(int argc, VALUE *argv, RB_UNUSED_VAR(VALUE self))
2824 {
2825 VALUE options = Qnil;
2826 VALUE wait = Qtrue;
2827 rb_scan_args(argc, argv, "0:", &options);
2828
2829 if (!NIL_P(options)) {
2830 static ID keyword_ids[1];
2831 if (!keyword_ids[0])
2832 keyword_ids[0] = rb_intern("wait");
2833 rb_get_kwargs(options, keyword_ids, 0, 1, &wait);
2834 }
2835
2836 return mjit_pause(RTEST(wait));
2837 }
2838
2839 extern VALUE *rb_gc_stack_start;
2840 extern size_t rb_gc_stack_maxsize;
2841 #ifdef __ia64
2842 extern VALUE *rb_gc_register_stack_start;
2843 #endif
2844
2845 /* debug functions */
2846
2847 /* :nodoc: */
/* RubyVM::SDR: dump VM bug-report diagnostics to stderr (debug aid). */
static VALUE
sdr(void)
{
    rb_vm_bugreport(NULL);
    return Qnil;
}
2854
2855 /* :nodoc: */
/* RubyVM::NSDR: return the current native (C-level) backtrace as an
 * Array of Strings; empty when backtrace(3) is unavailable. */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* OK */
#endif
    return ary;
}
2879
2880 #if VM_COLLECT_USAGE_DETAILS
2881 static VALUE usage_analysis_insn_stop(VALUE self);
2882 static VALUE usage_analysis_operand_stop(VALUE self);
2883 static VALUE usage_analysis_register_stop(VALUE self);
2884 #endif
2885
2886 VALUE rb_resolve_feature_path(VALUE klass, VALUE fname);
2887
2888 void
Init_VM(void)2889 Init_VM(void)
2890 {
2891 VALUE opts;
2892 VALUE klass;
2893 VALUE fcore;
2894 VALUE mjit;
2895
2896 /*
2897 * Document-class: RubyVM
2898 *
2899 * The RubyVM module provides some access to Ruby internals.
2900 * This module is for very limited purposes, such as debugging,
2901 * prototyping, and research. Normal users must not use it.
2902 */
2903 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
2904 rb_undef_alloc_func(rb_cRubyVM);
2905 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
2906 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
2907
2908 /* FrozenCore (hidden) */
2909 fcore = rb_class_new(rb_cBasicObject);
2910 RBASIC(fcore)->flags = T_ICLASS;
2911 klass = rb_singleton_class(fcore);
2912 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
2913 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
2914 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
2915 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 2);
2916 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
2917 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
2918 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
2919 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
2920 rb_define_method_id(klass, idProc, rb_block_proc, 0);
2921 rb_define_method_id(klass, idLambda, rb_block_lambda, 0);
2922 rb_obj_freeze(fcore);
2923 RBASIC_CLEAR_CLASS(klass);
2924 rb_obj_freeze(klass);
2925 rb_gc_register_mark_object(fcore);
2926 rb_mRubyVMFrozenCore = fcore;
2927
2928 /* RubyVM::MJIT */
2929 mjit = rb_define_module_under(rb_cRubyVM, "MJIT");
2930 rb_define_singleton_method(mjit, "enabled?", mjit_enabled_p, 0);
2931 rb_define_singleton_method(mjit, "pause", mjit_pause_m, -1);
2932 rb_define_singleton_method(mjit, "resume", mjit_resume, 0);
2933
2934 /*
2935 * Document-class: Thread
2936 *
2937 * Threads are the Ruby implementation for a concurrent programming model.
2938 *
2939 * Programs that require multiple threads of execution are a perfect
2940 * candidate for Ruby's Thread class.
2941 *
2942 * For example, we can create a new thread separate from the main thread's
2943 * execution using ::new.
2944 *
 *     thr = Thread.new { puts "What's the big deal" }
2946 *
2947 * Then we are able to pause the execution of the main thread and allow
2948 * our new thread to finish, using #join:
2949 *
 *     thr.join #=> "What's the big deal"
2951 *
2952 * If we don't call +thr.join+ before the main thread terminates, then all
2953 * other threads including +thr+ will be killed.
2954 *
2955 * Alternatively, you can use an array for handling multiple threads at
2956 * once, like in the following example:
2957 *
2958 * threads = []
 *     threads << Thread.new { puts "What's the big deal" }
2960 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
2961 *
2962 * After creating a few threads we wait for them all to finish
2963 * consecutively.
2964 *
2965 * threads.each { |thr| thr.join }
2966 *
2967 * === Thread initialization
2968 *
2969 * In order to create new threads, Ruby provides ::new, ::start, and
2970 * ::fork. A block must be provided with each of these methods, otherwise
2971 * a ThreadError will be raised.
2972 *
2973 * When subclassing the Thread class, the +initialize+ method of your
2974 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
2975 * call super in your +initialize+ method.
2976 *
2977 * === Thread termination
2978 *
2979 * For terminating threads, Ruby provides a variety of ways to do this.
2980 *
2981 * The class method ::kill, is meant to exit a given thread:
2982 *
2983 * thr = Thread.new { ... }
2984 * Thread.kill(thr) # sends exit() to thr
2985 *
2986 * Alternatively, you can use the instance method #exit, or any of its
2987 * aliases #kill or #terminate.
2988 *
2989 * thr.exit
2990 *
2991 * === Thread status
2992 *
2993 * Ruby provides a few instance methods for querying the state of a given
2994 * thread. To get a string with the current thread's state use #status
2995 *
2996 * thr = Thread.new { sleep }
2997 * thr.status # => "sleep"
2998 * thr.exit
2999 * thr.status # => false
3000 *
3001 * You can also use #alive? to tell if the thread is running or sleeping,
3002 * and #stop? if the thread is dead or sleeping.
3003 *
3004 * === Thread variables and scope
3005 *
3006 * Since threads are created with blocks, the same rules apply to other
3007 * Ruby blocks for variable scope. Any local variables created within this
3008 * block are accessible to only this thread.
3009 *
3010 * ==== Fiber-local vs. Thread-local
3011 *
3012 * Each fiber has its own bucket for Thread#[] storage. When you set a
3013 * new fiber-local it is only accessible within this Fiber. To illustrate:
3014 *
3015 * Thread.new {
3016 * Thread.current[:foo] = "bar"
3017 * Fiber.new {
3018 * p Thread.current[:foo] # => nil
3019 * }.resume
3020 * }.join
3021 *
 * This example uses #[] for getting and #[]= for setting fiber-locals;
 * you can also use #keys to list the fiber-locals for a given
 * thread and #key? to check if a fiber-local exists.
3025 *
3026 * When it comes to thread-locals, they are accessible within the entire
3027 * scope of the thread. Given the following example:
3028 *
3029 * Thread.new{
3030 * Thread.current.thread_variable_set(:foo, 1)
3031 * p Thread.current.thread_variable_get(:foo) # => 1
3032 * Fiber.new{
3033 * Thread.current.thread_variable_set(:foo, 2)
3034 * p Thread.current.thread_variable_get(:foo) # => 2
3035 * }.resume
3036 * p Thread.current.thread_variable_get(:foo) # => 2
3037 * }.join
3038 *
3039 * You can see that the thread-local +:foo+ carried over into the fiber
3040 * and was changed to +2+ by the end of the thread.
3041 *
3042 * This example makes use of #thread_variable_set to create new
3043 * thread-locals, and #thread_variable_get to reference them.
3044 *
3045 * There is also #thread_variables to list all thread-locals, and
3046 * #thread_variable? to check if a given thread-local exists.
3047 *
3048 * === Exception handling
3049 *
3050 * Any thread can raise an exception using the #raise instance method,
3051 * which operates similarly to Kernel#raise.
3052 *
3053 * However, it's important to note that an exception that occurs in any
3054 * thread except the main thread depends on #abort_on_exception. This
3055 * option is +false+ by default, meaning that any unhandled exception will
3056 * cause the thread to terminate silently when waited on by either #join
3057 * or #value. You can change this default by either #abort_on_exception=
3058 * +true+ or setting $DEBUG to +true+.
3059 *
3060 * With the addition of the class method ::handle_interrupt, you can now
3061 * handle exceptions asynchronously with threads.
3062 *
3063 * === Scheduling
3064 *
3065 * Ruby provides a few ways to support scheduling threads in your program.
3066 *
3067 * The first way is by using the class method ::stop, to put the current
3068 * running thread to sleep and schedule the execution of another thread.
3069 *
3070 * Once a thread is asleep, you can use the instance method #wakeup to
3071 * mark your thread as eligible for scheduling.
3072 *
3073 * You can also try ::pass, which attempts to pass execution to another
3074 * thread but is dependent on the OS whether a running thread will switch
3075 * or not. The same goes for #priority, which lets you hint to the thread
3076 * scheduler which threads you want to take precedence when passing
3077 * execution. This method is also dependent on the OS and may be ignored
3078 * on some platforms.
3079 *
3080 */
3081 rb_cThread = rb_define_class("Thread", rb_cObject);
3082 rb_undef_alloc_func(rb_cThread);
3083
3084 #if VM_COLLECT_USAGE_DETAILS
3085 /* ::RubyVM::USAGE_ANALYSIS_* */
3086 #define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
3087 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
3088 define_usage_analysis_hash(INSN);
3089 define_usage_analysis_hash(REGS);
3090 define_usage_analysis_hash(INSN_BIGRAM);
3091
3092 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
3093 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
3094 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
3095 #endif
3096
3097 /* ::RubyVM::OPTS, which shows vm build options */
3098 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
3099
3100 #if OPT_DIRECT_THREADED_CODE
3101 rb_ary_push(opts, rb_str_new2("direct threaded code"));
3102 #elif OPT_TOKEN_THREADED_CODE
3103 rb_ary_push(opts, rb_str_new2("token threaded code"));
3104 #elif OPT_CALL_THREADED_CODE
3105 rb_ary_push(opts, rb_str_new2("call threaded code"));
3106 #endif
3107
3108 #if OPT_STACK_CACHING
3109 rb_ary_push(opts, rb_str_new2("stack caching"));
3110 #endif
3111 #if OPT_OPERANDS_UNIFICATION
3112 rb_ary_push(opts, rb_str_new2("operands unification"));
3113 #endif
3114 #if OPT_INSTRUCTIONS_UNIFICATION
3115 rb_ary_push(opts, rb_str_new2("instructions unification"));
3116 #endif
3117 #if OPT_INLINE_METHOD_CACHE
3118 rb_ary_push(opts, rb_str_new2("inline method cache"));
3119 #endif
3120 #if OPT_BLOCKINLINING
3121 rb_ary_push(opts, rb_str_new2("block inlining"));
3122 #endif
3123
3124 /* ::RubyVM::INSTRUCTION_NAMES */
3125 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
3126
    /* ::RubyVM::DEFAULT_PARAMS
     * This constant exposes the VM's default parameters.
     * Note that changing these values does not affect VM execution.
     * The specification is not stable and you should not depend on this value.
     * Of course, this constant is MRI specific.
     */
3133 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
3134
3135 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
3136 #if VMDEBUG
3137 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
3138 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
3139 #else
3140 (void)sdr;
3141 (void)nsdr;
3142 #endif
3143
3144 /* VM bootstrap: phase 2 */
3145 {
3146 rb_vm_t *vm = ruby_current_vm_ptr;
3147 rb_thread_t *th = GET_THREAD();
3148 VALUE filename = rb_fstring_lit("<main>");
3149 const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3150 volatile VALUE th_self;
3151
3152 /* create vm object */
3153 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
3154
3155 /* create main thread */
3156 th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
3157 rb_iv_set(th_self, "locals", rb_hash_new());
3158 vm->main_thread = th;
3159 vm->running_thread = th;
3160 th->vm = vm;
3161 th->top_wrapper = 0;
3162 th->top_self = rb_vm_top_self();
3163 rb_thread_set_current(th);
3164
3165 rb_vm_living_threads_insert(vm, th);
3166
3167 rb_gc_register_mark_object((VALUE)iseq);
3168 th->ec->cfp->iseq = iseq;
3169 th->ec->cfp->pc = iseq->body->iseq_encoded;
3170 th->ec->cfp->self = th->top_self;
3171
3172 VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
3173 VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
3174
3175 /*
3176 * The Binding of the top level scope
3177 */
3178 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
3179 }
3180 vm_init_redefined_flag();
3181
3182 rb_block_param_proxy = rb_obj_alloc(rb_cObject);
3183 rb_add_method(rb_singleton_class(rb_block_param_proxy), idCall, VM_METHOD_TYPE_OPTIMIZED,
3184 (void *)OPTIMIZED_METHOD_TYPE_BLOCK_CALL, METHOD_VISI_PUBLIC);
3185 rb_obj_freeze(rb_block_param_proxy);
3186 rb_gc_register_mark_object(rb_block_param_proxy);
3187
3188 /* vm_backtrace.c */
3189 Init_vm_backtrace();
3190
3191 rb_define_singleton_method(rb_cRubyVM, "resolve_feature_path", rb_resolve_feature_path, 1);
3192 }
3193
3194 void
rb_vm_set_progname(VALUE filename)3195 rb_vm_set_progname(VALUE filename)
3196 {
3197 rb_thread_t *th = GET_VM()->main_thread;
3198 rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
3199 --cfp;
3200
3201 rb_iseq_pathobj_set(cfp->iseq, rb_str_dup(filename), rb_iseq_realpath(cfp->iseq));
3202 }
3203
3204 extern const struct st_hash_type rb_fstring_hash_type;
3205
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    /* Allocate and minimally initialize the process-wide VM structure and
     * its main thread.  This runs very early, before the Ruby object system
     * is available, so allocation uses ruby_mimmalloc and failure is
     * reported with bare fprintf/exit rather than a Ruby exception. */
    rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
    rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
    if (!vm || !th) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);  /* later init code assumes zeroed fields */
    vm_init2(vm);

    /* the object space must exist before any object allocation below */
    vm->objspace = rb_objspace_alloc();
    ruby_current_vm_ptr = vm;  /* publish as the current (only) VM */

    /* bind the main thread to a native thread and make it current */
    Init_native_thread(th);
    th->vm = vm;
    th_init(th, 0);
    rb_thread_set_current_raw(th);
    ruby_thread_init_stack(th);
}
3228
3229 void
Init_vm_objects(void)3230 Init_vm_objects(void)
3231 {
3232 rb_vm_t *vm = GET_VM();
3233
3234 vm->defined_module_hash = rb_hash_new();
3235
3236 /* initialize mark object array, hash */
3237 vm->mark_object_ary = rb_ary_tmp_new(128);
3238 vm->loading_table = st_init_strtable();
3239 vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 1000);
3240 }
3241
3242 /* top self */
3243
3244 static VALUE
main_to_s(VALUE obj)3245 main_to_s(VALUE obj)
3246 {
3247 return rb_str_new2("main");
3248 }
3249
3250 VALUE
rb_vm_top_self(void)3251 rb_vm_top_self(void)
3252 {
3253 return GET_VM()->top_self;
3254 }
3255
3256 void
Init_top_self(void)3257 Init_top_self(void)
3258 {
3259 rb_vm_t *vm = GET_VM();
3260
3261 vm->top_self = rb_obj_alloc(rb_cObject);
3262 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
3263 rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
3264 }
3265
3266 static VALUE *
ruby_vm_verbose_ptr(rb_vm_t * vm)3267 ruby_vm_verbose_ptr(rb_vm_t *vm)
3268 {
3269 return &vm->verbose;
3270 }
3271
3272 static VALUE *
ruby_vm_debug_ptr(rb_vm_t * vm)3273 ruby_vm_debug_ptr(rb_vm_t *vm)
3274 {
3275 return &vm->debug;
3276 }
3277
3278 VALUE *
rb_ruby_verbose_ptr(void)3279 rb_ruby_verbose_ptr(void)
3280 {
3281 return ruby_vm_verbose_ptr(GET_VM());
3282 }
3283
3284 VALUE *
rb_ruby_debug_ptr(void)3285 rb_ruby_debug_ptr(void)
3286 {
3287 return ruby_vm_debug_ptr(GET_VM());
3288 }
3289
3290 /* iseq.c */
3291 VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
3292 VALUE insn, int op_no, VALUE op,
3293 int len, size_t pos, VALUE *pnop, VALUE child);
3294
3295 st_table *
rb_vm_fstring_table(void)3296 rb_vm_fstring_table(void)
3297 {
3298 return GET_VM()->frozen_strings;
3299 }
3300
3301 #if VM_COLLECT_USAGE_DETAILS
3302
3303 #define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
3304
3305 /* uh = {
3306 * insn(Fixnum) => ihash(Hash)
3307 * }
3308 * ihash = {
3309 * -1(Fixnum) => count, # insn usage
3310 * 0(Fixnum) => ophash, # operand usage
3311 * }
3312 * ophash = {
3313 * val(interned string) => count(Fixnum)
3314 * }
3315 */
3316 static void
vm_analysis_insn(int insn)3317 vm_analysis_insn(int insn)
3318 {
3319 ID usage_hash;
3320 ID bigram_hash;
3321 static int prev_insn = -1;
3322
3323 VALUE uh;
3324 VALUE ihash;
3325 VALUE cv;
3326
3327 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3328 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
3329 uh = rb_const_get(rb_cRubyVM, usage_hash);
3330 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
3331 ihash = rb_hash_new();
3332 HASH_ASET(uh, INT2FIX(insn), ihash);
3333 }
3334 if ((cv = rb_hash_aref(ihash, INT2FIX(-1))) == Qnil) {
3335 cv = INT2FIX(0);
3336 }
3337 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
3338
3339 /* calc bigram */
3340 if (prev_insn != -1) {
3341 VALUE bi;
3342 VALUE ary[2];
3343 VALUE cv;
3344
3345 ary[0] = INT2FIX(prev_insn);
3346 ary[1] = INT2FIX(insn);
3347 bi = rb_ary_new4(2, &ary[0]);
3348
3349 uh = rb_const_get(rb_cRubyVM, bigram_hash);
3350 if ((cv = rb_hash_aref(uh, bi)) == Qnil) {
3351 cv = INT2FIX(0);
3352 }
3353 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
3354 }
3355 prev_insn = insn;
3356 }
3357
3358 static void
vm_analysis_operand(int insn,int n,VALUE op)3359 vm_analysis_operand(int insn, int n, VALUE op)
3360 {
3361 ID usage_hash;
3362
3363 VALUE uh;
3364 VALUE ihash;
3365 VALUE ophash;
3366 VALUE valstr;
3367 VALUE cv;
3368
3369 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3370
3371 uh = rb_const_get(rb_cRubyVM, usage_hash);
3372 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
3373 ihash = rb_hash_new();
3374 HASH_ASET(uh, INT2FIX(insn), ihash);
3375 }
3376 if ((ophash = rb_hash_aref(ihash, INT2FIX(n))) == Qnil) {
3377 ophash = rb_hash_new();
3378 HASH_ASET(ihash, INT2FIX(n), ophash);
3379 }
3380 /* intern */
3381 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
3382
3383 /* set count */
3384 if ((cv = rb_hash_aref(ophash, valstr)) == Qnil) {
3385 cv = INT2FIX(0);
3386 }
3387 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
3388 }
3389
3390 static void
vm_analysis_register(int reg,int isset)3391 vm_analysis_register(int reg, int isset)
3392 {
3393 ID usage_hash;
3394 VALUE uh;
3395 VALUE valstr;
3396 static const char regstrs[][5] = {
3397 "pc", /* 0 */
3398 "sp", /* 1 */
3399 "ep", /* 2 */
3400 "cfp", /* 3 */
3401 "self", /* 4 */
3402 "iseq", /* 5 */
3403 };
3404 static const char getsetstr[][4] = {
3405 "get",
3406 "set",
3407 };
3408 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
3409
3410 VALUE cv;
3411
3412 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
3413 if (syms[0] == 0) {
3414 char buff[0x10];
3415 int i;
3416
3417 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
3418 int j;
3419 for (j = 0; j < 2; j++) {
3420 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
3421 syms[i][j] = ID2SYM(rb_intern(buff));
3422 }
3423 }
3424 }
3425 valstr = syms[reg][isset];
3426
3427 uh = rb_const_get(rb_cRubyVM, usage_hash);
3428 if ((cv = rb_hash_aref(uh, valstr)) == Qnil) {
3429 cv = INT2FIX(0);
3430 }
3431 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
3432 }
3433
3434 #undef HASH_ASET
3435
/* Collection hooks invoked by the vm_collect_usage_* wrappers below.
 * They start enabled; the USAGE_ANALYSIS_*_STOP singleton methods reset
 * the corresponding pointer to 0 to turn collection off. */
static void (*ruby_vm_collect_usage_func_insn)(int insn) = vm_analysis_insn;
static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = vm_analysis_operand;
static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = vm_analysis_register;
3439
3440 /* :nodoc: */
3441 static VALUE
usage_analysis_insn_stop(VALUE self)3442 usage_analysis_insn_stop(VALUE self)
3443 {
3444 ruby_vm_collect_usage_func_insn = 0;
3445 return Qnil;
3446 }
3447
3448 /* :nodoc: */
3449 static VALUE
usage_analysis_operand_stop(VALUE self)3450 usage_analysis_operand_stop(VALUE self)
3451 {
3452 ruby_vm_collect_usage_func_operand = 0;
3453 return Qnil;
3454 }
3455
3456 /* :nodoc: */
3457 static VALUE
usage_analysis_register_stop(VALUE self)3458 usage_analysis_register_stop(VALUE self)
3459 {
3460 ruby_vm_collect_usage_func_register = 0;
3461 return Qnil;
3462 }
3463
3464 #else
3465
/* VM_COLLECT_USAGE_DETAILS is off: keep the hook names defined (as NULL)
 * so code referencing them still compiles; MAYBE_UNUSED silences
 * unused-variable warnings. */
MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = NULL;
MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = NULL;
MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = NULL;
3469
3470 #endif
3471
3472 #if VM_COLLECT_USAGE_DETAILS
3473 /* @param insn instruction number */
3474 static void
vm_collect_usage_insn(int insn)3475 vm_collect_usage_insn(int insn)
3476 {
3477 if (RUBY_DTRACE_INSN_ENABLED()) {
3478 RUBY_DTRACE_INSN(rb_insns_name(insn));
3479 }
3480 if (ruby_vm_collect_usage_func_insn)
3481 (*ruby_vm_collect_usage_func_insn)(insn);
3482 }
3483
3484 /* @param insn instruction number
3485 * @param n n-th operand
3486 * @param op operand value
3487 */
3488 static void
vm_collect_usage_operand(int insn,int n,VALUE op)3489 vm_collect_usage_operand(int insn, int n, VALUE op)
3490 {
3491 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
3492 VALUE valstr;
3493
3494 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
3495
3496 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
3497 RB_GC_GUARD(valstr);
3498 }
3499 if (ruby_vm_collect_usage_func_operand)
3500 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
3501 }
3502
3503 /* @param reg register id. see code of vm_analysis_register() */
3504 /* @param isset 0: read, 1: write */
3505 static void
vm_collect_usage_register(int reg,int isset)3506 vm_collect_usage_register(int reg, int isset)
3507 {
3508 if (ruby_vm_collect_usage_func_register)
3509 (*ruby_vm_collect_usage_func_register)(reg, isset);
3510 }
3511 #endif
3512
3513 #endif /* #ifndef MJIT_HEADER */
3514
3515 #include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */
3516