1 /**********************************************************************
2
3 vm_trace.c -
4
5 $Author: ko1 $
6 created at: Tue Aug 14 19:37:09 2012
7
8 Copyright (C) 1993-2012 Yukihiro Matsumoto
9
10 **********************************************************************/
11
/*
 * This file includes two parts:
 *
 * (1) set_trace_func internal mechanisms
 *     and C level API
 *
 * (2) Ruby level API
 *  (2-1) set_trace_func API
 *  (2-2) TracePoint API
 *
 */
23
24 #include "internal.h"
25 #include "ruby/debug.h"
26
27 #include "vm_core.h"
28 #include "mjit.h"
29 #include "iseq.h"
30 #include "eval_intern.h"
31
32 /* (1) trace mechanisms */
33
/* A single registered event hook: callback, event mask, opaque payload,
 * and optional filters.  Hooks are kept on a singly linked list headed
 * by an rb_hook_list_t. */
typedef struct rb_event_hook_struct {
    rb_event_hook_flag_t hook_flags;   /* SAFE / DELETED / RAW_ARG flags */
    rb_event_flag_t events;            /* events this hook listens for */
    rb_event_hook_func_t func;         /* callback (see raw-arg variant below) */
    VALUE data;                        /* opaque payload; GC-marked via rb_hook_list_mark */
    struct rb_event_hook_struct *next; /* next hook in the list */

    struct {
        rb_thread_t *th;               /* fire only on this thread (NULL: any thread) */
        unsigned int target_line;      /* fire only on this source line (0: any line) */
    } filter;
} rb_event_hook_t;

/* Callback signature used when RUBY_EVENT_HOOK_FLAG_RAW_ARG is set:
 * the hook receives the whole trace arg instead of unpacked fields. */
typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg);

#define MAX_EVENT_NUM 32
50
51 void
rb_hook_list_mark(rb_hook_list_t * hooks)52 rb_hook_list_mark(rb_hook_list_t *hooks)
53 {
54 rb_event_hook_t *hook = hooks->hooks;
55
56 while (hook) {
57 rb_gc_mark(hook->data);
58 hook = hook->next;
59 }
60 }
61
62 static void clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list);
63
/* Release hooks flagged DELETED by delegating to clean_hooks().
 * NOTE(review): clean_hooks asserts need_clean — presumably callers
 * only invoke this when cleanup is pending; confirm at call sites. */
void
rb_hook_list_free(rb_hook_list_t *hooks)
{
    clean_hooks(GET_EC(), hooks);
}
69
70 /* ruby_vm_event_flags management */
71
/* Recompute VM-wide event state after the global hook list changed.
 * vm_events is the union of the events of all live global hooks. */
static void
update_global_event_hook(rb_event_flag_t vm_events)
{
    rb_event_flag_t new_iseq_events = vm_events & ISEQ_TRACE_EVENTS;
    rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;

    if (new_iseq_events & ~enabled_iseq_events) {
        /* Stop calling all JIT-ed code. Compiling trace insns is not supported for now. */
#if USE_MJIT
        mjit_call_p = FALSE;
#endif

        /* write all ISeqs iff new events are added */
        rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
    }

    ruby_vm_event_flags = vm_events;
    /* this mask only ever grows: once an iseq event was instrumented it
     * stays instrumented for the life of the VM */
    ruby_vm_event_enabled_global_flags |= vm_events;
    rb_objspace_set_event_hook(vm_events);
}
92
93 /* add/remove hooks */
94
95 static rb_event_hook_t *
alloc_event_hook(rb_event_hook_func_t func,rb_event_flag_t events,VALUE data,rb_event_hook_flag_t hook_flags)96 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
97 {
98 rb_event_hook_t *hook;
99
100 if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
101 rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
102 }
103
104 hook = ALLOC(rb_event_hook_t);
105 hook->hook_flags = hook_flags;
106 hook->events = events;
107 hook->func = func;
108 hook->data = data;
109
110 /* no filters */
111 hook->filter.th = NULL;
112 hook->filter.target_line = 0;
113
114 return hook;
115 }
116
/* Prepend `hook` to `list` and fold its events into the list's mask.
 * global_p distinguishes the VM-global list (whose hooks are GC roots)
 * from an object-local list, which needs a write barrier on its owner. */
static void
hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
{
    hook->next = list->hooks;
    list->hooks = hook;
    list->events |= hook->events;

    if (global_p) {
        /* global hooks are root objects at GC mark. */
        update_global_event_hook(list->events);
    }
    else {
        RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
    }
}
132
/* Attach `hook` to the VM-global hook list. */
static void
connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
{
    rb_hook_list_t *list = rb_vm_global_hooks(ec);
    hook_list_connect(Qundef, list, hook, TRUE);
}
139
/* Register a hook on the global list restricted (via filter.th) to
 * events raised while `th` is the running thread. */
static void
rb_threadptr_add_event_hook(const rb_execution_context_t *ec, rb_thread_t *th,
                            rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
    hook->filter.th = th;
    connect_event_hook(ec, hook);
}
148
/* Public C API: add a hook that fires only on thread `thval`. */
void
rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
}

/* Public C API: add a hook that fires on every thread. */
void
rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
    connect_event_hook(GET_EC(), hook);
}

/* As rb_thread_add_event_hook, but with caller-chosen hook flags
 * (e.g. RUBY_EVENT_HOOK_FLAG_RAW_ARG). */
void
rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
}

/* As rb_add_event_hook, but with caller-chosen hook flags. */
void
rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
    connect_event_hook(GET_EC(), hook);
}
174
/* Physically unlink and free hooks flagged DELETED, and recompute the
 * list's aggregate event mask from the survivors.  Must only run while
 * no traversal is in progress (see clean_hooks_check). */
static void
clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    rb_event_hook_t *hook, **nextp = &list->hooks; /* pointer-to-pointer unlink */
    VM_ASSERT(list->need_clean == TRUE);

    list->events = 0;
    list->need_clean = FALSE;

    while ((hook = *nextp) != 0) {
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
            *nextp = hook->next;
            xfree(hook);
        }
        else {
            list->events |= hook->events; /* update active events */
            nextp = &hook->next;
        }
    }

    if (list == rb_vm_global_hooks(ec)) {
        /* global events: propagate the shrunken mask to the whole VM */
        update_global_event_hook(list->events);
    }
    else {
        /* local events */
    }
}
203
204 static void
clean_hooks_check(const rb_execution_context_t * ec,rb_hook_list_t * list)205 clean_hooks_check(const rb_execution_context_t *ec, rb_hook_list_t *list)
206 {
207 if (UNLIKELY(list->need_clean != FALSE)) {
208 if (list->running == 0) {
209 clean_hooks(ec, list);
210 }
211 }
212 }
213
/* Sentinel thread filter: match hooks regardless of their thread filter. */
#define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)

/* if func is 0, then clear all funcs */
/* Flag matching hooks as DELETED (logical removal); physical removal is
 * deferred to clean_hooks_check so in-flight traversals stay valid.
 * data == Qundef matches any payload.  Returns the number of hooks
 * flagged by this call. */
static int
remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    rb_hook_list_t *list = &vm->global_hooks;
    int ret = 0;
    rb_event_hook_t *hook = list->hooks;

    while (hook) {
        if (func == 0 || hook->func == func) {
            if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
                if (data == Qundef || hook->data == data) {
                    hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
                    ret+=1;
                    list->need_clean = TRUE;
                }
            }
        }
        hook = hook->next;
    }

    clean_hooks_check(ec, list);
    return ret;
}
241
/* Thin internal alias over remove_event_hook (kept for symmetry with
 * rb_threadptr_add_event_hook). */
static int
rb_threadptr_remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
{
    return remove_event_hook(ec, filter_th, func, data);
}

/* Public C API: remove `func` hooks filtered to thread `thval`,
 * matching any payload. */
int
rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
{
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
}

/* Public C API: as above, but only hooks whose payload equals `data`. */
int
rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
{
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
}

/* Public C API: remove unfiltered `func` hooks (filter.th == NULL). */
int
rb_remove_event_hook(rb_event_hook_func_t func)
{
    return remove_event_hook(GET_EC(), NULL, func, Qundef);
}

/* Public C API: as above, but only hooks whose payload equals `data`. */
int
rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
{
    return remove_event_hook(GET_EC(), NULL, func, data);
}
271
272 void
rb_clear_trace_func(void)273 rb_clear_trace_func(void)
274 {
275 rb_execution_context_t *ec = GET_EC();
276 rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
277 }
278
279 void
rb_ec_clear_current_thread_trace_func(const rb_execution_context_t * ec)280 rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec)
281 {
282 rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
283 }
284
285 /* invoke hooks */
286
/* Invoke every hook in `list` that matches the current event, honoring
 * per-hook thread and line filters.  DELETED hooks are skipped but not
 * unlinked here (the list may be mid-traversal elsewhere); physical
 * removal happens later via clean_hooks_check(). */
static void
exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    rb_event_hook_t *hook;

    for (hook = list->hooks; hook; hook = hook->next) {
        if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) &&
            (trace_arg->event & hook->events) &&
            (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
            (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
            if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
                /* classic convention: unpacked event fields */
                (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
            }
            else {
                /* raw-arg convention: hand over the whole trace_arg */
                (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
            }
        }
    }
}
306
307 static int
exec_hooks_precheck(const rb_execution_context_t * ec,rb_hook_list_t * list,const rb_trace_arg_t * trace_arg)308 exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
309 {
310 if (list->events & trace_arg->event) {
311 list->running++;
312 return TRUE;
313 }
314 else {
315 return FALSE;
316 }
317 }
318
/* Unpin the list after hook execution and run any deferred cleanup of
 * DELETED hooks once no traversal remains. */
static void
exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
    list->running--;
    clean_hooks_check(ec, list);
}
325
326 static void
exec_hooks_unprotected(const rb_execution_context_t * ec,rb_hook_list_t * list,const rb_trace_arg_t * trace_arg)327 exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
328 {
329 if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
330 exec_hooks_body(ec, list, trace_arg);
331 exec_hooks_postcheck(ec, list);
332 }
333
/* Run matching hooks under an EC tag so a raise/throw inside a hook is
 * caught here.  Returns TAG_NONE on clean completion, otherwise the tag
 * state the caller must re-raise.  The ec "raised" flag is saved and
 * restored so hook execution cannot leak it. */
static int
exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    enum ruby_tag_type state;
    volatile int raised; /* volatile: survives longjmp through EC_EXEC_TAG */

    if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;

    raised = rb_ec_reset_raised(ec);

    /* TODO: Support !RUBY_EVENT_HOOK_FLAG_SAFE hooks */

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        exec_hooks_body(ec, list, trace_arg);
    }
    EC_POP_TAG();

    /* postcheck runs even when a hook jumped, keeping running balanced */
    exec_hooks_postcheck(ec, list);

    if (raised) {
        rb_ec_set_raised(ec);
    }

    return state;
}
360
/* Central event dispatch.  Internal events (NEWOBJ/FREEOBJ/GC) run only
 * the global hooks, unprotected, and may nest over a normal event's
 * trace_arg.  Normal events run `hooks` protected, are never reentered
 * (ec->trace_arg acts as the reentrancy latch), and swap errinfo and the
 * recursive-hash so hook code cannot corrupt the traced frame's state.
 * pop_p: pop the current frame before re-raising a jump out of a hook. */
MJIT_FUNC_EXPORTED void
rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
{
    rb_execution_context_t *ec = trace_arg->ec;

    if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
        if (ec->trace_arg && (ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
            /* skip hooks because this thread doing INTERNAL_EVENT */
        }
        else {
            rb_trace_arg_t *prev_trace_arg = ec->trace_arg;

            ec->trace_arg = trace_arg;
            /* only global hooks */
            exec_hooks_unprotected(ec, rb_vm_global_hooks(ec), trace_arg);
            ec->trace_arg = prev_trace_arg;
        }
    }
    else {
        if (ec->trace_arg == NULL && /* check reentrant */
            trace_arg->self != rb_mRubyVMFrozenCore /* skip special methods. TODO: remove it. */) {
            const VALUE errinfo = ec->errinfo;
            const VALUE old_recursive = ec->local_storage_recursive_hash;
            int state = 0;

            /* setup: hooks see a dedicated recursive hash and clean errinfo */
            ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
            ec->errinfo = Qnil;
            ec->trace_arg = trace_arg;

            /* kick hooks */
            if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
                /* restore errinfo only on clean completion */
                ec->errinfo = errinfo;
            }

            /* cleanup */
            ec->trace_arg = NULL;
            ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
            ec->local_storage_recursive_hash = old_recursive;

            if (state) {
                if (pop_p) {
                    if (VM_FRAME_FINISHED_P(ec->cfp)) {
                        ec->tag = ec->tag->prev;
                    }
                    rb_vm_pop_frame(ec);
                }
                /* propagate the jump raised inside a hook */
                EC_JUMP_TAG(ec, state);
            }
        }
    }
}
413
414 VALUE
rb_suppress_tracing(VALUE (* func)(VALUE),VALUE arg)415 rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
416 {
417 volatile int raised;
418 VALUE result = Qnil;
419 rb_execution_context_t *const ec = GET_EC();
420 rb_vm_t *const vm = rb_ec_vm_ptr(ec);
421 enum ruby_tag_type state;
422 rb_trace_arg_t dummy_trace_arg;
423 dummy_trace_arg.event = 0;
424
425 if (!ec->trace_arg) {
426 ec->trace_arg = &dummy_trace_arg;
427 }
428
429 raised = rb_ec_reset_raised(ec);
430
431 EC_PUSH_TAG(ec);
432 if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
433 result = (*func)(arg);
434 }
435 else {
436 (void)*&vm; /* suppress "clobbered" warning */
437 }
438 EC_POP_TAG();
439
440 if (raised) {
441 rb_ec_reset_raised(ec);
442 }
443
444 if (ec->trace_arg == &dummy_trace_arg) {
445 ec->trace_arg = NULL;
446 }
447
448 if (state) {
449 #if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
450 RB_GC_GUARD(result);
451 #endif
452 EC_JUMP_TAG(ec, state);
453 }
454
455 return result;
456 }
457
458 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
459
460 /* (2-1) set_trace_func (old API) */
461
462 /*
463 * call-seq:
464 * set_trace_func(proc) -> proc
465 * set_trace_func(nil) -> nil
466 *
467 * Establishes _proc_ as the handler for tracing, or disables
468 * tracing if the parameter is +nil+.
469 *
470 * *Note:* this method is obsolete, please use TracePoint instead.
471 *
472 * _proc_ takes up to six parameters:
473 *
474 * * an event name
475 * * a filename
476 * * a line number
477 * * an object id
478 * * a binding
479 * * the name of a class
480 *
481 * _proc_ is invoked whenever an event occurs.
482 *
483 * Events are:
484 *
485 * +c-call+:: call a C-language routine
486 * +c-return+:: return from a C-language routine
487 * +call+:: call a Ruby method
488 * +class+:: start a class or module definition
489 * +end+:: finish a class or module definition
490 * +line+:: execute code on a new line
491 * +raise+:: raise an exception
492 * +return+:: return from a Ruby method
493 *
494 * Tracing is disabled within the context of _proc_.
495 *
496 * class Test
497 * def test
498 * a = 1
499 * b = 2
500 * end
501 * end
502 *
503 * set_trace_func proc { |event, file, line, id, binding, classname|
504 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
505 * }
506 * t = Test.new
507 * t.test
508 *
509 * line prog.rb:11 false
510 * c-call prog.rb:11 new Class
511 * c-call prog.rb:11 initialize Object
512 * c-return prog.rb:11 initialize Object
513 * c-return prog.rb:11 new Class
514 * line prog.rb:12 false
515 * call prog.rb:2 test Test
516 * line prog.rb:3 test Test
517 * line prog.rb:4 test Test
518 * return prog.rb:4 test Test
519 */
520
/* Kernel#set_trace_func implementation: replace any existing
 * call_trace_func hook; nil disables tracing, a Proc enables it for
 * all events.  Returns the argument. */
static VALUE
set_trace_func(VALUE obj, VALUE trace)
{
    /* always drop the previous handler first so set_trace_func replaces
     * rather than stacks */
    rb_remove_event_hook(call_trace_func);

    if (NIL_P(trace)) {
        return Qnil;
    }

    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
    return trace;
}
537
/* Shared helper for Thread#add_trace_func / Thread#set_trace_func:
 * validate the Proc and register call_trace_func filtered to filter_th. */
static void
thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
}
547
548 /*
549 * call-seq:
550 * thr.add_trace_func(proc) -> proc
551 *
552 * Adds _proc_ as a handler for tracing.
553 *
554 * See Thread#set_trace_func and Kernel#set_trace_func.
555 */
556
/* Thread#add_trace_func: register an additional handler on `obj`
 * (does not remove existing ones).  Returns the Proc. */
static VALUE
thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
    return trace;
}
563
564 /*
565 * call-seq:
566 * thr.set_trace_func(proc) -> proc
567 * thr.set_trace_func(nil) -> nil
568 *
569 * Establishes _proc_ on _thr_ as the handler for tracing, or
570 * disables tracing if the parameter is +nil+.
571 *
572 * See Kernel#set_trace_func.
573 */
574
/* Thread#set_trace_func: replace the handler on target_thread; nil
 * removes it.  Returns the argument. */
static VALUE
thread_set_trace_func_m(VALUE target_thread, VALUE trace)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *target_th = rb_thread_ptr(target_thread);

    /* remove before (re-)adding, so "set" replaces rather than stacks */
    rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);

    if (NIL_P(trace)) {
        return Qnil;
    }
    else {
        thread_add_trace_func(ec, target_th, trace);
        return trace;
    }
}
591
592 static const char *
get_event_name(rb_event_flag_t event)593 get_event_name(rb_event_flag_t event)
594 {
595 switch (event) {
596 case RUBY_EVENT_LINE: return "line";
597 case RUBY_EVENT_CLASS: return "class";
598 case RUBY_EVENT_END: return "end";
599 case RUBY_EVENT_CALL: return "call";
600 case RUBY_EVENT_RETURN: return "return";
601 case RUBY_EVENT_C_CALL: return "c-call";
602 case RUBY_EVENT_C_RETURN: return "c-return";
603 case RUBY_EVENT_RAISE: return "raise";
604 default:
605 return "unknown";
606 }
607 }
608
/* Map a single event flag to its TracePoint symbol ID (e.g. :line);
 * returns 0 for unrecognized flags. */
static ID
get_event_id(rb_event_flag_t event)
{
    ID id;

    switch (event) {
#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
        C(line, LINE);
        C(class, CLASS);
        C(end, END);
        C(call, CALL);
        C(return, RETURN);
        C(c_call, C_CALL);
        C(c_return, C_RETURN);
        C(raise, RAISE);
        C(b_call, B_CALL);
        C(b_return, B_RETURN);
        C(thread_begin, THREAD_BEGIN);
        C(thread_end, THREAD_END);
        C(fiber_switch, FIBER_SWITCH);
        C(script_compiled, SCRIPT_COMPILED);
#undef C
      default:
        return 0;
    }
}
635
/* Resolve the source path and line for an event, from the nearest
 * Ruby-level frame at or above cfp.  For definition-opening events
 * (class/call/b_call) report the definition's first line; otherwise the
 * currently executing line.  No Ruby frame: path Qnil, line 0. */
static void
get_path_and_lineno(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, rb_event_flag_t event, VALUE *pathp, int *linep)
{
    cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);

    if (cfp) {
        const rb_iseq_t *iseq = cfp->iseq;
        *pathp = rb_iseq_path(iseq);

        if (event & (RUBY_EVENT_CLASS |
                     RUBY_EVENT_CALL |
                     RUBY_EVENT_B_CALL)) {
            *linep = FIX2INT(rb_iseq_first_lineno(iseq));
        }
        else {
            *linep = rb_vm_get_sourceline(cfp);
        }
    }
    else {
        *pathp = Qnil;
        *linep = 0;
    }
}
659
/* Hook body for set_trace_func: build the six block arguments
 * (event, file, line, method id, binding, class) and call the Proc. */
static void
call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    int line;
    VALUE filename;
    VALUE eventname = rb_str_new2(get_event_name(event));
    VALUE argv[6];
    const rb_execution_context_t *ec = GET_EC();

    get_path_and_lineno(ec, ec->cfp, event, &filename, &line);

    if (!klass) {
        rb_ec_frame_method_id_and_class(ec, &id, 0, &klass);
    }

    if (klass) {
        if (RB_TYPE_P(klass, T_ICLASS)) {
            /* report the module, not its include wrapper */
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            /* set_trace_func reports the attached object, not the
             * singleton class (unlike TracePoint#defined_class) */
            klass = rb_ivar_get(klass, id__attached__);
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    /* a binding is only creatable with a self and a Ruby-level location */
    argv[4] = (self && (filename != Qnil)) ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    rb_proc_call_with_block(proc, 6, argv, Qnil);
}
693
694 /* (2-2) TracePoint API */
695
/* The TracePoint class object. */
static VALUE rb_cTracePoint;

/* Per-TracePoint state wrapped in the tracepoint TypedData object. */
typedef struct rb_tp_struct {
    rb_event_flag_t events;   /* events this TracePoint listens for */
    int tracing;              /* bool: currently enabled? */
    rb_thread_t *target_th;   /* restrict to this thread (NULL: any) */
    VALUE local_target_set;   /* Hash: target ->
                               * Qtrue (if target is iseq) or
                               * Qfalse (if target is bmethod)
                               */
    void (*func)(VALUE tpval, void *data); /* C-level callback (internal API) */
    void *data;               /* payload for func */
    VALUE proc;               /* Ruby-level callback (TracePoint.new block) */
    VALUE self;               /* back-reference to the wrapper object */
} rb_tp_t;
711
/* GC mark function for the tracepoint TypedData. */
static void
tp_mark(void *ptr)
{
    rb_tp_t *tp = ptr;
    rb_gc_mark(tp->proc);
    rb_gc_mark(tp->local_target_set);
    /* keep the target thread object alive while the TP references it */
    if (tp->target_th) rb_gc_mark(tp->target_th->self);
}

/* Memory accounting for ObjectSpace.memsize_of. */
static size_t
tp_memsize(const void *ptr)
{
    return sizeof(rb_tp_t);
}

static const rb_data_type_t tp_data_type = {
    "tracepoint",
    /* NEVER_FREE: the struct is embedded and freed with the object */
    {tp_mark, RUBY_TYPED_NEVER_FREE, tp_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

/* Allocator for TracePoint: zero-filled rb_tp_t inside a TypedData. */
static VALUE
tp_alloc(VALUE klass)
{
    rb_tp_t *tp;
    return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
}
739
/* Convert a TracePoint event symbol (e.g. :line) to its event flag;
 * raises ArgumentError for unknown names. */
static rb_event_flag_t
symbol2event_flag(VALUE v)
{
    ID id;
    VALUE sym = rb_to_symbol_type(v);
    const rb_event_flag_t RUBY_EVENT_A_CALL =
        RUBY_EVENT_CALL | RUBY_EVENT_B_CALL | RUBY_EVENT_C_CALL;
    const rb_event_flag_t RUBY_EVENT_A_RETURN =
        RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN | RUBY_EVENT_C_RETURN;

#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
    C(line, LINE);
    C(class, CLASS);
    C(end, END);
    C(call, CALL);
    C(return, RETURN);
    C(c_call, C_CALL);
    C(c_return, C_RETURN);
    C(raise, RAISE);
    C(b_call, B_CALL);
    C(b_return, B_RETURN);
    C(thread_begin, THREAD_BEGIN);
    C(thread_end, THREAD_END);
    C(fiber_switch, FIBER_SWITCH);
    C(script_compiled, SCRIPT_COMPILED);

    /* joke */
    C(a_call, A_CALL);
    C(a_return, A_RETURN);
#undef C
    rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
}
772
/* Unwrap the rb_tp_t from a TracePoint object (type-checked). */
static rb_tp_t *
tpptr(VALUE tpval)
{
    rb_tp_t *tp;
    TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
    return tp;
}

/* Current trace arg of this execution context; raises RuntimeError when
 * called outside a running hook (no event is being dispatched). */
static rb_trace_arg_t *
get_trace_arg(void)
{
    rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;
    if (trace_arg == 0) {
        rb_raise(rb_eRuntimeError, "access from outside");
    }
    return trace_arg;
}

/* Public C API: the trace arg for a TracePoint (tpval is unused; the
 * trace arg lives on the execution context). */
struct rb_trace_arg_struct *
rb_tracearg_from_tracepoint(VALUE tpval)
{
    return get_trace_arg();
}
796
/* Raw event flag of the current event. */
rb_event_flag_t
rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
{
    return trace_arg->event;
}

/* Event as a Symbol (e.g. :line) for TracePoint#event. */
VALUE
rb_tracearg_event(rb_trace_arg_t *trace_arg)
{
    return ID2SYM(get_event_id(trace_arg->event));
}

/* Lazily resolve path/lineno once per trace arg; Qundef marks "not yet
 * computed". */
static void
fill_path_and_lineno(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->path == Qundef) {
        get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
    }
}

/* Line number of the event (TracePoint#lineno). */
VALUE
rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
{
    fill_path_and_lineno(trace_arg);
    return INT2FIX(trace_arg->lineno);
}
/* Source path of the event (TracePoint#path). */
VALUE
rb_tracearg_path(rb_trace_arg_t *trace_arg)
{
    fill_path_and_lineno(trace_arg);
    return trace_arg->path;
}
829
/* Lazily resolve method id / called id / defined class from the frame,
 * once per trace arg (klass_solved latch).  ICLASSes are unwrapped to
 * the underlying module; a missing class becomes Qnil. */
static void
fill_id_and_klass(rb_trace_arg_t *trace_arg)
{
    if (!trace_arg->klass_solved) {
        if (!trace_arg->klass) {
            rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
        }

        if (trace_arg->klass) {
            if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
                trace_arg->klass = RBASIC(trace_arg->klass)->klass;
            }
        }
        else {
            trace_arg->klass = Qnil;
        }

        trace_arg->klass_solved = 1;
    }
}
850
/* TracePoint#parameters: parameter list of the method or block the
 * event belongs to.  Ruby frames use the iseq's parameter info; C
 * methods get unnamed parameters derived from arity.  Raises for
 * events that have no callable (line/class/end/raise/script_compiled). */
VALUE
rb_tracearg_parameters(rb_trace_arg_t *trace_arg)
{
    switch(trace_arg->event) {
      case RUBY_EVENT_CALL:
      case RUBY_EVENT_RETURN:
      case RUBY_EVENT_B_CALL:
      case RUBY_EVENT_B_RETURN: {
        const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
        if (cfp) {
            int is_proc = 0;
            /* non-lambda blocks report proc-style (optional) parameters */
            if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
                is_proc = 1;
            }
            return rb_iseq_parameters(cfp->iseq, is_proc);
        }
        break;
      }
      case RUBY_EVENT_C_CALL:
      case RUBY_EVENT_C_RETURN: {
        fill_id_and_klass(trace_arg);
        if (trace_arg->klass && trace_arg->id) {
            const rb_method_entry_t *me;
            VALUE iclass = Qnil;
            me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->id, &iclass);
            return rb_unnamed_parameters(rb_method_entry_arity(me));
        }
        break;
      }
      case RUBY_EVENT_RAISE:
      case RUBY_EVENT_LINE:
      case RUBY_EVENT_CLASS:
      case RUBY_EVENT_END:
      case RUBY_EVENT_SCRIPT_COMPILED:
        rb_raise(rb_eRuntimeError, "not supported by this event");
        break;
    }
    return Qnil;
}
890
/* TracePoint#method_id: name at the method's definition, or nil. */
VALUE
rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
}

/* TracePoint#callee_id: name the method was called with (may differ
 * from method_id for aliases), or nil. */
VALUE
rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
}

/* TracePoint#defined_class: class or module defining the method
 * (may be a singleton class), or nil. */
VALUE
rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->klass;
}
911
/* TracePoint#binding: a Binding for the nearest frame that can carry
 * one, or nil when no such frame exists. */
VALUE
rb_tracearg_binding(rb_trace_arg_t *trace_arg)
{
    rb_control_frame_t *cfp;
    cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);

    if (cfp) {
        return rb_vm_make_binding(trace_arg->ec, cfp);
    }
    else {
        return Qnil;
    }
}

/* TracePoint#self: receiver of the traced frame. */
VALUE
rb_tracearg_self(rb_trace_arg_t *trace_arg)
{
    return trace_arg->self;
}
931
/* TracePoint#return_value: only valid on return/c_return/b_return
 * events, where trace_arg->data carries the value. */
VALUE
rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->event & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        /* dispatcher always fills data for return events */
        rb_bug("rb_tracearg_return_value: unreachable");
    }
    return trace_arg->data;
}

/* TracePoint#raised_exception: only valid on :raise events, where
 * trace_arg->data carries the exception object. */
VALUE
rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->event & (RUBY_EVENT_RAISE)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_raised_exception: unreachable");
    }
    return trace_arg->data;
}
961
962 VALUE
rb_tracearg_eval_script(rb_trace_arg_t * trace_arg)963 rb_tracearg_eval_script(rb_trace_arg_t *trace_arg)
964 {
965 VALUE data = trace_arg->data;
966
967 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
968 /* ok */
969 }
970 else {
971 rb_raise(rb_eRuntimeError, "not supported by this event");
972 }
973 if (data == Qundef) {
974 rb_bug("rb_tracearg_raised_exception: unreachable");
975 }
976 if (rb_obj_is_iseq(data)) {
977 return Qnil;
978 }
979 else {
980 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
981 /* [src, iseq] */
982 return RARRAY_AREF(data, 0);
983 }
984 }
985
986 VALUE
rb_tracearg_instruction_sequence(rb_trace_arg_t * trace_arg)987 rb_tracearg_instruction_sequence(rb_trace_arg_t *trace_arg)
988 {
989 VALUE data = trace_arg->data;
990
991 if (trace_arg->event & (RUBY_EVENT_SCRIPT_COMPILED)) {
992 /* ok */
993 }
994 else {
995 rb_raise(rb_eRuntimeError, "not supported by this event");
996 }
997 if (data == Qundef) {
998 rb_bug("rb_tracearg_raised_exception: unreachable");
999 }
1000
1001 if (rb_obj_is_iseq(data)) {
1002 return rb_iseqw_new((const rb_iseq_t *)data);
1003 }
1004 else {
1005 VM_ASSERT(RB_TYPE_P(data, T_ARRAY));
1006 VM_ASSERT(rb_obj_is_iseq(RARRAY_AREF(data, 1)));
1007
1008 /* [src, iseq] */
1009 return rb_iseqw_new((const rb_iseq_t *)RARRAY_AREF(data, 1));
1010 }
1011 }
1012
/* Object associated with an internal NEWOBJ/FREEOBJ event (ObjectSpace
 * tracing); raises for any other event. */
VALUE
rb_tracearg_object(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->event & (RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ)) {
        /* ok */
    }
    else {
        rb_raise(rb_eRuntimeError, "not supported by this event");
    }
    if (trace_arg->data == Qundef) {
        rb_bug("rb_tracearg_object: unreachable");
    }
    return trace_arg->data;
}
1027
1028 /*
1029 * Type of event
1030 *
1031 * See TracePoint@Events for more information.
1032 */
/* The tracepoint_attr_* functions below are the Ruby-level TracePoint
 * attribute methods; each simply forwards to the rb_tracearg_* C API on
 * the current trace arg (raising "access from outside" when no event is
 * being dispatched). */
static VALUE
tracepoint_attr_event(VALUE tpval)
{
    return rb_tracearg_event(get_trace_arg());
}

/*
 * Line number of the event
 */
static VALUE
tracepoint_attr_lineno(VALUE tpval)
{
    return rb_tracearg_lineno(get_trace_arg());
}

/*
 * Path of the file being run
 */
static VALUE
tracepoint_attr_path(VALUE tpval)
{
    return rb_tracearg_path(get_trace_arg());
}

/*
 * Return the parameters of the method or block that the current hook belongs to
 */
static VALUE
tracepoint_attr_parameters(VALUE tpval)
{
    return rb_tracearg_parameters(get_trace_arg());
}

/*
 * Return the name at the definition of the method being called
 */
static VALUE
tracepoint_attr_method_id(VALUE tpval)
{
    return rb_tracearg_method_id(get_trace_arg());
}

/*
 * Return the called name of the method being called
 */
static VALUE
tracepoint_attr_callee_id(VALUE tpval)
{
    return rb_tracearg_callee_id(get_trace_arg());
}
1083
1084 /*
1085 * Return class or module of the method being called.
1086 *
1087 * class C; def foo; end; end
1088 * trace = TracePoint.new(:call) do |tp|
1089 * p tp.defined_class #=> C
1090 * end.enable do
1091 * C.new.foo
1092 * end
1093 *
1094 * If method is defined by a module, then that module is returned.
1095 *
1096 * module M; def foo; end; end
1097 * class C; include M; end;
1098 * trace = TracePoint.new(:call) do |tp|
1099 * p tp.defined_class #=> M
1100 * end.enable do
1101 * C.new.foo
1102 * end
1103 *
 * <b>Note:</b> #defined_class returns the singleton class.
 *
 * The 6th block parameter of Kernel#set_trace_func passes the original class
 * the singleton class is attached to.
1108 *
1109 * <b>This is a difference between Kernel#set_trace_func and TracePoint.</b>
1110 *
1111 * class C; def self.foo; end; end
1112 * trace = TracePoint.new(:call) do |tp|
1113 * p tp.defined_class #=> #<Class:C>
1114 * end.enable do
1115 * C.foo
1116 * end
1117 */
1118 static VALUE
tracepoint_attr_defined_class(VALUE tpval)1119 tracepoint_attr_defined_class(VALUE tpval)
1120 {
1121 return rb_tracearg_defined_class(get_trace_arg());
1122 }
1123
1124 /*
1125 * Return the generated binding object from event
1126 */
1127 static VALUE
tracepoint_attr_binding(VALUE tpval)1128 tracepoint_attr_binding(VALUE tpval)
1129 {
1130 return rb_tracearg_binding(get_trace_arg());
1131 }
1132
1133 /*
1134 * Return the trace object during event
1135 *
1136 * Same as TracePoint#binding:
1137 * trace.binding.eval('self')
1138 */
1139 static VALUE
tracepoint_attr_self(VALUE tpval)1140 tracepoint_attr_self(VALUE tpval)
1141 {
1142 return rb_tracearg_self(get_trace_arg());
1143 }
1144
1145 /*
 * Return value from +:return+, +:c_return+, and +:b_return+ event
1147 */
1148 static VALUE
tracepoint_attr_return_value(VALUE tpval)1149 tracepoint_attr_return_value(VALUE tpval)
1150 {
1151 return rb_tracearg_return_value(get_trace_arg());
1152 }
1153
1154 /*
1155 * Value from exception raised on the +:raise+ event
1156 */
1157 static VALUE
tracepoint_attr_raised_exception(VALUE tpval)1158 tracepoint_attr_raised_exception(VALUE tpval)
1159 {
1160 return rb_tracearg_raised_exception(get_trace_arg());
1161 }
1162
1163 /*
1164 * Compiled source code (String) on *eval methods on the +:script_compiled+ event.
1165 * If loaded from a file, it will return nil.
1166 */
1167 static VALUE
tracepoint_attr_eval_script(VALUE tpval)1168 tracepoint_attr_eval_script(VALUE tpval)
1169 {
1170 return rb_tracearg_eval_script(get_trace_arg());
1171 }
1172
1173 /*
1174 * Compiled instruction sequence represented by a RubyVM::InstructionSequence instance
1175 * on the +:script_compiled+ event.
1176 *
1177 * Note that this method is MRI specific.
1178 */
1179 static VALUE
tracepoint_attr_instruction_sequence(VALUE tpval)1180 tracepoint_attr_instruction_sequence(VALUE tpval)
1181 {
1182 return rb_tracearg_instruction_sequence(get_trace_arg());
1183 }
1184
1185 static void
tp_call_trace(VALUE tpval,rb_trace_arg_t * trace_arg)1186 tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
1187 {
1188 rb_tp_t *tp = tpptr(tpval);
1189
1190 if (tp->func) {
1191 (*tp->func)(tpval, tp->data);
1192 }
1193 else {
1194 rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
1195 }
1196 }
1197
1198 VALUE
rb_tracepoint_enable(VALUE tpval)1199 rb_tracepoint_enable(VALUE tpval)
1200 {
1201 rb_tp_t *tp;
1202 tp = tpptr(tpval);
1203
1204 if (tp->local_target_set != Qfalse) {
1205 rb_raise(rb_eArgError, "can't nest-enable a targetting TracePoint");
1206 }
1207
1208 if (tp->target_th) {
1209 rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1210 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1211 }
1212 else {
1213 rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1214 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1215 }
1216 tp->tracing = 1;
1217 return Qundef;
1218 }
1219
1220 static const rb_iseq_t *
iseq_of(VALUE target)1221 iseq_of(VALUE target)
1222 {
1223 VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
1224 if (NIL_P(iseqv)) {
1225 rb_raise(rb_eArgError, "specified target is not supported");
1226 }
1227 else {
1228 return rb_iseqw_to_iseq(iseqv);
1229 }
1230 }
1231
1232 const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
1233
/* Enable tp only for `target` (and, for a bmethod, its Method object),
 * optionally restricted to `target_line`. Records every patched object
 * in tp->local_target_set so rb_tracepoint_disable can undo the work.
 * Raises ArgumentError when already tracing, when target_line is given
 * without the :line event, or when nothing could be hooked. */
static VALUE
rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
{
    rb_tp_t *tp = tpptr(tpval);
    const rb_iseq_t *iseq = iseq_of(target); /* raises if target is unsupported */
    int n;                                   /* number of hooks installed */
    unsigned int line = 0;                   /* 0 = no line filter */

    if (tp->tracing > 0) {
        rb_raise(rb_eArgError, "can't nest-enable a targetting TracePoint");
    }

    if (!NIL_P(target_line)) {
        /* a line filter only makes sense when :line events are traced */
        if ((tp->events & RUBY_EVENT_LINE) == 0) {
            rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
        }
        else {
            line = NUM2UINT(target_line);
        }
    }

    VM_ASSERT(tp->local_target_set == Qfalse);
    /* hidden identity hash: keys are the patched iseq/method objects */
    tp->local_target_set = rb_obj_hide(rb_ident_hash_new());

    /* iseq: value Qtrue marks an iseq key (see disable_local_event_iseq_i) */
    n = rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line);
    rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);

    /* bmethod: additionally hook the method definition's own hook list
     * so :call/:return fire for the bmethod itself */
    if (rb_obj_is_method(target)) {
        rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
        if (def->type == VM_METHOD_TYPE_BMETHOD &&
            (tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN))) {
            /* NOTE(review): any pre-existing hooks list would be overwritten
             * here — presumably impossible while tp->tracing == 0; confirm */
            def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
            rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
            /* value Qfalse marks a bmethod key */
            rb_hash_aset(tp->local_target_set, target, Qfalse);

            n++;
        }
    }

    if (n == 0) {
        rb_raise(rb_eArgError, "can not enable any hooks");
    }

    ruby_vm_event_local_num++;

    tp->tracing = 1;

    return Qnil;
}
1285
/* rb_hash_foreach callback over tp->local_target_set: the value is
 * Qtrue for an iseq key and Qfalse for a bmethod (Method) key, as
 * recorded by rb_tracepoint_enable_for_target. */
static int
disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
{
    if (iseq_p) {
        rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
    }
    else {
        /* bmethod */
        rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
        rb_hook_list_t *hooks = def->body.bmethod.hooks;
        VM_ASSERT(hooks != NULL);
        rb_hook_list_remove_tracepoint(hooks, tpval);
        if (hooks->running == 0) {
            rb_hook_list_free(def->body.bmethod.hooks);
        }
        /* NOTE(review): when hooks->running != 0 the list is detached without
         * being freed here — presumably reclaimed once the running hook
         * finishes; confirm there is no leak on this path. */
        def->body.bmethod.hooks = NULL;
    }
    return ST_CONTINUE;
}
1305
1306 VALUE
rb_tracepoint_disable(VALUE tpval)1307 rb_tracepoint_disable(VALUE tpval)
1308 {
1309 rb_tp_t *tp;
1310
1311 tp = tpptr(tpval);
1312
1313 if (tp->local_target_set) {
1314 rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
1315 tp->local_target_set = Qfalse;
1316 ruby_vm_event_local_num--;
1317 }
1318 else {
1319 if (tp->target_th) {
1320 rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
1321 }
1322 else {
1323 rb_remove_event_hook_with_data((rb_event_hook_func_t)tp_call_trace, tpval);
1324 }
1325 }
1326 tp->tracing = 0;
1327 return Qundef;
1328 }
1329
1330 void
rb_hook_list_connect_tracepoint(VALUE target,rb_hook_list_t * list,VALUE tpval,unsigned int target_line)1331 rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
1332 {
1333 rb_tp_t *tp = tpptr(tpval);
1334 rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1335 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1336 hook->filter.target_line = target_line;
1337 hook_list_connect(target, list, hook, FALSE);
1338 }
1339
1340 void
rb_hook_list_remove_tracepoint(rb_hook_list_t * list,VALUE tpval)1341 rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
1342 {
1343 rb_event_hook_t *hook = list->hooks;
1344 rb_event_flag_t events = 0;
1345
1346 while (hook) {
1347 if (hook->data == tpval) {
1348 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
1349 list->need_clean = TRUE;
1350 }
1351 else {
1352 events |= hook->events;
1353 }
1354 hook = hook->next;
1355 }
1356
1357 list->events = events;
1358 }
1359
1360 /*
1361 * call-seq:
1362 * trace.enable -> true or false
1363 * trace.enable { block } -> obj
1364 *
1365 * Activates the trace
1366 *
1367 * Return true if trace was enabled.
1368 * Return false if trace was disabled.
1369 *
1370 * trace.enabled? #=> false
1371 * trace.enable #=> false (previous state)
1372 * # trace is enabled
1373 * trace.enabled? #=> true
1374 * trace.enable #=> true (previous state)
1375 * # trace is still enabled
1376 *
1377 * If a block is given, the trace will only be enabled within the scope of the
1378 * block.
1379 *
1380 * trace.enabled?
1381 * #=> false
1382 *
1383 * trace.enable do
1384 * trace.enabled?
1385 * # only enabled for this block
1386 * end
1387 *
1388 * trace.enabled?
1389 * #=> false
1390 *
1391 * Note: You cannot access event hooks within the block.
1392 *
1393 * trace.enable { p tp.lineno }
1394 * #=> RuntimeError: access from outside
1395 *
1396 */
1397 static VALUE
tracepoint_enable_m(VALUE tpval,VALUE target,VALUE target_line)1398 tracepoint_enable_m(VALUE tpval, VALUE target, VALUE target_line)
1399 {
1400 rb_tp_t *tp = tpptr(tpval);
1401 int previous_tracing = tp->tracing;
1402
1403 if (NIL_P(target)) {
1404 if (!NIL_P(target_line)) {
1405 rb_raise(rb_eArgError, "only target_line is specified");
1406 }
1407 rb_tracepoint_enable(tpval);
1408 }
1409 else {
1410 rb_tracepoint_enable_for_target(tpval, target, target_line);
1411 }
1412
1413 if (rb_block_given_p()) {
1414 return rb_ensure(rb_yield, Qundef,
1415 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1416 tpval);
1417 }
1418 else {
1419 return previous_tracing ? Qtrue : Qfalse;
1420 }
1421 }
1422
1423 /*
1424 * call-seq:
1425 * trace.disable -> true or false
1426 * trace.disable { block } -> obj
1427 *
1428 * Deactivates the trace
1429 *
1430 * Return true if trace was enabled.
1431 * Return false if trace was disabled.
1432 *
1433 * trace.enabled? #=> true
1434 * trace.disable #=> true (previous status)
1435 * trace.enabled? #=> false
1436 * trace.disable #=> false
1437 *
 * If a block is given, the trace will only be disabled within the scope of the
 * block.
1440 *
1441 * trace.enabled?
1442 * #=> true
1443 *
1444 * trace.disable do
1445 * trace.enabled?
1446 * # only disabled for this block
1447 * end
1448 *
1449 * trace.enabled?
1450 * #=> true
1451 *
1452 * Note: You cannot access event hooks within the block.
1453 *
1454 * trace.disable { p tp.lineno }
1455 * #=> RuntimeError: access from outside
1456 */
1457
1458 static VALUE
tracepoint_disable_m(VALUE tpval)1459 tracepoint_disable_m(VALUE tpval)
1460 {
1461 rb_tp_t *tp = tpptr(tpval);
1462 int previous_tracing = tp->tracing;
1463
1464 if (rb_block_given_p()) {
1465 if (tp->local_target_set != Qfalse) {
1466 rb_raise(rb_eArgError, "can't disable a targetting TracePoint in a block");
1467 }
1468
1469 rb_tracepoint_disable(tpval);
1470 return rb_ensure(rb_yield, Qundef,
1471 previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
1472 tpval);
1473 }
1474 else {
1475 rb_tracepoint_disable(tpval);
1476 return previous_tracing ? Qtrue : Qfalse;
1477 }
1478 }
1479
1480 /*
1481 * call-seq:
1482 * trace.enabled? -> true or false
1483 *
1484 * The current status of the trace
1485 */
1486 VALUE
rb_tracepoint_enabled_p(VALUE tpval)1487 rb_tracepoint_enabled_p(VALUE tpval)
1488 {
1489 rb_tp_t *tp = tpptr(tpval);
1490 return tp->tracing ? Qtrue : Qfalse;
1491 }
1492
/* Allocate and initialize a TracePoint object. Exactly one of `func`
 * (C callback + data) or `proc` (Ruby block) is expected to be set;
 * tp_call_trace dispatches on tp->func. */
static VALUE
tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
{
    VALUE tpval = tp_alloc(klass);
    rb_tp_t *tp;
    TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);

    /* NOTE(review): target_th is accepted but never stored on tp here —
     * consistent with rb_tracepoint_new's doc that the thread argument is
     * currently ignored; confirm before relying on per-thread targeting. */
    tp->proc = proc;
    tp->func = func;
    tp->data = data;
    tp->events = events;
    tp->self = tpval;

    return tpval;
}
1508
1509 /*
1510 * Creates a tracepoint by registering a callback function for one or more
1511 * tracepoint events. Once the tracepoint is created, you can use
1512 * rb_tracepoint_enable to enable the tracepoint.
1513 *
1514 * Parameters:
 * 1. VALUE target_thval - Meant for picking the thread in which the tracepoint
 *    is to be created. However, the current implementation ignores this parameter,
 *    and the tracepoint is created for all threads. Simply specify Qnil.
1518 * 2. rb_event_flag_t events - Event(s) to listen to.
1519 * 3. void (*func)(VALUE, void *) - A callback function.
1520 * 4. void *data - Void pointer that will be passed to the callback function.
1521 *
1522 * When the callback function is called, it will be passed 2 parameters:
1523 * 1)VALUE tpval - the TracePoint object from which trace args can be extracted.
1524 * 2)void *data - A void pointer which helps to share scope with the callback function.
1525 *
 * It is important to note that you cannot register callbacks for normal events and internal events
 * simultaneously because they serve different purposes.
1528 * You can use any Ruby APIs (calling methods and so on) on normal event hooks.
1529 * However, in internal events, you can not use any Ruby APIs (even object creations).
1530 * This is why we can't specify internal events by TracePoint directly.
1531 * Limitations are MRI version specific.
1532 *
1533 * Example:
1534 * rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ, obj_event_i, data);
1535 *
1536 * In this example, a callback function obj_event_i will be registered for
1537 * internal events RUBY_INTERNAL_EVENT_NEWOBJ and RUBY_INTERNAL_EVENT_FREEOBJ.
1538 */
1539 VALUE
rb_tracepoint_new(VALUE target_thval,rb_event_flag_t events,void (* func)(VALUE,void *),void * data)1540 rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
1541 {
1542 rb_thread_t *target_th = NULL;
1543
1544 if (RTEST(target_thval)) {
1545 target_th = rb_thread_ptr(target_thval);
1546 /* TODO: Test it!
1547 * Warning: This function is not tested.
1548 */
1549 }
1550 return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
1551 }
1552
1553 /*
1554 * call-seq:
1555 * TracePoint.new(*events) { |obj| block } -> obj
1556 *
1557 * Returns a new TracePoint object, not enabled by default.
1558 *
1559 * Next, in order to activate the trace, you must use TracePoint#enable
1560 *
1561 * trace = TracePoint.new(:call) do |tp|
1562 * p [tp.lineno, tp.defined_class, tp.method_id, tp.event]
1563 * end
1564 * #=> #<TracePoint:disabled>
1565 *
1566 * trace.enable
1567 * #=> false
1568 *
1569 * puts "Hello, TracePoint!"
1570 * # ...
1571 * # [48, IRB::Notifier::AbstractNotifier, :printf, :call]
1572 * # ...
1573 *
1574 * When you want to deactivate the trace, you must use TracePoint#disable
1575 *
1576 * trace.disable
1577 *
1578 * See TracePoint@Events for possible events and more information.
1579 *
1580 * A block must be given, otherwise an ArgumentError is raised.
1581 *
1582 * If the trace method isn't included in the given events filter, a
1583 * RuntimeError is raised.
1584 *
1585 * TracePoint.trace(:line) do |tp|
1586 * p tp.raised_exception
1587 * end
1588 * #=> RuntimeError: 'raised_exception' not supported by this event
1589 *
1590 * If the trace method is called outside block, a RuntimeError is raised.
1591 *
1592 * TracePoint.trace(:line) do |tp|
1593 * $tp = tp
1594 * end
1595 * $tp.lineno #=> access from outside (RuntimeError)
1596 *
1597 * Access from other threads is also forbidden.
1598 *
1599 */
1600 static VALUE
tracepoint_new_s(int argc,VALUE * argv,VALUE self)1601 tracepoint_new_s(int argc, VALUE *argv, VALUE self)
1602 {
1603 rb_event_flag_t events = 0;
1604 int i;
1605
1606 if (argc > 0) {
1607 for (i=0; i<argc; i++) {
1608 events |= symbol2event_flag(argv[i]);
1609 }
1610 }
1611 else {
1612 events = RUBY_EVENT_TRACEPOINT_ALL;
1613 }
1614
1615 if (!rb_block_given_p()) {
1616 rb_raise(rb_eArgError, "must be called with a block");
1617 }
1618
1619 return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
1620 }
1621
1622 static VALUE
tracepoint_trace_s(int argc,VALUE * argv,VALUE self)1623 tracepoint_trace_s(int argc, VALUE *argv, VALUE self)
1624 {
1625 VALUE trace = tracepoint_new_s(argc, argv, self);
1626 rb_tracepoint_enable(trace);
1627 return trace;
1628 }
1629
1630 /*
1631 * call-seq:
1632 * trace.inspect -> string
1633 *
1634 * Return a string containing a human-readable TracePoint
1635 * status.
1636 */
1637
/* Build the inspect string. Inside a hook (trace_arg set) the format
 * depends on the current event; outside a hook it only reports the
 * enabled/disabled state. */
static VALUE
tracepoint_inspect(VALUE self)
{
    rb_tp_t *tp = tpptr(self);
    rb_trace_arg_t *trace_arg = GET_EC()->trace_arg;

    if (trace_arg) {
        switch (trace_arg->event) {
          case RUBY_EVENT_LINE:
            {
                VALUE sym = rb_tracearg_method_id(trace_arg);
                /* line events outside any method fall back to the plain form */
                if (NIL_P(sym))
                    goto default_inspect;
                return rb_sprintf("#<TracePoint:%"PRIsVALUE"@%"PRIsVALUE":%d in `%"PRIsVALUE"'>",
                                  rb_tracearg_event(trace_arg),
                                  rb_tracearg_path(trace_arg),
                                  FIX2INT(rb_tracearg_lineno(trace_arg)),
                                  sym);
            }
          case RUBY_EVENT_CALL:
          case RUBY_EVENT_C_CALL:
          case RUBY_EVENT_RETURN:
          case RUBY_EVENT_C_RETURN:
            return rb_sprintf("#<TracePoint:%"PRIsVALUE" `%"PRIsVALUE"'@%"PRIsVALUE":%d>",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_method_id(trace_arg),
                              rb_tracearg_path(trace_arg),
                              FIX2INT(rb_tracearg_lineno(trace_arg)));
          case RUBY_EVENT_THREAD_BEGIN:
          case RUBY_EVENT_THREAD_END:
            /* thread events have no path/lineno; show the thread (self) instead */
            return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_self(trace_arg));
          default:
          default_inspect:
            return rb_sprintf("#<TracePoint:%"PRIsVALUE"@%"PRIsVALUE":%d>",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_path(trace_arg),
                              FIX2INT(rb_tracearg_lineno(trace_arg)));
        }
    }
    else {
        return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
    }
}
1683
1684 static void
tracepoint_stat_event_hooks(VALUE hash,VALUE key,rb_event_hook_t * hook)1685 tracepoint_stat_event_hooks(VALUE hash, VALUE key, rb_event_hook_t *hook)
1686 {
1687 int active = 0, deleted = 0;
1688
1689 while (hook) {
1690 if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
1691 deleted++;
1692 }
1693 else {
1694 active++;
1695 }
1696 hook = hook->next;
1697 }
1698
1699 rb_hash_aset(hash, key, rb_ary_new3(2, INT2FIX(active), INT2FIX(deleted)));
1700 }
1701
1702 /*
1703 * call-seq:
1704 * TracePoint.stat -> obj
1705 *
1706 * Returns internal information of TracePoint.
1707 *
1708 * The contents of the returned value are implementation specific.
1709 * It may be changed in future.
1710 *
1711 * This method is only for debugging TracePoint itself.
1712 */
1713
1714 static VALUE
tracepoint_stat_s(VALUE self)1715 tracepoint_stat_s(VALUE self)
1716 {
1717 rb_vm_t *vm = GET_VM();
1718 VALUE stat = rb_hash_new();
1719
1720 tracepoint_stat_event_hooks(stat, vm->self, vm->global_hooks.hooks);
1721 /* TODO: thread local hooks */
1722
1723 return stat;
1724 }
1725
1726 /* This function is called from inits.c */
void
Init_vm_trace(void)
{
    /* trace_func */
    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /*
     * Document-class: TracePoint
     *
     * A class that provides the functionality of Kernel#set_trace_func in a
     * nice Object-Oriented API.
     *
     * == Example
     *
     * We can use TracePoint to gather information specifically for exceptions:
     *
     *	    trace = TracePoint.new(:raise) do |tp|
     *		p [tp.lineno, tp.event, tp.raised_exception]
     *	    end
     *	    #=> #<TracePoint:disabled>
     *
     *	    trace.enable
     *	    #=> false
     *
     *	    0 / 0
     *	    #=> [5, :raise, #<ZeroDivisionError: divided by 0>]
     *
     * == Events
     *
     * If you don't specify the type of events you want to listen for,
     * TracePoint will include all available events.
     *
     * *Note* do not depend on current event set, as this list is subject to
     * change. Instead, it is recommended you specify the type of events you
     * want to use.
     *
     * To filter what is traced, you can pass any of the following as +events+:
     *
     * +:line+:: execute code on a new line
     * +:class+:: start a class or module definition
     * +:end+:: finish a class or module definition
     * +:call+:: call a Ruby method
     * +:return+:: return from a Ruby method
     * +:c_call+:: call a C-language routine
     * +:c_return+:: return from a C-language routine
     * +:raise+:: raise an exception
     * +:b_call+:: event hook at block entry
     * +:b_return+:: event hook at block ending
     * +:thread_begin+:: event hook at thread beginning
     * +:thread_end+:: event hook at thread ending
     * +:fiber_switch+:: event hook at fiber switch
     *
     */
    rb_cTracePoint = rb_define_class("TracePoint", rb_cObject);
    rb_undef_alloc_func(rb_cTracePoint); /* instances come only from TracePoint.new/trace */
    rb_define_singleton_method(rb_cTracePoint, "new", tracepoint_new_s, -1);
    /*
     * Document-method: trace
     *
     * call-seq:
     *	TracePoint.trace(*events) { |obj| block }	-> obj
     *
     *  A convenience method for TracePoint.new, that activates the trace
     *  automatically.
     *
     *	    trace = TracePoint.trace(:call) { |tp| [tp.lineno, tp.event] }
     *	    #=> #<TracePoint:enabled>
     *
     *	    trace.enabled? #=> true
     */
    rb_define_singleton_method(rb_cTracePoint, "trace", tracepoint_trace_s, -1);

    rb_define_method(rb_cTracePoint, "__enable", tracepoint_enable_m, 2); /* NOTE(review): "__enable" presumably wrapped by a Ruby-level TracePoint#enable — confirm in prelude */
    rb_define_method(rb_cTracePoint, "disable", tracepoint_disable_m, 0);
    rb_define_method(rb_cTracePoint, "enabled?", rb_tracepoint_enabled_p, 0);

    rb_define_method(rb_cTracePoint, "inspect", tracepoint_inspect, 0);

    rb_define_method(rb_cTracePoint, "event", tracepoint_attr_event, 0);
    rb_define_method(rb_cTracePoint, "lineno", tracepoint_attr_lineno, 0);
    rb_define_method(rb_cTracePoint, "path", tracepoint_attr_path, 0);
    rb_define_method(rb_cTracePoint, "parameters", tracepoint_attr_parameters, 0);
    rb_define_method(rb_cTracePoint, "method_id", tracepoint_attr_method_id, 0);
    rb_define_method(rb_cTracePoint, "callee_id", tracepoint_attr_callee_id, 0);
    rb_define_method(rb_cTracePoint, "defined_class", tracepoint_attr_defined_class, 0);
    rb_define_method(rb_cTracePoint, "binding", tracepoint_attr_binding, 0);
    rb_define_method(rb_cTracePoint, "self", tracepoint_attr_self, 0);
    rb_define_method(rb_cTracePoint, "return_value", tracepoint_attr_return_value, 0);
    rb_define_method(rb_cTracePoint, "raised_exception", tracepoint_attr_raised_exception, 0);
    rb_define_method(rb_cTracePoint, "eval_script", tracepoint_attr_eval_script, 0);
    rb_define_method(rb_cTracePoint, "instruction_sequence", tracepoint_attr_instruction_sequence, 0);

    rb_define_singleton_method(rb_cTracePoint, "stat", tracepoint_stat_s, 0);
}
1823
/* A job deferred to the next VM safe point (used by GC, profilers, etc.). */
typedef struct rb_postponed_job_struct {
    rb_postponed_job_func_t func; /* callback to run at the next safe point */
    void *data;                   /* opaque argument handed to func */
} rb_postponed_job_t;

/* capacity of vm->postponed_job_buffer */
#define MAX_POSTPONED_JOB 1000
/* extra headroom used only by rb_postponed_job_register_one */
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24

/* malloc'ed wrapper queued from non-Ruby threads; freed when flushed */
struct rb_workqueue_job {
    struct list_node jnode; /* <=> vm->workqueue */
    rb_postponed_job_t job;
};
1836
void
Init_vm_postponed_job(void)
{
    /* allocate the fixed-size buffer consumed by postponed_job_register
     * and drained by rb_postponed_job_flush */
    rb_vm_t *vm = GET_VM();
    vm->postponed_job_buffer = ALLOC_N(rb_postponed_job_t, MAX_POSTPONED_JOB);
    vm->postponed_job_index = 0;
    /* workqueue is initialized when VM locks are initialized */
}
1845
/* Outcome of one postponed_job_register attempt. */
enum postponed_job_register_result {
    PJRR_SUCCESS = 0,    /* slot reserved and job stored */
    PJRR_FULL = 1,       /* buffer is full; registration failed */
    PJRR_INTERRUPTED = 2 /* lost the index CAS race; caller should retry */
};
1851
/* Async-signal-safe */
static enum postponed_job_register_result
postponed_job_register(rb_execution_context_t *ec, rb_vm_t *vm,
                       unsigned int flags, rb_postponed_job_func_t func, void *data, int max, int expected_index)
{
    rb_postponed_job_t *pjob;

    if (expected_index >= max) return PJRR_FULL; /* failed */

    /* reserve slot `expected_index` by advancing the shared index with a
     * CAS; if another thread or signal handler moved the index first,
     * report PJRR_INTERRUPTED so the caller can re-read it and retry */
    if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
        pjob = &vm->postponed_job_buffer[expected_index];
    }
    else {
        return PJRR_INTERRUPTED;
    }

    /* unused: pjob->flags = flags; */
    pjob->func = func;
    pjob->data = data;

    /* ask the VM to run rb_postponed_job_flush at the next interrupt check */
    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);

    return PJRR_SUCCESS;
}
1876
1877 /*
1878 * return 0 if job buffer is full
1879 * Async-signal-safe
1880 */
1881 int
rb_postponed_job_register(unsigned int flags,rb_postponed_job_func_t func,void * data)1882 rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
1883 {
1884 rb_execution_context_t *ec = GET_EC();
1885 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1886
1887 begin:
1888 switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB, vm->postponed_job_index)) {
1889 case PJRR_SUCCESS : return 1;
1890 case PJRR_FULL : return 0;
1891 case PJRR_INTERRUPTED: goto begin;
1892 default: rb_bug("unreachable\n");
1893 }
1894 }
1895
1896 /*
1897 * return 0 if job buffer is full
1898 * Async-signal-safe
1899 */
int
rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
{
    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    rb_postponed_job_t *pjob;
    int i, index;

  begin:
    index = vm->postponed_job_index;
    /* dedup: if a job with the same function is already queued, just
     * re-arm the interrupt instead of adding a duplicate */
    for (i=0; i<index; i++) {
        pjob = &vm->postponed_job_buffer[i];
        if (pjob->func == func) {
            RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec);
            return 2; /* already registered */
        }
    }
    /* NOTE(review): this path alone may use the SPECIAL_ADDITION headroom
     * beyond MAX_POSTPONED_JOB — presumably so critical one-shot jobs still
     * fit when the buffer is "full"; confirm intent */
    switch (postponed_job_register(ec, vm, flags, func, data, MAX_POSTPONED_JOB + MAX_POSTPONED_JOB_SPECIAL_ADDITION, index)) {
      case PJRR_SUCCESS : return 1;
      case PJRR_FULL    : return 0;
      case PJRR_INTERRUPTED: goto begin; /* index changed concurrently; rescan */
      default: rb_bug("unreachable\n");
    }
}
1924
1925 /*
1926 * thread-safe and called from non-Ruby thread
1927 * returns FALSE on failure (ENOMEM), TRUE otherwise
1928 */
1929 int
rb_workqueue_register(unsigned flags,rb_postponed_job_func_t func,void * data)1930 rb_workqueue_register(unsigned flags, rb_postponed_job_func_t func, void *data)
1931 {
1932 struct rb_workqueue_job *wq_job = malloc(sizeof(*wq_job));
1933 rb_vm_t *vm = GET_VM();
1934
1935 if (!wq_job) return FALSE;
1936 wq_job->job.func = func;
1937 wq_job->job.data = data;
1938
1939 rb_nativethread_lock_lock(&vm->workqueue_lock);
1940 list_add_tail(&vm->workqueue, &wq_job->jnode);
1941 rb_nativethread_lock_unlock(&vm->workqueue_lock);
1942
1943 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
1944
1945 return TRUE;
1946 }
1947
/* Run all pending postponed jobs and workqueue jobs at a VM safe point.
 * Further POSTPONED_JOB/TRAP interrupts are masked while the jobs run. */
void
rb_postponed_job_flush(rb_vm_t *vm)
{
    rb_execution_context_t *ec = GET_EC();
    const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK|TRAP_INTERRUPT_MASK;
    volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
    VALUE volatile saved_errno = ec->errinfo;
    struct list_head tmp;

    list_head_init(&tmp);

    /* steal the whole workqueue under the lock; jobs run lock-free below */
    rb_nativethread_lock_lock(&vm->workqueue_lock);
    list_append_list(&tmp, &vm->workqueue);
    rb_nativethread_lock_unlock(&vm->workqueue_lock);

    ec->errinfo = Qnil;
    /* mask POSTPONED_JOB dispatch */
    ec->interrupt_mask |= block_mask;
    {
        EC_PUSH_TAG(ec);
        if (EC_EXEC_TAG() == TAG_NONE) {
            int index;
            struct rb_workqueue_job *wq_job;

            /* pop-and-run buffered jobs; the CAS guards against concurrent
             * registrations moving the index underneath us */
            while ((index = vm->postponed_job_index) > 0) {
                if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
                    rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
                    (*pjob->func)(pjob->data);
                }
            }
            while ((wq_job = list_pop(&tmp, struct rb_workqueue_job, jnode))) {
                rb_postponed_job_t pjob = wq_job->job;

                /* copy then free before invoking: the callback may raise
                 * (longjmp past this frame) */
                free(wq_job);
                (pjob.func)(pjob.data);
            }
        }
        EC_POP_TAG();
    }
    /* restore POSTPONED_JOB mask */
    ec->interrupt_mask &= ~(saved_mask ^ block_mask);
    ec->errinfo = saved_errno;

    /* don't leak memory if a job threw an exception */
    if (!list_empty(&tmp)) {
        /* re-queue the unprocessed workqueue jobs and re-arm the interrupt */
        rb_nativethread_lock_lock(&vm->workqueue_lock);
        list_prepend_list(&vm->workqueue, &tmp);
        rb_nativethread_lock_unlock(&vm->workqueue_lock);

        RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
    }
}
2000