1 /**********************************************************************
2
3 cont.c -
4
5 $Author: nagachika $
6 created at: Thu May 23 09:03:43 2007
7
8 Copyright (C) 2007 Koichi Sasada
9
10 **********************************************************************/
11
12 #include "internal.h"
13 #include "vm_core.h"
14 #include "gc.h"
15 #include "eval_intern.h"
16 #include "mjit.h"
17
18 /* FIBER_USE_NATIVE enables a Fiber performance improvement using system-
19 * dependent methods such as make/setcontext on POSIX systems or the
20 * CreateFiber() API on Windows.
21 * This hack makes Fiber context switching faster (2x or more).
22 * However, it decreases the maximum number of Fibers. For example, on a
23 * 32bit POSIX OS, only ten or twenty thousand Fibers can be created.
24 *
25 * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
26 * in Proc. of the 51st Programming Symposium, pp.21--28 (2010) (in Japanese).
27 */
28
29 /*
30 Enable FIBER_USE_COROUTINE to make fiber yield/resume much faster by using native assembly implementations.
31
32 rvm install ruby-head-ioquatix-native-fiber --url https://github.com/ioquatix/ruby --branch native-fiber
33
34 # Without libcoro
35 koyoko% ./build/bin/ruby ./fiber_benchmark.rb 10000 1000
36 setup time for 10000 fibers: 0.099961
37 execution time for 1000 messages: 19.505909
38
39 # With libcoro
40 koyoko% ./build/bin/ruby ./fiber_benchmark.rb 10000 1000
41 setup time for 10000 fibers: 0.099268
42 execution time for 1000 messages: 8.491746
43 */
44
45 #ifdef FIBER_USE_COROUTINE
46 #include FIBER_USE_COROUTINE
47 #define FIBER_USE_NATIVE 1
48 #endif
49
50 #if !defined(FIBER_USE_NATIVE)
51 # if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
52 # if 0
53 # elif defined(__NetBSD__)
54 /* In our experience, NetBSD doesn't support using setcontext() and pthreads
55 * simultaneously. This is because pthread_self(), TLS and other information
56 * are derived from the stack pointer (its higher bits).
57 * TODO: check for this constraint in configure.
58 */
59 # define FIBER_USE_NATIVE 0
60 # elif defined(__sun)
61 /* Disabled on Solaris because resuming any Fiber caused a SEGV, for some reason.
62 */
63 # define FIBER_USE_NATIVE 0
64 # elif defined(__ia64)
65 /* At least Linux/ia64's getcontext(3) doesn't save the register window.
66 */
67 # define FIBER_USE_NATIVE 0
68 # elif defined(__GNU__)
69 /* GNU/Hurd doesn't fully support getcontext, setcontext, makecontext
70 * and swapcontext functions. Disabling their usage till support is
71 * implemented. More info at
72 * http://darnassus.sceen.net/~hurd-web/open_issues/glibc/#getcontext
73 */
74 # define FIBER_USE_NATIVE 0
75 # else
76 # define FIBER_USE_NATIVE 1
77 # endif
78 # elif defined(_WIN32)
79 # define FIBER_USE_NATIVE 1
80 # endif
81 #endif
82 #if !defined(FIBER_USE_NATIVE)
83 #define FIBER_USE_NATIVE 0
84 #endif
85
86 #if FIBER_USE_NATIVE
87 #ifndef _WIN32
88 #include <unistd.h>
89 #include <sys/mman.h>
90 #include <ucontext.h>
91 #endif
92 #define RB_PAGE_SIZE (pagesize)
93 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
94 static long pagesize;
95 #endif /*FIBER_USE_NATIVE*/
96
97 #define CAPTURE_JUST_VALID_VM_STACK 1
98
99 enum context_type {
100 CONTINUATION_CONTEXT = 0,
101 FIBER_CONTEXT = 1
102 };
103
104 struct cont_saved_vm_stack {
105 VALUE *ptr;
106 #ifdef CAPTURE_JUST_VALID_VM_STACK
107 size_t slen; /* length of stack (head of ec->vm_stack) */
108 size_t clen; /* length of control frames (tail of ec->vm_stack) */
109 #endif
110 };
111
112 typedef struct rb_context_struct {
113 enum context_type type;
114 int argc;
115 VALUE self;
116 VALUE value;
117
118 struct cont_saved_vm_stack saved_vm_stack;
119
120 struct {
121 VALUE *stack;
122 VALUE *stack_src;
123 size_t stack_size;
124 #ifdef __ia64
125 VALUE *register_stack;
126 VALUE *register_stack_src;
127 int register_stack_size;
128 #endif
129 } machine;
130 rb_execution_context_t saved_ec;
131 rb_jmpbuf_t jmpbuf;
132 rb_ensure_entry_t *ensure_array;
133 /* Pointer to MJIT info about the continuation. */
134 struct mjit_cont *mjit_cont;
135 } rb_context_t;
136
137
138 /*
139 * Fiber status:
140 * [Fiber.new] ------> FIBER_CREATED
141 * | [Fiber#resume]
142 * v
143 * +--> FIBER_RESUMED ----+
144 * [Fiber#resume] | | [Fiber.yield] |
145 * | v |
146 * +-- FIBER_SUSPENDED | [Terminate]
147 * |
148 * FIBER_TERMINATED <-+
149 */
150 enum fiber_status {
151 FIBER_CREATED,
152 FIBER_RESUMED,
153 FIBER_SUSPENDED,
154 FIBER_TERMINATED
155 };
156
157 #define FIBER_CREATED_P(fib) ((fib)->status == FIBER_CREATED)
158 #define FIBER_RESUMED_P(fib) ((fib)->status == FIBER_RESUMED)
159 #define FIBER_SUSPENDED_P(fib) ((fib)->status == FIBER_SUSPENDED)
160 #define FIBER_TERMINATED_P(fib) ((fib)->status == FIBER_TERMINATED)
161 #define FIBER_RUNNABLE_P(fib) (FIBER_CREATED_P(fib) || FIBER_SUSPENDED_P(fib))
162
163 #if FIBER_USE_NATIVE && !defined(FIBER_USE_COROUTINE) && !defined(_WIN32)
164 static inline int
165 fiber_context_create(ucontext_t *context, void (*func)(), void *arg, void *ptr, size_t size)
166 {
167 if (getcontext(context) < 0) return -1;
168 /*
169 * getcontext() may fail by some reasons:
170 * 1. SELinux policy banned one of "rt_sigprocmask",
171 * "sigprocmask" or "swapcontext";
172 * 2. libseccomp (aka. syscall filter) banned one of them.
173 */
174 context->uc_link = NULL;
175 context->uc_stack.ss_sp = ptr;
176 context->uc_stack.ss_size = size;
177 makecontext(context, func, 0);
178 return 0;
179 }
180 #endif
181
182 struct rb_fiber_struct {
183 rb_context_t cont;
184 VALUE first_proc;
185 struct rb_fiber_struct *prev;
186 BITFIELD(enum fiber_status, status, 2);
187 /* If a fiber invokes "transfer",
188 * then this fiber can't "resume" any more after that.
189 * You shouldn't mix "transfer" and "resume".
190 */
191 unsigned int transferred : 1;
192
193 #if FIBER_USE_NATIVE
194 #if defined(FIBER_USE_COROUTINE)
195 #define FIBER_ALLOCATE_STACK
196 coroutine_context context;
197 void *ss_sp;
198 size_t ss_size;
199 #elif defined(_WIN32)
200 void *fib_handle;
201 #else
202 #define FIBER_ALLOCATE_STACK
203 ucontext_t context;
204 /* Because context.uc_stack.ss_sp and context.uc_stack.ss_size
205 * are not necessarily valid after makecontext() or swapcontext(),
206 * they are saved in these variables for later use.
207 */
208 void *ss_sp;
209 size_t ss_size;
210 #endif
211 #endif
212 };
213
214 #ifdef FIBER_ALLOCATE_STACK
215 #define MAX_MACHINE_STACK_CACHE 10
216 static int machine_stack_cache_index = 0;
217 typedef struct machine_stack_cache_struct {
218 void *ptr;
219 size_t size;
220 } machine_stack_cache_t;
221 static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
222 static machine_stack_cache_t terminated_machine_stack;
223 #endif
224
225 static const char *
226 fiber_status_name(enum fiber_status s)
227 {
228 switch (s) {
229 case FIBER_CREATED: return "created";
230 case FIBER_RESUMED: return "resumed";
231 case FIBER_SUSPENDED: return "suspended";
232 case FIBER_TERMINATED: return "terminated";
233 }
234 VM_UNREACHABLE(fiber_status_name);
235 return NULL;
236 }
237
238 static void
239 fiber_verify(const rb_fiber_t *fib)
240 {
241 #if VM_CHECK_MODE > 0
242 VM_ASSERT(fib->cont.saved_ec.fiber_ptr == fib);
243
244 switch (fib->status) {
245 case FIBER_RESUMED:
246 VM_ASSERT(fib->cont.saved_ec.vm_stack != NULL);
247 break;
248 case FIBER_SUSPENDED:
249 VM_ASSERT(fib->cont.saved_ec.vm_stack != NULL);
250 break;
251 case FIBER_CREATED:
252 case FIBER_TERMINATED:
253 /* TODO */
254 break;
255 default:
256 VM_UNREACHABLE(fiber_verify);
257 }
258 #endif
259 }
260
261 #if VM_CHECK_MODE > 0
262 void
263 rb_ec_verify(const rb_execution_context_t *ec)
264 {
265 /* TODO */
266 }
267 #endif
268
269 static void
270 fiber_status_set(rb_fiber_t *fib, enum fiber_status s)
271 {
272 if (0) fprintf(stderr, "fib: %p, status: %s -> %s\n", (void *)fib, fiber_status_name(fib->status), fiber_status_name(s));
273 VM_ASSERT(!FIBER_TERMINATED_P(fib));
274 VM_ASSERT(fib->status != s);
275 fiber_verify(fib);
276 fib->status = s;
277 }
278
279 void
280 rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
281 {
282 ec->vm_stack = stack;
283 ec->vm_stack_size = size;
284 }
285
286 static inline void
287 ec_switch(rb_thread_t *th, rb_fiber_t *fib)
288 {
289 rb_execution_context_t *ec = &fib->cont.saved_ec;
290
291 ruby_current_execution_context_ptr = th->ec = ec;
292
293 /*
294 * timer-thread may set trap interrupt on previous th->ec at any time;
295 * ensure we do not delay (or lose) the trap interrupt handling.
296 */
297 if (th->vm->main_thread == th && rb_signal_buff_size() > 0) {
298 RUBY_VM_SET_TRAP_INTERRUPT(ec);
299 }
300
301 VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
302 }
303
304 static const rb_data_type_t cont_data_type, fiber_data_type;
305 static VALUE rb_cContinuation;
306 static VALUE rb_cFiber;
307 static VALUE rb_eFiberError;
308
309 static rb_context_t *
310 cont_ptr(VALUE obj)
311 {
312 rb_context_t *cont;
313
314 TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, cont);
315
316 return cont;
317 }
318
319 static rb_fiber_t *
320 fiber_ptr(VALUE obj)
321 {
322 rb_fiber_t *fib;
323
324 TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fib);
325 if (!fib) rb_raise(rb_eFiberError, "uninitialized fiber");
326
327 return fib;
328 }
329
330 NOINLINE(static VALUE cont_capture(volatile int *volatile stat));
331
332 #define THREAD_MUST_BE_RUNNING(th) do { \
333 if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
334 } while (0)
335
336 static VALUE
337 cont_thread_value(const rb_context_t *cont)
338 {
339 return cont->saved_ec.thread_ptr->self;
340 }
341
342 static void
343 cont_mark(void *ptr)
344 {
345 rb_context_t *cont = ptr;
346
347 RUBY_MARK_ENTER("cont");
348 rb_gc_mark(cont->value);
349
350 rb_execution_context_mark(&cont->saved_ec);
351 rb_gc_mark(cont_thread_value(cont));
352
353 if (cont->saved_vm_stack.ptr) {
354 #ifdef CAPTURE_JUST_VALID_VM_STACK
355 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
356 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
357 #else
358 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
359 cont->saved_vm_stack.ptr + cont->saved_ec.vm_stack_size);
360 #endif
361 }
362
363 if (cont->machine.stack) {
364 if (cont->type == CONTINUATION_CONTEXT) {
365 /* cont */
366 rb_gc_mark_locations(cont->machine.stack,
367 cont->machine.stack + cont->machine.stack_size);
368 }
369 else {
370 /* fiber */
371 const rb_fiber_t *fib = (rb_fiber_t*)cont;
372
373 if (!FIBER_TERMINATED_P(fib)) {
374 rb_gc_mark_locations(cont->machine.stack,
375 cont->machine.stack + cont->machine.stack_size);
376 }
377 }
378 }
379 #ifdef __ia64
380 if (cont->machine.register_stack) {
381 rb_gc_mark_locations(cont->machine.register_stack,
382 cont->machine.register_stack + cont->machine.register_stack_size);
383 }
384 #endif
385
386 RUBY_MARK_LEAVE("cont");
387 }
388
389 static int
390 fiber_is_root_p(const rb_fiber_t *fib)
391 {
392 return fib == fib->cont.saved_ec.thread_ptr->root_fiber;
393 }
394
395 static void
396 cont_free(void *ptr)
397 {
398 rb_context_t *cont = ptr;
399
400 RUBY_FREE_ENTER("cont");
401 ruby_xfree(cont->saved_ec.vm_stack);
402
403 #if FIBER_USE_NATIVE
404 if (cont->type == CONTINUATION_CONTEXT) {
405 /* cont */
406 ruby_xfree(cont->ensure_array);
407 RUBY_FREE_UNLESS_NULL(cont->machine.stack);
408 }
409 else {
410 /* fiber */
411 rb_fiber_t *fib = (rb_fiber_t*)cont;
412 #if defined(FIBER_USE_COROUTINE)
413 coroutine_destroy(&fib->context);
414 if (fib->ss_sp != NULL) {
415 if (fiber_is_root_p(fib)) {
416 rb_bug("Illegal root fiber parameter");
417 }
418 #ifdef _WIN32
419 VirtualFree((void*)fib->ss_sp, 0, MEM_RELEASE);
420 #else
421 munmap((void*)fib->ss_sp, fib->ss_size);
422 #endif
423 fib->ss_sp = NULL;
424 }
425 #elif defined(_WIN32)
426 if (!fiber_is_root_p(fib)) {
427 /* don't delete root fiber handle */
428 if (fib->fib_handle) {
429 DeleteFiber(fib->fib_handle);
430 }
431 }
432 #else /* not WIN32 */
433 /* fib->ss_sp == NULL is possible for root fiber */
434 if (fib->ss_sp != NULL) {
435 munmap((void*)fib->ss_sp, fib->ss_size);
436 }
437 #endif
438 }
439 #else /* not FIBER_USE_NATIVE */
440 ruby_xfree(cont->ensure_array);
441 RUBY_FREE_UNLESS_NULL(cont->machine.stack);
442 #endif
443 #ifdef __ia64
444 RUBY_FREE_UNLESS_NULL(cont->machine.register_stack);
445 #endif
446 RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
447
448 if (mjit_enabled && cont->mjit_cont != NULL) {
449 mjit_cont_free(cont->mjit_cont);
450 }
451 /* free rb_cont_t or rb_fiber_t */
452 ruby_xfree(ptr);
453 RUBY_FREE_LEAVE("cont");
454 }
455
456 static size_t
457 cont_memsize(const void *ptr)
458 {
459 const rb_context_t *cont = ptr;
460 size_t size = 0;
461
462 size = sizeof(*cont);
463 if (cont->saved_vm_stack.ptr) {
464 #ifdef CAPTURE_JUST_VALID_VM_STACK
465 size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
466 #else
467 size_t n = cont->saved_ec.vm_stack_size;
468 #endif
469 size += n * sizeof(*cont->saved_vm_stack.ptr);
470 }
471
472 if (cont->machine.stack) {
473 size += cont->machine.stack_size * sizeof(*cont->machine.stack);
474 }
475 #ifdef __ia64
476 if (cont->machine.register_stack) {
477 size += cont->machine.register_stack_size * sizeof(*cont->machine.register_stack);
478 }
479 #endif
480 return size;
481 }
482
483 void
484 rb_fiber_mark_self(const rb_fiber_t *fib)
485 {
486 if (fib->cont.self) {
487 rb_gc_mark(fib->cont.self);
488 }
489 else {
490 rb_execution_context_mark(&fib->cont.saved_ec);
491 }
492 }
493
494 static void
495 fiber_mark(void *ptr)
496 {
497 rb_fiber_t *fib = ptr;
498 RUBY_MARK_ENTER("cont");
499 fiber_verify(fib);
500 rb_gc_mark(fib->first_proc);
501 if (fib->prev) rb_fiber_mark_self(fib->prev);
502
503 #if !FIBER_USE_NATIVE
504 if (fib->status == FIBER_TERMINATED) {
505 /* FIBER_TERMINATED fiber should not mark machine stack */
506 if (fib->cont.saved_ec.machine.stack_end != NULL) {
507 fib->cont.saved_ec.machine.stack_end = NULL;
508 }
509 }
510 #endif
511
512 cont_mark(&fib->cont);
513 RUBY_MARK_LEAVE("cont");
514 }
515
516 static void
517 fiber_free(void *ptr)
518 {
519 rb_fiber_t *fib = ptr;
520 RUBY_FREE_ENTER("fiber");
521
522 if (fib->cont.saved_ec.local_storage) {
523 st_free_table(fib->cont.saved_ec.local_storage);
524 }
525
526 cont_free(&fib->cont);
527 RUBY_FREE_LEAVE("fiber");
528 }
529
530 static size_t
531 fiber_memsize(const void *ptr)
532 {
533 const rb_fiber_t *fib = ptr;
534 size_t size = sizeof(*fib);
535 const rb_execution_context_t *saved_ec = &fib->cont.saved_ec;
536 const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);
537
538 /*
539 * vm.c::thread_memsize already counts th->ec->local_storage
540 */
541 if (saved_ec->local_storage && fib != th->root_fiber) {
542 size += st_memsize(saved_ec->local_storage);
543 }
544 size += cont_memsize(&fib->cont);
545 return size;
546 }
547
548 VALUE
549 rb_obj_is_fiber(VALUE obj)
550 {
551 if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
552 return Qtrue;
553 }
554 else {
555 return Qfalse;
556 }
557 }
558
559 static void
560 cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
561 {
562 size_t size;
563
564 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
565 #ifdef __ia64
566 th->ec->machine.register_stack_end = rb_ia64_bsp();
567 #endif
568
569 if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
570 size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
571 cont->machine.stack_src = th->ec->machine.stack_end;
572 }
573 else {
574 size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
575 cont->machine.stack_src = th->ec->machine.stack_start;
576 }
577
578 if (cont->machine.stack) {
579 REALLOC_N(cont->machine.stack, VALUE, size);
580 }
581 else {
582 cont->machine.stack = ALLOC_N(VALUE, size);
583 }
584
585 FLUSH_REGISTER_WINDOWS;
586 MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
587
588 #ifdef __ia64
589 rb_ia64_flushrs();
590 size = cont->machine.register_stack_size = th->ec->machine.register_stack_end - th->ec->machine.register_stack_start;
591 cont->machine.register_stack_src = th->ec->machine.register_stack_start;
592 if (cont->machine.register_stack) {
593 REALLOC_N(cont->machine.register_stack, VALUE, size);
594 }
595 else {
596 cont->machine.register_stack = ALLOC_N(VALUE, size);
597 }
598
599 MEMCPY(cont->machine.register_stack, cont->machine.register_stack_src, VALUE, size);
600 #endif
601 }
602
603 static const rb_data_type_t cont_data_type = {
604 "continuation",
605 {cont_mark, cont_free, cont_memsize,},
606 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
607 };
608
609 static inline void
610 cont_save_thread(rb_context_t *cont, rb_thread_t *th)
611 {
612 rb_execution_context_t *sec = &cont->saved_ec;
613
614 VM_ASSERT(th->status == THREAD_RUNNABLE);
615
616 /* save thread context */
617 *sec = *th->ec;
618
619 /* saved_ec->machine.stack_end should be NULL */
620 /* because GC may happen afterward */
621 sec->machine.stack_end = NULL;
622
623 #ifdef __ia64
624 sec->machine.register_stack_start = NULL;
625 sec->machine.register_stack_end = NULL;
626 #endif
627 }
628
629 static void
630 cont_init_mjit_cont(rb_context_t *cont)
631 {
632 VM_ASSERT(cont->mjit_cont == NULL);
633 if (mjit_enabled) {
634 cont->mjit_cont = mjit_cont_new(&(cont->saved_ec));
635 }
636 }
637
638 static void
639 cont_init(rb_context_t *cont, rb_thread_t *th)
640 {
641 /* save thread context */
642 cont_save_thread(cont, th);
643 cont->saved_ec.thread_ptr = th;
644 cont->saved_ec.local_storage = NULL;
645 cont->saved_ec.local_storage_recursive_hash = Qnil;
646 cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
647 cont_init_mjit_cont(cont);
648 }
649
650 static rb_context_t *
651 cont_new(VALUE klass)
652 {
653 rb_context_t *cont;
654 volatile VALUE contval;
655 rb_thread_t *th = GET_THREAD();
656
657 THREAD_MUST_BE_RUNNING(th);
658 contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
659 cont->self = contval;
660 cont_init(cont, th);
661 return cont;
662 }
663
664 void
665 rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
666 {
667 // Currently this function is meant for root_fiber. Others go through cont_new.
668 // XXX: Is this mjit_cont `mjit_cont_free`d?
669 cont_init_mjit_cont(&fiber->cont);
670 }
671
672 #if 0
673 void
674 show_vm_stack(const rb_execution_context_t *ec)
675 {
676 VALUE *p = ec->vm_stack;
677 while (p < ec->cfp->sp) {
678 fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
679 rb_obj_info_dump(*p);
680 p++;
681 }
682 }
683
684 void
685 show_vm_pcs(const rb_control_frame_t *cfp,
686 const rb_control_frame_t *end_of_cfp)
687 {
688 int i=0;
689 while (cfp != end_of_cfp) {
690 int pc = 0;
691 if (cfp->iseq) {
692 pc = cfp->pc - cfp->iseq->body->iseq_encoded;
693 }
694 fprintf(stderr, "%2d pc: %d\n", i++, pc);
695 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
696 }
697 }
698 #endif
699 COMPILER_WARNING_PUSH
700 #ifdef __clang__
701 COMPILER_WARNING_IGNORED(-Wduplicate-decl-specifier)
702 #endif
703 static VALUE
704 cont_capture(volatile int *volatile stat)
705 {
706 rb_context_t *volatile cont;
707 rb_thread_t *th = GET_THREAD();
708 volatile VALUE contval;
709 const rb_execution_context_t *ec = th->ec;
710
711 THREAD_MUST_BE_RUNNING(th);
712 rb_vm_stack_to_heap(th->ec);
713 cont = cont_new(rb_cContinuation);
714 contval = cont->self;
715
716 #ifdef CAPTURE_JUST_VALID_VM_STACK
717 cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
718 cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
719 cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
720 MEMCPY(cont->saved_vm_stack.ptr,
721 ec->vm_stack,
722 VALUE, cont->saved_vm_stack.slen);
723 MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
724 (VALUE*)ec->cfp,
725 VALUE,
726 cont->saved_vm_stack.clen);
727 #else
728 cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
729 MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
730 #endif
731 rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
732 cont_save_machine_stack(th, cont);
733
734 /* back up ensure_list into an array (terminated by an entry whose marker is 0) for searching from another context */
735 {
736 rb_ensure_list_t *p;
737 int size = 0;
738 rb_ensure_entry_t *entry;
739 for (p=th->ec->ensure_list; p; p=p->next)
740 size++;
741 entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
742 for (p=th->ec->ensure_list; p; p=p->next) {
743 if (!p->entry.marker)
744 p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
745 *entry++ = p->entry;
746 }
747 entry->marker = 0;
748 }
749
750 if (ruby_setjmp(cont->jmpbuf)) {
751 VALUE value;
752
753 VAR_INITIALIZED(cont);
754 value = cont->value;
755 if (cont->argc == -1) rb_exc_raise(value);
756 cont->value = Qnil;
757 *stat = 1;
758 return value;
759 }
760 else {
761 *stat = 0;
762 return contval;
763 }
764 }
765 COMPILER_WARNING_POP
766
767 static inline void
768 fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fib)
769 {
770 ec_switch(th, fib);
771 VM_ASSERT(th->ec->fiber_ptr == fib);
772 }
773
774 static inline void
775 cont_restore_thread(rb_context_t *cont)
776 {
777 rb_thread_t *th = GET_THREAD();
778
779 /* restore thread context */
780 if (cont->type == CONTINUATION_CONTEXT) {
781 /* continuation */
782 rb_execution_context_t *sec = &cont->saved_ec;
783 rb_fiber_t *fib = NULL;
784
785 if (sec->fiber_ptr != NULL) {
786 fib = sec->fiber_ptr;
787 }
788 else if (th->root_fiber) {
789 fib = th->root_fiber;
790 }
791
792 if (fib && th->ec != &fib->cont.saved_ec) {
793 ec_switch(th, fib);
794 }
795
796 if (th->ec->trace_arg != sec->trace_arg) {
797 rb_raise(rb_eRuntimeError, "can't call across trace_func");
798 }
799
800 /* copy vm stack */
801 #ifdef CAPTURE_JUST_VALID_VM_STACK
802 MEMCPY(th->ec->vm_stack,
803 cont->saved_vm_stack.ptr,
804 VALUE, cont->saved_vm_stack.slen);
805 MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
806 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
807 VALUE, cont->saved_vm_stack.clen);
808 #else
809 MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
810 #endif
811 /* other members of ec */
812
813 th->ec->cfp = sec->cfp;
814 th->ec->raised_flag = sec->raised_flag;
815 th->ec->tag = sec->tag;
816 th->ec->protect_tag = sec->protect_tag;
817 th->ec->root_lep = sec->root_lep;
818 th->ec->root_svar = sec->root_svar;
819 th->ec->ensure_list = sec->ensure_list;
820 th->ec->errinfo = sec->errinfo;
821
822 VM_ASSERT(th->ec->vm_stack != NULL);
823 }
824 else {
825 /* fiber */
826 fiber_restore_thread(th, (rb_fiber_t*)cont);
827 }
828 }
829
830 #if FIBER_USE_NATIVE
831 #if defined(FIBER_USE_COROUTINE)
832 static COROUTINE
833 fiber_entry(coroutine_context *from, coroutine_context *to)
834 {
835 rb_fiber_start();
836 }
837 #elif defined(_WIN32)
838 static void
839 fiber_set_stack_location(void)
840 {
841 rb_thread_t *th = GET_THREAD();
842 VALUE *ptr;
843
844 SET_MACHINE_STACK_END(&ptr);
845 th->ec->machine.stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
846 }
847
848 NORETURN(static VOID CALLBACK fiber_entry(void *arg));
849 static VOID CALLBACK
850 fiber_entry(void *arg)
851 {
852 fiber_set_stack_location();
853 rb_fiber_start();
854 }
855 #else
856 NORETURN(static void fiber_entry(void *arg));
857 static void
858 fiber_entry(void *arg)
859 {
860 rb_fiber_start();
861 }
862 #endif
863 #endif
864
865 #ifdef FIBER_ALLOCATE_STACK
866 /*
867 * FreeBSD requires that the first (i.e. addr) argument of mmap(2) is not NULL
868 * when MAP_STACK is passed.
869 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
870 */
871 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
872 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
873 #else
874 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
875 #endif
876
877 #define ERRNOMSG strerror(errno)
878
879 static char*
880 fiber_machine_stack_alloc(size_t size)
881 {
882 char *ptr;
883 #ifdef _WIN32
884 DWORD old_protect;
885 #endif
886
887 if (machine_stack_cache_index > 0) {
888 if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
889 ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
890 machine_stack_cache_index--;
891 machine_stack_cache[machine_stack_cache_index].ptr = NULL;
892 machine_stack_cache[machine_stack_cache_index].size = 0;
893 } else {
894 /* TODO handle multiple machine stack size */
895 rb_bug("machine_stack_cache size is not canonicalized");
896 }
897 } else {
898 #ifdef _WIN32
899 ptr = VirtualAlloc(0, size, MEM_COMMIT, PAGE_READWRITE);
900
901 if (!ptr) {
902 rb_raise(rb_eFiberError, "can't allocate machine stack to fiber: %s", ERRNOMSG);
903 }
904
905 if (!VirtualProtect(ptr, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
906 rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
907 }
908 #else
909 void *page;
910 STACK_GROW_DIR_DETECTION;
911
912 errno = 0;
913 ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
914 if (ptr == MAP_FAILED) {
915 rb_raise(rb_eFiberError, "can't alloc machine stack to fiber: %s", ERRNOMSG);
916 }
917
918 /* guard page setup */
919 page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
920 if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
921 rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
922 }
923 #endif
924 }
925
926 return ptr;
927 }
928 #endif
929
930 #if FIBER_USE_NATIVE
931 static void
932 fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
933 {
934 rb_execution_context_t *sec = &fib->cont.saved_ec;
935
936 #if defined(FIBER_USE_COROUTINE)
937 char *ptr;
938 STACK_GROW_DIR_DETECTION;
939
940 ptr = fiber_machine_stack_alloc(size);
941 fib->ss_sp = ptr;
942 fib->ss_size = size;
943 coroutine_initialize(&fib->context, fiber_entry, ptr+size, size);
944 sec->machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
945 sec->machine.stack_maxsize = size - RB_PAGE_SIZE;
946 #elif defined(_WIN32)
947 # if defined(_MSC_VER) && _MSC_VER <= 1200
948 # define CreateFiberEx(cs, stacksize, flags, entry, param) \
949 CreateFiber((stacksize), (entry), (param))
950 # endif
951 fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
952 if (!fib->fib_handle) {
953 /* try to release unnecessary fibers & retry to create */
954 rb_gc();
955 fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
956 if (!fib->fib_handle) {
957 rb_raise(rb_eFiberError, "can't create fiber");
958 }
959 }
960 sec->machine.stack_maxsize = size;
961 #else /* not WIN32 */
962 char *ptr;
963 STACK_GROW_DIR_DETECTION;
964
965 ptr = fiber_machine_stack_alloc(size);
966 fib->ss_sp = ptr;
967 fib->ss_size = size;
968 if (fiber_context_create(&fib->context, fiber_entry, NULL, fib->ss_sp, fib->ss_size)) {
969 rb_raise(rb_eFiberError, "can't get context for creating fiber: %s", ERRNOMSG);
970 }
971 sec->machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
972 sec->machine.stack_maxsize = size - RB_PAGE_SIZE;
973 #endif
974 #ifdef __ia64
975 sec->machine.register_stack_maxsize = sec->machine.stack_maxsize;
976 #endif
977 }
978
979 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
980
981 static void
982 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
983 {
984 rb_thread_t *th = GET_THREAD();
985
986 /* save oldfib's machine stack / TODO: is it needed? */
987 if (!FIBER_TERMINATED_P(oldfib)) {
988 STACK_GROW_DIR_DETECTION;
989 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
990 if (STACK_DIR_UPPER(0, 1)) {
991 oldfib->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
992 oldfib->cont.machine.stack = th->ec->machine.stack_end;
993 }
994 else {
995 oldfib->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
996 oldfib->cont.machine.stack = th->ec->machine.stack_start;
997 }
998 }
999
1000 /* exchange machine_stack_start between oldfib and newfib */
1001 oldfib->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
1002
1003 /* oldfib->machine.stack_end should be NULL */
1004 oldfib->cont.saved_ec.machine.stack_end = NULL;
1005
1006 /* restore thread context */
1007 fiber_restore_thread(th, newfib);
1008
1009 /* swap machine context */
1010 #if defined(FIBER_USE_COROUTINE)
1011 coroutine_transfer(&oldfib->context, &newfib->context);
1012 #elif defined(_WIN32)
1013 SwitchToFiber(newfib->fib_handle);
1014 #else
1015 if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib) {
1016 rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
1017 }
1018 swapcontext(&oldfib->context, &newfib->context);
1019 #endif
1020 }
1021 #endif /* FIBER_USE_NATIVE */
1022
1023 NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
1024
1025 static void
1026 cont_restore_1(rb_context_t *cont)
1027 {
1028 cont_restore_thread(cont);
1029
1030 /* restore machine stack */
1031 #ifdef _M_AMD64
1032 {
1033 /* workaround for x64 SEH */
1034 jmp_buf buf;
1035 setjmp(buf);
1036 ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
1037 ((_JUMP_BUFFER*)(&buf))->Frame;
1038 }
1039 #endif
1040 if (cont->machine.stack_src) {
1041 FLUSH_REGISTER_WINDOWS;
1042 MEMCPY(cont->machine.stack_src, cont->machine.stack,
1043 VALUE, cont->machine.stack_size);
1044 }
1045
1046 #ifdef __ia64
1047 if (cont->machine.register_stack_src) {
1048 MEMCPY(cont->machine.register_stack_src, cont->machine.register_stack,
1049 VALUE, cont->machine.register_stack_size);
1050 }
1051 #endif
1052
1053 ruby_longjmp(cont->jmpbuf, 1);
1054 }
1055
1056 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
1057
1058 #ifdef __ia64
1059 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
1060 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
1061 static volatile int C(a), C(b), C(c), C(d), C(e);
1062 static volatile int C(f), C(g), C(h), C(i), C(j);
1063 static volatile int C(k), C(l), C(m), C(n), C(o);
1064 static volatile int C(p), C(q), C(r), C(s), C(t);
1065 #if 0
1066 {/* the above lines make cc-mode.el confused so much */}
1067 #endif
1068 int rb_dummy_false = 0;
1069 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
1070 static void
1071 register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
1072 {
1073 if (rb_dummy_false) {
1074 /* use registers as much as possible */
1075 E(a) = E(b) = E(c) = E(d) = E(e) =
1076 E(f) = E(g) = E(h) = E(i) = E(j) =
1077 E(k) = E(l) = E(m) = E(n) = E(o) =
1078 E(p) = E(q) = E(r) = E(s) = E(t) = 0;
1079 E(a) = E(b) = E(c) = E(d) = E(e) =
1080 E(f) = E(g) = E(h) = E(i) = E(j) =
1081 E(k) = E(l) = E(m) = E(n) = E(o) =
1082 E(p) = E(q) = E(r) = E(s) = E(t) = 0;
1083 }
1084 if (curr_bsp < cont->machine.register_stack_src+cont->machine.register_stack_size) {
1085 register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
1086 }
1087 cont_restore_0(cont, vp);
1088 }
1089 #undef C
1090 #undef E
1091 #endif
1092
1093 static void
1094 cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
1095 {
1096 if (cont->machine.stack_src) {
1097 #ifdef HAVE_ALLOCA
1098 #define STACK_PAD_SIZE 1
1099 #else
1100 #define STACK_PAD_SIZE 1024
1101 #endif
1102 VALUE space[STACK_PAD_SIZE];
1103
1104 #if !STACK_GROW_DIRECTION
1105 if (addr_in_prev_frame > &space[0]) {
1106 /* Stack grows downward */
1107 #endif
1108 #if STACK_GROW_DIRECTION <= 0
1109 volatile VALUE *const end = cont->machine.stack_src;
1110 if (&space[0] > end) {
1111 # ifdef HAVE_ALLOCA
1112 volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
1113 space[0] = *sp;
1114 # else
1115 cont_restore_0(cont, &space[0]);
1116 # endif
1117 }
1118 #endif
1119 #if !STACK_GROW_DIRECTION
1120 }
1121 else {
1122 /* Stack grows upward */
1123 #endif
1124 #if STACK_GROW_DIRECTION >= 0
1125 volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
1126 if (&space[STACK_PAD_SIZE] < end) {
1127 # ifdef HAVE_ALLOCA
1128 volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
1129 space[0] = *sp;
1130 # else
1131 cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
1132 # endif
1133 }
1134 #endif
1135 #if !STACK_GROW_DIRECTION
1136 }
1137 #endif
1138 }
1139 cont_restore_1(cont);
1140 }
1141 #ifdef __ia64
1142 #define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
1143 #endif
1144
1145 /*
1146 * Document-class: Continuation
1147 *
1148 * Continuation objects are generated by Kernel#callcc,
1149 * after having +require+d <i>continuation</i>. They hold
1150 * a return address and execution context, allowing a nonlocal return
1151 * to the end of the <code>callcc</code> block from anywhere within a
1152 * program. Continuations are somewhat analogous to a structured
1153 * version of C's <code>setjmp/longjmp</code> (although they contain
1154 * more state, so you might consider them closer to threads).
1155 *
1156 * For instance:
1157 *
1158 * require "continuation"
1159 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1160 * callcc{|cc| $cc = cc}
1161 * puts(message = arr.shift)
1162 * $cc.call unless message =~ /Max/
1163 *
1164 * <em>produces:</em>
1165 *
1166 * Freddie
1167 * Herbie
1168 * Ron
1169 * Max
1170 *
1171 * You can also call callcc from other methods:
1172 *
1173 * require "continuation"
1174 *
1175 * def g
1176 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1177 * cc = callcc { |cc| cc }
1178 * puts arr.shift
1179 * return cc, arr.size
1180 * end
1181 *
1182 * def f
1183 * c, size = g
1184 * c.call(c) if size > 1
1185 * end
1186 *
1187 * f
1188 *
1189 * This (somewhat contrived) example allows the inner loop to abandon
1190 * processing early:
1191 *
1192 * require "continuation"
1193 * callcc {|cont|
1194 * for i in 0..4
1195 * print "\n#{i}: "
1196 * for j in i*5...(i+1)*5
1197 * cont.call() if j == 17
1198 * printf "%3d", j
1199 * end
1200 * end
1201 * }
1202 * puts
1203 *
1204 * <em>produces:</em>
1205 *
1206 * 0: 0 1 2 3 4
1207 * 1: 5 6 7 8 9
1208 * 2: 10 11 12 13 14
1209 * 3: 15 16
1210 */
1211
1212 /*
1213 * call-seq:
1214 * callcc {|cont| block } -> obj
1215 *
1216 * Generates a Continuation object, which it passes to
1217 * the associated block. You need to <code>require
1218 * 'continuation'</code> before using this method. Performing a
1219 * <em>cont</em><code>.call</code> will cause the #callcc
1220 * to return (as will falling through the end of the block). The
1221 * value returned by the #callcc is the value of the
1222 * block, or the value passed to <em>cont</em><code>.call</code>. See
1223 * class Continuation for more details. Also see
1224 * Kernel#throw for an alternative mechanism for
1225 * unwinding a call stack.
1226 */
1227
1228 static VALUE
1229 rb_callcc(VALUE self)
1230 {
1231 volatile int called;
1232 volatile VALUE val = cont_capture(&called);
1233
1234 if (called) {
1235 return val;
1236 }
1237 else {
1238 return rb_yield(val);
1239 }
1240 }
1241
1242 static VALUE
1243 make_passing_arg(int argc, const VALUE *argv)
1244 {
1245 switch (argc) {
1246 case 0:
1247 return Qnil;
1248 case 1:
1249 return argv[0];
1250 default:
1251 return rb_ary_new4(argc, argv);
1252 }
1253 }
1254
1255 /* CAUTION!! : Currently, errors raised inside rollback_func are not supported. */
1256 /* Same as rb_protect if rollback_func is set to NULL. */
1257 void
1258 ruby_register_rollback_func_for_ensure(VALUE (*ensure_func)(ANYARGS), VALUE (*rollback_func)(ANYARGS))
1259 {
1260 st_table **table_p = &GET_VM()->ensure_rollback_table;
1261 if (UNLIKELY(*table_p == NULL)) {
1262 *table_p = st_init_numtable();
1263 }
1264 st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
1265 }
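/*
 * A minimal usage sketch (illustrative only): a C extension registers a
 * rollback function paired with its ensure function, so that a continuation
 * captured inside the corresponding rb_ensure block can be re-entered
 * (rollback_ensure_stack below re-runs the registered rollback funcs).
 * The names my_ensure, my_rollback and Init_my_ext are hypothetical, not
 * part of this file.
 *
 *     static VALUE my_ensure(VALUE state)   { return Qnil; }   (releases a resource)
 *     static VALUE my_rollback(VALUE state) { return Qnil; }   (re-acquires it on re-entry)
 *
 *     void
 *     Init_my_ext(void)
 *     {
 *         ruby_register_rollback_func_for_ensure(my_ensure, my_rollback);
 *     }
 */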
1266
1267 static inline VALUE
1268 lookup_rollback_func(VALUE (*ensure_func)(ANYARGS))
1269 {
1270 st_table *table = GET_VM()->ensure_rollback_table;
1271 st_data_t val;
1272 if (table && st_lookup(table, (st_data_t)ensure_func, &val))
1273 return (VALUE) val;
1274 return Qundef;
1275 }
1276
1277
1278 static inline void
1279 rollback_ensure_stack(VALUE self, rb_ensure_list_t *current, rb_ensure_entry_t *target)
1280 {
1281 rb_ensure_list_t *p;
1282 rb_ensure_entry_t *entry;
1283 size_t i, j;
1284 size_t cur_size;
1285 size_t target_size;
1286 size_t base_point;
1287 VALUE (*func)(ANYARGS);
1288
1289 cur_size = 0;
1290 for (p=current; p; p=p->next)
1291 cur_size++;
1292 target_size = 0;
1293 for (entry=target; entry->marker; entry++)
1294 target_size++;
1295
1296 /* search common stack point */
1297 p = current;
1298 base_point = cur_size;
1299 while (base_point) {
1300 if (target_size >= base_point &&
1301 p->entry.marker == target[target_size - base_point].marker)
1302 break;
1303 base_point --;
1304 p = p->next;
1305 }
1306
1307 /* rollback function check */
1308 for (i=0; i < target_size - base_point; i++) {
1309 if (!lookup_rollback_func(target[i].e_proc)) {
1310 rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
1311 }
1312 }
1313 /* pop ensure stack */
1314 while (cur_size > base_point) {
1315 /* escape from ensure block */
1316 (*current->entry.e_proc)(current->entry.data2);
1317 current = current->next;
1318 cur_size--;
1319 }
1320 /* push ensure stack */
1321 for (j = 0; j < i; j++) {
1322 func = (VALUE (*)(ANYARGS)) lookup_rollback_func(target[i - j - 1].e_proc);
1323 if ((VALUE)func != Qundef) {
1324 (*func)(target[i - j - 1].data2);
1325 }
1326 }
1327 }
1328
1329 /*
1330 * call-seq:
1331 * cont.call(args, ...)
1332 * cont[args, ...]
1333 *
1334 * Invokes the continuation. The program continues from the end of the
1335 * <code>callcc</code> block. If no arguments are given, the original
1336 * <code>callcc</code> returns <code>nil</code>. If one argument is
1337 * given, <code>callcc</code> returns it. Otherwise, an array
1338 * containing <i>args</i> is returned.
1339 *
1340 * callcc {|cont| cont.call } #=> nil
1341 * callcc {|cont| cont.call 1 } #=> 1
1342 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1343 */
1344
1345 static VALUE
1346 rb_cont_call(int argc, VALUE *argv, VALUE contval)
1347 {
1348 rb_context_t *cont = cont_ptr(contval);
1349 rb_thread_t *th = GET_THREAD();
1350
1351 if (cont_thread_value(cont) != th->self) {
1352 rb_raise(rb_eRuntimeError, "continuation called across threads");
1353 }
1354 if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
1355 rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
1356 }
1357 if (cont->saved_ec.fiber_ptr) {
1358 if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
1359 rb_raise(rb_eRuntimeError, "continuation called across fiber");
1360 }
1361 }
1362 rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);
1363
1364 cont->argc = argc;
1365 cont->value = make_passing_arg(argc, argv);
1366
1367 cont_restore_0(cont, &contval);
1368 return Qnil; /* unreachable */
1369 }
1370
1371 /*********/
1372 /* fiber */
1373 /*********/
1374
1375 /*
1376 * Document-class: Fiber
1377 *
1378 * Fibers are primitives for implementing light weight cooperative
1379 * concurrency in Ruby. Basically they are a means of creating code blocks
1380 * that can be paused and resumed, much like threads. The main difference
1381 * is that they are never preempted and that the scheduling must be done by
1382 * the programmer and not the VM.
1383 *
1384 * As opposed to other stackless light weight concurrency models, each fiber
1385 * comes with a stack. This enables the fiber to be paused from deeply
1386 * nested function calls within the fiber block. See the ruby(1)
1387 * manpage to configure the size of the fiber stack(s).
1388 *
1389 * When a fiber is created it will not run automatically. Rather it must
1390 * be explicitly asked to run using the <code>Fiber#resume</code> method.
1391 * The code running inside the fiber can give up control by calling
1392 * <code>Fiber.yield</code>, in which case it yields control back to the caller
1393 * (the caller of <code>Fiber#resume</code>).
1394 *
1395 * Upon yielding or termination the Fiber returns the value of the last
1396 * executed expression.
1397 *
1398 * For instance:
1399 *
1400 * fiber = Fiber.new do
1401 * Fiber.yield 1
1402 * 2
1403 * end
1404 *
1405 * puts fiber.resume
1406 * puts fiber.resume
1407 * puts fiber.resume
1408 *
1409 * <em>produces</em>
1410 *
1411 * 1
1412 * 2
1413 * FiberError: dead fiber called
1414 *
1415 * The <code>Fiber#resume</code> method accepts an arbitrary number of
1416 * parameters. If it is the first call to <code>resume</code>, they will
1417 * be passed as block arguments; otherwise they will be the return
1418 * value of the call to <code>Fiber.yield</code>.
1419 *
1420 * Example:
1421 *
1422 * fiber = Fiber.new do |first|
1423 * second = Fiber.yield first + 2
1424 * end
1425 *
1426 * puts fiber.resume 10
1427 * puts fiber.resume 14
1428 * puts fiber.resume 18
1429 *
1430 * <em>produces</em>
1431 *
1432 * 12
1433 * 14
1434 * FiberError: dead fiber called
1435 *
1436 */
1437
1438 static const rb_data_type_t fiber_data_type = {
1439 "fiber",
1440 {fiber_mark, fiber_free, fiber_memsize,},
1441 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1442 };
1443
1444 static VALUE
1445 fiber_alloc(VALUE klass)
1446 {
1447 return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1448 }
1449
1450 static rb_fiber_t*
1451 fiber_t_alloc(VALUE fibval)
1452 {
1453 rb_fiber_t *fib;
1454 rb_thread_t *th = GET_THREAD();
1455
1456 if (DATA_PTR(fibval) != 0) {
1457 rb_raise(rb_eRuntimeError, "cannot initialize twice");
1458 }
1459
1460 THREAD_MUST_BE_RUNNING(th);
1461 fib = ZALLOC(rb_fiber_t);
1462 fib->cont.self = fibval;
1463 fib->cont.type = FIBER_CONTEXT;
1464 cont_init(&fib->cont, th);
1465 fib->cont.saved_ec.fiber_ptr = fib;
1466 fib->prev = NULL;
1467
1468 /* fib->status == 0 == CREATED,
1469 * so we don't need to set the status explicitly: fiber_status_set(fib, FIBER_CREATED); */
1470 VM_ASSERT(FIBER_CREATED_P(fib));
1471
1472 DATA_PTR(fibval) = fib;
1473
1474 return fib;
1475 }
1476
1477 rb_control_frame_t *
1478 rb_vm_push_frame(rb_execution_context_t *sec,
1479 const rb_iseq_t *iseq,
1480 VALUE type,
1481 VALUE self,
1482 VALUE specval,
1483 VALUE cref_or_me,
1484 const VALUE *pc,
1485 VALUE *sp,
1486 int local_size,
1487 int stack_max);
1488
1489 static VALUE
1490 fiber_init(VALUE fibval, VALUE proc)
1491 {
1492 rb_fiber_t *fib = fiber_t_alloc(fibval);
1493 rb_context_t *cont = &fib->cont;
1494 rb_execution_context_t *sec = &cont->saved_ec;
1495 rb_thread_t *cth = GET_THREAD();
1496 rb_vm_t *vm = cth->vm;
1497 size_t fib_stack_bytes = vm->default_params.fiber_vm_stack_size;
1498 size_t thr_stack_bytes = vm->default_params.thread_vm_stack_size;
1499 VALUE *vm_stack;
1500
1501 /* initialize cont */
1502 cont->saved_vm_stack.ptr = NULL;
1503 if (fib_stack_bytes == thr_stack_bytes) {
1504 vm_stack = rb_thread_recycle_stack(fib_stack_bytes / sizeof(VALUE));
1505 }
1506 else {
1507 vm_stack = ruby_xmalloc(fib_stack_bytes);
1508 }
1509 rb_ec_set_vm_stack(sec, vm_stack, fib_stack_bytes / sizeof(VALUE));
1510 sec->cfp = (void *)(sec->vm_stack + sec->vm_stack_size);
1511
1512 rb_vm_push_frame(sec,
1513 NULL,
1514 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME,
1515 Qnil, /* self */
1516 VM_BLOCK_HANDLER_NONE,
1517 0, /* specval */
1518 NULL, /* pc */
1519 sec->vm_stack, /* sp */
1520 0, /* local_size */
1521 0);
1522
1523 sec->tag = NULL;
1524 sec->local_storage = NULL;
1525 sec->local_storage_recursive_hash = Qnil;
1526 sec->local_storage_recursive_hash_for_trace = Qnil;
1527
1528 fib->first_proc = proc;
1529
1530 #if !FIBER_USE_NATIVE
1531 MEMCPY(&cont->jmpbuf, &cth->root_jmpbuf, rb_jmpbuf_t, 1);
1532 #endif
1533
1534 return fibval;
1535 }
1536
1537 /* :nodoc: */
1538 static VALUE
1539 rb_fiber_init(VALUE fibval)
1540 {
1541 return fiber_init(fibval, rb_block_proc());
1542 }
1543
1544 VALUE
1545 rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
1546 {
1547 return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
1548 }
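/*
 * A minimal C-level usage sketch (illustrative only). fiber_body and the
 * surrounding setup are hypothetical; the function is invoked through the
 * proc created by rb_proc_new, so it is written with the block-call-function
 * argument list. rb_fiber_resume and rb_fiber_yield are the functions
 * defined later in this file.
 *
 *     static VALUE
 *     fiber_body(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg))
 *     {
 *         VALUE one = INT2FIX(1);
 *         rb_fiber_yield(1, &one);      (suspends, returning 1 to the resumer)
 *         return INT2FIX(2);            (final value of the fiber block)
 *     }
 *
 *     VALUE fib    = rb_fiber_new(fiber_body, Qnil);
 *     VALUE first  = rb_fiber_resume(fib, 0, NULL);     => 1
 *     VALUE second = rb_fiber_resume(fib, 0, NULL);     => 2
 */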
1549
1550 static void rb_fiber_terminate(rb_fiber_t *fib, int need_interrupt);
1551
1552 void
1553 rb_fiber_start(void)
1554 {
1555 rb_thread_t * volatile th = GET_THREAD();
1556 rb_fiber_t *fib = th->ec->fiber_ptr;
1557 rb_proc_t *proc;
1558 enum ruby_tag_type state;
1559 int need_interrupt = TRUE;
1560
1561 VM_ASSERT(th->ec == ruby_current_execution_context_ptr);
1562 VM_ASSERT(FIBER_RESUMED_P(fib));
1563
1564 EC_PUSH_TAG(th->ec);
1565 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1566 rb_context_t *cont = &VAR_FROM_MEMORY(fib)->cont;
1567 int argc;
1568 const VALUE *argv, args = cont->value;
1569 GetProcPtr(fib->first_proc, proc);
1570 argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
1571 cont->value = Qnil;
1572 th->ec->errinfo = Qnil;
1573 th->ec->root_lep = rb_vm_proc_local_ep(fib->first_proc);
1574 th->ec->root_svar = Qfalse;
1575
1576 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1577 cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, VM_BLOCK_HANDLER_NONE);
1578 }
1579 EC_POP_TAG();
1580
1581 if (state) {
1582 VALUE err = th->ec->errinfo;
1583 VM_ASSERT(FIBER_RESUMED_P(fib));
1584
1585 if (state == TAG_RAISE || state == TAG_FATAL) {
1586 rb_threadptr_pending_interrupt_enque(th, err);
1587 }
1588 else {
1589 err = rb_vm_make_jump_tag_but_local_jump(state, err);
1590 if (!NIL_P(err)) {
1591 rb_threadptr_pending_interrupt_enque(th, err);
1592 }
1593 }
1594 need_interrupt = TRUE;
1595 }
1596
1597 rb_fiber_terminate(fib, need_interrupt);
1598 VM_UNREACHABLE(rb_fiber_start);
1599 }
1600
1601 static rb_fiber_t *
1602 root_fiber_alloc(rb_thread_t *th)
1603 {
1604 VALUE fibval = fiber_alloc(rb_cFiber);
1605 rb_fiber_t *fib = th->ec->fiber_ptr;
1606
1607 VM_ASSERT(DATA_PTR(fibval) == NULL);
1608 VM_ASSERT(fib->cont.type == FIBER_CONTEXT);
1609 VM_ASSERT(fib->status == FIBER_RESUMED);
1610
1611 th->root_fiber = fib;
1612 DATA_PTR(fibval) = fib;
1613 fib->cont.self = fibval;
1614
1615 #if FIBER_USE_NATIVE
1616 #if defined(FIBER_USE_COROUTINE)
1617 coroutine_initialize(&fib->context, NULL, NULL, 0);
1618 #elif defined(_WIN32)
1619 /* setup fib_handle for root Fiber */
1620 if (fib->fib_handle == 0) {
1621 if ((fib->fib_handle = ConvertThreadToFiber(0)) == 0) {
1622 rb_bug("root_fiber_alloc: ConvertThreadToFiber() failed - %s\n", rb_w32_strerror(-1));
1623 }
1624 }
1625 else {
1626 rb_bug("root_fiber_alloc: fib_handle is not NULL.");
1627 }
1628 #endif
1629 #endif
1630
1631 return fib;
1632 }
1633
1634 void
1635 rb_threadptr_root_fiber_setup(rb_thread_t *th)
1636 {
1637 rb_fiber_t *fib = ruby_mimmalloc(sizeof(rb_fiber_t));
1638 MEMZERO(fib, rb_fiber_t, 1);
1639 fib->cont.type = FIBER_CONTEXT;
1640 fib->cont.saved_ec.fiber_ptr = fib;
1641 fib->cont.saved_ec.thread_ptr = th;
1642 fiber_status_set(fib, FIBER_RESUMED); /* skip CREATED */
1643 th->ec = &fib->cont.saved_ec;
1644
1645 /* NOTE: On WIN32, fib_handle is not allocated yet. */
1646 }
1647
1648 void
1649 rb_threadptr_root_fiber_release(rb_thread_t *th)
1650 {
1651 if (th->root_fiber) {
1652 /* ignore. A root fiber object will free th->ec */
1653 }
1654 else {
1655 VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
1656 VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
1657 fiber_free(th->ec->fiber_ptr);
1658
1659 if (th->ec == ruby_current_execution_context_ptr) {
1660 ruby_current_execution_context_ptr = NULL;
1661 }
1662 th->ec = NULL;
1663 }
1664 }
1665
1666 static inline rb_fiber_t*
1667 fiber_current(void)
1668 {
1669 rb_execution_context_t *ec = GET_EC();
1670 if (ec->fiber_ptr->cont.self == 0) {
1671 root_fiber_alloc(rb_ec_thread_ptr(ec));
1672 }
1673 return ec->fiber_ptr;
1674 }
1675
1676 static inline rb_fiber_t*
1677 return_fiber(void)
1678 {
1679 rb_fiber_t *fib = fiber_current();
1680 rb_fiber_t *prev = fib->prev;
1681
1682 if (!prev) {
1683 rb_thread_t *th = GET_THREAD();
1684 rb_fiber_t *root_fiber = th->root_fiber;
1685
1686 VM_ASSERT(root_fiber != NULL);
1687
1688 if (root_fiber == fib) {
1689 rb_raise(rb_eFiberError, "can't yield from root fiber");
1690 }
1691 return root_fiber;
1692 }
1693 else {
1694 fib->prev = NULL;
1695 return prev;
1696 }
1697 }
1698
1699 VALUE
1700 rb_fiber_current(void)
1701 {
1702 return fiber_current()->cont.self;
1703 }
1704
1705 static inline VALUE
1706 fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
1707 {
1708 rb_fiber_t *fib;
1709
1710 if (th->ec->fiber_ptr != NULL) {
1711 fib = th->ec->fiber_ptr;
1712 }
1713 else {
1714 /* create root fiber */
1715 fib = root_fiber_alloc(th);
1716 }
1717
1718 VM_ASSERT(FIBER_RESUMED_P(fib) || FIBER_TERMINATED_P(fib));
1719 VM_ASSERT(FIBER_RUNNABLE_P(next_fib));
1720
1721 #if FIBER_USE_NATIVE
1722 if (FIBER_CREATED_P(next_fib)) {
1723 fiber_initialize_machine_stack_context(next_fib, th->vm->default_params.fiber_machine_stack_size);
1724 }
1725 #endif
1726
1727 if (FIBER_RESUMED_P(fib)) fiber_status_set(fib, FIBER_SUSPENDED);
1728
1729 #if FIBER_USE_NATIVE == 0
1730 /* the stack should be (re-)allocated before fib->status changes, so that fiber_verify() passes */
1731 cont_save_machine_stack(th, &fib->cont);
1732 #endif
1733
1734 fiber_status_set(next_fib, FIBER_RESUMED);
1735
1736 #if FIBER_USE_NATIVE
1737 fiber_setcontext(next_fib, fib);
1738 /* restored */
1739 #ifdef MAX_MACHINE_STACK_CACHE
1740 if (terminated_machine_stack.ptr) {
1741 if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
1742 machine_stack_cache[machine_stack_cache_index++] = terminated_machine_stack;
1743 }
1744 else {
1745 if (terminated_machine_stack.ptr != fib->cont.machine.stack) {
1746 #ifdef _WIN32
1747 VirtualFree(terminated_machine_stack.ptr, 0, MEM_RELEASE);
1748 #else
1749 munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
1750 #endif
1751 }
1752 else {
1753 rb_bug("terminated fiber resumed");
1754 }
1755 }
1756 terminated_machine_stack.ptr = NULL;
1757 terminated_machine_stack.size = 0;
1758 }
1759 #endif /* MAX_MACHINE_STACK_CACHE */
1760 fib = th->ec->fiber_ptr;
1761 if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
1762 return fib->cont.value;
1763
1764 #else /* FIBER_USE_NATIVE */
1765 fib->cont.saved_ec.machine.stack_end = NULL;
1766 if (ruby_setjmp(fib->cont.jmpbuf)) {
1767 /* restored */
1768 fib = th->ec->fiber_ptr;
1769 if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
1770 if (next_fib->cont.value == Qundef) {
1771 cont_restore_0(&next_fib->cont, &next_fib->cont.value);
1772 VM_UNREACHABLE(fiber_store);
1773 }
1774 return fib->cont.value;
1775 }
1776 else {
1777 VALUE undef = Qundef;
1778 cont_restore_0(&next_fib->cont, &undef);
1779 VM_UNREACHABLE(fiber_store);
1780 }
1781 #endif /* FIBER_USE_NATIVE */
1782 }
1783
1784 static inline VALUE
1785 fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
1786 {
1787 VALUE value;
1788 rb_context_t *cont = &fib->cont;
1789 rb_thread_t *th = GET_THREAD();
1790
1791 /* make sure the root_fiber object is available */
1792 if (th->root_fiber == NULL) root_fiber_alloc(th);
1793
1794 if (th->ec->fiber_ptr == fib) {
1795 /* ignore the fiber context switch
1796 * because the destination fiber is the same as the current fiber
1797 */
1798 return make_passing_arg(argc, argv);
1799 }
1800
1801 if (cont_thread_value(cont) != th->self) {
1802 rb_raise(rb_eFiberError, "fiber called across threads");
1803 }
1804 else if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
1805 rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
1806 }
1807 else if (FIBER_TERMINATED_P(fib)) {
1808 value = rb_exc_new2(rb_eFiberError, "dead fiber called");
1809
1810 if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
1811 rb_exc_raise(value);
1812 VM_UNREACHABLE(fiber_switch);
1813 }
1814 else {
1815 /* th->ec->fiber_ptr is also dead => switch to root fiber */
1816 /* (this means we're being called from rb_fiber_terminate, */
1817 /* and the terminated fiber's return_fiber() is already dead) */
1818 VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));
1819
1820 cont = &th->root_fiber->cont;
1821 cont->argc = -1;
1822 cont->value = value;
1823 #if FIBER_USE_NATIVE
1824 fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);
1825 #else
1826 cont_restore_0(cont, &value);
1827 #endif
1828 VM_UNREACHABLE(fiber_switch);
1829 }
1830 }
1831
1832 if (is_resume) {
1833 fib->prev = fiber_current();
1834 }
1835
1836 VM_ASSERT(FIBER_RUNNABLE_P(fib));
1837
1838 cont->argc = argc;
1839 cont->value = make_passing_arg(argc, argv);
1840 value = fiber_store(fib, th);
1841 RUBY_VM_CHECK_INTS(th->ec);
1842
1843 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1844
1845 return value;
1846 }
1847
1848 VALUE
1849 rb_fiber_transfer(VALUE fibval, int argc, const VALUE *argv)
1850 {
1851 return fiber_switch(fiber_ptr(fibval), argc, argv, 0);
1852 }
1853
1854 void
1855 rb_fiber_close(rb_fiber_t *fib)
1856 {
1857 rb_execution_context_t *ec = &fib->cont.saved_ec;
1858 VALUE *vm_stack = ec->vm_stack;
1859 size_t stack_bytes = ec->vm_stack_size * sizeof(VALUE);
1860
1861 fiber_status_set(fib, FIBER_TERMINATED);
1862 if (stack_bytes == rb_ec_vm_ptr(ec)->default_params.thread_vm_stack_size) {
1863 rb_thread_recycle_stack_release(vm_stack);
1864 }
1865 else {
1866 ruby_xfree(vm_stack);
1867 }
1868 rb_ec_set_vm_stack(ec, NULL, 0);
1869
1870 #if !FIBER_USE_NATIVE
1871 /* should not mark machine stack any more */
1872 ec->machine.stack_end = NULL;
1873 #endif
1874 }
1875
1876 static void
1877 rb_fiber_terminate(rb_fiber_t *fib, int need_interrupt)
1878 {
1879 VALUE value = fib->cont.value;
1880 rb_fiber_t *ret_fib;
1881
1882 VM_ASSERT(FIBER_RESUMED_P(fib));
1883 rb_fiber_close(fib);
1884
1885 #if FIBER_USE_NATIVE
1886 #if defined(FIBER_USE_COROUTINE)
1887 coroutine_destroy(&fib->context);
1888 #elif !defined(_WIN32)
1889 fib->context.uc_stack.ss_sp = NULL;
1890 #endif
1891
1892 #ifdef MAX_MACHINE_STACK_CACHE
1893 /* Ruby must not switch to another thread until terminated_machine_stack has been stored */
1894 terminated_machine_stack.ptr = fib->ss_sp;
1895 terminated_machine_stack.size = fib->ss_size / sizeof(VALUE);
1896 fib->ss_sp = NULL;
1897 fib->cont.machine.stack = NULL;
1898 fib->cont.machine.stack_size = 0;
1899 #endif
1900 #endif
1901
1902 ret_fib = return_fiber();
1903 if (need_interrupt) RUBY_VM_SET_INTERRUPT(&ret_fib->cont.saved_ec);
1904 fiber_switch(ret_fib, 1, &value, 0);
1905 }
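
/* Note (added): return_fiber() selects where control goes when a fiber
 * finishes: its resuming fiber (fib->prev) when it was resumed, otherwise
 * the root fiber. When need_interrupt is set, the destination execution
 * context gets its interrupt flag raised via RUBY_VM_SET_INTERRUPT() so
 * that pending events (e.g. an exception escaping the fiber block) are
 * handled promptly after the switch.
 */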

VALUE
rb_fiber_resume(VALUE fibval, int argc, const VALUE *argv)
{
    rb_fiber_t *fib = fiber_ptr(fibval);

    if (fib->prev != 0 || fiber_is_root_p(fib)) {
        rb_raise(rb_eFiberError, "double resume");
    }
    if (fib->transferred != 0) {
        rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
    }

    return fiber_switch(fib, argc, argv, 1);
}
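
/* The two guards above, seen from Ruby (illustrative examples; #transfer
 * and Fiber.current need `require 'fiber'`):
 *
 *     Fiber.current.resume        # FiberError: double resume (root fiber)
 *
 *     f = Fiber.new { Fiber.yield }
 *     f.transfer                  # marks f as transferred
 *     f.resume                    # FiberError: cannot resume transferred Fiber
 */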

VALUE
rb_fiber_yield(int argc, const VALUE *argv)
{
    return fiber_switch(return_fiber(), argc, argv, 0);
}

void
rb_fiber_reset_root_local_storage(rb_thread_t *th)
{
    if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
        th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
    }
}

/*
 *  call-seq:
 *     fiber.alive? -> true or false
 *
 *  Returns true if the fiber can still be resumed (or transferred
 *  to). After finishing execution of the fiber block this method will
 *  always return false. You need to <code>require 'fiber'</code>
 *  before using this method.
 */
VALUE
rb_fiber_alive_p(VALUE fibval)
{
    return FIBER_TERMINATED_P(fiber_ptr(fibval)) ? Qfalse : Qtrue;
}
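
/* For example (illustrative):
 *
 *     require 'fiber'
 *     f = Fiber.new { Fiber.yield }
 *     f.alive?   #=> true  (created)
 *     f.resume
 *     f.alive?   #=> true  (suspended at Fiber.yield)
 *     f.resume
 *     f.alive?   #=> false (terminated)
 */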

/*
 *  call-seq:
 *     fiber.resume(args, ...) -> obj
 *
 *  Resumes the fiber from the point at which the last <code>Fiber.yield</code>
 *  was called, or starts running it if it is the first call to
 *  <code>resume</code>. Arguments passed to resume will be the value of
 *  the <code>Fiber.yield</code> expression or will be passed as block
 *  parameters to the fiber's block if this is the first <code>resume</code>.
 *
 *  The <code>resume</code> call itself evaluates to the arguments passed
 *  to the next <code>Fiber.yield</code> statement inside the fiber's block,
 *  or to the block's value if it runs to completion without any
 *  <code>Fiber.yield</code>.
 */
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}

/*
 *  call-seq:
 *     fiber.transfer(args, ...) -> obj
 *
 *  Transfer control to another fiber, resuming it from where it last
 *  stopped or starting it if it was not resumed before. The calling
 *  fiber will be suspended much like in a call to
 *  <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
 *  before using this method.
 *
 *  The fiber which receives the transfer call treats it much like
 *  a resume call. Arguments passed to transfer are treated like those
 *  passed to resume.
 *
 *  You cannot resume a fiber that transferred control to another one.
 *  This will cause a double resume error. You need to transfer control
 *  back to this fiber before it can yield and resume.
 *
 *  Example:
 *
 *    fiber1 = Fiber.new do
 *      puts "In Fiber 1"
 *      Fiber.yield
 *    end
 *
 *    fiber2 = Fiber.new do
 *      puts "In Fiber 2"
 *      fiber1.transfer
 *      puts "Never see this message"
 *    end
 *
 *    fiber3 = Fiber.new do
 *      puts "In Fiber 3"
 *    end
 *
 *    fiber2.resume
 *    fiber3.resume
 *
 *  <em>produces</em>
 *
 *    In Fiber 2
 *    In Fiber 1
 *    In Fiber 3
 *
 */
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
{
    rb_fiber_t *fib = fiber_ptr(fibval);
    fib->transferred = 1;
    return fiber_switch(fib, argc, argv, 0);
}

/*
 *  call-seq:
 *     Fiber.yield(args, ...) -> obj
 *
 *  Yields control back to the context that resumed the fiber, passing
 *  along any arguments that were passed to it. The fiber will resume
 *  processing at this point when <code>resume</code> is called next.
 *  Any arguments passed to the next <code>resume</code> will be the
 *  value that this <code>Fiber.yield</code> expression evaluates to.
 */
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}
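
/* Value flow between resume and yield, as an illustrative example:
 *
 *     f = Fiber.new do |x|
 *         y = Fiber.yield(x + 1)  # f.resume(10) evaluates to 11 here
 *         y * 2                   # the block's value ends the fiber
 *     end
 *     f.resume(10)  #=> 11
 *     f.resume(3)   #=> 6
 */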

/*
 *  call-seq:
 *     Fiber.current() -> fiber
 *
 *  Returns the current fiber. You need to <code>require 'fiber'</code>
 *  before using this method. If you are not running in the context of
 *  a fiber this method will return the root fiber.
 */
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}
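
/* For example (illustrative):
 *
 *     require 'fiber'
 *     main = Fiber.current                # the root fiber
 *     f = Fiber.new { Fiber.yield Fiber.current }
 *     f.resume.equal?(f)                  #=> true
 *     Fiber.current.equal?(main)          #=> true, back in the root fiber
 */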

/*
 *  call-seq:
 *     fiber.to_s -> string
 *
 *  Returns a string containing basic information about the fiber,
 *  including its current status.
 *
 */
static VALUE
fiber_to_s(VALUE fibval)
{
    const rb_fiber_t *fib = fiber_ptr(fibval);
    const rb_proc_t *proc;
    char status_info[0x10];

    snprintf(status_info, 0x10, " (%s)", fiber_status_name(fib->status));
    if (!rb_obj_is_proc(fib->first_proc)) {
        VALUE str = rb_any_to_s(fibval);
        strlcat(status_info, ">", sizeof(status_info));
        rb_str_set_len(str, RSTRING_LEN(str)-1);
        rb_str_cat_cstr(str, status_info);
        return str;
    }
    GetProcPtr(fib->first_proc, proc);
    return rb_block_to_s(fibval, &proc->block, status_info);
}
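
/* Sample output (illustrative; the exact address and source location
 * depend on the process and on where the block was defined):
 *
 *     Fiber.new {}.to_s  #=> "#<Fiber:0x00007f8c0a8b1234 demo.rb:1 (created)>"
 */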

#ifdef HAVE_WORKING_FORK
void
rb_fiber_atfork(rb_thread_t *th)
{
    if (th->root_fiber) {
        if (&th->root_fiber->cont.saved_ec != th->ec) {
            th->root_fiber = th->ec->fiber_ptr;
        }
        th->root_fiber->prev = 0;
    }
}
#endif
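
/* Note (added): after fork(2) only the forking fiber survives in the
 * child process, so if the fork happened inside a non-root fiber that
 * fiber is promoted to be the child's root fiber, and any stale resume
 * link (prev) is cleared.
 */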

/*
 *  Document-class: FiberError
 *
 *  Raised when an invalid operation is attempted on a Fiber, in
 *  particular when attempting to call/resume a dead fiber,
 *  attempting to yield from the root fiber, or calling a fiber across
 *  threads.
 *
 *     fiber = Fiber.new{}
 *     fiber.resume #=> nil
 *     fiber.resume #=> FiberError: dead fiber called
 */

void
Init_Cont(void)
{
#if FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
    rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
    rb_define_alias(rb_cFiber, "inspect", "to_s");
}
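
/* Note (added): Init_Cont() defines only the core Fiber API. The exported
 * initializers below are invoked by the bundled extensions: requiring
 * 'continuation' runs ruby_Init_Continuation_body(), and requiring 'fiber'
 * runs ruby_Init_Fiber_as_Coroutine(), which is why the docs above say
 * that #transfer, #alive? and Fiber.current need `require 'fiber'`:
 *
 *     require 'fiber'
 *     Fiber.current  # now available
 */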

RUBY_SYMBOL_EXPORT_BEGIN

void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}

void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}

RUBY_SYMBOL_EXPORT_END