/*
  Copyright 2011, 2012 Kristian Nielsen and Monty Program Ab
            2016 MariaDB Corporation AB

  This file is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this.  If not, see <http://www.gnu.org/licenses/>.
*/

/*
  Implementation of async context spawning using Posix ucontext and
  swapcontext().
*/
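
/*
  For orientation, a minimal usage sketch of the my_context API implemented
  in this file (illustrative only; my_task and the 64K stack size are made-up
  examples, and the real callers are the non-blocking client routines that
  include ma_context.h):

    static void my_task(void *arg)
    {
      struct my_context *ctx= (struct my_context *)arg;
      ...                         // do some work
      my_context_yield(ctx);      // suspend; spawn/continue return 1
      ...                         // resumed by my_context_continue()
    }                             // returning makes spawn/continue return 0

    struct my_context ctx;
    if (my_context_init(&ctx, 64*1024))        // allocate the private stack
      ... handle error ...;
    int active= my_context_spawn(&ctx, my_task, &ctx);
    while (active > 0)                         // 1 while suspended, 0 when done
      active= my_context_continue(&ctx);
    my_context_destroy(&ctx);
*/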

#include "ma_global.h"
#include "ma_string.h"
#include "ma_context.h"

#ifdef HAVE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#ifdef MY_CONTEXT_USE_UCONTEXT
/*
  makecontext() only allows passing integers into the created context :-(
  We want to pass pointers, so we do it in this somewhat hackish way.
  Anyway, it should work everywhere, and at least it does not break strict
  aliasing.
*/
union pass_void_ptr_as_2_int {
  int a[2];
  void *p;
};
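
/*
  Illustrative round-trip sketch (assumes, as checked at compile time in
  my_context_init() below, that a pointer fits in two ints; "some_ptr" is
  just a placeholder):

    union pass_void_ptr_as_2_int u, v;
    u.p= some_ptr;                  // split the pointer into u.a[0], u.a[1]
    v.a[0]= u.a[0];                 // the two ints travel through
    v.a[1]= u.a[1];                 //   makecontext()/my_context_spawn_internal()
    assert(v.p == some_ptr);        // and are reassembled on the other side
*/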

/*
  We use an old-style function definition here, since the function is passed
  to makecontext() and the type of the makecontext() argument does not match
  the actual type (the actual type can differ from call to call).
*/
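
/*
  For reference, POSIX declares
    void makecontext(ucontext_t *ucp, void (*func)(void), int argc, ...);
  so the function is always passed as void (*)(void), even though it is later
  called with the int arguments given to makecontext().
*/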
static void
my_context_spawn_internal(i0, i1)
int i0, i1;
{
  int err;
  struct my_context *c;
  union pass_void_ptr_as_2_int u;

  u.a[0]= i0;
  u.a[1]= i1;
  c= (struct my_context *)u.p;

  (*c->user_func)(c->user_data);
  c->active= 0;
  err= setcontext(&c->base_context);
  fprintf(stderr, "Aieie, setcontext() failed: %d (errno=%d)\n", err, errno);
}


int
my_context_continue(struct my_context *c)
{
  int err;

  if (!c->active)
    return 0;

  err= swapcontext(&c->base_context, &c->spawned_context);
  if (err)
  {
    fprintf(stderr, "Aieie, swapcontext() failed: %d (errno=%d)\n",
            err, errno);
    return -1;
  }

  return c->active;
}


int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  int err;
  union pass_void_ptr_as_2_int u;

  err= getcontext(&c->spawned_context);
  if (err)
    return -1;
  c->spawned_context.uc_stack.ss_sp= c->stack;
  c->spawned_context.uc_stack.ss_size= c->stack_size;
  c->spawned_context.uc_link= NULL;
  c->user_func= f;
  c->user_data= d;
  c->active= 1;
  u.p= c;
  makecontext(&c->spawned_context, my_context_spawn_internal, 2,
              u.a[0], u.a[1]);

  return my_context_continue(c);
}


int
my_context_yield(struct my_context *c)
{
  int err;

  if (!c->active)
    return -1;

  err= swapcontext(&c->spawned_context, &c->base_context);
  if (err)
    return -1;
  return 0;
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
#if SIZEOF_CHARP > SIZEOF_INT*2
#error Error: Unable to store pointer in 2 ints on this architecture
#endif

  memset(c, 0, sizeof(*c));
  if (!(c->stack= malloc(stack_size)))
    return -1;                                  /* Out of memory */
  c->stack_size= stack_size;
#ifdef HAVE_VALGRIND
  c->valgrind_stack_id=
    VALGRIND_STACK_REGISTER(c->stack, ((unsigned char *)(c->stack))+stack_size);
#endif
  return 0;
}

void
my_context_destroy(struct my_context *c)
{
  if (c->stack)
  {
#ifdef HAVE_VALGRIND
    VALGRIND_STACK_DEREGISTER(c->valgrind_stack_id);
#endif
    free(c->stack);
  }
}

#endif  /* MY_CONTEXT_USE_UCONTEXT */


#ifdef MY_CONTEXT_USE_X86_64_GCC_ASM
/*
  GCC-amd64 implementation of my_context.

  This is slightly optimized for the common case where we never yield
  (e.g. the next row to fetch is already fully received in the buffer). In
  this case we do not need to restore registers on return (though we still
  need to save them, as we cannot know in advance whether we will yield).
*/

#include <stdint.h>
#include <stdlib.h>

/*
  Layout of saved registers etc.
  Since this is accessed through gcc inline assembler, it is simpler to just
  use numbers than to try to define nice constants or structs.

   0    0   %rsp
   1    8   %rbp
   2   16   %rbx
   3   24   %r12
   4   32   %r13
   5   40   %r14
   6   48   %r15
   7   56   %rip for done
   8   64   %rip for yield/continue
*/
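
/*
  For readability only, the save area corresponds to a hypothetical struct
  like the following (not actually used; the code below addresses the slots
  by the numeric offsets listed above):

    struct x86_64_save_area {
      uint64_t rsp;        // offset  0
      uint64_t rbp;        // offset  8
      uint64_t rbx;        // offset 16
      uint64_t r12;        // offset 24
      uint64_t r13;        // offset 32
      uint64_t r14;        // offset 40
      uint64_t r15;        // offset 48
      uint64_t rip_done;   // offset 56: jump target when the spawned function returns
      uint64_t rip_yield;  // offset 64: jump target for yield/continue
    };
*/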

int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  int ret;

  /*
    There are 6 callee-save registers we need to save and restore when
    suspending and continuing, plus stack pointer %rsp and instruction pointer
    %rip.

    However, if we never suspend, the user-supplied function will in any case
    restore the 6 callee-save registers, so we can avoid restoring them in
    this case.
  */
  __asm__ __volatile__
    (
     "movq %%rsp, (%[save])\n\t"
     "movq %[stack], %%rsp\n\t"
#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 && !defined(__INTEL_COMPILER)
     /*
       This emits a DWARF DW_CFA_undefined directive to make the return address
       undefined. This indicates that this is the top of the stack frame, and
       helps tools that use DWARF stack unwinding to obtain stack traces.
       (We use a numeric constant to avoid a dependency on libdwarf includes.)
     */
     ".cfi_escape 0x07, 16\n\t"
#endif
     "movq %%rbp, 8(%[save])\n\t"
     "movq %%rbx, 16(%[save])\n\t"
     "movq %%r12, 24(%[save])\n\t"
     "movq %%r13, 32(%[save])\n\t"
     "movq %%r14, 40(%[save])\n\t"
     "movq %%r15, 48(%[save])\n\t"
     "leaq 1f(%%rip), %%rax\n\t"
     "leaq 2f(%%rip), %%rcx\n\t"
     "movq %%rax, 56(%[save])\n\t"
     "movq %%rcx, 64(%[save])\n\t"
     /*
       Constraint below puts the argument to the user function into %rdi, as
       needed for the calling convention.
     */
     "callq *%[f]\n\t"
     "jmpq *56(%[save])\n"
     /*
       Come here when operation is done.
       We do not need to restore callee-save registers, as the called function
       will do this for us if needed.
     */
     "1:\n\t"
     "movq (%[save]), %%rsp\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 3f\n"
     /* Come here when operation was suspended. */
     "2:\n\t"
     "movl $1, %[ret]\n"
     "3:\n"
     : [ret] "=a" (ret),
       [f] "+S" (f),
       /* Need this in %rdi to follow calling convention. */
       [d] "+D" (d)
     : [stack] "a" (c->stack_top),
       /* Need this in callee-save register to preserve in function call. */
       [save] "b" (&c->save[0])
     : "rcx", "rdx", "r8", "r9", "r10", "r11", "memory", "cc"
  );

  return ret;
}

int
my_context_continue(struct my_context *c)
{
  int ret;

  __asm__ __volatile__
    (
     "movq (%[save]), %%rax\n\t"
     "movq %%rsp, (%[save])\n\t"
     "movq %%rax, %%rsp\n\t"
     "movq 8(%[save]), %%rax\n\t"
     "movq %%rbp, 8(%[save])\n\t"
     "movq %%rax, %%rbp\n\t"
     "movq 24(%[save]), %%rax\n\t"
     "movq %%r12, 24(%[save])\n\t"
     "movq %%rax, %%r12\n\t"
     "movq 32(%[save]), %%rax\n\t"
     "movq %%r13, 32(%[save])\n\t"
     "movq %%rax, %%r13\n\t"
     "movq 40(%[save]), %%rax\n\t"
     "movq %%r14, 40(%[save])\n\t"
     "movq %%rax, %%r14\n\t"
     "movq 48(%[save]), %%rax\n\t"
     "movq %%r15, 48(%[save])\n\t"
     "movq %%rax, %%r15\n\t"

     "leaq 1f(%%rip), %%rax\n\t"
     "leaq 2f(%%rip), %%rcx\n\t"
     "movq %%rax, 56(%[save])\n\t"
     "movq 64(%[save]), %%rax\n\t"
     "movq %%rcx, 64(%[save])\n\t"

     "movq 16(%[save]), %%rcx\n\t"
     "movq %%rbx, 16(%[save])\n\t"
     "movq %%rcx, %%rbx\n\t"

     "jmpq *%%rax\n"
     /*
       Come here when operation is done.
       Be sure to use the same callee-save register for %[save] here and in
       my_context_spawn(), so we preserve the value correctly at this point.
     */
     "1:\n\t"
     "movq (%[save]), %%rsp\n\t"
     "movq 8(%[save]), %%rbp\n\t"
     /* %rbx is preserved from my_context_spawn() in this case. */
     "movq 24(%[save]), %%r12\n\t"
     "movq 32(%[save]), %%r13\n\t"
     "movq 40(%[save]), %%r14\n\t"
     "movq 48(%[save]), %%r15\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 3f\n"
     /* Come here when operation is suspended. */
     "2:\n\t"
     "movl $1, %[ret]\n"
     "3:\n"
     : [ret] "=a" (ret)
     : /* Need this in callee-save register to preserve in function call. */
       [save] "b" (&c->save[0])
     : "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", "memory", "cc"
        );

  return ret;
}

int
my_context_yield(struct my_context *c)
{
  uint64_t *save= &c->save[0];
  __asm__ __volatile__
    (
     "movq (%[save]), %%rax\n\t"
     "movq %%rsp, (%[save])\n\t"
     "movq %%rax, %%rsp\n\t"
     "movq 8(%[save]), %%rax\n\t"
     "movq %%rbp, 8(%[save])\n\t"
     "movq %%rax, %%rbp\n\t"
     "movq 16(%[save]), %%rax\n\t"
     "movq %%rbx, 16(%[save])\n\t"
     "movq %%rax, %%rbx\n\t"
     "movq 24(%[save]), %%rax\n\t"
     "movq %%r12, 24(%[save])\n\t"
     "movq %%rax, %%r12\n\t"
     "movq 32(%[save]), %%rax\n\t"
     "movq %%r13, 32(%[save])\n\t"
     "movq %%rax, %%r13\n\t"
     "movq 40(%[save]), %%rax\n\t"
     "movq %%r14, 40(%[save])\n\t"
     "movq %%rax, %%r14\n\t"
     "movq 48(%[save]), %%rax\n\t"
     "movq %%r15, 48(%[save])\n\t"
     "movq %%rax, %%r15\n\t"
     "movq 64(%[save]), %%rax\n\t"
     "leaq 1f(%%rip), %%rcx\n\t"
     "movq %%rcx, 64(%[save])\n\t"

     "jmpq *%%rax\n"

     "1:\n"
     : [save] "+D" (save)
     :
     : "rax", "rcx", "rdx", "rsi", "r8", "r9", "r10", "r11", "memory", "cc"
     );
  return 0;
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
  memset(c, 0, sizeof(*c));

  if (!(c->stack_bot= malloc(stack_size)))
    return -1;                                  /* Out of memory */
  /*
    The x86_64 ABI specifies 16-byte stack alignment.
    Also put two zero words at the top of the stack.
  */
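  /*
    Worked example of the computation below, with made-up addresses: for
    stack_bot = 0x1000008 and stack_size = 0x10000, the end of the malloc()ed
    block is 0x1010008; masking with ~0xf rounds down to 0x1010000 and
    subtracting 16 gives stack_top = 0x100fff0, a 16-byte aligned address
    whose 16 bytes are then zeroed by the memset() below.
  */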
  c->stack_top= (void *)
    (( ((intptr)c->stack_bot + stack_size) & ~(intptr)0xf) - 16);
  memset(c->stack_top, 0, 16);

#ifdef HAVE_VALGRIND
  c->valgrind_stack_id=
    VALGRIND_STACK_REGISTER(c->stack_bot, c->stack_top);
#endif
  return 0;
}

void
my_context_destroy(struct my_context *c)
{
  if (c->stack_bot)
  {
    free(c->stack_bot);
#ifdef HAVE_VALGRIND
    VALGRIND_STACK_DEREGISTER(c->valgrind_stack_id);
#endif
  }
}

#endif  /* MY_CONTEXT_USE_X86_64_GCC_ASM */


#ifdef MY_CONTEXT_USE_I386_GCC_ASM
/*
  GCC-i386 implementation of my_context.

  This is slightly optimized for the common case where we never yield
  (e.g. the next row to fetch is already fully received in the buffer). In
  this case we do not need to restore registers on return (though we still
  need to save them, as we cannot know in advance whether we will yield).
*/

#include <stdint.h>
#include <stdlib.h>

/*
  Layout of saved registers etc.
  Since this is accessed through gcc inline assembler, it is simpler to just
  use numbers than to try to define nice constants or structs.

   0    0   %esp
   1    4   %ebp
   2    8   %ebx
   3   12   %esi
   4   16   %edi
   5   20   %eip for done
   6   24   %eip for yield/continue
*/

int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  int ret;

  /*
    There are 4 callee-save registers we need to save and restore when
    suspending and continuing, plus stack pointer %esp and instruction pointer
    %eip.

    However, if we never suspend, the user-supplied function will in any case
    restore the 4 callee-save registers, so we can avoid restoring them in
    this case.
  */
  __asm__ __volatile__
    (
     "movl %%esp, (%[save])\n\t"
     "movl %[stack], %%esp\n\t"
#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 && !defined(__INTEL_COMPILER)
     /*
       This emits a DWARF DW_CFA_undefined directive to make the return address
       undefined. This indicates that this is the top of the stack frame, and
       helps tools that use DWARF stack unwinding to obtain stack traces.
       (We use a numeric constant to avoid a dependency on libdwarf includes.)
     */
     ".cfi_escape 0x07, 8\n\t"
#endif
     /* Push the parameter on the stack. */
     "pushl %[d]\n\t"
     "movl %%ebp, 4(%[save])\n\t"
     "movl %%ebx, 8(%[save])\n\t"
     "movl %%esi, 12(%[save])\n\t"
     "movl %%edi, 16(%[save])\n\t"
     /* Get label addresses in -fPIC-compatible way (no pc-relative on 32bit) */
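     /*
       The call pushes the address of label 1 and the popl retrieves it,
       giving the current instruction address; adding the label differences
       then yields the absolute addresses of labels 2 ("done") and 3
       ("yield/continue") without any load-time relocation.
     */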
     "call 1f\n"
     "1:\n\t"
     "popl %%eax\n\t"
     "addl $(2f-1b), %%eax\n\t"
     "movl %%eax, 20(%[save])\n\t"
     "addl $(3f-2f), %%eax\n\t"
     "movl %%eax, 24(%[save])\n\t"
     "call *%[f]\n\t"
     "jmp *20(%[save])\n"
     /*
       Come here when operation is done.
       We do not need to restore callee-save registers, as the called function
       will do this for us if needed.
     */
     "2:\n\t"
     "movl (%[save]), %%esp\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 4f\n"
     /* Come here when operation was suspended. */
     "3:\n\t"
     "movl $1, %[ret]\n"
     "4:\n"
     : [ret] "=a" (ret),
       [f] "+c" (f),
       [d] "+d" (d)
     : [stack] "a" (c->stack_top),
       /* Need this in callee-save register to preserve across function call. */
       [save] "D" (&c->save[0])
     : "memory", "cc"
  );

  return ret;
}

int
my_context_continue(struct my_context *c)
{
  int ret;

  __asm__ __volatile__
    (
     "movl (%[save]), %%eax\n\t"
     "movl %%esp, (%[save])\n\t"
     "movl %%eax, %%esp\n\t"
     "movl 4(%[save]), %%eax\n\t"
     "movl %%ebp, 4(%[save])\n\t"
     "movl %%eax, %%ebp\n\t"
     "movl 8(%[save]), %%eax\n\t"
     "movl %%ebx, 8(%[save])\n\t"
     "movl %%eax, %%ebx\n\t"
     "movl 12(%[save]), %%eax\n\t"
     "movl %%esi, 12(%[save])\n\t"
     "movl %%eax, %%esi\n\t"

     "movl 24(%[save]), %%eax\n\t"
     "call 1f\n"
     "1:\n\t"
     "popl %%ecx\n\t"
     "addl $(2f-1b), %%ecx\n\t"
     "movl %%ecx, 20(%[save])\n\t"
     "addl $(3f-2f), %%ecx\n\t"
     "movl %%ecx, 24(%[save])\n\t"

     /* Must restore %edi last as it is also our %[save] register. */
     "movl 16(%[save]), %%ecx\n\t"
     "movl %%edi, 16(%[save])\n\t"
     "movl %%ecx, %%edi\n\t"

     "jmp *%%eax\n"
     /*
       Come here when operation is done.
       Be sure to use the same callee-save register for %[save] here and in
       my_context_spawn(), so we preserve the value correctly at this point.
     */
     "2:\n\t"
     "movl (%[save]), %%esp\n\t"
     "movl 4(%[save]), %%ebp\n\t"
     "movl 8(%[save]), %%ebx\n\t"
     "movl 12(%[save]), %%esi\n\t"
     "movl 16(%[save]), %%edi\n\t"
     "xorl %[ret], %[ret]\n\t"
     "jmp 4f\n"
     /* Come here when operation is suspended. */
     "3:\n\t"
     "movl $1, %[ret]\n"
     "4:\n"
     : [ret] "=a" (ret)
     : /* Need this in callee-save register to preserve in function call. */
       [save] "D" (&c->save[0])
     : "ecx", "edx", "memory", "cc"
        );

  return ret;
}

int
my_context_yield(struct my_context *c)
{
  uint64_t *save= &c->save[0];
  __asm__ __volatile__
    (
     "movl (%[save]), %%eax\n\t"
     "movl %%esp, (%[save])\n\t"
     "movl %%eax, %%esp\n\t"
     "movl 4(%[save]), %%eax\n\t"
     "movl %%ebp, 4(%[save])\n\t"
     "movl %%eax, %%ebp\n\t"
     "movl 8(%[save]), %%eax\n\t"
     "movl %%ebx, 8(%[save])\n\t"
     "movl %%eax, %%ebx\n\t"
     "movl 12(%[save]), %%eax\n\t"
     "movl %%esi, 12(%[save])\n\t"
     "movl %%eax, %%esi\n\t"
     "movl 16(%[save]), %%eax\n\t"
     "movl %%edi, 16(%[save])\n\t"
     "movl %%eax, %%edi\n\t"

     "movl 24(%[save]), %%eax\n\t"
     "call 1f\n"
     "1:\n\t"
     "popl %%ecx\n\t"
     "addl $(2f-1b), %%ecx\n\t"
     "movl %%ecx, 24(%[save])\n\t"

     "jmp *%%eax\n"

     "2:\n"
     : [save] "+d" (save)
     :
     : "eax", "ecx", "memory", "cc"
     );
  return 0;
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
  memset(c, 0, sizeof(*c));
  if (!(c->stack_bot= malloc(stack_size)))
    return -1;                                  /* Out of memory */
  c->stack_top= (void *)
    (( ((intptr)c->stack_bot + stack_size) & ~(intptr)0xf) - 16);
  memset(c->stack_top, 0, 16);

#ifdef HAVE_VALGRIND
  c->valgrind_stack_id=
    VALGRIND_STACK_REGISTER(c->stack_bot, c->stack_top);
#endif
  return 0;
}

void
my_context_destroy(struct my_context *c)
{
  if (c->stack_bot)
  {
    free(c->stack_bot);
#ifdef HAVE_VALGRIND
    VALGRIND_STACK_DEREGISTER(c->valgrind_stack_id);
#endif
  }
}

#endif  /* MY_CONTEXT_USE_I386_GCC_ASM */


#ifdef MY_CONTEXT_USE_WIN32_FIBERS
int
my_context_yield(struct my_context *c)
{
  c->return_value= 1;
  SwitchToFiber(c->app_fiber);
  return 0;
}


static void WINAPI
my_context_trampoline(void *p)
{
  struct my_context *c= (struct my_context *)p;
  /*
    Reuse the fiber by looping infinitely: each time we are scheduled, we
    call the appropriate user function and switch back when it is done.

    This way we avoid the overhead of CreateFiber() for every asynchronous
    operation.
  */
  for(;;)
  {
    (*(c->user_func))(c->user_arg);
    c->return_value= 0;
    SwitchToFiber(c->app_fiber);
  }
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
  memset(c, 0, sizeof(*c));
  c->lib_fiber= CreateFiber(stack_size, my_context_trampoline, c);
  if (c->lib_fiber)
    return 0;
  return -1;
}

void
my_context_destroy(struct my_context *c)
{
  if (c->lib_fiber)
  {
    DeleteFiber(c->lib_fiber);
    c->lib_fiber= NULL;
  }
}

int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  c->user_func= f;
  c->user_arg= d;
  return my_context_continue(c);
}

int
my_context_continue(struct my_context *c)
{
  void *current_fiber= IsThreadAFiber() ? GetCurrentFiber() : ConvertThreadToFiber(c);
  c->app_fiber= current_fiber;
  SwitchToFiber(c->lib_fiber);
  return c->return_value;
}

#endif  /* MY_CONTEXT_USE_WIN32_FIBERS */

#ifdef MY_CONTEXT_DISABLE
int
my_context_continue(struct my_context *c)
{
  return -1;
}


int
my_context_spawn(struct my_context *c, void (*f)(void *), void *d)
{
  return -1;
}


int
my_context_yield(struct my_context *c)
{
  return -1;
}

int
my_context_init(struct my_context *c, size_t stack_size)
{
  return -1;                                  /* Context support disabled */
}

void
my_context_destroy(struct my_context *c)
{
}

#endif