/* Some compilers don't like re-def of GC_malloc in schemef.h: */
#ifndef MZ_PRECISE_GC
# define SCHEME_NO_GC_PROTO
#endif

#include "schpriv.h"
#include "schmach.h"
#include "schgc.h"

#ifdef STACK_GROWS_UP
# define DEEPPOS(b) ((uintptr_t)(b)->stack_from+(uintptr_t)(b)->stack_size)
#else
# define DEEPPOS(b) ((uintptr_t)(b)->stack_from)
#endif
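/* DEEPPOS(b) is the deepest address reached by a saved stack region:
   the end of the region when the stack grows up, otherwise `stack_from'
   itself (which is the deep end when the stack grows down). It is used
   below to check whether the current stack position is already past the
   region to be restored. */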

#ifdef MZ_PRECISE_GC
HOOK_SHARED_OK void *(*scheme_get_external_stack_val)(void);
HOOK_SHARED_OK void (*scheme_set_external_stack_val)(void *);
#endif

#ifndef MZ_PRECISE_GC

/**********************************************************************/

/* When we copy the stack, we must set up GC to specially traverse the
   stack copy to account for pointers to the interior of collectable
   objects. */

extern MZGC_DLLIMPORT void GC_push_all_stack(void *, void *);
extern MZGC_DLLIMPORT void GC_flush_mark_stack(void);
extern MZGC_DLLIMPORT void (*GC_push_last_roots)(void);
extern MZGC_DLLIMPORT void (*GC_push_last_roots_again)(void);
/* GC_push_last_roots_again is called after marking eager
   finalizations (once at each stage). We rely on the fact that no
   copied stack will be referenced by (or affect the ordering of)
   anything non-eagerly finalized. */

#ifdef USE_SENORA_GC
# define GC_is_marked(p) GC_base(p)
# define GC_did_mark_stack_overflow() 0
#else
extern MZGC_DLLIMPORT int GC_is_marked(void *);
extern MZGC_DLLIMPORT int GC_did_mark_stack_overflow(void);
#endif

#define get_copy(s_c) (((CopiedStack *)s_c)->_stack_copy)

#define MALLOC_LINK() MALLOC_ONE_WEAK(CopiedStack*)
#ifdef USE_TAGGED_ALLOCATION
extern void *scheme_malloc_stack(size_t);
# define MALLOC_STACK(size) scheme_malloc_stack(size)
#else
# define MALLOC_STACK(size) scheme_malloc_atomic(size)
#endif

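/* Copied stacks are chained together through weak links (MALLOC_ONE_WEAK),
   and each record has `remove_cs' registered as its finalizer, so the
   (atomically allocated) stack copy is freed once the CopiedStack record
   itself becomes unreachable. */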
typedef struct CopiedStack {
  void *_stack_copy; /* The actual data */
  intptr_t size;
  int pushed;
  struct CopiedStack **next, **prev;
} CopiedStack;

static CopiedStack **first_copied_stack;
int scheme_num_copied_stacks = 0;

static void push_copied_stacks(int init)
{
  /* This is called after everything else is marked.
     Mark from those stacks that are still reachable. If
     we mark from a stack, we need to go back through the list
     all over to check the previously unmarked stacks. */
  CopiedStack *cs;
  int pushed_one;

  if (init) {
    for (cs = *first_copied_stack; cs; cs = *cs->next) {
      if (get_copy(cs))
        cs->pushed = 0;
      else
        cs->pushed = 1;
    }
  }

  GC_flush_mark_stack();

  do {
    pushed_one = 0;
    for (cs = *first_copied_stack; cs; cs = *cs->next) {
      if (!cs->pushed && GC_is_marked(get_copy(cs))) {
        pushed_one = 1;
        cs->pushed = 1;
        GC_push_all_stack(get_copy(cs), (char *)get_copy(cs) + cs->size);
        if (GC_did_mark_stack_overflow()) {
          /* printf("mark stack overflow\n"); */
          return;
        } else {
          GC_flush_mark_stack();
          if (GC_did_mark_stack_overflow()) {
            /* printf("mark stack overflow (late)\n"); */
            return;
          }
        }
      }
    }
  } while (pushed_one);
}

static void init_push_copied_stacks(void)
{
  push_copied_stacks(1);
}

static void update_push_copied_stacks(void)
{
  do {
    push_copied_stacks(0);
  } while (scheme_propagate_ephemeron_marks());
}

void scheme_init_setjumpup(void)
{
  if (scheme_starting_up) {
    REGISTER_SO(first_copied_stack);
  }
  first_copied_stack = MALLOC_LINK();
  *first_copied_stack = NULL;

  GC_push_last_roots = init_push_copied_stacks;
  GC_push_last_roots_again = update_push_copied_stacks;
}

static void remove_cs(void *_cs, void *unused)
{
  CopiedStack *cs = (CopiedStack *)_cs;

  if (*cs->prev)
    *(*cs->prev)->next = *cs->next;
  else
    *first_copied_stack = *cs->next;

  if (*cs->next)
    *(*cs->next)->prev = *cs->prev;

  if (cs->_stack_copy) {
#ifndef SGC_STD_DEBUGGING
    GC_free(cs->_stack_copy);
#else
    memset(cs->_stack_copy, 0, cs->size);
#endif
    cs->_stack_copy = NULL;
  }

  --scheme_num_copied_stacks;
}
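/* Note: remove_cs is installed as the CopiedStack finalizer in
   make_stack_copy_rec, and it is also called directly from
   scheme_reset_jmpup_buf when a buffer is explicitly discarded. */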

static void *make_stack_copy_rec(intptr_t size)
{
  CopiedStack *cs, **lk;

  cs = MALLOC_ONE(CopiedStack);
  cs->size = size;
  lk = MALLOC_LINK();
  cs->next = lk;
  lk = MALLOC_LINK();
  cs->prev = lk;


  /* double linked list push */
  *cs->next = *first_copied_stack;
  if (*first_copied_stack)
    *(*first_copied_stack)->prev = cs;
  *cs->prev = NULL;
  *first_copied_stack = cs;

  GC_register_finalizer(cs, remove_cs, NULL, NULL, NULL);

  scheme_num_copied_stacks++;

  return (void *)cs;
}

static void set_copy(void *s_c, void *c)
{
  CopiedStack *cs = (CopiedStack *)s_c;

  cs->_stack_copy = c;
}

/**********************************************************************/

#else

/* Precise GC: */
# define MALLOC_STACK(size) scheme_malloc_atomic(size)
# define get_copy(s_c) (s_c)
# define set_copy(s_c, c) s_c = c

THREAD_LOCAL_DECL(static void *stack_copy_cache[STACK_COPY_CACHE_SIZE]);
THREAD_LOCAL_DECL(static intptr_t stack_copy_size_cache[STACK_COPY_CACHE_SIZE]);
THREAD_LOCAL_DECL(static int scc_pos);
#define SCC_OK_EXTRA_AMT 100
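/* Under precise GC, discarded stack copies are kept in a small
   thread-local ring and reused by scheme_copy_stack when a cached block
   is at least as large as the request but no more than SCC_OK_EXTRA_AMT
   bytes larger. The cache is flushed before a GC via
   scheme_flush_stack_copy_cache. */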

void scheme_flush_stack_copy_cache(void)
  XFORM_SKIP_PROC
{
  int i;
  for (i = 0; i < STACK_COPY_CACHE_SIZE; i++) {
    stack_copy_cache[i] = NULL;
    stack_copy_size_cache[i] = 0;
  }
}

#endif

/**********************************************************************/

#ifdef MZ_PRECISE_GC
# define GC_VAR_STACK_ARG_DECL , void *gc_var_stack_in
# define GC_VAR_STACK_ARG      , __gc_var_stack__
#else
# define GC_VAR_STACK_ARG_DECL /* empty */
# define GC_VAR_STACK_ARG      /* empty */
#endif
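/* Under precise GC, the caller's __gc_var_stack__ chain is passed to
   scheme_copy_stack and recorded in the buffer, so that
   scheme_uncopy_stack can reinstall it as GC_variable_stack when the
   saved stack is restored. */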

/* This function must not be inlined! */
void MZ_NO_INLINE scheme_copy_stack(Scheme_Jumpup_Buf *b, void *base, void *start GC_VAR_STACK_ARG_DECL)
{
  intptr_t size, msize;
  void *here;

  here = &size;

  size = (intptr_t)here XFORM_OK_MINUS (intptr_t)start;
#ifdef STACK_GROWS_UP
  b->stack_from = start;
#else
  size = -size;
  b->stack_from = here;
#endif

  if (size < 0)
    size = 0;

  msize = size;

  if (b->stack_max_size < size) {
    /* printf("Stack size: %d\n", size); */
    void *copy;
#ifndef MZ_PRECISE_GC
    copy = make_stack_copy_rec(size);
    b->stack_copy = copy;
    set_copy(b->stack_copy, MALLOC_STACK(size));
#else
    /* b is a pointer into the middle of `base'; bad for precise gc: */
    uintptr_t diff;
    diff = (uintptr_t)b XFORM_OK_MINUS (uintptr_t)base;
    b = NULL;

    copy = NULL;
    /* Look for a reusable freed block: */
    {
      int i;
      for (i = 0; i < STACK_COPY_CACHE_SIZE; i++) {
        if ((stack_copy_size_cache[i] >= size)
            && (stack_copy_size_cache[i] < (size + SCC_OK_EXTRA_AMT))) {
          /* Found one */
          copy = stack_copy_cache[i];
          msize = stack_copy_size_cache[i];
          stack_copy_cache[i] = NULL;
          stack_copy_size_cache[i] = 0;
          break;
        }
      }
    }
    if (!copy) {
      /* No reusable block found */
      copy = MALLOC_STACK(size);
    }

    /* Restore b: */
    b = (Scheme_Jumpup_Buf *)(((char *)base) XFORM_OK_PLUS diff);

    set_copy(b->stack_copy, copy);
#endif
    b->stack_max_size = msize;
  }
  b->stack_size = size;

#ifdef MZ_PRECISE_GC
  b->gc_var_stack = gc_var_stack_in;
  if (scheme_get_external_stack_val) {
    void *es;
    es = scheme_get_external_stack_val();
    b->external_stack = es;
  }
#endif

  memcpy(get_copy(b->stack_copy),
         b->stack_from,
         size);
}

MZ_DO_NOT_INLINE(void scheme_uncopy_stack(int ok, Scheme_Jumpup_Buf *b, intptr_t *prev));

void scheme_uncopy_stack(int ok, Scheme_Jumpup_Buf *b, intptr_t *prev)
{
  GC_CAN_IGNORE Scheme_Jumpup_Buf *c;
  intptr_t top_delta = 0, bottom_delta = 0, size;
  void *cfrom, *cto;

  if (!ok) {
    uintptr_t z;
    intptr_t junk[200];

    z = (uintptr_t)&junk[0];

    scheme_uncopy_stack(STK_COMP(z, DEEPPOS(b)), b, junk);
  }

  /* Vague attempt to prevent the compiler from optimizing away `prev': */
  prev[199] = 0;

  FLUSH_REGISTER_WINDOWS;

  START_XFORM_SKIP;
  c = b;
  while (c) {
    size = c->stack_size - top_delta;
    cto = (char *)c->stack_from + bottom_delta;
    cfrom = (char *)get_copy(c->stack_copy) + bottom_delta;

    memcpy(cto, cfrom, size);

    if (c->cont) {
#ifdef STACK_GROWS_UP
      top_delta = (((uintptr_t)c->cont->buf_ptr->buf.stack_from
                    + c->cont->buf_ptr->buf.stack_size)
                   - (uintptr_t)c->stack_from);
#else
      bottom_delta = ((uintptr_t)c->stack_from
                      + c->stack_size
                      - (uintptr_t)c->cont->buf_ptr->buf.stack_from);
      top_delta = bottom_delta;
#endif
      c = &c->cont->buf_ptr->buf;
    } else
      c = NULL;
  }
  END_XFORM_SKIP;

#ifdef MZ_PRECISE_GC
  GC_variable_stack = b->gc_var_stack;
  if (scheme_set_external_stack_val)
    scheme_set_external_stack_val(b->external_stack);
#endif

  scheme_longjmp(b->buf, 1);
}

#ifdef MZ_PRECISE_GC
START_XFORM_SKIP;
#endif

static intptr_t find_same(char *p, char *low, intptr_t max_size)
{
  intptr_t cnt = 0;

  /* We assume a max possible amount of the current stack that should
     not be shared with the saved stack. This is ok (or not) in the same
     sense as assuming that STACK_SAFETY_MARGIN is enough wiggle room to
     prevent stack overflow. */
# define MAX_STACK_DIFF 4096

#ifdef SIXTY_FOUR_BIT_INTEGERS
# define SHARED_STACK_ALIGNMENT 8
#else
# define SHARED_STACK_ALIGNMENT 4
#endif

  if (max_size > MAX_STACK_DIFF) {
    cnt = max_size - MAX_STACK_DIFF;
    max_size = MAX_STACK_DIFF;
  }

#ifdef STACK_GROWS_UP
  while (max_size--) {
    if (p[cnt] != low[cnt])
      break;
    cnt++;
  }
#else
  if (!((intptr_t)p & (sizeof(intptr_t)-1))
      && !((intptr_t)low & (sizeof(intptr_t)-1))) {
    /* common case of aligned addresses: compare `intptr_t`s at a time */
    max_size /= sizeof(intptr_t);
    while (max_size--) {
      if (((intptr_t *)p)[max_size] != ((intptr_t *)low)[max_size])
        break;
      cnt += sizeof(intptr_t);
    }
  } else {
    /* general case: compare bytes */
    while (max_size--) {
      if (p[max_size] != low[max_size])
        break;
      cnt++;
    }
  }
#endif

  if (cnt & (SHARED_STACK_ALIGNMENT - 1)) {
    cnt -= (cnt & (SHARED_STACK_ALIGNMENT - 1));
  }

  return cnt;
}

#ifdef MZ_PRECISE_GC
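/* For 3m (precise GC), `start' must land on a frame boundary in the
   GC var-stack chain so that the copied region covers whole frames;
   align_var_stack walks the chain until it reaches a frame at or past
   `s' and returns that frame's address. */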
static void *align_var_stack(void **vs, void *s)
{
  void **nvs, **next;
  intptr_t i, cnt;
  void *a;

  while (STK_COMP((uintptr_t)vs, (uintptr_t)s)) {
    vs = (void **)(*vs);
  }

  s = (void *)vs;

  /* Check next few frames to see whether they refer to variables
     before s. This can happen due to inlining, so that an older
     frame is shallower in the stack. It shouldn't happen much,
     though. */
  nvs = *vs;
  while (nvs) {
    next = NULL;
    cnt = ((intptr_t *)nvs)[1];
    for (i = 0; i < cnt; i++) {
      a = nvs[i+2];
      if (!a) {
        a = nvs[i+3];
        i += 2;
      }
      if (STK_COMP((uintptr_t)a, (uintptr_t)s)) {
        /* We need nvs to update part of copied stack! */
        vs = nvs;
        s = (void *)vs;
        next = *nvs;
        break;
      }
    }
    nvs = next;
  }

  return s;
}
#define ALIGN_VAR_STACK(vs, s) s = align_var_stack(vs, s)

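/* A var-stack frame is laid out as { previous-frame pointer, slot
   count, slots... }, so skipping `cnt + 2' words moves past the current
   frame; shift_var_stack uses this to push `s' beyond the frame at the
   copy boundary. */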
static void *shift_var_stack(void *s, intptr_t delta)
{
#ifdef STACK_GROWS_UP
  return s;
#else
  void **vs = (void **)((char *)s + delta);
  intptr_t cnt;

  /* Set s past end of vs: */
  cnt = ((intptr_t *)vs)[1];
  return (void *)((void **)s + cnt + 2);
#endif
}
#define PAST_VAR_STACK(s) s = shift_var_stack(s, 0);
#define PAST_VAR_STACK_DELTA(s, d) s = shift_var_stack(s, d);
END_XFORM_SKIP;
#else
# define ALIGN_VAR_STACK(vs, s) /* empty */
# define PAST_VAR_STACK(s) /* empty */
# define PAST_VAR_STACK_DELTA(s, d) /* empty */
#endif

int scheme_setjmpup_relative(Scheme_Jumpup_Buf *b, void *base,
                             void * volatile start, struct Scheme_Cont *c)
{
  int local;
  intptr_t disguised_b;

#ifdef MZ_USE_JIT
  scheme_flush_stack_cache();
#endif

  FLUSH_REGISTER_WINDOWS;

  if (!(local = scheme_setjmp(b->buf))) {
    if (c) {
      /* We'd like to re-use the stack copied for a continuation
         that encloses the current one --- but we don't know exactly
         how much of the stack is supposed to be shared, since call/cc
         is implemented with a trampoline; certainly, the shallowest
         bit of the old continuation is not right for this one. So,
         we just start from the deepest part of the stack and find
         how many bytes match (using find_same).
         For chains of continuations C1 < C2 < C3, we assume that the
         part of C1 discovered to be safely shared with C2 is also valid
         for C3, so checking for C3 starts with the fresh part in C2,
         and that's where asymptotic benefits start to kick in.
         Unfortunately, I can't quite convince myself that this
         assumption is definitely correct. I think it's likely correct,
         but watch out. */
      intptr_t same_size;
      START_XFORM_SKIP;
      same_size = find_same(get_copy(c->buf_ptr->buf.stack_copy),
                            c->buf_ptr->buf.stack_from,
                            c->buf_ptr->buf.stack_size);
      b->cont = c;
#ifdef STACK_GROWS_UP
      start = (void *)((char *)c->buf_ptr->buf.stack_from + same_size);
#else
      start = (void *)((char *)c->buf_ptr->buf.stack_from
                       + (c->buf_ptr->buf.stack_size - same_size));
#endif
      /* In 3m-mode, we need `start' on a var-stack boundary: */
      ALIGN_VAR_STACK(__gc_var_stack__, start);
      END_XFORM_SKIP;
    } else
      b->cont = NULL;

    /* In 3m-mode, we need `start' at the end of the frame */
    PAST_VAR_STACK(start);

    /* b is a pointer into the middle of `base', which is bad for
       precise gc, so we hide it. */
    disguised_b = (intptr_t)b;
    b = NULL;

    scheme_copy_stack((Scheme_Jumpup_Buf *)disguised_b, base, start GC_VAR_STACK_ARG);

    /* Precise GC: ensure that this frame is pushed. */
    if (0) {
      base = scheme_malloc(0);
    }

    return 0;
  }

  return local;
}

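/* scheme_prune_jmpup: given a saved overflow jump and a stack boundary,
   produce a new Scheme_Overflow_Jmp whose saved stack covers only the
   part up to that boundary, or return NULL if nothing would be
   trimmed. */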
struct Scheme_Overflow_Jmp *scheme_prune_jmpup(struct Scheme_Overflow_Jmp *jmp, void *stack_boundary)
{
  void *cur_end;

  PAST_VAR_STACK_DELTA(stack_boundary, (char *)get_copy(jmp->cont.stack_copy) - (char *)jmp->cont.stack_from);

#ifdef STACK_GROWS_UP
  cur_end = (void *)jmp->cont.stack_from;
#else
  cur_end = (void *)((char *)jmp->cont.stack_from + jmp->cont.stack_size);
#endif

  if (stack_boundary != cur_end) {
    intptr_t new_size, delta;
    Scheme_Overflow_Jmp *naya;
    void *copy, *base;

# ifdef STACK_GROWS_UP
    delta = (char *)stack_boundary - (char *)jmp->cont.stack_from;
    new_size = jmp->cont.stack_size - delta;
    base = (char *)stack_boundary;
# else
    delta = 0;
    new_size = (intptr_t)stack_boundary XFORM_OK_MINUS (intptr_t)jmp->cont.stack_from;
    base = jmp->cont.stack_from;
# endif

    if ((new_size < 0) || (new_size > jmp->cont.stack_size))
      scheme_signal_error("bad C-stack pruning size: %ld vs. %ld", new_size, jmp->cont.stack_size);

    naya = MALLOC_ONE_RT(Scheme_Overflow_Jmp);
    memcpy(naya, jmp, sizeof(Scheme_Overflow_Jmp));
    scheme_init_jmpup_buf(&naya->cont);

#ifndef MZ_PRECISE_GC
    copy = make_stack_copy_rec(new_size);
    naya->cont.stack_copy = copy;
    set_copy(naya->cont.stack_copy, MALLOC_STACK(new_size));
#else
    copy = MALLOC_STACK(new_size);
    set_copy(naya->cont.stack_copy, copy);
#endif

    memcpy(get_copy(copy),
           (char *)get_copy(jmp->cont.stack_copy) XFORM_OK_PLUS delta,
           new_size);

    naya->cont.stack_size = naya->cont.stack_max_size = new_size;
    naya->cont.stack_from = base;

    return naya;
  }

  return NULL;
}

void scheme_longjmpup(Scheme_Jumpup_Buf *b)
{
  intptr_t z;
  intptr_t junk[200];

#ifdef MZ_USE_JIT
  scheme_flush_stack_cache();
#endif

  scheme_uncopy_stack(STK_COMP((uintptr_t)&z, DEEPPOS(b)), b, junk);
}
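
/* Minimal usage sketch (hypothetical, for illustration only): in real
   uses, the Scheme_Jumpup_Buf is embedded in a larger GC-allocated
   record (e.g., a continuation's buf_ptr->buf), and `base' is that
   enclosing record, since `b' must be hidden as an offset from `base'
   during the copy:

     if (scheme_setjmpup_relative(&rec->buf, rec, stack_start, NULL)) {
       ... re-entered via scheme_longjmpup(&rec->buf) ...
     } else {
       ... first return: the stack between `stack_start' and here
           has been copied into the buffer ...
     }

   scheme_setjmpup_relative returns 0 when the copy is made, and returns
   the non-zero value passed to scheme_longjmp (always 1 here) when the
   saved stack is restored by scheme_longjmpup. */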

void scheme_init_jmpup_buf(Scheme_Jumpup_Buf *b)
{
  b->stack_size = b->stack_max_size = 0;
  b->stack_from = b->stack_copy = NULL;
}

void scheme_reset_jmpup_buf(Scheme_Jumpup_Buf *b)
{
  if (b->stack_copy) {
#ifdef MZ_PRECISE_GC
    /* "Free" the stack copy by putting it into a cache.
       (We clear the cache before a GC.) */
    stack_copy_cache[scc_pos] = b->stack_copy;
    stack_copy_size_cache[scc_pos] = b->stack_max_size;
    scc_pos++;
    if (scc_pos == STACK_COPY_CACHE_SIZE)
      scc_pos = 0;
#else
    /* Drop the copy of the stack, */
    /* remove the finalizer, */
    /* and explicitly call the finalization proc */
    GC_register_finalizer(b->stack_copy, NULL, NULL, NULL, NULL);
    remove_cs(b->stack_copy, NULL);
#endif

    scheme_init_jmpup_buf(b);
  }

  memset(&b->buf, 0, sizeof(mz_jmp_buf));
}

#ifdef USE_MZ_CYGWIN_SETJMP
/* We have to define setjmp & longjmp to remain compatible
   with MSVC-compiled extensions. It's mostly the same code
   as mzsj86.c, just in a slightly different syntax. This code
   is fragile, because it's not well defined whether the compiler
   will generate frame-pointer setup; use mzsj86g.S, instead. */

#if (__OPTIMIZE__ > 0) || defined(MZ_XFORM)
# define NEED_STACK_FRAME_SETUP
#endif

MZ_DO_NOT_INLINE(int scheme_mz_setjmp(mz_pre_jmp_buf b));
MZ_DO_NOT_INLINE(void scheme_mz_longjmp(mz_pre_jmp_buf b, int v));

int scheme_mz_setjmp(mz_pre_jmp_buf b)
{
#ifdef NEED_STACK_FRAME_SETUP
  asm("push %EBP");
  asm("mov %ESP, %EBP");
#endif

  asm("mov 4(%EBP), %ECX"); /* return address */
  asm("mov 8(%EBP), %EAX"); /* jmp_buf ptr */
  asm("mov (%EBP), %EDX");  /* old EBP */
  asm("mov %EDX, (%EAX)");
  asm("mov %EBX, 4(%EAX)");
  asm("mov %EDI, 8(%EAX)");
  asm("mov %ESI, 12(%EAX)");
  asm("mov %ESP, 16(%EAX)");
  asm("mov %ECX, 20(%EAX)");

#ifdef NEED_STACK_FRAME_SETUP
  asm("pop %EBP");
#endif

  return 0;
}

void scheme_mz_longjmp(mz_pre_jmp_buf b, int v)
{
#ifdef NEED_STACK_FRAME_SETUP
  asm("push %EBP");
  asm("mov %ESP, %EBP");
#endif

  asm("mov 12(%EBP), %EAX"); /* return value */
  asm("mov 8(%EBP), %ECX");  /* jmp_buf */
  asm("mov 16(%ECX), %ESP"); /* restore stack pointer */
  asm("mov (%ECX), %EBP");   /* old EBP */
  asm("mov %EBP, (%ESP)");
  asm("mov %ESP, %EBP");
  asm("mov 4(%ECX), %EBX");
  asm("mov 8(%ECX), %EDI");
  asm("mov 12(%ECX), %ESI");
  asm("mov 20(%ECX), %ECX"); /* return address */
  asm("mov %ECX, 4(%EBP)");

#ifdef NEED_STACK_FRAME_SETUP
  asm("pop %EBP");
#endif
}

#endif


#ifndef USE_MZ_SETJMP_INDIRECT
Scheme_Setjmp_Proc scheme_get_mz_setjmp(void)
{
  scheme_log_abort("internal error: setjmp was indirect?");
  abort();
  return NULL;
}
#endif

#if defined(USE_MZ_SETJMP_INDIRECT) && defined(__MINGW32__)
extern int _scheme_mz_setjmp(mz_pre_jmp_buf b);
extern void _scheme_mz_longjmp(mz_pre_jmp_buf b, int v);

Scheme_Setjmp_Proc scheme_get_mz_setjmp(void)
{
  return _scheme_mz_setjmp;
}

void scheme_mz_longjmp(mz_pre_jmp_buf b, int v)
{
  _scheme_mz_longjmp(b, v);
}

int scheme_mz_setjmp(mz_pre_jmp_buf b)
{
  scheme_log_abort("internal error: setjmp wasn't indirect");
  abort();
  return 0;
}
#endif