1 /* This file contains the main interpreter eval-apply loop,
2 scheme_do_eval(), C and Scheme stack management routines,
3 and other bridges between evaluation and compilation.
4
5 Evaluation:
6
7 The bytecode interpreter uses the C stack for continuations, and a
8 separate Scheme stack for activation-frame variables and collecting
9 application arguments. Closures are (nearly) flat, so mutable
10 variables are boxed. A third stack is used for continuation marks,
11 only as needed.
12
13 Tail calls are, for the most part, gotos within scheme_do_eval(). A
14 C function called by the main evaluation loop can perform a
   trampolining tail call via scheme_tail_apply(). The trampoline must
16 return to its caller without allocating any memory, because an
17 allocation optimization in the tail-call code assumes no GCs will
18 occur between the time that a tail call is issued and the time when
19 it's handled.
20
21 Multiple values are returned as a special SCHEME_MULTIPLE_VALUES
22 token that indicates actual values are stored in the current
23 thread's record.
24
25 The `apply' half of the `eval--apply' loop branches on all possible
26 application types. Some functions can be JIT-generated native code,
27 so `apply' is the bridge from interpreted code to JITted
28 code. Primitive functions (including cons) are implemented by C
29 functions outside the loop. Continuation applications are handled
30 directly in scheme_do_eval(). That leaves calls to non-JITted
31 closures, which are also performed within scheme_do_eval() (so that
32 most tail calls avoid the trampoline), which is analogous to a
33 primitive.
34
35 The `eval' half of the loop handles all core syntactic forms, such
36 as application and `letrec's.
37
38 When collecting the arguments for an application, scheme_do_eval()
39 avoids recursive C calls to evaluate arguments by recognizing
40 easily-evaluated expressions, such as constants and variable
41 lookups. This can be viewed as a kind of half-way A-normalization.
42
43 Bytecodes are not linear. They're actually trees of expression
44 nodes.
45
46 Top-level variables (imported or defined in a linklet) are
47 referenced through the Scheme stack, so that the variables can be
   re-linked each time a linklet is instantiated. The top-level variables are
49 sometimes called the "prefix", and push_prefix() initializes the
50 prefix portion of the stack. This prefix is captured in a
51 continuation that refers to top-level variables (which is why the
52 closure is not entirely flat). Special GC support allows a prefix
53 to be pruned to just the globals that are used by live closures.
54
55 Bytecode compilation:
56
57 Compilation works in five passes.
58
59 The first pass, called "compile", is the expander and compiler
60 front-end. See "compile.c" along with "compenv.c".
61
62 The second pass, called "letrec_check", determines which references
63 to `letrec'-bound variables need to be guarded with a run-time
64 check to prevent use before definition. The analysis result is
65 reflected by the insertion of `check-not-unsafe-undefined`
   calls. This pass mutates records produced by the "compile"
67 pass.
68
69 The third pass, called "optimize", performs constant propagation,
70 constant folding, and function inlining; this pass mutates records
71 produced by the "letrec_check" pass. See "optimize.c". This pass
72 isn't optional; for example, it calculates closure information that
73 the fourth pass uses.
74
75 The fourth pass, called "resolve", finishes compilation by computing
76 variable offsets and indirections (often mutating the records
77 produced by the first pass). It is also responsible for closure
78 conversion (i.e., converting closure content to arguments) and
79 lifting (of procedures that close over nothing or only globals).
80 Beware that the resulting bytecode object is a graph, not a tree,
81 due to sharing (potentially cyclic) of closures that are "empty"
82 but actually refer to other "empty" closures. See "resolve.c".
83
84 The fifth pass, "sfs", performs another liveness analysis on stack
85 slots and inserts operations to clear stack slots as necessary to
86 make execution safe for space. In particular, dead slots need to be
87 cleared before a non-tail call into arbitrary Racket code. This pass
88 can mutate the result of the "resolve" pass. See "sfs.c".
89
90 Bytecode marshaling and validation:
91
92 See "marshal.c" for functions that [un]marshal bytecode form
93 to/from S-expressions (roughly), which can then be printed using a
94 "fast-load" format.
95
96 The bytecode validator is applied to unmarshaled bytecode to check
97 that the bytecode is well formed and won't cause any segfaults in
98 the interpreter or in JITted form. See "validate.c".
99
100 Just-in-time compilation:
101
102 If the JIT is enabled, then an extra "jitprep" pass processes
103 bytecode one more time --- but only bytecode that is not going to
104 be marshaled, and possibly bytecode that was just unmarshaled.
105 In this "jitprep" pass, the `lambda' and `case-lambda'
106 forms are converted to native-code generators, instead of bytecode
107 variants. The code is not actually JITted until it is called; this
108 preparation step merely sets up a JIT hook for each function. The
109 preparation pass is a shallow, functional (i.e., it doesn't mutate
110 the original bytecode) pass; the body of a function is prepared for
111 JITting lazily. See "jitprep.c".
112
113 */
114
115 #include "schpriv.h"
116 #include "schrunst.h"
117 #ifdef MZ_USE_FUTURES
118 # include "future.h"
119 #endif
120
121 #ifdef USE_STACKAVAIL
122 #include <malloc.h>
123 #endif
124 #ifdef UNIX_FIND_STACK_BOUNDS
125 #include <signal.h>
126 #include <sys/time.h>
127 #include <sys/resource.h>
128 #endif
129 #ifdef PTHREAD_STACKSEG_FIND_STACK_BOUNDS
130 # include <sys/signal.h>
131 # include <pthread.h>
132 # include <pthread_np.h>
133 #endif
134 #ifdef WINDOWS_FIND_STACK_BOUNDS
135 #include <windows.h>
136 #endif
137 #include "schmach.h"
138 #ifdef MACOS_STACK_LIMIT
139 #include <Memory.h>
140 #endif
141 #ifdef MZ_USE_FUTURES
142 # include "future.h"
143 #endif
144
145 #ifdef MZ_USE_JIT
146 # define INIT_JIT_ON 1
147 #else
148 # define INIT_JIT_ON 0
149 #endif
150
151 #ifdef __clang__
152 # ifdef MZ_PRECISE_GC
153 # pragma clang diagnostic ignored "-Wself-assign"
154 # endif
155 #endif
156
/* globals */
SHARED_OK int scheme_startup_use_jit = INIT_JIT_ON;
SHARED_OK int scheme_startup_compile_machine_independent = 0;
/* Setter for scheme_startup_use_jit (whether the JIT is enabled at startup). */
void scheme_set_startup_use_jit(int v) { scheme_startup_use_jit = v; }
/* Setter for scheme_startup_compile_machine_independent. */
void scheme_set_startup_compile_machine_independent(int v) {
  scheme_startup_compile_machine_independent = v;
}
164
/* THREAD LOCAL SHARED */
THREAD_LOCAL_DECL(volatile int scheme_fuel_counter);
#ifdef USE_STACK_BOUNDARY_VAR
THREAD_LOCAL_DECL(uintptr_t scheme_stack_boundary);
THREAD_LOCAL_DECL(uintptr_t volatile scheme_jit_stack_boundary);
#endif
THREAD_LOCAL_DECL(int scheme_continuation_application_count);
THREAD_LOCAL_DECL(int scheme_overflow_count);
THREAD_LOCAL_DECL(Scheme_Prefix *scheme_prefix_finalize);
THREAD_LOCAL_DECL(Scheme_Prefix *scheme_inc_prefix_finalize);
THREAD_LOCAL_DECL(Scheme_Object *is_syntax_proc);
THREAD_LOCAL_DECL(Scheme_Object *expander_syntax_to_datum_proc);
THREAD_LOCAL_DECL(Scheme_Bucket_Table *scheme_namespace_to_env);
/* Number of C-stack overflows handled so far (incremented in
   scheme_handle_stack_overflow). */
int scheme_get_overflow_count() { return scheme_overflow_count; }
179
/* read-only globals */
/* Special marker objects, created in scheme_init_eval(): */
READ_ONLY Scheme_Object *scheme_eval_waiting;
READ_ONLY Scheme_Object *scheme_multiple_values;

/* symbols */
ROSYM Scheme_Object *scheme_stack_dump_key;
READ_ONLY static Scheme_Object *zero_rands_ptr; /* &zero_rands_ptr is dummy rands pointer */

/* locals */
static Scheme_Object *enable_break(int, Scheme_Object *[]);

/* Parameter procedures registered via ADD_PARAMETER in scheme_init_eval(): */
static Scheme_Object *allow_set_undefined(int argc, Scheme_Object **argv);
static Scheme_Object *compile_module_constants(int argc, Scheme_Object **argv);
static Scheme_Object *use_jit(int argc, Scheme_Object **argv);
static Scheme_Object *disallow_inline(int argc, Scheme_Object **argv);
static Scheme_Object *compile_target_machine(int argc, Scheme_Object **argv);
static Scheme_Object *compile_is_target_machine(int argc, Scheme_Object **argv);

void scheme_escape_to_continuation(Scheme_Object *obj, int num_rands, Scheme_Object **rands, Scheme_Object *alt_full);

#define cons(x,y) scheme_make_pair(x,y)

typedef void (*DW_PrePost_Proc)(void *);

#ifdef MZ_PRECISE_GC
static void register_traversers(void);
#endif

#define icons scheme_make_pair
209
210 /*========================================================================*/
211 /* initialization */
212 /*========================================================================*/
213
void
scheme_init_eval (Scheme_Startup_Env *env)
/* Startup initialization: creates the `eval-waiting' and
   multiple-values marker objects and the (uninterned) stack-dump key,
   then registers eval-related primitives and parameters in `env'. */
{
#ifdef MZ_PRECISE_GC
  register_traversers();
#endif

#ifdef MZ_EVAL_WAITING_CONSTANT
  scheme_eval_waiting = MZ_EVAL_WAITING_CONSTANT;
#else
  REGISTER_SO(scheme_eval_waiting);
  scheme_eval_waiting = scheme_alloc_eternal_object();
  scheme_eval_waiting->type = scheme_eval_waiting_type;
#endif

#ifdef MZ_EVAL_WAITING_CONSTANT
  scheme_multiple_values = MZ_MULTIPLE_VALUES_CONSTANT;
#else
  REGISTER_SO(scheme_multiple_values);
  scheme_multiple_values = scheme_alloc_eternal_object();
  scheme_multiple_values->type = scheme_multiple_values_type;
#endif

  REGISTER_SO(scheme_stack_dump_key);
  scheme_stack_dump_key = scheme_make_symbol("stk"); /* uninterned! */

  ADD_PRIM_W_ARITY("break-enabled", enable_break, 0, 1, env);

  ADD_PARAMETER("compile-allow-set!-undefined", allow_set_undefined, MZCONFIG_ALLOW_SET_UNDEFINED, env);
  ADD_PARAMETER("compile-enforce-module-constants", compile_module_constants, MZCONFIG_COMPILE_MODULE_CONSTS, env);
  ADD_PARAMETER("eval-jit-enabled", use_jit, MZCONFIG_USE_JIT, env);
  ADD_PARAMETER("compile-context-preservation-enabled", disallow_inline, MZCONFIG_DISALLOW_INLINE, env);
  ADD_PARAMETER("current-compile-target-machine", compile_target_machine, MZCONFIG_COMPILE_TARGET_MACHINE, env);

  ADD_PRIM_W_ARITY("compile-target-machine?", compile_is_target_machine, 1, 1, env);
}
250
void scheme_init_eval_places()
{
#ifdef DEBUG_CHECK_STACK_FRAME_SIZE
  /* NOTE(review): this special call appears to make scheme_do_eval()
     report/check its C stack-frame size in debug builds -- confirm
     against the DEBUG_CHECK_STACK_FRAME_SIZE code in scheme_do_eval(). */
  (void)scheme_do_eval(SCHEME_TAIL_CALL_WAITING, 0, NULL, 0);
#endif
}
257
XFORM_NONGCING static void ignore_result(Scheme_Object *v)
/* Discards `v'; if it is the multiple-values marker, also clears the
   current thread's multiple-values array so those values are no
   longer retained. */
{
  if (SAME_OBJ(v, SCHEME_MULTIPLE_VALUES)) {
    scheme_current_thread->ku.multiple.array = NULL;
  }
}
264
/* Exported wrapper for ignore_result(). */
void scheme_ignore_result(Scheme_Object *v)
{
  ignore_result(v);
}
269
270 /*========================================================================*/
271 /* C stack and Scheme stack handling */
272 /*========================================================================*/
273
Scheme_Object *
scheme_handle_stack_overflow(Scheme_Object *(*k)(void))
{
  /* "Stack overflow" means running out of C-stack space. The other
     end of this handler (i.e., the target for the longjmp) is
     scheme_top_level_do in fun.c */
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Overflow *overflow;
  Scheme_Overflow_Jmp *jmp;

  scheme_about_to_move_C_stack();

  /* Record the continuation `k' to be run on a fresh C stack: */
  p->overflow_k = k;
  scheme_overflow_count++;

  overflow = MALLOC_ONE_RT(Scheme_Overflow);
#ifdef MZTAG_REQUIRED
  overflow->type = scheme_rt_overflow;
#endif
  /* push old overflow */
  overflow->prev = scheme_current_thread->overflow;
  p->overflow = overflow;

  overflow->stack_start = p->stack_start;

  jmp = MALLOC_ONE_RT(Scheme_Overflow_Jmp);
#ifdef MZTAG_REQUIRED
  jmp->type = scheme_rt_overflow_jmp;
#endif
  overflow->jmp = jmp;

  scheme_init_jmpup_buf(&overflow->jmp->cont);
  scheme_zero_unneeded_rands(scheme_current_thread); /* for GC */

  if (scheme_setjmpup(&overflow->jmp->cont, overflow->jmp, p->stack_start)) {
    /* Non-zero return: we were longjmped back here; pop the overflow
       record and either deliver the reply value or continue an escape. */
    p = scheme_current_thread;
    overflow = p->overflow;
    p->overflow = overflow->prev;
    p->error_buf = overflow->jmp->savebuf;
    if (p->meta_prompt) {
      /* When unwinding a stack overflow, we need to fix up
         the meta prompt to have the restored stack base.
         (When overflow happens with a meta prompt in place,
         no fixup is needed, because the overflow is detected
         at the point where the meta-prompt's base would be used.) */
      Scheme_Prompt *meta_prompt;
      meta_prompt = MALLOC_ONE_TAGGED(Scheme_Prompt);
      memcpy(meta_prompt, p->meta_prompt, sizeof(Scheme_Prompt));
      meta_prompt->stack_boundary = p->stack_start;
      p->meta_prompt = meta_prompt;
    }
    if (!overflow->jmp->captured) /* reset if not captured in a continuation */
      scheme_reset_jmpup_buf(&overflow->jmp->cont);
    if (!scheme_overflow_reply) {
      /* No reply value means we should continue some escape. */
      if (p->cjs.jumping_to_continuation
          && p->cjs.is_escape) {
        /* Jump directly to prompt: */
        Scheme_Prompt *prompt = (Scheme_Prompt *)p->cjs.jumping_to_continuation;
        scheme_longjmp(*prompt->prompt_buf, 1);
      } else if (p->cjs.jumping_to_continuation
                 && SCHEME_CONTP(p->cjs.jumping_to_continuation)) {
        /* Jump to a full continuation: */
        Scheme_Cont *c = (Scheme_Cont *)p->cjs.jumping_to_continuation;
        p->cjs.jumping_to_continuation = NULL;
        scheme_longjmpup(&c->buf_ptr->buf);
      } else {
        /* Continue normal escape: */
        scheme_longjmp(scheme_error_buf, 1);
      }
    } else {
      /* Deliver the result computed on the other stack: */
      Scheme_Object *reply = scheme_overflow_reply;
      scheme_overflow_reply = NULL;
      return reply;
    }
  } else {
    /* Zero return: first time through; jump back toward
       scheme_top_level_do, which runs `k' on a fresh C stack. */
    p->stack_start = scheme_overflow_stack_start;
    scheme_longjmpup(&scheme_overflow_jmp->cont);
  }
  return NULL; /* never gets here */
}
354
#ifdef LINUX_FIND_STACK_BASE
/* Returns the stack-base address `bnd', possibly corrected via
   "/proc/self/maps".  The OS-reported base for the primordial thread
   can be far from the actual stack base (with Exec Shield, in some
   versions), so when `bnd' matches the primordial base we look up the
   "[stack]" region and use its upper end instead -- but only if that
   end is above `bnd' and within `lim' (the stack-size limit) of it;
   otherwise `bnd' is probably for another thread and is returned
   unchanged.  On any failure (open, allocation, parse), returns `bnd'
   as-is. */
static uintptr_t adjust_stack_base(uintptr_t bnd, uintptr_t lim) {
  if (bnd == scheme_get_primordial_thread_stack_base()) {
    FILE *f;
    char *buf;
    f = fopen("/proc/self/maps", "r");
    if (f) {
      buf = malloc(256);
      if (buf) { /* fix: passing a NULL buffer to fgets() is undefined behavior */
        /* NOTE(review): a maps line longer than 255 chars is split by
           fgets(), which could misalign the "[stack]\n" suffix check
           below -- believed not to happen for the [stack] line. */
        while (fgets(buf, 256, f)) {
          int len;
          len = strlen(buf);
          /* Match a line ending in "[stack]\n" (8 chars): */
          if ((len > 8) && !strcmp("[stack]\n", buf XFORM_OK_PLUS len XFORM_OK_MINUS 8)) {
            uintptr_t p = 0;
            int i;
            /* find separator: */
            for (i = 0; buf[i]; i++) {
              if (buf[i] == '-') {
                i++;
                break;
              }
            }
            /* parse hexadecimal region-end address after separator: */
            for (; buf[i]; i++) {
              if ((buf[i] >= '0') && (buf[i] <= '9')) {
                p = (p << 4) | (buf[i] - '0');
              } else if ((buf[i] >= 'a') && (buf[i] <= 'f')) {
                p = (p << 4) | (buf[i] - 'a' + 10);
              } else if ((buf[i] >= 'A') && (buf[i] <= 'F')) {
                p = (p << 4) | (buf[i] - 'A' + 10);
              } else
                break;
            }
            /* printf("%p vs. %p: %d\n", (void*)bnd, (void*)p, p - bnd); */
            if ((p > bnd) && ((p - lim) < bnd)) {
              bnd = p;
            } else {
              /* bnd is too far from the expected range; on another thread? */
            }
            break;
          }
        }
        free(buf);
      }
      fclose(f);
    }
  }

  return bnd;
}
#endif
407
#ifdef WINDOWS_FIND_STACK_BOUNDS
/* Returns the stack-reservation size (in bytes) recorded in the
   running executable's PE header, or WINDOWS_DEFAULT_STACK_SIZE if
   the header cannot be located or read. */
intptr_t find_exe_stack_size()
{
# define WINDOWS_DEFAULT_STACK_SIZE 1048576
  intptr_t sz = WINDOWS_DEFAULT_STACK_SIZE;
  wchar_t *fn;
  DWORD len = 1024;

  /* Try to read the executable to find out the initial
     stack size. */

  fn = (wchar_t *)malloc(sizeof(wchar_t) * len);
  /* fix: check the allocation before handing `fn' to GetModuleFileNameW */
  if (fn && (GetModuleFileNameW(NULL, fn, len) < len)) {
    HANDLE fd;
    fd = CreateFileW(fn,
                     GENERIC_READ,
                     FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                     NULL,
                     OPEN_EXISTING,
                     0,
                     NULL);
    if (fd != INVALID_HANDLE_VALUE) {
      int pos;
      short kind;
      DWORD got;
      /* Skip DOS stub: offset 0x3C holds the PE-header file offset */
      if (SetFilePointer(fd, 0x3C, NULL, FILE_BEGIN)
          != INVALID_SET_FILE_POINTER) {
        if (ReadFile(fd, &pos, sizeof(int), &got, NULL)
            && (got == sizeof(int))) {
          /* Read offset to header */
          if (SetFilePointer(fd, pos + 20 + 4, NULL, FILE_BEGIN)
              != INVALID_SET_FILE_POINTER) {
            /* Check magic number */
            if (ReadFile(fd, &kind, sizeof(short), &got, NULL)
                && (got == sizeof(short))) {
              /* Two possible magic numbers: PE32 or PE32+: */
              if ((kind == 0x10b) || (kind == 0x20b)) {
                /* Skip to PE32[+] header's stack reservation value: */
                if (SetFilePointer(fd, pos + 20 + 4 + 72, NULL, FILE_BEGIN)
                    != INVALID_SET_FILE_POINTER) {
                  if (kind == 0x10b) {
                    /* PE32: 32-bit stack size: */
                    int ssz;
                    if (ReadFile(fd, &ssz, sizeof(int), &got, NULL)
                        && (got == sizeof(int))) {
                      sz = ssz;
                    }
                  } else {
                    /* PE32+: 64-bit stack size: */
                    mzlonglong lsz;
                    if (ReadFile(fd, &lsz, sizeof(mzlonglong), &got, NULL)
                        && (got == sizeof(mzlonglong))) {
                      sz = lsz;
                    }
                  }
                }
              }
            }
          }
        }
      }
      CloseHandle(fd);
    }
  }
  free(fn); /* free(NULL) is a no-op */

  return sz;
}
#endif
478
void scheme_init_stack_check()
/* Finds the C stack limit --- platform-specific.  Sets
   scheme_stack_boundary (and mirrors it into
   scheme_jit_stack_boundary) using whichever detection strategy the
   build configuration selects; a pre-set boundary is left alone. */
{
  int *v, stack_grows_up;
  uintptr_t deeper;

  /* Determine the actual stack-growth direction by comparing a deeper
     frame's address with a local's address: */
  deeper = scheme_get_deeper_address();
  stack_grows_up = (deeper > (uintptr_t)&v);

#ifdef STACK_GROWS_UP
  /* Sanity check: the configured direction must match reality. */
  if (!stack_grows_up) {
    if (scheme_console_printf)
      scheme_console_printf("Stack grows DOWN, not UP.\n");
    else
      printf("Stack grows DOWN, not UP.\n");
    exit(1);
  }
#endif
#ifdef STACK_GROWS_DOWN
  if (stack_grows_up) {
    if (scheme_console_printf)
      scheme_console_printf("Stack grows UP, not DOWN.\n");
    else
      printf("Stack grows UP, not DOWN.\n");
    exit(1);
  }
#endif

#ifdef USE_STACK_BOUNDARY_VAR
  if (!scheme_stack_boundary) {
# ifdef ASSUME_FIXED_STACK_SIZE
    /* Fixed size configured at build time: */
    scheme_stack_boundary = scheme_get_current_os_thread_stack_base();
    if (stack_grows_up)
      scheme_stack_boundary += (FIXED_STACK_SIZE - STACK_SAFETY_MARGIN);
    else
      scheme_stack_boundary += (STACK_SAFETY_MARGIN - FIXED_STACK_SIZE);
# endif

# ifdef WINDOWS_FIND_STACK_BOUNDS
    /* Size comes from the executable's PE header: */
    scheme_stack_boundary = scheme_get_current_os_thread_stack_base();
    {
      intptr_t sz;
      sz = find_exe_stack_size();
      scheme_stack_boundary += (STACK_SAFETY_MARGIN - sz);
    }
# endif

# ifdef MACOS_FIND_STACK_BOUNDS
    scheme_stack_boundary = (uintptr_t)&v + STACK_SAFETY_MARGIN - StackSpace();
# endif

# ifdef PALMOS_FIND_STACK_BOUNDS
    {
      Ptr s, e;
      SysGetStackInfo(Ptr &s, &e);
      scheme_stack_boundary = (uintptr_t)e + STACK_SAFETY_MARGIN;
    }
# endif

# ifdef UNIX_FIND_STACK_BOUNDS
    /* Size comes from getrlimit(RLIMIT_STACK), clamped to an optional
       build-time maximum: */
    {
      struct rlimit rl;
      uintptr_t bnd, lim;

      bnd = (uintptr_t)scheme_get_current_os_thread_stack_base();

      getrlimit(RLIMIT_STACK, &rl);

      lim = (uintptr_t)rl.rlim_cur;
# ifdef UNIX_STACK_MAXIMUM
      if (lim > UNIX_STACK_MAXIMUM)
        lim = UNIX_STACK_MAXIMUM;
# endif

# ifdef LINUX_FIND_STACK_BASE
      /* Exec Shield can make the reported base inaccurate; correct it: */
      bnd = adjust_stack_base(bnd, lim);
# endif

      if (stack_grows_up)
        bnd += (lim - STACK_SAFETY_MARGIN);
      else
        bnd += (STACK_SAFETY_MARGIN - lim);

      scheme_stack_boundary = bnd;
    }
# endif

# ifdef PTHREAD_STACKSEG_FIND_STACK_BOUNDS
    {
      stack_t stack;
      pthread_stackseg_np(pthread_self(), &stack);
      scheme_stack_boundary = (uintptr_t)((char *)stack.ss_sp - (stack.ss_size - STACK_SAFETY_MARGIN));
    }
# endif
  }
#endif

#ifdef USE_STACK_BOUNDARY_VAR
  /* The JIT consults its own copy of the boundary: */
  scheme_jit_stack_boundary = scheme_stack_boundary;
#endif
}
580
581
int scheme_check_runstack(intptr_t size)
/* Checks whether the Scheme stack has `size' room left
   (plus a SCHEME_TAIL_COPY_THRESHOLD reserve; scheme_enlarge_runstack
   adds the same margin when allocating a segment). */
{
  return ((MZ_RUNSTACK - MZ_RUNSTACK_START) >= (size + SCHEME_TAIL_COPY_THRESHOLD));
}
587
void *scheme_enlarge_runstack(intptr_t size, void *(*k)())
/* Adds a Scheme stack segment, of at least `size' bytes, runs `k'
   with the new segment installed, then restores the previous segment.
   An escape (longjmp) during `k' is intercepted so the old runstack
   can be restored before the escape is re-raised.  If no continuation
   was captured while `k' ran, the segment is cached as
   `spare_runstack' for reuse. */
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Saved_Stack *saved;
  void *v;
  int cont_count;
  volatile int escape;
  mz_jmp_buf newbuf, * volatile savebuf;

  /* Save the current runstack state so it can be restored on exit: */
  saved = MALLOC_ONE_RT(Scheme_Saved_Stack);

#ifdef MZTAG_REQUIRED
  saved->type = scheme_rt_saved_stack;
#endif
  saved->prev = p->runstack_saved;
  saved->runstack_start = MZ_RUNSTACK_START;
  saved->runstack_offset = (MZ_RUNSTACK - MZ_RUNSTACK_START);
  saved->runstack_size = p->runstack_size;

  size += SCHEME_TAIL_COPY_THRESHOLD;

  /* NOTE(review): after adding SCHEME_TAIL_COPY_THRESHOLD above,
     `size' is nonzero whenever the threshold is positive, which would
     make the prompt (`else') branch below unreachable -- confirm the
     intended check order against the threshold's definition. */
  if (size) {
    /* If we keep growing the stack, then probably it
       needs to be much larger, so at least double the
       stack size, to a point: */
    intptr_t min_size;
    min_size = 2 * (p->runstack_size);
    if (min_size > 128000)
      min_size = 128000;
    if (size < min_size)
      size = min_size;
  } else {
    /* This is for a prompt. Re-use the current size,
       up to a point: */
    size = p->runstack_size;
    if (size > 1000)
      size = 1000;
  }

  /* Reuse the cached spare segment when it's big enough: */
  if (p->spare_runstack && (size <= p->spare_runstack_size)) {
    size = p->spare_runstack_size;
    MZ_RUNSTACK_START = p->spare_runstack;
    p->spare_runstack = NULL;
  } else {
    MZ_RUNSTACK_START = scheme_alloc_runstack(size);
  }
  p->runstack_size = size;
  MZ_RUNSTACK = MZ_RUNSTACK_START + size;
  p->runstack_saved = saved;

  cont_count = scheme_cont_capture_count;

  /* Install an error buffer so an escape during k() comes back here: */
  savebuf = p->error_buf;
  p->error_buf = &newbuf;
  if (scheme_setjmp(newbuf)) {
    v = NULL;
    escape = 1;
    p = scheme_current_thread; /* might have changed! */
  } else {
    v = k();
    escape = 0;
    p = scheme_current_thread; /* might have changed! */

    scheme_check_runstack_edge(MZ_RUNSTACK_START);

    /* If no continuation captured this segment, cache it for reuse: */
    if (cont_count == scheme_cont_capture_count) {
      if (!p->spare_runstack || (p->runstack_size > p->spare_runstack_size)) {
        p->spare_runstack = MZ_RUNSTACK_START;
        p->spare_runstack_size = p->runstack_size;
      }
    }
  }

  p->error_buf = savebuf;

  /* Restore the saved runstack state: */
  saved = p->runstack_saved;

  p->runstack_saved = saved->prev;
  MZ_RUNSTACK_START = saved->runstack_start;
  MZ_RUNSTACK = MZ_RUNSTACK_START + saved->runstack_offset;
  p->runstack_size = saved->runstack_size;

  if (escape) {
    /* Re-raise the escape now that the old runstack is back: */
    scheme_longjmp(*p->error_buf, 1);
  }

  return v;
}
677
678 /*========================================================================*/
679 /* continuation marks */
680 /*========================================================================*/
681
void scheme_push_continuation_frame(Scheme_Cont_Frame_Data *d)
/* Saves the current continuation-mark stack pointer and mark
   position into `d', then advances the mark position by 2 for the
   new frame. */
{
  d->cont_mark_stack = MZ_CONT_MARK_STACK;
  d->cont_mark_pos = MZ_CONT_MARK_POS;
  MZ_CONT_MARK_POS += 2;
}
689
void scheme_pop_continuation_frame(Scheme_Cont_Frame_Data *d)
/* Restores the continuation-mark state captured by
   scheme_push_continuation_frame(). */
{
  MZ_CONT_MARK_STACK = d->cont_mark_stack;
  MZ_CONT_MARK_POS = d->cont_mark_pos;
}
695
static MZ_MARK_STACK_TYPE clone_meta_cont_set_mark(Scheme_Meta_Continuation *mc, Scheme_Object *val, intptr_t findpos)
/* Sets the mark value at `findpos' in the current meta-continuation,
   first cloning the meta-continuation (and its copied mark array) so
   that a previously captured version is not mutated.  Returns 0. */
{
  /* Clone the meta-continuation, in case it was captured by
     a continuation in its current state. */
  Scheme_Meta_Continuation *naya;
  Scheme_Cont_Mark *cp;

  naya = MALLOC_ONE_RT(Scheme_Meta_Continuation);
  memcpy(naya, mc, sizeof(Scheme_Meta_Continuation));
  /* Deep-copy the mark array so the captured copy stays intact: */
  cp = MALLOC_N(Scheme_Cont_Mark, naya->cont_mark_total);
  memcpy(cp, mc->cont_mark_stack_copied, naya->cont_mark_total * sizeof(Scheme_Cont_Mark));
  naya->cont_mark_stack_copied = cp;
  naya->copy_after_captured = scheme_cont_capture_count;
  mc = naya;
  scheme_current_thread->meta_continuation = mc;

  /* Now it's safe to overwrite the mark in the fresh copy: */
  mc->cont_mark_stack_copied[findpos].val = val;
  mc->cont_mark_stack_copied[findpos].cache = NULL;

  return 0;
}
717
void scheme_new_mark_segment(Scheme_Thread *p)
/* Grows `p's continuation-mark stack by one segment of
   SCHEME_MARK_SEGMENT_SIZE marks, extending the segment table. */
{
  int c = p->cont_mark_seg_count;
  Scheme_Cont_Mark **segs, *seg;

  /* Note: we perform allocations before changing p to avoid GC trouble,
     since Racket adjusts a thread's cont_mark_stack_segments on GC. */
  segs = MALLOC_N(Scheme_Cont_Mark *, c + 1);
  seg = scheme_malloc_allow_interior(sizeof(Scheme_Cont_Mark) * SCHEME_MARK_SEGMENT_SIZE);
  segs[c] = seg;

  /* Copy the old segment table (if any) into the new, larger one: */
  if (c)
    memcpy(segs, p->cont_mark_stack_segments, c * sizeof(Scheme_Cont_Mark *));

  p->cont_mark_seg_count++;
  p->cont_mark_stack_segments = segs;
}
735
#ifdef MZ_USE_FUTURES
static void ts_scheme_new_mark_segment(Scheme_Thread *p) XFORM_SKIP_PROC
/* Future-safe wrapper: a future thread must delegate segment
   allocation to the runtime thread via an rtcall; the runtime thread
   allocates directly. */
{
  if (scheme_use_rtcall && !(scheme_future_thread_state)->is_runtime_thread)
    scheme_rtcall_new_mark_segment(p);
  else
    scheme_new_mark_segment(p);
}
#else
# define ts_scheme_new_mark_segment scheme_new_mark_segment
#endif
747
MZ_MARK_STACK_TYPE scheme_set_cont_mark(Scheme_Object *key, Scheme_Object *val)
/* Sets the continuation mark for `key' to `val' in the current frame,
   overwriting an existing mark for the same key in this frame if one
   exists, otherwise pushing a new mark record.  Returns the mark's
   position on the mark stack (or 0 for the meta-continuation cases).
   This function can be called inside a future thread */
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Cont_Mark *cm = NULL;
  intptr_t findpos, bottom;

  /* Search the current frame (marks whose `pos' still equals
     MZ_CONT_MARK_POS) for an existing record with `key': */
  findpos = (intptr_t)MZ_CONT_MARK_STACK;
  bottom = (intptr_t)p->cont_mark_stack_bottom;
  while (1) {
    if (findpos-- > bottom) {
      Scheme_Cont_Mark *seg = p->cont_mark_stack_segments[findpos >> SCHEME_LOG_MARK_SEGMENT_SIZE];
      intptr_t pos = findpos & SCHEME_MARK_SEGMENT_MASK;
      Scheme_Cont_Mark *find = seg + pos;

      if ((intptr_t)find->pos < (intptr_t)MZ_CONT_MARK_POS) {
        /* Reached a mark belonging to an older frame; stop. */
        break;
      } else {
        if (find->key == key) {
          cm = find;
          break;
        } else {
          /* Assume that we'll mutate rather than allocate a new mark record. */
          /* This is a bad assumption for a nasty program that repeatedly
             creates a new key for the same frame, but it's good enough. */
          find->cache = NULL;
        }
      }
    } else {
      /* Ran off the bottom of this mark stack; if we're at the base
         frame and there's a meta-continuation, the mark may live at
         the end of the meta-continuation's copied marks: */
      if (MZ_CONT_MARK_POS == p->cont_mark_pos_bottom + 2) {
        if (p->meta_continuation) {
          if (key != scheme_stack_dump_key) {
            /* Check the end of the meta-continuation's stack */
            Scheme_Meta_Continuation *mc = p->meta_continuation;
            for (findpos = (intptr_t)mc->cont_mark_total; findpos--; ) {
              if (mc->cont_mark_stack_copied[findpos].pos != mc->cont_mark_pos)
                break;
              if (mc->cont_mark_stack_copied[findpos].key == key) {
                if (mc->copy_after_captured < scheme_cont_capture_count) {
                  /* Captured since it was copied; clone before mutating: */
                  return clone_meta_cont_set_mark(mc, val, findpos);
                }
                mc->cont_mark_stack_copied[findpos].val = val;
                mc->cont_mark_stack_copied[findpos].cache = NULL;
                return 0;
              } else {
                mc->cont_mark_stack_copied[findpos].cache = NULL;
              }
            }
          }
        }
      }
      break;
    }
  }

  if (!cm) {
    /* Allocate a new mark record: */
    intptr_t segpos;
    intptr_t pos;
    Scheme_Cont_Mark *seg;

    findpos = MZ_CONT_MARK_STACK;
    segpos = ((intptr_t)findpos) >> SCHEME_LOG_MARK_SEGMENT_SIZE;
    pos = ((intptr_t)findpos) & SCHEME_MARK_SEGMENT_MASK;

    if (segpos >= p->cont_mark_seg_count) {
#ifdef MZ_USE_FUTURES
      /* Segment allocation may route through the runtime thread;
         stash `key' and `val' so they stay reachable across it: */
      jit_future_storage[0] = key;
      jit_future_storage[1] = val;
#endif
      ts_scheme_new_mark_segment(p);
      p = scheme_current_thread;
#ifdef MZ_USE_FUTURES
      key = jit_future_storage[0];
      val = jit_future_storage[1];
      jit_future_storage[0] = NULL;
      jit_future_storage[1] = NULL;
#endif
    }

    seg = p->cont_mark_stack_segments[segpos];
    cm = seg + pos;
    MZ_CONT_MARK_STACK = findpos + 1;
  }

  cm->key = key;
  cm->val = val;
  cm->pos = MZ_CONT_MARK_POS; /* always odd */
  cm->cache = NULL;

  return findpos;
}
840
/* Temporarily steps the continuation-mark position back one frame;
   paired with scheme_temp_inc_mark_depth(). */
void scheme_temp_dec_mark_depth()
{
  MZ_CONT_MARK_POS -= 2;
}
845
/* Re-advances the continuation-mark position after
   scheme_temp_dec_mark_depth(). */
void scheme_temp_inc_mark_depth()
{
  MZ_CONT_MARK_POS += 2;
}
850
851 /*========================================================================*/
852 /* eval-apply helpers */
853 /*========================================================================*/
854
/* discourage inlining of functions called by scheme_do_eval() to keep its frame size smaller */
856 MZ_DO_NOT_INLINE(static void unbound_global(Scheme_Object *obj));
857 MZ_DO_NOT_INLINE(static void make_tail_buffer_safe());
858 MZ_DO_NOT_INLINE(static Scheme_Object **evacuate_runstack(int num_rands, Scheme_Object **rands, Scheme_Object **runstack));
859 MZ_DO_NOT_INLINE(static Scheme_Object *define_values_execute(Scheme_Object *data));
860 MZ_DO_NOT_INLINE(static Scheme_Object *set_execute (Scheme_Object *data));
861 MZ_DO_NOT_INLINE(static Scheme_Object *ref_execute (Scheme_Object *data));
862 MZ_DO_NOT_INLINE(static Scheme_Object *apply_values_execute(Scheme_Object *data));
863 MZ_DO_NOT_INLINE(static Scheme_Object *bangboxenv_execute(Scheme_Object *data));
864 MZ_DO_NOT_INLINE(static Scheme_Object *begin0_execute(Scheme_Object *obj));
865
/* called in schapp.h */
static Scheme_Object *do_apply_known_k(void)
/* Trampoline target: re-applies a primitive closure whose pending
   application was stashed in the thread record (p1 = rator,
   i1 = argc, p2 = argv). */
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Object **argv = (Scheme_Object **)p->ku.k.p2;

  /* clear the thread-record field so the args aren't retained */
  p->ku.k.p2 = NULL;

  return _scheme_apply_known_prim_closure_multi((Scheme_Object *)p->ku.k.p1,
                                                p->ku.k.i1,
                                                argv);
}
878
879 #if 0
880 # define DEBUG_CHECK_TYPE(v) \
881 if ((v != SCHEME_MULTIPLE_VALUES) \
882 && (v != SCHEME_TAIL_CALL_WAITING) \
883 && (v != SCHEME_EVAL_WAITING) \
884 && (SCHEME_TYPE(v) > (_scheme_last_type_ + 25))) \
885 { Scheme_Object *o = *(Scheme_Object **)(v); \
886 if (SCHEME_TYPE(o) > (_scheme_last_type_ + 25))\
887 scheme_signal_error("bad type"); }
888 #else
889 # define DEBUG_CHECK_TYPE(v) /**/
890 #endif
891
Scheme_Object *_scheme_apply_known_prim_closure_multi(Scheme_Object *rator,
                                                      int argc,
                                                      Scheme_Object **argv)
/* Primitive-closure application; the PRIM_CHECK_* settings configure
   the "schapp.inc" template (here: no arity check, multiple values
   allowed -- see "schapp.inc"). */
{
#define PRIM_CHECK_ARITY 0
#define PRIM_CHECK_MULTI 0
#include "schapp.inc"
}
900
Scheme_Object *_scheme_apply_prim_closure_multi(Scheme_Object *rator,
                                                int argc,
                                                Scheme_Object **argv)
/* Primitive-closure application via the "schapp.inc" template
   (here: arity checked, multiple values allowed). */
{
#define PRIM_CHECK_ARITY 1
#define PRIM_CHECK_MULTI 0
#include "schapp.inc"
}
909
Scheme_Object *_scheme_apply_known_prim_closure(Scheme_Object *rator,
                                                int argc,
                                                Scheme_Object **argv)
/* Primitive-closure application via the "schapp.inc" template
   (here: no arity check, single value enforced). */
{
#define PRIM_CHECK_ARITY 0
#define PRIM_CHECK_MULTI 1
#include "schapp.inc"
}
918
Scheme_Object *_scheme_apply_prim_closure(Scheme_Object *rator,
                                          int argc,
                                          Scheme_Object **argv)
/* Primitive-closure application via the "schapp.inc" template
   (here: arity checked, single value enforced). */
{
#define PRIM_CHECK_ARITY 1
#define PRIM_CHECK_MULTI 1
#include "schapp.inc"
}
927
928
929 #ifdef MZ_USE_JIT
930
931 # define PRIM_APPLY_NAME _scheme_apply_from_native
932 # define PRIM_APPLY_NAME_FAST _scheme_apply_from_native_fast
933 # define PRIM_CHECK_VALUE 1
934 # define PRIM_CHECK_MULTI 1
935 # include "schnapp.inc"
936
937 # define PRIM_APPLY_NAME _scheme_apply_multi_from_native
938 # define PRIM_APPLY_NAME_FAST _scheme_apply_multi_from_native_fast
939 # define PRIM_CHECK_VALUE 1
940 # define PRIM_CHECK_MULTI 0
941 # include "schnapp.inc"
942
943 # define PRIM_APPLY_NAME _scheme_tail_apply_from_native
944 # define PRIM_APPLY_NAME_FAST _scheme_tail_apply_from_native_fast
/* It's ok to call primitives and closed primitives directly,
   since they implement further tail calls by trampolining. */
947 # define PRIM_CHECK_VALUE 0
948 # define PRIM_CHECK_MULTI 0
949 # include "schnapp.inc"
950
951 #endif
952
/* Ensure that `v' is a single value: if it is the multiple-values
   token, raise a wrong-return-arity error (reporting the values
   stashed in the current thread); otherwise return `v' unchanged. */
Scheme_Object *scheme_check_one_value(Scheme_Object *v)
{
  if (v != SCHEME_MULTIPLE_VALUES)
    return v;
  scheme_wrong_return_arity(NULL, 1, scheme_multiple_count, scheme_multiple_array, NULL);
  return v;
}
959
do_eval_k(void)960 static Scheme_Object *do_eval_k(void)
961 {
962 Scheme_Thread *p = scheme_current_thread;
963 Scheme_Object *obj = (Scheme_Object *)p->ku.k.p1;
964 Scheme_Object **argv = (Scheme_Object **)p->ku.k.p2;
965
966 p->ku.k.p1 = NULL;
967 p->ku.k.p2 = NULL;
968
969 return scheme_do_eval(obj,
970 p->ku.k.i1,
971 argv,
972 p->ku.k.i2);
973 }
974
975 #ifdef MZ_USE_JIT
/* Trampoline target for restarting evaluation after a stack overflow
   that originated in native (JITted) code: like do_eval_k(), but the
   argument array may point into the saved runstack segment, in which
   case the arguments are relocated onto the current runstack first. */
static Scheme_Object *do_eval_native_k(void)
{
  /* If argv corresponds to old runstack, copy to new runstack
     and clear old argv for space safety. */
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Object **argv = (Scheme_Object **)p->ku.k.p2;

  if (argv == (p->runstack_saved->runstack_start
               + p->runstack_saved->runstack_offset)) {
    int argc = p->ku.k.i1;
    MZ_RUNSTACK -= argc;
    memcpy(MZ_RUNSTACK, argv, argc * sizeof(Scheme_Object*));
    memset(argv, 0, argc * sizeof(Scheme_Object*)); /* space safety: drop old refs */
    p->ku.k.p2 = MZ_RUNSTACK;
  }

  return do_eval_k();
}
994 #endif
995
/* Signal an unbound-variable error for the toplevel reference `obj':
   fetch its prefix from the runstack, extract the referenced bucket,
   and hand it to scheme_unbound_global() to raise the error. */
static void unbound_global(Scheme_Object *obj)
{
  Scheme_Prefix *pf;

  pf = (Scheme_Prefix *)MZ_RUNSTACK[SCHEME_TOPLEVEL_DEPTH(obj)];
  scheme_unbound_global((Scheme_Bucket *)pf->a[SCHEME_TOPLEVEL_POS(obj)]);
}
1005
make_tail_buffer_safe()1006 static void make_tail_buffer_safe()
1007 {
1008 scheme_realloc_tail_buffer(scheme_current_thread);
1009 }
1010
/* If the argument array `rands' aliases the runstack top, move the
   arguments out of the runstack so the caller can safely reuse that
   space; otherwise return `rands' unchanged. The evacuation is done
   by a dummy tail call to `void', which stashes a copy of the
   arguments in p->ku.apply.tail_rands. */
static Scheme_Object **evacuate_runstack(int num_rands, Scheme_Object **rands, Scheme_Object **runstack)
{
  if (rands == runstack) {
    /* See [TC-SFS] in "schnapp.inc" */
    Scheme_Thread *p = scheme_current_thread;
    (void)scheme_tail_apply(scheme_void, num_rands, rands);
    rands = p->ku.apply.tail_rands;
    p->ku.apply.tail_rands = NULL; /* space safety */
    return rands;
  } else
    return rands;
}
1023
/* Trampoline target used by do_eval_stack_overflow(): restore the
   chaperone self-reference saved in the k-record and undo the
   temporary continuation-mark-position bump before re-entering the
   evaluator via do_eval_k(). */
static Scheme_Object *do_eval_k_readjust_mark(void)
{
  Scheme_Thread *p = scheme_current_thread;
  p->self_for_proc_chaperone = p->ku.k.p3;
  MZ_CONT_MARK_POS -= 2; /* undo increment in do_eval_stack_overflow() */
  return do_eval_k();
}
1031
/* Handle a C-stack overflow during evaluation: stash the expression,
   arguments, and `get_value' flag in the current thread's k-record,
   then transfer control to the overflow handler, which will restart
   via do_eval_k_readjust_mark() on a fresh stack segment. */
static Scheme_Object *do_eval_stack_overflow(Scheme_Object *obj, int num_rands, Scheme_Object **rands,
                                             int get_value)
{
  Scheme_Thread *p = scheme_current_thread;

  p->ku.k.p1 = (void *)obj;
  p->ku.k.i1 = num_rands;
  if (num_rands >= 0) {
    /* Copy rands: */
    GC_CAN_IGNORE void *ra;
    if (rands == p->tail_buffer)
      /* the tail buffer may be reused before the restart */
      make_tail_buffer_safe();
    ra = (void *)MALLOC_N(Scheme_Object*, num_rands);
    p->ku.k.p2 = ra;
    {
      int i;
      for (i = num_rands; i--; ) {
        ((Scheme_Object **)ra)[i] = rands[i];
      }
    }
  } else
    p->ku.k.p2 = (void *)rands;
  p->ku.k.i2 = get_value;

  p->ku.k.p3 = p->self_for_proc_chaperone;
  p->self_for_proc_chaperone = NULL;

  /* In case we got here via scheme_force_value_same_mark(), in case
     overflow handling causes the thread to sleep, and in case another
     thread tries to get this thread's continuation marks: ensure that
     the mark pos is not below any current mark. */
  MZ_CONT_MARK_POS += 2;

  return scheme_handle_stack_overflow(do_eval_k_readjust_mark);
}
1067
/* Compute the intersection of two dynamic-wind chains: the deepest
   record shared by `a' (the current chain) and `b' (a continuation's
   chain), looking no deeper in `a' than a record tagged with
   `prompt_tag'. `b_has_tag' says whether `b' ends at such a record.
   On return, *_common_depth is the depth in `b' of the matched
   record (or one less than the part of `b' below the prompt). */
static Scheme_Dynamic_Wind *intersect_dw(Scheme_Dynamic_Wind *a, Scheme_Dynamic_Wind *b,
                                         Scheme_Object *prompt_tag, int b_has_tag, int *_common_depth)
{
  int alen = 0, blen = 0;
  int a_has_tag = 0, a_prompt_delta = 0, b_prompt_delta = 0;
  Scheme_Dynamic_Wind *dw, *match_a, *match_b;

  /* Find the prompt-tagged record in `a', if any. */
  for (dw = a; dw && (dw->prompt_tag != prompt_tag); dw = dw->prev) {
  }
  if (dw) {
    /* Cut off `a' below the prompt dw. */
    a_prompt_delta = dw->depth;
    a_has_tag = 1;
  }

  if (a_has_tag)
    a_prompt_delta += 1;
  if (b_has_tag)
    b_prompt_delta += 1;

  /* Chain lengths above the prompt cut-off (depth is 0-based). */
  alen = (a ? a->depth + 1 : 0) - a_prompt_delta;
  blen = (b ? b->depth + 1 : 0) - b_prompt_delta;

  /* Trim the longer chain so both have equal length. */
  while (alen > blen) {
    --alen;
    a = a->prev;
  }
  if (!alen) {
    *_common_depth = b_prompt_delta - 1;
    return a;
  }
  while (blen > alen) {
    --blen;
    b = b->prev;
  }

  /* At this point, we have chains that are the same length. */
  /* Walk both chains in lockstep; `match_a'/`match_b' track the
     shallowest record of the current run of pairwise matches, which
     becomes the intersection point if the run extends to the base. */
  match_a = NULL;
  match_b = NULL;
  while (blen) {
    if (SAME_OBJ(a->id ? a->id : (Scheme_Object *)a,
                 b->id ? b->id : (Scheme_Object *)b)) {
      if (!match_a) {
        match_a = a;
        match_b = b;
      }
    } else {
      match_a = NULL;
      match_b = NULL;
    }
    a = a->prev;
    b = b->prev;
    blen--;
  }

  if (!match_a) {
    /* No match anywhere: common part is below both chains. */
    match_a = a;
    match_b = b;
  }

  *_common_depth = (match_b ? match_b->depth : -1);

  return match_a;
}
1132
/* Find the prompt in the current continuation that corresponds to
   continuation `c''s prompt tag. Reports the prompt's enclosing
   meta-continuation and mark position through the out-parameters.
   Raises a contract error (using `msg') if no such prompt is found
   and the tag is not the default one (the default tag is backed by a
   pseudo-prompt, so a missing prompt is allowed in that case). */
static Scheme_Prompt *lookup_cont_prompt(Scheme_Cont *c,
                                         Scheme_Meta_Continuation **_prompt_mc,
                                         MZ_MARK_POS_TYPE *_prompt_pos,
                                         const char *msg)
{
  Scheme_Prompt *prompt;
  Scheme_Object *pt;

  if (!c->runstack_copied)
    /* This continuation is the same as another... */
    c = c->buf_ptr->buf.cont;

  pt = c->prompt_tag;
  if (SCHEME_NP_CHAPERONEP(pt))
    pt = SCHEME_CHAPERONE_VAL(pt); /* use the underlying tag, not the chaperone */

  prompt = scheme_get_prompt(SCHEME_PTR_VAL(pt), _prompt_mc, _prompt_pos);
  if (!prompt && !SAME_OBJ(scheme_default_prompt_tag, pt)) {
    scheme_raise_exn(MZEXN_FAIL_CONTRACT_CONTINUATION, msg);
  }

  return prompt;
}
1156
1157 #define LOOKUP_NO_PROMPT "continuation application: no corresponding prompt in the current continuation"
1158
/* Check whether jumping to continuation `c' would cross a
   continuation barrier, raising a contract error if so. `prompt',
   `prompt_cont', and `prompt_pos' identify the destination prompt
   (from lookup_cont_prompt()). Returns the current barrier prompt. */
static Scheme_Prompt *check_barrier(Scheme_Prompt *prompt,
                                    Scheme_Meta_Continuation *prompt_cont, MZ_MARK_POS_TYPE prompt_pos,
                                    Scheme_Cont *c)
/* A continuation barrier is analogous to a dynamic-wind. A jump is
   allowed if no dynamic-wind-like pre-thunks would be executed for
   the jump. */
{
  Scheme_Prompt *barrier_prompt, *b1, *b2;
  Scheme_Meta_Continuation *barrier_cont;
  MZ_MARK_POS_TYPE barrier_pos;

  /* b1 = nearest relevant barrier in the current continuation */
  barrier_prompt = scheme_get_barrier_prompt(&barrier_cont, &barrier_pos);
  b1 = barrier_prompt;
  if (b1) {
    if (!b1->is_barrier)
      b1 = NULL;
    else if (prompt
             && scheme_is_cm_deeper(barrier_cont, barrier_pos,
                                    prompt_cont, prompt_pos))
      /* the barrier is deeper than the target prompt, so the jump
         does not cross it */
      b1 = NULL;
  }
  /* b2 = barrier captured in the continuation being applied */
  b2 = c->barrier_prompt;
  if (b2) {
    if (!b2->is_barrier)
      b2 = NULL;
  }

  if (b2 && (b1 != b2)) {
    scheme_raise_exn(MZEXN_FAIL_CONTRACT_CONTINUATION,
                     "continuation application: attempt to cross a continuation barrier");
  }

  return barrier_prompt;
}
1193
void scheme_recheck_prompt_and_barrier(Scheme_Cont *c)
/* Check for prompt & barrier, again. We need to
   call this function like a d-w thunk, so that the meta
   continuation is right in case of an error. */
{
  Scheme_Meta_Continuation *mc;
  MZ_MARK_POS_TYPE pos;
  Scheme_Prompt *prompt;

  prompt = lookup_cont_prompt(c, &mc, &pos,
                              LOOKUP_NO_PROMPT
                              " on return from `dynamic-wind' post thunk");
  /* Only the error-raising side effect of the check matters here. */
  (void)check_barrier(prompt, mc, pos, c);
}
1207
/* Run the post thunks of all dynamic-wind records between the current
   chain (p->dw) and `common', the already-computed intersection with
   continuation `c''s chain. Because a post thunk can itself capture
   or apply continuations, the chain and the intersection may need to
   be recomputed along the way; the (possibly updated) intersection is
   reported through *_common, and the (possibly updated) common depth
   is returned. */
static int exec_dyn_wind_posts(Scheme_Dynamic_Wind *common, Scheme_Cont *c, int common_depth,
                               Scheme_Dynamic_Wind **_common)
{
  int meta_depth;
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Dynamic_Wind *dw;
  int old_cac = scheme_continuation_application_count;
  Scheme_Object *pt;

  *_common = common;

  for (dw = p->dw;
       (common ? dw->depth != common->depth : dw != common); /* not id, which may be duplicated */
       ) {
    meta_depth = p->next_meta;
    p->next_meta += dw->next_meta;
    p->dw = dw->prev; /* pop before running the post thunk */
    if (dw->post) {
      if (meta_depth > 0) {
        /* post thunk belongs to an enclosing meta-continuation */
        scheme_apply_dw_in_meta(dw, 1, meta_depth, c);
      } else {
        DW_PrePost_Proc post = dw->post;

        /* restore the marks that were in place when the wind was set up */
        MZ_CONT_MARK_POS = dw->envss.cont_mark_pos;
        MZ_CONT_MARK_STACK = dw->envss.cont_mark_stack;
        post(dw->data);

        if (scheme_continuation_application_count != old_cac) {
          scheme_recheck_prompt_and_barrier(c);
        }
      }
      p = scheme_current_thread;
      /* p->dw might not match dw if the post thunk captures a
         continuation that is later restored in a different
         meta continuation: */
      dw = p->dw;

      /* If any continuations were applied, then the set of dynamic
         winds may be different now than before. Re-compute the
         intersection. */
      if (scheme_continuation_application_count != old_cac) {
        old_cac = scheme_continuation_application_count;

        pt = c->prompt_tag;
        if (SCHEME_NP_CHAPERONEP(pt))
          pt = SCHEME_CHAPERONE_VAL(pt);

        common = intersect_dw(p->dw, c->dw, pt, c->has_prompt_dw, &common_depth);
        *_common = common;
      }
    } else
      dw = dw->prev;
  }
  return common_depth;
}
1263
/* Apply the continuation `obj' to the `num_rands' arguments in
   `rands'. A composable continuation is entered through
   scheme_compose_continuation(); an aborting continuation first runs
   the intervening dynamic-wind post thunks and then longjmps to the
   captured stack (directly, through saved overflow records, or
   through a meta-continuation, depending on where its prompt lives).
   `can_ec' permits the escape-continuation shortcut. Except for the
   composable case, this function does not return normally. */
Scheme_Object *scheme_jump_to_continuation(Scheme_Object *obj, int num_rands, Scheme_Object **rands,
                                           Scheme_Object **old_runstack, int can_ec)
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Cont *c;
  Scheme_Dynamic_Wind *common, *new_common;
  Scheme_Object *value;
  Scheme_Meta_Continuation *prompt_mc;
  MZ_MARK_POS_TYPE prompt_pos;
  Scheme_Prompt *prompt, *barrier_prompt;
  int common_depth;

  /* Since scheme_escape_continuation_ok() may allocate... */
  if (rands == p->tail_buffer)
    make_tail_buffer_safe();

  c = (Scheme_Cont *)obj;

  /* Shortcut: if the target continuation is an extension of the current
     continuation, and if no prompt is in the way, then escape directly. */
  if (can_ec
      && c->escape_cont
      && scheme_escape_continuation_ok(c->escape_cont)) {
    prompt = lookup_cont_prompt(c, &prompt_mc, &prompt_pos, LOOKUP_NO_PROMPT);
    if (!prompt || (prompt->id
                    && (prompt->id == c->prompt_id)
                    && !prompt_mc))
      scheme_escape_to_continuation(c->escape_cont, num_rands, rands, (Scheme_Object *)c);
  }

  /* Package the arguments as a single `value' (an array for != 1). */
  if (num_rands != 1) {
    GC_CAN_IGNORE Scheme_Object **vals;
    int i;

    vals = MALLOC_N(Scheme_Object *, num_rands);
    for (i = num_rands; i--; ) {
      vals[i] = rands[i];
    }

    value = (Scheme_Object *)vals;
  } else
    value = rands[0];

  DO_CHECK_FOR_BREAK(p, ;);

  if (!c->runstack_copied) {
    /* This continuation is the same as another, except
       that its mark stack is different. The different part
       of the mark stack won't be visible, so we use the other. */
    c = c->buf_ptr->buf.cont;
  }

  if (c->composable) {
    /* Composable continuation. Jump right in... */
    scheme_continuation_application_count++;
    MZ_RUNSTACK = old_runstack;
    return scheme_compose_continuation(c, num_rands, value);
  } else {
    /* Aborting (Scheme-style) continuation. */
    int orig_cac = scheme_continuation_application_count;
    Scheme_Overflow *thread_end_oflow;
    Scheme_Object *pt;

    scheme_about_to_move_C_stack();

    prompt = lookup_cont_prompt(c, &prompt_mc, &prompt_pos, LOOKUP_NO_PROMPT);
    barrier_prompt = check_barrier(prompt, prompt_mc, prompt_pos, c);

    p->suspend_break++; /* restored at call/cc destination */

    pt = c->prompt_tag;
    if (SCHEME_NP_CHAPERONEP(pt))
      pt = SCHEME_CHAPERONE_VAL(pt);

    /* Find `common', the intersection of dynamic-wind chain for
       the current continuation and the given continuation, looking
       no further back in the current continuation than a prompt. */
    common = intersect_dw(p->dw, c->dw, pt, c->has_prompt_dw, &common_depth);

    /* For dynamic-winds after `common' in this
       continuation, execute the post-thunks */
    common_depth = exec_dyn_wind_posts(common, c, common_depth, &new_common);
    p = scheme_current_thread; /* post thunks may have changed the thread record */

    if (orig_cac != scheme_continuation_application_count) {
      /* We checked for a barrier in exec_dyn_wind_posts, but
         get prompt & barrier again. */
      prompt = lookup_cont_prompt(c, &prompt_mc, &prompt_pos, "shouldn't fail!");
      barrier_prompt = scheme_get_barrier_prompt(NULL, NULL);
      common = new_common;
    }

    c->common_dw_depth = common_depth;

    /* in case we need it (since no allocation allowed later): */
    thread_end_oflow = scheme_get_thread_end_overflow();

    if (num_rands == 1)
      c->value = value;
    else {
      GC_CAN_IGNORE Scheme_Object *vals;
      vals = scheme_values(num_rands, (Scheme_Object **)value);
      if (SAME_OBJ(p->ku.multiple.array, p->values_buffer))
        p->values_buffer = NULL;
      c->value = vals;
    }

    /* !! No allocation or GCs allowed from here to the longjmp() !! */

    c->common_dw = common;
    c->common_next_meta = p->next_meta;

    scheme_continuation_application_count++;

    if (!prompt) {
      /* Invoke the continuation directly. If there's no prompt,
         then the prompt's job is taken by the pseudo-prompt
         created with a new thread or a barrier prompt. */
      p->meta_continuation = NULL; /* since prompt wasn't in any meta-continuation */
      p->meta_prompt = NULL;
      p->acting_barrier_prompt = NULL;
      if ((c->barrier_prompt == barrier_prompt) && barrier_prompt) {
        /* Barrier determines continuation end. */
        c->resume_to = NULL;
        p->stack_start = c->stack_start;
      } else {
        /* Prompt is pseudo-prompt at thread beginning.
           We're effectively composing the continuation,
           so use its prompt stack start. */
        c->resume_to = thread_end_oflow;
        p->stack_start = c->prompt_stack_start;
      }
      scheme_longjmpup(&c->buf_ptr->buf);
    } else if (prompt->id
               && (prompt->id == c->prompt_id)
               && !prompt_mc) {
      /* The current prompt is the same as the one in place when
         capturing the continuation, so we can jump directly. */
      scheme_drop_prompt_meta_continuations(pt);
      c->shortcut_prompt = prompt;
      if ((!prompt->boundary_overflow_id && !p->overflow)
          || (prompt->boundary_overflow_id
              && (prompt->boundary_overflow_id == p->overflow->id))) {
        scheme_longjmpup(&c->buf_ptr->buf);
      } else {
        /* Need to unwind overflows... */
        Scheme_Overflow *overflow;
        overflow = p->overflow;
        while (overflow->prev
               && (!overflow->prev->id
                   || (overflow->prev->id != prompt->boundary_overflow_id))) {
          overflow = overflow->prev;
        }
        /* Immediate destination is in scheme_handle_stack_overflow(). */
        p->cjs.jumping_to_continuation = (Scheme_Object *)c;
        p->cjs.alt_full_continuation = NULL;
        p->overflow = overflow;
        p->stack_start = overflow->stack_start;
        p->cjs.skip_dws = 0;
        scheme_longjmpup(&overflow->jmp->cont);
      }
    } else {
      /* The prompt is different than when we captured the continuation,
         so we need to compose the continuation with the current prompt. */
      p->cjs.jumping_to_continuation = (Scheme_Object *)prompt;
      p->cjs.alt_full_continuation = NULL;
      p->cjs.num_vals = 1;
      p->cjs.val = (Scheme_Object *)c;
      p->cjs.is_escape = 1;
      p->cjs.skip_dws = 0;

      if (prompt_mc) {
        /* The prompt is from a meta-continuation that's different
           from the current one. Jump to the meta-continuation
           and continue from there. Immediate destination is
           in compose_continuation() in fun.c; the ultimate
           destination is in scheme_finish_apply_for_prompt()
           in fun.c.
           We need to adjust the meta-continuation offsets in
           common, based on the number that we're discarding
           here. */
        {
          Scheme_Meta_Continuation *xmc;
          int offset = 1;
          for (xmc = p->meta_continuation;
               xmc->prompt_tag != prompt_mc->prompt_tag;
               xmc = xmc->next) {
            if (xmc->overflow)
              offset++;
          }
          c->common_next_meta -= offset;
        }
        p->meta_continuation = prompt_mc->next;
        p->stack_start = prompt_mc->overflow->stack_start;
        p->decompose_mc = prompt_mc;
        scheme_longjmpup(&prompt_mc->overflow->jmp->cont);
      } else if ((!prompt->boundary_overflow_id && !p->overflow)
                 || (prompt->boundary_overflow_id
                     && (prompt->boundary_overflow_id == p->overflow->id))) {
        /* Jump directly to the prompt: destination is in
           scheme_finish_apply_for_prompt() in fun.c. */
        if (!p->meta_continuation)
          scheme_signal_error("internal error: no meta-cont for escape");
        if (p->meta_continuation->pseudo)
          scheme_signal_error("internal error: trying to jump to a prompt in a meta-cont"
                              " that starts with a pseudo prompt");
        scheme_drop_prompt_meta_continuations(pt);
        scheme_longjmp(*prompt->prompt_buf, 1);
      } else {
        /* Need to unwind overflows to get to the prompt. */
        Scheme_Overflow *overflow;
        scheme_drop_prompt_meta_continuations(pt);
        overflow = p->overflow;
        while (overflow->prev
               && (!overflow->prev->id
                   || (overflow->prev->id != prompt->boundary_overflow_id))) {
          overflow = overflow->prev;
        }
        /* Immediate destination is in scheme_handle_stack_overflow().
           Ultimate destination is in scheme_finish_apply_for_prompt()
           in fun.c. */
        p->overflow = overflow;
        p->stack_start = overflow->stack_start;
        scheme_longjmpup(&overflow->jmp->cont);
      }
    }
    return NULL; /* not reached: all paths above longjmp */
  }
}
1493
/* Apply the escape continuation `obj' to the given arguments: record
   the value(s) and jump target in the thread's escape record, then
   longjmp to the thread's error buffer. `alt_full' is a full
   continuation associated with the escape, recorded for the jump
   target. Raises a contract error if `obj' is not still a valid
   escape target. Does not return. */
void scheme_escape_to_continuation(Scheme_Object *obj, int num_rands, Scheme_Object **rands, Scheme_Object *alt_full)
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Object *value;

  if (num_rands != 1) {
    GC_CAN_IGNORE Scheme_Object **vals;
    int i;

    if (rands == p->tail_buffer)
      /* MALLOC_N below may trigger a GC */
      make_tail_buffer_safe();

    vals = MALLOC_N(Scheme_Object *, num_rands);
    for (i = num_rands; i--; ) {
      vals[i] = rands[i];
    }

    value = (Scheme_Object *)vals;
    p->cjs.num_vals = num_rands;
  } else {
    value = rands[0];
    p->cjs.num_vals = 1;
  }

  if (!scheme_escape_continuation_ok(obj)) {
    scheme_raise_exn(MZEXN_FAIL_CONTRACT_CONTINUATION,
                     "continuation application: attempt to jump into an escape continuation");
  }

  p->cjs.val = value;
  p->cjs.jumping_to_continuation = obj;
  p->cjs.alt_full_continuation = alt_full;
  p->cjs.skip_dws = 0;
  scheme_longjmp(MZTHREADELEM(p, error_buf), 1);
}
1529
1530 /*========================================================================*/
1531 /* evaluation of various forms */
1532 /*========================================================================*/
1533
1534 #define CANNOT_SET_ERROR_STR "assignment disallowed"
1535
/* Assign `val' to the global-variable bucket `b' on behalf of the
   form named by `who' (e.g. "set!" or "define-values"). The
   assignment is refused -- with a contract-violation error -- when
   the variable is undefined (unless `set_undef'), when the bucket is
   marked immutable (a constant), or when `val' is NULL (an undefine)
   and the variable is linked from other modules. */
void scheme_set_global_bucket(char *who, Scheme_Bucket *b, Scheme_Object *val,
                              int set_undef)
{
  if ((b->val || set_undef)
      && ((b->so.type != scheme_variable_type)
          || !(((Scheme_Bucket_With_Flags *)b)->flags & GLOB_IS_IMMUTATED))
      && (val || !(((Scheme_Bucket_With_Flags *)b)->flags & GLOB_IS_LINKED)))
    b->val = val;
  else {
    Scheme_Instance *home;
    home = scheme_get_bucket_home(b);
    if (home) {
      /* Module-level variable: include the module name in the error. */
      const char *msg;
      int is_set;

      if (SCHEME_TRUEP(scheme_get_param(scheme_current_config(), MZCONFIG_ERROR_PRINT_SRCLOC)))
        msg = ("%s: " CANNOT_SET_ERROR_STR ";\n"
               " cannot %s\n"
               " %s: %S\n"
               " in module: %D");
      else
        msg = ("%s: " CANNOT_SET_ERROR_STR ";\n"
               " cannot %s\n"
               " %s: %S");

      is_set = !strcmp(who, "set!");

      scheme_raise_exn(MZEXN_FAIL_CONTRACT_VARIABLE, b->key,
                       msg,
                       who,
                       (b->val
                        ? (!val
                           ? "undefine variable that is used by other modules"
                           : (is_set
                              ? "modify a constant"
                              : "re-define a constant"))
                        : "set variable before its definition"),
                       (b->val
                        ? (!val
                           ? "variable"
                           : "constant")
                        : "variable"),
                       (Scheme_Object *)b->key,
                       home->name);
    } else {
      scheme_raise_exn(MZEXN_FAIL_CONTRACT_VARIABLE, b->key,
                       "%s: " CANNOT_SET_ERROR_STR ";\n"
                       " cannot %s\n"
                       " %s: %S",
                       who,
                       (val
                        ? (b->val ? "change constant" : "set undefined")
                        : "undefine"),
                       (val
                        ? (b->val ? "constant" : "variable")
                        : "variable"),
                       (Scheme_Object *)b->key);
    }
  }
}
1596
/* Install `v' as a macro: wrap it in a macro record and make that
   record the value of bucket `b'. */
void scheme_install_macro(Scheme_Bucket *b, Scheme_Object *v)
{
  Scheme_Object *m;

  m = scheme_alloc_small_object();
  m->type = scheme_macro_type;
  SCHEME_PTR_VAL(m) = v;

  b->val = m;
}
1607
/* Execute a linked `define-values' form. `vec' holds the
   right-hand-side expression at slot 0 and the defined toplevel
   variables in the remaining slots. Evaluates the RHS, checks that
   the number of results matches the number of variables, installs
   each result in its global bucket, and seals buckets as immutable
   (and possibly "consistent") when the compiler marked them sealable.
   Returns void on success; raises a wrong-return-arity error on a
   count mismatch. */
static Scheme_Object *define_values_execute(Scheme_Object *vec)
{
  Scheme_Object *name, *vals_expr, *vals, *var;
  int delta = 1; /* variables start at slot 1 of `vec' */
  int i, g, show_any;
  Scheme_Bucket *b;

  vals_expr = SCHEME_VEC_ELS(vec)[0];

  vals = _scheme_eval_linked_expr_multi(vals_expr);

  if (SAME_OBJ(vals, SCHEME_MULTIPLE_VALUES)) {
    /* RHS produced multiple values (stashed in the thread record). */
    Scheme_Object **values;

    i = SCHEME_VEC_SIZE(vec) - delta;

    g = scheme_current_thread->ku.multiple.count;
    if (i == g) {
      int is_st;

      values = scheme_current_thread->ku.multiple.array;
      if (SAME_OBJ(values, scheme_current_thread->values_buffer))
        scheme_current_thread->values_buffer = NULL;
      scheme_current_thread->ku.multiple.array = NULL;

      /* A simple `make-struct-type' (or property) result lets the
         buckets additionally be marked GLOB_IS_CONSISTENT. */
      is_st = !!scheme_is_simple_make_struct_type(vals_expr, g, CHECK_STRUCT_TYPE_RESOLVED,
                                                  NULL, NULL, NULL, NULL,
                                                  NULL, MZ_RUNSTACK, 0,
                                                  NULL, NULL, 5);
      if (!is_st)
        is_st = scheme_is_simple_make_struct_type_property(vals_expr, g, CHECK_STRUCT_TYPE_RESOLVED,
                                                           NULL, NULL, NULL, MZ_RUNSTACK, 0,
                                                           NULL, 5);

      for (i = 0; i < g; i++) {
        Scheme_Prefix *toplevels;

        /* Resolve the i'th variable to its bucket. */
        var = SCHEME_VEC_ELS(vec)[i+delta];
        if (SAME_TYPE(SCHEME_TYPE(var), scheme_toplevel_type)) {
          toplevels = (Scheme_Prefix *)MZ_RUNSTACK[SCHEME_TOPLEVEL_DEPTH(var)];
          b = (Scheme_Bucket *)toplevels->a[SCHEME_TOPLEVEL_POS(var)];
        } else
          b = (Scheme_Bucket *)SCHEME_STATIC_TOPLEVEL_PREFIX(var)->a[SCHEME_TOPLEVEL_POS(var)];

        scheme_set_global_bucket("define-values", b, values[i], 1);

        if (SCHEME_TOPLEVEL_FLAGS(var) & SCHEME_TOPLEVEL_SEAL) {
          if (is_st)
            ((Scheme_Bucket_With_Flags *)b)->flags |= (GLOB_IS_IMMUTATED | GLOB_IS_CONSISTENT);
          else
            ((Scheme_Bucket_With_Flags *)b)->flags |= GLOB_IS_IMMUTATED;
        }
      }

      return scheme_void;
    } else {
      /* Arity mismatch: fall through to the error below, but first
         release the shared values buffer. */
      if (SAME_OBJ(scheme_current_thread->ku.multiple.array, scheme_current_thread->values_buffer))
        scheme_current_thread->values_buffer = NULL;
    }
  } else if (SCHEME_VEC_SIZE(vec) == delta + 1) { /* => single var */
    Scheme_Prefix *toplevels;

    var = SCHEME_VEC_ELS(vec)[delta];
    if (SAME_TYPE(SCHEME_TYPE(var), scheme_toplevel_type)) {
      toplevels = (Scheme_Prefix *)MZ_RUNSTACK[SCHEME_TOPLEVEL_DEPTH(var)];
      b = (Scheme_Bucket *)toplevels->a[SCHEME_TOPLEVEL_POS(var)];
    } else
      b = (Scheme_Bucket *)SCHEME_STATIC_TOPLEVEL_PREFIX(var)->a[SCHEME_TOPLEVEL_POS(var)];

    scheme_set_global_bucket("define-values", b, vals, 1);

    if (SCHEME_TOPLEVEL_FLAGS(var) & SCHEME_TOPLEVEL_SEAL) {
      int flags = GLOB_IS_IMMUTATED;
      if (scheme_is_statically_proc(vals_expr, NULL, OMITTABLE_RESOLVED)
          || (SCHEME_TYPE(vals_expr) >= _scheme_values_types_))
        flags |= GLOB_IS_CONSISTENT;
      ((Scheme_Bucket_With_Flags *)b)->flags |= flags;
    }

    return scheme_void;
  } else
    g = 1; /* single value produced, but multiple variables expected */

  /* Error path: report the wrong-return-arity. */
  i = SCHEME_VEC_SIZE(vec) - delta;

  show_any = i;

  if (show_any) {
    Scheme_Prefix *toplevels;
    var = SCHEME_VEC_ELS(vec)[delta];
    toplevels = (Scheme_Prefix *)MZ_RUNSTACK[SCHEME_TOPLEVEL_DEPTH(var)];
    b = (Scheme_Bucket *)toplevels->a[SCHEME_TOPLEVEL_POS(var)];
    name = (Scheme_Object *)b->key;
  } else
    name = NULL;

  {
    const char *symname;

    symname = (show_any ? scheme_symbol_name(name) : "");

    scheme_wrong_return_arity("define-values",
                              i, g,
                              (g == 1) ? (Scheme_Object **)vals : scheme_current_thread->ku.multiple.array,
                              "\n in: %s%s%s",
                              show_any ? "definition of " : "definition of 0 identifiers",
                              symname,
                              show_any ? ((i == 1) ? "" : " ...") : "");
  }

  return NULL; /* not reached: scheme_wrong_return_arity raises */
}
1720
/* Execute a linked `set!' form on a toplevel variable: evaluate the
   right-hand side, resolve the target variable's bucket (through the
   runstack prefix or a static prefix), and assign via
   scheme_set_global_bucket(). Returns void. */
static Scheme_Object *set_execute (Scheme_Object *data)
{
  Scheme_Set_Bang *sb = (Scheme_Set_Bang *)data;
  Scheme_Object *val;
  Scheme_Bucket *var;
  Scheme_Prefix *toplevels;

  val = _scheme_eval_linked_expr(sb->val);

  if (SAME_TYPE(SCHEME_TYPE(sb->var), scheme_toplevel_type)) {
    toplevels = (Scheme_Prefix *)MZ_RUNSTACK[SCHEME_TOPLEVEL_DEPTH(sb->var)];
    var = (Scheme_Bucket *)toplevels->a[SCHEME_TOPLEVEL_POS(sb->var)];
  } else
    var = (Scheme_Bucket *)SCHEME_STATIC_TOPLEVEL_PREFIX(sb->var)->a[SCHEME_TOPLEVEL_POS(sb->var)];

  scheme_set_global_bucket("set!", var, val, sb->set_undef);

  return scheme_void;
}
1740
/* Execute a linked `#%variable-reference' form: build a fresh
   global-ref object whose first slot records the referenced variable
   (a bucket, a symbol, #t, or #f) and whose second slot records the
   home instance of the anchor toplevel, then copy the varref flags
   from the static form onto the new object.
   Fix: the flag-copy line had its operands swapped
   (`SCHEME_VARREF_FLAGS(data) |= ...(o)...'), which OR'ed the fresh
   object's (empty) flags into the shared bytecode form -- mutating
   the form and leaving the returned object without its flags. The
   copy must flow from `data' to `o'. */
static Scheme_Object *ref_execute (Scheme_Object *data)
{
  Scheme_Prefix *toplevels;
  Scheme_Object *o;
  Scheme_Object *var;
  Scheme_Object *tl;
  Scheme_Instance *home;

  /* Resolve the referenced variable. */
  tl = SCHEME_PTR1_VAL(data);
  if (SCHEME_FALSEP(tl))
    var = NULL;
  else if (SCHEME_SYMBOLP(tl) || SAME_OBJ(tl, scheme_true))
    var = tl;
  else {
    toplevels = (Scheme_Prefix *)MZ_RUNSTACK[SCHEME_TOPLEVEL_DEPTH(tl)];
    var = toplevels->a[SCHEME_TOPLEVEL_POS(tl)];
  }

  /* Resolve the home instance from the anchor toplevel, if any. */
  tl = SCHEME_PTR2_VAL(data);
  if (SCHEME_FALSEP(tl))
    home = NULL;
  else {
    toplevels = (Scheme_Prefix *)MZ_RUNSTACK[SCHEME_TOPLEVEL_DEPTH(tl)];
    o = toplevels->a[SCHEME_TOPLEVEL_POS(tl)];
    home = scheme_get_bucket_home((Scheme_Bucket *)o);
  }

  o = scheme_alloc_object();
  o->type = scheme_global_ref_type;
  SCHEME_PTR1_VAL(o) = (var ? var : scheme_false);
  SCHEME_PTR2_VAL(o) = (home ? (Scheme_Object *)home : scheme_false);

  /* Propagate flags from the static form to the runtime object. */
  SCHEME_VARREF_FLAGS(o) |= (SCHEME_VARREF_FLAGS(data) & VARREF_FLAGS_MASK);

  return o;
}
1777
/* Execute a linked `call-with-values'-style form: evaluate the
   procedure expression (slot 1), check that it is a procedure,
   evaluate the producer expression (slot 2) in multiple-values
   context, and tail-apply the procedure to the result(s). */
static Scheme_Object *apply_values_execute(Scheme_Object *data)
{
  Scheme_Object *f, *v;

  f = SCHEME_PTR1_VAL(data);

  f = _scheme_eval_linked_expr(f);
  if (!SCHEME_PROCP(f)) {
    Scheme_Object *a[1];
    a[0] = f;
    scheme_wrong_contract("call-with-values", "procedure?", -1, 1, a);
    return NULL; /* not reached: scheme_wrong_contract raises */
  }

  v = _scheme_eval_linked_expr_multi(SCHEME_PTR2_VAL(data));
  if (SAME_OBJ(v, SCHEME_MULTIPLE_VALUES)) {
    Scheme_Thread *p = scheme_current_thread;
    Scheme_Object **rands;
    int num_rands = p->ku.multiple.count;

    if (num_rands > p->tail_buffer_size) {
      /* scheme_tail_apply will allocate */
      if (SAME_OBJ(p->ku.multiple.array, p->values_buffer))
        p->values_buffer = NULL;
    }
    rands = p->ku.multiple.array;
    p->ku.multiple.array = NULL; /* space safety */
    return scheme_tail_apply(f, num_rands, rands);
  } else {
    /* Single value: apply directly. */
    Scheme_Object *a[1];
    a[0] = v;
    return scheme_tail_apply(f, 1, a);
  }
}
1812
/* Instantiate a `case-lambda' form at run time: close each clause
   over the current environment. If the form was JIT-compiled, build
   a native case-closure whose clauses capture their free variables
   from the runstack; otherwise build an interpreted case-closure via
   scheme_make_closure() for each non-empty clause. */
Scheme_Object *
scheme_case_lambda_execute(Scheme_Object *expr)
{
  Scheme_Case_Lambda *seqin, *seqout;
  int i, cnt;
  Scheme_Thread *p = scheme_current_thread;

  seqin = (Scheme_Case_Lambda *)expr;

#ifdef MZ_USE_JIT
  if (seqin->native_code) {
    /* JITted variant: capture closure values into native closures. */
    Scheme_Native_Lambda *ndata;
    Scheme_Native_Closure *nc, *na;
    Scheme_Lambda *data;
    Scheme_Object *val;
    GC_CAN_IGNORE Scheme_Object **runstack;
    GC_CAN_IGNORE mzshort *map;
    int j, jcnt;

    ndata = seqin->native_code;
    nc = (Scheme_Native_Closure *)scheme_make_native_case_closure(ndata);

    cnt = seqin->count;
    for (i = 0; i < cnt; i++) {
      val = seqin->array[i];
      if (!SCHEME_PROCP(val)) {
        /* Not yet a closure: capture the clause's free variables
           from the runstack, per its closure map. */
        data = (Scheme_Lambda *)val;
        na = (Scheme_Native_Closure *)scheme_make_native_closure(data->u.native_code);
        runstack = MZ_RUNSTACK;
        jcnt = data->closure_size;
        map = data->closure_map;
        for (j = 0; j < jcnt; j++) {
          na->vals[j] = runstack[map[j]];
        }
        val = (Scheme_Object *)na;
      }
      nc->vals[i] = val;
    }

    return (Scheme_Object *)nc;
  }
#endif

  seqout = (Scheme_Case_Lambda *)
    scheme_malloc_tagged(sizeof(Scheme_Case_Lambda)
                         + (seqin->count - mzFLEX_DELTA) * sizeof(Scheme_Object *));
  seqout->so.type = scheme_case_closure_type;
  seqout->count = seqin->count;
  seqout->name = seqin->name;

  cnt = seqin->count;
  for (i = 0; i < cnt; i++) {
    if (SAME_TYPE(SCHEME_TYPE(seqin->array[i]), scheme_closure_type)) {
      /* An empty closure, created at compile time */
      seqout->array[i] = seqin->array[i];
    } else {
      Scheme_Object *lc;
      lc = scheme_make_closure(p, seqin->array[i], 1);
      seqout->array[i] = lc;
    }
  }

  return (Scheme_Object *)seqout;
}
1877
/* Allocate a fresh environment box initialized to hold `value'. */
Scheme_Object *scheme_make_envunbox(Scheme_Object *value)
{
  Scheme_Object *box;

  box = (Scheme_Object *)scheme_malloc_envunbox(sizeof(Scheme_Object*));
  SCHEME_ENVBOX_VAL(box) = value;

  return box;
}
1887
static Scheme_Object *bangboxenv_execute(Scheme_Object *data)
/* A bangboxenv step is inserted by the compilation of `lambda' and
   `let' forms where an argument or bindings is set!ed in the body. */
{
  Scheme_Object *boxed;
  int pos;

  /* Box the runstack slot at `pos', then tail-evaluate the body. */
  pos = SCHEME_INT_VAL(SCHEME_PTR1_VAL(data));
  boxed = scheme_make_envunbox(MZ_RUNSTACK[pos]);
  MZ_RUNSTACK[pos] = boxed;

  return _scheme_tail_eval(SCHEME_PTR2_VAL(data));
}
1902
/* Execute a linked `begin0' form: evaluate the first expression and
   save its result(s), evaluate the remaining expressions for effect,
   then restore and return the saved result(s). */
static Scheme_Object *begin0_execute(Scheme_Object *obj)
{
  Scheme_Object *v, **mv;
  int i, mc, apos;

  i = ((Scheme_Sequence *)obj)->count;

  v = _scheme_eval_linked_expr_multi(((Scheme_Sequence *)obj)->array[0]);
  i--;
  if (SAME_OBJ(v, SCHEME_MULTIPLE_VALUES)) {
    /* Save the multiple-values array before the later expressions
       can clobber the thread's values record. */
    Scheme_Thread *p = scheme_current_thread;
    mv = p->ku.multiple.array;
    mc = p->ku.multiple.count;
    if (SAME_OBJ(mv, p->values_buffer))
      p->values_buffer = NULL;
  } else {
    mv = NULL;
    mc = 1;
  }

  /* Evaluate the rest purely for effect. */
  apos = 1;
  while (i--) {
    ignore_result(_scheme_eval_linked_expr_multi(((Scheme_Sequence *)obj)->array[apos++]));
  }

  if (mc != 1) {
    /* Restore the saved multiple values as the overall result. */
    Scheme_Thread *p = scheme_current_thread;
    p->ku.multiple.array = mv;
    p->ku.multiple.count = mc;
  }

  return v;
}
1936
1937 /*========================================================================*/
1938 /* closures */
1939 /*========================================================================*/
1940
Scheme_Object *
scheme_make_closure(Scheme_Thread *p, Scheme_Object *code, int close)
/* Creates a closure at run-time (or an empty closure at compile
   time; note that the byte-code marshaller in print.c can handle
   empty closures for that reason). `code' is the Scheme_Lambda; when
   `close' is nonzero, captured values are copied from the current
   runstack, selected by the lambda's closure map. */
{
  Scheme_Lambda *data;
  Scheme_Closure *closure;
  GC_CAN_IGNORE Scheme_Object **runstack;
  GC_CAN_IGNORE Scheme_Object **dest;
  GC_CAN_IGNORE mzshort *map;
  int i;

  data = (Scheme_Lambda *)code;

#ifdef MZ_USE_JIT
  if (data->u.native_code
      /* If the union points to another Scheme_Lambda*, then it's not actually
         a pointer to native code. We must have a closure referenced from non-JITted code
         where the closure is also referenced by JITted code. */
      && !SAME_TYPE(SCHEME_TYPE(data->u.native_code), scheme_lambda_type)) {
    Scheme_Object *nc;

    /* JIT-generated code exists for this lambda: build a native closure
       instead of an interpreted one. */
    nc = scheme_make_native_closure(data->u.native_code);

    if (close) {
      runstack = MZ_RUNSTACK;
      dest = ((Scheme_Native_Closure *)nc)->vals;
      map = data->closure_map;
      i = data->closure_size;

      /* Copy data into the closure: */
      while (i--) {
        dest[i] = runstack[map[i]];
      }
    }

    return nc;
  }
#endif

  i = data->closure_size;

  /* Scheme_Closure ends in a flexible value array; mzFLEX_DELTA accounts
     for the slots already included in sizeof(Scheme_Closure). */
  closure = (Scheme_Closure *)
    scheme_malloc_tagged(sizeof(Scheme_Closure)
                         + (i - mzFLEX_DELTA) * sizeof(Scheme_Object *));

  closure->so.type = scheme_closure_type;
  SCHEME_CLOSURE_CODE(closure) = data;

  /* Empty closure, or caller asked not to capture values yet */
  if (!close || !i)
    return (Scheme_Object *)closure;

  runstack = MZ_RUNSTACK;
  dest = closure->vals;
  map = data->closure_map;

  /* Copy data into the closure: */
  while (i--) {
    dest[i] = runstack[map[i]];
  }

  return (Scheme_Object *)closure;
}
2005
scheme_malloc_empty_closure()2006 Scheme_Closure *scheme_malloc_empty_closure()
2007 {
2008 Scheme_Closure *cl;
2009
2010 cl = (Scheme_Closure *)scheme_malloc_tagged(sizeof(Scheme_Closure) - (mzFLEX_DELTA * sizeof(Scheme_Object *)));
2011 cl->so.type = scheme_closure_type;
2012
2013 return cl;
2014 }
2015
void scheme_delay_load_closure(Scheme_Lambda *data)
/* If `data' has a delay-loaded body --- represented as a raw pair
   whose cdr is a Scheme_Load_Delay record --- force the load now,
   replacing the body in place, and run delayed bytecode validation
   when saved validation info is present. */
{
  if (SCHEME_RPAIRP(data->body)) {
    Scheme_Object *v, *vinfo = NULL;

    v = SCHEME_CAR(data->body);
    if (SCHEME_VECTORP(v)) {
      /* Has info for delayed validation */
      vinfo = v;
      v = SCHEME_VEC_ELS(vinfo)[0];
    }
    /* `v' is a fixnum passed to the delayed loader --- presumably a
       position within the delayed-load stream; confirm against
       scheme_load_delayed_code if it matters here. */
    v = scheme_load_delayed_code(SCHEME_INT_VAL(v),
                                 (struct Scheme_Load_Delay *)SCHEME_CDR(data->body));
    data->body = v;

    if (vinfo) {
      /* Replay validation with the context saved in `vinfo'. The slot
         assignments (1: char* blob, 2: Validate_TLS, 3/5/6/10: fixnums,
         7: hash tree or #f, 8/9: optional pointers, 11: hash-table
         array) must mirror exactly the code that packed this vector ---
         NOTE(review): slot meanings are not visible here; verify against
         the packing site before reordering anything. */
      scheme_validate_closure(NULL,
                              (Scheme_Object *)data,
                              (char *)SCHEME_VEC_ELS(vinfo)[1],
                              (Validate_TLS)SCHEME_VEC_ELS(vinfo)[2],
                              SCHEME_INT_VAL(SCHEME_VEC_ELS(vinfo)[3]),
                              SCHEME_INT_VAL(SCHEME_VEC_ELS(vinfo)[5]),
                              (SCHEME_TRUEP(SCHEME_VEC_ELS(vinfo)[8])
                               ? (void *)SCHEME_VEC_ELS(vinfo)[8]
                               : NULL),
                              (SCHEME_TRUEP(SCHEME_VEC_ELS(vinfo)[9])
                               ? (mzshort *)(SCHEME_VEC_ELS(vinfo)[9])
                               : NULL),
                              SCHEME_INT_VAL(SCHEME_VEC_ELS(vinfo)[10]),
                              SCHEME_INT_VAL(SCHEME_VEC_ELS(vinfo)[6]),
                              (SCHEME_TRUEP(SCHEME_VEC_ELS(vinfo)[7])
                               ? (Scheme_Hash_Tree *)SCHEME_VEC_ELS(vinfo)[7]
                               : NULL),
                              (Scheme_Hash_Table **)SCHEME_VEC_ELS(vinfo)[11]);
    }
  }
}
2053
2054 /*========================================================================*/
2055 /* main eval-apply loop */
2056 /*========================================================================*/
2057
2058 /* This is the main evaluator loop. It's roughly of the form:
2059
2060 while (1) {
2061 if (now-applying) {
2062 if (apply-type-1)
2063 ...
2064 else if (apply-type-2)
2065 ...
2066 else ...
2067 } else {
2068 switch (eval-type) {
2069 case eval-type-1:
2070 ...
2071 case eval-type-2:
2072 ...
2073 ...
2074 }
2075 }
2076 }
2077
2078 The main use of #ifdefs is whether to use global variables for the
2079 Scheme stack pointer or to use local variables. Different
2080 architectures work best with different choices.
2081
2082 */
2083
2084 #ifdef INSTRUMENT_PRIMITIVES
2085 extern int g_print_prims;
2086 #endif
2087
2088 #ifdef REGISTER_POOR_MACHINE
2089 # define USE_LOCAL_RUNSTACK 0
2090 # define DELAY_THREAD_RUNSTACK_UPDATE 0
2091 #else
2092 # define USE_LOCAL_RUNSTACK 1
2093 # define DELAY_THREAD_RUNSTACK_UPDATE 1
2094 #endif
2095
2096 Scheme_Object *
scheme_do_eval(Scheme_Object * obj,int num_rands,Scheme_Object ** rands,int get_value)2097 scheme_do_eval(Scheme_Object *obj, int num_rands, Scheme_Object **rands,
2098 int get_value)
2099 /* If rands == MZ_RUNSTACK on entry, rands elements can be modified. */
2100 {
2101 Scheme_Type type;
2102 Scheme_Object *v;
2103 int check_rands;
2104 GC_CAN_IGNORE Scheme_Object *tmpv; /* safe-for-space relies on GC_CAN_IGNORE */
2105 GC_CAN_IGNORE Scheme_Object **tmprands; /* safe-for-space relies on GC_CAN_IGNORE */
2106 GC_MAYBE_IGNORE_INTERIOR Scheme_Object **old_runstack, **runstack_base;
2107 GC_MAYBE_IGNORE_INTERIOR MZ_MARK_STACK_TYPE old_cont_mark_stack;
2108 #if USE_LOCAL_RUNSTACK
2109 GC_MAYBE_IGNORE_INTERIOR Scheme_Object **runstack;
2110 #endif
2111 MZ_MARK_STACK_TYPE pmstack = -1;
2112 # define p scheme_current_thread
2113
2114 #ifdef DO_STACK_CHECK
2115 # define SCHEME_CURRENT_PROCESS p
2116 # include "mzstkchk.h"
2117 {
2118 return do_eval_stack_overflow(obj, num_rands, rands, get_value);
2119 }
2120 #endif
2121
2122 #if USE_LOCAL_RUNSTACK
2123 # define RUNSTACK runstack
2124 # if DELAY_THREAD_RUNSTACK_UPDATE
2125 # define UPDATE_THREAD_RSPTR() (MZ_RUNSTACK = runstack)
2126 # define RUNSTACK_CHANGED() /**/
2127 # else
2128 # define UPDATE_THREAD_RSPTR() /**/
2129 # define RUNSTACK_CHANGED() (MZ_RUNSTACK = runstack)
2130 # endif
2131 runstack = MZ_RUNSTACK;
2132 # define RESET_LOCAL_RUNSTACK() (runstack = MZ_RUNSTACK)
2133 #else
2134 # define RUNSTACK MZ_RUNSTACK
2135 # define UPDATE_THREAD_RSPTR() /**/
2136 # define RUNSTACK_CHANGED() /**/
2137 # define RESET_LOCAL_RUNSTACK() /**/
2138 #endif
2139
2140 #if 1
2141 # define EVAL_SFS_CLEAR(runstack, obj) \
2142 if (SCHEME_GET_LOCAL_FLAGS(obj) == SCHEME_LOCAL_CLEAR_ON_READ) { \
2143 runstack[SCHEME_LOCAL_POS(obj)] = NULL; \
2144 }
2145 # define SFS_CLEAR_RUNSTACK_ONE(runstack, pos) runstack[pos] = NULL
2146 # define SFS_CLEAR_RUNSTACK(runstack, i, n) for (i = n; i--; ) { SFS_CLEAR_RUNSTACK_ONE(runstack, i); }
2147 #else
2148 # define EVAL_SFS_CLEAR(rs, obj) /* empty */
2149 # define SFS_CLEAR_RUNSTACK_ONE(runstack, pos) /* empty */
2150 # define SFS_CLEAR_RUNSTACK(runstack, i, n) /* empty */
2151 #endif
2152
2153 #define RUNSTACK_START MZ_RUNSTACK_START
2154
2155 #define UPDATE_THREAD_RSPTR_FOR_GC() UPDATE_THREAD_RSPTR()
2156 #define UPDATE_THREAD_RSPTR_FOR_ERROR() UPDATE_THREAD_RSPTR()
2157
2158 #define UPDATE_THREAD_RSPTR_FOR_PROC_MARK() UPDATE_THREAD_RSPTR()
2159
2160 #ifdef DEBUG_CHECK_STACK_FRAME_SIZE
2161 if (obj == SCHEME_TAIL_CALL_WAITING) {
2162 scheme_do_eval(SCHEME_EVAL_WAITING, 0, &obj, 0);
2163 return NULL;
2164 } else if (obj == SCHEME_EVAL_WAITING) {
2165 printf("%ld\n", (char *)rands - (char *)&obj);
2166 return NULL;
2167 }
2168 #endif
2169
2170 MZ_CONT_MARK_POS += 2;
2171 old_runstack = RUNSTACK;
2172 if (num_rands >= 0) {
2173 /* If we have a call with arguments at runstack, then we're
2174 allowed to recycle the argument part of the runstack. In fact,
2175 space safety may relies on reusing that space to clear argument
2176 values. */
2177 if (rands == RUNSTACK)
2178 runstack_base = RUNSTACK + num_rands;
2179 else
2180 runstack_base = RUNSTACK;
2181 } else
2182 runstack_base = RUNSTACK;
2183 old_cont_mark_stack = MZ_CONT_MARK_STACK;
2184
2185 if (num_rands >= 0) {
2186
2187 if ((RUNSTACK - RUNSTACK_START) < SCHEME_TAIL_COPY_THRESHOLD) {
2188 /* It's possible that a sequence of primitive _scheme_tail_apply()
2189 calls will exhaust the Scheme stack. Watch out for that. */
2190 rands = evacuate_runstack(num_rands, rands, RUNSTACK);
2191
2192 p->ku.k.p1 = (void *)obj;
2193 p->ku.k.i1 = num_rands;
2194 p->ku.k.p2 = (void *)rands;
2195 p->ku.k.i2 = -1;
2196
2197 UPDATE_THREAD_RSPTR();
2198 if (rands == p->tail_buffer)
2199 make_tail_buffer_safe();
2200 MZ_CONT_MARK_POS -= 2;
2201 return scheme_enlarge_runstack(SCHEME_TAIL_COPY_THRESHOLD, (void *(*)(void))do_eval_k);
2202 }
2203
2204 check_rands = num_rands;
2205
2206 apply_top:
2207
2208 /* DANGER: if rands == p->tail_buffer, we have to be careful to
2209 get the arguments out of the buffer before a GC occurs.
2210 (Otherwise, they'll be zeroed.) One way to make things safe for
2211 GC is to let rands have the buffer and create a new one. */
2212
2213 type = SCHEME_TYPE(obj);
2214
2215 if (type == scheme_prim_type) {
2216 GC_CAN_IGNORE Scheme_Primitive_Proc *prim;
2217 GC_CAN_IGNORE Scheme_Primitive_Closure_Proc *f;
2218
2219 #define VACATE_TAIL_BUFFER_USE_RUNSTACK() \
2220 if (rands == p->tail_buffer) { \
2221 if (num_rands < SCHEME_TAIL_COPY_THRESHOLD) { \
2222 int i; \
2223 GC_CAN_IGNORE Scheme_Object **quick_rands; \
2224 \
2225 quick_rands = PUSH_RUNSTACK(p, RUNSTACK, num_rands); \
2226 RUNSTACK_CHANGED(); \
2227 \
2228 for (i = num_rands; i--; ) { \
2229 quick_rands[i] = rands[i]; \
2230 } \
2231 rands = quick_rands; \
2232 } else { \
2233 UPDATE_THREAD_RSPTR_FOR_GC(); \
2234 make_tail_buffer_safe(); \
2235 } \
2236 }
2237
2238 VACATE_TAIL_BUFFER_USE_RUNSTACK();
2239
2240 UPDATE_THREAD_RSPTR();
2241
2242 prim = (Scheme_Primitive_Proc *)obj;
2243
2244 if (num_rands < prim->mina
2245 || (num_rands > prim->mu.maxa && prim->mina >= 0)) {
2246 scheme_wrong_count_m(prim->name, prim->mina, prim->mu.maxa,
2247 num_rands, rands,
2248 0);
2249 return NULL; /* Shouldn't get here */
2250 }
2251
2252 f = prim->prim_val;
2253 tmprands = rands;
2254 rands = NULL; /* safe for space, since tmprands is ignored by the GC */
2255 v = f(num_rands, tmprands, (Scheme_Object *)prim);
2256
2257 DEBUG_CHECK_TYPE(v);
2258 } else if (type == scheme_closure_type) {
2259 Scheme_Lambda *data;
2260 GC_CAN_IGNORE Scheme_Object **stack, **src;
2261 int i, has_rest, num_params;
2262
2263 DO_CHECK_FOR_BREAK(p, UPDATE_THREAD_RSPTR_FOR_GC(); if (rands == p->tail_buffer) make_tail_buffer_safe(););
2264
2265 data = SCHEME_CLOSURE_CODE(obj);
2266
2267 if ((runstack_base - RUNSTACK_START) < data->max_let_depth) {
2268 rands = evacuate_runstack(num_rands, rands, RUNSTACK);
2269
2270 if (rands == p->tail_buffer) {
2271 UPDATE_THREAD_RSPTR_FOR_GC();
2272 make_tail_buffer_safe();
2273 }
2274
2275 p->ku.k.p1 = (void *)obj;
2276 p->ku.k.i1 = num_rands;
2277 p->ku.k.p2 = (void *)rands;
2278 p->ku.k.i2 = -1;
2279
2280 UPDATE_THREAD_RSPTR();
2281 MZ_CONT_MARK_POS -= 2;
2282 v = (Scheme_Object *)scheme_enlarge_runstack(data->max_let_depth, (void *(*)(void))do_eval_k);
2283 MZ_CONT_MARK_POS += 2;
2284 goto returnv;
2285 }
2286
2287 num_params = data->num_params;
2288 has_rest = SCHEME_LAMBDA_FLAGS(data) & LAMBDA_HAS_REST;
2289
2290 if (num_params) {
2291 if (has_rest) {
2292 int extra, n;
2293
2294 if (num_rands < (num_params - 1)) {
2295 UPDATE_THREAD_RSPTR_FOR_ERROR();
2296 /* note: scheme_wrong_count_m handles rands == p->tail_buffer */
2297 scheme_wrong_count_m((const char *)obj,
2298 -1, -1,
2299 num_rands, rands,
2300 SCHEME_LAMBDA_FLAGS(data) & LAMBDA_IS_METHOD);
2301 return NULL; /* Doesn't get here */
2302 }
2303
2304 n = num_params - has_rest;
2305
2306 RUNSTACK = runstack_base - num_params;
2307 CHECK_RUNSTACK(p, RUNSTACK);
2308 RUNSTACK_CHANGED();
2309
2310 extra = num_rands - n;
2311 if (extra) {
2312 Scheme_Object *rest_vals;
2313 GC_CAN_IGNORE Scheme_Object *pairs;
2314
2315 /* This is a special case: GC may be triggered, but
2316 p->runstack does not point at everything that needs
2317 to be kept if args are lower on the stack.
2318 That's what runstack_tmp_keep is for. Also, if
2319 runstack_tmp_keep == tail_buffer, then the buffer
2320 won't be zeroed. */
2321 UPDATE_THREAD_RSPTR_FOR_GC();
2322 p->runstack_tmp_keep = rands;
2323
2324 rest_vals = scheme_null;
2325 for (i = num_rands - 1; i >= n; --i) {
2326 pairs = scheme_alloc_object();
2327 pairs->type = scheme_pair_type;
2328 SCHEME_CDR(pairs) = rest_vals;
2329 SCHEME_CAR(pairs) = rands[i];
2330 rest_vals = pairs;
2331 }
2332
2333 p->runstack_tmp_keep = NULL;
2334
2335 stack = RUNSTACK;
2336 stack[n] = rest_vals;
2337 while (n--) {
2338 stack[n] = rands[n];
2339 }
2340 } else {
2341 stack = RUNSTACK;
2342 /* Possibly, but not necessarily, rands > stack: */
2343 if ((uintptr_t)rands > (uintptr_t)stack) {
2344 int i;
2345 for (i = 0; i < n; i++) {
2346 stack[i] = rands[i];
2347 }
2348 stack[n] = scheme_null;
2349 } else {
2350 stack[n] = scheme_null;
2351 while (n--) {
2352 stack[n] = rands[n];
2353 }
2354 }
2355 }
2356 } else {
2357 if (num_rands != num_params) {
2358 UPDATE_THREAD_RSPTR_FOR_ERROR();
2359 /* note: scheme_wrong_count_m handles rands == p->tail_buffer */
2360 scheme_wrong_count_m((const char *)obj,
2361 -1, -1,
2362 num_rands, rands,
2363 SCHEME_LAMBDA_FLAGS(data) & LAMBDA_IS_METHOD);
2364 return NULL; /* Doesn't get here */
2365 }
2366
2367 stack = RUNSTACK = runstack_base - num_params;
2368 CHECK_RUNSTACK(p, RUNSTACK);
2369 RUNSTACK_CHANGED();
2370
2371 if (rands != stack) {
2372 int n = num_params;
2373 while (n--) {
2374 stack[n] = rands[n];
2375 }
2376 }
2377 }
2378 } else {
2379 if (num_rands) {
2380 if (has_rest) {
2381 /* 0 params and has_rest => (lambda args E) where args is not in E,
2382 so accept any number of arguments and ignore them. */
2383
2384 } else {
2385 UPDATE_THREAD_RSPTR_FOR_ERROR();
2386 /* note: scheme_wrong_count handles rands == p->tail_buffer */
2387 scheme_wrong_count((const char *)obj, -1, -1, num_rands, rands);
2388 return NULL; /* Doesn't get here */
2389 }
2390 }
2391 RUNSTACK = runstack_base;
2392 RUNSTACK_CHANGED();
2393 }
2394
2395 {
2396 int n = data->closure_size;
2397
2398 if (n) {
2399 src = SCHEME_CLOSURE_ENV(obj);
2400 stack = PUSH_RUNSTACK(p, RUNSTACK, n);
2401 RUNSTACK_CHANGED();
2402
2403 while (n--) {
2404 stack[n] = src[n];
2405 }
2406 }
2407 }
2408
2409 obj = data->body;
2410
2411 if (SCHEME_RPAIRP(obj)) {
2412 UPDATE_THREAD_RSPTR_FOR_GC();
2413 make_tail_buffer_safe();
2414 scheme_delay_load_closure(data);
2415 obj = data->body;
2416 }
2417
2418 if (pmstack >= 0) {
2419 intptr_t segpos = ((intptr_t)pmstack) >> SCHEME_LOG_MARK_SEGMENT_SIZE;
2420 intptr_t pos = ((intptr_t)pmstack) & SCHEME_MARK_SEGMENT_MASK;
2421 GC_CAN_IGNORE Scheme_Cont_Mark *pm = NULL;
2422
2423 pm = p->cont_mark_stack_segments[segpos] + pos;
2424
2425 if (!pm->cache)
2426 pm->val = data->name;
2427 else {
2428 /* Need to clear caches, so do it the slow way */
2429 UPDATE_THREAD_RSPTR_FOR_PROC_MARK();
2430 pmstack = scheme_set_cont_mark(scheme_stack_dump_key, data->name);
2431 }
2432 } else {
2433 /* Allocate a new mark record: */
2434 intptr_t segpos = ((intptr_t)MZ_CONT_MARK_STACK) >> SCHEME_LOG_MARK_SEGMENT_SIZE;
2435 if (segpos >= p->cont_mark_seg_count) {
2436 UPDATE_THREAD_RSPTR_FOR_PROC_MARK();
2437 pmstack = scheme_set_cont_mark(scheme_stack_dump_key, data->name);
2438 } else {
2439 intptr_t pos = ((intptr_t)MZ_CONT_MARK_STACK) & SCHEME_MARK_SEGMENT_MASK;
2440 GC_CAN_IGNORE Scheme_Cont_Mark *pm;
2441 GC_CAN_IGNORE Scheme_Cont_Mark *seg;
2442
2443 pmstack = MZ_CONT_MARK_STACK;
2444
2445 seg = p->cont_mark_stack_segments[segpos];
2446 pm = seg + pos;
2447 MZ_CONT_MARK_STACK++;
2448
2449 pm->key = scheme_stack_dump_key;
2450 pm->val = data->name;
2451 pm->pos = MZ_CONT_MARK_POS;
2452 pm->cache = NULL;
2453 }
2454 }
2455
2456 goto eval_top;
2457 } else if (type == scheme_case_closure_type) {
2458 Scheme_Case_Lambda *seq;
2459 Scheme_Lambda *data;
2460
2461 int i;
2462
2463 seq = (Scheme_Case_Lambda *)obj;
2464 for (i = 0; i < seq->count; i++) {
2465 data = SCHEME_CLOSURE_CODE(seq->array[i]);
2466 if ((!(SCHEME_LAMBDA_FLAGS(data) & LAMBDA_HAS_REST)
2467 && (data->num_params == num_rands))
2468 || ((SCHEME_LAMBDA_FLAGS(data) & LAMBDA_HAS_REST)
2469 && (data->num_params - 1 <= num_rands))) {
2470 obj = seq->array[i];
2471 goto apply_top;
2472 }
2473 }
2474
2475 UPDATE_THREAD_RSPTR_FOR_ERROR();
2476 /* note: scheme_wrong_count handles rands == p->tail_buffer */
2477 scheme_wrong_count((char *)seq, -1, -1, num_rands, rands);
2478
2479 return NULL; /* Doesn't get here. */
2480 #ifdef MZ_USE_JIT
2481 } else if (type == scheme_native_closure_type) {
2482 GC_CAN_IGNORE Scheme_Native_Lambda *data;
2483
2484 VACATE_TAIL_BUFFER_USE_RUNSTACK();
2485
2486 UPDATE_THREAD_RSPTR();
2487
2488 DO_CHECK_FOR_BREAK(p, );
2489
2490 /* See also _apply_native(), which effectively copies this code. */
2491
2492 data = ((Scheme_Native_Closure *)obj)->code;
2493
2494 /* Enlarge the runstack? This max_let_depth is in bytes instead of words. */
2495 if ((uintptr_t)data->max_let_depth > ((uintptr_t)RUNSTACK - (uintptr_t)RUNSTACK_START)) {
2496 p->ku.k.p1 = (void *)obj;
2497 p->ku.k.i1 = num_rands;
2498 p->ku.k.p2 = (void *)rands;
2499 p->ku.k.i2 = -1;
2500
2501 MZ_CONT_MARK_POS -= 2;
2502 v = (Scheme_Object *)scheme_enlarge_runstack(data->max_let_depth / sizeof(void *),
2503 (void *(*)(void))do_eval_native_k);
2504 MZ_CONT_MARK_POS += 2;
2505 goto returnv;
2506 }
2507
2508 tmpv = obj;
2509 obj = NULL; /* safe for space, since tmpv is ignored by the GC */
2510 tmprands = rands;
2511 if (rands != old_runstack)
2512 rands = NULL; /* safe for space, since tmprands is ignored by the GC */
2513 v = data->start_code(tmpv, num_rands, tmprands EXTRA_NATIVE_ARGUMENT);
2514
2515 if (v == SCHEME_TAIL_CALL_WAITING) {
2516 /* [TC-SFS]; see schnapp.inc */
2517 if (rands == old_runstack) {
2518 int i;
2519 for (i = 0; i < num_rands; i++) { rands[i] = NULL; }
2520 }
2521 }
2522
2523 DEBUG_CHECK_TYPE(v);
2524 #endif
2525 } else if (type == scheme_cont_type) {
2526 UPDATE_THREAD_RSPTR();
2527 v = scheme_jump_to_continuation(obj, num_rands, rands, old_runstack, 1);
2528 } else if (type == scheme_escaping_cont_type) {
2529 UPDATE_THREAD_RSPTR();
2530 scheme_escape_to_continuation(obj, num_rands, rands, NULL);
2531 return NULL;
2532 } else if ((type == scheme_proc_struct_type)
2533 || ((type == scheme_proc_chaperone_type)
2534 /* Chaperone is for struct fields, not function arguments --- but
2535 the chaperone may guard access to the function as a field inside
2536 the struct. We'll need to keep track of the original object
2537 as we unwrap to discover procedure chaperones. */
2538 && SCHEME_REDIRECTS_STRUCTP(((Scheme_Chaperone *) obj)->redirects))
2539 /* A raw pair is from scheme_apply_chaperone(), propagating the
2540 original object for an applicable structure. */
2541 || (type == scheme_raw_pair_type)) {
2542 int is_method;
2543 Scheme_Object *orig_obj;
2544
2545 orig_obj = obj;
2546
2547 while (1) {
2548 /* Like the apply loop around this one, but we need
2549 to keep track of orig_obj until we get down to the
2550 structure. */
2551
2552 if (SCHEME_RPAIRP(obj)) {
2553 orig_obj = SCHEME_CDR(obj);
2554 obj = SCHEME_CAR(obj);
2555 }
2556
2557 type = SCHEME_TYPE(obj);
2558 if (type == scheme_proc_struct_type) {
2559 VACATE_TAIL_BUFFER_USE_RUNSTACK();
2560
2561 UPDATE_THREAD_RSPTR_FOR_ERROR(); /* in case */
2562
2563 obj = scheme_extract_struct_procedure(orig_obj, check_rands, rands, &is_method);
2564 if (is_method) {
2565 /* Have to add an extra argument to the front of rands */
2566 if ((rands == RUNSTACK) && (RUNSTACK != RUNSTACK_START)){
2567 /* Common case: we can just push self onto the front: */
2568 rands = PUSH_RUNSTACK(p, RUNSTACK, 1);
2569 rands[0] = orig_obj;
2570 } else {
2571 int i;
2572 Scheme_Object **a;
2573
2574 if (p->tail_buffer && (num_rands < p->tail_buffer_size)) {
2575 /* Use tail-call buffer. Shift in such a way that this works if
2576 rands == p->tail_buffer */
2577 a = p->tail_buffer;
2578 } else {
2579 /* Uncommon general case --- allocate an array */
2580 UPDATE_THREAD_RSPTR_FOR_GC();
2581 a = MALLOC_N(Scheme_Object *, num_rands + 1);
2582 }
2583
2584 for (i = num_rands; i--; ) {
2585 a[i + 1] = rands[i];
2586 }
2587 a[0] = orig_obj;
2588 rands = a;
2589 }
2590 num_rands++;
2591 }
2592
2593 DO_CHECK_FOR_BREAK(p, UPDATE_THREAD_RSPTR_FOR_GC(); if (rands == p->tail_buffer) make_tail_buffer_safe(););
2594
2595 /* After we check arity once, no need to check again
2596 (which would lead to O(n^2) checking for nested
2597 struct procs): */
2598 check_rands = -1;
2599
2600 goto apply_top;
2601 } else {
2602 if (SCHEME_REDIRECTS_STRUCTP(((Scheme_Chaperone *)obj)->redirects))
2603 obj = ((Scheme_Chaperone *)obj)->prev;
2604 else if (SAME_TYPE(SCHEME_TYPE(((Scheme_Chaperone *)obj)->redirects), scheme_nack_guard_evt_type))
2605 /* Chaperone is for evt, not function arguments */
2606 obj = ((Scheme_Chaperone *)obj)->prev;
2607 else {
2608 /* Chaperone is for function arguments */
2609 VACATE_TAIL_BUFFER_USE_RUNSTACK();
2610 UPDATE_THREAD_RSPTR();
2611 tmprands = rands;
2612 rands = NULL; /* safe for space, since tmprands is ignored by the GC */
2613 v = scheme_apply_chaperone(scheme_make_raw_pair(obj, orig_obj), num_rands, tmprands, NULL, 0);
2614
2615 if (SAME_OBJ(v, SCHEME_TAIL_CALL_WAITING)) {
2616 /* Need to stay in this loop, because a tail-call result must
2617 be a tail call to an unwrapped layer, so we'll eventually
2618 need to use orig_obj. */
2619 obj = p->ku.apply.tail_rator;
2620 num_rands = p->ku.apply.tail_num_rands;
2621 if (check_rands != -1) check_rands = num_rands;
2622 rands = p->ku.apply.tail_rands;
2623 p->ku.apply.tail_rator = NULL;
2624 p->ku.apply.tail_rands = NULL;
2625 RUNSTACK = runstack_base;
2626 RUNSTACK_CHANGED();
2627 } else {
2628 break;
2629 }
2630 }
2631 }
2632 }
2633 } else if (type == scheme_proc_chaperone_type) {
2634 if (SAME_TYPE(SCHEME_TYPE(((Scheme_Chaperone *)obj)->redirects), scheme_nack_guard_evt_type)) {
2635 /* Chaperone is for evt, not function arguments */
2636 obj = ((Scheme_Chaperone *)obj)->prev;
2637 check_rands = num_rands;
2638 goto apply_top;
2639 } else {
2640 /* Chaperone is for function arguments */
2641 VACATE_TAIL_BUFFER_USE_RUNSTACK();
2642 UPDATE_THREAD_RSPTR();
2643 tmprands = rands;
2644 rands = NULL; /* safe for space, since tmprands is ignored by the GC */
2645 v = scheme_apply_chaperone(obj, num_rands, tmprands, NULL, 0);
2646 }
2647 } else if (type == scheme_closed_prim_type) {
2648 GC_CAN_IGNORE Scheme_Closed_Primitive_Proc *prim;
2649
2650 DO_CHECK_FOR_BREAK(p, UPDATE_THREAD_RSPTR_FOR_GC(); if (rands == p->tail_buffer) make_tail_buffer_safe(););
2651
2652 VACATE_TAIL_BUFFER_USE_RUNSTACK();
2653
2654 UPDATE_THREAD_RSPTR();
2655
2656 prim = (Scheme_Closed_Primitive_Proc *)obj;
2657
2658 if (num_rands < prim->mina
2659 || (num_rands > prim->maxa && prim->maxa >= 0)) {
2660 scheme_wrong_count_m(prim->name, prim->mina, prim->maxa,
2661 num_rands, rands,
2662 0);
2663 return NULL; /* Shouldn't get here */
2664 }
2665
2666 tmprands = rands;
2667 if (rands != old_runstack)
2668 rands = NULL; /* safe for space, since tmprands is ignored by the GC */
2669 v = prim->prim_val(prim->data, num_rands, tmprands);
2670
2671 if (v == SCHEME_TAIL_CALL_WAITING) {
2672 /* [TC-SFS]; see schnapp.inc */
2673 if (rands == old_runstack) {
2674 int i;
2675 for (i = 0; i < num_rands; i++) { rands[i] = NULL; }
2676 }
2677 }
2678
2679 DEBUG_CHECK_TYPE(v);
2680 } else {
2681 UPDATE_THREAD_RSPTR_FOR_ERROR();
2682 if (rands == p->tail_buffer)
2683 make_tail_buffer_safe();
2684 scheme_wrong_rator(obj, num_rands, rands);
2685 return NULL; /* Doesn't get here. */
2686 }
2687 } else {
2688
2689 eval_top:
2690
2691 if (SCHEME_INTP(obj)) {
2692 v = obj;
2693 goto returnv_never_multi;
2694 }
2695
2696 type = _SCHEME_TYPE(obj);
2697 switch (type)
2698 {
2699 case scheme_toplevel_type:
2700 {
2701 /* Make sure that the GC can ignore tmp: */
2702 #define global_lookup(prefix, _obj, tmp) \
2703 tmp = RUNSTACK[SCHEME_TOPLEVEL_DEPTH(_obj)]; \
2704 tmp = ((Scheme_Prefix *)tmp)->a[SCHEME_TOPLEVEL_POS(_obj)]; \
2705 tmp = (Scheme_Object *)(SCHEME_VAR_BUCKET(tmp))->val; \
2706 if (!tmp) { \
2707 UPDATE_THREAD_RSPTR_FOR_ERROR(); \
2708 unbound_global(_obj); \
2709 return NULL; \
2710 } \
2711 prefix tmp
2712
2713 global_lookup(v = , obj, v);
2714 goto returnv_never_multi;
2715 }
2716 case scheme_static_toplevel_type:
2717 {
2718 obj = SCHEME_STATIC_TOPLEVEL_PREFIX(obj)->a[SCHEME_TOPLEVEL_POS(obj)];
2719 v = (Scheme_Object *)(SCHEME_VAR_BUCKET(obj))->val;
2720 if (!v) {
2721 UPDATE_THREAD_RSPTR_FOR_ERROR();
2722 scheme_unbound_global((Scheme_Bucket *)obj);
2723 return NULL;
2724 }
2725 goto returnv_never_multi;
2726 }
2727 case scheme_local_type:
2728 {
2729 v = RUNSTACK[SCHEME_LOCAL_POS(obj)];
2730 EVAL_SFS_CLEAR(RUNSTACK, obj);
2731 goto returnv_never_multi;
2732 }
2733 case scheme_local_unbox_type:
2734 {
2735 v = SCHEME_ENVBOX_VAL(RUNSTACK[SCHEME_LOCAL_POS(obj)]);
2736 EVAL_SFS_CLEAR(RUNSTACK, obj);
2737 goto returnv_never_multi;
2738 }
2739 case scheme_application_type:
2740 {
2741 Scheme_App_Rec *app;
2742 GC_MAYBE_IGNORE_INTERIOR Scheme_Object **randsp;
2743 Scheme_Object **stack;
2744 int k;
2745 int d_evals;
2746 #ifdef MZ_XFORM
2747 # define GET_FIRST_EVAL ((char *)app)[d_evals]
2748 #else
2749 char *evals;
2750 Scheme_Object **args;
2751 # define GET_FIRST_EVAL evals[0]
2752 #endif
2753
2754 app = (Scheme_App_Rec *)obj;
2755 num_rands = app->num_args;
2756
2757 d_evals = (sizeof(Scheme_App_Rec)
2758 + ((num_rands + 1 - mzFLEX_DELTA) * sizeof(Scheme_Object *)));
2759 #ifndef MZ_XFORM
2760 evals = ((char *)obj) + d_evals;
2761 #endif
2762
2763 obj = app->args[0];
2764
2765 stack = PUSH_RUNSTACK(p, RUNSTACK, num_rands);
2766 RUNSTACK_CHANGED();
2767 UPDATE_THREAD_RSPTR();
2768 SFS_CLEAR_RUNSTACK(RUNSTACK, k, num_rands);
2769
2770 /* Inline local & global variable lookups for speed */
2771 switch (GET_FIRST_EVAL) {
2772 case SCHEME_EVAL_CONSTANT:
2773 break;
2774 case SCHEME_EVAL_GLOBAL:
2775 global_lookup(obj =, obj, tmpv);
2776 break;
2777 case SCHEME_EVAL_LOCAL:
2778 {
2779 tmpv = stack[SCHEME_LOCAL_POS(obj)];
2780 EVAL_SFS_CLEAR(stack, obj);
2781 obj = tmpv;
2782 }
2783 break;
2784 case SCHEME_EVAL_LOCAL_UNBOX:
2785 {
2786 tmpv = SCHEME_ENVBOX_VAL(stack[SCHEME_LOCAL_POS(obj)]);
2787 EVAL_SFS_CLEAR(stack, obj);
2788 obj = tmpv;
2789 }
2790 break;
2791 default:
2792 obj = _scheme_eval_linked_expr_wp(obj, p);
2793 break;
2794 }
2795
2796 if (num_rands) {
2797 #ifdef MZ_XFORM
2798 int evalpos = 1;
2799 #endif
2800
2801 rands = stack;
2802
2803 /* Inline local & global variable lookups for speed */
2804 #ifdef MZ_XFORM
2805 # define GET_NEXT_EVAL ((char *)app)[d_evals + evalpos++]
2806 # define GET_NEXT_ARG app->args[evalpos]
2807 #else
2808 evals++;
2809 args = app->args + 1;
2810 # define GET_NEXT_EVAL *(evals++)
2811 # define GET_NEXT_ARG *(args++)
2812 #endif
2813 randsp = rands;
2814 for (k = num_rands; k--; ) {
2815 v = GET_NEXT_ARG;
2816 switch (GET_NEXT_EVAL) {
2817 case SCHEME_EVAL_CONSTANT:
2818 *(randsp++) = v;
2819 break;
2820 case SCHEME_EVAL_GLOBAL:
2821 global_lookup(*(randsp++) =, v, tmpv);
2822 break;
2823 case SCHEME_EVAL_LOCAL:
2824 *(randsp++) = stack[SCHEME_LOCAL_POS(v)];
2825 EVAL_SFS_CLEAR(stack, v);
2826 break;
2827 case SCHEME_EVAL_LOCAL_UNBOX:
2828 *(randsp++) = SCHEME_ENVBOX_VAL(stack[SCHEME_LOCAL_POS(v)]);
2829 EVAL_SFS_CLEAR(stack, v);
2830 break;
2831 default:
2832 {
2833 GC_CAN_IGNORE Scheme_Object *er;
2834 er = _scheme_eval_linked_expr_wp(v, p);
2835 *(randsp++) = er;
2836 }
2837 break;
2838 }
2839
2840 DEBUG_CHECK_TYPE(randsp[-1]);
2841 }
2842 } else
2843 rands = &zero_rands_ptr;
2844
2845 check_rands = num_rands;
2846 goto apply_top;
2847 }
2848
2849 case scheme_application2_type:
2850 {
2851 Scheme_App2_Rec *app;
2852 GC_CAN_IGNORE Scheme_Object *arg;
2853 short flags;
2854
2855 app = (Scheme_App2_Rec *)obj;
2856
2857 obj = app->rator;
2858 flags = SCHEME_APPN_FLAGS(app);
2859
2860 rands = PUSH_RUNSTACK(p, RUNSTACK, 1);
2861 RUNSTACK_CHANGED();
2862 UPDATE_THREAD_RSPTR();
2863 SFS_CLEAR_RUNSTACK_ONE(RUNSTACK, 0);
2864
2865 /* Inline local & global variable lookups for speed */
2866 switch (flags & 0x7) {
2867 case SCHEME_EVAL_CONSTANT:
2868 break;
2869 case SCHEME_EVAL_GLOBAL:
2870 {
2871 global_lookup(obj =, obj, tmpv);
2872 }
2873 break;
2874 case SCHEME_EVAL_LOCAL:
2875 {
2876 tmpv = rands[SCHEME_LOCAL_POS(obj)];
2877 EVAL_SFS_CLEAR(rands, obj);
2878 obj = tmpv;
2879 }
2880 break;
2881 case SCHEME_EVAL_LOCAL_UNBOX:
2882 {
2883 tmpv = SCHEME_ENVBOX_VAL(rands[SCHEME_LOCAL_POS(obj)]);
2884 EVAL_SFS_CLEAR(rands, obj);
2885 obj = tmpv;
2886 }
2887 break;
2888 default:
2889 obj = _scheme_eval_linked_expr_wp(obj, p);
2890 break;
2891 }
2892
2893 arg = app->rand;
2894
2895 switch ((flags >> 3) & 0x7) {
2896 case SCHEME_EVAL_CONSTANT:
2897 break;
2898 case SCHEME_EVAL_GLOBAL:
2899 {
2900 global_lookup(arg =, arg, tmpv);
2901 }
2902 break;
2903 case SCHEME_EVAL_LOCAL:
2904 {
2905 tmpv = rands[SCHEME_LOCAL_POS(arg)];
2906 EVAL_SFS_CLEAR(rands, arg);
2907 arg = tmpv;
2908 }
2909 break;
2910 case SCHEME_EVAL_LOCAL_UNBOX:
2911 {
2912 tmpv = SCHEME_ENVBOX_VAL(rands[SCHEME_LOCAL_POS(arg)]);
2913 EVAL_SFS_CLEAR(rands, arg);
2914 arg = tmpv;
2915 }
2916 break;
2917 default:
2918 arg = _scheme_eval_linked_expr_wp(arg, p);
2919 break;
2920 }
2921
2922 rands[0] = arg;
2923 num_rands = 1;
2924
2925 check_rands = num_rands;
2926 goto apply_top;
2927 }
2928
2929 case scheme_application3_type:
2930 {
2931 Scheme_App3_Rec *app;
2932 GC_CAN_IGNORE Scheme_Object *arg;
2933 short flags;
2934
2935 app = (Scheme_App3_Rec *)obj;
2936
2937 obj = app->rator;
2938 flags = SCHEME_APPN_FLAGS(app);
2939
2940 rands = PUSH_RUNSTACK(p, RUNSTACK, 2);
2941 RUNSTACK_CHANGED();
2942 UPDATE_THREAD_RSPTR();
2943 SFS_CLEAR_RUNSTACK_ONE(RUNSTACK, 0);
2944 SFS_CLEAR_RUNSTACK_ONE(RUNSTACK, 1);
2945
2946 /* Inline local & global variable lookups for speed */
2947 switch (flags & 0x7) {
2948 case SCHEME_EVAL_CONSTANT:
2949 break;
2950 case SCHEME_EVAL_GLOBAL:
2951 global_lookup(obj =, obj, tmpv);
2952 break;
2953 case SCHEME_EVAL_LOCAL:
2954 tmpv = rands[SCHEME_LOCAL_POS(obj)];
2955 EVAL_SFS_CLEAR(rands, obj);
2956 obj = tmpv;
2957 break;
2958 case SCHEME_EVAL_LOCAL_UNBOX:
2959 tmpv = SCHEME_ENVBOX_VAL(rands[SCHEME_LOCAL_POS(obj)]);
2960 EVAL_SFS_CLEAR(rands, obj);
2961 obj = tmpv;
2962 break;
2963 default:
2964 obj = _scheme_eval_linked_expr_wp(obj, p);
2965 break;
2966 }
2967
2968 arg = app->rand1;
2969
2970 switch ((flags >> 3) & 0x7) {
2971 case SCHEME_EVAL_CONSTANT:
2972 break;
2973 case SCHEME_EVAL_GLOBAL:
2974 global_lookup(arg =, arg, tmpv);
2975 break;
2976 case SCHEME_EVAL_LOCAL:
2977 tmpv = rands[SCHEME_LOCAL_POS(arg)];
2978 EVAL_SFS_CLEAR(rands, arg);
2979 arg = tmpv;
2980 break;
2981 case SCHEME_EVAL_LOCAL_UNBOX:
2982 tmpv = SCHEME_ENVBOX_VAL(rands[SCHEME_LOCAL_POS(arg)]);
2983 EVAL_SFS_CLEAR(rands, arg);
2984 arg = tmpv;
2985 break;
2986 default:
2987 arg = _scheme_eval_linked_expr_wp(arg, p);
2988 break;
2989 }
2990
2991 rands[0] = arg;
2992
2993 arg = app->rand2;
2994
2995 switch ((flags >> 6) & 0x7) {
2996 case SCHEME_EVAL_CONSTANT:
2997 break;
2998 case SCHEME_EVAL_GLOBAL:
2999 global_lookup(arg =, arg, tmpv);
3000 break;
3001 case SCHEME_EVAL_LOCAL:
3002 tmpv = rands[SCHEME_LOCAL_POS(arg)];
3003 EVAL_SFS_CLEAR(rands, arg);
3004 arg = tmpv;
3005 break;
3006 case SCHEME_EVAL_LOCAL_UNBOX:
3007 tmpv = SCHEME_ENVBOX_VAL(rands[SCHEME_LOCAL_POS(arg)]);
3008 EVAL_SFS_CLEAR(rands, arg);
3009 arg = tmpv;
3010 break;
3011 default:
3012 arg = _scheme_eval_linked_expr_wp(arg, p);
3013 break;
3014 }
3015
3016 rands[1] = arg;
3017
3018 num_rands = 2;
3019
3020 check_rands = num_rands;
3021 goto apply_top;
3022 }
3023
3024 case scheme_sequence_type:
3025 {
3026 int cnt;
3027 int i;
3028
3029 cnt = ((Scheme_Sequence *)obj)->count - 1;
3030
3031 UPDATE_THREAD_RSPTR();
3032 for (i = 0; i < cnt; i++) {
3033 ignore_result(_scheme_eval_linked_expr_multi_wp(((Scheme_Sequence *)obj)->array[i], p));
3034 }
3035
3036 obj = ((Scheme_Sequence *)obj)->array[cnt];
3037 goto eval_top;
3038 }
3039 case scheme_branch_type:
3040 {
3041 UPDATE_THREAD_RSPTR();
3042 obj = (NOT_SAME_OBJ(_scheme_eval_linked_expr_wp(((Scheme_Branch_Rec *)obj)->test, p),
3043 scheme_false)
3044 ? ((Scheme_Branch_Rec *)obj)->tbranch
3045 : ((Scheme_Branch_Rec *)obj)->fbranch);
3046
3047 goto eval_top;
3048 }
3049 case scheme_lambda_type:
3050 UPDATE_THREAD_RSPTR();
3051 v = scheme_make_closure(p, obj, 1);
3052 goto returnv_never_multi;
3053
3054 case scheme_let_value_type:
3055 {
3056 GC_CAN_IGNORE Scheme_Let_Value *lv;
3057 GC_CAN_IGNORE Scheme_Object *value, **values;
3058 int i, c, ab;
3059
3060 lv = (Scheme_Let_Value *)obj;
3061
3062 c = lv->count;
3063
3064 i = lv->position;
3065 ab = SCHEME_LET_VALUE_AUTOBOX(lv);
3066 value = lv->value;
3067 obj = lv->body;
3068
3069 UPDATE_THREAD_RSPTR();
3070
3071 if (c == 1) {
3072 value = _scheme_eval_linked_expr_wp(value, p);
3073 if (ab)
3074 SCHEME_ENVBOX_VAL(RUNSTACK[i]) = value;
3075 else
3076 RUNSTACK[i] = value;
3077 } else {
3078 int c2;
3079 GC_CAN_IGNORE Scheme_Object **stack;
3080
3081 value = _scheme_eval_linked_expr_multi_wp(value, p);
3082 c2 = (SAME_OBJ(value, SCHEME_MULTIPLE_VALUES) ? p->ku.multiple.count : 1);
3083 if (c2 != c) {
3084 scheme_wrong_return_arity(NULL, c, c2,
3085 (c2 == 1) ? (Scheme_Object **)value : p->ku.multiple.array,
3086 "\n in: local-binding form");
3087 return NULL;
3088 }
3089
3090 /* Precise GC: values++ is ok because we exit the block
3091 before any GC can happen. Also, GC would zero `values'
3092 if it turns out to be p->values_buffer. */
3093
3094 values = p->ku.multiple.array;
3095 p->ku.multiple.array = NULL;
3096 stack = RUNSTACK;
3097 if (ab) {
3098 while (c--) {
3099 SCHEME_ENVBOX_VAL(stack[i]) = *values;
3100 values++;
3101 i++;
3102 }
3103 } else {
3104 while (c--) {
3105 stack[i] = *values;
3106 values++;
3107 i++;
3108 }
3109 }
3110 }
3111
3112 goto eval_top;
3113 }
3114
3115 case scheme_let_void_type:
3116 {
3117 GC_CAN_IGNORE Scheme_Let_Void *lv;
3118 int c;
3119
3120 lv = (Scheme_Let_Void *)obj;
3121 c = lv->count;
3122 obj = lv->body;
3123
3124 PUSH_RUNSTACK(p, RUNSTACK, c);
3125 RUNSTACK_CHANGED();
3126
3127 if (SCHEME_LET_VOID_AUTOBOX(lv)) {
3128 GC_MAYBE_IGNORE_INTERIOR Scheme_Object **stack = RUNSTACK;
3129
3130 UPDATE_THREAD_RSPTR_FOR_GC();
3131
3132 while (c--) {
3133 GC_CAN_IGNORE Scheme_Object *ub;
3134 ub = scheme_make_envunbox(scheme_undefined);
3135 stack[c] = ub;
3136 }
3137 }
3138
3139 goto eval_top;
3140 }
3141
3142 case scheme_letrec_type:
3143 {
3144 /* Macro instead of var for efficient precise GC conversion */
3145 # define l ((Scheme_Letrec *)obj)
3146 Scheme_Object **a;
3147 GC_MAYBE_IGNORE_INTERIOR Scheme_Object **stack;
3148 int i;
3149
3150 stack = RUNSTACK;
3151 a = l->procs;
3152 i = l->count;
3153
3154 UPDATE_THREAD_RSPTR_FOR_GC();
3155
3156 /* Create unfinished closures */
3157 while (i--) {
3158 Scheme_Object *uc;
3159 uc = scheme_make_closure(p, a[i], 0);
3160 stack[i] = uc;
3161 }
3162
3163 /* Close them: */
3164 i = l->count;
3165 while (i--) {
3166 GC_CAN_IGNORE Scheme_Object *clos;
3167 GC_CAN_IGNORE Scheme_Object **dest;
3168 GC_CAN_IGNORE mzshort *map;
3169 GC_CAN_IGNORE Scheme_Lambda *data;
3170 int j;
3171
3172 clos = stack[i];
3173
3174 #ifdef MZ_USE_JIT
3175 if (SAME_TYPE(_SCHEME_TYPE(clos), scheme_closure_type)) {
3176 dest = ((Scheme_Closure *)clos)->vals;
3177 } else {
3178 dest = ((Scheme_Native_Closure *)clos)->vals;
3179 }
3180 #else
3181 dest = ((Scheme_Closure *)clos)->vals;
3182 #endif
3183
3184 data = (Scheme_Lambda *)a[i];
3185
3186 map = data->closure_map;
3187 j = data->closure_size;
3188
3189 /* Beware - dest points to the middle of a block */
3190
3191 while (j--) {
3192 dest[j] = stack[map[j]];
3193 }
3194 }
3195
3196 obj = l->body;
3197 # undef l
3198 goto eval_top;
3199 }
3200
3201 case scheme_let_one_type:
3202 {
3203 /* Macro instead of var for efficient precise GC conversion */
3204 # define lo ((Scheme_Let_One *)obj)
3205
3206 PUSH_RUNSTACK(p, RUNSTACK, 1);
3207 RUNSTACK_CHANGED();
3208
3209 /* SFS pass may set LET_ONE_UNUSED, but not for the
3210 variable cases; in the constant case, the constant
3211 is #f, so it's ok to push it anyway. */
3212
3213 switch (SCHEME_LET_EVAL_TYPE(lo) & 0x7) {
3214 case SCHEME_EVAL_CONSTANT:
3215 RUNSTACK[0] = lo->value;
3216 break;
3217 case SCHEME_EVAL_GLOBAL:
3218 {
3219 global_lookup(RUNSTACK[0] =, lo->value, tmpv);
3220 }
3221 break;
3222 case SCHEME_EVAL_LOCAL:
3223 RUNSTACK[0] = RUNSTACK[SCHEME_LOCAL_POS(lo->value)];
3224 EVAL_SFS_CLEAR(RUNSTACK, lo->value);
3225 break;
3226 case SCHEME_EVAL_LOCAL_UNBOX:
3227 RUNSTACK[0] = SCHEME_ENVBOX_VAL(RUNSTACK[SCHEME_LOCAL_POS(lo->value)]);
3228 EVAL_SFS_CLEAR(RUNSTACK, lo->value);
3229 break;
3230 default:
3231 UPDATE_THREAD_RSPTR();
3232 {
3233 GC_CAN_IGNORE Scheme_Object *val;
3234 SFS_CLEAR_RUNSTACK_ONE(RUNSTACK, 0);
3235 val = _scheme_eval_linked_expr_wp(lo->value, p);
3236 if (!(SCHEME_LET_EVAL_TYPE(lo) & LET_ONE_UNUSED))
3237 RUNSTACK[0] = val;
3238 }
3239 break;
3240 }
3241
3242 obj = lo->body;
3243 # undef lo
3244 goto eval_top;
3245 }
3246
3247 case scheme_with_cont_mark_type:
3248 {
3249 /* Macro instead of var for efficient precise GC conversion */
3250 # define wcm ((Scheme_With_Continuation_Mark *)obj)
3251 Scheme_Object *key;
3252 GC_CAN_IGNORE Scheme_Object *val;
3253
3254 UPDATE_THREAD_RSPTR();
3255 key = wcm->key;
3256 if (SCHEME_TYPE(key) < _scheme_values_types_)
3257 key = _scheme_eval_linked_expr_wp(key, p);
3258 val = wcm->val;
3259 if (SCHEME_TYPE(val) < _scheme_values_types_)
3260 val = _scheme_eval_linked_expr_wp(val, p);
3261
3262 if (SCHEME_NP_CHAPERONEP(key)
3263 && SCHEME_CONTINUATION_MARK_KEYP(SCHEME_CHAPERONE_VAL(key))) {
3264 val = scheme_chaperone_do_continuation_mark("with-continuation-mark", 0, key, val);
3265 key = SCHEME_CHAPERONE_VAL(key);
3266 }
3267
3268 scheme_set_cont_mark(key, val);
3269
3270 obj = wcm->body;
3271 # undef wcm
3272 goto eval_top;
3273 }
3274
3275 case scheme_define_values_type:
3276 {
3277 UPDATE_THREAD_RSPTR();
3278 v = define_values_execute(obj);
3279 break;
3280 }
3281 case scheme_inline_variant_type:
3282 {
3283 obj = SCHEME_VEC_ELS(obj)[0];
3284 goto eval_top;
3285 }
3286 case scheme_set_bang_type:
3287 {
3288 UPDATE_THREAD_RSPTR();
3289 v = set_execute(obj);
3290 break;
3291 }
3292 case scheme_boxenv_type:
3293 {
3294 UPDATE_THREAD_RSPTR();
3295 v = bangboxenv_execute(obj);
3296 break;
3297 }
3298 case scheme_begin0_sequence_type:
3299 {
3300 UPDATE_THREAD_RSPTR();
3301 v = begin0_execute(obj);
3302 break;
3303 }
3304 case scheme_varref_form_type:
3305 {
3306 UPDATE_THREAD_RSPTR();
3307 v = ref_execute(obj);
3308 break;
3309 }
3310 case scheme_apply_values_type:
3311 {
3312 UPDATE_THREAD_RSPTR();
3313 v = apply_values_execute(obj);
3314 break;
3315 }
3316 case scheme_with_immed_mark_type:
3317 {
3318 # define wcm ((Scheme_With_Continuation_Mark *)obj)
3319 Scheme_Object *mark_key;
3320 GC_CAN_IGNORE Scheme_Object *mark_val;
3321
3322 mark_key = wcm->key;
3323 if (SCHEME_TYPE(mark_key) < _scheme_values_types_) {
3324 UPDATE_THREAD_RSPTR();
3325 mark_key = _scheme_eval_linked_expr_wp(mark_key, p);
3326 }
3327
3328 mark_val = wcm->val;
3329 if (SCHEME_TYPE(mark_val) < _scheme_values_types_) {
3330 UPDATE_THREAD_RSPTR();
3331 mark_val = _scheme_eval_linked_expr_wp(mark_val, p);
3332 }
3333
3334 UPDATE_THREAD_RSPTR();
3335 mark_val = scheme_chaperone_get_immediate_cc_mark(mark_key, mark_val);
3336
3337 PUSH_RUNSTACK(p, RUNSTACK, 1);
3338 RUNSTACK_CHANGED();
3339 RUNSTACK[0] = mark_val;
3340
3341 obj = wcm->body;
3342 goto eval_top;
3343 #undef wcm
3344 }
3345 case scheme_case_lambda_sequence_type:
3346 {
3347 UPDATE_THREAD_RSPTR();
3348 v = scheme_case_lambda_execute(obj);
3349 break;
3350 }
3351 default:
3352 v = obj;
3353 goto returnv_never_multi;
3354 }
3355 }
3356
3357 if (SAME_OBJ(v, SCHEME_TAIL_CALL_WAITING)) {
3358 obj = p->ku.apply.tail_rator;
3359 num_rands = p->ku.apply.tail_num_rands;
3360 rands = p->ku.apply.tail_rands;
3361 p->ku.apply.tail_rator = NULL;
3362 p->ku.apply.tail_rands = NULL;
3363 RUNSTACK = runstack_base;
3364 RUNSTACK_CHANGED();
3365 check_rands = num_rands;
3366 goto apply_top;
3367 }
3368
3369 if (SAME_OBJ(v, SCHEME_EVAL_WAITING)) {
3370 RESET_LOCAL_RUNSTACK();
3371 obj = p->ku.eval.wait_expr;
3372 p->ku.eval.wait_expr = NULL;
3373 goto eval_top;
3374 }
3375
3376 returnv:
3377
3378 if (SAME_OBJ(v, SCHEME_MULTIPLE_VALUES))
3379 if (get_value > 0) {
3380 scheme_wrong_return_arity(NULL, 1, p->ku.multiple.count,
3381 p->ku.multiple.array,
3382 NULL);
3383 return NULL;
3384 }
3385
3386 returnv_never_multi:
3387
3388 /* If resetting RUNSTACK to old_runstack makes the stack larger, we
3389 need to clear extra slots to avoid making an old value on the
3390 runstack suddenly live again */
3391 while ((uintptr_t)RUNSTACK > (uintptr_t)old_runstack) {
3392 RUNSTACK--;
3393 *RUNSTACK = NULL;
3394 }
3395 MZ_RUNSTACK = old_runstack;
3396 MZ_CONT_MARK_STACK = old_cont_mark_stack;
3397 MZ_CONT_MARK_POS -= 2;
3398
3399 DEBUG_CHECK_TYPE(v);
3400
3401 return v;
3402
3403 #ifdef p
3404 # undef p
3405 #endif
3406 }
3407
/* Returns the current Scheme runstack pointer (the stack used for
   activation-frame variables and collected application arguments),
   for use by code outside the main eval loop. */
Scheme_Object **scheme_current_argument_stack()
{
  return MZ_RUNSTACK;
}
3412
3413 /*========================================================================*/
3414 /* eval/compile/expand starting points */
3415 /*========================================================================*/
3416
/* Bridge to the expander's `dynamic-require`: forwards the given
   argument vector unchanged to the startup-linklet export. */
Scheme_Object *scheme_dynamic_require(int argc, Scheme_Object *argv[])
{
  Scheme_Object *dyn_req;
  dyn_req = scheme_get_startup_export("dynamic-require");
  return scheme_apply(dyn_req, argc, argv);
}
3423
/* Reports whether `v` is a syntax object, by asking the expander's
   `syntax?` predicate.  The procedure is resolved lazily and cached. */
int scheme_is_syntax(Scheme_Object *v)
{
  Scheme_Object *args[1];

  if (!is_syntax_proc) {
    /* Register the cache cell with the GC before first assignment */
    REGISTER_SO(is_syntax_proc);
    is_syntax_proc = scheme_get_startup_export("syntax?");
  }

  args[0] = v;
  return SCHEME_TRUEP(scheme_apply(is_syntax_proc, 1, args));
}
3434
/* Converts an expander syntax object to a datum via the startup
   export `maybe-syntax->datum`; during startup (before the expander
   is loaded) the value is passed through unchanged. */
Scheme_Object *scheme_expander_syntax_to_datum(Scheme_Object *v)
{
  Scheme_Object *args[1];

  if (scheme_starting_up)
    return v;

  if (!expander_syntax_to_datum_proc) {
    /* Lazily resolve and cache the startup export */
    REGISTER_SO(expander_syntax_to_datum_proc);
    expander_syntax_to_datum_proc = scheme_get_startup_export("maybe-syntax->datum");
  }

  args[0] = v;
  return scheme_apply(expander_syntax_to_datum_proc, 1, args);
}
3449
/* Requires `mod_path` into the current namespace via the expander's
   `namespace-require`. */
Scheme_Object *scheme_namespace_require(Scheme_Object *mod_path)
{
  Scheme_Object *args[1];
  args[0] = mod_path;
  return scheme_apply(scheme_get_startup_export("namespace-require"), 1, args);
}
3457
namespace_to_env(Scheme_Object * ns)3458 static Scheme_Env *namespace_to_env(Scheme_Object *ns)
3459 {
3460 Scheme_Env *env;
3461
3462 env = scheme_lookup_in_table(scheme_namespace_to_env, (char *)ns);
3463
3464 if (!env) {
3465 env = MALLOC_ONE_TAGGED(Scheme_Env);
3466 env->so.type = scheme_env_type;
3467 env->namespace = ns;
3468 scheme_add_to_table(scheme_namespace_to_env, (char *)ns, (void *)env, 0);
3469 }
3470
3471 return env;
3472 }
3473
scheme_make_empty_env(void)3474 Scheme_Env *scheme_make_empty_env(void)
3475 {
3476 Scheme_Object *proc, *ns, *inst, *a[2];
3477 Scheme_Env *env;
3478
3479 proc = scheme_get_startup_export("current-namespace");
3480 ns = scheme_apply(proc, 0, NULL);
3481
3482 env = namespace_to_env(ns);
3483
3484 proc = scheme_get_startup_export("namespace->instance");
3485 a[0] = ns;
3486 a[1] = scheme_make_integer(0);
3487 inst = scheme_apply(proc, 2, a);
3488
3489 env->instance = (Scheme_Instance *)inst;
3490
3491 return env;
3492 }
3493
scheme_get_current_namespace_as_env()3494 Scheme_Env *scheme_get_current_namespace_as_env()
3495 {
3496 Scheme_Object *proc, *ns;
3497
3498 proc = scheme_get_startup_export("current-namespace");
3499 ns = scheme_apply(proc, 0, NULL);
3500
3501 return namespace_to_env(ns);
3502 }
3503
/* Installs `env`'s namespace as the value of `current-namespace`. */
void scheme_set_current_namespace_as_env(Scheme_Env *env)
{
  Scheme_Object *args[1];
  args[0] = env->namespace;
  (void)scheme_apply(scheme_get_startup_export("current-namespace"), 1, args);
}
3513
/* Compiles `form` in `env` via the expander's `compile` export.
   `writeable` selects whether the result must be serializable. */
Scheme_Object *scheme_compile(Scheme_Object *form, Scheme_Env *env, int writeable)
{
  Scheme_Object *args[3];
  args[0] = form;
  args[1] = env->namespace;
  args[2] = writeable ? scheme_true : scheme_false;
  return scheme_apply(scheme_get_startup_export("compile"), 3, args);
}
3523
/* Compiles `form` in `env` without requiring a serializable result
   (writeable == 0); see scheme_compile(). */
Scheme_Object *scheme_compile_for_eval(Scheme_Object *form, Scheme_Env *env)
{
  return scheme_compile(form, env, 0);
}
3528
/* Evaluates `obj` as a top-level form in `env`'s namespace,
   expecting a single result. */
Scheme_Object *scheme_eval(Scheme_Object *obj, Scheme_Env *env)
{
  Scheme_Object *args[2];
  args[0] = obj;
  args[1] = env->namespace;
  return scheme_apply(scheme_get_startup_export("eval-top-level"), 2, args);
}
3537
/* Like scheme_eval(), but allows the form to produce multiple
   values (uses scheme_apply_multi). */
Scheme_Object *scheme_eval_multi(Scheme_Object *obj, Scheme_Env *env)
{
  Scheme_Object *args[2];
  args[0] = obj;
  args[1] = env->namespace;
  return scheme_apply_multi(scheme_get_startup_export("eval-top-level"), 2, args);
}
3546
/* Prompt-body callback: `_data` is an (expr . env) pair packed by
   scheme_eval_with_prompt(). */
static Scheme_Object *finish_eval_with_prompt(void *_data, int argc, Scheme_Object **argv)
{
  Scheme_Object *pr = (Scheme_Object *)_data;
  return scheme_eval(SCHEME_CAR(pr), (Scheme_Env *)SCHEME_CDR(pr));
}
3552
/* Evaluates `obj` in `env` under a fresh prompt, single result. */
Scheme_Object *scheme_eval_with_prompt(Scheme_Object *obj, Scheme_Env *env)
{
  /* Pack expr + env into a pair to fit the prompt-callback interface */
  Scheme_Object *data = scheme_make_pair(obj, (Scheme_Object *)env);
  return scheme_call_with_prompt(finish_eval_with_prompt, data);
}
3558
/* Prompt-body callback for the multiple-values variant: `_data` is
   an (expr . env) pair packed by scheme_eval_multi_with_prompt(). */
static Scheme_Object *finish_eval_multi_with_prompt(void *_data, int argc, Scheme_Object **argv)
{
  Scheme_Object *pr = (Scheme_Object *)_data;
  return scheme_eval_multi(SCHEME_CAR(pr), (Scheme_Env *)SCHEME_CDR(pr));
}
3564
/* Evaluates `obj` in `env` under a fresh prompt, allowing multiple
   result values. */
Scheme_Object *scheme_eval_multi_with_prompt(Scheme_Object *obj, Scheme_Env *env)
{
  Scheme_Object *data = scheme_make_pair(obj, (Scheme_Object *)env);
  return scheme_call_with_prompt_multi(finish_eval_multi_with_prompt, data);
}
3570
/* Evaluates an already-linked compiled expression for a single
   result; the `env` argument is unused here. */
Scheme_Object *_scheme_eval_compiled(Scheme_Object *obj, Scheme_Env *env)
{
  return _scheme_eval_linked_expr(obj);
}
3575
/* Evaluates an already-linked compiled expression, allowing multiple
   result values; the `env` argument is unused here. */
Scheme_Object *_scheme_eval_compiled_multi(Scheme_Object *obj, Scheme_Env *env)
{
  return _scheme_eval_linked_expr_multi(obj);
}
3580
/* Schedules `obj` for evaluation in tail position (via the
   scheme_tail_eval trampoline mechanism). */
Scheme_Object *scheme_tail_eval_expr(Scheme_Object *obj)
{
  return scheme_tail_eval(obj);
}
3585
/* Starts a primitive module: allocates a fresh instance named `name`
   wrapped in a thin Scheme_Env.  The wrapper records the *target*
   namespace (where the module will eventually be declared), not a
   namespace belonging to the new instance. */
Scheme_Env *scheme_primitive_module(Scheme_Object *name, Scheme_Env *for_env)
{
  Scheme_Env *env;
  Scheme_Instance *instance;
  Scheme_Hash_Tree *prot;

  env = MALLOC_ONE_TAGGED(Scheme_Env);
  env->so.type = scheme_env_type;
  env->namespace = for_env->namespace;

  instance = scheme_make_instance(name, NULL);
  env->instance = (Scheme_Instance *)instance;

  /* Start with an empty set of protected exports */
  prot = scheme_make_hash_tree(0);
  env->protected = prot;

  return env;
}
3605
/* Completes a primitive module started with scheme_primitive_module()
   by declaring the instance in the recorded target namespace. */
void scheme_finish_primitive_module(Scheme_Env *env)
{
  Scheme_Object *declare, *args[5];

  declare = scheme_get_startup_export("declare-primitive-module!");

  args[0] = env->instance->name;
  args[1] = (Scheme_Object *)env->instance;
  args[2] = env->namespace; /* target namespace */
  args[3] = (Scheme_Object *)env->protected;
  args[4] = env->cross_phase ? scheme_true : scheme_false;

  scheme_apply(declare, 5, args);
}
3618
/* Marks a primitive module (before scheme_finish_primitive_module)
   as cross-phase persistent ("phaseless"). */
void scheme_set_primitive_module_phaseless(Scheme_Env *env, int phaseless)
{
  env->cross_phase = phaseless;
}
3623
/* Adds `name` to the module's set of protected exports.  The hash
   tree is functional, so the updated tree is installed back. */
void scheme_protect_primitive_provide(Scheme_Env *env, Scheme_Object *name)
{
  Scheme_Hash_Tree *updated;
  updated = scheme_hash_tree_set(env->protected, name, scheme_true);
  env->protected = updated;
}
3630
3631 /* local functions */
3632
/* Reads one syntax object from `port` with source name `src`, via
   the expander's `read-syntax`.  Note the argument order: source
   name first, then the port. */
static Scheme_Object *read_syntax(Scheme_Object *port, Scheme_Object *src)
{
  Scheme_Object *args[2];
  args[0] = src;
  args[1] = port;
  return scheme_apply(scheme_get_startup_export("read-syntax"), 2, args);
}
3641
/* Applies the expander's `namespace-introduce` to `stx`. */
static Scheme_Object *namespace_introduce(Scheme_Object *stx)
{
  Scheme_Object *args[1];
  args[0] = stx;
  return scheme_apply(scheme_get_startup_export("namespace-introduce"), 1, args);
}
3649
/* Reads and evaluates forms from `port` (or from `str`, wrapped in a
   byte-string port if `port` is NULL) in `env`.  `w_prompt` selects
   evaluation under a fresh prompt.  The `cont` mode also terminates
   the loop: negative modes evaluate a single form; positive modes
   loop until EOF. */
static Scheme_Object *do_eval_string_all(Scheme_Object *port, const char *str, Scheme_Env *env,
                                         int cont, int w_prompt)
/* cont == -2 => module (no result)
   cont == -1 => single result
   cont == 1 -> multiple result ok
   cont == 2 -> #%top-interaction, multiple result ok, use current_print to show results */
{
  Scheme_Object *expr, *result = scheme_void;

  if (!port)
    port = scheme_make_byte_string_input_port(str);

  do {
    expr = read_syntax(port, scheme_false);

    if ((cont == -2) && !SAME_OBJ(expr, scheme_eof)) {
      /* Module mode: add the namespace's introduction scope */
      expr = namespace_introduce(expr);
    }

    if (SAME_OBJ(expr, scheme_eof))
      cont = 0; /* end of input: stop looping */
    else if (cont < 0) {
      /* Single-result modes */
      if (w_prompt)
        result = scheme_eval_with_prompt(expr, env);
      else
        result = scheme_eval(expr, env);
    } else {
      if (cont == 2)
        expr = scheme_make_pair(scheme_intern_symbol("#%top-interaction"), expr);

      if (w_prompt)
        result = scheme_eval_multi_with_prompt(expr, env);
      else
        result = scheme_eval_multi(expr, env);

      if (cont == 2) {
        /* REPL-like mode: show each result via the print handler */
        Scheme_Object **a, *_a[1], *arg[1], *printer;
        int cnt, i;

        if (result == SCHEME_MULTIPLE_VALUES) {
          Scheme_Thread *p = scheme_current_thread;
          /* Detach the thread's shared values buffer so printing
             (which can run arbitrary Scheme code) can't clobber the
             array we're iterating over */
          if (SAME_OBJ(p->ku.multiple.array, p->values_buffer))
            p->values_buffer = NULL;
          a = p->ku.multiple.array;
          p->ku.multiple.array = NULL;
          cnt = p->ku.multiple.count;
        } else {
          _a[0] = result;
          a = _a;
          cnt = 1;
        }

        for (i = 0; i < cnt; i++) {
          /* Re-fetch the handler each iteration in case printing or
             evaluation changed the parameterization */
          printer = scheme_get_param(scheme_current_config(), MZCONFIG_PRINT_HANDLER);
          arg[0] = a[i];
          scheme_apply(printer, 1, arg);
          scheme_flush_output(scheme_get_param(scheme_current_config(), MZCONFIG_OUTPUT_PORT));
        }
      }
    }
  } while (cont > 0);

  return result;
}
3714
/* Evaluates all forms in `str`; see do_eval_string_all() for the
   meaning of `cont`.  No prompt is installed. */
Scheme_Object *scheme_eval_string_all(const char *str, Scheme_Env *env, int cont)
{
  return do_eval_string_all(NULL, str, env, cont, 0);
}
3719
/* Evaluates one form from `str` for a single result, no prompt. */
Scheme_Object *scheme_eval_string(const char *str, Scheme_Env *env)
{
  return do_eval_string_all(NULL, str, env, -1, 0);
}
3724
/* Evaluates `str` as a module form (namespace-introduced, no result
   expected), no prompt. */
Scheme_Object *scheme_eval_module_string(const char *str, Scheme_Env *env)
{
  return do_eval_string_all(NULL, str, env, -2, 0);
}
3729
/* Evaluates one form from `str`, allowing multiple result values,
   no prompt. */
Scheme_Object *scheme_eval_string_multi(const char *str, Scheme_Env *env)
{
  return do_eval_string_all(NULL, str, env, 0, 0);
}
3734
/* Like scheme_eval_string_all(), but each evaluation runs under a
   fresh prompt. */
Scheme_Object *scheme_eval_string_all_with_prompt(const char *str, Scheme_Env *env, int cont)
{
  return do_eval_string_all(NULL, str, env, cont, 1);
}
3739
/* Evaluates all forms read from `port` under prompts; a NULL port
   defaults to the original stdin port. */
Scheme_Object *scheme_eval_all_with_prompt(Scheme_Object *port, Scheme_Env *env, int cont)
{
  if (!port)
    port = scheme_orig_stdin_port;
  return do_eval_string_all(port, NULL, env, cont, 1);
}
3745
/* Evaluates one form from `str` for a single result, under a prompt. */
Scheme_Object *scheme_eval_string_with_prompt(const char *str, Scheme_Env *env)
{
  return do_eval_string_all(NULL, str, env, -1, 1);
}
3750
/* Evaluates one form from `str`, allowing multiple result values,
   under a prompt. */
Scheme_Object *scheme_eval_string_multi_with_prompt(const char *str, Scheme_Env *env)
{
  return do_eval_string_all(NULL, str, env, 0, 1);
}
3755
/* Loads embedded code via the startup "embedded-load" export.
   Two calling conventions, selected by the sign of `len`:
   - len < 0 (description mode): `desc` holds three consecutive
     NUL-terminated strings -- a start offset, an end offset, and a
     filename, where an empty filename means "read from the running
     executable" (found via `(system-path 'exec-file)`).
   - len >= 0 (content mode): `desc` is the raw content, `len` bytes.
   `predefined` is forwarded as a boolean flag. */
void scheme_embedded_load(intptr_t len, const char *desc, int predefined)
{
  Scheme_Object *s, *e, *f, *a[5], *eload;
  int argc = 4;
  eload = scheme_get_startup_export("embedded-load");
  if (len < 0) {
    /* description mode: string embeds start, end, and filename, where
       a 0-length filename means to find the executable via
       `(system-path 'exec-file)`. */
    int slen, elen, foff;
    slen = strlen(desc);
    elen = strlen(desc XFORM_OK_PLUS (slen + 1)); /* second segment */
    foff = slen + 1 + elen + 1; /* offset of the filename segment */
    s = scheme_make_utf8_string(desc);
    e = scheme_make_utf8_string(desc XFORM_OK_PLUS (slen + 1));
    if (desc[foff] != 0) {
      /* Non-empty filename: pass it as a fifth argument */
      f = scheme_make_byte_string(desc XFORM_OK_PLUS foff);
      argc = 5;
    } else
      f = scheme_false;
    a[0] = s;
    a[1] = e;
    a[2] = scheme_false;
    a[4] = f;
  } else {
    /* content mode */
    a[0] = scheme_false;
    a[1] = scheme_false;
    s = scheme_make_sized_byte_string((char *)desc, len, 0);
    a[2] = s;
  }
  a[3] = (predefined ? scheme_true : scheme_false);
  (void)scheme_apply(eload, argc, a);
}
3790
/* Reports whether module path `m` names a predefined module, via the
   expander's `module-predefined?`. */
int scheme_is_predefined_module_path(Scheme_Object *m)
{
  Scheme_Object *args[1], *res;
  args[0] = m;
  res = scheme_apply(scheme_get_startup_export("module-predefined?"), 1, args);
  return SCHEME_TRUEP(res);
}
3799
/* Initializes `current-library-collection-links` and
   `current-library-collection-paths` from the corresponding `find-`
   procedures, passing `extra_dirs` and `post_dirs` to the path
   search.  Any Scheme-level escape (error) during initialization is
   caught and silently discarded. */
void scheme_init_collection_paths_post(Scheme_Env *env, Scheme_Object *extra_dirs, Scheme_Object *post_dirs)
{
  /* `volatile` because these must survive scheme_setjmp/longjmp */
  mz_jmp_buf * volatile save, newbuf;
  Scheme_Thread * volatile p;
  p = scheme_get_current_thread();
  save = p->error_buf;
  p->error_buf = &newbuf;
  if (!scheme_setjmp(newbuf)) {
    Scheme_Object *clcp, *flcp, *a[2];

    clcp = scheme_builtin_value("current-library-collection-links");
    flcp = scheme_builtin_value("find-library-collection-links");

    /* Both procedures must be available (may be absent in minimal builds
       -- TODO confirm when scheme_builtin_value returns NULL) */
    if (clcp && flcp) {
      a[0] = _scheme_apply(flcp, 0, NULL);
      _scheme_apply(clcp, 1, a);
    }

    clcp = scheme_builtin_value("current-library-collection-paths");
    flcp = scheme_builtin_value("find-library-collection-paths");

    if (clcp && flcp) {
      a[0] = extra_dirs;
      a[1] = post_dirs;
      a[0] = _scheme_apply(flcp, 2, a);
      _scheme_apply(clcp, 1, a);
    }
  } else {
    /* Initialization escaped with an error: clear and ignore */
    scheme_clear_escape();
  }
  p->error_buf = save;
}
3832
/* Convenience wrapper: initialize collection paths with no
   post-directories. */
void scheme_init_collection_paths(Scheme_Env *env, Scheme_Object *extra_dirs)
{
  scheme_init_collection_paths_post(env, extra_dirs, scheme_null);
}
3837
/* Initializes `current-compiled-file-roots`.  When `paths` is
   non-NULL, it is a path-list string in which the literal text
   "@(version)" is first replaced by the current version string;
   otherwise the default roots from `find-compiled-file-roots` are
   installed as-is.  Errors during initialization are caught and
   ignored. */
void scheme_init_compiled_roots(Scheme_Env *global_env, const char *paths)
{
  /* `volatile` because these must survive scheme_setjmp/longjmp */
  mz_jmp_buf * volatile save, newbuf;
  Scheme_Thread * volatile p;
  p = scheme_get_current_thread();
  save = p->error_buf;
  p->error_buf = &newbuf;
  if (!scheme_setjmp(newbuf)) {
    Scheme_Object *rr, *ccfr, *fcfr, *pls2pl, *a[3];

    fcfr = scheme_builtin_value("find-compiled-file-roots");
    ccfr = scheme_builtin_value("current-compiled-file-roots");
    if (paths) {
      rr = scheme_builtin_value("regexp-replace*");
      pls2pl = scheme_builtin_value("path-list-string->path-list");
    } else
      rr = pls2pl = scheme_false; /* non-NULL placeholders pass the check below */

    if (rr && fcfr && ccfr && pls2pl) {
      if (paths) {
        /* Substitute the version for the "@(version)" placeholder */
        a[0] = scheme_make_utf8_string("@[(]version[)]");
        a[1] = scheme_make_utf8_string(paths);
        a[2] = scheme_make_utf8_string(scheme_version());
        a[2] = _scheme_apply(rr, 3, a);
      }

      /* Default roots; also serves as the fallback list argument to
         path-list-string->path-list below */
      a[1] = _scheme_apply(fcfr, 0, NULL);

      if (paths) {
        a[0] = a[2];
        a[0] = _scheme_apply(pls2pl, 2, a);
      } else {
        a[0] = a[1];
      }

      _scheme_apply(ccfr, 1, a);
    }
  } else {
    /* Initialization escaped with an error: clear and ignore */
    scheme_clear_escape();
  }
  p->error_buf = save;
}
3880
/* Parameter procedure for `compile-allow-set!-undefined`. */
static Scheme_Object *allow_set_undefined(int argc, Scheme_Object **argv)
{
  Scheme_Object *key = scheme_make_integer(MZCONFIG_ALLOW_SET_UNDEFINED);
  return scheme_param_config("compile-allow-set!-undefined", key,
                             argc, argv, -1, NULL, NULL, 1);
}
3888
/* Parameter procedure for `compile-enforce-module-constants`. */
static Scheme_Object *compile_module_constants(int argc, Scheme_Object **argv)
{
  Scheme_Object *key = scheme_make_integer(MZCONFIG_COMPILE_MODULE_CONSTS);
  return scheme_param_config("compile-enforce-module-constants", key,
                             argc, argv, -1, NULL, NULL, 1);
}
3896
/* Parameter procedure for `eval-jit-enabled`. */
static Scheme_Object *use_jit(int argc, Scheme_Object **argv)
{
  Scheme_Object *key = scheme_make_integer(MZCONFIG_USE_JIT);
  return scheme_param_config("eval-jit-enabled", key,
                             argc, argv, -1, NULL, NULL, 1);
}
3904
/* Parameter procedure for `compile-context-preservation-enabled`. */
static Scheme_Object *disallow_inline(int argc, Scheme_Object **argv)
{
  Scheme_Object *key = scheme_make_integer(MZCONFIG_DISALLOW_INLINE);
  return scheme_param_config("compile-context-preservation-enabled", key,
                             argc, argv, -1, NULL, NULL, 1);
}
3912
/* Parameter procedure for `current-compile-target-machine`, with a
   guard that validates candidate values. */
static Scheme_Object *compile_target_machine(int argc, Scheme_Object **argv)
{
  Scheme_Object *key = scheme_make_integer(MZCONFIG_COMPILE_TARGET_MACHINE);
  return scheme_param_config2("current-compile-target-machine", key,
                              argc, argv, -1,
                              scheme_compile_target_check,
                              "(or/c #f (and/c symbol? compile-target-machine?))",
                              0);
}
3921
/* Implements `compile-target-machine?`: checks the symbol contract,
   then defers to scheme_compile_target_check. */
static Scheme_Object *compile_is_target_machine(int argc, Scheme_Object **argv)
{
  if (!SCHEME_SYMBOLP(argv[0]))
    scheme_wrong_contract("compile-target-machine?", "symbol?", 0, argc, argv);

  return scheme_compile_target_check(argc, argv);
}
3928
3929 static Scheme_Object *
enable_break(int argc,Scheme_Object * argv[])3930 enable_break(int argc, Scheme_Object *argv[])
3931 {
3932 if (argc == 1) {
3933 scheme_set_can_break(SCHEME_TRUEP(argv[0]));
3934 if (SCHEME_TRUEP(argv[0])) {
3935 if (scheme_current_thread->external_break && scheme_can_break(scheme_current_thread)) {
3936 scheme_thread_block(0.0);
3937 scheme_current_thread->ran_some = 1;
3938 }
3939 }
3940 return scheme_void;
3941 } else {
3942 return scheme_can_break(scheme_current_thread) ? scheme_true : scheme_false;
3943 }
3944 }
3945
/* Builds a module path index from `path` and `base` via
   `module-path-index-join`.
   NOTE(review): the `resolved` argument is accepted but not
   forwarded; only path and base reach the join. */
Scheme_Object *scheme_make_modidx(Scheme_Object *path,
                                  Scheme_Object *base,
                                  Scheme_Object *resolved)
{
  Scheme_Object *join, *args[2];

  join = scheme_get_startup_export("module-path-index-join");
  args[0] = path;
  args[1] = base;
  return scheme_apply(join, 2, args);
}
3957
/* Reports whether `v` is a module path index (`module-path-index?`). */
int scheme_is_module_path_index(Scheme_Object *v)
{
  Scheme_Object *args[1];
  args[0] = v;
  return SCHEME_TRUEP(scheme_apply(scheme_get_startup_export("module-path-index?"), 1, args));
}
3965
/* Reports whether `v` is a resolved module path
   (`resolved-module-path?`). */
int scheme_is_resolved_module_path(Scheme_Object *v)
{
  Scheme_Object *args[1];
  args[0] = v;
  return SCHEME_TRUEP(scheme_apply(scheme_get_startup_export("resolved-module-path?"), 1, args));
}
3973
/* Reports whether `v` is a module path (`module-path?`). */
int scheme_is_module_path(Scheme_Object *v)
{
  Scheme_Object *args[1];
  args[0] = v;
  return SCHEME_TRUEP(scheme_apply(scheme_get_startup_export("module-path?"), 1, args));
}
3981
/* Reports whether the module named by `name` is declared; when
   `try_load` is true, the expander may load it to find out. */
int scheme_module_is_declared(Scheme_Object *name, int try_load)
{
  Scheme_Object *args[2];
  args[0] = name;
  args[1] = try_load ? scheme_true : scheme_false;
  return SCHEME_TRUEP(scheme_apply(scheme_get_startup_export("module-declared?"), 2, args));
}
3990
/* Converts a datum to kernel syntax via `datum->kernel-syntax`. */
Scheme_Object *scheme_datum_to_kernel_stx(Scheme_Object *v)
{
  Scheme_Object *args[1];
  args[0] = v;
  return scheme_apply(scheme_get_startup_export("datum->kernel-syntax"), 1, args);
}
3998
3999 /*========================================================================*/
4000 /* precise GC traversers */
4001 /*========================================================================*/
4002
4003 #ifdef MZ_PRECISE_GC
4004
4005 START_XFORM_SKIP;
4006
4007 #include "mzmark_eval.inc"
4008
/* Registers the precise-GC mark/fixup procedures for runtime types
   used by this file (see mzmark_eval.inc). */
static void register_traversers(void)
{
  GC_REG_TRAV(scheme_rt_saved_stack, mark_saved_stack);
}
4013
4014 END_XFORM_SKIP;
4015
4016 #endif
4017