1 /* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2018 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19 #if HAVE_CONFIG_H
20 # include <config.h>
21 #endif
22
23 #include <stdlib.h>
24 #include <alloca.h>
25 #include <alignof.h>
26 #include <string.h>
27 #include <stdint.h>
28 #include <unistd.h>
29
30 #ifdef HAVE_SYS_MMAN_H
31 #include <sys/mman.h>
32 #endif
33
34 #include "libguile/bdw-gc.h"
35 #include <gc/gc_mark.h>
36
37 #include "libguile/_scm.h"
38 #include "libguile/atomic.h"
39 #include "libguile/atomics-internal.h"
40 #include "libguile/cache-internal.h"
41 #include "libguile/control.h"
42 #include "libguile/frames.h"
43 #include "libguile/gc-inline.h"
44 #include "libguile/instructions.h"
45 #include "libguile/loader.h"
46 #include "libguile/programs.h"
47 #include "libguile/simpos.h"
48 #include "libguile/vm.h"
49 #include "libguile/vm-builtins.h"
50
/* Engine used by threads that have not selected one explicitly.  */
static int vm_default_engine = SCM_VM_REGULAR_ENGINE;

/* Unfortunately we can't snarf these: snarfed things are only loaded up from
   (system vm vm), which might not be loaded before an error happens. */
static SCM sym_vm_run;
static SCM sym_vm_error;
static SCM sym_keyword_argument_error;
static SCM sym_regular;
static SCM sym_debug;

/* The page size. */
static size_t page_size;
63
/* The VM has a number of internal assertions that shouldn't normally be
   necessary, but might be if you think you found a bug in the VM. */
/* #define VM_ENABLE_ASSERTIONS */

/* Grow the VM stack so that NEW_SP becomes addressable.  Declared here;
   the definition is further down in this file (outside this chunk).
   Kept out of line so the sp-adjustment fast path stays small.  */
static void vm_expand_stack (struct scm_vm *vp,
                             union scm_vm_stack_element *new_sp) SCM_NOINLINE;

/* RESTORE is for the case where we know we have done a PUSH of equal or
   greater stack size in the past.  Otherwise PUSH is the thing, which
   may expand the stack. */
enum vm_increase_sp_kind { VM_SP_PUSH, VM_SP_RESTORE };
75
/* Move the stack pointer down to NEW_SP (the VM stack grows downwards).
   With VM_SP_PUSH the stack may be expanded to make room; with
   VM_SP_RESTORE the target is known to have been valid in the past, so
   only the pointers are updated.  */
static inline void
vm_increase_sp (struct scm_vm *vp, union scm_vm_stack_element *new_sp,
                enum vm_increase_sp_kind kind)
{
  /* Fast path: still above the deepest point reached since the last
     GC, so this region is known-good.  */
  if (new_sp >= vp->sp_min_since_gc)
    {
      vp->sp = new_sp;
      return;
    }

  if (kind == VM_SP_PUSH && new_sp < vp->stack_limit)
    /* Pushing past the end of the allocated stack: grow it.  */
    vm_expand_stack (vp, new_sp);
  else
    /* In range; record the new low-water mark along with sp.  */
    vp->sp_min_since_gc = vp->sp = new_sp;
}
91
/* Push the stack pointer down to NEW_SP, expanding the stack if
   necessary.  */
static inline void
vm_push_sp (struct scm_vm *vp, union scm_vm_stack_element *new_sp)
{
  vm_increase_sp (vp, new_sp, VM_SP_PUSH);
}
97
/* Restore the stack pointer to NEW_SP, which must lie within a region
   that was pushed (and therefore valid) at some point in the past.  */
static inline void
vm_restore_sp (struct scm_vm *vp, union scm_vm_stack_element *new_sp)
{
  vm_increase_sp (vp, new_sp, VM_SP_RESTORE);
}
103
104
105 /*
106 * VM Continuation
107 */
108
/* Print the VM continuation object X to PORT in the unreadable
   "#<vm-continuation ADDR>" form.  PSTATE is unused.  */
void
scm_i_vm_cont_print (SCM x, SCM port, scm_print_state *pstate)
{
  scm_puts ("#<vm-continuation ", port);
  scm_uintprint (SCM_UNPACK (x), 16, port);
  scm_puts (">", port);
}
116
/* Fill in FRAME so that it refers to the newest frame of the captured
   continuation CONT.  Always returns 1 (this operation cannot fail).  */
int
scm_i_vm_cont_to_frame (SCM cont, struct scm_frame *frame)
{
  struct scm_vm_cont *data = SCM_VM_CONT_DATA (cont);

  frame->stack_holder = data;
  /* Offsets are measured from the top of the captured stack.  */
  frame->fp_offset = data->fp_offset;
  frame->sp_offset = data->stack_size;
  frame->ip = data->ra;

  return 1;
}
129
/* Ideally we could avoid copying the C stack if the continuation root
   is inside VM code, and call/cc was invoked within that same call to
   vm_run.  That's currently not implemented. */

/* Copy the live VM stack between STACK_TOP and SP (the stack grows
   down) into a fresh heap-allocated scm_vm_cont, recording the resume
   address RA, the frame pointer FP (as an offset from STACK_TOP), plus
   DYNSTACK and FLAGS.  Returns the wrapped continuation object.  */
SCM
scm_i_vm_capture_stack (union scm_vm_stack_element *stack_top,
                        union scm_vm_stack_element *fp,
                        union scm_vm_stack_element *sp, scm_t_uint32 *ra,
                        scm_t_dynstack *dynstack, scm_t_uint32 flags)
{
  struct scm_vm_cont *p;

  p = scm_gc_malloc (sizeof (*p), "capture_vm_cont");
  p->stack_size = stack_top - sp;
  p->stack_bottom = scm_gc_malloc (p->stack_size * sizeof (*p->stack_bottom),
                                   "capture_vm_cont");
  p->ra = ra;
  p->fp_offset = stack_top - fp;
  memcpy (p->stack_bottom, sp, p->stack_size * sizeof (*p->stack_bottom));
  p->dynstack = dynstack;
  p->flags = flags;
  return scm_cell (scm_tc7_vm_cont, (scm_t_bits) p);
}
152
/* Arguments for vm_return_to_continuation_inner, threaded through
   GC_call_with_alloc_lock's single void* parameter.  */
struct return_to_continuation_data
{
  struct scm_vm_cont *cp;
  struct scm_vm *vp;
};
158
/* Called with the GC lock to prevent the stack marker from traversing a
   stack in an inconsistent state. */
static void *
vm_return_to_continuation_inner (void *data_ptr)
{
  struct return_to_continuation_data *data = data_ptr;
  struct scm_vm *vp = data->vp;
  struct scm_vm_cont *cp = data->cp;

  /* We know that there is enough space for the continuation, because we
     captured it in the past.  However there may have been an expansion
     since the capture, so we may have to re-link the frame
     pointers.  */
  memcpy (vp->stack_top - cp->stack_size,
          cp->stack_bottom,
          cp->stack_size * sizeof (*cp->stack_bottom));
  vp->fp = vp->stack_top - cp->fp_offset;
  vm_restore_sp (vp, vp->stack_top - cp->stack_size);

  return NULL;
}
180
/* Reinstate the full continuation CONT on VP's stack and arrange for it
   to receive the N return values in ARGV.  On exit vp->ip points at the
   continuation's resume address.  */
static void
vm_return_to_continuation (struct scm_vm *vp, SCM cont, size_t n,
                           union scm_vm_stack_element *argv)
{
  struct scm_vm_cont *cp;
  union scm_vm_stack_element *argv_copy;
  struct return_to_continuation_data data;

  /* ARGV points into the stack region we are about to overwrite; copy
     the values aside first.  */
  argv_copy = alloca (n * sizeof (*argv));
  memcpy (argv_copy, argv, n * sizeof (*argv));

  cp = SCM_VM_CONT_DATA (cont);

  data.cp = cp;
  data.vp = vp;
  /* Rewrite the stack under the GC allocation lock so the marker never
     sees it half-copied.  */
  GC_call_with_alloc_lock (vm_return_to_continuation_inner, &data);

  /* Now we have the continuation properly copied over.  We just need to
     copy on an empty frame and the return values, as the continuation
     expects.  */
  vm_push_sp (vp, vp->sp - 3 - n);
  vp->sp[n+2].as_scm = SCM_BOOL_F;
  vp->sp[n+1].as_scm = SCM_BOOL_F;
  vp->sp[n].as_scm = SCM_BOOL_F;
  memcpy(vp->sp, argv_copy, n * sizeof (union scm_vm_stack_element));

  vp->ip = cp->ra;
}
209
/* Forward declaration; defined further down (outside this chunk) —
   presumably returns thread T's VM.  */
static struct scm_vm * thread_vm (scm_i_thread *t);

/* Capture the calling thread's whole VM stack together with its entire
   dynamic stack, with no flags set.  */
SCM
scm_i_capture_current_stack (void)
{
  scm_i_thread *thread;
  struct scm_vm *vp;

  thread = SCM_I_CURRENT_THREAD;
  vp = thread_vm (thread);

  return scm_i_vm_capture_stack (vp->stack_top, vp->fp, vp->sp, vp->ip,
                                 scm_dynstack_capture_all (&thread->dynstack),
                                 0);
}
224
static void vm_dispatch_apply_hook (struct scm_vm *vp) SCM_NOINLINE;
static void vm_dispatch_push_continuation_hook (struct scm_vm *vp) SCM_NOINLINE;
static void vm_dispatch_pop_continuation_hook
  (struct scm_vm *vp, union scm_vm_stack_element *old_fp) SCM_NOINLINE;
static void vm_dispatch_next_hook (struct scm_vm *vp) SCM_NOINLINE;
static void vm_dispatch_abort_hook (struct scm_vm *vp) SCM_NOINLINE;

/* Run the procedures attached to hook HOOK_NUM, passing them a
   transient frame object plus the N extra values in ARGV.  Tracing is
   disabled while the hook runs, which prevents recursive dispatch.  */
static void
vm_dispatch_hook (struct scm_vm *vp, int hook_num,
                  union scm_vm_stack_element *argv, int n)
{
  SCM hook;
  struct scm_frame c_frame;
  scm_t_cell *frame;
  int saved_trace_level;

  hook = vp->hooks[hook_num];

  /* Fast exit when the hook is unset or has no procedures.  */
  if (SCM_LIKELY (scm_is_false (hook))
      || scm_is_null (SCM_HOOK_PROCEDURES (hook)))
    return;

  saved_trace_level = vp->trace_level;
  vp->trace_level = 0;

  /* Allocate a frame object on the stack.  This is more efficient than calling
     `scm_c_make_frame ()' to allocate on the heap, but it forces hooks to not
     capture frame objects.

     At the same time, procedures such as `frame-procedure' make sense only
     while the stack frame represented by the frame object is visible, so it
     seems reasonable to limit the lifetime of frame objects. */

  c_frame.stack_holder = vp;
  c_frame.fp_offset = vp->stack_top - vp->fp;
  c_frame.sp_offset = vp->stack_top - vp->sp;
  c_frame.ip = vp->ip;

  /* Arrange for FRAME to be 8-byte aligned, like any other cell. */
  frame = alloca (sizeof (*frame) + 8);
  frame = (scm_t_cell *) ROUND_UP ((scm_t_uintptr) frame, 8UL);

  /* Hand-build a tc7 frame cell pointing at the stack-allocated
     c_frame.  */
  frame->word_0 = SCM_PACK (scm_tc7_frame | (SCM_VM_FRAME_KIND_VM << 8));
  frame->word_1 = SCM_PACK_POINTER (&c_frame);

  /* Common cases (0 or 1 extra values) avoid consing an argument
     list.  */
  if (n == 0)
    {
      SCM args[1];

      args[0] = SCM_PACK_POINTER (frame);
      scm_c_run_hookn (hook, args, 1);
    }
  else if (n == 1)
    {
      SCM args[2];

      args[0] = SCM_PACK_POINTER (frame);
      args[1] = argv[0].as_scm;
      scm_c_run_hookn (hook, args, 2);
    }
  else
    {
      SCM args = SCM_EOL;
      int i;

      /* Build the argument list, then prepend the frame.  */
      for (i = 0; i < n; i++)
        args = scm_cons (argv[i].as_scm, args);
      scm_c_run_hook (hook, scm_cons (SCM_PACK_POINTER (frame), args));
    }

  vp->trace_level = saved_trace_level;
}
297
298 static void
vm_dispatch_apply_hook(struct scm_vm * vp)299 vm_dispatch_apply_hook (struct scm_vm *vp)
300 {
301 return vm_dispatch_hook (vp, SCM_VM_APPLY_HOOK, NULL, 0);
302 }
vm_dispatch_push_continuation_hook(struct scm_vm * vp)303 static void vm_dispatch_push_continuation_hook (struct scm_vm *vp)
304 {
305 return vm_dispatch_hook (vp, SCM_VM_PUSH_CONTINUATION_HOOK, NULL, 0);
306 }
vm_dispatch_pop_continuation_hook(struct scm_vm * vp,union scm_vm_stack_element * old_fp)307 static void vm_dispatch_pop_continuation_hook (struct scm_vm *vp,
308 union scm_vm_stack_element *old_fp)
309 {
310 return vm_dispatch_hook (vp, SCM_VM_POP_CONTINUATION_HOOK,
311 vp->sp, SCM_FRAME_NUM_LOCALS (old_fp, vp->sp) - 1);
312 }
vm_dispatch_next_hook(struct scm_vm * vp)313 static void vm_dispatch_next_hook (struct scm_vm *vp)
314 {
315 return vm_dispatch_hook (vp, SCM_VM_NEXT_HOOK, NULL, 0);
316 }
vm_dispatch_abort_hook(struct scm_vm * vp)317 static void vm_dispatch_abort_hook (struct scm_vm *vp)
318 {
319 return vm_dispatch_hook (vp, SCM_VM_ABORT_CONTINUATION_HOOK,
320 vp->sp, SCM_FRAME_NUM_LOCALS (vp->fp, vp->sp) - 1);
321 }
322
static void vm_abort (struct scm_vm *vp, SCM tag, size_t nargs,
                      scm_i_jmp_buf *current_registers) SCM_NORETURN;

/* Abort to the prompt matching TAG, delivering the NARGS values on top
   of VP's stack.  Marked SCM_NORETURN: scm_c_abort is expected to
   unwind non-locally.  */
static void
vm_abort (struct scm_vm *vp, SCM tag, size_t nargs,
          scm_i_jmp_buf *current_registers)
{
  size_t i;
  SCM *argv;

  /* Copy the values off the stack; note the reversal, as vp->sp[0] is
     the last one.  */
  argv = alloca (nargs * sizeof (SCM));
  for (i = 0; i < nargs; i++)
    argv[i] = vp->sp[nargs - i - 1].as_scm;

  /* Pop the frame's locals before unwinding.  */
  vp->sp = vp->fp;

  scm_c_abort (vp, tag, nargs, argv, current_registers);
}
342
/* Arguments for vm_reinstate_partial_continuation_inner, threaded
   through GC_call_with_alloc_lock.  */
struct vm_reinstate_partial_continuation_data
{
  struct scm_vm *vp;
  struct scm_vm_cont *cp;
};

/* Splice the captured partial continuation CP onto VP's stack below the
   current frame pointer.  Runs under the GC allocation lock so the
   stack marker never sees the stack half-copied.  */
static void *
vm_reinstate_partial_continuation_inner (void *data_ptr)
{
  struct vm_reinstate_partial_continuation_data *data = data_ptr;
  struct scm_vm *vp = data->vp;
  struct scm_vm_cont *cp = data->cp;

  memcpy (vp->fp - cp->stack_size,
          cp->stack_bottom,
          cp->stack_size * sizeof (*cp->stack_bottom));

  /* Relocate fp into the copied region and resume at the captured
     return address.  */
  vp->fp -= cp->fp_offset;
  vp->ip = cp->ra;

  return NULL;
}
365
/* Reinstate the partial continuation CONT, which will consume the NARGS
   values currently on top of VP's stack, and rewind the dynamic-stack
   entries it captured onto DYNSTACK.  REGISTERS identifies the current
   setjmp context, used to relocate captured prompts.  */
static void
vm_reinstate_partial_continuation (struct scm_vm *vp, SCM cont, size_t nargs,
                                   scm_t_dynstack *dynstack,
                                   scm_i_jmp_buf *registers)
{
  struct vm_reinstate_partial_continuation_data data;
  struct scm_vm_cont *cp;
  union scm_vm_stack_element *args;
  scm_t_ptrdiff old_fp_offset;

  /* The arguments live in the stack region about to be overwritten;
     save them aside first.  */
  args = alloca (nargs * sizeof (*args));
  memcpy (args, vp->sp, nargs * sizeof (*args));

  cp = SCM_VM_CONT_DATA (cont);

  old_fp_offset = vp->stack_top - vp->fp;

  /* Make room for the continuation, the arguments, and one extra slot
     (filled with #f below).  */
  vm_push_sp (vp, vp->fp - (cp->stack_size + nargs + 1));

  data.vp = vp;
  data.cp = cp;
  GC_call_with_alloc_lock (vm_reinstate_partial_continuation_inner, &data);

  /* The resume continuation will expect ARGS on the stack as if from a
     multiple-value return.  Fill in the closure slot with #f, and copy
     the arguments into place.  */
  vp->sp[nargs].as_scm = SCM_BOOL_F;
  memcpy (vp->sp, args, nargs * sizeof (*args));

  /* The prompt captured a slice of the dynamic stack.  Here we wind
     those entries onto the current thread's stack.  We also have to
     relocate any prompts that we see along the way.  */
  {
    scm_t_bits *walk;

    for (walk = SCM_DYNSTACK_FIRST (cp->dynstack);
         SCM_DYNSTACK_TAG (walk);
         walk = SCM_DYNSTACK_NEXT (walk))
      {
        scm_t_bits tag = SCM_DYNSTACK_TAG (walk);

        if (SCM_DYNSTACK_TAG_TYPE (tag) == SCM_DYNSTACK_TYPE_PROMPT)
          scm_dynstack_wind_prompt (dynstack, walk, old_fp_offset, registers);
        else
          scm_dynstack_wind_1 (dynstack, walk);
      }
  }
}
414
415
/*
 * VM Error Handling
 */

/* Each of these raises a Scheme exception and does not return.
   SCM_NOINLINE keeps these cold paths out of the VM's hot instruction
   bodies.  */
static void vm_error (const char *msg, SCM arg) SCM_NORETURN;
static void vm_error_bad_instruction (scm_t_uint32 inst) SCM_NORETURN SCM_NOINLINE;
static void vm_error_unbound (SCM sym) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_variable (const char *func_name, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_apply_to_non_list (SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_kwargs_missing_value (SCM proc, SCM kw) SCM_NORETURN SCM_NOINLINE;
static void vm_error_kwargs_invalid_keyword (SCM proc, SCM obj) SCM_NORETURN SCM_NOINLINE;
static void vm_error_kwargs_unrecognized_keyword (SCM proc, SCM kw) SCM_NORETURN SCM_NOINLINE;
static void vm_error_wrong_num_args (SCM proc) SCM_NORETURN SCM_NOINLINE;
static void vm_error_wrong_type_apply (SCM proc) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_char (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_mutable_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_string (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_atomic_box (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_bytevector (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_mutable_bytevector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_struct (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_vector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_mutable_vector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE;
static void vm_error_out_of_range_uint64 (const char *subr, scm_t_uint64 idx) SCM_NORETURN SCM_NOINLINE;
static void vm_error_out_of_range_int64 (const char *subr, scm_t_int64 idx) SCM_NORETURN SCM_NOINLINE;
static void vm_error_no_values (void) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_enough_values (void) SCM_NORETURN SCM_NOINLINE;
static void vm_error_wrong_number_of_values (scm_t_uint32 expected) SCM_NORETURN SCM_NOINLINE;
static void vm_error_continuation_not_rewindable (SCM cont) SCM_NORETURN SCM_NOINLINE;
446
/* Throw a vm-error exception with format string MSG and optional
   irritant ARG (pass SCM_UNDEFINED for none).  Does not return.  */
static void
vm_error (const char *msg, SCM arg)
{
  scm_throw (sym_vm_error,
             scm_list_3 (sym_vm_run, scm_from_latin1_string (msg),
                         SCM_UNBNDP (arg) ? SCM_EOL : scm_list_1 (arg)));
  abort(); /* not reached */
}
455
/* Miscellaneous VM runtime errors: bad opcodes, unbound variables,
   and bad operands to variable-ref/apply.  */

static void
vm_error_bad_instruction (scm_t_uint32 inst)
{
  vm_error ("VM: Bad instruction: ~s", scm_from_uint32 (inst));
}

static void
vm_error_unbound (SCM sym)
{
  scm_error_scm (scm_misc_error_key, SCM_BOOL_F,
                 scm_from_latin1_string ("Unbound variable: ~s"),
                 scm_list_1 (sym), SCM_BOOL_F);
}

static void
vm_error_not_a_variable (const char *func_name, SCM x)
{
  scm_error (scm_arg_type_key, func_name, "Not a variable: ~S",
             scm_list_1 (x), scm_list_1 (x));
}

static void
vm_error_apply_to_non_list (SCM x)
{
  scm_error (scm_arg_type_key, "apply", "Apply to non-list: ~S",
             scm_list_1 (x), scm_list_1 (x));
}
483
/* Keyword-argument and arity errors; PROC is the procedure whose
   application failed.  */

static void
vm_error_kwargs_missing_value (SCM proc, SCM kw)
{
  scm_error_scm (sym_keyword_argument_error, proc,
                 scm_from_latin1_string ("Keyword argument has no value"),
                 SCM_EOL, scm_list_1 (kw));
}

static void
vm_error_kwargs_invalid_keyword (SCM proc, SCM obj)
{
  scm_error_scm (sym_keyword_argument_error, proc,
                 scm_from_latin1_string ("Invalid keyword"),
                 SCM_EOL, scm_list_1 (obj));
}

static void
vm_error_kwargs_unrecognized_keyword (SCM proc, SCM kw)
{
  scm_error_scm (sym_keyword_argument_error, proc,
                 scm_from_latin1_string ("Unrecognized keyword"),
                 SCM_EOL, scm_list_1 (kw));
}

static void
vm_error_wrong_num_args (SCM proc)
{
  scm_wrong_num_args (proc);
}
513
/* Wrong-type and out-of-range errors raised by VM primitives.  SUBR
   names the primitive on whose behalf the error is signalled; the
   offending value is always reported as argument 1.  */

static void
vm_error_wrong_type_apply (SCM proc)
{
  scm_error (scm_arg_type_key, NULL, "Wrong type to apply: ~S",
             scm_list_1 (proc), scm_list_1 (proc));
}

static void
vm_error_not_a_char (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "char");
}

static void
vm_error_not_a_pair (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "pair");
}

static void
vm_error_not_a_mutable_pair (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "mutable pair");
}

static void
vm_error_not_a_string (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "string");
}

static void
vm_error_not_a_atomic_box (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "atomic box");
}

static void
vm_error_not_a_bytevector (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "bytevector");
}

static void
vm_error_not_a_mutable_bytevector (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "mutable bytevector");
}

static void
vm_error_not_a_struct (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "struct");
}

static void
vm_error_not_a_vector (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "vector");
}

static void
vm_error_not_a_mutable_vector (const char *subr, SCM x)
{
  scm_wrong_type_arg_msg (subr, 1, x, "mutable vector");
}

static void
vm_error_out_of_range_uint64 (const char *subr, scm_t_uint64 idx)
{
  scm_out_of_range (subr, scm_from_uint64 (idx));
}

static void
vm_error_out_of_range_int64 (const char *subr, scm_t_int64 idx)
{
  scm_out_of_range (subr, scm_from_int64 (idx));
}
592
/* Errors in the multiple-values return protocol, plus failure to
   rewind a partial continuation.  */

static void
vm_error_no_values (void)
{
  vm_error ("Zero values returned to single-valued continuation",
            SCM_UNDEFINED);
}

static void
vm_error_not_enough_values (void)
{
  vm_error ("Too few values returned to continuation", SCM_UNDEFINED);
}

static void
vm_error_wrong_number_of_values (scm_t_uint32 expected)
{
  vm_error ("Wrong number of values returned to continuation (expected ~a)",
            scm_from_uint32 (expected));
}

static void
vm_error_continuation_not_rewindable (SCM cont)
{
  vm_error ("Unrewindable partial continuation", cont);
}
618
619
620
621
/* Builtin trampoline programs.  The arrays below hold the raw bytecode;
   the SCM program wrappers are initialized elsewhere (outside this
   view).  */
static SCM vm_boot_continuation;
static SCM vm_builtin_apply;
static SCM vm_builtin_values;
static SCM vm_builtin_abort_to_prompt;
static SCM vm_builtin_call_with_values;
static SCM vm_builtin_call_with_current_continuation;

/* A lone "halt": returning into this frame stops the VM.  */
static const scm_t_uint32 vm_boot_continuation_code[] = {
  SCM_PACK_OP_24 (halt, 0)
};

/* Trampoline entered when applying a non-program object.  */
static const scm_t_uint32 vm_apply_non_program_code[] = {
  SCM_PACK_OP_24 (apply_non_program, 0)
};

static const scm_t_uint32 vm_builtin_apply_code[] = {
  SCM_PACK_OP_24 (assert_nargs_ge, 3),
  SCM_PACK_OP_24 (tail_apply, 0), /* proc in r1, args from r2 */
};

static const scm_t_uint32 vm_builtin_values_code[] = {
  SCM_PACK_OP_24 (return_values, 0) /* vals from r1 */
};

static const scm_t_uint32 vm_builtin_abort_to_prompt_code[] = {
  SCM_PACK_OP_24 (assert_nargs_ge, 2),
  SCM_PACK_OP_24 (abort, 0), /* tag in r1, vals from r2 */
  /* FIXME: Partial continuation should capture caller regs. */
  SCM_PACK_OP_24 (return_values, 0) /* vals from r1 */
};

static const scm_t_uint32 vm_builtin_call_with_values_code[] = {
  SCM_PACK_OP_24 (assert_nargs_ee, 3),
  SCM_PACK_OP_24 (alloc_frame, 7),
  SCM_PACK_OP_12_12 (mov, 0, 5),
  SCM_PACK_OP_24 (call, 6), SCM_PACK_OP_ARG_8_24 (0, 1),
  SCM_PACK_OP_24 (long_fmov, 0), SCM_PACK_OP_ARG_8_24 (0, 2),
  SCM_PACK_OP_24 (tail_call_shuffle, 7)
};

static const scm_t_uint32 vm_builtin_call_with_current_continuation_code[] = {
  SCM_PACK_OP_24 (assert_nargs_ee, 2),
  SCM_PACK_OP_24 (call_cc, 0)
};

/* Frame used when delivering an interrupt: call the handler, then
   return to the interrupted computation.  */
static const scm_t_uint32 vm_handle_interrupt_code[] = {
  SCM_PACK_OP_24 (alloc_frame, 3),
  SCM_PACK_OP_12_12 (mov, 0, 2),
  SCM_PACK_OP_24 (call, 2), SCM_PACK_OP_ARG_8_24 (0, 1),
  SCM_PACK_OP_24 (return_from_interrupt, 0)
};
673
674
/* Return nonzero if IP is the boot continuation's code, i.e. the
   sentinel frame that terminates a VM invocation.  */
int
scm_i_vm_is_boot_continuation_code (scm_t_uint32 *ip)
{
  return ip == vm_boot_continuation_code;
}
680
/* Return the builtin program with index IDX (one case per entry of
   FOR_EACH_VM_BUILTIN); aborts on an out-of-range index.  */
static SCM
scm_vm_builtin_ref (unsigned idx)
{
  switch (idx)
    {
#define INDEX_TO_NAME(builtin, BUILTIN, req, opt, rest) \
      case SCM_VM_BUILTIN_##BUILTIN: return vm_builtin_##builtin;
      FOR_EACH_VM_BUILTIN(INDEX_TO_NAME)
#undef INDEX_TO_NAME
    default: abort();
    }
}
693
/* Symbols naming the builtins.  scm_sym_apply has external linkage; the
   others are file-local.  */
SCM scm_sym_apply;
static SCM scm_sym_values;
static SCM scm_sym_abort_to_prompt;
static SCM scm_sym_call_with_values;
static SCM scm_sym_call_with_current_continuation;
699
/* builtin-name->index: map the symbol NAME to its builtin index, or
   return #f if NAME does not name a builtin.  */
SCM
scm_vm_builtin_name_to_index (SCM name)
#define FUNC_NAME "builtin-name->index"
{
  SCM_VALIDATE_SYMBOL (1, name);

#define NAME_TO_INDEX(builtin, BUILTIN, req, opt, rest) \
  if (scm_is_eq (name, scm_sym_##builtin))              \
    return scm_from_uint (SCM_VM_BUILTIN_##BUILTIN);
  FOR_EACH_VM_BUILTIN(NAME_TO_INDEX)
#undef NAME_TO_INDEX

  return SCM_BOOL_F;
}
#undef FUNC_NAME
715
/* builtin-index->name: map the exact integer INDEX to the symbol naming
   that builtin, or return #f for an out-of-range index.  */
SCM
scm_vm_builtin_index_to_name (SCM index)
#define FUNC_NAME "builtin-index->name"
{
  unsigned idx;

  SCM_VALIDATE_UINT_COPY (1, index, idx);

  switch (idx)
    {
#define INDEX_TO_NAME(builtin, BUILTIN, req, opt, rest) \
      case SCM_VM_BUILTIN_##BUILTIN: return scm_sym_##builtin;
      FOR_EACH_VM_BUILTIN(INDEX_TO_NAME)
#undef INDEX_TO_NAME
    default: return SCM_BOOL_F;
    }
}
#undef FUNC_NAME
734
/* Register the builtin introspection gsubrs.  NOTE(review): despite the
   name, only the index/name converters are registered here; the builtin
   programs themselves are created elsewhere (outside this view).  */
static void
scm_init_vm_builtins (void)
{
  scm_c_define_gsubr ("builtin-name->index", 1, 0, 0,
                      scm_vm_builtin_name_to_index);
  scm_c_define_gsubr ("builtin-index->name", 1, 0, 0,
                      scm_vm_builtin_index_to_name);
}
743
/* C entry point for call/cc: apply PROC through the
   call-with-current-continuation builtin trampoline.  */
SCM
scm_i_call_with_current_continuation (SCM proc)
{
  return scm_call_1 (vm_builtin_call_with_current_continuation, proc);
}
749
750
/*
 * VM
 */

/* vm-engine.c is textually included twice to instantiate the two
   interpreter engines from one source: the regular engine (hooks
   compiled out, fastest) and the debug engine (hooks compiled in).
   VM_NAME, VM_USE_HOOKS and FUNC_NAME parameterize each expansion.  */

#define VM_NAME vm_regular_engine
#define VM_USE_HOOKS 0
#define FUNC_NAME "vm-regular-engine"
#include "vm-engine.c"
#undef FUNC_NAME
#undef VM_USE_HOOKS
#undef VM_NAME

#define VM_NAME vm_debug_engine
#define VM_USE_HOOKS 1
#define FUNC_NAME "vm-debug-engine"
#include "vm-engine.c"
#undef FUNC_NAME
#undef VM_USE_HOOKS
#undef VM_NAME

/* Signature shared by both engines.  */
typedef SCM (*scm_t_vm_engine) (scm_i_thread *current_thread, struct scm_vm *vp,
                                scm_i_jmp_buf *registers, int resume);

/* Engine dispatch table; presumably indexed by vp->engine (see
   make_vm, which stores vm_default_engine there) — confirm at the
   call site.  */
static const scm_t_vm_engine vm_engines[SCM_VM_NUM_ENGINES] =
  { vm_regular_engine, vm_debug_engine };
776
777 static union scm_vm_stack_element*
allocate_stack(size_t size)778 allocate_stack (size_t size)
779 {
780 void *ret;
781
782 if (size >= ((size_t) -1) / sizeof (union scm_vm_stack_element))
783 abort ();
784
785 size *= sizeof (union scm_vm_stack_element);
786
787 #if HAVE_SYS_MMAN_H
788 ret = mmap (NULL, size, PROT_READ | PROT_WRITE,
789 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
790 if (ret == NULL)
791 /* Shouldn't happen. */
792 abort ();
793 if (ret == MAP_FAILED)
794 ret = NULL;
795 #else
796 ret = malloc (size);
797 #endif
798
799 if (!ret)
800 perror ("allocate_stack failed");
801
802 return (union scm_vm_stack_element *) ret;
803 }
804
/* Release a stack obtained from allocate_stack.  SIZE is in elements,
   as originally passed to allocate_stack.  */
static void
free_stack (union scm_vm_stack_element *stack, size_t size)
{
  size *= sizeof (*stack);

#if HAVE_SYS_MMAN_H
  munmap (stack, size);
#else
  free (stack);
#endif
}
816
/* Ideally what we would like is an mremap or a realloc that grows at
   the bottom, not the top.  Oh well; mmap and memcpy are fast enough,
   considering that they run very infrequently. */

/* Grow a stack from OLD_SIZE to NEW_SIZE elements by allocating a new
   region and copying the live contents to its top — the stack grows
   downwards, so the new space appears at the bottom.  Frees the old
   region.  Returns the new bottom, or NULL on allocation failure.  */
static union scm_vm_stack_element*
expand_stack (union scm_vm_stack_element *old_bottom, size_t old_size,
              size_t new_size)
#define FUNC_NAME "expand_stack"
{
  union scm_vm_stack_element *new_bottom;
  size_t extension_size;

  /* Guard against byte-size overflow, and against a request that does
     not actually grow the stack.  */
  if (new_size >= ((size_t) -1) / sizeof (union scm_vm_stack_element))
    abort ();
  if (new_size <= old_size)
    abort ();

  extension_size = new_size - old_size;

  /* An extension that large would underflow pointer arithmetic on the
     old region.  */
  if ((size_t)old_bottom < extension_size * sizeof (union scm_vm_stack_element))
    abort ();

  new_bottom = allocate_stack (new_size);

  if (!new_bottom)
    return NULL;

  /* Old contents go at the top of the new stack; the extension is the
     fresh space below them.  */
  memcpy (new_bottom + extension_size,
          old_bottom,
          old_size * sizeof (union scm_vm_stack_element));
  free_stack (old_bottom, old_size);

  return new_bottom;
}
#undef FUNC_NAME
851
/* Allocate and initialize a fresh VM with a one-page stack, the default
   engine, tracing off, and all hooks unset.  Aborts if even that single
   page cannot be allocated.  */
static struct scm_vm *
make_vm (void)
#define FUNC_NAME "make_vm"
{
  int i;
  struct scm_vm *vp;

  vp = scm_gc_malloc (sizeof (struct scm_vm), "vm");

  vp->stack_size = page_size / sizeof (union scm_vm_stack_element);
  vp->stack_bottom = allocate_stack (vp->stack_size);
  if (!vp->stack_bottom)
    /* As in expand_stack, we don't have any way to throw an exception
       if we can't allocate one measely page -- there's no stack to
       handle it.  For now, abort. */
    abort ();
  vp->stack_top = vp->stack_bottom + vp->stack_size;
  vp->stack_limit = vp->stack_bottom;
  vp->overflow_handler_stack = SCM_EOL;
  vp->ip = NULL;
  /* The stack grows down; an empty stack has sp == fp == stack_top.  */
  vp->sp = vp->stack_top;
  vp->sp_min_since_gc = vp->sp;
  vp->fp = vp->stack_top;
  vp->engine = vm_default_engine;
  vp->trace_level = 0;
  for (i = 0; i < SCM_VM_NUM_HOOKS; i++)
    vp->hooks[i] = SCM_BOOL_F;

  return vp;
}
#undef FUNC_NAME
883
/* Tell the kernel it may reclaim the pages between the stack bottom and
   the deepest stack pointer reached since the last GC, then reset the
   low-water mark.  No-op on systems without mmap/madvise.  */
static void
return_unused_stack_to_os (struct scm_vm *vp)
{
#if HAVE_SYS_MMAN_H
  scm_t_uintptr lo = (scm_t_uintptr) vp->stack_bottom;
  scm_t_uintptr hi = (scm_t_uintptr) vp->sp;
  /* The second condition is needed to protect against wrap-around. */
  if (vp->sp_min_since_gc >= vp->stack_bottom && vp->sp >= vp->sp_min_since_gc)
    lo = (scm_t_uintptr) vp->sp_min_since_gc;

  lo &= ~(page_size - 1U); /* round down */
  hi &= ~(page_size - 1U); /* round down */

  /* Return these pages to the OS.  The next time they are paged in,
     they will be zeroed.  */
  if (lo < hi)
    {
      int ret = 0;

      /* Retry if madvise is interrupted.  */
      do
        ret = madvise ((void *) lo, hi - lo, MADV_DONTNEED);
      while (ret && errno == EAGAIN);

      /* If the OS doesn't implement 'madvise' (as is currently the case
         for GNU/Hurd), don't warn the user since there's nothing they
         can do about it. */
      if (ret && errno != ENOSYS)
        perror ("madvise failed");
    }

  vp->sp_min_since_gc = vp->sp;
#endif
}
917
/* Small direct-mapped cache of IP -> slot-map lookups, used while
   marking the stack (see scm_i_vm_mark_stack / find_slot_map).  */
#define SLOT_MAP_CACHE_SIZE 32U
struct slot_map_cache_entry
{
  scm_t_uint32 *ip;
  const scm_t_uint8 *map;
};

struct slot_map_cache
{
  struct slot_map_cache_entry entries[SLOT_MAP_CACHE_SIZE];
};
929
930 static const scm_t_uint8 *
find_slot_map(scm_t_uint32 * ip,struct slot_map_cache * cache)931 find_slot_map (scm_t_uint32 *ip, struct slot_map_cache *cache)
932 {
933 /* The lower two bits should be zero. FIXME: Use a better hash
934 function; we don't expose scm_raw_hashq currently. */
935 size_t slot = (((scm_t_uintptr) ip) >> 2) % SLOT_MAP_CACHE_SIZE;
936 const scm_t_uint8 *map;
937
938 if (cache->entries[slot].ip == ip)
939 map = cache->entries[slot].map;
940 else
941 {
942 map = scm_find_slot_map_unlocked (ip);
943 cache->entries[slot].ip = ip;
944 cache->entries[slot].map = map;
945 }
946
947 return map;
948 }
949
/* Slot-map descriptors: two bits per stack slot (see the unpacking in
   scm_i_vm_mark_stack).  */
enum slot_desc
  {
    SLOT_DESC_DEAD = 0,     /* not live: cleared during marking */
    SLOT_DESC_LIVE_RAW = 1, /* live untagged data: do not trace */
    SLOT_DESC_LIVE_SCM = 2, /* live SCM value: trace */
    SLOT_DESC_UNUSED = 3
  };
957
/* Mark the active VM stack region. */
struct GC_ms_entry *
scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr,
                     struct GC_ms_entry *mark_stack_limit)
{
  union scm_vm_stack_element *sp, *fp;
  /* The first frame will be marked conservatively (without a slot map).
     This is because GC can happen at any point within the hottest
     activation, due to multiple threads or per-instruction hooks, and
     providing slot maps for all points in a program would take a
     prohibitive amount of space. */
  const scm_t_uint8 *slot_map = NULL;
  void *upper = (void *) GC_greatest_plausible_heap_addr;
  void *lower = (void *) GC_least_plausible_heap_addr;
  struct slot_map_cache cache;

  memset (&cache, 0, sizeof (cache));

  /* Walk the frame chain from the innermost frame out to the stack
     base.  */
  for (fp = vp->fp, sp = vp->sp;
       fp < vp->stack_top;
       fp = SCM_FRAME_DYNAMIC_LINK (fp))
    {
      scm_t_ptrdiff nlocals = SCM_FRAME_NUM_LOCALS (fp, sp);
      size_t slot = nlocals - 1;
      /* Visit this frame's locals from the highest slot down to 0.  */
      for (slot = nlocals - 1; sp < fp; sp++, slot--)
        {
          /* Without a slot map, conservatively treat every slot as a
             live SCM value.  */
          enum slot_desc desc = SLOT_DESC_LIVE_SCM;

          if (slot_map)
            /* Two descriptor bits per slot, four slots per byte.  */
            desc = (slot_map[slot / 4U] >> ((slot % 4U) * 2)) & 3U;

          switch (desc)
            {
            case SLOT_DESC_LIVE_RAW:
              /* Untagged datum; not a heap reference.  */
              break;
            case SLOT_DESC_UNUSED:
            case SLOT_DESC_LIVE_SCM:
              /* Only push plausible heap pointers to the marker.  */
              if (SCM_NIMP (sp->as_scm) &&
                  sp->as_ptr >= lower && sp->as_ptr <= upper)
                mark_stack_ptr = GC_mark_and_push (sp->as_ptr,
                                                   mark_stack_ptr,
                                                   mark_stack_limit,
                                                   NULL);
              break;
            case SLOT_DESC_DEAD:
              /* This value may become dead as a result of GC,
                 so we can't just leave it on the stack. */
              sp->as_scm = SCM_UNSPECIFIED;
              break;
            }
        }
      sp = SCM_FRAME_PREVIOUS_SP (fp);
      /* Inner frames may have a dead slots map for precise marking.
         Note that there may be other reasons to not have a dead slots
         map, e.g. if all of the frame's slots below the callee frame
         are live. */
      slot_map = find_slot_map (SCM_FRAME_RETURN_ADDRESS (fp), &cache);
    }

  return_unused_stack_to_os (vp);

  return mark_stack_ptr;
}
1021
1022 /* Free the VM stack, as this thread is exiting. */
1023 void
scm_i_vm_free_stack(struct scm_vm * vp)1024 scm_i_vm_free_stack (struct scm_vm *vp)
1025 {
1026 free_stack (vp->stack_bottom, vp->stack_size);
1027 vp->stack_bottom = vp->stack_top = vp->stack_limit = NULL;
1028 vp->stack_size = 0;
1029 }
1030
/* Arguments and results for vm_expand_stack_inner, passed through
   GC_call_with_alloc_lock's single void* parameter.  */
struct vm_expand_stack_data
{
  struct scm_vm *vp;                    /* VM whose stack to grow.  */
  size_t stack_size;                    /* Required size, in elements.  */
  union scm_vm_stack_element *new_sp;   /* In/out: relocated on success.  */
};
1037
/* Grow VP's stack to at least data->stack_size elements, doubling the
   current size until it fits.  Called via GC_call_with_alloc_lock so
   the collector doesn't observe the stack mid-relocation.  Returns the
   new stack bottom on success, or NULL if expand_stack failed.  On
   success, vp's stack fields, vp->fp, and data->new_sp are all
   relocated to the new stack.  */
static void *
vm_expand_stack_inner (void *data_ptr)
{
  struct vm_expand_stack_data *data = data_ptr;

  struct scm_vm *vp = data->vp;
  union scm_vm_stack_element *old_top, *new_bottom;
  size_t new_size;
  scm_t_ptrdiff reloc;

  old_top = vp->stack_top;
  new_size = vp->stack_size;
  while (new_size < data->stack_size)
    new_size *= 2;

  new_bottom = expand_stack (vp->stack_bottom, vp->stack_size, new_size);
  if (!new_bottom)
    return NULL;

  vp->stack_bottom = new_bottom;
  vp->stack_size = new_size;
  vp->stack_top = vp->stack_bottom + new_size;
  vp->stack_limit = vp->stack_bottom;
  /* The stack grows down, so live data stays anchored at the top:
     shift all pointers by the displacement of the top.  */
  reloc = vp->stack_top - old_top;

  if (vp->fp)
    vp->fp += reloc;
  data->new_sp += reloc;

  return new_bottom;
}
1069
1070 static scm_t_ptrdiff
current_overflow_size(struct scm_vm * vp)1071 current_overflow_size (struct scm_vm *vp)
1072 {
1073 if (scm_is_pair (vp->overflow_handler_stack))
1074 return scm_to_ptrdiff_t (scm_caar (vp->overflow_handler_stack));
1075 return -1;
1076 }
1077
1078 static int
should_handle_stack_overflow(struct scm_vm * vp,scm_t_ptrdiff stack_size)1079 should_handle_stack_overflow (struct scm_vm *vp, scm_t_ptrdiff stack_size)
1080 {
1081 scm_t_ptrdiff overflow_size = current_overflow_size (vp);
1082 return overflow_size >= 0 && stack_size >= overflow_size;
1083 }
1084
1085 static void
reset_stack_limit(struct scm_vm * vp)1086 reset_stack_limit (struct scm_vm *vp)
1087 {
1088 if (should_handle_stack_overflow (vp, vp->stack_size))
1089 vp->stack_limit = vp->stack_top - current_overflow_size (vp);
1090 else
1091 vp->stack_limit = vp->stack_bottom;
1092 }
1093
/* State captured for the dynwind handlers that push and pop an
   overflow handler around a dynamic extent.  */
struct overflow_handler_data
{
  struct scm_vm *vp;            /* VM whose handler stack to adjust.  */
  SCM overflow_handler_stack;   /* Handler stack to install on wind.  */
};
1099
1100 static void
wind_overflow_handler(void * ptr)1101 wind_overflow_handler (void *ptr)
1102 {
1103 struct overflow_handler_data *data = ptr;
1104
1105 data->vp->overflow_handler_stack = data->overflow_handler_stack;
1106
1107 reset_stack_limit (data->vp);
1108 }
1109
1110 static void
unwind_overflow_handler(void * ptr)1111 unwind_overflow_handler (void *ptr)
1112 {
1113 struct overflow_handler_data *data = ptr;
1114
1115 data->vp->overflow_handler_stack = scm_cdr (data->overflow_handler_stack);
1116
1117 reset_stack_limit (data->vp);
1118 }
1119
1120 static void
vm_expand_stack(struct scm_vm * vp,union scm_vm_stack_element * new_sp)1121 vm_expand_stack (struct scm_vm *vp, union scm_vm_stack_element *new_sp)
1122 {
1123 scm_t_ptrdiff stack_size = vp->stack_top - new_sp;
1124
1125 if (stack_size > vp->stack_size)
1126 {
1127 struct vm_expand_stack_data data;
1128
1129 data.vp = vp;
1130 data.stack_size = stack_size;
1131 data.new_sp = new_sp;
1132
1133 if (!GC_call_with_alloc_lock (vm_expand_stack_inner, &data))
1134 /* Throw an unwind-only exception. */
1135 scm_report_stack_overflow ();
1136
1137 new_sp = data.new_sp;
1138 }
1139
1140 vp->sp_min_since_gc = vp->sp = new_sp;
1141
1142 if (should_handle_stack_overflow (vp, stack_size))
1143 {
1144 SCM more_stack, new_limit;
1145
1146 struct overflow_handler_data data;
1147 data.vp = vp;
1148 data.overflow_handler_stack = vp->overflow_handler_stack;
1149
1150 scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE);
1151
1152 scm_dynwind_rewind_handler (unwind_overflow_handler, &data,
1153 SCM_F_WIND_EXPLICITLY);
1154 scm_dynwind_unwind_handler (wind_overflow_handler, &data,
1155 SCM_F_WIND_EXPLICITLY);
1156
1157 /* Call the overflow handler. */
1158 more_stack = scm_call_0 (scm_cdar (data.overflow_handler_stack));
1159
1160 /* If the overflow handler returns, its return value should be an
1161 integral number of words from the outer stack limit to transfer
1162 to the inner limit. */
1163 if (scm_to_ptrdiff_t (more_stack) <= 0)
1164 scm_out_of_range (NULL, more_stack);
1165 new_limit = scm_sum (scm_caar (data.overflow_handler_stack), more_stack);
1166 if (scm_is_pair (scm_cdr (data.overflow_handler_stack)))
1167 new_limit = scm_min (new_limit,
1168 scm_caadr (data.overflow_handler_stack));
1169
1170 /* Ensure the new limit is in range. */
1171 scm_to_ptrdiff_t (new_limit);
1172
1173 /* Increase the limit that we will restore. */
1174 scm_set_car_x (scm_car (data.overflow_handler_stack), new_limit);
1175
1176 scm_dynwind_end ();
1177
1178 /* Recurse. */
1179 return vm_expand_stack (vp, new_sp);
1180 }
1181 }
1182
1183 static struct scm_vm *
thread_vm(scm_i_thread * t)1184 thread_vm (scm_i_thread *t)
1185 {
1186 if (SCM_UNLIKELY (!t->vp))
1187 t->vp = make_vm ();
1188
1189 return t->vp;
1190 }
1191
1192 struct scm_vm *
scm_the_vm(void)1193 scm_the_vm (void)
1194 {
1195 return thread_vm (SCM_I_CURRENT_THREAD);
1196 }
1197
1198 SCM
scm_call_n(SCM proc,SCM * argv,size_t nargs)1199 scm_call_n (SCM proc, SCM *argv, size_t nargs)
1200 {
1201 scm_i_thread *thread;
1202 struct scm_vm *vp;
1203 union scm_vm_stack_element *return_fp, *call_fp;
1204 /* Since nargs can only describe the length of a valid argv array in
1205 elements and each element is at least 4 bytes, nargs will not be
1206 greater than INTMAX/2 and therefore we don't have to check for
1207 overflow here or below. */
1208 size_t return_nlocals = 1, call_nlocals = nargs + 1, frame_size = 2;
1209 scm_t_ptrdiff stack_reserve_words;
1210 size_t i;
1211
1212 thread = SCM_I_CURRENT_THREAD;
1213 vp = thread_vm (thread);
1214
1215 SCM_CHECK_STACK;
1216
1217 /* It's not valid for argv to point into the stack already. */
1218 if ((void *) argv < (void *) vp->stack_top &&
1219 (void *) argv >= (void *) vp->sp)
1220 abort();
1221
1222 /* Check that we have enough space for the two stack frames: the
1223 innermost one that makes the call, and its continuation which
1224 receives the resulting value(s) and returns from the engine
1225 call. */
1226 stack_reserve_words = call_nlocals + frame_size + return_nlocals + frame_size;
1227 vm_push_sp (vp, vp->sp - stack_reserve_words);
1228
1229 call_fp = vp->sp + call_nlocals;
1230 return_fp = call_fp + frame_size + return_nlocals;
1231
1232 SCM_FRAME_SET_RETURN_ADDRESS (return_fp, vp->ip);
1233 SCM_FRAME_SET_DYNAMIC_LINK (return_fp, vp->fp);
1234 SCM_FRAME_LOCAL (return_fp, 0) = vm_boot_continuation;
1235
1236 vp->ip = (scm_t_uint32 *) vm_boot_continuation_code;
1237 vp->fp = call_fp;
1238
1239 SCM_FRAME_SET_RETURN_ADDRESS (call_fp, vp->ip);
1240 SCM_FRAME_SET_DYNAMIC_LINK (call_fp, return_fp);
1241 SCM_FRAME_LOCAL (call_fp, 0) = proc;
1242 for (i = 0; i < nargs; i++)
1243 SCM_FRAME_LOCAL (call_fp, i + 1) = argv[i];
1244
1245 {
1246 scm_i_jmp_buf registers;
1247 int resume;
1248 const void *prev_cookie = vp->resumable_prompt_cookie;
1249 SCM ret;
1250
1251 resume = SCM_I_SETJMP (registers);
1252 if (SCM_UNLIKELY (resume))
1253 {
1254 scm_gc_after_nonlocal_exit ();
1255 /* Non-local return. */
1256 vm_dispatch_abort_hook (vp);
1257 }
1258
1259 vp->resumable_prompt_cookie = ®isters;
1260 ret = vm_engines[vp->engine](thread, vp, ®isters, resume);
1261 vp->resumable_prompt_cookie = prev_cookie;
1262
1263 return ret;
1264 }
1265 }
1266
1267 /* Scheme interface */
1268
/* Shared body of the vm-*-hook accessors below: lazily create hook
   number N on the current VM as a one-argument hook, and return it.  */
#define VM_DEFINE_HOOK(n)				\
{							\
  struct scm_vm *vp;					\
  vp = scm_the_vm ();					\
  if (scm_is_false (vp->hooks[n]))			\
    vp->hooks[n] = scm_make_hook (SCM_I_MAKINUM (1));	\
  return vp->hooks[n];					\
}
1277
/* Accessor for the VM's apply hook; created lazily on first access.  */
SCM_DEFINE (scm_vm_apply_hook, "vm-apply-hook", 0, 0, 0,
	    (void),
	    "")
#define FUNC_NAME s_scm_vm_apply_hook
{
  VM_DEFINE_HOOK (SCM_VM_APPLY_HOOK);
}
#undef FUNC_NAME
1285 #undef FUNC_NAME
1286
/* Accessor for the VM's push-continuation hook; created lazily on
   first access.  */
SCM_DEFINE (scm_vm_push_continuation_hook, "vm-push-continuation-hook", 0, 0, 0,
	    (void),
	    "")
#define FUNC_NAME s_scm_vm_push_continuation_hook
{
  VM_DEFINE_HOOK (SCM_VM_PUSH_CONTINUATION_HOOK);
}
#undef FUNC_NAME
1295
/* Accessor for the VM's pop-continuation hook; created lazily on
   first access.  */
SCM_DEFINE (scm_vm_pop_continuation_hook, "vm-pop-continuation-hook", 0, 0, 0,
	    (void),
	    "")
#define FUNC_NAME s_scm_vm_pop_continuation_hook
{
  VM_DEFINE_HOOK (SCM_VM_POP_CONTINUATION_HOOK);
}
#undef FUNC_NAME
1304
/* Accessor for the VM's next hook; created lazily on first access.  */
SCM_DEFINE (scm_vm_next_hook, "vm-next-hook", 0, 0, 0,
	    (void),
	    "")
#define FUNC_NAME s_scm_vm_next_hook
{
  VM_DEFINE_HOOK (SCM_VM_NEXT_HOOK);
}
#undef FUNC_NAME
1313
/* Accessor for the VM's abort-continuation hook; created lazily on
   first access.  */
SCM_DEFINE (scm_vm_abort_continuation_hook, "vm-abort-continuation-hook", 0, 0, 0,
	    (void),
	    "")
#define FUNC_NAME s_scm_vm_abort_continuation_hook
{
  VM_DEFINE_HOOK (SCM_VM_ABORT_CONTINUATION_HOOK);
}
#undef FUNC_NAME
1322
1323 SCM_DEFINE (scm_vm_trace_level, "vm-trace-level", 0, 0, 0,
1324 (void),
1325 "")
1326 #define FUNC_NAME s_scm_vm_trace_level
1327 {
1328 return scm_from_int (scm_the_vm ()->trace_level);
1329 }
1330 #undef FUNC_NAME
1331
1332 SCM_DEFINE (scm_set_vm_trace_level_x, "set-vm-trace-level!", 1, 0, 0,
1333 (SCM level),
1334 "")
1335 #define FUNC_NAME s_scm_set_vm_trace_level_x
1336 {
1337 scm_the_vm ()->trace_level = scm_to_int (level);
1338 return SCM_UNSPECIFIED;
1339 }
1340 #undef FUNC_NAME
1341
1342
1343 /*
1344 * VM engines
1345 */
1346
1347 static int
symbol_to_vm_engine(SCM engine,const char * FUNC_NAME)1348 symbol_to_vm_engine (SCM engine, const char *FUNC_NAME)
1349 {
1350 if (scm_is_eq (engine, sym_regular))
1351 return SCM_VM_REGULAR_ENGINE;
1352 else if (scm_is_eq (engine, sym_debug))
1353 return SCM_VM_DEBUG_ENGINE;
1354 else
1355 SCM_MISC_ERROR ("Unknown VM engine: ~a", scm_list_1 (engine));
1356 }
1357
1358 static SCM
vm_engine_to_symbol(int engine,const char * FUNC_NAME)1359 vm_engine_to_symbol (int engine, const char *FUNC_NAME)
1360 {
1361 switch (engine)
1362 {
1363 case SCM_VM_REGULAR_ENGINE:
1364 return sym_regular;
1365 case SCM_VM_DEBUG_ENGINE:
1366 return sym_debug;
1367 default:
1368 /* ? */
1369 SCM_MISC_ERROR ("Unknown VM engine: ~a",
1370 scm_list_1 (scm_from_int (engine)));
1371 }
1372 }
1373
1374 SCM_DEFINE (scm_vm_engine, "vm-engine", 0, 0, 0,
1375 (void),
1376 "")
1377 #define FUNC_NAME s_scm_vm_engine
1378 {
1379 return vm_engine_to_symbol (scm_the_vm ()->engine, FUNC_NAME);
1380 }
1381 #undef FUNC_NAME
1382
1383 void
scm_c_set_vm_engine_x(int engine)1384 scm_c_set_vm_engine_x (int engine)
1385 #define FUNC_NAME "set-vm-engine!"
1386 {
1387 if (engine < 0 || engine >= SCM_VM_NUM_ENGINES)
1388 SCM_MISC_ERROR ("Unknown VM engine: ~a",
1389 scm_list_1 (scm_from_int (engine)));
1390
1391 scm_the_vm ()->engine = engine;
1392 }
1393 #undef FUNC_NAME
1394
1395 SCM_DEFINE (scm_set_vm_engine_x, "set-vm-engine!", 1, 0, 0,
1396 (SCM engine),
1397 "")
1398 #define FUNC_NAME s_scm_set_vm_engine_x
1399 {
1400 scm_c_set_vm_engine_x (symbol_to_vm_engine (engine, FUNC_NAME));
1401 return SCM_UNSPECIFIED;
1402 }
1403 #undef FUNC_NAME
1404
1405 void
scm_c_set_default_vm_engine_x(int engine)1406 scm_c_set_default_vm_engine_x (int engine)
1407 #define FUNC_NAME "set-default-vm-engine!"
1408 {
1409 if (engine < 0 || engine >= SCM_VM_NUM_ENGINES)
1410 SCM_MISC_ERROR ("Unknown VM engine: ~a",
1411 scm_list_1 (scm_from_int (engine)));
1412
1413 vm_default_engine = engine;
1414 }
1415 #undef FUNC_NAME
1416
1417 SCM_DEFINE (scm_set_default_vm_engine_x, "set-default-vm-engine!", 1, 0, 0,
1418 (SCM engine),
1419 "")
1420 #define FUNC_NAME s_scm_set_default_vm_engine_x
1421 {
1422 scm_c_set_default_vm_engine_x (symbol_to_vm_engine (engine, FUNC_NAME));
1423 return SCM_UNSPECIFIED;
1424 }
1425 #undef FUNC_NAME
1426
1427 /* FIXME: This function makes no sense, but we keep it to make sure we
1428 have a way of switching to the debug or regular VM. */
1429 SCM_DEFINE (scm_call_with_vm, "call-with-vm", 1, 0, 1,
1430 (SCM proc, SCM args),
1431 "Apply @var{proc} to @var{args} in a dynamic extent in which\n"
1432 "@var{vm} is the current VM.")
1433 #define FUNC_NAME s_scm_call_with_vm
1434 {
1435 return scm_apply_0 (proc, args);
1436 }
1437 #undef FUNC_NAME
1438
SCM_DEFINE (scm_call_with_stack_overflow_handler,
            "call-with-stack-overflow-handler", 3, 0, 0,
	    (SCM limit, SCM thunk, SCM handler),
	    "Call @var{thunk} in an environment in which the stack limit has\n"
	    "been reduced to @var{limit} additional words.  If the limit is\n"
	    "reached, @var{handler} (a thunk) will be invoked in the dynamic\n"
	    "environment of the error.  For the extent of the call to\n"
	    "@var{handler}, the stack limit and handler are restored to the\n"
	    "values that were in place when\n"
	    "@code{call-with-stack-overflow-handler} was called.")
#define FUNC_NAME s_scm_call_with_stack_overflow_handler
{
  struct scm_vm *vp;
  scm_t_ptrdiff c_limit, stack_size;
  struct overflow_handler_data data;
  SCM new_limit, ret;

  vp = scm_the_vm ();
  stack_size = vp->stack_top - vp->sp;

  /* LIMIT must be a positive word count.  */
  c_limit = scm_to_ptrdiff_t (limit);
  if (c_limit <= 0)
    scm_out_of_range (FUNC_NAME, limit);

  /* The stored limit is absolute: current depth plus LIMIT, clamped
     to any already-installed (outer) handler's limit.  */
  new_limit = scm_sum (scm_from_ptrdiff_t (stack_size), limit);
  if (scm_is_pair (vp->overflow_handler_stack))
    new_limit = scm_min (new_limit, scm_caar (vp->overflow_handler_stack));

  /* Hacky check that the current stack depth plus the limit is within
     the range of a ptrdiff_t.  */
  scm_to_ptrdiff_t (new_limit);

  data.vp = vp;
  data.overflow_handler_stack =
    scm_acons (limit, handler, vp->overflow_handler_stack);

  scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE);

  /* Push the new handler on entry (and on rewind); pop it again on
     exit (and on unwind).  */
  scm_dynwind_rewind_handler (wind_overflow_handler, &data,
                              SCM_F_WIND_EXPLICITLY);
  scm_dynwind_unwind_handler (unwind_overflow_handler, &data,
                              SCM_F_WIND_EXPLICITLY);

  /* Reset vp->sp_min_since_gc so that the VM checks actually
     trigger.  */
  return_unused_stack_to_os (vp);

  ret = scm_call_0 (thunk);

  scm_dynwind_end ();

  return ret;
}
#undef FUNC_NAME
1493
1494
1495 /*
1496 * Initialize
1497 */
1498
1499 SCM
scm_load_compiled_with_vm(SCM file)1500 scm_load_compiled_with_vm (SCM file)
1501 {
1502 return scm_call_0 (scm_load_thunk_from_file (file));
1503 }
1504
1505
/* Attach name and arity properties to the VM builtin programs, and
   intern the symbols those names use.  Separate from scm_bootstrap_vm
   because it needs the procedure-property machinery to be up.  */
void
scm_init_vm_builtin_properties (void)
{
  /* FIXME: Seems hacky to do this here, but oh well :/ */
  scm_sym_apply = scm_from_utf8_symbol ("apply");
  scm_sym_values = scm_from_utf8_symbol ("values");
  scm_sym_abort_to_prompt = scm_from_utf8_symbol ("abort-to-prompt");
  scm_sym_call_with_values = scm_from_utf8_symbol ("call-with-values");
  scm_sym_call_with_current_continuation =
    scm_from_utf8_symbol ("call-with-current-continuation");

  /* For each builtin, set its name property and its minimum arity
     (required args, optional args, rest-arg flag).  */
#define INIT_BUILTIN(builtin, BUILTIN, req, opt, rest)                  \
  scm_set_procedure_property_x (vm_builtin_##builtin, scm_sym_name,     \
                                scm_sym_##builtin);                     \
  scm_set_procedure_minimum_arity_x (vm_builtin_##builtin,              \
                                     SCM_I_MAKINUM (req),               \
                                     SCM_I_MAKINUM (opt),               \
                                     scm_from_bool (rest));
  FOR_EACH_VM_BUILTIN (INIT_BUILTIN);
#undef INIT_BUILTIN
}
1527
/* Early VM setup, run during Guile's bootstrap: register the snarfed
   init functions as extensions, record the page size, intern error
   symbols, and create the boot continuation and builtin programs.  */
void
scm_bootstrap_vm (void)
{
  scm_c_register_extension ("libguile-" SCM_EFFECTIVE_VERSION,
                            "scm_init_vm",
                            (scm_t_extension_init_func)scm_init_vm, NULL);
  scm_c_register_extension ("libguile-" SCM_EFFECTIVE_VERSION,
                            "scm_init_vm_builtins",
                            (scm_t_extension_init_func)scm_init_vm_builtins,
                            NULL);

  page_size = getpagesize ();
  /* page_size should be a power of two.  */
  if (page_size & (page_size - 1))
    abort ();

  sym_vm_run = scm_from_latin1_symbol ("vm-run");
  sym_vm_error = scm_from_latin1_symbol ("vm-error");
  sym_keyword_argument_error = scm_from_latin1_symbol ("keyword-argument-error");
  sym_regular = scm_from_latin1_symbol ("regular");
  sym_debug = scm_from_latin1_symbol ("debug");

  /* Flag the boot continuation's program so the VM can recognize it
     and return from the engine.  */
  vm_boot_continuation = scm_i_make_program (vm_boot_continuation_code);
  SCM_SET_CELL_WORD_0 (vm_boot_continuation,
                       (SCM_CELL_WORD_0 (vm_boot_continuation)
                        | SCM_F_PROGRAM_IS_BOOT));

  /* Wrap each builtin's code in a program object.  */
#define DEFINE_BUILTIN(builtin, BUILTIN, req, opt, rest)                \
  vm_builtin_##builtin = scm_i_make_program (vm_builtin_##builtin##_code);
  FOR_EACH_VM_BUILTIN (DEFINE_BUILTIN);
#undef DEFINE_BUILTIN
}
1560
/* Register the snarfed Scheme bindings from this file (vm.x is
   generated by the snarfer at build time).  */
void
scm_init_vm (void)
{
#ifndef SCM_MAGIC_SNARFER
#include "libguile/vm.x"
#endif
}
1568
1569 /*
1570 Local Variables:
1571 c-file-style: "gnu"
1572 End:
1573 */
1574