1 /* This file implements Racket threads.
2 
3    Racket threads are implemented by copying the stack. The
4    scheme_thread_block() function is called occasionally by the
5    evaluator so that the current thread can be swapped out, and
6    do_swap_thread() performs the actual swap.
7 
   Much of the work in thread management is knowing when to go to
   sleep, to be nice to the OS outside of Racket. The rest of the
   work is implementing custodians, parameters, and wills. */
12 
/* Some compilers don't like re-def of GC_malloc in schemef.h: */
14 #ifndef MZ_PRECISE_GC
15 # define SCHEME_NO_GC_PROTO
16 #endif
17 
18 #include "schpriv.h"
19 #include "schmach.h"
20 #include "schgc.h"
21 #include "schrktio.h"
22 #ifdef MZ_USE_FUTURES
23 # include "future.h"
24 #endif
25 #ifdef USE_STACKAVAIL
26 # include <malloc.h>
27 #endif
28 
/* Allow the build configuration to override the signal used for
   thread interrupts; default to SIGUSR2. (The guard previously
   tested the misspelled "SIGNMZTHREAD", so the default was applied
   unconditionally and an existing SIGMZTHREAD was redefined.) */
#ifndef SIGMZTHREAD
# define SIGMZTHREAD SIGUSR2
#endif
32 
33 #define DEFAULT_INIT_STACK_SIZE 1000
34 #define MAX_INIT_STACK_SIZE 100000
35 
36 #ifdef SGC_STD_DEBUGGING
37 # define SENORA_GC_NO_FREE
38 #endif
39 
40 /* If a finalization callback invokes Racket code,
41    we can end up with a thread swap in the middle of a thread
42    swap (where the outer swap was interrupted by GC). The
43    following is a debugging flag to help detect and fix
44    such problems. */
45 #define WATCH_FOR_NESTED_SWAPS 0
46 
47 #if WATCH_FOR_NESTED_SWAPS
48 static int swapping = 0;
49 #endif
50 
51 extern void scheme_gmp_tls_init(intptr_t *s);
52 extern void *scheme_gmp_tls_load(intptr_t *s);
53 extern void scheme_gmp_tls_unload(intptr_t *s, void *p);
54 extern void scheme_gmp_tls_snapshot(intptr_t *s, intptr_t *save);
55 extern void scheme_gmp_tls_restore_snapshot(intptr_t *s, void *data, intptr_t *save, int do_free);
56 
57 static void check_ready_break();
58 
59 THREAD_LOCAL_DECL(extern int scheme_num_read_syntax_objects);
60 THREAD_LOCAL_DECL(extern intptr_t scheme_hash_request_count);
61 THREAD_LOCAL_DECL(extern intptr_t scheme_hash_iteration_count);
62 THREAD_LOCAL_DECL(extern intptr_t scheme_code_page_total);
63 #ifdef MZ_USE_JIT
64 extern int scheme_jit_malloced;
65 #else
66 # define scheme_jit_malloced 0
67 #endif
68 
69 SHARED_OK int scheme_init_load_on_demand = 1;
70 
71 /*========================================================================*/
72 /*                    local variables and prototypes                      */
73 /*========================================================================*/
74 
75 #define INIT_TB_SIZE  20
76 
77 #ifndef MZ_THREAD_QUANTUM_USEC
78 # define MZ_THREAD_QUANTUM_USEC 10000
79 #endif
80 
81 THREAD_LOCAL_DECL(static int buffer_init_size);
82 
83 THREAD_LOCAL_DECL(Scheme_Thread *scheme_current_thread = NULL);
84 THREAD_LOCAL_DECL(Scheme_Thread *scheme_main_thread = NULL);
85 THREAD_LOCAL_DECL(Scheme_Thread *scheme_first_thread = NULL);
86 THREAD_LOCAL_DECL(static Scheme_Thread *gc_prep_thread_chain = NULL);
87 
scheme_get_current_thread()88 XFORM_NONGCING Scheme_Thread *scheme_get_current_thread() { return scheme_current_thread; }
scheme_get_multiple_count()89 XFORM_NONGCING intptr_t scheme_get_multiple_count() { return scheme_current_thread->ku.multiple.count; }
scheme_get_multiple_array()90 XFORM_NONGCING Scheme_Object **scheme_get_multiple_array() { return scheme_current_thread->ku.multiple.array; }
scheme_set_current_thread_ran_some()91 XFORM_NONGCING void scheme_set_current_thread_ran_some() { scheme_current_thread->ran_some = 1; }
92 
93 THREAD_LOCAL_DECL(Scheme_Thread_Set *scheme_thread_set_top);
94 
95 THREAD_LOCAL_DECL(static int num_running_threads); /* not counting original */
96 
97 #ifdef LINK_EXTENSIONS_BY_TABLE
98 Scheme_Thread **scheme_current_thread_ptr;
99 volatile int *scheme_fuel_counter_ptr;
100 #endif
101 THREAD_LOCAL_DECL(static int swap_no_setjmp = 0);
102 
103 THREAD_LOCAL_DECL(static int thread_swap_count);
104 THREAD_LOCAL_DECL(int scheme_did_gc_count);
105 THREAD_LOCAL_DECL(static intptr_t process_time_at_swap);
106 THREAD_LOCAL_DECL(static intptr_t process_time_skips);
107 
108 THREAD_LOCAL_DECL(static intptr_t max_gc_pre_used_bytes);
109 THREAD_LOCAL_DECL(static int num_major_garbage_collections);
110 THREAD_LOCAL_DECL(static int num_minor_garbage_collections);
111 THREAD_LOCAL_DECL(static intptr_t max_code_page_total);
112 
113 #ifndef MZ_PRECISE_GC
114 static intptr_t gc_pre_used_bytes;
115 #endif
116 
117 #ifdef RUNSTACK_IS_GLOBAL
118 THREAD_LOCAL_DECL(Scheme_Object **scheme_current_runstack_start);
119 THREAD_LOCAL_DECL(Scheme_Object **scheme_current_runstack);
120 THREAD_LOCAL_DECL(MZ_MARK_STACK_TYPE scheme_current_cont_mark_stack);
121 THREAD_LOCAL_DECL(MZ_MARK_POS_TYPE scheme_current_cont_mark_pos);
122 #endif
123 
124 THREAD_LOCAL_DECL(struct rktio_ltps_t *scheme_semaphore_fd_set);
125 
126 THREAD_LOCAL_DECL(static Scheme_Custodian *main_custodian);
127 THREAD_LOCAL_DECL(static Scheme_Hash_Table *limited_custodians = NULL);
128 READ_ONLY static Scheme_Object *initial_inspector;
129 
130 THREAD_LOCAL_DECL(static Scheme_Plumber *initial_plumber);
131 
132 THREAD_LOCAL_DECL(static Scheme_Hash_Table *late_will_executors_with_pending = NULL);
133 
134 THREAD_LOCAL_DECL(Scheme_Config *initial_config);
135 
136 #ifndef MZ_PRECISE_GC
137 static int cust_box_count, cust_box_alloc;
138 static Scheme_Custodian_Box **cust_boxes;
139 # ifndef USE_SENORA_GC
140 extern int GC_is_marked(void *);
141 # endif
142 #endif
143 
144 READ_ONLY Scheme_At_Exit_Proc replacement_at_exit;
145 
146 ROSYM Scheme_Object *scheme_parameterization_key;
147 ROSYM Scheme_Object *scheme_exn_handler_key;
148 ROSYM Scheme_Object *scheme_break_enabled_key;
149 
150 THREAD_LOCAL_DECL(static Scheme_Object *configuration_callback_cache[2]);
151 
152 static int gcs_on_exit;
153 THREAD_LOCAL_DECL(uintptr_t scheme_total_gc_time);
154 THREAD_LOCAL_DECL(static uintptr_t start_this_gc_time);
155 THREAD_LOCAL_DECL(static uintptr_t end_this_gc_time);
156 THREAD_LOCAL_DECL(static double start_this_gc_real_time);
157 THREAD_LOCAL_DECL(static double end_this_gc_real_time);
158 static void get_ready_for_GC(void);
159 static void done_with_GC(void);
160 #ifdef MZ_PRECISE_GC
161 static void inform_GC(int master_gc, int major_gc, int inc_gc, intptr_t pre_used, intptr_t post_used,
162                       intptr_t pre_admin, intptr_t post_admin, intptr_t post_child_places_used);
163 #endif
164 
165 THREAD_LOCAL_DECL(static volatile short delayed_break_ready);
166 THREAD_LOCAL_DECL(static Scheme_Thread *main_break_target_thread);
167 
168 THREAD_LOCAL_DECL(Scheme_Sleep_Proc scheme_place_sleep);
169 THREAD_LOCAL_DECL(Scheme_Object *thread_sleep_callback);
170 THREAD_LOCAL_DECL(int thread_sleep_callback_fd);
171 HOOK_SHARED_OK void (*scheme_sleep)(float seconds, void *fds);
172 HOOK_SHARED_OK void (*scheme_notify_multithread)(int on);
173 HOOK_SHARED_OK void (*scheme_wakeup_on_input)(void *fds);
174 HOOK_SHARED_OK int (*scheme_check_for_break)(void);
175 THREAD_LOCAL_DECL(static Scheme_On_Atomic_Timeout_Proc on_atomic_timeout);
176 THREAD_LOCAL_DECL(static void *on_atomic_timeout_data);
177 THREAD_LOCAL_DECL(static int atomic_timeout_auto_suspend);
178 THREAD_LOCAL_DECL(static int atomic_timeout_atomic_level);
179 
180 THREAD_LOCAL_DECL(struct Scheme_GC_Pre_Post_Callback_Desc *gc_prepost_callback_descs);
181 
182 ROSYM static Scheme_Object *read_symbol, *write_symbol, *execute_symbol, *delete_symbol, *exists_symbol;
183 ROSYM static Scheme_Object *client_symbol, *server_symbol;
184 ROSYM static Scheme_Object *major_symbol, *minor_symbol, *incremental_symbol;
185 ROSYM static Scheme_Object *cumulative_symbol;
186 ROSYM static Scheme_Object *gc_symbol, *gc_major_symbol;
187 ROSYM static Scheme_Object *racket_symbol;
188 
189 THREAD_LOCAL_DECL(static int do_atomic = 0);
190 THREAD_LOCAL_DECL(static int missed_context_switch = 0);
191 THREAD_LOCAL_DECL(static int have_activity = 0);
192 THREAD_LOCAL_DECL(int scheme_active_but_sleeping = 0);
193 THREAD_LOCAL_DECL(static int thread_ended_with_activity);
194 THREAD_LOCAL_DECL(int scheme_no_stack_overflow);
195 THREAD_LOCAL_DECL(int all_breaks_disabled = 0);
196 THREAD_LOCAL_DECL(static int needs_sleep_cancelled);
197 THREAD_LOCAL_DECL(static double needs_sleep_time_end); /* back-door result */
198 THREAD_LOCAL_DECL(static int tls_pos = 0);
199 /* On swap, put target in a static variable, instead of on the stack,
200    so that the swapped-out thread is less likely to have a pointer
201    to the target thread. */
202 THREAD_LOCAL_DECL(static Scheme_Thread *swap_target);
203 THREAD_LOCAL_DECL(static Scheme_Object *scheduled_kills);
204 THREAD_LOCAL_DECL(static Scheme_Object *the_nested_exn_handler);
205 THREAD_LOCAL_DECL(static Scheme_Object *cust_closers);
206 THREAD_LOCAL_DECL(static Scheme_Object *thread_swap_callbacks);
207 THREAD_LOCAL_DECL(static Scheme_Object *thread_swap_out_callbacks);
208 THREAD_LOCAL_DECL(static Scheme_Object *recycle_cell);
209 THREAD_LOCAL_DECL(static Scheme_Object *maybe_recycle_cell);
210 THREAD_LOCAL_DECL(static int recycle_cc_count);
211 THREAD_LOCAL_DECL(static Scheme_Struct_Type *gc_info_prefab);
212 
213 THREAD_LOCAL_DECL(struct Scheme_Hash_Table *place_local_misc_table);
214 
215 #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
216 extern intptr_t GC_is_place();
217 #endif
218 
/* Representation of a thread cell. The per-thread values live in each
   thread's cell table; this object itself carries only the flags and
   the default value. */
typedef struct Thread_Cell {
  Scheme_Object so;         /* Racket object header */
  char inherited, assigned; /* NOTE(review): `inherited` presumably means new
                               threads see the creating thread's value, and
                               `assigned` that some thread has set the cell —
                               confirm against make_thread_cell/thread_cell_set */
  Scheme_Object *def_val;   /* value for threads with no binding of their own */
} Thread_Cell;
224 
#ifdef MZ_PRECISE_GC
/* This is a trick to get the types right. Note that
   the layout of the weak box is defined by the
   GC spec. */
typedef struct {
  short type;
  short hash_key;       /* re-used below as a 16-bit cached position */
  Scheme_Custodian *val;
} Scheme_Custodian_Weak_Box;

/* Allocate a custodian reference as a late weak box: */
# define MALLOC_MREF() (Scheme_Custodian_Reference *)scheme_make_late_weak_box(NULL)
# define CUSTODIAN_FAM(x) ((Scheme_Custodian_Weak_Box *)x)->val
# define xCUSTODIAN_FAM(x) SCHEME_BOX_VAL(x)
/* Cache the low 16 bits of position `i` in the box's `hash_key` field: */
# define SET_MREF_POSITION(mref, i) (((Scheme_Custodian_Weak_Box *)mref)->hash_key = (i & 0xFFFF))
/* Rebuild a starting search position by combining the cached low 16 bits
   with the high bits of the current count `c`: */
# define EXTRACT_MREF_START_POSITION(mref, c) (((Scheme_Custodian_Weak_Box *)mref)->hash_key | ((c) & ~0xFFFF))
/* Step size when scanning from the reconstructed position: */
# define EXTRACT_MREF_POSITION_DELTA(mref, c) 0x10000
#else
/* Without precise GC, a custodian reference is a plain weak pointer and
   no position is cached; searches start at the end and step by one. */
# define MALLOC_MREF() MALLOC_ONE_WEAK(Scheme_Custodian_Reference)
# define CUSTODIAN_FAM(x) (*(x))
# define xCUSTODIAN_FAM(x) (*(x))
# define SET_MREF_POSITION(mref, i) /* empty */
# define EXTRACT_MREF_START_POSITION(mref, c) ((c)-1)
# define EXTRACT_MREF_POSITION_DELTA(mref, c) 1
#endif
249 
/* Node in the singly-linked list of process-wide key/value entries
   (see `process_globals` below, shared across places and protected by
   `process_global_lock` when MZ_USE_MZRT is enabled). */
typedef struct Proc_Global_Rec {
  const char *key;
  void *val;
  struct Proc_Global_Rec *next;
} Proc_Global_Rec;
255 
256 SHARED_OK static Proc_Global_Rec *process_globals;
257 #if defined(MZ_USE_MZRT)
258 static mzrt_mutex *process_global_lock;
259 #endif
260 
/* A phantom-bytes object: carries a byte count for memory accounting
   without backing storage (see make-phantom-bytes / set-phantom-bytes!). */
typedef struct {
  Scheme_Object so;
  intptr_t size; /* number of bytes charged to the owning custodian */
} Scheme_Phantom_Bytes;
265 
/* A plumber holds flush callbacks registered via plumber-add-flush!;
   two tables so handles can be retained either strongly or weakly. */
struct Scheme_Plumber {
  Scheme_Object so;
  Scheme_Hash_Table *handles;        /* strongly-held flush handles */
  Scheme_Bucket_Table *weak_handles; /* weakly-held flush handles */
};
271 
272 #ifdef MZ_PRECISE_GC
273 static void register_traversers(void);
274 #endif
275 
276 static Scheme_Object *custodian_require_mem(int argc, Scheme_Object *args[]);
277 static Scheme_Object *custodian_limit_mem(int argc, Scheme_Object *args[]);
278 static Scheme_Object *custodian_can_mem(int argc, Scheme_Object *args[]);
279 
280 static Scheme_Object *collect_garbage(int argc, Scheme_Object *args[]);
281 static Scheme_Object *current_memory_use(int argc, Scheme_Object *args[]);
282 
283 static Scheme_Object *sch_thread(int argc, Scheme_Object *args[]);
284 static Scheme_Object *sch_thread_nokill(int argc, Scheme_Object *args[]);
285 static Scheme_Object *sch_sleep(int argc, Scheme_Object *args[]);
286 static Scheme_Object *thread_p(int argc, Scheme_Object *args[]);
287 static Scheme_Object *thread_running_p(int argc, Scheme_Object *args[]);
288 static Scheme_Object *thread_dead_p(int argc, Scheme_Object *args[]);
289 static Scheme_Object *thread_wait(int argc, Scheme_Object *args[]);
290 static Scheme_Object *sch_current(int argc, Scheme_Object *args[]);
291 static Scheme_Object *kill_thread(int argc, Scheme_Object *args[]);
292 static Scheme_Object *break_thread(int argc, Scheme_Object *args[]);
293 static Scheme_Object *thread_suspend(int argc, Scheme_Object *args[]);
294 static Scheme_Object *thread_resume(int argc, Scheme_Object *args[]);
295 static Scheme_Object *make_thread_suspend(int argc, Scheme_Object *args[]);
296 static Scheme_Object *make_thread_resume(int argc, Scheme_Object *args[]);
297 static Scheme_Object *make_thread_dead(int argc, Scheme_Object *args[]);
298 static void register_thread_sync();
299 
300 static Scheme_Object *sch_sync(int argc, Scheme_Object *args[]);
301 static Scheme_Object *sch_sync_timeout(int argc, Scheme_Object *args[]);
302 static Scheme_Object *sch_sync_enable_break(int argc, Scheme_Object *args[]);
303 static Scheme_Object *sch_sync_timeout_enable_break(int argc, Scheme_Object *args[]);
304 static Scheme_Object *evt_p(int argc, Scheme_Object *args[]);
305 static Scheme_Object *evts_to_evt(int argc, Scheme_Object *args[]);
306 
307 static Scheme_Object *make_custodian(int argc, Scheme_Object *argv[]);
308 static Scheme_Object *custodian_p(int argc, Scheme_Object *argv[]);
309 static Scheme_Object *custodian_close_all(int argc, Scheme_Object *argv[]);
310 static Scheme_Object *custodian_shut_down_p(int argc, Scheme_Object *argv[]);
311 static Scheme_Object *custodian_to_list(int argc, Scheme_Object *argv[]);
312 static Scheme_Object *current_custodian(int argc, Scheme_Object *argv[]);
313 static Scheme_Object *make_custodian_box(int argc, Scheme_Object *argv[]);
314 static Scheme_Object *custodian_box_value(int argc, Scheme_Object *argv[]);
315 static Scheme_Object *custodian_box_p(int argc, Scheme_Object *argv[]);
316 static Scheme_Object *call_as_nested_thread(int argc, Scheme_Object *argv[]);
317 
318 static Scheme_Object *unsafe_thread_at_root(int argc, Scheme_Object *argv[]);
319 
320 static Scheme_Object *unsafe_make_custodian_at_root(int argc, Scheme_Object *argv[]);
321 static Scheme_Object *unsafe_custodian_register(int argc, Scheme_Object *argv[]);
322 static Scheme_Object *unsafe_custodian_unregister(int argc, Scheme_Object *argv[]);
323 
324 static Scheme_Object *unsafe_add_post_custodian_shutdown(int argc, Scheme_Object *argv[]);
325 
326 static Scheme_Object *unsafe_register_process_global(int argc, Scheme_Object *argv[]);
327 static Scheme_Object *unsafe_get_place_table(int argc, Scheme_Object *argv[]);
328 static Scheme_Object *unsafe_set_on_atomic_timeout(int argc, Scheme_Object *argv[]);
329 static Scheme_Object *unsafe_add_global_finalizer(int argc, Scheme_Object *argv[]);
330 
331 static Scheme_Object *unsafe_os_thread_enabled_p(int argc, Scheme_Object *argv[]);
332 static Scheme_Object *unsafe_call_in_os_thread(int argc, Scheme_Object *argv[]);
333 static Scheme_Object *unsafe_make_os_semaphore(int argc, Scheme_Object *argv[]);
334 static Scheme_Object *unsafe_os_semaphore_wait(int argc, Scheme_Object *argv[]);
335 static Scheme_Object *unsafe_os_semaphore_post(int argc, Scheme_Object *argv[]);
336 
337 static Scheme_Object *unsafe_add_collect_callbacks(int argc, Scheme_Object *argv[]);
338 static Scheme_Object *unsafe_remove_collect_callbacks(int argc, Scheme_Object *argv[]);
339 
340 static Scheme_Object *make_plumber(int argc, Scheme_Object *argv[]);
341 static Scheme_Object *plumber_p(int argc, Scheme_Object *argv[]);
342 static Scheme_Object *plumber_flush_all(int argc, Scheme_Object *argv[]);
343 static Scheme_Object *plumber_add_flush(int argc, Scheme_Object *argv[]);
344 static Scheme_Object *plumber_remove_flush(int argc, Scheme_Object *argv[]);
345 static Scheme_Object *plumber_flush_p(int argc, Scheme_Object *argv[]);
346 static Scheme_Object *current_plumber(int argc, Scheme_Object *argv[]);
347 
348 static Scheme_Object *parameter_p(int argc, Scheme_Object *args[]);
349 static Scheme_Object *parameter_procedure_eq(int argc, Scheme_Object *args[]);
350 static Scheme_Object *make_parameter(int argc, Scheme_Object *args[]);
351 static Scheme_Object *make_derived_parameter(int argc, Scheme_Object *args[]);
352 static Scheme_Object *parameterization_p(int argc, Scheme_Object *args[]);
353 static Scheme_Object *reparameterize(int argc, Scheme_Object **argv);
354 
355 static Scheme_Object *make_thread_cell(int argc, Scheme_Object *args[]);
356 static Scheme_Object *thread_cell_p(int argc, Scheme_Object *args[]);
357 static Scheme_Object *thread_cell_get(int argc, Scheme_Object *args[]);
358 static Scheme_Object *thread_cell_set(int argc, Scheme_Object *args[]);
359 static Scheme_Object *thread_cell_values(int argc, Scheme_Object *args[]);
360 static Scheme_Object *is_thread_cell_values(int argc, Scheme_Object *args[]);
361 
362 static Scheme_Object *make_security_guard(int argc, Scheme_Object *argv[]);
363 static Scheme_Object *security_guard_p(int argc, Scheme_Object *argv[]);
364 static Scheme_Object *current_security_guard(int argc, Scheme_Object *argv[]);
365 static Scheme_Object *unsafe_make_security_guard_at_root(int argc, Scheme_Object *argv[]);
366 
367 static Scheme_Object *security_guard_check_file(int argc, Scheme_Object *argv[]);
368 static Scheme_Object *security_guard_check_file_link(int argc, Scheme_Object *argv[]);
369 static Scheme_Object *security_guard_check_network(int argc, Scheme_Object *argv[]);
370 
371 static Scheme_Object *cache_configuration(int argc, Scheme_Object **argv);
372 
373 static Scheme_Object *make_thread_set(int argc, Scheme_Object *argv[]);
374 static Scheme_Object *thread_set_p(int argc, Scheme_Object *argv[]);
375 static Scheme_Object *current_thread_set(int argc, Scheme_Object *argv[]);
376 
377 static Scheme_Object *current_thread_initial_stack_size(int argc, Scheme_Object *argv[]);
378 
379 static Scheme_Object *phantom_bytes_p(int argc, Scheme_Object *argv[]);
380 static Scheme_Object *make_phantom_bytes(int argc, Scheme_Object *argv[]);
381 static Scheme_Object *set_phantom_bytes(int argc, Scheme_Object *argv[]);
382 
383 static void adjust_custodian_family(void *pr, void *ignored);
384 
385 static Scheme_Object *make_will_executor(int argc, Scheme_Object *args[]);
386 static Scheme_Object *will_executor_p(int argc, Scheme_Object *args[]);
387 static Scheme_Object *register_will(int argc, Scheme_Object *args[]);
388 static Scheme_Object *will_executor_try(int argc, Scheme_Object *args[]);
389 static Scheme_Object *will_executor_go(int argc, Scheme_Object *args[]);
390 static Scheme_Object *will_executor_sema(Scheme_Object *w, int *repost);
391 
392 static Scheme_Object *check_break_now(int argc, Scheme_Object *args[]);
393 
394 static Scheme_Object *memory_order(int argc, Scheme_Object *args[]);
395 
396 static Scheme_Object *unsafe_start_atomic(int argc, Scheme_Object **argv);
397 static Scheme_Object *unsafe_end_atomic(int argc, Scheme_Object **argv);
398 static Scheme_Object *unsafe_start_breakable_atomic(int argc, Scheme_Object **argv);
399 static Scheme_Object *unsafe_end_breakable_atomic(int argc, Scheme_Object **argv);
400 static Scheme_Object *unsafe_in_atomic_p(int argc, Scheme_Object **argv);
401 
402 static Scheme_Object *unsafe_poll_fd(int argc, Scheme_Object **argv);
403 static Scheme_Object *unsafe_poll_ctx_fd_wakeup(int argc, Scheme_Object **argv);
404 static Scheme_Object *unsafe_poll_ctx_eventmask_wakeup(int argc, Scheme_Object **argv);
405 static Scheme_Object *unsafe_poll_ctx_time_wakeup(int argc, Scheme_Object **argv);
406 static Scheme_Object *unsafe_signal_received(int argc, Scheme_Object **argv);
407 static Scheme_Object *unsafe_make_signal_received(int argc, Scheme_Object **argv);
408 static Scheme_Object *unsafe_set_sleep_in_thread(int argc, Scheme_Object **argv);
409 
410 static Scheme_Object *unsafe_make_place_local(int argc, Scheme_Object **argv);
411 static Scheme_Object *unsafe_place_local_ref(int argc, Scheme_Object **argv);
412 static Scheme_Object *unsafe_place_local_set(int argc, Scheme_Object **argv);
413 
414 static void make_initial_config(Scheme_Thread *p);
415 
416 static int do_kill_thread(Scheme_Thread *p);
417 static void suspend_thread(Scheme_Thread *p);
418 
419 static int check_sleep(int need_activity, int sleep_now);
420 
421 static int syncing_ready(Syncing *syncing, Scheme_Schedule_Info *sinfo);
422 static void get_outof_or_into_lines(Syncing *syncing, int get_out);
423 
424 static void remove_thread(Scheme_Thread *r);
425 static void exit_or_escape(Scheme_Thread *p);
426 
427 static int resume_suspend_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo);
428 static int dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo);
429 static int cust_box_ready(Scheme_Object *o);
430 
431 static int can_break_param(Scheme_Thread *p);
432 
433 static int post_system_idle();
434 
435 static Scheme_Object *current_stats(int argc, Scheme_Object *args[]);
436 
437 static void log_peak_memory_use();
438 static char *gc_unscaled_num(char *nums, intptr_t v);
439 static char *gc_num(char *nums, intptr_t v);
440 
441 SHARED_OK static Scheme_Object **config_map;
442 
/* Closure data for a parameter procedure created by `make-parameter`
   or `make-derived-parameter`. */
typedef struct {
  MZTAG_IF_REQUIRED
  short is_derived;             /* set for make-derived-parameter results */
  Scheme_Object *key;           /* identity used to look up the parameter's
                                   cell in a parameterization */
  Scheme_Object *guard;         /* filter applied on assignment, or NULL */
  Scheme_Object *extract_guard; /* NOTE(review): presumably the wrap applied
                                   on lookup for derived parameters — confirm
                                   against make_derived_parameter */
  Scheme_Object *defcell;       /* thread cell holding the default value */
} ParamData;
451 
/* How a parameter's value is accessed in a config: stored directly or
   reached through an indirection (see config_map usage). */
enum {
  CONFIG_DIRECT,
  CONFIG_INDIRECT
};
456 
/* Intermediate object registered with a custodian in place of a thread,
   so the custodian need not strongly reference the thread itself. */
typedef struct Scheme_Thread_Custodian_Hop {
  Scheme_Object so;
  Scheme_Thread *p; /* really an indirection with precise gc */
} Scheme_Thread_Custodian_Hop;
461 
462 SHARED_OK static Scheme_Custodian_Extractor *extractors;
463 
/* Thread-swap primitives over the stack-copying jmpup buffers:
   SETJMP captures thread p's continuation (copying its stack),
   LONGJMP resumes a captured thread, and RESETJMP discards the
   captured state so the buffer can be reused. */
#define SETJMP(p) scheme_setjmpup(&p->jmpup_buf, p, p->stack_start)
#define LONGJMP(p) scheme_longjmpup(&p->jmpup_buf)
#define RESETJMP(p) scheme_reset_jmpup_buf(&p->jmpup_buf)
467 
468 #ifndef MZ_PRECISE_GC
469 # define scheme_thread_hop_type scheme_thread_type
470 #endif
471 
472 SHARED_OK Scheme_Object *initial_cmdline_vec;
473 
474 #if defined(MZ_USE_PLACES)
475 # define RUNNING_IN_ORIGINAL_PLACE (scheme_current_place_id == 0)
476 #else
477 # define RUNNING_IN_ORIGINAL_PLACE 1
478 #endif
479 
480 /*========================================================================*/
481 /*                             initialization                             */
482 /*========================================================================*/
483 
/* Install the thread, custodian, plumber, security-guard, thread-group,
   parameter, thread-cell, will-executor, memory-accounting, and sync
   primitives into `env`, and intern the symbols this file uses.
   Called once during startup. */
void scheme_init_thread(Scheme_Startup_Env *env)
{
#ifdef MZ_PRECISE_GC
    register_traversers();
#endif

  /* Symbols used for security-guard file/network checks: */
  REGISTER_SO(read_symbol);
  REGISTER_SO(write_symbol);
  REGISTER_SO(execute_symbol);
  REGISTER_SO(delete_symbol);
  REGISTER_SO(exists_symbol);
  REGISTER_SO(client_symbol);
  REGISTER_SO(server_symbol);

  read_symbol = scheme_intern_symbol("read");
  write_symbol = scheme_intern_symbol("write");
  execute_symbol = scheme_intern_symbol("execute");
  delete_symbol = scheme_intern_symbol("delete");
  exists_symbol = scheme_intern_symbol("exists");
  client_symbol = scheme_intern_symbol("client");
  server_symbol = scheme_intern_symbol("server");

  /* Symbols for collect-garbage / current-memory-use modes: */
  REGISTER_SO(major_symbol);
  REGISTER_SO(minor_symbol);
  REGISTER_SO(incremental_symbol);
  major_symbol = scheme_intern_symbol("major");
  minor_symbol = scheme_intern_symbol("minor");
  incremental_symbol  = scheme_intern_symbol("incremental");

  REGISTER_SO(cumulative_symbol);
  cumulative_symbol = scheme_intern_symbol("cumulative");

  /* Logger topic symbols for GC reporting: */
  REGISTER_SO(gc_symbol);
  REGISTER_SO(gc_major_symbol);
  gc_symbol = scheme_intern_symbol("GC");
  gc_major_symbol = scheme_intern_symbol("GC:major");

  REGISTER_SO(racket_symbol);
  racket_symbol = scheme_intern_symbol("racket");

  ADD_PRIM_W_ARITY("dump-memory-stats"            , scheme_dump_gc_stats, 0, -1, env);
  ADD_PRIM_W_ARITY("vector-set-performance-stats!", current_stats       , 1, 2, env);

  /* Core thread operations: */
  ADD_PRIM_W_ARITY("thread"                , sch_thread         , 1, 1, env);
  ADD_PRIM_W_ARITY("thread/suspend-to-kill", sch_thread_nokill  , 1, 1, env);
  ADD_PRIM_W_ARITY("sleep"                 , sch_sleep          , 0, 1, env);
  ADD_FOLDING_PRIM("thread?"               , thread_p           , 1, 1, 1, env);
  ADD_PRIM_W_ARITY("thread-running?"       , thread_running_p   , 1, 1, env);
  ADD_PRIM_W_ARITY("thread-dead?"          , thread_dead_p      , 1, 1, env);
  ADD_PRIM_W_ARITY("thread-wait"           , thread_wait        , 1, 1, env);
  ADD_PRIM_W_ARITY("current-thread"        , sch_current        , 0, 0, env);
  ADD_PRIM_W_ARITY("kill-thread"           , kill_thread        , 1, 1, env);
  ADD_PRIM_W_ARITY("break-thread"          , break_thread       , 1, 2, env);
  ADD_PRIM_W_ARITY("thread-suspend"        , thread_suspend     , 1, 1, env);
  ADD_PRIM_W_ARITY("thread-resume"         , thread_resume      , 1, 2, env);
  ADD_PRIM_W_ARITY("thread-resume-evt"     , make_thread_resume , 1, 1, env);
  ADD_PRIM_W_ARITY("thread-suspend-evt"    , make_thread_suspend, 1, 1, env);
  ADD_PRIM_W_ARITY("thread-dead-evt"       , make_thread_dead   , 1, 1, env);

  /* Event (evt) readiness hooks for thread-related types: */
  register_thread_sync();
  scheme_add_evt(scheme_thread_suspend_type, (Scheme_Ready_Fun)resume_suspend_ready, NULL, NULL, 1);
  scheme_add_evt(scheme_thread_resume_type, (Scheme_Ready_Fun)resume_suspend_ready, NULL, NULL, 1);
  scheme_add_evt(scheme_thread_dead_type, (Scheme_Ready_Fun)dead_ready, NULL, NULL, 1);
  scheme_add_evt(scheme_cust_box_type, cust_box_ready, NULL, NULL, 0);


  /* Custodians: */
  ADD_PARAMETER("current-custodian"        , current_custodian    , MZCONFIG_CUSTODIAN, env);
  ADD_PRIM_W_ARITY("make-custodian"        , make_custodian       , 0, 1, env);
  ADD_FOLDING_PRIM("custodian?"            , custodian_p          , 1, 1, 1  , env);
  ADD_PRIM_W_ARITY("custodian-shutdown-all", custodian_close_all  , 1, 1, env);
  ADD_PRIM_W_ARITY("custodian-shut-down?"  , custodian_shut_down_p, 1, 1, env);
  ADD_PRIM_W_ARITY("custodian-managed-list", custodian_to_list    , 2, 2, env);
  ADD_PRIM_W_ARITY("make-custodian-box"    , make_custodian_box   , 2, 2, env);
  ADD_PRIM_W_ARITY("custodian-box-value"   , custodian_box_value  , 1, 1, env);
  ADD_FOLDING_PRIM("custodian-box?"        , custodian_box_p      , 1, 1, 1  , env);
  ADD_PRIM_W_ARITY("call-in-nested-thread" , call_as_nested_thread, 1, 2, env);

  /* Plumbers: */
  ADD_PARAMETER("current-plumber"          , current_plumber      , MZCONFIG_PLUMBER, env);
  ADD_PRIM_W_ARITY("make-plumber"          , make_plumber       , 0, 0, env);
  ADD_FOLDING_PRIM("plumber?"              , plumber_p          , 1, 1, 1  , env);
  ADD_PRIM_W_ARITY("plumber-flush-all"     , plumber_flush_all   , 1, 1, env);
  ADD_PRIM_W_ARITY("plumber-add-flush!"    , plumber_add_flush   , 2, 3, env);
  ADD_PRIM_W_ARITY("plumber-flush-handle-remove!" , plumber_remove_flush, 1, 1, env);
  ADD_PRIM_W_ARITY("plumber-flush-handle?" , plumber_flush_p     , 1, 1, env);

  /* Security guards: */
  ADD_PRIM_W_ARITY("security-guard?"    , security_guard_p   , 1, 1, env);
  ADD_PRIM_W_ARITY("make-security-guard", make_security_guard, 3, 4, env);
  ADD_PARAMETER("current-security-guard", current_security_guard, MZCONFIG_SECURITY_GUARD, env);

  /* Thread groups: */
  ADD_PRIM_W_ARITY("thread-group?"    , thread_set_p   , 1, 1, env);
  ADD_PRIM_W_ARITY("make-thread-group", make_thread_set, 0, 1, env);
  ADD_PARAMETER("current-thread-group", current_thread_set, MZCONFIG_THREAD_SET, env);

  /* Parameters: */
  ADD_PRIM_W_ARITY("parameter?"            , parameter_p           , 1, 1, env);
  ADD_PRIM_W_ARITY("make-parameter"        , make_parameter        , 1, 3, env);
  ADD_PRIM_W_ARITY("make-derived-parameter", make_derived_parameter, 3, 3, env);
  ADD_PRIM_W_ARITY("parameter-procedure=?" , parameter_procedure_eq, 2, 2, env);
  ADD_PRIM_W_ARITY("parameterization?"     , parameterization_p    , 1, 1, env);

  /* Thread cells: */
  ADD_PRIM_W_ARITY("thread-cell?"                        , thread_cell_p     , 1, 1, env);
  ADD_PRIM_W_ARITY("make-thread-cell"                    , make_thread_cell  , 1, 2, env);
  ADD_PRIM_W_ARITY("thread-cell-ref"                     , thread_cell_get   , 1, 1, env);
  ADD_PRIM_W_ARITY("thread-cell-set!"                    , thread_cell_set   , 2, 2, env);
  ADD_PRIM_W_ARITY("current-preserved-thread-cell-values", thread_cell_values, 0, 1, env);
  ADD_FOLDING_PRIM("thread-cell-values?"                 , is_thread_cell_values, 1, 1, 1, env);

  /* Will executors: */
  ADD_PRIM_W_ARITY("make-will-executor", make_will_executor, 0, 0, env);
  ADD_PRIM_W_ARITY("will-executor?"    , will_executor_p   , 1, 1, env);
  ADD_PRIM_W_ARITY("will-register"     , register_will     , 3, 3, env);
  ADD_PRIM_W_ARITY("will-try-execute"  , will_executor_try , 1, 2, env);
  ADD_PRIM_W_ARITY("will-execute"      , will_executor_go  , 1, 1, env);

  scheme_add_evt_through_sema(scheme_will_executor_type, will_executor_sema, NULL);


  /* GC and memory accounting: */
  ADD_PRIM_W_ARITY("collect-garbage"                       , collect_garbage      , 0, 1, env);
  ADD_PRIM_W_ARITY("current-memory-use"                    , current_memory_use   , 0, 1, env);

  ADD_PRIM_W_ARITY("custodian-require-memory"              , custodian_require_mem, 3, 3, env);
  ADD_PRIM_W_ARITY("custodian-limit-memory"                , custodian_limit_mem  , 2, 3, env);
  ADD_PRIM_W_ARITY("custodian-memory-accounting-available?", custodian_can_mem    , 0, 0, env);

  ADD_FOLDING_PRIM("memory-order-acquire", memory_order, 0, 0, 1, env);
  ADD_FOLDING_PRIM("memory-order-release", memory_order, 0, 0, 1, env);

  /* Synchronization: */
  ADD_FOLDING_PRIM("evt?"                      , evt_p                        , 1, 1 , 1, env);
  ADD_PRIM_W_ARITY2("sync"                     , sch_sync                     , 0, -1, 0, -1, env);
  ADD_PRIM_W_ARITY2("sync/timeout"             , sch_sync_timeout             , 1, -1, 0, -1, env);
  ADD_PRIM_W_ARITY2("sync/enable-break"        , sch_sync_enable_break        , 0, -1, 0, -1, env);
  ADD_PRIM_W_ARITY2("sync/timeout/enable-break", sch_sync_timeout_enable_break, 1, -1, 0, -1, env);
  ADD_PRIM_W_ARITY("choice-evt"                , evts_to_evt                  , 0, -1, env);

  ADD_PARAMETER("current-thread-initial-stack-size", current_thread_initial_stack_size, MZCONFIG_THREAD_INIT_STACK_SIZE, env);

  /* Phantom bytes: */
  ADD_PRIM_W_ARITY("phantom-bytes?", phantom_bytes_p, 1, 1, env);
  ADD_PRIM_W_ARITY("make-phantom-bytes", make_phantom_bytes, 1, 1, env);
  ADD_PRIM_W_ARITY("set-phantom-bytes!", set_phantom_bytes, 2, 2, env);

  /* Debugging aid: force extra GCs at exit when requested: */
  if (scheme_getenv("PLT_GCS_ON_EXIT"))
    gcs_on_exit = 1;
}
625 
/* Registers the `unsafe-...` thread, custodian, polling, OS-thread,
   and place-local primitives in the "#%unsafe" primitive instance. */
void
scheme_init_unsafe_thread (Scheme_Startup_Env *env)
{
  Scheme_Object *p;

  /* Atomic-mode entry/exit primitives: */
  scheme_addto_prim_instance("unsafe-start-atomic",
			     scheme_make_prim_w_arity(unsafe_start_atomic,
						      "unsafe-start-atomic",
						      0, 0),
			     env);
  scheme_addto_prim_instance("unsafe-end-atomic",
			     scheme_make_prim_w_arity(unsafe_end_atomic,
						      "unsafe-end-atomic",
						      0, 0),
			     env);
  scheme_addto_prim_instance("unsafe-start-breakable-atomic",
			     scheme_make_prim_w_arity(unsafe_start_breakable_atomic,
						      "unsafe-start-breakable-atomic",
						      0, 0),
			     env);
  scheme_addto_prim_instance("unsafe-end-breakable-atomic",
			     scheme_make_prim_w_arity(unsafe_end_breakable_atomic,
						      "unsafe-end-breakable-atomic",
						      0, 0),
			     env);
  scheme_addto_prim_instance("unsafe-in-atomic?",
			     scheme_make_prim_w_arity(unsafe_in_atomic_p,
						      "unsafe-in-atomic?",
						      0, 0),
			     env);

  ADD_PRIM_W_ARITY("unsafe-thread-at-root", unsafe_thread_at_root, 1, 1, env);

  /* Custodian manipulation: */
  ADD_PRIM_W_ARITY("unsafe-make-custodian-at-root", unsafe_make_custodian_at_root, 0, 0, env);
  ADD_PRIM_W_ARITY("unsafe-custodian-register", unsafe_custodian_register, 5, 6, env);
  ADD_PRIM_W_ARITY("unsafe-custodian-unregister", unsafe_custodian_unregister, 2, 2, env);

  ADD_PRIM_W_ARITY("unsafe-add-post-custodian-shutdown", unsafe_add_post_custodian_shutdown, 1, 2, env);

  ADD_PRIM_W_ARITY("unsafe-register-process-global", unsafe_register_process_global, 2, 2, env);
  ADD_PRIM_W_ARITY("unsafe-get-place-table", unsafe_get_place_table, 0, 0, env);

  ADD_PRIM_W_ARITY("unsafe-set-on-atomic-timeout!", unsafe_set_on_atomic_timeout, 1, 1, env);

  ADD_PRIM_W_ARITY("unsafe-make-security-guard-at-root", unsafe_make_security_guard_at_root, 0, 3, env);

  ADD_PRIM_W_ARITY("unsafe-add-global-finalizer", unsafe_add_global_finalizer, 2, 2, env);

  /* Low-level polling support: */
  scheme_addto_prim_instance("unsafe-poller", scheme_unsafe_poller_proc, env);
  ADD_PRIM_W_ARITY("unsafe-poll-fd", unsafe_poll_fd, 2, 3, env);
  ADD_PRIM_W_ARITY("unsafe-poll-ctx-fd-wakeup", unsafe_poll_ctx_fd_wakeup, 3, 3, env);
  ADD_PRIM_W_ARITY("unsafe-poll-ctx-eventmask-wakeup", unsafe_poll_ctx_eventmask_wakeup, 2, 2, env);
  ADD_PRIM_W_ARITY("unsafe-poll-ctx-milliseconds-wakeup", unsafe_poll_ctx_time_wakeup, 2, 2, env);
  ADD_PRIM_W_ARITY("unsafe-signal-received", unsafe_signal_received, 0, 0, env);
  ADD_PRIM_W_ARITY("unsafe-make-signal-received", unsafe_make_signal_received, 0, 0, env);
  ADD_PRIM_W_ARITY("unsafe-set-sleep-in-thread!", unsafe_set_sleep_in_thread, 2, 2, env);

  /* OS-level (non-Racket) threads and semaphores: */
  ADD_PRIM_W_ARITY("unsafe-os-thread-enabled?", unsafe_os_thread_enabled_p, 0, 0, env);
  ADD_PRIM_W_ARITY("unsafe-call-in-os-thread", unsafe_call_in_os_thread, 1, 1, env);
  ADD_PRIM_W_ARITY("unsafe-make-os-semaphore", unsafe_make_os_semaphore, 0, 0, env);
  ADD_PRIM_W_ARITY("unsafe-os-semaphore-wait", unsafe_os_semaphore_wait, 1, 1, env);
  ADD_PRIM_W_ARITY("unsafe-os-semaphore-post", unsafe_os_semaphore_post, 1, 1, env);

  ADD_PRIM_W_ARITY("unsafe-add-collect-callbacks", unsafe_add_collect_callbacks, 2, 2, env);
  ADD_PRIM_W_ARITY("unsafe-remove-collect-callbacks", unsafe_remove_collect_callbacks, 1, 1, env);

  /* Place locals are just boxes, so these operations are just aliases of box operations;
     the inline/omitable flags let the optimizer treat them like box primitives. */
  p = scheme_make_prim_w_arity(unsafe_make_place_local, "unsafe-make-place-local", 1, 1);
  SCHEME_PRIM_PROC_FLAGS(p) |= scheme_intern_prim_opt_flags(SCHEME_PRIM_IS_UNARY_INLINED
                                                            | SCHEME_PRIM_IS_OMITABLE_ALLOCATION);
  scheme_addto_prim_instance("unsafe-make-place-local", p, env);

  p = scheme_make_immed_prim(unsafe_place_local_ref, "unsafe-place-local-ref", 1, 1);
  SCHEME_PRIM_PROC_FLAGS(p) |= scheme_intern_prim_opt_flags(SCHEME_PRIM_IS_UNARY_INLINED
                                                            | SCHEME_PRIM_IS_UNSAFE_OMITABLE
                                                            | SCHEME_PRIM_IS_OMITABLE
                                                            | SCHEME_PRIM_AD_HOC_OPT);
  scheme_addto_prim_instance("unsafe-place-local-ref", p, env);

  p = scheme_make_immed_prim(unsafe_place_local_set, "unsafe-place-local-set!", 2, 2);
  SCHEME_PRIM_PROC_FLAGS(p) |= scheme_intern_prim_opt_flags(SCHEME_PRIM_IS_BINARY_INLINED
                                                            | SCHEME_PRIM_AD_HOC_OPT);
  scheme_addto_prim_instance("unsafe-place-local-set!", p, env);

  ADD_PRIM_W_ARITY("unsafe-make-srcloc", scheme_unsafe_make_srcloc, 5, 5, env);
}
712 
scheme_init_thread_places(void)713 void scheme_init_thread_places(void) {
714   buffer_init_size = INIT_TB_SIZE;
715   REGISTER_SO(recycle_cell);
716   REGISTER_SO(maybe_recycle_cell);
717   REGISTER_SO(gc_prepost_callback_descs);
718   REGISTER_SO(place_local_misc_table);
719   REGISTER_SO(gc_info_prefab);
720   REGISTER_SO(on_atomic_timeout_data);
721   gc_info_prefab = scheme_lookup_prefab_type(scheme_intern_symbol("gc-info"), 10);
722 }
723 
scheme_init_inspector()724 void scheme_init_inspector() {
725   REGISTER_SO(initial_inspector);
726   initial_inspector = scheme_make_initial_inspectors();
727   /* Keep the initial inspector in case someone resets Racket (by
728      calling scheme_basic_env() a second time. Using the same
729      inspector after a reset lets us use the same initial module
730      instances. */
731 }
732 
scheme_get_current_inspector()733 Scheme_Object *scheme_get_current_inspector()
734   XFORM_SKIP_PROC
735 {
736   Scheme_Config *c;
737 
738   if (scheme_defining_primitives)
739     return initial_inspector;
740 
741   c = scheme_current_config();
742   return scheme_get_param(c, MZCONFIG_INSPECTOR);
743 }
744 
/* Returns the inspector created at startup; see scheme_init_inspector(). */
Scheme_Object *scheme_get_initial_inspector(void)
{
  return initial_inspector;
}
749 
scheme_init_parameterization()750 void scheme_init_parameterization()
751 {
752   REGISTER_SO(scheme_exn_handler_key);
753   REGISTER_SO(scheme_parameterization_key);
754   REGISTER_SO(scheme_break_enabled_key);
755   scheme_exn_handler_key = scheme_make_symbol("exnh");
756   scheme_parameterization_key = scheme_make_symbol("paramz");
757   scheme_break_enabled_key = scheme_make_symbol("break-on?");
758 }
759 
/* Populates the "#%paramz" primitive instance with the built-in
   continuation-mark keys and low-level parameterization and
   security-guard primitives. */
void scheme_init_paramz(Scheme_Startup_Env *env)
{
  scheme_switch_prim_instance(env, "#%paramz");

  /* Expose the mark keys created by scheme_init_parameterization(): */
  scheme_addto_prim_instance("exception-handler-key", scheme_exn_handler_key     , env);
  scheme_addto_prim_instance("parameterization-key" , scheme_parameterization_key, env);
  scheme_addto_prim_instance("break-enabled-key"    , scheme_break_enabled_key   , env);

  ADD_PRIM_W_ARITY("extend-parameterization" , scheme_extend_parameterization , 1, -1, env);
  ADD_PRIM_W_ARITY("check-for-break"         , check_break_now         , 0,  0, env);
  ADD_PRIM_W_ARITY("reparameterize"          , reparameterize          , 1,  1, env);

  ADD_PRIM_W_ARITY("cache-configuration"     , cache_configuration, 2,  2, env);

  ADD_PRIM_W_ARITY("security-guard-check-file", security_guard_check_file, 3,  3, env);
  ADD_PRIM_W_ARITY("security-guard-check-file-link", security_guard_check_file_link, 3,  3, env);
  ADD_PRIM_W_ARITY("security-guard-check-network", security_guard_check_network, 4,  4, env);

  scheme_restore_prim_instance(env);
}
780 
/* (collect-garbage [mode]) --- triggers a garbage collection.
   `mode` is 'major (the default), 'minor, or 'incremental; any other
   argument is a contract violation.  Always returns void. */
static Scheme_Object *collect_garbage(int argc, Scheme_Object *argv[])
{
  if (argc == 1 && SAME_OBJ(minor_symbol, argv[0])) {
    scheme_collect_garbage_minor();
  } else if ((argc < 1) || SAME_OBJ(major_symbol, argv[0])) {
    /* No argument defaults to a major collection. */
    scheme_collect_garbage();
  } else if (SAME_OBJ(incremental_symbol, argv[0])) {
    /* (A redundant `(argc < 1)` test was removed here: the previous
       branch already handles a missing argument, so this branch is
       reached only with exactly one argument.) */
#ifdef MZ_PRECISE_GC
    GC_request_incremental_mode();
#endif
  } else {
    scheme_wrong_contract("collect-garbage",
                          "(or/c 'major 'minor 'incremental)",
                          0, argc, argv);
  }

  return scheme_void;
}
799 
/* (current-memory-use [mode]) --- reports memory use as an exact
   integer.  `mode` can be a custodian (account only that custodian's
   charges, under precise GC), 'cumulative (total ever allocated), or
   #f / absent (current overall use). */
static Scheme_Object *current_memory_use(int argc, Scheme_Object *args[])
{
  Scheme_Object *which = NULL;
  int want_cumulative = 0;
  uintptr_t amt = 0;

  if (argc) {
    Scheme_Object *a = args[0];
    if (SCHEME_FALSEP(a)
        || SAME_TYPE(SCHEME_TYPE(a), scheme_custodian_type))
      which = a;
    else if (SAME_OBJ(a, cumulative_symbol))
      want_cumulative = 1;
    else
      scheme_wrong_contract("current-memory-use",
                            "(or/c custodian? 'cumulative #f)",
                            0, argc, args);
  }

  if (want_cumulative) {
#ifdef MZ_PRECISE_GC
    amt = GC_get_memory_ever_used();
#else
    amt = GC_get_total_bytes();
#endif
  } else {
#ifdef MZ_PRECISE_GC
    amt = GC_get_memory_use(which);
#else
    scheme_unused_object(which);
    amt = GC_get_memory_use();
#endif
  }

  return scheme_make_integer_value_from_unsigned(amt);
}
838 
/* Caches the result of calling thunk argv[1] under small-integer key
   argv[0] (only keys 0 and 1 are accepted).  Subsequent calls with
   the same key return the cached value without re-applying the thunk.
   Returns #f for an out-of-range or non-fixnum key. */
static Scheme_Object *cache_configuration(int argc, Scheme_Object **argv)
{
  int idx;

  if (!SCHEME_INTP(argv[0]))
    return scheme_false;

  idx = SCHEME_INT_VAL(argv[0]);
  if ((idx < 0) || (idx > 1))
    return scheme_false;

  if (!configuration_callback_cache[idx]) {
    Scheme_Object *result;
    result = _scheme_apply(argv[1], 0, NULL);
    /* Register the slot as a GC root only once it's first used: */
    REGISTER_SO(configuration_callback_cache[idx]);
    configuration_callback_cache[idx] = result;
  }

  return configuration_callback_cache[idx];
}
860 
861 /*========================================================================*/
862 /*                              custodians                                */
863 /*========================================================================*/
864 
/* If a custodian has a limit and any object or children, then it
   must not be collected and merged with its parent. To prevent
   collection, we register the custodian in the `limited_custodians'
   table; when it no longer qualifies, we remove it again. */
static void adjust_limit_table(Scheme_Custodian *c)
{
  int should_record;

  if (!c->has_limit)
    return;

  should_record = (c->elems || CUSTODIAN_FAM(c->children));

  if (should_record && !c->recorded) {
    c->recorded = 1;
    if (!limited_custodians)
      limited_custodians = scheme_make_hash_table(SCHEME_hash_ptr);
    scheme_hash_set(limited_custodians, (Scheme_Object *)c, scheme_true);
  } else if (!should_record && c->recorded) {
    c->recorded = 0;
    if (limited_custodians)
      scheme_hash_set(limited_custodians, (Scheme_Object *)c, NULL);
  }
}
886 
/* (custodian-require-memory super amt sub) --- installs an accounting
   hook demanding that `amt` bytes remain available to `sub`, charged
   against `super`.  `sub` must be a strict descendant of `super`. */
static Scheme_Object *custodian_require_mem(int argc, Scheme_Object *args[])
{
  intptr_t amt;
  Scheme_Custodian *super, *sub, *walk;

  if (NOT_SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
    scheme_wrong_contract("custodian-require-memory", "custodian?", 0, argc, args);
    return NULL;
  }

  if (SCHEME_INTP(args[1]) && (SCHEME_INT_VAL(args[1]) > 0)) {
    amt = SCHEME_INT_VAL(args[1]);
  } else if (SCHEME_BIGNUMP(args[1]) && SCHEME_BIGPOS(args[1])) {
    amt = 0x3fffffff; /* more memory than we actually have */
  } else {
    scheme_wrong_contract("custodian-require-memory", "exact-positive-integer?", 1, argc, args);
    return NULL;
  }

  if (NOT_SAME_TYPE(SCHEME_TYPE(args[2]), scheme_custodian_type)) {
    scheme_wrong_contract("custodian-require-memory", "custodian?", 2, argc, args);
    return NULL;
  }

  super = (Scheme_Custodian *)args[0];
  sub = (Scheme_Custodian *)args[2];

  /* Walk up from `sub` looking for `super`; note that `sub == super`
     does not count as a sub-custodian. */
  if (super == sub) {
    walk = NULL;
  } else {
    for (walk = sub; walk && NOT_SAME_OBJ(walk, super); ) {
      walk = CUSTODIAN_FAM(walk->parent);
    }
  }
  if (!walk) {
    scheme_raise_exn(MZEXN_FAIL_CONTRACT,
                     "custodian-require-memory: second custodian is not a sub-custodian of the first custodian");
  }

#ifdef MZ_PRECISE_GC
  if (GC_set_account_hook(MZACCT_REQUIRE, super, amt, sub))
    return scheme_void;
#else
  scheme_unused_intptr(amt);
#endif

  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED,
                   "custodian-require-memory: " NOT_SUPPORTED_STR);
  return NULL; /* doesn't get here */
}
938 
/* (custodian-limit-memory limit-cust amt [stop-cust]) --- installs an
   accounting hook that shuts down `stop-cust` (default: `limit-cust`)
   when `limit-cust`'s charges exceed `amt` bytes. */
static Scheme_Object *custodian_limit_mem(int argc, Scheme_Object *args[])
{
  intptr_t amt;

  if (NOT_SAME_TYPE(SCHEME_TYPE(args[0]), scheme_custodian_type)) {
    scheme_wrong_contract("custodian-limit-memory", "custodian?", 0, argc, args);
    return NULL;
  }

  if (SCHEME_INTP(args[1]) && (SCHEME_INT_VAL(args[1]) > 0)) {
    amt = SCHEME_INT_VAL(args[1]);
  } else if (SCHEME_BIGNUMP(args[1]) && SCHEME_BIGPOS(args[1])) {
    amt = 0x3fffffff; /* more memory than we actually have */
  } else {
    scheme_wrong_contract("custodian-limit-memory", "exact-positive-integer?", 1, argc, args);
    return NULL;
  }

  if ((argc > 2)
      && NOT_SAME_TYPE(SCHEME_TYPE(args[2]), scheme_custodian_type)) {
    scheme_wrong_contract("custodian-limit-memory", "custodian?", 2, argc, args);
    return NULL;
  }

  /* Mark the involved custodians as limited so that they stay in the
     `limited_custodians' table (see adjust_limit_table): */
  ((Scheme_Custodian *)args[0])->has_limit = 1;
  adjust_limit_table((Scheme_Custodian *)args[0]);
  if (argc > 2) {
    ((Scheme_Custodian *)args[2])->has_limit = 1;
    adjust_limit_table((Scheme_Custodian *)args[2]);
  }

#ifdef MZ_PRECISE_GC
  if (GC_set_account_hook(MZACCT_LIMIT, args[0], amt, (argc > 2) ? args[2] : args[0]))
    return scheme_void;
#else
  scheme_unused_intptr(amt);
#endif

  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED,
                   "custodian-limit-memory: " NOT_SUPPORTED_STR);
  return NULL; /* doesn't get here */
}
982 
/* Reports whether the running GC supports memory accounting. */
static Scheme_Object *custodian_can_mem(int argc, Scheme_Object *args[])
{
#ifdef MZ_PRECISE_GC
  if (GC_accouting_enabled())
    return scheme_true;
  return scheme_false;
#else
  return scheme_false;
#endif
}
991 
/* Grows custodian `m`'s parallel arrays (boxes/closers/data/mrefs) so
   that at least `k` more entries fit.  Old slots are cleared as they
   are copied so that stale entries don't retain objects. */
static void ensure_custodian_space(Scheme_Custodian *m, int k)
{
  int i, new_alloc;
  Scheme_Object ***new_boxes;
  Scheme_Custodian_Reference **new_mrefs;
  Scheme_Close_Custodian_Client **new_closers;
  void **new_data;

  if (m->count + k < m->alloc)
    return;

  /* Doubling growth, starting from 4: */
  new_alloc = (m->alloc ? (2 * m->alloc) : 4);
  if (new_alloc < k)
    new_alloc += k;
  m->alloc = new_alloc;

  new_boxes = MALLOC_N(Scheme_Object**, m->alloc);
  new_closers = MALLOC_N_ATOMIC(Scheme_Close_Custodian_Client*, m->alloc);
  new_data = MALLOC_N(void*, m->alloc);
  new_mrefs = MALLOC_N(Scheme_Custodian_Reference*, m->alloc);

  for (i = 0; i < m->count; i++) {
    new_boxes[i] = m->boxes[i];
    m->boxes[i] = NULL;
    new_closers[i] = m->closers[i];
    m->closers[i] = NULL;
    new_data[i] = m->data[i];
    m->data[i] = NULL;
    new_mrefs[i] = m->mrefs[i];
    m->mrefs[i] = NULL;
  }

  m->boxes = new_boxes;
  m->closers = new_closers;
  m->data = new_data;
  *m->data_ptr = new_data;
  m->mrefs = new_mrefs;
}
1029 
/* Records a managed object in custodian `m`: `box` holds the object,
   `f`/`data` is the closer to run at shutdown, and `mref` points back
   at `m`.  Reuses an emptied slot when one exists; otherwise appends,
   growing the arrays as needed. */
static void add_managed_box(Scheme_Custodian *m,
			    Scheme_Object **box, Scheme_Custodian_Reference *mref,
			    Scheme_Close_Custodian_Client *f, void *data)
{
  int i, saw = 0;

  /* Scan backward for a hole left by a removed entry: */
  for (i = m->count; i--; ) {
    if (!m->boxes[i]) {
      /* Found a hole; reuse this slot. */
      m->boxes[i] = box;
      m->closers[i] = f;
      m->data[i] = data;
      m->mrefs[i] = mref;
      SET_MREF_POSITION(mref, i);

      m->elems++;
      adjust_limit_table(m);

      return;
    } else {
      /* `saw` counts occupied slots; once occupied-so-far plus the
         remaining prefix equals `elems`, every earlier slot is full. */
      saw++;
      if (i + saw == m->elems)
        break; /* no empty spaces left */
    }
  }

  /* No hole available: append at the end. */
  ensure_custodian_space(m, 1);

  m->boxes[m->count] = box;
  m->closers[m->count] = f;
  m->data[m->count] = data;
  m->mrefs[m->count] = mref;
  SET_MREF_POSITION(mref, m->count);

  m->elems++;
  adjust_limit_table(m);

  m->count++;
}
1068 
/* Removes object `o` (registered via custodian reference `mr`) from
   its custodian's tables.  If `old_f`/`old_data` are non-NULL, they
   receive the closer and data that had been registered.  A no-op when
   `mr` is NULL or its custodian is already gone. */
static void remove_managed(Scheme_Custodian_Reference *mr, Scheme_Object *o,
			   Scheme_Close_Custodian_Client **old_f, void **old_data)
{
  Scheme_Custodian *m;
  int i, delta;

  if (!mr)
    return;
  m = CUSTODIAN_FAM(mr);
  if (!m)
    return;

  /* The position recorded via SET_MREF_POSITION narrows the search;
     step by `delta` from the start position until the entry is found
     or the index goes negative. */
  i = EXTRACT_MREF_START_POSITION(mr, m->count);
  delta = EXTRACT_MREF_POSITION_DELTA(mr, m->count);

  while (i >= 0) {
    if (i < m->count) {
      if (m->boxes[i] && SAME_OBJ((xCUSTODIAN_FAM(m->boxes[i])),  o)) {
        /* Clear the slot and break the custodian link: */
        xCUSTODIAN_FAM(m->boxes[i]) = 0;
        m->boxes[i] = NULL;
        CUSTODIAN_FAM(m->mrefs[i]) = 0;
        m->mrefs[i] = NULL;
        if (old_f)
          *old_f = m->closers[i];
        if (old_data)
          *old_data = m->data[i];
        m->data[i] = NULL;
        --m->elems;
        adjust_limit_table(m);
        break;
      }
    }
    i -= delta;
  }

  /* Trim trailing empty slots so `count` stays tight: */
  while (m->count && !m->boxes[m->count - 1]) {
    --m->count;
  }
}
1108 
/* Finalization callback for a custodian `mgr`: unlinks it from its
   parent and the global custodian chain, reparents its children, and
   (unless `skip_move`) transfers its remaining managed items to the
   parent. */
static void adjust_custodian_family(void *mgr, void *skip_move)
{
  /* Threads note: because this function is only called as a
     finalization callback, it is automatically synchronized by the GC
     locks. And it is synchronized against all finalizations, so a
     managee can't try to unregister while we're shuffling its
     custodian. */
  Scheme_Custodian *r = (Scheme_Custodian *)mgr, *parent, *m;
  int i;

  parent = CUSTODIAN_FAM(r->parent);

  if (parent) {
    /* Remove from parent's list of children: */
    if (CUSTODIAN_FAM(parent->children) == r) {
      CUSTODIAN_FAM(parent->children) = CUSTODIAN_FAM(r->sibling);
    } else {
      m = CUSTODIAN_FAM(parent->children);
      while (m && CUSTODIAN_FAM(m->sibling) != r) {
	m = CUSTODIAN_FAM(m->sibling);
      }
      if (m)
	CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(r->sibling);
    }

    /* Remove from global list: */
    if (CUSTODIAN_FAM(r->global_next))
      CUSTODIAN_FAM(CUSTODIAN_FAM(r->global_next)->global_prev) = CUSTODIAN_FAM(r->global_prev);
    CUSTODIAN_FAM(CUSTODIAN_FAM(r->global_prev)->global_next) = CUSTODIAN_FAM(r->global_next);

    /* Add children to parent's list: */
    for (m = CUSTODIAN_FAM(r->children); m; ) {
      Scheme_Custodian *next = CUSTODIAN_FAM(m->sibling);

      CUSTODIAN_FAM(m->parent) = parent;
      CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(parent->children);
      CUSTODIAN_FAM(parent->children) = m;

      m = next;
    }

    adjust_limit_table(parent);

    /* Add remaining managed items to parent: */
    if (!skip_move) {
      for (i = 0; i < r->count; i++) {
	if (r->boxes[i]) {
	  CUSTODIAN_FAM(r->mrefs[i]) = parent;
	  add_managed_box(parent, r->boxes[i], r->mrefs[i], r->closers[i], r->data[i]);
#ifdef MZ_PRECISE_GC
	  {
	    /* Keep the GC's accounting in sync when a thread or place
	       moves to a new custodian: */
	    Scheme_Object *o;
	    o = xCUSTODIAN_FAM(r->boxes[i]);
	    if (SAME_TYPE(SCHEME_TYPE(o), scheme_thread_hop_type)) {
	      o = WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
	      if (o)
		GC_register_thread(o, parent);
	    } else if (SAME_TYPE(SCHEME_TYPE(o), scheme_place_type)) {
              GC_register_thread(o, parent);
            }
	  }
#endif
	}
      }
    }
  }

  /* Sever the dead custodian's own links: */
  CUSTODIAN_FAM(r->parent) = NULL;
  CUSTODIAN_FAM(r->sibling) = NULL;
  if (!skip_move)
    CUSTODIAN_FAM(r->children) = NULL;
  CUSTODIAN_FAM(r->global_prev) = NULL;
  CUSTODIAN_FAM(r->global_next) = NULL;
}
1183 
/* Finalizer-shaped wrapper for adjust_custodian_family(); the
   `for_retain` argument keeps the data pointer registered with the
   finalizer alive and is otherwise unused. */
static void do_adjust_custodian_family(void *mgr, void *for_retain)
{
  adjust_custodian_family(mgr, NULL);
}
1188 
/* Links custodian `m` under `parent`: into the parent's child list
   and into the global chain.  A custodian is always inserted directly
   after its parent in the global chain, so families stay together and
   the global list stays in the same order as the sibling lists. */
void insert_custodian(Scheme_Custodian *m, Scheme_Custodian *parent)
{
  CUSTODIAN_FAM(m->parent) = parent;

  if (parent) {
    Scheme_Custodian *old_next;

    /* Push onto the parent's child list: */
    CUSTODIAN_FAM(m->sibling) = CUSTODIAN_FAM(parent->children);
    CUSTODIAN_FAM(parent->children) = m;

    /* Splice into the global chain just after the parent: */
    old_next = CUSTODIAN_FAM(parent->global_next);
    CUSTODIAN_FAM(m->global_next) = old_next;
    CUSTODIAN_FAM(m->global_prev) = parent;
    CUSTODIAN_FAM(parent->global_next) = m;
    if (old_next)
      CUSTODIAN_FAM(old_next->global_prev) = m;

    adjust_limit_table(parent);
  } else {
    /* No parent (creating the root custodian): stand alone. */
    CUSTODIAN_FAM(m->sibling) = NULL;
    CUSTODIAN_FAM(m->global_next) = NULL;
    CUSTODIAN_FAM(m->global_prev) = NULL;
  }
}
1218 
/* Allocates a fresh custodian under `parent` (or under the main
   custodian when `parent` is NULL; a NULL parent is kept only while
   the main custodian itself is being created).  Installs a finalizer
   that merges the custodian into its parent when it is collected. */
Scheme_Custodian *scheme_make_custodian(Scheme_Custodian *parent)
{
  Scheme_Custodian *c;
  Scheme_Custodian_Reference *ref;
  void ***data_ptr;

  if (!parent)
    parent = main_custodian; /* still NULL if we're creating main; that's ok */

  c = MALLOC_ONE_TAGGED(Scheme_Custodian);
  c->so.type = scheme_custodian_type;
  c->alloc = 0;
  c->count = 0;

  /* Family links are weak custodian references: */
  ref = MALLOC_MREF();
  c->parent = ref;
  ref = MALLOC_MREF();
  c->children = ref;
  ref = MALLOC_MREF();
  c->sibling = ref;
  ref = MALLOC_MREF();
  c->global_next = ref;
  ref = MALLOC_MREF();
  c->global_prev = ref;

  CUSTODIAN_FAM(c->children) = NULL;

  /* Indirection cell for the data array; shared with the finalizer. */
  data_ptr = (void ***)scheme_malloc(sizeof(void**));
  c->data_ptr = data_ptr;

  c->post_callbacks = scheme_null;

  insert_custodian(c, parent);

  scheme_add_finalizer(c, do_adjust_custodian_family, data_ptr);

  return c;
}
1258 
/* Finalization callback for objects registered with `must_close`:
   when the object becomes collectable, re-register it in its
   custodian through a strong box, so that its closer still runs when
   the custodian is shut down (i.e., after wills have had a chance,
   the custodian takes ownership). */
static void rebox_willdone_object(void *o, void *mr)
{
  Scheme_Custodian *m = CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr);
  Scheme_Close_Custodian_Client *f;
  void *data;

  /* Still needs management? */
  if (m) {
#ifdef MZ_PRECISE_GC
    Scheme_Object *b;
#else
    Scheme_Object **b;
#endif

    /* Pull the old (weak) entry out, capturing its closer and data: */
    remove_managed(mr, o, &f, &data);

#ifdef MZ_PRECISE_GC
    b = scheme_box(NULL);
#else
    b = MALLOC_ONE(Scheme_Object*); /* not atomic this time */
#endif
    xCUSTODIAN_FAM(b) = o;

    /* Put the custodian back: */
    CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr) = m;

    add_managed_box(m, (Scheme_Object **)b, (Scheme_Custodian_Reference *)mr, f, data);
  }
}
1288 
managed_object_gone(void * o,void * mr)1289 static void managed_object_gone(void *o, void *mr)
1290 {
1291   Scheme_Custodian *m = CUSTODIAN_FAM((Scheme_Custodian_Reference *)mr);
1292 
1293   /* Still has management? */
1294   if (m)
1295     remove_managed(mr, o, NULL, NULL);
1296 }
1297 
/* Reports whether custodian `m` is still usable (not shut down). */
int scheme_custodian_is_available(Scheme_Custodian *m) XFORM_SKIP_PROC
/* may be called from a future thread */
{
  return m->shut_down ? 0 : 1;
}
1305 
/* Raises a contract error on behalf of `who` if custodian `m` (or the
   current custodian, when `m` is NULL) has been shut down. */
void scheme_custodian_check_available(Scheme_Custodian *m, const char *who, const char *what)
{
  if (m == NULL)
    m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  if (scheme_custodian_is_available(m))
    return;

  scheme_contract_error(who, "the custodian has been shut down",
                        "custodian", 1, m,
                        NULL);
}
1316 
/* Registers object `o` with custodian `m` (or the current custodian
   when `m` is NULL).  `f`/`data` is the closer invoked when the
   custodian is shut down.  When `must_close`, the closer is
   guaranteed to run eventually (see rebox_willdone_object); otherwise
   the custodian holds `o` only weakly.  Returns the custodian
   reference for later unregistering, or NULL if the custodian was
   already shut down (in which case `f` is called immediately as a
   fail-safe). */
Scheme_Custodian_Reference *scheme_add_managed(Scheme_Custodian *m, Scheme_Object *o,
					       Scheme_Close_Custodian_Client *f, void *data,
					       int must_close)
{
#ifdef MZ_PRECISE_GC
    Scheme_Object *b;
#else
    Scheme_Object **b;
#endif
  Scheme_Custodian_Reference *mr;

  if (!m)
    m = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  if (m->shut_down) {
    /* The custodian was shut down in the time that it took
       to allocate o. This situation should be avoided if at
       all possible, but here's the fail-safe. */
    if (f)
      f(o, data);
    return NULL;
  }

  /* A (late) weak box lets `o` be collected even while registered: */
#ifdef MZ_PRECISE_GC
  b = scheme_make_late_weak_box(NULL);
#else
  b = MALLOC_ONE_WEAK(Scheme_Object*);
#endif
  xCUSTODIAN_FAM(b) = o;

  mr = MALLOC_MREF();

  CUSTODIAN_FAM(mr) = m;

  /* The atomic link via the box `b' allows the execution of wills for
     o. After this, we should either drop the object or we have to
     hold on to the object strongly (for when custodian-close-all is
     called). */
  if (must_close)
    scheme_add_finalizer(o, rebox_willdone_object, mr);
  else
    scheme_add_finalizer(o, managed_object_gone, mr);

  add_managed_box(m, (Scheme_Object **)b, mr, f, data);

  return mr;
}
1364 
/* Closer installed by scheme_add_managed_close_on_exit(): `_data` is
   a raw pair of (cell-holding-the-real-closer . client-data).  The
   cell is cleared after use so the real closer runs at most once. */
static void chain_close_at_exit(Scheme_Object *o, void *_data)
/* This closer is recognized specially in scheme_run_atexit_closers() */
{
  Scheme_Object *pair = (Scheme_Object *)_data;
  void **closer_cell;

  closer_cell = (void **)SCHEME_CAR(pair);

  if (closer_cell != NULL) {
    Scheme_Close_Custodian_Client *closer;
    closer = (Scheme_Close_Custodian_Client *)*closer_cell;
    SCHEME_CAR(pair) = NULL;
    closer(o, SCHEME_CDR(pair));
  }
}
1380 
scheme_add_managed_close_on_exit(Scheme_Custodian * m,Scheme_Object * o,Scheme_Close_Custodian_Client * f,void * data)1381 Scheme_Custodian_Reference *scheme_add_managed_close_on_exit(Scheme_Custodian *m, Scheme_Object *o,
1382                                                              Scheme_Close_Custodian_Client *f, void *data)
1383 {
1384   void **p;
1385 
1386   p = (void **)scheme_malloc_atomic(sizeof(void *));
1387   *p = f;
1388 
1389   return scheme_add_managed(m, o,
1390                             chain_close_at_exit, scheme_make_raw_pair((Scheme_Object *)p,
1391                                                                       (Scheme_Object *)data),
1392                             1);
1393 }
1394 
/* Unregisters `o` from its custodian and removes whichever finalizer
   scheme_add_managed() installed (either kind may be present, so both
   are subtracted). */
void scheme_remove_managed(Scheme_Custodian_Reference *mr, Scheme_Object *o)
{
  /* Is this a good idea? I'm not sure: */
  scheme_subtract_finalizer(o, managed_object_gone, mr);
  scheme_subtract_finalizer(o, rebox_willdone_object, mr);

  remove_managed(mr, o, NULL, NULL);
}
1403 
/* Closer adapter: applies a Racket-level callback (registered via
   unsafe-custodian-register) to the managed value, running inside the
   scheduler. */
static void call_registered_callback(Scheme_Object *v, void *callback)
{
  Scheme_Object *a[1];

  a[0] = v;

  scheme_start_in_scheduler();
  _scheme_apply_multi(callback, 1, a);
  scheme_end_in_scheduler();
}
1414 
/* (unsafe-custodian-register cust v callback at-exit? weak? [late?])
   Registers `v` with `cust` so that `callback` runs on shutdown.
   Returns a C pointer wrapping the custodian reference, or #f if the
   custodian is already shut down. */
static Scheme_Object *unsafe_custodian_register(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian_Reference *reg;
  Scheme_Custodian *cust = (Scheme_Custodian *)argv[0];
  Scheme_Object *val = argv[1];
  Scheme_Object *cb = argv[2];
  int close_at_exit = SCHEME_TRUEP(argv[3]);
  int weak_init = SCHEME_TRUEP(argv[4]);
  /* optional `late?` sixth argument is not used */

  /* Some checks, just to be polite */
  if (!SCHEME_CUSTODIANP(argv[0]))
    scheme_wrong_contract("unsafe-custodian-register", "custodian?", 0, argc, argv);
  if (!SCHEME_PROCP(cb))
    scheme_wrong_contract("unsafe-custodian-register", "procedure?", 2, argc, argv);

  if (!scheme_custodian_is_available(cust))
    return scheme_false;

  if (close_at_exit)
    reg = scheme_add_managed_close_on_exit(cust, val, call_registered_callback, cb);
  else
    reg = scheme_add_managed(cust, val, call_registered_callback, cb, !weak_init);

  return scheme_make_cptr(reg, NULL);
}
1441 
/* (unsafe-custodian-unregister v cptr) --- undoes a registration made
   by unsafe-custodian-register; `cptr` wraps the custodian reference
   that was returned there. */
static Scheme_Object *unsafe_custodian_unregister(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian_Reference *ref;

  ref = (Scheme_Custodian_Reference *)SCHEME_CPTR_VAL(argv[1]);
  scheme_remove_managed(ref, argv[0]);

  return scheme_void;
}
1451 
/* Shut down custodian `m` (or `main_custodian` when `m` is NULL) and,
   transitively, all of its descendant custodians, closing every object
   registered with each of them.

   If `cf` is non-NULL, it is invoked as `cf(o, f, data)` for each managed
   object in place of the object's normal closer, and threads are not
   killed.  If `cf` is NULL, managed threads are killed --- except that the
   current thread cannot be killed from here, so it is returned for the
   caller to kill/suspend at a safe point.

   Returns the thread that still needs to be killed by the caller (its own
   thread), or NULL. */
Scheme_Thread *scheme_do_close_managed(Scheme_Custodian *m, Scheme_Exit_Closer_Func cf)
{
  Scheme_Thread *kill_self = NULL;
  Scheme_Custodian *c, *start, *next_m;
  int i, is_thread;
  Scheme_Thread *the_thread;
  Scheme_Object *o;
  Scheme_Close_Custodian_Client *f;
  void *data;

  if (!m)
    m = main_custodian;

  if (m->shut_down)
    return NULL;

  m->shut_down = 1;

  /* Need to kill children first, transitively, so find
     last descendant. The family will be the global-list from
     m to this last descendant, inclusive. */
  for (c = m; CUSTODIAN_FAM(c->children); ) {
    for (c = CUSTODIAN_FAM(c->children); CUSTODIAN_FAM(c->sibling); ) {
      c = CUSTODIAN_FAM(c->sibling);
    }
  }

  start = m;
  m = c; /* walk backward from the last descendant up to `start` */
  while (1) {
    /* It matters that this loop starts at the top. See
       the m->count = i assignment below. */
    for (i = m->count; i--; ) {
      if (m->boxes[i]) {

	o = xCUSTODIAN_FAM(m->boxes[i]);

	f = m->closers[i];
	data = m->data[i];

	if (o && !cf && (SAME_TYPE(SCHEME_TYPE(o), scheme_thread_hop_type))) {
	  /* We've added an indirection and made it weak. See mr_hop note above. */
	  is_thread = 1;
	  the_thread = (Scheme_Thread *)WEAKIFIED(((Scheme_Thread_Custodian_Hop *)o)->p);
	} else {
	  is_thread = 0;
	  the_thread = NULL;
	}

	/* Sever the custodian's links to the object before closing it. */
	xCUSTODIAN_FAM(m->boxes[i]) = NULL;
	CUSTODIAN_FAM(m->mrefs[i]) = NULL;

	/* Set m->count to i in case a GC happens while
	   the closer is running. */
	m->count = i;

        if (!o) {
          /* weak link disappeared */
        } else if (is_thread && !the_thread) {
	  /* Thread is already collected, so skip */
	} else if (cf) {
	  /* Exit-closer mode: run the exit closer instead of `f`. */
	  cf(o, f, data);
	} else {
	  if (is_thread) {
	    if (the_thread) {
	      /* Only kill the thread if it has no other custodians */
	      if (SCHEME_NULLP(the_thread->extra_mrefs)) {
		if (do_kill_thread(the_thread))
		  kill_self = the_thread;
	      } else {
		Scheme_Custodian_Reference *mref;

		mref = m->mrefs[i];
		if (mref == the_thread->mref) {
		  /* Designate a new main custodian for the thread */
		  mref = (Scheme_Custodian_Reference *)SCHEME_CAR(the_thread->extra_mrefs);
		  the_thread->mref = mref;
		  the_thread->extra_mrefs = SCHEME_CDR(the_thread->extra_mrefs);
#ifdef MZ_PRECISE_GC
		  GC_register_thread(the_thread, CUSTODIAN_FAM(mref));
#endif
		} else {
		  /* Just remove mref from the list of extras */
		  Scheme_Object *l, *prev = NULL;
		  for (l = the_thread->extra_mrefs; 1; l = SCHEME_CDR(l)) {
		    if (SAME_OBJ(SCHEME_CAR(l), (Scheme_Object *)mref)) {
		      if (prev)
			SCHEME_CDR(prev) = SCHEME_CDR(l);
		      else
			the_thread->extra_mrefs = SCHEME_CDR(l);
		      break;
		    }
		    prev = l;
		  }
		}
	      }
	    }
	  } else if (f) {
	    f(o, data);
	  }
	}
      }
    }

#ifdef MZ_PRECISE_GC
    /* 3m: clear the values of all custodian boxes chained on this
       custodian (each chain entry is a weak box holding the box). */
    {
      Scheme_Object *pr = m->cust_boxes, *wb;
      Scheme_Custodian_Box *cb;
      while (pr) {
        wb = SCHEME_CAR(pr);
        cb = (Scheme_Custodian_Box *)SCHEME_BOX_VAL(wb);
        if (cb) cb->v = NULL;
        pr = SCHEME_CDR(pr);
      }
      m->cust_boxes = NULL;
    }
#endif

    /* Run registered post-shutdown callbacks in scheduler mode. */
    if (SCHEME_PAIRP(m->post_callbacks)) {
      Scheme_Object *proc;
      scheme_start_in_scheduler();
      while (SCHEME_PAIRP(m->post_callbacks)) {
        proc = SCHEME_CAR(m->post_callbacks);
        m->post_callbacks = SCHEME_CDR(m->post_callbacks);
        _scheme_apply_multi(proc, 0, NULL);
      }
      scheme_end_in_scheduler();
    }

    /* Drop all managed-object arrays now that everything is closed. */
    m->count = 0;
    m->alloc = 0;
    m->elems = 0;
    m->boxes = NULL;
    m->closers = NULL;
    m->data = NULL;
    *m->data_ptr = NULL;
    m->mrefs = NULL;
    m->shut_down = 1;

    if (SAME_OBJ(m, start)) {
      adjust_limit_table(m);
      break;
    }
    /* Grab the previous custodian in the global list before unlinking. */
    next_m = CUSTODIAN_FAM(m->global_prev);

    /* Remove this custodian from its parent */
    adjust_custodian_family(m, m);

    adjust_limit_table(m);

    m = next_m;
  }

#ifdef MZ_USE_FUTURES
  scheme_future_check_custodians();
#endif

  return kill_self;
}
1611 
static void do_close_managed(Scheme_Custodian *m)
/* The trick is that we may need to kill the thread
   that is running us. If so, delay it to the very
   end. */
{
  Scheme_Thread *self_kill;

  self_kill = scheme_do_close_managed(m, NULL);
  if (!self_kill)
    return;

  /* The shutdown included the current thread; kill or suspend it now. */
  if (scheme_current_thread->suspend_to_kill)
    suspend_thread(scheme_current_thread);
  else
    scheme_thread_block(0.0);
}
1625 
void scheme_close_managed(Scheme_Custodian *m)
{
  /* Shut down `m` (and descendants), then yield once so that any
     threads killed along the way actually get a chance to die. */
  do_close_managed(m);

  scheme_thread_block(0);
  scheme_current_thread->ran_some = 1;
}
1634 
/* Racket `make-custodian`: creates a custodian subordinate to the given
   one (or to the current-custodian parameter when called with no args). */
static Scheme_Object *make_custodian(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian *parent;

  if (!argc) {
    parent = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);
  } else {
    if (!SCHEME_CUSTODIANP(argv[0]))
      scheme_wrong_contract("make-custodian", "custodian?", 0, argc, argv);
    parent = (Scheme_Custodian *)argv[0];
  }

  if (parent->shut_down)
    scheme_contract_error("make-custodian",
                          "the custodian has been shut down",
                          "custodian", 1, parent,
                          NULL);

  return (Scheme_Object *)scheme_make_custodian(parent);
}
1654 
static Scheme_Object *unsafe_make_custodian_at_root(int argc, Scheme_Object *argv[])
{
  /* Create a custodian parented at the root, ignoring the
     current-custodian parameter and all arguments. */
  Scheme_Custodian *c;

  c = scheme_make_custodian(NULL);
  return (Scheme_Object *)c;
}
1659 
/* Racket `custodian?` predicate. */
static Scheme_Object *custodian_p(int argc, Scheme_Object *argv[])
{
  if (SCHEME_CUSTODIANP(argv[0]))
    return scheme_true;
  return scheme_false;
}
1664 
/* Racket `custodian-shutdown-all`. */
static Scheme_Object *custodian_close_all(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian *c;

  if (!SCHEME_CUSTODIANP(argv[0]))
    scheme_wrong_contract("custodian-shutdown-all", "custodian?", 0, argc, argv);

  c = (Scheme_Custodian *)argv[0];
  scheme_close_managed(c);

  return scheme_void;
}
1674 
/* Racket `custodian-shut-down?`. */
static Scheme_Object *custodian_shut_down_p(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian *c;

  if (!SCHEME_CUSTODIANP(argv[0]))
    scheme_wrong_contract("custodian-shut-down?", "custodian?", 0, argc, argv);

  c = (Scheme_Custodian *)argv[0];
  if (c->shut_down)
    return scheme_true;
  return scheme_false;
}
1684 
scheme_custodian_extract_reference(Scheme_Custodian_Reference * mr)1685 Scheme_Custodian* scheme_custodian_extract_reference(Scheme_Custodian_Reference *mr)
1686 {
1687   return CUSTODIAN_FAM(mr);
1688 }
1689 
int scheme_custodian_is_shut_down(Scheme_Custodian *c)
{
  /* Report whether custodian `c` has already been shut down. */
  int down;

  down = c->shut_down;
  return down;
}
1694 
static Scheme_Object *extract_thread(Scheme_Object *o)
{
  /* Custodian extractor for thread hops: follow the weak indirection
     to the underlying thread (NULL if it has been collected). */
  Scheme_Thread_Custodian_Hop *hop = (Scheme_Thread_Custodian_Hop *)o;

  return (Scheme_Object *)WEAKIFIED(hop->p);
}
1699 
scheme_init_custodian_extractors()1700 void scheme_init_custodian_extractors()
1701 {
1702   if (!extractors) {
1703     int n;
1704     n = scheme_num_types();
1705     REGISTER_SO(extractors);
1706     extractors = MALLOC_N_ATOMIC(Scheme_Custodian_Extractor, n);
1707     memset(extractors, 0, sizeof(Scheme_Custodian_Extractor) * n);
1708     extractors[scheme_thread_hop_type] = extract_thread;
1709   }
1710 }
1711 
void scheme_add_custodian_extractor(Scheme_Type t, Scheme_Custodian_Extractor e)
{
  /* Register extractor `e` for type `t`; type 0 is ignored. */
  if (!t)
    return;

  extractors[t] = e;
}
1718 
/* Racket `custodian-managed-list`: returns a list of the objects (and
   child custodians) managed by argv[0], provided argv[1] is a
   (transitive) parent of argv[0]. */
static Scheme_Object *custodian_to_list(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian *m, *m2, *c;
  Scheme_Object **hold, *o;
  int i, j, cnt, kids;
  Scheme_Type type;
  Scheme_Custodian_Extractor ex;

  if (!SCHEME_CUSTODIANP(argv[0]))
    scheme_wrong_contract("custodian-managed-list", "custodian?", 0, argc, argv);
  if (!SCHEME_CUSTODIANP(argv[1]))
    scheme_wrong_contract("custodian-managed-list", "custodian?", 1, argc, argv);

  m = (Scheme_Custodian *)argv[0];
  m2 = (Scheme_Custodian *)argv[1];

  /* Check that the second manages the first: */
  c = CUSTODIAN_FAM(m->parent);
  while (c && NOT_SAME_OBJ(m2, c)) {
    c = CUSTODIAN_FAM(c->parent);
  }
  if (!c) {
    scheme_contract_error("custodian-managed-list",
                                 "the second custodian does not "
                                 "manage the first custodian",
                                 "first custodian", 1, argv[0],
                                 "second custodian", 1, argv[1],
                                 NULL);
  }

  /* Count children: */
  kids = 0;
  for (c = CUSTODIAN_FAM(m->children); c; c = CUSTODIAN_FAM(c->sibling)) {
    kids++;
  }

  /* Do all allocation first, since custodian links are weak.
     Furthermore, allocation may trigger collection of an otherwise
     unreferenced custodian, folding its items into this one,
     so loop until we've allocated enough. */
  do {
    cnt = m->count;
    hold = MALLOC_N(Scheme_Object *, cnt + kids);
  } while (cnt < m->count);

  /* Put managed items into hold array: */
  for (i = m->count, j = 0; i--; ) {
    if (m->boxes[i]) {
      o = xCUSTODIAN_FAM(m->boxes[i]);

      if (o) {
        /* Some managed objects are indirections (e.g., thread hops);
           a registered extractor maps them to the user-visible value,
           which may itself have been collected (NULL). */
        type = SCHEME_TYPE(o);
        ex = extractors[type];
        if (ex) {
          o = ex(o);
        }

        if (o) {
          hold[j] = o;
          j++;
        }
      }
    }
  }
  /* Add kids: */
  for (c = CUSTODIAN_FAM(m->children); c; c = CUSTODIAN_FAM(c->sibling)) {
    hold[j] = (Scheme_Object *)c;
    j++;
  }

  /* Convert the array to a list: */
  return scheme_build_list(j, hold);
}
1792 
/* Racket `current-custodian` parameter procedure: gets/sets the
   MZCONFIG_CUSTODIAN slot, guarded by the custodian? predicate. */
static Scheme_Object *current_custodian(int argc, Scheme_Object *argv[])
{
  return scheme_param_config2("current-custodian",
                              scheme_make_integer(MZCONFIG_CUSTODIAN),
                              argc, argv,
                              -1, custodian_p, "custodian?", 0);
}
1800 
scheme_get_current_custodian()1801 Scheme_Custodian *scheme_get_current_custodian()
1802 {
1803   return (Scheme_Custodian *) current_custodian(0, NULL);
1804 }
1805 
/* Racket `make-custodian-box`: wraps argv[1] in a box whose value is
   visible only until the custodian argv[0] is shut down.

   Bug fix: the 3m pruning loop below previously tested
   `SCHEME_BOX_VAL(pr)` --- the list pair --- instead of
   `SCHEME_BOX_VAL(wb)`, the weak box just pulled out of the pair, so
   collected custodian boxes were not detected correctly. */
static Scheme_Object *make_custodian_box(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian_Box *cb;

  if (!SCHEME_CUSTODIANP(argv[0]))
    scheme_wrong_contract("make-custodian-box", "custodian?", 0, argc, argv);

  cb = MALLOC_ONE_TAGGED(Scheme_Custodian_Box);
  cb->so.type = scheme_cust_box_type;
  cb->cust = (Scheme_Custodian *)argv[0];
  cb->v = argv[1];

#ifdef MZ_PRECISE_GC
  /* 3m: chain a weak box for `cb` onto the custodian's list. */
  {
    Scheme_Object *wb, *pr, *prev;
    wb = GC_malloc_weak_box(cb, NULL, 0, 1);
    pr = scheme_make_raw_pair(wb, cb->cust->cust_boxes);
    cb->cust->cust_boxes = pr;
    cb->cust->num_cust_boxes++;

    /* The GC prunes the list of custodian boxes in accounting mode,
       but prune here in case accounting is never triggered. */
    if (cb->cust->num_cust_boxes > 2 * cb->cust->checked_cust_boxes) {
      prev = pr;
      pr = SCHEME_CDR(pr);
      while (pr) {
        wb = SCHEME_CAR(pr);
        /* Test the weak box's content (wb), not the pair (pr), to
           detect a custodian box that has been collected. */
        if (!SCHEME_BOX_VAL(wb)) {
          SCHEME_CDR(prev) = SCHEME_CDR(pr);
          --cb->cust->num_cust_boxes;
        } else {
          prev = pr;
        }
        pr = SCHEME_CDR(pr);
      }
      cb->cust->checked_cust_boxes = cb->cust->num_cust_boxes;
    }
  }
#else
  /* CGC: keep all custodian boxes in a growable global array that is
     compacted periodically (see scheme_clean_cust_box_list). */
  if (cust_box_count >= cust_box_alloc) {
    Scheme_Custodian_Box **cbs;
    if (!cust_box_alloc) {
      cust_box_alloc = 16;
      REGISTER_SO(cust_boxes);
    } else {
      cust_box_alloc = 2 * cust_box_alloc;
    }
    cbs = (Scheme_Custodian_Box **)scheme_malloc_atomic(cust_box_alloc * sizeof(Scheme_Custodian_Box *));
    if (cust_box_count)
      memcpy(cbs, cust_boxes, cust_box_count * sizeof(Scheme_Custodian_Box *));
    cust_boxes = cbs;
  }
  cust_boxes[cust_box_count++] = cb;
#endif

  return (Scheme_Object *)cb;
}
1865 
/* Racket `custodian-box-value`: the stored value, or #f once the
   box's custodian has been shut down. */
static Scheme_Object *custodian_box_value(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian_Box *box;

  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_cust_box_type))
    scheme_wrong_contract("custodian-box-value", "custodian-box?", 0, argc, argv);

  box = (Scheme_Custodian_Box *)argv[0];

  return box->cust->shut_down ? scheme_false : box->v;
}
1879 
/* Racket `custodian-box?` predicate. */
static Scheme_Object *custodian_box_p(int argc, Scheme_Object *argv[])
{
  return (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_cust_box_type)
          ? scheme_true
          : scheme_false);
}
1887 
static int cust_box_ready(Scheme_Object *o)
{
  /* A custodian box becomes ready once its custodian is shut down. */
  Scheme_Custodian_Box *cb = (Scheme_Custodian_Box *)o;

  return cb->cust->shut_down;
}
1892 
1893 
1894 #ifndef MZ_PRECISE_GC
/* CGC only: compact the global array of custodian boxes, dropping
   entries whose box object is no longer live according to the GC, and
   clearing the value of any surviving box whose custodian has been
   shut down.  NOTE(review): appears to be intended to run during/after
   collection (it consults GC_base/GC_is_marked) --- confirm caller. */
void scheme_clean_cust_box_list(void)
{
  int src = 0, dest = 0;
  Scheme_Custodian_Box *cb;
  void *b;

  while (src < cust_box_count) {
    cb = cust_boxes[src];
    b = GC_base(cb);
    if (b
#ifndef USE_SENORA_GC
        && GC_is_marked(b)
#endif
        ) {
      /* Box is still live: keep it, compacting in place. */
      cust_boxes[dest++] = cb;
      if (cb->v) {
        if (cb->cust->shut_down) {
          /* Custodian was shut down, so the box's value goes away. */
          cb->v = NULL;
        }
      }
    }
    src++;
  }
  cust_box_count = dest;
}
1920 
shrink_cust_box_array(void)1921 static void shrink_cust_box_array(void)
1922 {
1923   /* Call this function periodically to clean up. */
1924   if (cust_box_alloc > 128 && (cust_box_count * 4 < cust_box_alloc)) {
1925     Scheme_Custodian_Box **cbs;
1926     cust_box_alloc = cust_box_count * 2;
1927     cbs = (Scheme_Custodian_Box **)scheme_malloc_atomic(cust_box_alloc * sizeof(Scheme_Custodian_Box *));
1928     memcpy(cbs, cust_boxes, cust_box_count * sizeof(Scheme_Custodian_Box *));
1929     cust_boxes = cbs;
1930   }
1931 }
1932 #else
1933 # define shrink_cust_box_array() /* empty */
1934 # define clean_cust_box_list()   /* empty */
1935 #endif
1936 
/* Run every registered atexit closer on managed object `o`; then, when
   `f` is the special exit-chaining closer, invoke it directly. */
void scheme_run_atexit_closers(Scheme_Object *o, Scheme_Close_Custodian_Client *f, void *data)
{
  Scheme_Object *rest;

  rest = cust_closers;
  if (rest) {
    while (SCHEME_RPAIRP(rest)) {
      Scheme_Exit_Closer_Func closer;
      closer = (Scheme_Exit_Closer_Func)SCHEME_CAR(rest);
      closer(o, f, data);
      rest = SCHEME_CDR(rest);
    }
  }

  if (f == chain_close_at_exit)
    f(o, data);
}
1952 
/* Close every managed object as on process exit, using `alt` as the
   exit closer when provided, otherwise scheme_run_atexit_closers.
   Errors raised during the close are caught locally and ignored. */
void scheme_run_atexit_closers_on_all(Scheme_Exit_Closer_Func alt)
{
  mz_jmp_buf newbuf, *savebuf;

  /* scheme_start_atomic(); */
  /* Atomic would be needed if this was run to implement
     a custodian shutdown, but an actual custodian shutdown
     will have terminated everything else anyway. For a
     polite exit, other threads can run. */

  if (gcs_on_exit) {
    /* Two collections --- presumably so work queued by the first pass
       gets finished by the second; confirm against GC docs. */
    scheme_collect_garbage();
    scheme_collect_garbage();
  }

  log_peak_memory_use();

  /* Install a fresh error buffer so an error raised while closing
     returns here instead of escaping the exit path. */
  savebuf = scheme_current_thread->error_buf;
  scheme_current_thread->error_buf = &newbuf;
  if (!scheme_setjmp(newbuf)) {
    scheme_do_close_managed(NULL, alt ? alt : scheme_run_atexit_closers);
  }
  scheme_current_thread->error_buf = savebuf;
}
1977 
do_run_atexit_closers_on_all()1978 void do_run_atexit_closers_on_all()
1979 {
1980   scheme_run_atexit_closers_on_all(NULL);
1981 }
1982 
/* `unsafe-add-post-custodian-shutdown`: register a zero-arity callback
   to run after custodian shutdown; argv[1] (optional) picks the
   custodian, defaulting to the main custodian. */
static Scheme_Object *unsafe_add_post_custodian_shutdown(int argc, Scheme_Object *argv[])
{
  Scheme_Custodian *cust;
  Scheme_Object *cell;

  scheme_check_proc_arity("unsafe-add-post-custodian-shutdown", 0, 0, argc, argv);

  if (argc > 1) {
    if (!SCHEME_FALSEP(argv[1]) && !SCHEME_CUSTODIANP(argv[1]))
      scheme_wrong_contract("unsafe-add-post-custodian-shutdown", "custodian?", 1, argc, argv);
  }

  if ((argc > 1) && !SCHEME_FALSEP(argv[1]))
    cust = (Scheme_Custodian *)argv[1];
  else
    cust = main_custodian;

#if defined(MZ_USE_PLACES)
  /* In the original place, registering on the main custodian is a
     no-op (as in the original code). */
  if (RUNNING_IN_ORIGINAL_PLACE
      && (cust == main_custodian))
    return scheme_void;
#endif

  cell = scheme_make_pair(argv[0], cust->post_callbacks);
  cust->post_callbacks = cell;

  return scheme_void;
}
2011 
/* Install a replacement for the C library's atexit(); when set, it is
   preferred by scheme_atexit(). */
void scheme_set_atexit(Scheme_At_Exit_Proc p)
{
  replacement_at_exit = p;
}
2016 
/* Register exit closer `f`, to be run on every managed object at
   process exit (see scheme_run_atexit_closers). */
void scheme_add_atexit_closer(Scheme_Exit_Closer_Func f)
{
  if (!cust_closers) {
    /* First closer ever: hook process exit (only in the original
       place) and start the list. */
    if (RUNNING_IN_ORIGINAL_PLACE)
      scheme_atexit(do_run_atexit_closers_on_all);

    REGISTER_SO(cust_closers);
    cust_closers = scheme_null;
  }

  cust_closers = scheme_make_raw_pair((Scheme_Object *)f, cust_closers);
}
2030 
scheme_atexit(void (* func)(void))2031 int scheme_atexit(void (*func)(void))
2032 {
2033       if (replacement_at_exit) {
2034         return replacement_at_exit(func);
2035       } else {
2036 #ifdef USE_ON_EXIT_FOR_ATEXIT
2037         return on_exit(func, NULL);
2038 #else
2039         return atexit(func);
2040 #endif
2041       }
2042 }
2043 
/* Queue custodian `c` for shutdown at the next safe point
   (see check_scheduled_kills). */
void scheme_schedule_custodian_close(Scheme_Custodian *c)
{
  /* This procedure might be called by a garbage collector to register
     a resource-based kill. */

  if (!scheduled_kills) {
    REGISTER_SO(scheduled_kills);
    scheduled_kills = scheme_null;
  }

  scheduled_kills = scheme_make_pair((Scheme_Object *)c, scheduled_kills);

  /* Zero the fuel counter and disable the JIT stack boundary so the
     scheduler regains control promptly to perform the kill. */
  scheme_fuel_counter = 0;
  scheme_jit_stack_boundary = (uintptr_t)-1;
}
2058 
check_scheduled_kills()2059 static void check_scheduled_kills()
2060 {
2061   int force_gc = 0;
2062 
2063   if (scheme_no_stack_overflow) {
2064     /* don't shutdown something that may be in an atomic callback */
2065     return;
2066   }
2067 
2068   while (scheduled_kills && !SCHEME_NULLP(scheduled_kills)) {
2069     Scheme_Object *k;
2070     k = SCHEME_CAR(scheduled_kills);
2071     scheduled_kills = SCHEME_CDR(scheduled_kills);
2072     do_close_managed((Scheme_Custodian *)k);
2073     force_gc = 1;
2074   }
2075 
2076   if (force_gc) {
2077     /* A shutdown in response to a memory limit merits another major
2078        GC to clean up and reset the expected heap size. Otherwise, if
2079        another limit is put in place, it will be checked (on a major
2080        GC) even later, which will set the major-GC trigger even
2081        higher, and so on. */
2082     scheme_collect_garbage();
2083   }
2084 }
2085 
/* Raise a contract error (attributed to `who`) unless the current
   custodian manages thread `p`: the current custodian must appear on
   the parent chain of the thread's main custodian and of every extra
   custodian. */
static void check_current_custodian_allows(const char *who, Scheme_Thread *p)
{
  Scheme_Object *l;
  Scheme_Custodian_Reference *mref;
  Scheme_Custodian *m, *current;

  /* Check management of the thread: */
  current = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  /* Every extra custodian must have `current` on its parent chain. */
  for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
    mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
    m = CUSTODIAN_FAM(mref);
    while (NOT_SAME_OBJ(m, current)) {
      m = CUSTODIAN_FAM(m->parent);
      if (!m)
	goto bad;
    }
  }

  /* The main custodian must also have `current` on its parent chain;
     a missing reference or collected custodian counts as allowed. */
  mref = p->mref;
  if (!mref)
    return;
  m = CUSTODIAN_FAM(mref);
  if (!m)
    return;

  while (NOT_SAME_OBJ(m, current)) {
    m = CUSTODIAN_FAM(m->parent);
    if (!m)
      goto bad;
  }

  return;

 bad:
  scheme_contract_error(who,
                        "the current custodian does not "
                        "solely manage the specified thread",
                        "thread", 1, p,
                        NULL);
}
2127 
/* Full teardown: close all managed objects, free dynamically loaded
   extensions, and (under precise GC) release all GC-owned memory. */
void scheme_free_all(void)
{
  scheme_do_close_managed(NULL, NULL);
  scheme_free_dynamic_extensions();
#ifdef MZ_PRECISE_GC
  GC_free_all();
#endif
}
2136 
2137 /*========================================================================*/
2138 /*                              plumbers                                  */
2139 /*========================================================================*/
2140 
2141 #define FLUSH_HANDLE_FLAGS(h) MZ_OPT_HASH_KEY(&((Scheme_Small_Object *)h)->iso)
2142 
get_plumber_handles(Scheme_Plumber * p)2143 Scheme_Object *get_plumber_handles(Scheme_Plumber *p)
2144 {
2145   Scheme_Object *v, *r = scheme_null;
2146   Scheme_Bucket_Table *bt;
2147   Scheme_Hash_Table *ht;
2148   int i;
2149 
2150   bt = p->weak_handles;
2151   if (bt) {
2152     for (i = bt->size; i--; ) {
2153       if (bt->buckets[i]) {
2154         v = (Scheme_Object *)HT_EXTRACT_WEAK(bt->buckets[i]->key);
2155         if (v) {
2156           r = scheme_make_pair(v, r);
2157           SCHEME_USE_FUEL(1);
2158         }
2159       }
2160     }
2161   }
2162 
2163   ht = p->handles;
2164   for (i = ht->size; i--; ) {
2165     if (ht->vals[i])
2166       r = scheme_make_pair(ht->keys[i], r);
2167     SCHEME_USE_FUEL(1);
2168   }
2169 
2170   return r;
2171 }
2172 
/* Flush every handle registered with plumber `p` (or the initial
   plumber when `p` is NULL): output ports are flushed directly; any
   other handle value is applied as a callback with the handle as its
   argument.  When `catch_errors` is nonzero, an escaping error is
   trapped locally.  Returns 1 if an escape occurred, 0 otherwise. */
int scheme_flush_managed(Scheme_Plumber *p, int catch_errors)
{
  Scheme_Object *r, *h, *o, *a[1];
  Scheme_Thread *pt;
  mz_jmp_buf * volatile saved_error_buf;
  mz_jmp_buf new_error_buf;
  volatile int escaped = 0;

  if (!p) p = initial_plumber;

  if (catch_errors) {
    /* Redirect errors into a local buffer for the duration. */
    pt = scheme_current_thread;
    saved_error_buf = pt->error_buf;
    pt->error_buf = &new_error_buf;
  } else
    saved_error_buf = NULL;

  if (!scheme_setjmp(new_error_buf)) {
    /* Snapshot the handles up front --- presumably so callbacks that
       add/remove handles don't disturb the traversal; confirm. */
    r = get_plumber_handles(p);

    while (!SCHEME_NULLP(r)) {
      h = SCHEME_CAR(r);

      o = SCHEME_PTR2_VAL(h);

      if (SCHEME_OUTPORTP(o)) {
        scheme_flush_if_output_fds(o);
      } else {
        a[0] = h;
        (void)scheme_apply_multi(o, 1, a);
      }

      r = SCHEME_CDR(r);
    }
  } else {
    escaped = 1;
  }

  if (catch_errors)
    scheme_current_thread->error_buf = saved_error_buf;

  return escaped;
}
2216 
/* Racket `make-plumber`: allocate a fresh plumber with an empty
   (strong) handle table. */
static Scheme_Object *make_plumber(int argc, Scheme_Object *argv[])
{
  Scheme_Plumber *plumber;
  Scheme_Hash_Table *handles;

  plumber = MALLOC_ONE_TAGGED(Scheme_Plumber);
  plumber->so.type = scheme_plumber_type;

  handles = scheme_make_hash_table(SCHEME_hash_ptr);
  plumber->handles = handles;

  return (Scheme_Object *)plumber;
}
2230 
/* Racket `plumber?` predicate. */
static Scheme_Object *plumber_p(int argc, Scheme_Object *argv[])
{
  if (SCHEME_PLUMBERP(argv[0]))
    return scheme_true;
  return scheme_false;
}
2235 
/* Racket `plumber-flush-all`: errors from callbacks propagate. */
static Scheme_Object *plumber_flush_all(int argc, Scheme_Object *argv[])
{
  Scheme_Plumber *p;

  if (!SCHEME_PLUMBERP(argv[0]))
    scheme_wrong_contract("plumber-flush-all", "plumber?", 0, argc, argv);

  p = (Scheme_Plumber *)argv[0];
  scheme_flush_managed(p, 0);

  return scheme_void;
}
2245 
/* Register `proc_or_port` with plumber `p` (default: the current
   plumber parameter) and return the new flush handle.  A weak
   registration does not keep the handle alive. */
Scheme_Object *scheme_add_flush(Scheme_Plumber *p, Scheme_Object *proc_or_port, int weak_flush)
{
  Scheme_Object *handle;

  if (!p)
    p = (Scheme_Plumber *)scheme_get_param(scheme_current_config(), MZCONFIG_PLUMBER);

  handle = scheme_alloc_object();
  handle->type = scheme_plumber_handle_type;
  SCHEME_PTR1_VAL(handle) = (Scheme_Object *)p;
  SCHEME_PTR2_VAL(handle) = proc_or_port;

  if (!weak_flush) {
    scheme_hash_set(p->handles, handle, scheme_true);
  } else {
    /* Mark the handle as weakly registered, creating the weak table
       on first use. */
    FLUSH_HANDLE_FLAGS(handle) |= 0x1;
    if (!p->weak_handles) {
      Scheme_Bucket_Table *bt;
      bt = scheme_make_bucket_table(4, SCHEME_hash_weak_ptr);
      p->weak_handles = bt;
    }
    scheme_add_to_table(p->weak_handles, (const char *)handle, scheme_true, 0);
  }

  return handle;
}
2271 
/* Racket `plumber-add-flush!`: argv[2] (optional, true => weak). */
static Scheme_Object *plumber_add_flush(int argc, Scheme_Object *argv[])
{
  int weak;

  if (!SCHEME_PLUMBERP(argv[0]))
    scheme_wrong_contract("plumber-add-flush!", "plumber?", 0, argc, argv);
  scheme_check_proc_arity("plumber-add-flush!", 1, 1, argc, argv);

  weak = ((argc > 2) && SCHEME_TRUEP(argv[2]));
  return scheme_add_flush((Scheme_Plumber *)argv[0], argv[1], weak);
}
2281 
/* Deregister flush handle `h` from its plumber; a no-op if the handle
   was already removed (its plumber pointer is cleared on removal). */
void scheme_remove_flush(Scheme_Object *h)
{
  Scheme_Plumber *p;

  p = (Scheme_Plumber *)SCHEME_PTR1_VAL(h);
  if (!p)
    return;

  if (FLUSH_HANDLE_FLAGS(h) & 0x1) {
    /* Weak registration: blank out the weak bucket in place. */
    Scheme_Bucket *b;
    b = scheme_bucket_or_null_from_table(p->weak_handles, (char *)h, 0);
    if (b) {
      HT_EXTRACT_WEAK(b->key) = NULL;
      b->val = NULL;
    }
  } else {
    scheme_hash_set(p->handles, h, NULL);
  }

  SCHEME_PTR1_VAL(h) = NULL;
  SCHEME_PTR2_VAL(h) = NULL;
}
2302 
/* Racket `plumber-flush-handle-remove!`. */
static Scheme_Object *plumber_remove_flush(int argc, Scheme_Object *argv[])
{
  Scheme_Object *handle = argv[0];

  if (!SAME_TYPE(SCHEME_TYPE(handle), scheme_plumber_handle_type))
    scheme_wrong_contract("plumber-flush-handle-remove!", "plumber-handle?", 0, argc, argv);

  scheme_remove_flush(handle);

  return scheme_void;
}
2312 
/* Racket `plumber-flush-handle?` predicate. */
static Scheme_Object *plumber_flush_p(int argc, Scheme_Object *argv[])
{
  if (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_plumber_handle_type))
    return scheme_true;
  return scheme_false;
}
2319 
/* Racket `current-plumber` parameter procedure: gets/sets the
   MZCONFIG_PLUMBER slot, guarded by the plumber? predicate. */
static Scheme_Object *current_plumber(int argc, Scheme_Object *argv[])
{
  return scheme_param_config2("current-plumber",
                              scheme_make_integer(MZCONFIG_PLUMBER),
                              argc, argv,
                              -1, plumber_p, "plumber?", 0);
}
2327 
2328 /*========================================================================*/
2329 /*                             thread sets                                */
2330 /*========================================================================*/
2331 
2332 #define TSET_IL MZ_INLINE
2333 
static Scheme_Thread_Set *create_thread_set(Scheme_Thread_Set *parent)
{
  /* Allocate a new thread group under `parent`; the allocator zeroes
     all other fields. */
  Scheme_Thread_Set *ts;

  ts = MALLOC_ONE_TAGGED(Scheme_Thread_Set);
  ts->so.type = scheme_thread_set_type;
  ts->parent = parent;

  return ts;
}
2347 
/* Racket `make-thread-group`: parent defaults to the current
   thread-group parameter. */
static Scheme_Object *make_thread_set(int argc, Scheme_Object *argv[])
{
  Scheme_Thread_Set *parent;

  if (!argc) {
    parent = (Scheme_Thread_Set *)scheme_get_param(scheme_current_config(), MZCONFIG_THREAD_SET);
  } else {
    if (!(SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_set_type)))
      scheme_wrong_contract("make-thread-group", "thread-group?", 0, argc, argv);
    parent = (Scheme_Thread_Set *)argv[0];
  }

  return (Scheme_Object *)create_thread_set(parent);
}
2361 
/* Racket `thread-group?` predicate. */
static Scheme_Object *thread_set_p(int argc, Scheme_Object *argv[])
{
  if (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_set_type))
    return scheme_true;
  return scheme_false;
}
2368 
/* Racket `current-thread-group` parameter procedure: gets/sets the
   MZCONFIG_THREAD_SET slot, guarded by the thread-group? predicate. */
static Scheme_Object *current_thread_set(int argc, Scheme_Object *argv[])
{
  return scheme_param_config2("current-thread-group",
                              scheme_make_integer(MZCONFIG_THREAD_SET),
                              argc, argv,
                              -1, thread_set_p, "thread-group?", 0);
}
2376 
set_t_set_next(Scheme_Object * o,Scheme_Object * n)2377 XFORM_NONGCING static TSET_IL void set_t_set_next(Scheme_Object *o, Scheme_Object *n)
2378 {
2379   if (SCHEME_THREADP(o))
2380     ((Scheme_Thread *)o)->t_set_next = n;
2381   else
2382     ((Scheme_Thread_Set *)o)->next = n;
2383 }
2384 
set_t_set_prev(Scheme_Object * o,Scheme_Object * n)2385 XFORM_NONGCING static TSET_IL void set_t_set_prev(Scheme_Object *o, Scheme_Object *n)
2386 {
2387   if (SCHEME_THREADP(o))
2388     ((Scheme_Thread *)o)->t_set_prev = n;
2389   else
2390     ((Scheme_Thread_Set *)o)->prev = n;
2391 }
2392 
get_t_set_next(Scheme_Object * o)2393 XFORM_NONGCING static TSET_IL Scheme_Object *get_t_set_next(Scheme_Object *o)
2394 {
2395   if (SCHEME_THREADP(o))
2396     return ((Scheme_Thread *)o)->t_set_next;
2397   else
2398     return ((Scheme_Thread_Set *)o)->next;
2399 }
2400 
get_t_set_prev(Scheme_Object * o)2401 XFORM_NONGCING static TSET_IL Scheme_Object *get_t_set_prev(Scheme_Object *o)
2402 {
2403   if (SCHEME_THREADP(o))
2404     return ((Scheme_Thread *)o)->t_set_prev;
2405   else
2406     return ((Scheme_Thread_Set *)o)->prev;
2407 }
2408 
/* Add `s` (a thread or thread set) to the front of `t_set`'s member
   list.  A set that previously had no current member becomes runnable,
   so it must itself be (re)scheduled in its parent --- hence the loop
   up the parent chain. */
XFORM_NONGCING static void schedule_in_set(Scheme_Object *s, Scheme_Thread_Set *t_set)
{
  num_running_threads += 1;

  while (1) {
    /* Push `s` onto the front of the set's doubly-linked member list. */
    set_t_set_next(s, t_set->first);
    if (t_set->first)
      set_t_set_prev(t_set->first, s);
    t_set->first = s;
    if (t_set->current)
      break; /* set already had a current member; parents unaffected */

    t_set->current = s;

    /* Newly-runnable set: schedule it in its parent, too. */
    s = (Scheme_Object *)t_set;
    t_set = t_set->parent;
  }
}
2427 
/* Remove `s` (a thread or thread set) from `t_set`'s member list.  If
   removing `s` leaves the set without a current member, the set itself
   becomes unrunnable and must be unscheduled from its parent --- hence
   the loop up the parent chain. */
XFORM_NONGCING static void unschedule_in_set(Scheme_Object *s, Scheme_Thread_Set *t_set)
{
  Scheme_Object *prev;
  Scheme_Object *next;

  num_running_threads -= 1;

  while (1) {
    /* Unlink `s` from the doubly-linked member list. */
    prev = get_t_set_prev(s);
    next = get_t_set_next(s);

    if (!prev)
      t_set->first = next;
    else
      set_t_set_next(prev, next);
    if (next)
      set_t_set_prev(next, prev);
    set_t_set_prev(s, NULL);
    set_t_set_next(s, NULL);

    /* Move the set's current-member cursor off the removed element. */
    if (t_set->current == s) {
      if (next) {
	t_set->current = next;
      } else {
	t_set->current = t_set->first;
      }
    }
    if (t_set->search_start == s)
      t_set->search_start = t_set->current;

    if (t_set->current)
      break; /* set still has a member; parents unaffected */

    /* Set became empty: unschedule it from its parent, too. */
    s = (Scheme_Object *)t_set;
    t_set = t_set->parent;
  }
}
2465 
2466 /*========================================================================*/
2467 /*                      thread record creation                            */
2468 /*========================================================================*/
2469 
/* Allocate and initialize a thread record. When `scheme_main_thread`
   is not yet set, this creates the very first (main) thread and
   performs one-time global setup: GC roots, GC callbacks, and the
   top-level thread set. Otherwise the new thread is linked onto the
   front of the global thread chain. Any of `config`, `cells`,
   `init_break_cell`, and `mgr` may be NULL, in which case a default
   is created or looked up from `config`. `stack_base` is recorded as
   the C-stack start for the first thread. */
static Scheme_Thread *make_thread(Scheme_Config *config,
				  Scheme_Thread_Cell_Table *cells,
				  Scheme_Object *init_break_cell,
				  Scheme_Custodian *mgr,
          void *stack_base)
{
  Scheme_Thread *process;
  int prefix = 0; /* 1 => non-first thread; linked into the chain below */

  process = MALLOC_ONE_TAGGED(Scheme_Thread);

  process->so.type = scheme_thread_type;

  if (!scheme_main_thread) {
    /* Creating the first thread... */
    REGISTER_SO(scheme_current_thread);
    REGISTER_SO(scheme_main_thread);
    REGISTER_SO(scheme_first_thread);
    REGISTER_SO(thread_swap_callbacks);
    REGISTER_SO(thread_swap_out_callbacks);
    REGISTER_SO(swap_target);

    scheme_current_thread = process;
    scheme_first_thread = scheme_main_thread = process;
    process->prev = NULL;
    process->next = NULL;

    gc_prep_thread_chain = process;
    scheme_current_thread->gc_prep_chain = process;

    process->suspend_break = 1; /* until start-up finished */

    process->error_buf = NULL;

    thread_swap_callbacks = scheme_null;
    thread_swap_out_callbacks = scheme_null;

    /* Hook the thread system's callbacks into the collector: */
    GC_set_collect_start_callback(get_ready_for_GC);
    GC_set_collect_end_callback(done_with_GC);
#ifdef MZ_PRECISE_GC
    GC_set_collect_inform_callback(inform_GC);
#endif

#ifdef LINK_EXTENSIONS_BY_TABLE
    scheme_current_thread_ptr = &scheme_current_thread;
    scheme_fuel_counter_ptr = &scheme_fuel_counter;
#endif

#if defined(MZ_PRECISE_GC)
    GC_set_get_thread_stack_base(scheme_get_current_thread_stack_start);
#endif
    process->stack_start = stack_base;

  } else {
    prefix = 1;
  }

  process->engine_weight = 10000;

  /* Continuation-mark state starts out empty: */
  process->cont_mark_pos = (MZ_MARK_POS_TYPE)1;
  process->cont_mark_stack = 0;
  process->cont_mark_stack_segments = NULL;
  process->cont_mark_seg_count = 0;

  if (!config) {
    make_initial_config(process);
    config = process->init_config;
  } else {
    process->init_config = config;
    process->cell_values = cells;
  }

  if (init_break_cell) {
    process->init_break_cell = init_break_cell;
  } else {
    Scheme_Object *v;
    v = scheme_make_thread_cell(scheme_false, 1);
    process->init_break_cell = v;
  }

  /* Default custodian comes from the config: */
  if (!mgr)
    mgr = (Scheme_Custodian *)scheme_get_param(config, MZCONFIG_CUSTODIAN);

#ifdef MZ_PRECISE_GC
  GC_register_new_thread(process, mgr);
#endif

  {
    Scheme_Object *t_set;
    t_set = scheme_get_param(config, MZCONFIG_THREAD_SET);
    process->t_set_parent = (Scheme_Thread_Set *)t_set;
  }

  if (SAME_OBJ(process, scheme_first_thread)) {
    /* First thread: initialize the top-level thread set directly
       instead of going through schedule_in_set(). */
    REGISTER_SO(scheme_thread_set_top);
    scheme_thread_set_top = process->t_set_parent;
    scheme_thread_set_top->first = (Scheme_Object *)process;
    scheme_thread_set_top->current = (Scheme_Object *)process;
  } else
    schedule_in_set((Scheme_Object *)process, process->t_set_parent);

  scheme_init_jmpup_buf(&process->jmpup_buf);

  process->running = MZTHREAD_RUNNING;

  process->dw = NULL;

  process->block_descriptor = NOT_BLOCKED;
  process->block_check = NULL;
  process->block_needs_wakeup = NULL;
  process->sleep_end = 0;

  process->external_break = 0;

  process->ran_some = 1;

  scheme_gmp_tls_init(process->gmp_tls);

  if (prefix) {
    /* Non-first thread: link onto the front of the global chain, so
       the main thread stays at the tail. */
    process->next = scheme_first_thread;
    process->prev = NULL;
    process->next->prev = process;
    scheme_first_thread = process;
  }

  if (!buffer_init_size) /* => before place init */
    buffer_init_size = INIT_TB_SIZE;

  /* Tail-call argument buffer: */
  {
    Scheme_Object **tb;
    tb = MALLOC_N(Scheme_Object *, buffer_init_size);
    process->tail_buffer = tb;
  }
  process->tail_buffer_size = buffer_init_size;

  /* Runstack, sized by the thread-init-stack-size parameter: */
  {
    int init_stack_size;
    Scheme_Object *iss;

    iss = scheme_get_thread_param(config, cells, MZCONFIG_THREAD_INIT_STACK_SIZE);
    if (SCHEME_INTP(iss))
      init_stack_size = SCHEME_INT_VAL(iss);
    else if (SCHEME_BIGNUMP(iss))
      init_stack_size = 0x7FFFFFFF;
    else
      init_stack_size = DEFAULT_INIT_STACK_SIZE;

    /* A too-large stack size won't help performance.
       A too-small stack size is unsafe for certain kinds of
       tail calls. */
    if (init_stack_size > MAX_INIT_STACK_SIZE)
      init_stack_size = MAX_INIT_STACK_SIZE;
    if (init_stack_size < SCHEME_TAIL_COPY_THRESHOLD)
      init_stack_size = SCHEME_TAIL_COPY_THRESHOLD;

    process->runstack_size = init_stack_size;
    {
      Scheme_Object **sa;
      sa = scheme_alloc_runstack(init_stack_size);
      process->runstack_start = sa;
    }
    /* Runstack grows downward; start at the top: */
    process->runstack = process->runstack_start + init_stack_size;
  }

  process->runstack_saved = NULL;

#ifdef RUNSTACK_IS_GLOBAL
  if (!prefix) {
# ifndef MZ_PRECISE_GC
    /* Precise GC: we intentionally don't register MZ_RUNSTACK. See done_with_GC() */
    REGISTER_SO(MZ_RUNSTACK);
# endif
    REGISTER_SO(MZ_RUNSTACK_START);

    MZ_RUNSTACK = process->runstack;
    MZ_RUNSTACK_START = process->runstack_start;
    MZ_CONT_MARK_STACK = process->cont_mark_stack;
    MZ_CONT_MARK_POS = process->cont_mark_pos;
  }
#endif

  process->on_kill = NULL;

  process->user_tls = NULL;
  process->user_tls_size = 0;

  process->nester = process->nestee = NULL;

  process->mbox_first = NULL;
  process->mbox_last = NULL;
  process->mbox_sema = NULL;

  process->mref = NULL;
  process->extra_mrefs = NULL;

  /* A thread points to a lot of stuff, so it's bad to put a finalization
     on it, which is what registering with a custodian does. Instead, we
     register a weak indirection with the custodian. That way, the thread
     (and anything it points to) can be collected one GC cycle earlier.

     It's possible that the thread will be collected before the indirection
     record, so when we use the indirection (e.g., in custodian traversals),
     we'll need to check for NULL. */
  {
    Scheme_Thread_Custodian_Hop *hop;
    Scheme_Custodian_Reference *mref;
    hop = MALLOC_ONE_WEAK_RT(Scheme_Thread_Custodian_Hop);
    process->mr_hop = hop;
    hop->so.type = scheme_thread_hop_type;
    {
      Scheme_Thread *wp;
      wp = (Scheme_Thread *)WEAKIFY((Scheme_Object *)process);
      hop->p = wp;
    }

    mref = scheme_add_managed(mgr, (Scheme_Object *)hop, NULL, NULL, 0);
    process->mref = mref;
    process->extra_mrefs = scheme_null;

#ifndef MZ_PRECISE_GC
    scheme_weak_reference((void **)(void *)&hop->p);
#endif
  }

  return process;
}
2698 
/* Create the initial (main) thread; `stack_base` is the C-stack start
   recorded in the thread record. All other settings are defaulted by
   make_thread(). */
Scheme_Thread *scheme_make_thread(void *stack_base)
{
  /* Makes the initial process. */
  return make_thread(NULL, NULL, NULL, NULL, stack_base);
}
2704 
/* Replace `p`'s tail-call argument buffer with a freshly allocated
   one, letting an over-grown cached size decay back toward
   `buffer_init_size`. The old buffer is dropped before allocating
   the new one (see comment below). */
void scheme_realloc_tail_buffer(Scheme_Thread *p)
{
  GC_CAN_IGNORE Scheme_Object **tb;

  p->tail_buffer = NULL; /* so args aren't zeroed */

  /* Decay cached size back toward the initial size: */
  if (p->tail_buffer_size > (buffer_init_size << 1))
    p->tail_buffer_size = p->tail_buffer_size >> 1;

  tb = MALLOC_N(Scheme_Object *, p->tail_buffer_size);
  p->tail_buffer = tb;
}
2718 
/* Ensure that `p`'s tail-call argument buffer is at least
   `buffer_init_size` slots long, growing it if it was created when
   the initial size was smaller. */
static void check_tail_buffer_size(Scheme_Thread *p)
{
  Scheme_Object **bigger;

  if (p->tail_buffer_size >= buffer_init_size)
    return;

  bigger = MALLOC_N(Scheme_Object *, buffer_init_size);
  p->tail_buffer = bigger;
  p->tail_buffer_size = buffer_init_size;
}
2728 
scheme_set_tail_buffer_size(int s)2729 void scheme_set_tail_buffer_size(int s)
2730 {
2731   if (s > buffer_init_size) {
2732     Scheme_Thread *p;
2733 
2734     buffer_init_size = s;
2735 
2736     for (p = scheme_first_thread; p; p = p->next) {
2737       check_tail_buffer_size(p);
2738     }
2739   }
2740 }
2741 
scheme_tls_allocate()2742 int scheme_tls_allocate()
2743 {
2744   return tls_pos++;
2745 }
2746 
scheme_tls_set(int pos,void * v)2747 void scheme_tls_set(int pos, void *v)
2748 {
2749   Scheme_Thread *p = scheme_current_thread;
2750 
2751   if (p->user_tls_size <= pos) {
2752     int oldc = p->user_tls_size;
2753     void **old_tls = p->user_tls, **va;
2754 
2755     p->user_tls_size = tls_pos;
2756     va = MALLOC_N(void*, tls_pos);
2757     p->user_tls = va;
2758     while (oldc--) {
2759       p->user_tls[oldc] = old_tls[oldc];
2760     }
2761   }
2762 
2763   p->user_tls[pos] = v;
2764 }
2765 
scheme_tls_get(int pos)2766 void *scheme_tls_get(int pos)
2767 {
2768   Scheme_Thread *p = scheme_current_thread;
2769 
2770   if (p->user_tls_size <= pos)
2771     return NULL;
2772   else
2773     return p->user_tls[pos];
2774 }
2775 
/* Allocate a runstack of `len` Scheme_Object* slots. With precise GC,
   the block carries a tagged header (type tag, size in words, the
   scan limits updated by scheme_set_runstack_limits(), and an
   overflow canary checked by scheme_check_runstack_edge()); the
   returned pointer is just past that header. Without precise GC, it
   is a plain interior-pointer-friendly allocation. */
Scheme_Object **scheme_alloc_runstack(intptr_t len)
  XFORM_SKIP_PROC
{
#ifdef MZ_PRECISE_GC
  intptr_t sz;
  void **p;
  sz = sizeof(Scheme_Object*) * (len + RUNSTACK_HEADER_FIELDS);
  p = (void **)GC_malloc_tagged_allow_interior(sz);
  *(Scheme_Type *)(void *)p = scheme_rt_runstack; /* header: type tag */
  ((intptr_t *)(void *)p)[1] = gcBYTES_TO_WORDS(sz); /* header: total size in words */
  ((intptr_t *)(void *)p)[2] = 0;   /* header: scan start (see scheme_set_runstack_limits) */
  ((intptr_t *)(void *)p)[3] = len; /* header: scan end */
# define MZ_RUNSTACK_OVERFLOW_CANARY 0xFF77FF77
  ((intptr_t *)(void *)p)[4] = MZ_RUNSTACK_OVERFLOW_CANARY; /* header: overflow canary */
  return (Scheme_Object **)(p + RUNSTACK_HEADER_FIELDS);
#else
  return (Scheme_Object **)scheme_malloc_allow_interior(sizeof(Scheme_Object*) * len);
#endif
}
2795 
/* Tell the GC which part of runstack `rs` (of total length `len`) is
   live: slots in [start, end). */
void scheme_set_runstack_limits(Scheme_Object **rs, intptr_t len, intptr_t start, intptr_t end)
  XFORM_SKIP_PROC
/* With 3m, we can tell the GC not to scan the unused parts, and we
   can have the fixup function zero out the unused parts; that avoids
   writing and scanning pages that could be skipped for a minor
   GC. For CGC, we have to just clear out the unused part. */
{
  scheme_check_runstack_edge(rs);
#ifdef MZ_PRECISE_GC
  /* Update the scan limits in the header written by
     scheme_alloc_runstack(); write only on change, so an unchanged
     limit doesn't dirty the page. */
  if (((intptr_t *)(void *)rs)[-3] != start)
    ((intptr_t *)(void *)rs)[-3] = start;
  if (((intptr_t *)(void *)rs)[-2] != end)
    ((intptr_t *)(void *)rs)[-2] = end;
#else
  /* Conservative GC: zero the unused slots so stale pointers are not
     retained. */
  memset(rs, 0, start * sizeof(Scheme_Object *));
  memset(rs + end, 0, (len - end) * sizeof(Scheme_Object *));
#endif
}
2814 
/* Abort if the canary word in the runstack header (written by
   scheme_alloc_runstack) has been clobbered, which indicates that
   something wrote past the runstack's edge. No-op without precise GC,
   since only that build has the header. */
void scheme_check_runstack_edge(Scheme_Object **rs)
{
#ifdef MZ_PRECISE_GC
  intptr_t canary;

  canary = ((intptr_t *)rs)[-1];
  if (canary != MZ_RUNSTACK_OVERFLOW_CANARY) {
    scheme_log_abort("internal error: runstack overflow detected");
    abort();
  }
#endif
}
2824 
scheme_register_process_global(const char * key,void * val)2825 void *scheme_register_process_global(const char *key, void *val)
2826 {
2827   void *old_val = NULL;
2828   char *key2;
2829   Proc_Global_Rec *pg;
2830   intptr_t len;
2831 
2832   scheme_process_global_lock();
2833 
2834   for (pg = process_globals; pg; pg = pg->next) {
2835     if (!strcmp(pg->key, key)) {
2836       old_val = pg->val;
2837       break;
2838     }
2839   }
2840 
2841   if (!old_val && val) {
2842     len = strlen(key);
2843     key2 = (char *)malloc(len + 1);
2844     memcpy(key2, key, len + 1);
2845     pg = (Proc_Global_Rec *)malloc(sizeof(Proc_Global_Rec));
2846     pg->key = key2;
2847     pg->val = val;
2848     pg->next = process_globals;
2849     process_globals = pg;
2850   }
2851 
2852   scheme_process_global_unlock();
2853 
2854   return old_val;
2855 }
2856 
/* Primitive `unsafe-register-process-global`: takes a byte string and
   a cpointer, registers the pointer under the byte-string key via
   scheme_register_process_global(), and returns any previously
   registered value as a cpointer, or #f if there was none. */
static Scheme_Object *unsafe_register_process_global(int argc, Scheme_Object *argv[])
{
  void *prev;

  if (!SCHEME_BYTE_STRINGP(argv[0]))
    scheme_wrong_contract("unsafe-register-process-global", "bytes?", 0, argc, argv);
  if (!scheme_is_cpointer(argv[1]))
    scheme_wrong_contract("unsafe-register-process-global", "cpointer?", 1, argc, argv);

  prev = scheme_register_process_global(SCHEME_BYTE_STR_VAL(argv[0]),
                                        scheme_extract_pointer(argv[1]));

  return prev ? scheme_make_cptr(prev, NULL) : scheme_false;
}
2874 
/* Primitive `unsafe-get-place-table`: returns this place's shared
   miscellaneous hash table (created on first use). */
static Scheme_Object *unsafe_get_place_table(int argc, Scheme_Object *argv[])
{
  return (Scheme_Object *)scheme_get_place_table();
}
2879 
/* One-time setup: create the mutex guarding the process-global table
   (needed only in builds with OS-level threads, MZ_USE_MZRT). */
void scheme_init_process_globals(void)
{
#if defined(MZ_USE_MZRT)
  mzrt_mutex_create(&process_global_lock);
#endif
}
2886 
/* Acquire the process-global table lock. A no-op before the lock is
   created or in builds without MZ_USE_MZRT. */
void scheme_process_global_lock(void)
{
#if defined(MZ_USE_MZRT)
  if (process_global_lock)
    mzrt_mutex_lock(process_global_lock);
#endif
}
2894 
/* Release the process-global table lock; counterpart to
   scheme_process_global_lock(). */
void scheme_process_global_unlock(void)
{
#if defined(MZ_USE_MZRT)
  if (process_global_lock)
    mzrt_mutex_unlock(process_global_lock);
#endif
}
2902 
/* Return the place-local miscellaneous hash table, creating it
   lazily on first use. */
Scheme_Hash_Table *scheme_get_place_table(void)
{
  if (!place_local_misc_table)
    place_local_misc_table = scheme_make_hash_table(SCHEME_hash_ptr);
  return place_local_misc_table;
}
2909 
2910 /*========================================================================*/
2911 /*                    OS threads - not supported                          */
2912 /*========================================================================*/
2913 
/* Primitive `unsafe-os-thread-enabled?`: OS threads are not supported
   in this build, so always return #f. */
static Scheme_Object *unsafe_os_thread_enabled_p(int argc, Scheme_Object *argv[])
{
  return scheme_false;
}
2918 
/* Primitive `unsafe-call-in-os-thread`: checks its procedure
   argument, then raises exn:fail:unsupported, since OS threads are
   unavailable in this build. */
static Scheme_Object *unsafe_call_in_os_thread(int argc, Scheme_Object *argv[])
{
  scheme_check_proc_arity("unsafe-call-in-os-thread", 0, 0, argc, argv);
  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED, "unsafe-call-in-os-thread: " NOT_SUPPORTED_STR);
  ESCAPED_BEFORE_HERE;
}
2925 
/* Primitive `unsafe-make-os-semaphore`: raises exn:fail:unsupported,
   since OS threads are unavailable in this build. */
static Scheme_Object *unsafe_make_os_semaphore(int argc, Scheme_Object *argv[])
{
  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED, "unsafe-make-os-semaphore: " NOT_SUPPORTED_STR);
  ESCAPED_BEFORE_HERE;
}
2931 
/* Primitive `unsafe-os-semaphore-wait`: raises exn:fail:unsupported,
   since OS threads are unavailable in this build. */
static Scheme_Object *unsafe_os_semaphore_wait(int argc, Scheme_Object *argv[])
{
  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED, "unsafe-os-semaphore-wait: " NOT_SUPPORTED_STR);
  ESCAPED_BEFORE_HERE;
}
2937 
/* Primitive `unsafe-os-semaphore-post`: raises exn:fail:unsupported,
   since OS threads are unavailable in this build. */
static Scheme_Object *unsafe_os_semaphore_post(int argc, Scheme_Object *argv[])
{
  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED, "unsafe-os-semaphore-post: " NOT_SUPPORTED_STR);
  ESCAPED_BEFORE_HERE;
}
2943 
/* Primitive `unsafe-add-global-finalizer`: raises
   exn:fail:unsupported, since OS threads are unavailable in this
   build. */
static Scheme_Object *unsafe_add_global_finalizer(int argc, Scheme_Object *argv[])
{
  scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED, "unsafe-add-global-finalizer: " NOT_SUPPORTED_STR);
  ESCAPED_BEFORE_HERE;
}
2949 
2950 /*========================================================================*/
2951 /*                     thread creation and swapping                       */
2952 /*========================================================================*/
2953 
scheme_in_main_thread(void)2954 int scheme_in_main_thread(void)
2955 {
2956   return !scheme_current_thread->next;
2957 }
2958 
/* Another thread asked this one (via `return_marks_to`) for its
   continuation marks: record the current marks in `returned_marks`
   and redirect the next swap to the requesting thread. */
static void stash_current_marks()
{
  Scheme_Object *m;
  m = scheme_current_continuation_marks_as(NULL, scheme_current_thread->returned_marks);
  scheme_current_thread->returned_marks = m;
  swap_target = scheme_current_thread->return_marks_to;
  scheme_current_thread->return_marks_to = NULL;
}
2967 
/* Perform an actual thread swap: save the current thread's state via
   SETJMP, run swap-out callbacks, and LONGJMP into `swap_target`.
   When this thread is eventually swapped back in, control resumes at
   the SETJMP with a nonzero result and the swap-in path runs. */
static void do_swap_thread()
{
 start:

  scheme_zero_unneeded_rands(scheme_current_thread);

#if WATCH_FOR_NESTED_SWAPS
  if (swapping)
    printf("death\n");
  swapping = 1;
#endif

#ifdef MZ_USE_PLACES
  if (GC_is_using_master()) {
    scheme_log_abort("attempted thread swap during master GC use");
    abort();
  }
#endif

  if (!swap_no_setjmp && SETJMP(scheme_current_thread)) {
    /* We're back! */
    /* See also initial swap in in start_child() */
    thread_swap_count++;
#ifdef RUNSTACK_IS_GLOBAL
    /* Reload the globally cached stack registers from the thread record: */
    MZ_RUNSTACK = scheme_current_thread->runstack;
    MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
    MZ_CONT_MARK_STACK = scheme_current_thread->cont_mark_stack;
    MZ_CONT_MARK_POS = scheme_current_thread->cont_mark_pos;
#endif
    RESETJMP(scheme_current_thread);
#if WATCH_FOR_NESTED_SWAPS
    swapping = 0;
#endif
    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
    scheme_current_thread->gmp_tls_data = NULL;

    /* Run swap-in callbacks: */
    {
      Scheme_Object *l, *o;
      Scheme_Closure_Func f;
      for (l = thread_swap_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
	o = SCHEME_CAR(l);
	f = SCHEME_RAW_CLOS_FUNC(o);
	o = SCHEME_RAW_CLOS_DATA(o);
	f(o);
      }
    }
    /* If another thread took over this thread's runstack or
       continuation-mark stack while we were swapped out, copy the
       state back: */
    if ((scheme_current_thread->runstack_owner
	 && ((*scheme_current_thread->runstack_owner) != scheme_current_thread))
	|| (scheme_current_thread->cont_mark_stack_owner
	    && ((*scheme_current_thread->cont_mark_stack_owner) != scheme_current_thread))) {
      scheme_takeover_stacks(scheme_current_thread);
    }

    scheme_current_thread->current_start_process_msec = process_time_at_swap;

    /* If some thread is waiting for this thread's continuation marks,
       stash them and immediately swap to the requester: */
    if (scheme_current_thread->return_marks_to) {
      stash_current_marks();
      goto start;
    }
  } else {
    Scheme_Thread *new_thread = swap_target;

    /* Update process-time accounting (sampled, to keep swaps cheap): */
    if ((!scheme_fuel_counter) || (++process_time_skips >= 100)) {
      intptr_t cpm;
      cpm = scheme_get_process_milliseconds();
      scheme_current_thread->accum_process_msec += (cpm - scheme_current_thread->current_start_process_msec);
      process_time_at_swap = cpm;
      process_time_skips = 0;
    }

    swap_target = NULL;

    swap_no_setjmp = 0;

    /* We're leaving... */

    /* Run swap-out callbacks: */
    {
      Scheme_Object *l, *o;
      Scheme_Closure_Func f;
      for (l = thread_swap_out_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
	o = SCHEME_CAR(l);
	f = SCHEME_RAW_CLOS_FUNC(o);
	o = SCHEME_RAW_CLOS_DATA(o);
	f(o);
      }
    }

    /* Cache the break-enabled state in the thread record: */
    if (scheme_current_thread->init_break_cell) {
      int cb;
      cb = can_break_param(scheme_current_thread);
      scheme_current_thread->can_break_at_swap = cb;
    }
    {
      GC_CAN_IGNORE void *data;
      data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
      scheme_current_thread->gmp_tls_data = data;
    }
#ifdef RUNSTACK_IS_GLOBAL
    /* Save the globally cached stack registers into the thread record: */
    scheme_current_thread->runstack = MZ_RUNSTACK;
    scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
    scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
    scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
#endif

#ifdef MZ_USE_FUTURES
    scheme_use_rtcall = new_thread->futures_slow_path_tracing;
#endif

    scheme_current_thread = new_thread;
    /* Make sure the incoming thread is on the GC-preparation chain: */
    if (!new_thread->gc_prep_chain) {
      new_thread->gc_prep_chain = gc_prep_thread_chain;
      gc_prep_thread_chain = new_thread;
    }

    /* Fixup current pointers in thread sets */
    if (!scheme_current_thread->return_marks_to) {
      Scheme_Thread_Set *t_set = new_thread->t_set_parent;
      t_set->current = (Scheme_Object *)new_thread;
      while (t_set->parent) {
	t_set->parent->current = (Scheme_Object *)t_set;
	t_set = t_set->parent;
      }
    }

    LONGJMP(scheme_current_thread);
  }
}
3095 
/* Swap out the current thread and swap in `new_thread`. The local is
   cleared before the swap (as in select_thread), presumably so it
   doesn't retain the thread across the swap. */
void scheme_swap_thread(Scheme_Thread *new_thread)
{
  swap_target = new_thread;
  new_thread = NULL;
  do_swap_thread();
}
3102 
/* Pick the next thread to run and swap to it. A candidate is first
   suggested by walking the thread-set tree; if that candidate is not
   actually runnable, the global thread chain is scanned for one that
   is. Handles the corner case where nothing is runnable. */
static void select_thread()
{
  Scheme_Thread *new_thread;
  Scheme_Object *o;
  Scheme_Thread_Set *t_set;

  /* Try to pick a next thread to avoid DOS attacks
     through whatever kinds of things call select_thread() */
  o = (Scheme_Object *)scheme_thread_set_top;
  while (!SCHEME_THREADP(o)) {
    t_set = (Scheme_Thread_Set *)o;
    o = get_t_set_next(t_set->current);
    if (!o)
      o = t_set->first;
  }
  /* It's possible that o won't work out. So o is a suggestion for the
     new thread, but the loop below will pick a definitely suitable
     thread. */

  new_thread = (Scheme_Thread *)o;
  do {
    if (!new_thread)
      new_thread = scheme_first_thread;

    /* Can't swap in a thread with a nestee: */
    while (new_thread
	   && (new_thread->nestee
	       || (new_thread->running & MZTHREAD_SUSPENDED)
	       /* USER_SUSPENDED should only happen if new_thread is the main thread
		  or if the thread has MZTHREAD_NEED_SUSPEND_CLEANUP */
	       || ((new_thread->running & MZTHREAD_USER_SUSPENDED)
		   && !(new_thread->running & MZTHREAD_NEED_SUSPEND_CLEANUP)))) {
      new_thread = new_thread->next;
    }

    if (!new_thread && !o) {
      /* The main thread must be blocked on a nestee, and everything
	 else is suspended. But we have to go somewhere.  Weakly
	 resume the main thread's innermost nestee. If it's
         suspended by the user, then we've deadlocked. */
      new_thread = scheme_main_thread;
      while (new_thread->nestee) {
	new_thread = new_thread->nestee;
      }
      if ((new_thread->running & MZTHREAD_USER_SUSPENDED)
	  && !(new_thread->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
        if (post_system_idle()) {
          /* Aha! Someone was waiting for us to do nothing. Try again... */
        } else {
          scheme_console_printf("unbreakable deadlock\n");
          if (scheme_exit)
            scheme_exit(1);
          /* We really have to exit: */
          exit(1);
        }
      } else {
	scheme_weak_resume_thread(new_thread);
      }
      break;
    }
    o = NULL;
  } while (!new_thread);

  swap_target = new_thread;
  /* Clear the locals so they don't retain the thread across the swap: */
  new_thread = NULL;
  o = NULL;
  t_set = NULL;
  do_swap_thread();
}
3172 
/* Mark `r` as dead: post to the semaphores that other threads may be
   waiting on (dead event, sync), clear its running box, and drop
   references to per-thread state so it can be collected. */
static void thread_is_dead(Scheme_Thread *r)
{
  /* Wake up anyone waiting on the thread's dead event: */
  if (r->dead_box) {
    Scheme_Object *o;
    o = SCHEME_PTR_VAL(r->dead_box);
    scheme_post_sema_all(o);
  }
  if (r->sync_box) {
    scheme_post_sema_all(r->sync_box);
    r->sync_box = NULL;
  }
  if (r->running_box) {
    SCHEME_PTR_VAL(r->running_box) = NULL;
    r->running_box = NULL;
  }
  r->suspended_box = NULL;
  r->resumed_box = NULL;

  /* Drop references so the GC can reclaim them: */
  r->t_set_parent = NULL;
  r->dw = NULL;
  r->init_config = NULL;
  r->cell_values = NULL;
  r->init_break_cell = NULL;
  r->cont_mark_stack_segments = NULL;
  r->overflow = NULL;

  r->blocker = NULL;

  r->transitive_resumes = NULL;

  r->error_buf = NULL;

  r->spare_runstack = NULL;

  /* Discard any unreceived mailbox content: */
  r->mbox_first = NULL;
  r->mbox_last = NULL;
  r->mbox_sema = NULL;
}
3211 
/* Unlink thread `r` from all global bookkeeping: the global thread
   chain, its thread set, runstack/mark-stack ownership, GMP state,
   and custodian references. After this, `r` is dead and will not be
   scheduled again.

   Fix: the chain unlink wrote `r->next->prev` whenever `r->prev` was
   set, dereferencing NULL if a thread with a predecessor but no
   successor (a chain tail) were ever removed; the update is now
   guarded, preserving behavior in all currently reachable cases. */
static void remove_thread(Scheme_Thread *r)
{
  Scheme_Saved_Stack *saved;
  Scheme_Object *l;

  r->running = 0;

  /* Unlink from the global thread chain: */
  if (r->prev) {
    r->prev->next = r->next;
    if (r->next)
      r->next->prev = r->prev;
  } else if (r->next) {
    r->next->prev = NULL;
    scheme_first_thread = r->next;
  }
  r->next = r->prev = NULL;

  unschedule_in_set((Scheme_Object *)r, r->t_set_parent);

#ifdef RUNSTACK_IS_GLOBAL
  if (r == scheme_current_thread) {
    /* Save and clear the globally cached stack registers: */
    r->runstack = MZ_RUNSTACK;
    MZ_RUNSTACK = NULL;
    r->runstack_start = MZ_RUNSTACK_START;
    MZ_RUNSTACK_START = NULL;
    r->cont_mark_stack = MZ_CONT_MARK_STACK;
    r->cont_mark_pos = MZ_CONT_MARK_POS;
  }
#endif

  if (r->runstack_owner) {
    /* Drop ownership, if active, and clear the stack */
    if (r == *(r->runstack_owner)) {
      if (r->runstack_start) {
        scheme_set_runstack_limits(r->runstack_start, r->runstack_size, 0, 0);
        r->runstack_start = NULL;
      }
      for (saved = r->runstack_saved; saved; saved = saved->prev) {
        scheme_set_runstack_limits(saved->runstack_start, saved->runstack_size, 0, 0);
      }
      r->runstack_saved = NULL;
      *(r->runstack_owner) = NULL;
      r->runstack_owner = NULL;
    }
  } else {
    /* Only this thread used the runstack, so clear/free it
       as aggressively as possible */
    memset(r->runstack_start, 0, r->runstack_size * sizeof(Scheme_Object*));
    r->runstack_start = NULL;
    for (saved = r->runstack_saved; saved; saved = saved->prev) {
      memset(saved->runstack_start, 0, saved->runstack_size * sizeof(Scheme_Object*));
      saved->runstack_start = NULL;
    }
  }

  r->runstack = NULL;
  r->runstack_swapped = NULL;

  if (r->cont_mark_stack_owner
      && ((*r->cont_mark_stack_owner) == r)) {
    *r->cont_mark_stack_owner = NULL;
  }

  r->cont_mark_stack = 0;
  r->cont_mark_stack_owner = NULL;
  r->cont_mark_stack_swapped = NULL;

  /* Drop evaluation-state references so they can be collected: */
  r->ku.apply.tail_rator = NULL;
  r->ku.apply.tail_rands = NULL;
  r->tail_buffer = NULL;
  r->ku.multiple.array = NULL;
  r->values_buffer = NULL;

  thread_is_dead(r);

  /* In case we kill a thread while in a bignum operation: */
  scheme_gmp_tls_restore_snapshot(r->gmp_tls, r->gmp_tls_data,
                                  NULL, ((r == scheme_current_thread) ? 1 : 2));

  if (r == scheme_current_thread) {
    /* We're going to be swapped out immediately. */
    swap_no_setjmp = 1;
  } else
    RESETJMP(r);

  /* Release the custodian references that managed this thread: */
  scheme_remove_managed(r->mref, (Scheme_Object *)r->mr_hop);
  for (l = r->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
    scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l), (Scheme_Object *)r->mr_hop);
  }
  r->extra_mrefs = scheme_null;
}
3302 
/* Terminate the current thread. The main thread escapes/exits via
   exit_or_escape() instead of being removed; any other thread is
   unlinked and another thread is selected to run (the removed thread
   is never swapped back in). */
void scheme_end_current_thread(void)
{
  if (SAME_OBJ(scheme_current_thread, scheme_main_thread))
    exit_or_escape(scheme_current_thread);

  remove_thread(scheme_current_thread);

  thread_ended_with_activity = 1;

  /* If only the main thread remains, tell the embedding environment
     that multithreading is over: */
  if (scheme_notify_multithread && !scheme_first_thread->next) {
    scheme_notify_multithread(0);
    have_activity = 0;
  }

  select_thread();
}
3319 
/* Body of a newly created thread. The SETJMP records the child's jump
   buffer; when the scheduler LONGJMPs here for the initial swap-in,
   the swap-in bookkeeping runs, the thread's thunk is applied, and
   when it returns (with no meta continuation pending) the thread is
   ended. On the creating thread's side, SETJMP returns 0 and this
   function simply returns. */
static void start_child(Scheme_Thread * volatile child,
			Scheme_Object * volatile child_eval)
{
  if (SETJMP(child)) {
    /* Initial swap in: */
    Scheme_Object * volatile result = NULL;

    thread_swap_count++;
#ifdef RUNSTACK_IS_GLOBAL
    /* Load the globally cached stack registers from the child's record: */
    MZ_RUNSTACK = scheme_current_thread->runstack;
    MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
    MZ_CONT_MARK_STACK = scheme_current_thread->cont_mark_stack;
    MZ_CONT_MARK_POS = scheme_current_thread->cont_mark_pos;
#endif
    scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
    scheme_current_thread->gmp_tls_data = NULL;
    /* Run swap-in callbacks (as in do_swap_thread): */
    {
      Scheme_Object *l, *o;
      Scheme_Closure_Func f;
      for (l = thread_swap_callbacks; SCHEME_RPAIRP(l); l = SCHEME_CDR(l)) {
	o = SCHEME_CAR(l);
	f = SCHEME_RAW_CLOS_FUNC(o);
	o = SCHEME_RAW_CLOS_DATA(o);
	f(o);
      }
    }

    scheme_current_thread->current_start_process_msec = process_time_at_swap;

    RESETJMP(child);

#if WATCH_FOR_NESTED_SWAPS
    swapping = 0;
#endif

    if (scheme_current_thread->running & MZTHREAD_KILLED) {
      /* This thread is dead! Give up now. */
      exit_or_escape(scheme_current_thread);
    }

    /* If some thread is waiting for this thread's continuation marks,
       deliver them before running the thunk: */
    if (scheme_current_thread->return_marks_to) {
      stash_current_marks();
      do_swap_thread();
    }

    {
      mz_jmp_buf newbuf;
      scheme_current_thread->error_buf = &newbuf;
      if (!scheme_setjmp(newbuf)) {
	/* Run the main thunk: */
	/* (checks for break before doing anything else) */
	result = scheme_apply_thread_thunk(child_eval);
      }
    }

    /* !! At this point, scheme_current_thread can turn out to be a
       different thread, which invoked the original thread's
       continuation. */

    /* If we still have a meta continuation, then it means we
       should be resuming at a prompt, not exiting. */
    while (scheme_current_thread->meta_continuation) {
      Scheme_Thread *p = scheme_current_thread;
      Scheme_Overflow *oflow;

      p->cjs.val = result;

      if (!SAME_OBJ(p->meta_continuation->prompt_tag, scheme_default_prompt_tag)) {
        scheme_signal_error("thread ended with meta continuation that isn't for the default prompt");
      } else {
        Scheme_Meta_Continuation *mc;
        mc = p->meta_continuation;
        oflow = mc->overflow;
        p->meta_continuation = mc->next;
        if (!oflow->eot) {
          /* Jump back into the overflowed C-stack frame: */
          p->stack_start = oflow->stack_start;
          p->decompose_mc = mc;
          scheme_longjmpup(&oflow->jmp->cont);
        }
      }
    }

    scheme_end_current_thread();

    /* Shouldn't get here! */
    scheme_signal_error("bad thread switch");
  }
}
3408 
/* Entry point for starting a child thread's body; XFORM_SKIP_PROC
   keeps the GC transform away from this thin wrapper. Plain call
   rather than `return start_child(...)`: returning a void expression
   from a void function violates a C constraint (C11 6.8.6.4). */
void scheme_do_thread_start_child(Scheme_Thread *child, Scheme_Object *child_eval)
  XFORM_SKIP_PROC
{
  start_child(child, child_eval);
}
3414 
/* Create a new (not-yet-swapped-in) thread that will run child_thunk.
   NULL config/cells/break_cell arguments are defaulted from the
   current thread's state.  If normal_kill is 0, the thread is marked
   suspend-to-kill.  child_start is the C-stack base recorded for the
   new thread.  Returns the new thread object. */
static Scheme_Object *make_subprocess(Scheme_Object *child_thunk,
				      void *child_start,
				      Scheme_Config *config,
				      Scheme_Thread_Cell_Table *cells,
				      Scheme_Object *break_cell,
				      Scheme_Custodian *mgr,
				      int normal_kill)
{
  Scheme_Thread *child;
  int turn_on_multi;
  Scheme_Object *name_sym = NULL;

  /* If there was only one thread before this call, we need to tell
     the embedding app (below) that multithreading has started. */
  turn_on_multi = !scheme_first_thread->next;

  if (!config)
    config = scheme_current_config();
  if (!cells)
    cells = scheme_inherit_cells(NULL);
  if (!break_cell) {
    break_cell = scheme_current_break_cell();
    if (SAME_OBJ(break_cell, maybe_recycle_cell))
      maybe_recycle_cell = NULL;
  }

  /* Use child_thunk name, if any, for the thread name.
     (Get it before calling make_thread(), in case
     getting the name blocks.) */
  {
    const char *s;
    int len;

    s = scheme_get_proc_name(child_thunk, &len, -1);
    if (s)  {
      if (len < 0)
	name_sym = (Scheme_Object *)s; /* negative len: s is already a symbol */
      else
	name_sym = scheme_intern_exact_symbol(s, len);
    }
  }

  child = make_thread(config, cells, break_cell, mgr, child_start);
  if (name_sym)
    child->name = name_sym;

  /* Capture the break-enabled state for use when the child is swapped in: */
  {
    Scheme_Object *v;
    v = scheme_thread_cell_get(break_cell, cells);
    child->can_break_at_swap = SCHEME_TRUEP(v);
  }

  if (!normal_kill)
    child->suspend_to_kill = 1;

  child->stack_start = child_start;

  /* Sets the child's jmpbuf for swapping in later: */
  scheme_thread_start_child(child, child_thunk);

  if (scheme_notify_multithread && turn_on_multi) {
    scheme_notify_multithread(1);
    have_activity = 1;
  }

  SCHEME_USE_FUEL(1000);

  return (Scheme_Object *)child;
}
3482 
/* Create a thread with all defaults: current config, inherited cells,
   current break cell, current custodian, and normal kill behavior. */
Scheme_Object *scheme_thread(Scheme_Object *thunk)
{
  Scheme_Object *new_thread;

  new_thread = scheme_thread_w_details(thunk, NULL, NULL, NULL, NULL, 0);

  return new_thread;
}
3487 
/* Racket `thread` primitive: check that the argument is a zero-arity
   procedure and that the current custodian can host a new thread,
   then create the thread. */
static Scheme_Object *sch_thread(int argc, Scheme_Object *args[])
{
  Scheme_Object *t;

  scheme_check_proc_arity("thread", 0, 0, argc, args);
  scheme_custodian_check_available(NULL, "thread", "thread");

  t = scheme_thread(args[0]);

  return t;
}
3495 
/* `unsafe-thread-at-root`: create a thread under the root (main)
   custodian with a minimal parameterization and an empty thread-cell
   table, detached from the calling context. */
static Scheme_Object *unsafe_thread_at_root(int argc, Scheme_Object *args[])
{
  scheme_check_proc_arity("unsafe-thread-at-root", 0, 0, argc, args);

  return scheme_thread_w_details(args[0],
                                 scheme_minimal_config(),
                                 scheme_empty_cell_table(),
                                 NULL, /* default break cell */
                                 main_custodian,
                                 0);
}
3507 
/* Racket `thread/suspend-to-kill` primitive: like `thread`, but the
   created thread is suspended (rather than terminated) when its
   custodian is shut down. */
static Scheme_Object *sch_thread_nokill(int argc, Scheme_Object *args[])
{
  Scheme_Object *t;

  scheme_check_proc_arity("thread/suspend-to-kill", 0, 0, argc, args);
  scheme_custodian_check_available(NULL, "thread/suspend-to-kill", "thread");

  t = scheme_thread_w_details(args[0], NULL, NULL, NULL, NULL, 1);

  return t;
}
3515 
/* Racket `current-thread` primitive. */
static Scheme_Object *sch_current(int argc, Scheme_Object *args[])
{
  Scheme_Thread *self = scheme_current_thread;

  return (Scheme_Object *)self;
}
3520 
/* Racket `thread?` primitive. */
static Scheme_Object *thread_p(int argc, Scheme_Object *args[])
{
  if (SCHEME_THREADP(args[0]))
    return scheme_true;
  else
    return scheme_false;
}
3525 
/* Racket `thread-running?`: a thread is "running" when it is still
   alive and not user-suspended. */
static Scheme_Object *thread_running_p(int argc, Scheme_Object *args[])
{
  int state;

  if (!SCHEME_THREADP(args[0]))
    scheme_wrong_contract("thread-running?", "thread?", 0, argc, args);

  state = ((Scheme_Thread *)args[0])->running;

  if (MZTHREAD_STILL_RUNNING(state) && !(state & MZTHREAD_USER_SUSPENDED))
    return scheme_true;
  else
    return scheme_false;
}
3539 
/* Racket `thread-dead?`: true when the thread is no longer running. */
static Scheme_Object *thread_dead_p(int argc, Scheme_Object *args[])
{
  int running;

  if (!SCHEME_THREADP(args[0]))
    /* Fixed: the contract error previously reported "thread-running?"
       (copy-and-paste from thread_running_p); report this primitive's
       own name. */
    scheme_wrong_contract("thread-dead?", "thread?", 0, argc, args);

  running = ((Scheme_Thread *)args[0])->running;

  return MZTHREAD_STILL_RUNNING(running) ? scheme_false : scheme_true;
}
3551 
/* Sync-ready callback for a thread used as an event: ready (returns 1)
   when the thread is no longer running.  While it is still running,
   redirect the sync to the thread's "dead" event. */
static int thread_wait_done(Scheme_Object *p, Scheme_Schedule_Info *sinfo)
{
  int running = ((Scheme_Thread *)p)->running;
  if (MZTHREAD_STILL_RUNNING(running)) {
    /* Replace the direct thread reference with an event, so that
       the blocking thread can be dequeued: */
    Scheme_Object *evt;
    evt = scheme_get_thread_dead((Scheme_Thread *)p);
    scheme_set_sync_target(sinfo, evt, p, NULL, 0, 0, NULL);
    return 0;
  } else
    return 1;
}
3565 
/* Racket `thread-wait`: block (via `sync`) until the given thread is
   no longer running; returns void. */
static Scheme_Object *thread_wait(int argc, Scheme_Object *args[])
{
  Scheme_Thread *p;

  if (!SCHEME_THREADP(args[0]))
    scheme_wrong_contract("thread-wait", "thread?", 0, argc, args);

  p = (Scheme_Thread *)args[0];

  /* Only block if the thread hasn't finished already: */
  if (MZTHREAD_STILL_RUNNING(p->running)) {
    sch_sync(1, args);
  }

  return scheme_void;
}
3581 
/* C-level entry point for waiting on a thread; wraps the
   `thread-wait` primitive. */
void scheme_thread_wait(Scheme_Object *thread) {
  thread_wait(1, &thread);
}
3585 
/* Register thread objects as sync-able events, so `sync` (and
   thread-wait) can block on a thread until it terminates. */
static void register_thread_sync()
{
  scheme_add_evt(scheme_thread_type,
                 (Scheme_Ready_Fun)thread_wait_done,
                 NULL, NULL, 0);
}
3592 
/* Register f(data) to be called at thread swap-in.  Raw pairs are
   used because the car holds a C function pointer, which must not be
   treated as a Scheme object. */
void scheme_add_swap_callback(Scheme_Closure_Func f, Scheme_Object *data)
{
  Scheme_Object *p;

  p = scheme_make_raw_pair((Scheme_Object *)f, data);
  thread_swap_callbacks = scheme_make_raw_pair(p, thread_swap_callbacks);
}
3600 
/* Register f(data) to be called at thread swap-out.  The callback is
   boxed in a raw pair because the car is a C function pointer.
   NOTE(review): the list spine here uses scheme_make_pair, while
   scheme_add_swap_callback uses scheme_make_raw_pair for its spine —
   confirm the asymmetry is intentional. */
void scheme_add_swap_out_callback(Scheme_Closure_Func f, Scheme_Object *data)
{
  Scheme_Object *p;

  p = scheme_make_raw_pair((Scheme_Object *)f, data);
  thread_swap_out_callbacks = scheme_make_pair(p, thread_swap_out_callbacks);
}
3608 
3609 /**************************************************************************/
3610 /* Ensure that a new thread has a reasonable starting stack */
3611 
3612 #ifdef DO_STACK_CHECK
3613 # define THREAD_STACK_SPACE (STACK_SAFETY_MARGIN / 2)
3614 
/* Report whether the remaining C stack is too shallow to comfortably
   start a new thread.  The mzstkchk.h include expands to an `if`
   header that compares the current stack position against the limit
   (shifted by SCHEME_PLUS_STACK_DELTA); the braced block after the
   include is its "too shallow" branch. */
int scheme_is_stack_too_shallow()
{
# define SCHEME_PLUS_STACK_DELTA(x) ((x) - THREAD_STACK_SPACE)
# include "mzstkchk.h"
  {
    return 1;
  }
  return 0;
}
3624 
/* Stack-overflow continuation for scheme_thread_w_details: unpack the
   arguments stashed in the current thread's ku.k fields and retry
   make_subprocess on the new, deeper C stack. */
static Scheme_Object *thread_k(void)
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Object *thunk, *result, *break_cell;
  Scheme_Config *config;
  Scheme_Custodian *mgr;
  Scheme_Thread_Cell_Table *cells;
  int suspend_to_kill = p->ku.k.i1;

  thunk = (Scheme_Object *)p->ku.k.p1;
  config = (Scheme_Config *)p->ku.k.p2;
  mgr = (Scheme_Custodian *)p->ku.k.p3;
  /* p4 carries (cells . break_cell) packed by scheme_thread_w_details: */
  cells = (Scheme_Thread_Cell_Table *)SCHEME_CAR((Scheme_Object *)p->ku.k.p4);
  break_cell = SCHEME_CDR((Scheme_Object *)p->ku.k.p4);

  /* Clear the stash so the GC doesn't retain the arguments: */
  p->ku.k.p1 = NULL;
  p->ku.k.p2 = NULL;
  p->ku.k.p3 = NULL;
  p->ku.k.p4 = NULL;

  /* PROMPT_STACK(result) uses `result`'s address as a stack marker;
     its uninitialized value is never read. */
  result = make_subprocess(thunk, PROMPT_STACK(result),
			   config, cells, break_cell, mgr, !suspend_to_kill);

  /* Don't get rid of `result'; it keeps the
     Precise GC xformer from "optimizing" away
     the __gc_var_stack__ frame. */
  return result;
}
3653 
3654 #endif /* DO_STACK_CHECK */
3655 
/* Full-control thread creation: config/cells/break_cell/mgr may be
   NULL to use the current thread's defaults (see make_subprocess).
   If the C stack is too shallow, the arguments are stashed in the
   current thread's ku.k fields and the creation is retried via
   thread_k on a fresh stack. */
Scheme_Object *scheme_thread_w_details(Scheme_Object *thunk,
				       Scheme_Config *config,
				       Scheme_Thread_Cell_Table *cells,
				       Scheme_Object *break_cell,
				       Scheme_Custodian *mgr,
				       int suspend_to_kill)
{
  Scheme_Object *result;
#ifndef MZ_PRECISE_GC
  void *stack_marker;
#endif

#ifdef DO_STACK_CHECK
  /* Make sure the thread starts out with a reasonable stack size, so
     it doesn't thrash right away: */
  if (scheme_is_stack_too_shallow()) {
    Scheme_Thread *p = scheme_current_thread;

    p->ku.k.p1 = thunk;
    p->ku.k.p2 = config;
    p->ku.k.p3 = mgr;
    /* Pack cells and break_cell into one pair, since ku.k has only
       four pointer slots: */
    result = scheme_make_pair((Scheme_Object *)cells, break_cell);
    p->ku.k.p4 = result;
    p->ku.k.i1 = suspend_to_kill;

    return scheme_handle_stack_overflow(thread_k);
  }
#endif

  /* stack_marker is used only for its address via PROMPT_STACK: */
  result = make_subprocess(thunk, PROMPT_STACK(stack_marker),
			   config, cells, break_cell, mgr, !suspend_to_kill);

  /* Don't get rid of `result'; it keeps the
     Precise GC xformer from "optimizing" away
     the __gc_var_stack__ frame. */
  return result;
}
3693 
3694 /**************************************************************************/
3695 /* Nested threads */
3696 
/* Default exception handler installed for a nested thread: if this
   really is a nested thread, escape back to the nesting thread via
   the nested thread's error buffer, carrying the raised value. */
static Scheme_Object *def_nested_exn_handler(int argc, Scheme_Object *argv[])
{
  if (scheme_current_thread->nester) {
    Scheme_Thread *p = scheme_current_thread;
    p->cjs.jumping_to_continuation = (Scheme_Object *)scheme_current_thread;
    p->cjs.alt_full_continuation = NULL;
    p->cjs.val = argv[0];
    p->cjs.is_kill = 0;
    p->cjs.skip_dws = 0;
    scheme_longjmp(*p->error_buf, 1);
  }

  return scheme_void; /* misuse of exception handler (wrong kind of thread or under prompt) */
}
3711 
3712 MZ_DO_NOT_INLINE(Scheme_Object *scheme_call_as_nested_thread(int argc, Scheme_Object *argv[], void *max_bottom));
3713 
/* Implement `call-in-nested-thread`: run argv[0] in a freshly created
   thread that runs *in place of* the current thread; the current
   thread resumes only when the nested thread finishes or escapes.
   argv[1], if present, is the custodian for the nested thread.
   max_bottom is unused in this body — presumably a stack bound for
   the nested evaluation; confirm against callers. */
Scheme_Object *scheme_call_as_nested_thread(int argc, Scheme_Object *argv[], void *max_bottom)
{
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Thread * volatile np;
  Scheme_Custodian *mgr;
  Scheme_Object * volatile v;
  mz_jmp_buf newbuf;
  volatile int failure;

  scheme_check_proc_arity("call-in-nested-thread", 0, 0, argc, argv);
  if (argc > 1) {
    if (SCHEME_CUSTODIANP(argv[1]))
      mgr = (Scheme_Custodian *)argv[1];
    else {
      scheme_wrong_contract("call-in-nested-thread", "custodian?", 1, argc, argv);
      return NULL;
    }
  } else
    mgr = (Scheme_Custodian *)scheme_get_param(scheme_current_config(), MZCONFIG_CUSTODIAN);

  scheme_custodian_check_available(mgr, "call-in-nested-thread", "thread");

  SCHEME_USE_FUEL(25);

  scheme_wait_until_suspend_ok();

  /* Allocate and minimally initialize the nested thread: */
  np = MALLOC_ONE_TAGGED(Scheme_Thread);
  np->so.type = scheme_thread_type;
#ifdef MZ_PRECISE_GC
  GC_register_new_thread(np, mgr);
#endif
  np->running = MZTHREAD_RUNNING;
  np->ran_some = 1;

#ifdef RUNSTACK_IS_GLOBAL
  /* Save the global runstack/mark registers into the parent thread: */
  p->runstack = MZ_RUNSTACK;
  p->runstack_start = MZ_RUNSTACK_START;
  p->cont_mark_stack = MZ_CONT_MARK_STACK;
  p->cont_mark_pos = MZ_CONT_MARK_POS;
#endif

  /* zero out anything we need now, because nestee disables
     GC cleaning for this thread: */
  scheme_prepare_this_thread_for_GC(p);

  if (!p->runstack_owner) {
    Scheme_Thread **owner;
    owner = MALLOC_N(Scheme_Thread *, 1);
    p->runstack_owner = owner;
    *owner = p;
  }

  /* The nested thread borrows the parent's runstack (ownership is
     transferred via *runstack_owner and restored below): */
  np->runstack = p->runstack;
  np->runstack_start = p->runstack_start;
  np->runstack_size = p->runstack_size;
  np->runstack_saved = p->runstack_saved;
  np->runstack_owner = p->runstack_owner;
  *np->runstack_owner = np;
  np->stack_start = p->stack_start;
  np->engine_weight = p->engine_weight;
  {
    Scheme_Object **tb;
    tb = MALLOC_N(Scheme_Object *, p->tail_buffer_size);
    np->tail_buffer = tb;
  }
  np->tail_buffer_size = p->tail_buffer_size;

  scheme_gmp_tls_init(np->gmp_tls);

  /* Link the nested thread at the front of the thread list: */
  /* np->prev = NULL; - 0ed by allocation */
  np->next = scheme_first_thread;
  scheme_first_thread->prev = np;
  scheme_first_thread = np;

  np->t_set_parent = p->t_set_parent;
  schedule_in_set((Scheme_Object *)np, np->t_set_parent);

  /* Inherit thread cells, config, and break-enabled state: */
  {
    Scheme_Thread_Cell_Table *cells;
    cells = scheme_inherit_cells(p->cell_values);
    np->cell_values = cells;
  }
  {
    Scheme_Config *config;
    config = scheme_current_config();
    np->init_config = config;
  }
  {
    int cb;
    Scheme_Object *bc;
    cb = scheme_can_break(p);
    p->can_break_at_swap = cb;
    bc = scheme_current_break_cell();
    np->init_break_cell = bc;
    if (SAME_OBJ(bc, maybe_recycle_cell))
      maybe_recycle_cell = NULL;
  }
  np->cont_mark_pos = (MZ_MARK_POS_TYPE)1;
  /* others 0ed already by allocation */

  check_ready_break();

  /* Wire up the nester/nestee relationship; any pending external
     break moves to the nested thread: */
  np->nester = p;
  p->nestee = np;
  np->external_break = p->external_break;
  p->external_break = 0;

  /* Register with the custodian through a weak "hop", so the
     custodian doesn't retain the thread: */
  {
    Scheme_Thread_Custodian_Hop *hop;
    Scheme_Custodian_Reference *mref;
    hop = MALLOC_ONE_WEAK_RT(Scheme_Thread_Custodian_Hop);
    np->mr_hop = hop;
    hop->so.type = scheme_thread_hop_type;
    {
      Scheme_Thread *wp;
      wp = (Scheme_Thread *)WEAKIFY((Scheme_Object *)np);
      hop->p = wp;
    }
    mref = scheme_add_managed(mgr, (Scheme_Object *)hop, NULL, NULL, 0);
    np->mref = mref;
    np->extra_mrefs = scheme_null;
#ifndef MZ_PRECISE_GC
    scheme_weak_reference((void **)(void *)&hop->p);
#endif
  }

  np->gc_prep_chain = gc_prep_thread_chain;
  gc_prep_thread_chain = np;

#ifdef RUNSTACK_IS_GLOBAL
  MZ_CONT_MARK_STACK = np->cont_mark_stack;
  MZ_CONT_MARK_POS = np->cont_mark_pos;
#endif

  /* Switch to the nested thread: */
  scheme_current_thread = np;

  if (p != scheme_main_thread)
    scheme_weak_suspend_thread(p);

  if (!the_nested_exn_handler) {
    REGISTER_SO(the_nested_exn_handler);
    the_nested_exn_handler = scheme_make_prim_w_arity(def_nested_exn_handler,
                                                      "nested-thread-exception-handler",
                                                      1, 1);
  }
  scheme_set_cont_mark(scheme_exn_handler_key, the_nested_exn_handler);

  /* Call thunk, catch escape: */
  np->error_buf = &newbuf;
  if (scheme_setjmp(newbuf)) {
    /* Escaped (error or kill); a kill leaves no value to re-raise: */
    if (!np->cjs.is_kill)
      v = np->cjs.val;
    else
      v = NULL;
    failure = 1;
  } else {
    v = scheme_apply_with_prompt(argv[0], 0, NULL);
    failure = 0;
  }

  /* Tear down the nested thread: drop custodian references first. */
  scheme_remove_managed(np->mref, (Scheme_Object *)np->mr_hop);
  {
    Scheme_Object *l;
    for (l = np->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
      scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l),
			    (Scheme_Object *)np->mr_hop);
    }
  }
  np->extra_mrefs = scheme_null;
#ifdef MZ_PRECISE_GC
  WEAKIFIED(np->mr_hop->p) = NULL;
#else
  scheme_unweak_reference((void **)(void *)&np->mr_hop->p);
#endif
  scheme_remove_all_finalization(np->mr_hop);

  /* Unlink the nested thread from the thread list: */
  if (np->prev)
    np->prev->next = np->next;
  else
    scheme_first_thread = np->next;
  np->next->prev = np->prev;

  np->next = NULL;
  np->prev = NULL;

  unschedule_in_set((Scheme_Object *)np, np->t_set_parent);

  if (np->cont_mark_stack_owner
      && ((*np->cont_mark_stack_owner) == np)) {
    *np->cont_mark_stack_owner = NULL;
  }

  np->running = 0;

  /* Return runstack ownership to the parent: */
  *p->runstack_owner = p;

  /* Move any pending break back to the parent thread: */
  p->external_break = np->external_break;
  p->nestee = NULL;
  np->nester = NULL;

  thread_is_dead(np);

  scheme_current_thread = p;

  if (!p->gc_prep_chain) {
    p->gc_prep_chain = gc_prep_thread_chain;
    gc_prep_thread_chain = p;
  }

  if (p != scheme_main_thread)
    scheme_weak_resume_thread(p);

#ifdef RUNSTACK_IS_GLOBAL
  MZ_CONT_MARK_STACK = p->cont_mark_stack;
  MZ_CONT_MARK_POS = p->cont_mark_pos;
#endif

  /* If the parent was killed or suspended while the nested thread
     ran, give the scheduler a chance to act on that now: */
  if ((p->running & MZTHREAD_KILLED)
      || (p->running & MZTHREAD_USER_SUSPENDED))
    scheme_thread_block(0.0);

  if (failure) {
    if (!v)
      scheme_raise_exn(MZEXN_FAIL,
		       "call-in-nested-thread: the thread was killed, or it exited via the default error escape handler");
    else
      scheme_raise(v);
  }

  /* May have just moved a break to a breakable thread: */
  /* Check for external break again after swap or sleep */
  scheme_check_break_now();

  return v;
}
3949 
/* Primitive wrapper for `call-in-nested-thread`; `result` is used
   (uninitialized) only for its address via PROMPT_STACK. */
static Scheme_Object *call_as_nested_thread(int argc, Scheme_Object *argv[])
{
  Scheme_Object *result;
  result = scheme_call_as_nested_thread(argc, argv, PROMPT_STACK(result));
  return result;
}
3956 
3957 /*========================================================================*/
3958 /*                     thread scheduling and termination                  */
3959 /*========================================================================*/
3960 
3961 static int check_fd_semaphores();
3962 
/* Create the long-term poll set used to map file descriptors to
   semaphores (see scheme_fd_to_semaphore). */
void scheme_init_fd_semaphores(void)
{
  scheme_semaphore_fd_set = rktio_ltps_open(scheme_rktio);
}
3967 
/* Shut down the long-term poll set: remove all registered handles,
   post/free their semaphores via check_fd_semaphores(), then close
   the set. */
void scheme_release_fd_semaphores(void)
{
  if (scheme_semaphore_fd_set) {
    rktio_ltps_remove_all(scheme_rktio, scheme_semaphore_fd_set);
    (void)check_fd_semaphores();
    rktio_ltps_close(scheme_rktio, scheme_semaphore_fd_set);
  }
}
3976 
log_fd_semaphore_error()3977 static void log_fd_semaphore_error()
3978 {
3979   {
3980     Scheme_Logger *logger;
3981     logger = scheme_get_main_logger();
3982     scheme_log(logger, SCHEME_LOG_WARNING, 0,
3983 	       "error for long-term poll set: %R");
3984   }
3985 }
3986 
/* Map a raw file descriptor (or socket, when is_socket) to a
   semaphore that is posted when the fd becomes ready in the given
   MZFD_* mode.  Wraps the fd in a temporary rktio_fd_t; the wrapper
   is "forgotten" (not closed) afterward.  Returns NULL if the
   long-term poll set is unavailable or registration fails. */
Scheme_Object *scheme_fd_to_semaphore(intptr_t fd, int mode, int is_socket)
{
  rktio_fd_t *rfd;
  Scheme_Object *sema;

  if (!scheme_semaphore_fd_set)
    return NULL;

  rfd = rktio_system_fd(scheme_rktio, fd, (RKTIO_OPEN_READ
                                           | RKTIO_OPEN_WRITE
                                           | (is_socket ? RKTIO_OPEN_SOCKET : 0)));

  sema = scheme_rktio_fd_to_semaphore(rfd, mode);

  rktio_forget(scheme_rktio, rfd);

  return sema;
}
4005 
/* Register fd with the long-term poll set in the given MZFD_* mode
   (translated to the corresponding RKTIO_LTPS_* mode) and return the
   semaphore associated with its handle, allocating the semaphore in
   an immobile box on first use.  Returns NULL when the poll set is
   unavailable, when removal/not-found is reported (a benign case),
   or on a real failure (which is logged unless merely unsupported). */
Scheme_Object *scheme_rktio_fd_to_semaphore(rktio_fd_t *fd, int mode)
{
  rktio_ltps_handle_t *h;
  void **ib;

  if (!scheme_semaphore_fd_set)
    return NULL;

  /* Translate the MZFD_* request into the rktio equivalent: */
  switch(mode) {
  case MZFD_CREATE_READ:
    mode = RKTIO_LTPS_CREATE_READ;
    break;
  case MZFD_CREATE_WRITE:
    mode = RKTIO_LTPS_CREATE_WRITE;
    break;
  case MZFD_CHECK_READ:
    mode = RKTIO_LTPS_CHECK_READ;
    break;
  case MZFD_CHECK_WRITE:
    mode = RKTIO_LTPS_CHECK_WRITE;
    break;
  case MZFD_REMOVE:
    mode = RKTIO_LTPS_REMOVE;
    break;
  }

  h = rktio_ltps_add(scheme_rktio, scheme_semaphore_fd_set, fd, mode);

  if (!h) {
    if (scheme_last_error_is_racket(RKTIO_ERROR_LTPS_REMOVED)
        || scheme_last_error_is_racket(RKTIO_ERROR_LTPS_NOT_FOUND)) {
      /* That's a kind of success, not failure. */
      return NULL;
    }
    if (!scheme_last_error_is_racket(RKTIO_ERROR_UNSUPPORTED))
      log_fd_semaphore_error();
    return NULL;
  }

  /* Lazily attach a semaphore (in an immobile box, since rktio keeps
     a raw pointer to it) to the handle: */
  ib = rktio_ltps_handle_get_data(scheme_rktio, h);
  if (!ib) {
    ib = scheme_malloc_immobile_box(scheme_make_sema(0));
    rktio_ltps_handle_set_data(scheme_rktio, h, ib);
  }

  return *(Scheme_Object **)ib;
}
4053 
check_fd_semaphores()4054 static int check_fd_semaphores()
4055 {
4056   rktio_ltps_handle_t *h;
4057   int did = 0;
4058   void *p;
4059   Scheme_Object *sema;
4060 
4061   if (!scheme_semaphore_fd_set)
4062     return 0;
4063 
4064   rktio_ltps_poll(scheme_rktio, scheme_semaphore_fd_set);
4065 
4066   while (1) {
4067     h = rktio_ltps_get_signaled_handle(scheme_rktio, scheme_semaphore_fd_set);
4068     if (h) {
4069       p = rktio_ltps_handle_get_data(scheme_rktio, h);
4070       free(h);
4071 
4072       sema = *(Scheme_Object **)p;
4073       scheme_free_immobile_box(p);
4074 
4075       scheme_post_sema_all(sema);
4076 
4077       did = 1;
4078     } else
4079       break;
4080   }
4081 
4082   return did;
4083 }
4084 
/* Public wrapper for check_fd_semaphores(); the result is ignored. */
void scheme_check_fd_semaphores(void)
{
  (void)check_fd_semaphores();
}
4089 
/* Snapshot of the fields that describe a thread's scheduling state;
   used by save_thread_schedule_state()/restore_thread_schedule_state()
   to temporarily treat a thread as plainly running. */
typedef struct {
  int running;                 /* MZTHREAD_* state bits */
  double sleep_end;            /* wake-up time in ms, or 0.0 (compared
                                  against scheme_get_inexact_milliseconds) */
  int block_descriptor;        /* e.g. NOT_BLOCKED, GENERIC_BLOCKED, SLEEP_BLOCKED */
  Scheme_Object *blocker;      /* object the thread is blocked on */
  Scheme_Ready_Fun block_check;
  Scheme_Needs_Wakeup_Fun block_needs_wakeup;
  Scheme_Kill_Action_Func private_on_kill; /* saved only when save_kills */
  void *private_kill_data;
  void **private_kill_next;
} Thread_Schedule_State_Record;
4101 
/* Save p's scheduling state into s, then reset p to a plain-running
   state (so scheduler callbacks can run on p without confusion).
   Kill-related fields are saved only when save_kills is set. */
static void save_thread_schedule_state(Scheme_Thread *p,
				       Thread_Schedule_State_Record *s,
				       int save_kills)
{
  s->running = p->running;
  s->sleep_end = p->sleep_end;
  s->block_descriptor = p->block_descriptor;
  s->blocker = p->blocker;
  s->block_check = p->block_check;
  s->block_needs_wakeup = p->block_needs_wakeup;

  if (save_kills) {
    s->private_on_kill = p->private_on_kill;
    s->private_kill_data = p->private_kill_data;
    s->private_kill_next = p->private_kill_next;
  }

  /* Reset to a neutral, running state: */
  p->running = MZTHREAD_RUNNING;
  p->sleep_end = 0.0;
  p->block_descriptor = 0;
  p->blocker = NULL;
  p->block_check = NULL;
  p->block_needs_wakeup = NULL;
}
4126 
/* Restore p's scheduling state from s; inverse of
   save_thread_schedule_state().  save_kills must match the value
   passed when the state was saved. */
static void restore_thread_schedule_state(Scheme_Thread *p,
					  Thread_Schedule_State_Record *s,
					  int save_kills)
{
  p->running = s->running;
  p->sleep_end = s->sleep_end;
  p->block_descriptor = s->block_descriptor;
  p->blocker = s->blocker;
  p->block_check = s->block_check;
  p->block_needs_wakeup = s->block_needs_wakeup;

  if (save_kills) {
    p->private_on_kill = s->private_on_kill;
    p->private_kill_data = s->private_kill_data;
    p->private_kill_next = s->private_kill_next;
  }
}
4144 
check_sleep(int need_activity,int sleep_now)4145 static int check_sleep(int need_activity, int sleep_now)
4146 /* Signals should be suspended */
4147 {
4148   Scheme_Thread *p, *p2;
4149   int end_with_act;
4150   void *fds;
4151 
4152   if (scheme_no_stack_overflow)
4153     return 0;
4154 
4155   /* Is everything blocked? */
4156   if (!do_atomic) {
4157     p = scheme_first_thread;
4158     while (p) {
4159       if (!p->nestee
4160           && (p->ran_some || p->block_descriptor == NOT_BLOCKED)
4161           && (p->next || !(p->running & MZTHREAD_USER_SUSPENDED)))
4162 	break;
4163       p = p->next;
4164     }
4165   } else
4166     p = NULL;
4167 
4168   p2 = scheme_first_thread;
4169   while (p2) {
4170     if (p2->ran_some) {
4171       rkio_reset_sleep_backoff(scheme_rktio);
4172       p2->ran_some = 0;
4173     }
4174     p2 = p2->next;
4175   }
4176 
4177   end_with_act = thread_ended_with_activity;
4178   thread_ended_with_activity = 0;
4179 
4180   if (need_activity
4181       && !end_with_act
4182       && (do_atomic
4183 	  || (!p && ((!sleep_now && scheme_wakeup_on_input)
4184 		     || (sleep_now && (scheme_sleep || scheme_place_sleep)))))) {
4185     double max_sleep_time = 0;
4186 
4187     /* Poll from top-level process, and all subprocesses are blocked. */
4188     /* So, everything is blocked pending external input. */
4189     /* Build a list of file descriptors that we're waiting on */
4190     /* and turn off polling. */
4191     if (have_activity)
4192       scheme_active_but_sleeping = 1;
4193     if (have_activity && scheme_notify_multithread)
4194       scheme_notify_multithread(0);
4195 
4196     fds = rktio_make_poll_set(scheme_rktio);
4197 
4198     needs_sleep_cancelled = 0;
4199 
4200     p = scheme_first_thread;
4201     while (p) {
4202       int merge_time = 0;
4203       double p_time;
4204 
4205       if (p->nestee) {
4206 	/* nothing */
4207       } else if (p->block_descriptor == GENERIC_BLOCKED) {
4208         needs_sleep_time_end = -1.0;
4209 	if (p->block_needs_wakeup) {
4210 	  Scheme_Needs_Wakeup_Fun f = p->block_needs_wakeup;
4211 	  Scheme_Object *blocker = p->blocker;
4212 	  Thread_Schedule_State_Record ssr;
4213 	  save_thread_schedule_state(scheme_current_thread, &ssr, 0);
4214 	  f(blocker, fds);
4215 	  restore_thread_schedule_state(scheme_current_thread, &ssr, 0);
4216 	}
4217         p_time = p->sleep_end;
4218 	merge_time = (p_time > 0.0);
4219         if (needs_sleep_time_end > 0.0) {
4220           if (!merge_time || (needs_sleep_time_end < p_time)) {
4221             p_time = needs_sleep_time_end;
4222             merge_time = 1;
4223           }
4224         }
4225       } else if (p->block_descriptor == SLEEP_BLOCKED) {
4226 	merge_time = 1;
4227         p_time = p->sleep_end;
4228       }
4229 
4230       if (merge_time) {
4231 	double d;
4232 	double t;
4233 
4234 	d = (p_time - scheme_get_inexact_milliseconds());
4235 
4236 	t = (d / 1000);
4237 	if (t <= 0) {
4238 	  t = (float)0.00001;
4239 	  needs_sleep_cancelled = 1;
4240 	}
4241 	if (!max_sleep_time || (t < max_sleep_time))
4242 	  max_sleep_time = t;
4243       }
4244       p = p->next;
4245     }
4246 
4247     if (needs_sleep_cancelled) {
4248       rktio_poll_set_forget(scheme_rktio, fds);
4249       return 0;
4250     }
4251 
4252     if (post_system_idle()) {
4253       rktio_poll_set_forget(scheme_rktio, fds);
4254       return 0;
4255     }
4256 
4257     if (sleep_now) {
4258       float mst = (float)max_sleep_time;
4259 
4260       /* Make sure that mst didn't go to infinity: */
4261       if (mst && !((double)mst < (2 * max_sleep_time))) {
4262         mst = 1000000.0;
4263       }
4264 
4265       {
4266         Scheme_Sleep_Proc slp;
4267         if (scheme_place_sleep)
4268           slp = scheme_place_sleep;
4269         else
4270           slp = scheme_sleep;
4271 
4272         slp(mst, fds);
4273       }
4274     } else if (scheme_wakeup_on_input)
4275       scheme_wakeup_on_input(fds);
4276 
4277     rktio_poll_set_forget(scheme_rktio, fds);
4278 
4279     return 1;
4280   }
4281 
4282   return 0;
4283 }
4284 
/* Record an end time (in ms) for the current needs_wakeup callback;
   read back by check_sleep().  The fds argument is unused here. */
void scheme_set_wakeup_time(void *fds, double end_time)
{
  /* should be called only during a needs_wakeup callback */
  needs_sleep_time_end = end_time;
}
4290 
/* Install a place-specific sleep procedure; when set, it takes
   precedence over scheme_sleep in check_sleep(). */
void scheme_set_place_sleep(Scheme_Sleep_Proc slp)
{
  scheme_place_sleep = slp;
}
4295 
/* Offer a value on the system-idle channel; a non-zero result means
   some thread was waiting for idleness, so we should not sleep. */
static int post_system_idle()
{
  return scheme_try_channel_get(scheme_system_idle_channel);
}
4300 
/* Abort an in-progress decision to sleep (see check_sleep). */
void scheme_cancel_sleep()
{
  needs_sleep_cancelled = 1;
}
4305 
/* Give other threads a chance to run (with breaks suspended for the
   current thread), until either everything is ready to sleep or one
   scheduling quantum has elapsed. */
void scheme_check_threads(void)
{
  double start, now;

  start = scheme_get_inexact_milliseconds();

  while (1) {
    /* Suspend breaks around the swap so this poll can't be interrupted: */
    scheme_current_thread->suspend_break++;
    scheme_thread_block((float)0);
    --scheme_current_thread->suspend_break;

    if (check_sleep(have_activity, 0))
      break;

    now = scheme_get_inexact_milliseconds();
    if (((now - start) * 1000) > MZ_THREAD_QUANTUM_USEC)
      break;
  }
}
4325 
/* Called when external input arrives: clear the sleeping flag and
   re-enable the embedding app's polling. */
void scheme_wake_up(void)
{
  scheme_active_but_sleeping = 0;
  if (have_activity && scheme_notify_multithread)
    scheme_notify_multithread(1);
}
4332 
/* Called when the current thread's fuel runs out: yield to the
   scheduler (unless primitives are still being defined at startup)
   and note that this thread made progress. */
void scheme_out_of_fuel(void)
{
  if (scheme_defining_primitives) return;

  scheme_thread_block((float)0);
  scheme_current_thread->ran_some = 1;
}
4340 
/* Initialize a Scheme_Schedule_Info record for a sync/ready check:
   false_pos_ok names the thread for which a false-positive ready
   result is acceptable (or NULL), no_redirect disables retargeting
   the sync, and sleep_end is the current wake-up deadline (ms, or 0). */
static void init_schedule_info(Scheme_Schedule_Info *sinfo, Scheme_Thread *false_pos_ok,
			       int no_redirect, double sleep_end)
{
  sinfo->false_positive_ok = false_pos_ok;
  sinfo->potentially_false_positive = 0;
  sinfo->current_syncing = NULL;
  sinfo->spin = 0;
  sinfo->is_poll = 0;
  sinfo->no_redirect = no_redirect;
  sinfo->sleep_end = sleep_end;
  sinfo->replace_chain = NULL;
}
4353 
/* Return the thread cell that currently controls break enabling,
   i.e. the innermost scheme_break_enabled_key continuation mark. */
Scheme_Object *scheme_current_break_cell()
{
  return scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
}
4358 
/* Determine the break-enabled state for thread p.  For the current
   thread, consult the break-enabled continuation mark's thread cell;
   for any other thread, use the value captured at its last swap. */
static int can_break_param(Scheme_Thread *p)
{
  Scheme_Object *cell, *val;

  if (p != scheme_current_thread)
    return p->can_break_at_swap;

  cell = scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);
  val = scheme_thread_cell_get(cell, p->cell_values);

  return SCHEME_TRUEP(val);
}
4372 
/* Report whether thread p can currently receive a break: breaks must
   not be suspended for the thread, globally disabled, or blocked by
   stack-overflow handling; otherwise defer to the break parameter. */
int scheme_can_break(Scheme_Thread *p)
{
  if (p->suspend_break || all_breaks_disabled || scheme_no_stack_overflow)
    return 0;

  return can_break_param(p);
}
4380 
/* Set the break-enabled state for the current thread by writing into
   the current break cell.  Writing makes the cell unsuitable for
   recycling, so drop it from maybe_recycle_cell if it matches. */
void scheme_set_can_break(int on)
{
  Scheme_Object *v;

  v = scheme_extract_one_cc_mark(NULL, scheme_break_enabled_key);

  scheme_thread_cell_set(v, scheme_current_thread->cell_values,
			 (on ? scheme_true : scheme_false));

  if (SAME_OBJ(v, maybe_recycle_cell))
    maybe_recycle_cell = NULL;
}
4393 
scheme_check_break_now(void)4394 void scheme_check_break_now(void)
4395 {
4396   Scheme_Thread *p = scheme_current_thread;
4397 
4398   check_ready_break();
4399 
4400   if (p->external_break && scheme_can_break(p)) {
4401     scheme_thread_block_w_thread(0.0, p);
4402     p->ran_some = 1;
4403   }
4404 }
4405 
/* Scheme-level wrapper for scheme_check_break_now(); always returns void. */
static Scheme_Object *check_break_now(int argc, Scheme_Object *args[])
{
  scheme_check_break_now();
  return scheme_void;
}
4411 
/* Push a continuation frame whose break-enabled mark is a thread
   cell defaulting to `on`; optionally check for a pending break
   immediately after installing the new state. The cell is remembered
   in `cframe->cache` so the matching pop can recycle it. */
void scheme_push_break_enable(Scheme_Cont_Frame_Data *cframe, int on, int post_check)
{
  Scheme_Object *cell = NULL;

  /* Reuse the recycled cell when its default matches `on`: */
  if (recycle_cell
      && (!SCHEME_TRUEP(((Thread_Cell *)recycle_cell)->def_val) == !on)) {
    cell = recycle_cell;
    recycle_cell = NULL;
  }

  if (!cell)
    cell = scheme_make_thread_cell(on ? scheme_true : scheme_false, 1);

  scheme_push_continuation_frame(cframe);
  scheme_set_cont_mark(scheme_break_enabled_key, cell);

  if (post_check)
    scheme_check_break_now();

  cframe->cache = cell;
  maybe_recycle_cell = cell;
  recycle_cc_count = scheme_cont_capture_count;
}
4434 
/* Pop the continuation frame pushed by scheme_push_break_enable,
   optionally checking for a pending break under the restored state. */
void scheme_pop_break_enable(Scheme_Cont_Frame_Data *cframe, int post_check)
{
  scheme_pop_continuation_frame(cframe);

  if (post_check)
    scheme_check_break_now();

  /* If no continuation was captured while the frame was live, the
     cell can be recycled by a future push: */
  if (maybe_recycle_cell == cframe->cache) {
    if (scheme_cont_capture_count == recycle_cc_count)
      recycle_cell = maybe_recycle_cell;
    maybe_recycle_cell = NULL;
  }
}
4448 
/* Closed primitive (installed by raise_break) that raises the break
   exception of the given kind. `data` is a fixnum encoding the break
   kind; argv[0] is the continuation to report in the exception. */
static Scheme_Object *raise_user_break(void *data, int argc, Scheme_Object ** volatile argv)
{
  /* The main action here is buried in code to free temporary bignum
     space on escapes. Aside from a thread kill, this is the only
     place where we have to worry about freeing bignum space, because
     kill and escape are the only possible actions within a bignum
     calculation. It is possible to have nested bignum calculations,
     though (if the break handler performs bignum arithmetic), so
     that's why we save and restore an old snapshot. */
  mz_jmp_buf *savebuf, newbuf;
  intptr_t save[4];
  int kind;

  kind = SCHEME_INT_VAL((Scheme_Object *)data);

  /* Install a local error buffer so an escape lands back here: */
  savebuf = scheme_current_thread->error_buf;
  scheme_current_thread->error_buf = &newbuf;
  scheme_gmp_tls_snapshot(scheme_current_thread->gmp_tls, save);

  if (!scheme_setjmp(newbuf)) {
    /* >>>> This is the main action <<<< */
    scheme_raise_exn(kind, argv[0], ((kind == MZEXN_BREAK_TERMINATE)
                                     ? "terminate break"
                                     : ((kind == MZEXN_BREAK_HANG_UP)
                                        ? "hang-up break"
                                        : "user break")));
    /* will definitely escape (or thread will die) */
  } else {
    /* As expected, we're escaping. Unless we're continuing, then
       reset temporary bignum memory. */
    int cont;
    /* `cont` is true when the escape is a jump back into the
       continuation that received the break: */
    cont = SAME_OBJ((Scheme_Object *)scheme_jumping_to_continuation,
		    argv[0]);
    scheme_gmp_tls_restore_snapshot(scheme_current_thread->gmp_tls, NULL, save, !cont);
    scheme_longjmp(*savebuf, 1);
  }

  /* Can't get here */
  return NULL;
}
4489 
/* Deliver a pending external break to thread `p` (which must be the
   current thread): clears the pending flag, temporarily drops out of
   any sync lines, and invokes raise_user_break under an escape
   continuation so that a handler can return here. */
static void raise_break(Scheme_Thread *p)
{
  Thread_Schedule_State_Record ssr;
  Scheme_Object *a[1];
  Scheme_Cont_Frame_Data cframe;
  int kind;

  /* Consume the pending break; `kind` distinguishes break /
     hang-up / terminate. */
  kind = p->external_break;
  p->external_break = 0;

  if (p->blocker && (p->block_check == (Scheme_Ready_Fun)syncing_ready)) {
    /* Get out of lines for channels, etc., before calling a break exn handler.
       This is only strictly necessary for `sync/enable-break`, which wants
       to provide a sync-or-break guarantee, but we do it always for consistency. */
    get_outof_or_into_lines((Syncing *)p->blocker, 1);
  }

  /* Save blocking/sleeping state, since the handler may run
     arbitrary Racket code: */
  save_thread_schedule_state(p, &ssr, 0);
  p->ran_some = 1;

  a[0] = scheme_make_closed_prim((Scheme_Closed_Prim *)raise_user_break, scheme_make_integer(kind));

  /* Continuation frame ensures that this doesn't
     look like it's in tail position with respect to
     an existing escape continuation */
  scheme_push_continuation_frame(&cframe);

  scheme_call_ec(1, a);

  scheme_pop_continuation_frame(&cframe);

  /* Continue from break... */
  restore_thread_schedule_state(p, &ssr, 0);

  if (p->blocker && (p->block_check == (Scheme_Ready_Fun)syncing_ready)) {
    /* Get back into lines for channels, etc. */
    get_outof_or_into_lines((Syncing *)p->blocker, 0);
  }
}
4529 
/* Escape from the current continuation so that thread `p` can be
   terminated; marks the jump as a kill and longjmps to the thread's
   error buffer (does not return). */
static void escape_to_kill(Scheme_Thread *p)
{
  p->cjs.is_kill = 1;
  p->cjs.skip_dws = 1;
  p->cjs.alt_full_continuation = NULL;
  p->cjs.jumping_to_continuation = (Scheme_Object *)p;

  scheme_longjmp(*p->error_buf, 1);
}
4538 
/* Terminate thread `p`: a nested thread escapes to its nester, the
   main thread exits the process (or escapes, for a non-main place),
   and any other thread is removed from the schedule before another
   thread is selected. Does not return. */
static void exit_or_escape(Scheme_Thread *p)
{
  /* Maybe this killed thread is nested: */
  if (p->nester) {
    /* Clear the killed flag so the nester doesn't see it again: */
    if (p->running & MZTHREAD_KILLED)
      p->running -= MZTHREAD_KILLED;
    escape_to_kill(p);
  }

  if (SAME_OBJ(p, scheme_main_thread)) {
    /* Hard exit: */
    if (scheme_current_place_id)
      escape_to_kill(p);

    if (scheme_exit)
      scheme_exit(0);

    /* We really have to exit: */
    exit(0);
  }

  remove_thread(p);
  select_thread();
}
4563 
scheme_break_kind_main_thread_at(void * p,int kind)4564 void scheme_break_kind_main_thread_at(void *p, int kind)
4565 /* This function can be called from an interrupt handler.
4566    On some platforms, it will even be called from multiple
4567    OS threads. In the case of multiple threads, there's a
4568    tiny chance that a single Ctl-C will trigger multiple
4569    break exceptions. */
4570   XFORM_SKIP_PROC
4571 {
4572   if (kind > *(volatile short *)p)
4573     *(volatile short *)p = kind;
4574 }
4575 
/* Record a plain (non-terminate, non-hang-up) break at handle `p`. */
void scheme_break_main_thread_at(void *p)
  XFORM_SKIP_PROC
{
  scheme_break_kind_main_thread_at(p, MZEXN_BREAK);
}
4581 
/* Request a break on the main thread via the delayed-break flag. */
void scheme_break_main_thread()
/* Calling this function from an arbitrary
   thread is dangerous when thread locals are enabled. */
{
  scheme_break_main_thread_at((void *)&delayed_break_ready);
}
4588 
/* Expose the delayed-break flag's address so embedding code can
   signal a break asynchronously via scheme_break_main_thread_at(). */
void *scheme_get_main_thread_break_handle()
{
  return (void *)&delayed_break_ready;
}
4593 
/* Redirect delayed ("main thread") breaks to thread `p` instead of
   the actual main thread; registers the root with the GC on first use. */
void scheme_set_break_main_target(Scheme_Thread *p)
{
  if (!main_break_target_thread) {
    REGISTER_SO(main_break_target_thread);
  }
  main_break_target_thread = p;
}
4601 
check_ready_break()4602 static void check_ready_break()
4603 {
4604 #if defined(MZ_USE_PLACES)
4605   if (!do_atomic)
4606     scheme_place_check_for_interruption();
4607 #endif
4608 
4609   if (delayed_break_ready) {
4610     if (scheme_main_thread) {
4611       int kind = delayed_break_ready;
4612       delayed_break_ready = 0;
4613       scheme_break_kind_thread(main_break_target_thread, kind);
4614     }
4615   }
4616 }
4617 
/* Post a break of the given kind to thread `p` (or the main thread
   when `p` is NULL), propagating to the innermost nested thread and
   waking the target if it is weakly suspended. */
void scheme_break_kind_thread(Scheme_Thread *p, int kind)
{
  if (!p) {
    if (!scheme_main_thread)
      return;
    p = scheme_main_thread;
  }

  /* Deliver the break to the innermost nestee: */
  while (p->nestee)
    p = p->nestee;

  /* Keep the most severe pending kind: */
  if (p->external_break < kind)
    p->external_break = kind;

  if ((p == scheme_current_thread) && scheme_can_break(p)) {
    /* Make the current thread re-enter the scheduler soon: */
    scheme_fuel_counter = 0;
    scheme_jit_stack_boundary = (uintptr_t)-1;
  }

  scheme_weak_resume_thread(p);
}
4642 
/* Post a plain break (kind MZEXN_BREAK) to thread `p`. */
void scheme_break_thread(Scheme_Thread *p)
{
  scheme_break_kind_thread(p, MZEXN_BREAK);
}
4647 
/* Invoke the registered atomic-timeout callback, preserving the
   current thread's blocking/sleeping state around the call. `must`
   is passed through to the callback (non-zero means the callback
   must give up the atomic region). */
static void call_on_atomic_timeout(int must)
{
  Scheme_Thread *p = scheme_current_thread;
  Thread_Schedule_State_Record ssr;
  Scheme_On_Atomic_Timeout_Proc oat;

  /* Save any state that has to do with the thread blocking or
     sleeping, in case on_atomic_timeout() runs Racket code. */
  save_thread_schedule_state(p, &ssr, 1);

  /* When on_atomic_timeout is thread-local, need a
     local variable so that the function call isn't
     obscured to xform: */
  oat = on_atomic_timeout;
  oat(on_atomic_timeout_data, must);

  restore_thread_schedule_state(p, &ssr, 1);
}
4666 
/* Search the thread-set tree for the next thread that can run,
   starting after the current thread's position. Writes the chosen
   thread (or NULL when no other thread is ready) to *return_arg;
   the out-parameter convention keeps the result out of a local that
   the GC's stack scan might otherwise retain. */
static void find_next_thread(Scheme_Thread **return_arg) {
  Scheme_Thread *next;
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Object *next_in_set;
  Scheme_Thread_Set *t_set;

  /* Current time in ms, fetched lazily for sleep checks (0.0 = not
     yet fetched): */
  double msecs = 0.0;

  /* Find the next process. Skip processes that are definitely
     blocked. */

  /* Start from the root */
  next_in_set = (Scheme_Object *)scheme_thread_set_top;
  t_set = NULL; /* this will get set at the beginning of the loop */

  /* Each thread may or may not be available. If it's not available,
     we search thread by thread to find something that is available. */
  while (1) {
    /* next_in_set is the thread or set to try... */

    /* While it's a set, go down into the set, choosing the next
       item after the set's current. For each set, remember where we
       started searching for something to run, so we'll know when
       we've tried everything in the set. */
    while (!SCHEME_THREADP(next_in_set)) {
      t_set = (Scheme_Thread_Set *)next_in_set;
      next_in_set = get_t_set_next(t_set->current);
      if (!next_in_set)
        next_in_set = t_set->first;
      t_set->current = next_in_set;
      t_set->search_start = next_in_set;
    }

    /* Now `t_set' is the set we're trying, and `next' will be the
       thread to try: */
    next = (Scheme_Thread *)next_in_set;

    /* If we get back to the current thread, then
       no other thread was ready. */
    if (SAME_PTR(next, p)) {
      next = NULL;
      break;
    }

    /* Check whether `next' is ready... */

    if (next->nestee) {
      /* Blocked on nestee */
    } else if (next->running & MZTHREAD_USER_SUSPENDED) {
      if (next->next || (next->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
        /* If a non-main thread is still in the queue,
           it needs to be swapped in so it can clean up
           and suspend itself. */
        break;
      }
    } else if (next->running & MZTHREAD_KILLED) {
      /* This one has been terminated. */
      if ((next->running & MZTHREAD_NEED_KILL_CLEANUP)
          || next->nester
          || !next->next) {
        /* The thread needs to clean up. Swap it in so it can die. */
        break;
      } else
        remove_thread(next);
      /* NOTE: after remove_thread(), we still break out with `next`
         set; the caller's `!next->running` check discards it. */
      break;
    } else if (next->external_break && scheme_can_break(next)) {
      break;
    } else {
      if (next->block_descriptor == GENERIC_BLOCKED) {
        if (next->block_check) {
          Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)next->block_check;
	  Scheme_Object *blocker = next->blocker;
          Scheme_Schedule_Info sinfo;
	  Thread_Schedule_State_Record ssr;
	  int b;

	  save_thread_schedule_state(p, &ssr, 0);

          init_schedule_info(&sinfo, next, 1, next->sleep_end);
	  b = f(blocker, &sinfo);

	  restore_thread_schedule_state(p, &ssr, 0);

          if (b)
            break;
          next->sleep_end = sinfo.sleep_end;
          msecs = 0.0; /* that could have taken a while */
        }
      } else if (next->block_descriptor == SLEEP_BLOCKED) {
        if (!msecs)
          msecs = scheme_get_inexact_milliseconds();
        if (next->sleep_end <= msecs)
          break;
      } else
        break;
    }

    /* Look for the next thread/set in this set */
    if (next->t_set_next)
      next_in_set = next->t_set_next;
    else
      next_in_set = t_set->first;

    /* If we run out of things to try in this set,
       go up to find the next set. */
    if (SAME_OBJ(next_in_set, t_set->search_start)) {
      /* Loop to go up past exhausted sets, clearing search_start
         from each exhausted set. */
      while (1) {
        t_set->search_start = NULL;
        t_set = t_set->parent;

        if (t_set) {
          next_in_set = get_t_set_next(t_set->current);
          if (!next_in_set)
            next_in_set = t_set->first;

          if (SAME_OBJ(next_in_set, t_set->search_start)) {
            t_set->search_start = NULL;
            /* continue going up */
          } else {
            t_set->current = next_in_set;
            break;
          }
        } else
          break;
      }

      if (!t_set) {
        /* We ran out of things to try. If we
           start again with the top, we should
           land back at p. */
        next = NULL;
        break;
      }
    } else {
      /* Set current... */
      t_set->current = next_in_set;
    }
    /* As we go back to the top of the loop, we'll check whether
       next_in_set is a thread or set, etc. */
  }

  /* Clear locals before returning so stale thread references don't
     linger on the C stack for the GC: */
  p           = NULL;
  next_in_set = NULL;
  t_set       = NULL;
  *return_arg = next;
  next        = NULL;
}
4816 
/* Trampoline target used when scheme_thread_block() must first grow
   the stack: recovers the requested (relative) sleep time stashed in
   `sleep_end` and re-enters scheme_thread_block(). */
static Scheme_Object *do_thread_block()
{
  Scheme_Thread *p = scheme_current_thread;
  /* NOTE(review): `sleep_end` (double) is narrowed to float here; at
     this point it holds a relative sleep time, not an absolute end
     time, so the precision loss appears harmless — confirm. */
  float sleep_time = p->sleep_end;
  p->sleep_end = 0.0;

  scheme_thread_block(sleep_time);

  return scheme_false;
}
4827 
/* The heart of the scheduler: give other threads a chance to run,
   possibly sleeping the whole process. Handles pending kills,
   suspensions, and breaks for the current thread before and after
   the potential swap. */
void scheme_thread_block(float sleep_time)
     /* If we're blocked, `sleep_time' is a max sleep time,
	not a min sleep time. Otherwise, it's a min & max sleep time.
	This proc auto-resets p's blocking info if an escape occurs. */
{
  double sleep_end;
  Scheme_Thread *next;
  Scheme_Thread *p = scheme_current_thread;
  int skip_sleep;

  if (p->return_marks_to) /* just in case we get here */
    return;

  if (p->running & MZTHREAD_KILLED) {
    /* This thread is dead! Give up now. */
    if (!do_atomic)
      exit_or_escape(p);
  }

  if ((p->running & MZTHREAD_USER_SUSPENDED)
      && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
    /* This thread was suspended. */
    scheme_wait_until_suspend_ok();
    if (!p->next) {
      /* Suspending the main thread... */
      select_thread();
    } else
      scheme_weak_suspend_thread(p);
  }

  /* Check scheduled_kills early and often. */
  check_scheduled_kills();

  /* Reap zombie processes: */
  rktio_reap_processes(scheme_rktio);

  shrink_cust_box_array();

  /* Scheduling queries might involve callbacks through the FFI that put
     the runtime system into `scheme_no_stack_overflow` mode. Imitate
     the foreign-call entry point with an extra check that we have enough
     stack to survive in foreign functions. */
  if (!scheme_no_stack_overflow && scheme_is_stack_too_shallow()) {
    p->sleep_end = sleep_time; /* an abuse of the `sleep_end` field to
                                  pass `sleep_end` along */
    (void)scheme_handle_stack_overflow(do_thread_block);
    return;
  }

  if (scheme_active_but_sleeping)
    scheme_wake_up();

  /* Convert the relative `sleep_time` (seconds) into an absolute
     wake-up time in milliseconds; 0 means no sleep requested. */
  if (sleep_time > 0) {
    sleep_end = scheme_get_inexact_milliseconds();
    sleep_end += (sleep_time * 1000.0);
  } else
    sleep_end = 0;

 start_sleep_check:

  check_ready_break();

  if (!p->external_break && !p->next && scheme_check_for_break && scheme_check_for_break())
    p->external_break = 1;

  if (p->external_break && !p->suspend_break && scheme_can_break(p)) {
    raise_break(p);
    /* A handler may have installed a new break, so re-check: */
    goto start_sleep_check;
  }

 swap_or_sleep:

  /* Check scheduled_kills early and often. */
  check_scheduled_kills();

#ifdef MZ_USE_FUTURES
  if (!do_atomic)
    scheme_check_future_work();
#endif
#if defined(MZ_USE_MZRT) && !defined(DONT_USE_FOREIGN) && !defined(MZ_USE_FFIPOLL)
  if (!do_atomic)
    scheme_check_foreign_work();
#endif
#if defined(MZ_USE_MZRT)
  if (!do_atomic)
    scheme_check_glib_log_messages();
#endif

  skip_sleep = 0;
  if (check_fd_semaphores()) {
    /* double check whether a semaphore for this thread woke up: */
    if (!do_atomic && (p->block_descriptor == GENERIC_BLOCKED)) {
      if (p->block_check) {
        Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)p->block_check;
	Scheme_Object *blocker = p->blocker;
        Scheme_Schedule_Info sinfo;
	Thread_Schedule_State_Record ssr;
	int b;

	save_thread_schedule_state(p, &ssr, 0);

        init_schedule_info(&sinfo, p, 1, sleep_end);
	b = f(blocker, &sinfo);

	restore_thread_schedule_state(p, &ssr, 0);

        if (b) {
          sleep_end = 0;
          skip_sleep = 1;
        } else {
          sleep_end = sinfo.sleep_end;
        }
      }
    }
  }

  if (!do_atomic && (sleep_end >= 0.0)) {
    find_next_thread(&next);
  } else
    next = NULL;

  if (next) {
    /* Clear out search_start fields */
    Scheme_Thread_Set *t_set;
    t_set = next->t_set_parent;
    while (t_set) {
      t_set->search_start = NULL;
      t_set = t_set->parent;
    }
    t_set = NULL;
  }

  /* Record how long this thread intends to sleep, so the scheduler
     knows when to wake it: */
  if ((sleep_end > 0.0) && (p->block_descriptor == NOT_BLOCKED)) {
    p->block_descriptor = SLEEP_BLOCKED;
    p->sleep_end = sleep_end;
  } else if ((sleep_end > 0.0) && (p->block_descriptor == GENERIC_BLOCKED)) {
    p->sleep_end = sleep_end;
  }

  if (next && (!next->running || (next->running & MZTHREAD_SUSPENDED))) {
    /* In the process of selecting another thread, it was suspended or
       removed. Very unusual, but possible if a block checker does
       strange things??? */
    next = NULL;
  }

#if 0
  /* Debugging: next must be in the chain of processes */
  if (next) {
    Scheme_Thread *p = scheme_first_thread;
    while (p != next) {
      p = p->next;
      if (!p) {
	printf("error: tried to switch to bad thread\n");
	exit(1);
      }
    }
  }
#endif

  /*####################################*/
  /* THREAD CONTEXT SWITCH HAPPENS HERE */
  /*####################################*/

  if (next) {
    /* Swap in `next', but first clear references to other threads. */
    swap_target = next;
    next = NULL;
    do_swap_thread();
  } else if (do_atomic && on_atomic_timeout
             && (atomic_timeout_auto_suspend < 2)) {
    /* Can't swap in atomic mode; give the atomic-timeout callback a
       chance to run instead: */
    if (!atomic_timeout_auto_suspend
        || (do_atomic <= atomic_timeout_atomic_level)) {
      if (atomic_timeout_auto_suspend) {
        atomic_timeout_auto_suspend++;
        scheme_fuel_counter = p->engine_weight;
        scheme_jit_stack_boundary = scheme_stack_boundary;
      }
      call_on_atomic_timeout(0);
      if (atomic_timeout_auto_suspend > 1)
        --atomic_timeout_auto_suspend;
    }
  } else {
    /* If all processes are blocked, check for total process sleeping: */
    if ((p->block_descriptor != NOT_BLOCKED) && !skip_sleep) {
      check_sleep(1, 1);
    }
  }

  /* Back in this thread; undo the temporary sleep-blocked state: */
  if (p->block_descriptor == SLEEP_BLOCKED) {
    p->block_descriptor = NOT_BLOCKED;
  }
  p->sleep_end = 0.0;

  /* Killed while I was asleep? */
  if (p->running & MZTHREAD_KILLED) {
    /* This thread is dead! Give up now. */
    if (p->running & MZTHREAD_NEED_KILL_CLEANUP) {
      /* The thread needs to clean up. It will block immediately to die. */
      return;
    } else {
      if (!do_atomic)
	exit_or_escape(p);
    }
  }

  /* Suspended while I was asleep? */
  if ((p->running & MZTHREAD_USER_SUSPENDED)
      && !(p->running & MZTHREAD_NEED_SUSPEND_CLEANUP)) {
    scheme_wait_until_suspend_ok();
    if (!p->next)
      scheme_thread_block(0.0); /* main thread handled at top of this function */
    else
      scheme_weak_suspend_thread(p);
  }

  /* Check for external break again after swap or sleep */
  check_ready_break();
  if (p->external_break && !p->suspend_break && scheme_can_break(p)) {
    raise_break(p);
  }

  /* Check for major GC request from master GC */
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  if (!do_atomic)
    GC_check_master_gc_request();
#endif

  /* Propagate memory-use information and check for custodian-based
     GC triggers due to child place memory use: */
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  scheme_place_check_memory_use();
  check_scheduled_kills();
#endif

  if (sleep_end > 0) {
    if (sleep_end > scheme_get_inexact_milliseconds()) {
      /* Still have time to sleep if necessary, but make sure we're
	 not ready (because maybe that's why we were swapped back in!) */
      if (p->block_descriptor == GENERIC_BLOCKED) {
	if (p->block_check) {
	  Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)p->block_check;
	  Scheme_Object *blocker = p->blocker;
	  Scheme_Schedule_Info sinfo;
	  Thread_Schedule_State_Record ssr;
	  int b;

	  save_thread_schedule_state(p, &ssr, 0);

	  init_schedule_info(&sinfo, p, 1, sleep_end);
	  b = f(blocker, &sinfo);

	  restore_thread_schedule_state(p, &ssr, 0);

	  if (b) {
	    sleep_end = 0;
	  } else {
	    sleep_end = sinfo.sleep_end;
	  }
	}
      }

      if (sleep_end > 0)
	goto swap_or_sleep;
    }
  }

  if (do_atomic)
    missed_context_switch = 1;

  /* Refill this thread's fuel for its next time slice: */
  scheme_fuel_counter = p->engine_weight;
  scheme_jit_stack_boundary = scheme_stack_boundary;

  scheme_kickoff_green_thread_time_slice_timer(MZ_THREAD_QUANTUM_USEC);

  /* Check scheduled_kills early and often. */
  check_scheduled_kills();
}
5106 
/* Mark the current thread as having made progress, so the scheduler
   doesn't treat it as starved/stuck. */
void scheme_making_progress()
{
  scheme_current_thread->ran_some = 1;
}
5111 
/* Block the current thread until ready function `_f` returns true
   for `data`, or (if `delay` is non-zero) until `delay` seconds have
   elapsed. `fdf` registers wakeup file descriptors for process-level
   sleeping. Returns the last (non-zero) result of `_f`, or 0 on
   timeout paths where `_f` never became ready.  */
int scheme_block_until(Scheme_Ready_Fun _f, Scheme_Needs_Wakeup_Fun fdf,
		       Scheme_Object *data, float delay)
{
  int result;
  Scheme_Thread *p = scheme_current_thread;
  Scheme_Ready_Fun_FPC f = (Scheme_Ready_Fun_FPC)_f;
  Scheme_Schedule_Info sinfo;
  double sleep_end;

  /* Absolute deadline in milliseconds (0.0 = wait forever): */
  if (!delay)
    sleep_end = 0.0;
  else {
    sleep_end = scheme_get_inexact_milliseconds();
    sleep_end += (delay * 1000.0);
  }

  /* We make an sinfo to be polite, but we also assume
     that f will not generate any redirections! */
  init_schedule_info(&sinfo, NULL, 1, sleep_end);

  while (!(result = f((Scheme_Object *)data, &sinfo))) {
    double now_sleep_end = sinfo.sleep_end;
    if (sinfo.spin) {
      /* Ready function asked for a busy-wait retry: */
      init_schedule_info(&sinfo, NULL, 1, 0.0);
      scheme_thread_block(0.0);
      scheme_current_thread->ran_some = 1;
    } else {
      /* Convert the remaining time to a relative delay for
         scheme_thread_block: */
      if (now_sleep_end) {
	delay = (float)(now_sleep_end - scheme_get_inexact_milliseconds());
	delay /= 1000.0;
	if (delay <= 0)
	  delay = (float)0.00001;
      } else
	delay = 0.0;

      /* Publish blocking state so the scheduler can poll `f` and
         register wakeups on our behalf: */
      p->block_descriptor = GENERIC_BLOCKED;
      p->blocker = (Scheme_Object *)data;
      p->block_check = (Scheme_Ready_Fun)f;
      p->block_needs_wakeup = fdf;

      scheme_thread_block(delay);

      p->block_descriptor = NOT_BLOCKED;
      p->blocker = NULL;
      p->block_check = NULL;
      p->block_needs_wakeup = NULL;
    }
    sinfo.sleep_end = sleep_end;
  }
  p->ran_some = 1;

  return result;
}
5165 
/* Like scheme_block_until, but when `enable_break` is true the wait
   happens with breaks enabled (checking for a pending break on
   entry). */
int scheme_block_until_enable_break(Scheme_Ready_Fun _f, Scheme_Needs_Wakeup_Fun fdf,
				    Scheme_Object *data, float delay, int enable_break)
{
  Scheme_Cont_Frame_Data cframe;
  int result;

  if (!enable_break)
    return scheme_block_until(_f, fdf, data, delay);

  scheme_push_break_enable(&cframe, 1, 1);
  result = scheme_block_until(_f, fdf, data, delay);
  scheme_pop_break_enable(&cframe, 0);

  return result;
}
5181 
/* Ready-function wrapper used by scheme_block_until_unless: unpacks
   the 4-element box built there and forwards to the original ready
   function with its original data. */
static int ready_unless(Scheme_Object *o)
{
  void **box = (void **)o;
  Scheme_Ready_Fun f = (Scheme_Ready_Fun)box[2];

  return f((Scheme_Object *)box[0]);
}
5192 
/* Wakeup-function wrapper used by scheme_block_until_unless: unpacks
   the 4-element box and forwards to the original wakeup function. */
static void needs_wakeup_unless(Scheme_Object *o, void *fds)
{
  void **box = (void **)o;
  Scheme_Needs_Wakeup_Fun fdf = (Scheme_Needs_Wakeup_Fun)box[3];

  fdf((Scheme_Object *)box[0], fds);
}
5203 
5204 
/* Like scheme_block_until_enable_break, but when `unless` is
   provided, the ready/wakeup pair is wrapped so the wrapper
   functions can consult `unless` alongside the original data. */
int scheme_block_until_unless(Scheme_Ready_Fun f, Scheme_Needs_Wakeup_Fun fdf,
			      Scheme_Object *data, float delay,
			      Scheme_Object *unless,
			      int enable_break)
{
  if (unless) {
    /* Box {data, unless, f, fdf} for the *_unless wrappers: */
    void **a;
    a = MALLOC_N(void *, 4);
    a[0] = data;
    a[1] = unless;
    a[2] = f;
    a[3] = fdf;

    data = (Scheme_Object *) mzALIAS a;
    f = ready_unless;
    if (fdf)
      fdf = needs_wakeup_unless;
  }

  return scheme_block_until_enable_break(f, fdf, data, delay, enable_break);
}
5226 
scheme_thread_block_enable_break(float sleep_time,int enable_break)5227 void scheme_thread_block_enable_break(float sleep_time, int enable_break)
5228 {
5229   if (enable_break) {
5230     Scheme_Cont_Frame_Data cframe;
5231 
5232     scheme_push_break_enable(&cframe, 1, 1);
5233     scheme_thread_block(sleep_time);
5234     scheme_pop_break_enable(&cframe, 0);
5235   } else
5236     scheme_thread_block(sleep_time);
5237 }
5238 
scheme_is_atomic(void)5239 int scheme_is_atomic(void)
5240 {
5241   return !!do_atomic;
5242 }
5243 
/* Enter an atomic region (no thread swaps); nestable via counter. */
void scheme_start_atomic(void)
{
  do_atomic++;
}
5248 
/* Enter an atomic region and additionally disable breaks. */
void scheme_start_atomic_no_break(void)
{
  scheme_start_atomic();
  all_breaks_disabled++;
}
5254 
scheme_end_atomic_no_swap(void)5255 void scheme_end_atomic_no_swap(void)
5256 {
5257   int v = --do_atomic;
5258   if (v < 0) {
5259     scheme_log_abort("unbalanced end-atomic");
5260     abort();
5261   }
5262 }
5263 
/* Enter scheduler mode: atomic (no swaps) plus no-stack-overflow
   handling, for running Racket code from within the scheduler. */
void scheme_start_in_scheduler(void)
{
  do_atomic++;
  scheme_no_stack_overflow++;
}
5269 
scheme_end_in_scheduler(void)5270 void scheme_end_in_scheduler(void)
5271 {
5272   int v = --do_atomic;
5273   --scheme_no_stack_overflow;
5274   if (v < 0) {
5275     scheme_log_abort("unbalanced end-atomic");
5276     abort();
5277   }
5278 }
5279 
/* Leave an atomic region; if a context switch was requested while
   atomic, perform it now. */
void scheme_end_atomic(void)
{
  scheme_end_atomic_no_swap();
  if (!do_atomic && missed_context_switch) {
    missed_context_switch = 0;
    scheme_thread_block(0.0);
    scheme_current_thread->ran_some = 1;
  }
}
5289 
/* Leave an atomic region entered with scheme_start_atomic_no_break,
   re-enabling breaks and checking for a pending one. */
void scheme_end_atomic_can_break(void)
{
  --all_breaks_disabled;
  scheme_end_atomic();
  if (!all_breaks_disabled)
    scheme_check_break_now();
}
5297 
/* Drain any atomic region before the current thread is suspended or
   swapped away for a long time; repeatedly invokes the registered
   atomic-timeout callback (with must=1) until atomic mode is exited.
   Returns non-zero if the callback was invoked at least once; aborts
   if atomic mode cannot be exited. */
int scheme_wait_until_suspend_ok(void)
{
  int did = 0;

  if (on_atomic_timeout) {
    /* new-style atomic timeout */
    if (do_atomic > atomic_timeout_atomic_level) {
      scheme_log_abort("attempted to wait for suspend in nested atomic mode");
      abort();
    }
  }

  while (do_atomic && on_atomic_timeout) {
    did = 1;
    /* Bump the auto-suspend counter around the callback so nested
       timeouts don't re-enter: */
    if (atomic_timeout_auto_suspend)
      atomic_timeout_auto_suspend++;
    call_on_atomic_timeout(1);
    if (atomic_timeout_auto_suspend > 1)
      --atomic_timeout_auto_suspend;
  }

  if (do_atomic) {
    scheme_log_abort("about to suspend in atomic mode");
    abort();
  }

  return did;
}
5326 
/* Install (or clear, with p == NULL) the atomic-timeout callback and
   its data; records the current atomic nesting level so timeouts
   only fire at that level. Returns the previously installed
   callback. */
Scheme_On_Atomic_Timeout_Proc scheme_set_on_atomic_timeout(Scheme_On_Atomic_Timeout_Proc p, void *data)
{
  Scheme_On_Atomic_Timeout_Proc prev = on_atomic_timeout;

  on_atomic_timeout = p;
  on_atomic_timeout_data = data;

  if (!p)
    atomic_timeout_auto_suspend = 0;
  else {
    atomic_timeout_auto_suspend = 1;
    atomic_timeout_atomic_level = do_atomic;
  }

  return prev;
}
5343 
call_timeout_callback(void * data,int must_give_up)5344 static void call_timeout_callback(void *data, int must_give_up)
5345 {
5346   Scheme_Object *a[1];
5347   a[0] = (must_give_up ? scheme_true : scheme_false);
5348 
5349   scheme_start_in_scheduler();
5350   _scheme_apply_multi((Scheme_Object *)data, 1, a);
5351   scheme_end_in_scheduler();
5352 }
5353 
/* Scheme-level entry: install argv[0] as the atomic-timeout callback
   (or clear it when #f); returns whether a callback was previously
   installed. */
static Scheme_Object *unsafe_set_on_atomic_timeout(int argc, Scheme_Object *argv[])
{
  Scheme_On_Atomic_Timeout_Proc prev;

  if (SCHEME_FALSEP(argv[0]))
    prev = scheme_set_on_atomic_timeout(NULL, NULL);
  else
    prev = scheme_set_on_atomic_timeout(call_timeout_callback, argv[0]);

  return (prev ? scheme_true : scheme_false);
}
5365 
/* Scheme-level `unsafe-start-atomic`: enters atomic mode with breaks
   disabled. */
static Scheme_Object *unsafe_start_atomic(int argc, Scheme_Object **argv)
{
  scheme_start_atomic_no_break();

  return scheme_void;
}
5371 
/* Scheme-level `unsafe-end-atomic`: leaves atomic mode, re-enabling
   breaks. */
static Scheme_Object *unsafe_end_atomic(int argc, Scheme_Object **argv)
{
  scheme_end_atomic_can_break();

  return scheme_void;
}
5377 
/* Scheme-level `unsafe-start-breakable-atomic`: enters atomic mode
   while leaving breaks enabled. */
static Scheme_Object *unsafe_start_breakable_atomic(int argc, Scheme_Object **argv)
{
  scheme_start_atomic();

  return scheme_void;
}
5383 
/* Scheme-level `unsafe-end-breakable-atomic`: leaves atomic mode
   (without the break check of unsafe_end_atomic). */
static Scheme_Object *unsafe_end_breakable_atomic(int argc, Scheme_Object **argv)
{
  scheme_end_atomic();

  return scheme_void;
}
5389 
/* Scheme-level `unsafe-in-atomic?`: reports whether the current thread
   is currently in atomic mode. */
static Scheme_Object *unsafe_in_atomic_p(int argc, Scheme_Object **argv)
{
  if (scheme_is_atomic())
    return scheme_true;
  return scheme_false;
}
5394 
5395 
/* Suspend thread `r` "weakly": unlink it from the run queue and its
   thread set, but keep it resumable via scheme_weak_resume_thread().
   If `r` is the current thread, another thread is selected to run
   before this returns. */
void scheme_weak_suspend_thread(Scheme_Thread *r)
{
  if (r->running & MZTHREAD_SUSPENDED)
    return; /* already weakly suspended */

  /* Unlink r from the doubly-linked list of scheduled threads.
     NOTE(review): assumes r->next is non-NULL for any suspendable r
     (i.e., r is never the sole/last scheduled thread here) -- confirm
     against scheduler invariants. */
  if (r->prev) {
    r->prev->next = r->next;
    r->next->prev = r->prev;
  } else {
    r->next->prev = NULL;
    scheme_first_thread = r->next;
  }

  r->next = r->prev = NULL;
  unschedule_in_set((Scheme_Object *)r, r->t_set_parent);

  r->running |= MZTHREAD_SUSPENDED;

  scheme_prepare_this_thread_for_GC(r);

  if (r == scheme_current_thread) {
    /* Suspending ourselves: hand control to some other thread. */
    select_thread();

    /* Killed while suspended? */
    if ((r->running & MZTHREAD_KILLED) && !(r->running & MZTHREAD_NEED_KILL_CLEANUP))
      scheme_thread_block(0);
  }
}
5424 
/* Undo a weak suspension: put `r` back on the front of the run queue
   and back into its thread set. Has no effect on threads that are
   user-suspended (those must go through thread_resume) or not weakly
   suspended at all. */
void scheme_weak_resume_thread(Scheme_Thread *r)
     /* This function can be called from an interrupt handler, but
	only for the main thread, which is never suspended. */
{
  if (!(r->running & MZTHREAD_USER_SUSPENDED)) {
    if (r->running & MZTHREAD_SUSPENDED) {
      r->running -= MZTHREAD_SUSPENDED;
      /* Link r at the head of the scheduled-thread list. */
      r->next = scheme_first_thread;
      r->prev = NULL;
      scheme_first_thread = r;
      r->next->prev = r;
      r->ran_some = 1;
      schedule_in_set((Scheme_Object *)r, r->t_set_parent);
      check_tail_buffer_size(r);
    }
  }
}
5442 
/* Hook called just before the C stack is moved (e.g., around a thread
   swap); currently nothing needs to happen here. */
void scheme_about_to_move_C_stack(void)
{
}
5446 
5447 static Scheme_Object *
sch_sleep(int argc,Scheme_Object * args[])5448 sch_sleep(int argc, Scheme_Object *args[])
5449 {
5450   float t;
5451 
5452   if (argc && !SCHEME_REALP(args[0]))
5453     scheme_wrong_contract("sleep", "(>=/c 0.0)", 0, argc, args);
5454 
5455   if (argc) {
5456     t = (float)scheme_real_to_double(args[0]);
5457     if (t < 0)
5458       scheme_wrong_contract("sleep", "(>=/c 0.0)", 0, argc, args);
5459   } else
5460     t = 0;
5461 
5462   scheme_thread_block(t);
5463   scheme_current_thread->ran_some = 1;
5464 
5465   return scheme_void;
5466 }
5467 
/* Scheme-level `unsafe-poll-fd`: polls a raw system descriptor for
   read or write readiness without blocking. argv[0] is the descriptor
   as an integer, argv[1] is 'read or 'write, and the optional argv[2]
   says whether the descriptor is a socket (defaults to true).
   Returns #t when the descriptor is ready. */
Scheme_Object *unsafe_poll_fd(int argc, Scheme_Object **argv)
{
  intptr_t sysfd = 0;
  rktio_fd_t *fd;
  int poll_mode = 0, result = 0, as_socket;

  if (!scheme_get_int_val(argv[0], &sysfd))
    scheme_wrong_contract("unsafe-poll-fd", "handle-integer?", 0, argc, argv);

  if (SAME_OBJ(argv[1], read_symbol))
    poll_mode = RKTIO_POLL_READ;
  else if (SAME_OBJ(argv[1], write_symbol))
    poll_mode = RKTIO_POLL_WRITE;
  else
    scheme_wrong_contract("unsafe-poll-fd", "(or/c 'read 'write)", 1, argc, argv);

  as_socket = ((argc > 2) ? SCHEME_TRUEP(argv[2]) : 1);

  /* Wrap the raw descriptor for rktio; rktio_forget() below releases
     the wrapper without closing the underlying descriptor. */
  fd = rktio_system_fd(scheme_rktio, sysfd, (as_socket ? RKTIO_OPEN_SOCKET : 0));

  if (poll_mode == RKTIO_POLL_READ)
    result = rktio_poll_read_ready(scheme_rktio, fd);
  else if (poll_mode == RKTIO_POLL_WRITE)
    result = rktio_poll_write_ready(scheme_rktio, fd);

  rktio_forget(scheme_rktio, fd);

  return (result == RKTIO_POLL_READY) ? scheme_true : scheme_false;
}
5500 
/* Scheme-level `unsafe-poll-ctx-fd-wakeup`: registers a descriptor in
   a poll context's fd sets so a sleeping scheduler wakes when the
   descriptor becomes ready. argv[0] is the poll context (or #f for a
   no-op), argv[1] the descriptor (integer or rktio-fd cpointer), and
   argv[2] selects the set: 'read, 'write, or anything else for the
   third (exception) set. */
Scheme_Object *unsafe_poll_ctx_fd_wakeup(int argc, Scheme_Object **argv)
{
  void *fds;
  intptr_t sysfd;
  int which;

  if (!SCHEME_TRUEP(argv[0]))
    return scheme_void;

  fds = SCHEME_CPTR_VAL(argv[0]);

  if (SCHEME_INTP(argv[1]))
    sysfd = SCHEME_INT_VAL(argv[1]);
  else
    sysfd = rktio_fd_system_fd(scheme_rktio, (rktio_fd_t *)SCHEME_CPTR_VAL(argv[1]));

  if (SAME_OBJ(argv[2], read_symbol))
    which = 0;
  else if (SAME_OBJ(argv[2], write_symbol))
    which = 1;
  else
    which = 2;

  scheme_fdset(scheme_get_fdset(fds, which), sysfd);

  return scheme_void;
}
5525 
/* Scheme-level `unsafe-poll-ctx-eventmask-wakeup`: adds an event mask
   to the poll context's wakeup set; a no-op when argv[0] is #f. */
Scheme_Object *unsafe_poll_ctx_eventmask_wakeup(int argc, Scheme_Object **argv)
{
  if (SCHEME_TRUEP(argv[0]))
    scheme_add_fd_eventmask(SCHEME_CPTR_VAL(argv[0]),
                            SCHEME_INT_VAL(argv[1]));

  return scheme_void;
}
5537 
/* Scheme-level `unsafe-poll-ctx-milliseconds-wakeup`: sets a wall-time
   wakeup (in milliseconds) on the poll context; a no-op when argv[0]
   is #f. */
Scheme_Object *unsafe_poll_ctx_time_wakeup(int argc, Scheme_Object **argv)
{
  if (SCHEME_TRUEP(argv[0]))
    scheme_set_wakeup_time(SCHEME_CPTR_VAL(argv[0]),
                           SCHEME_DBL_VAL(argv[1]));

  return scheme_void;
}
5549 
/* Scheme-level `unsafe-signal-received`: wakes this place's scheduler
   as if an external signal arrived. */
Scheme_Object *unsafe_signal_received(int argc, Scheme_Object **argv)
{
  scheme_signal_received();

  return scheme_void;
}
5555 
/* Closure body for the procedure built by unsafe_make_signal_received:
   signals the place handle captured as the closure's only element. */
static Scheme_Object *do_signal_received(int argc, Scheme_Object **argv, Scheme_Object *self)
{
  scheme_signal_received_at(SCHEME_PRIM_CLOSURE_ELS(self)[0]);

  return scheme_void;
}
5562 
/* Scheme-level `unsafe-make-signal-received`: captures the current
   place's signal handle in a closure, producing a 0-argument procedure
   that wakes this place's scheduler when applied. */
Scheme_Object *unsafe_make_signal_received(int argc, Scheme_Object **argv)
{
  Scheme_Object *cell[1];

  cell[0] = (Scheme_Object *)scheme_get_signal_handle();

  return scheme_make_prim_closure_w_arity(do_signal_received, 1, cell,
                                          "unsafe-signal-received", 0, 0);
}
5573 
/* Place-sleep hook that delegates sleeping to a helper thread (OS X
   only): a sleeper thread runs scheme_sleep() while the Scheme-level
   callback (thread_sleep_callback) runs in the scheduler; the callback
   fd is used to wake the sleeper. On other platforms this compiles to
   a no-op. */
static void sleep_via_thread(float seconds, void *fds)
{
#ifdef OS_X
  scheme_start_sleeper_thread(scheme_sleep, seconds, fds, thread_sleep_callback_fd);
  scheme_start_in_scheduler();
  _scheme_apply_multi(thread_sleep_callback, 0, NULL);
  scheme_end_in_scheduler();
  scheme_end_sleeper_thread();
#endif
}
5584 
/* Scheme-level `unsafe-set-sleep-in-thread!`: installs argv[0] as the
   callback to run in the scheduler while a helper thread sleeps, and
   argv[1] (integer or rktio-fd cpointer) as the descriptor used to
   wake the sleeper; enables sleep_via_thread as the place-sleep hook. */
Scheme_Object *unsafe_set_sleep_in_thread(int argc, Scheme_Object **argv)
{
  intptr_t fd;

  if (!thread_sleep_callback)
    REGISTER_SO(thread_sleep_callback);
  thread_sleep_callback = argv[0];

  if (SCHEME_INTP(argv[1]))
    fd = SCHEME_INT_VAL(argv[1]);
  else
    fd = rktio_fd_system_fd(scheme_rktio, (rktio_fd_t *)SCHEME_CPTR_VAL(argv[1]));
  thread_sleep_callback_fd = fd;

  scheme_place_sleep = sleep_via_thread;

  return scheme_void;
}
5600 
/* Scheme-level `break-thread`: delivers a break to the given thread.
   An optional second argument selects the break kind: #f for a plain
   break, or the symbols 'hang-up / 'terminate for those variants. */
static Scheme_Object *break_thread(int argc, Scheme_Object *args[])
{
  int kind = MZEXN_BREAK;

  if (!SAME_TYPE(SCHEME_TYPE(args[0]), scheme_thread_type))
    scheme_wrong_contract("break-thread", "thread?", 0, argc, args);

  if ((argc > 1) && SCHEME_TRUEP(args[1])) {
    Scheme_Object *sym = args[1];
    int plain_sym = (SCHEME_SYMBOLP(sym) && !SCHEME_SYM_WEIRDP(sym));

    if (plain_sym && !strcmp(SCHEME_SYM_VAL(sym), "hang-up"))
      kind = MZEXN_BREAK_HANG_UP;
    else if (plain_sym && !strcmp(SCHEME_SYM_VAL(sym), "terminate"))
      kind = MZEXN_BREAK_TERMINATE;
    else
      scheme_wrong_contract("break-thread", "(or/c #f 'hang-up 'terminate)", 1, argc, args);
  }

  scheme_break_kind_thread((Scheme_Thread *)args[0], kind);

  /* The target might be the current thread (or the current thread may
     have its own pending break), so check for breaks right away. */
  scheme_check_break_now();

  return scheme_void;
}
5630 
/* Core of thread termination: runs the thread's private kill-action
   stack and on_kill hook, releases its custodian references, and then
   either removes it from the scheduler or resumes it so it can perform
   its own kill cleanup. Returns 1 when the caller must finish the job
   on the current thread (suspend or block "self"); 0 otherwise. */
static int do_kill_thread(Scheme_Thread *p)
{
  int kill_self = 0;

  if (!MZTHREAD_STILL_RUNNING(p->running)) {
    return 0; /* already dead */
  }

  if (p->suspend_to_kill) {
    /* This thread is "killed" by suspending it instead. */
    if (p == scheme_current_thread)
      return 1; /* suspend in caller */
    suspend_thread(p);
    return 0;
  }

  if (p->nestee)
    scheme_break_thread(p->nestee);

  /* Run and pop the whole stack of private kill actions
     (see scheme_push_kill_action). */
  while (p->private_on_kill) {
    p->private_on_kill(p->private_kill_data);
    if (p->private_kill_next) {
      p->private_on_kill = (Scheme_Kill_Action_Func)p->private_kill_next[0];
      p->private_kill_data = p->private_kill_next[1];
      p->private_kill_next = (void **)p->private_kill_next[2];
    } else {
      p->private_on_kill = NULL;
      p->private_kill_data = NULL;
    }
  }

  if (p->on_kill)
    p->on_kill(p);

  /* Drop the thread's main and extra custodian references. */
  scheme_remove_managed(p->mref, (Scheme_Object *)p->mr_hop);
  {
    Scheme_Object *l;
    for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
      scheme_remove_managed((Scheme_Custodian_Reference *)SCHEME_CAR(l),
			    (Scheme_Object *)p->mr_hop);
    }
  }

  if (p->running) {
    if (p->running & MZTHREAD_USER_SUSPENDED) {
      /* end user suspension, because we need to kill the thread */
      p->running -= MZTHREAD_USER_SUSPENDED;
    }

    p->running |= MZTHREAD_KILLED;
    /* A thread that needs kill cleanup (or has a nester) must run
       again to clean up after itself; otherwise it can be removed
       from the scheduler directly. */
    if ((p->running & MZTHREAD_NEED_KILL_CLEANUP)
	|| p->nester)
      scheme_weak_resume_thread(p);
    else if (p != scheme_current_thread) {
      /* Do kill stuff... */
      if (p->next)
	remove_thread(p);
    }
  }
  if (p == scheme_current_thread)
    kill_self = 1;

  return kill_self;
}
5694 
/* Kill thread p (respecting its suspend_to_kill mode). When p turns
   out to be the current thread, first make sure suspension is allowed,
   then suspend or block in place. */
void scheme_kill_thread(Scheme_Thread *p)
{
  if (do_kill_thread(p)) {
    /* Suspend/kill self: */
    scheme_wait_until_suspend_ok();
    if (p->suspend_to_kill)
      suspend_thread(p);
    else
      scheme_thread_block(0.0);
  }

  /* Give killed threads time to die: */
  scheme_thread_block(0.0);
  scheme_current_thread->ran_some = 1;
}
5710 
/* Scheme-level `kill-thread`: terminates the given thread after a
   custodian-permission check; a no-op when the thread already
   stopped running. */
static Scheme_Object *kill_thread(int argc, Scheme_Object *argv[])
{
  Scheme_Thread *target;

  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
    scheme_wrong_contract("kill-thread", "thread?", 0, argc, argv);

  target = (Scheme_Thread *)argv[0];

  if (MZTHREAD_STILL_RUNNING(target->running)) {
    check_current_custodian_allows("kill-thread", target);
    scheme_kill_thread(target);
  }

  return scheme_void;
}
5727 
/* Push a kill-time cleanup action for the current thread. Actions form
   a stack (popped by scheme_pop_kill_action); do_kill_thread runs and
   pops all of them when the thread is killed. */
void scheme_push_kill_action(Scheme_Kill_Action_Func f, void *d)
{
  Scheme_Thread *p = scheme_current_thread;

  if (p->private_on_kill) {
    /* Nesting is unlikely -- an exception handler would have to block
       on and within operations that need special kill handling -- but
       possible, so save the current action on a linked stack. */
    void **saved;
    saved = MALLOC_N(void *, 3);
    saved[0] = (void *)p->private_on_kill;
    saved[1] = p->private_kill_data;
    saved[2] = (void *)p->private_kill_next;
    p->private_kill_next = saved;
  }

  p->private_on_kill = f;
  p->private_kill_data = d;
}
5747 
scheme_pop_kill_action()5748 void scheme_pop_kill_action()
5749 {
5750   Scheme_Thread *p = scheme_current_thread;
5751 
5752   if (p->private_kill_next) {
5753     p->private_on_kill = (Scheme_Kill_Action_Func)p->private_kill_next[0];
5754     p->private_kill_data = p->private_kill_next[1];
5755     p->private_kill_next = (void **)p->private_kill_next[2];
5756   } else {
5757     p->private_on_kill = NULL;
5758     p->private_kill_data = NULL;
5759   }
5760 }
5761 
5762 /*========================================================================*/
5763 /*                      suspend/resume and evts                          */
5764 /*========================================================================*/
5765 
5766 /* Forward decl: */
5767 static void transitive_resume(Scheme_Object *resumes);
5768 static void transitive_promote(Scheme_Thread *p, Scheme_Custodian *c);
5769 static void promote_thread(Scheme_Thread *p, Scheme_Custodian *to_c);
5770 
/* Scheme-level `thread-suspend`: suspends the given thread after a
   custodian-permission check. */
static Scheme_Object *thread_suspend(int argc, Scheme_Object *argv[])
{
  Scheme_Thread *target;

  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
    scheme_wrong_contract("thread-suspend", "thread?", 0, argc, argv);

  target = (Scheme_Thread *)argv[0];

  check_current_custodian_allows("thread-suspend", target);

  suspend_thread(target);

  return scheme_void;
}
5786 
/* Put thread p into user (strong) suspension: fire its suspend evt,
   then (a) just mark and block if p is the main thread, (b) weakly
   resume p so it can first run pending suspend/kill cleanup, or
   (c) weakly suspend p. A no-op if p is already dead or already
   user-suspended. */
static void suspend_thread(Scheme_Thread *p)
{
  int running;

  if (!MZTHREAD_STILL_RUNNING(p->running))
    return;

  if (p->running & MZTHREAD_USER_SUSPENDED)
    return;

  /* Get running now, just in case the thread is waiting on its own
     suspend event (in which case posting to the sema will unsuspend
     the thread) */
  running = p->running;

  p->resumed_box = NULL;
  if (p->suspended_box) {
    /* Make the suspend evt ready and wake anyone syncing on it. */
    SCHEME_PTR2_VAL(p->suspended_box) = (Scheme_Object *)p;
    scheme_post_sema_all(SCHEME_PTR1_VAL(p->suspended_box));
  }
  if (p->running_box && (!(p->running & MZTHREAD_SUSPENDED))) {
    /* Make transitive-resume link strong, instead of weak: */
    SCHEME_PTR_VAL(p->running_box) = (Scheme_Object *)p;
  }

  if (SAME_OBJ(p, scheme_main_thread)) {
    /* p is the main thread, which we're not allowed to
       suspend in the normal way. */
    p->running |= MZTHREAD_USER_SUSPENDED;
    scheme_main_was_once_suspended = 1;
    if (p == scheme_current_thread) {
      scheme_thread_block(0.0);
      p->ran_some = 1;
    }
  } else if ((running & (MZTHREAD_NEED_KILL_CLEANUP
			 | MZTHREAD_NEED_SUSPEND_CLEANUP))
	     && (running & MZTHREAD_SUSPENDED)) {
    /* p probably needs to get out of semaphore-wait lines, etc. */
    scheme_weak_resume_thread(p);
    p->running |= MZTHREAD_USER_SUSPENDED;
  } else {
    if (p == scheme_current_thread) {
      /* Make sure a self-suspend is legal right now. */
      scheme_wait_until_suspend_ok();
    }
    p->running |= MZTHREAD_USER_SUSPENDED;
    scheme_weak_suspend_thread(p); /* ok if p is scheme_current_thread */
    if (p == scheme_current_thread) {
      /* Need to check for breaks */
      scheme_check_break_now();
    }
  }
}
5839 
/* Record that resuming `promote_to` should also resume `p`. The link
   lives in promote_to->transitive_resumes, a hash table keyed by p's
   "running box". The box refers to p weakly while p is running and
   strongly while p is user-suspended (see suspend_thread /
   thread_resume), so dead threads can be purged from the table. */
static void add_transitive_resume(Scheme_Thread *promote_to, Scheme_Thread *p)
{
  Scheme_Object *running_box;
  Scheme_Hash_Table *ht;

  if (!p->running_box) {
    /* Create p's running box: hold p strongly if it is currently
       user-suspended (and not merely weakly suspended), weakly
       otherwise. */
    Scheme_Object *b, *wb;
    if ((p->running & MZTHREAD_USER_SUSPENDED)
        && !(p->running & MZTHREAD_SUSPENDED))
      wb = (Scheme_Object *)p;
    else
      wb = scheme_make_weak_box((Scheme_Object *)p);
    b = scheme_alloc_small_object();
    b->type = scheme_thread_dead_type;
    SCHEME_PTR_VAL(b) = (Scheme_Object *)wb;
    p->running_box = b;
  }
  running_box = p->running_box;

  if (!promote_to->transitive_resumes) {
    /* Create table */
    ht = scheme_make_hash_table(SCHEME_hash_ptr);
    promote_to->transitive_resumes = (Scheme_Object *)ht;
  } else {
    /* Purge ht entries for threads that are now dead: */
    Scheme_Hash_Table *gone = NULL;
    Scheme_Object *b;
    int i;

    ht = (Scheme_Hash_Table *)promote_to->transitive_resumes;
    for (i = ht->size; i--; ) {
      if (ht->vals[i]) {
        /* A key's box is dead if empty or if its weak box was cleared. */
        b = SCHEME_PTR_VAL(ht->keys[i]);
	if (!b
            || (SAME_TYPE(SCHEME_TYPE(b), scheme_weak_box_type)
                && !SCHEME_WEAK_BOX_VAL(b))) {
	  /* This one is dead */
	  if (!gone)
	    gone = scheme_make_hash_table(SCHEME_hash_ptr);
	  scheme_hash_set(gone, ht->keys[i], scheme_true);
	}
      }
    }

    if (gone) {
      /* Remove dead ones: */
      for (i = gone->size; i--; ) {
	if (gone->vals[i]) {
	  scheme_hash_set(ht, gone->keys[i], NULL);
	}
      }
    }
  }

  scheme_hash_set(ht, running_box, scheme_true);
}
5896 
transitive_resume_k(void)5897 static Scheme_Object *transitive_resume_k(void)
5898 {
5899   Scheme_Thread *p = scheme_current_thread;
5900   Scheme_Object *r = (Scheme_Object *)p->ku.k.p1;
5901 
5902   p->ku.k.p1 = NULL;
5903 
5904   transitive_resume(r);
5905 
5906   return scheme_true;
5907 }
5908 
/* Resume every thread recorded in the transitive-resume table
   `resumes` (a hash table keyed by running-box objects; see
   add_transitive_resume). Guards against deep C recursion, since
   thread_resume() can recur back into transitive_resume(). */
static void transitive_resume(Scheme_Object *resumes)
{
  Scheme_Hash_Table *ht;
  Scheme_Object *a[1];
  int i;

#ifdef DO_STACK_CHECK
#include "mzstkchk.h"
  {
    /* C stack too deep: stash the argument and restart through the
       overflow handler. */
    Scheme_Thread *p = scheme_current_thread;

    p->ku.k.p1 = resumes;

    p->suspend_break++;
    scheme_start_atomic();
    scheme_handle_stack_overflow(transitive_resume_k);
    scheme_end_atomic_no_swap();
    --p->suspend_break;

    return;
  }
#endif

  ht = (Scheme_Hash_Table *)resumes;

  for (i = ht->size; i--; ) {
    if (ht->vals[i]) {
      /* A key's box holds the thread directly or via a weak box;
         a cleared weak box means the thread is gone. */
      a[0] = SCHEME_PTR_VAL(ht->keys[i]);
      if (a[0]) {
        if (SAME_TYPE(SCHEME_TYPE(a[0]), scheme_weak_box_type))
          a[0] = SCHEME_WEAK_BOX_VAL(a[0]);
        if (a[0])
          thread_resume(1, a);
      }
    }
  }
}
5946 
transitive_promote_k(void)5947 static Scheme_Object *transitive_promote_k(void)
5948 {
5949   Scheme_Thread *p = scheme_current_thread;
5950   Scheme_Thread *pp = (Scheme_Thread *)p->ku.k.p1;
5951   Scheme_Custodian *c = (Scheme_Custodian *)p->ku.k.p2;
5952 
5953   p->ku.k.p1 = NULL;
5954   p->ku.k.p2 = NULL;
5955 
5956   transitive_promote(pp, c);
5957 
5958   return scheme_true;
5959 }
5960 
/* Promote every thread recorded in p's transitive-resume table to
   custodian c (via promote_thread). Guards against deep C recursion,
   since promote_thread() recurs back into transitive_promote(). */
static void transitive_promote(Scheme_Thread *p, Scheme_Custodian *c)
{
  Scheme_Hash_Table *ht;
  Scheme_Object *t;
  int i;

#ifdef DO_STACK_CHECK
#include "mzstkchk.h"
  {
    /* C stack too deep: stash the arguments and restart through the
       overflow handler. */
    Scheme_Thread *pp = scheme_current_thread;

    pp->ku.k.p1 = p;
    pp->ku.k.p2 = c;

    pp->suspend_break++;
    scheme_start_atomic();
    scheme_handle_stack_overflow(transitive_promote_k);
    scheme_end_atomic_no_swap();
    --pp->suspend_break;

    return;
  }
#endif

  if (!p->transitive_resumes)
    return;

  ht = (Scheme_Hash_Table *)p->transitive_resumes;

  for (i = ht->size; i--; ) {
    if (ht->vals[i]) {
      /* A key's box holds the thread directly or via a weak box. */
      t = SCHEME_PTR_VAL(ht->keys[i]);
      if (SAME_TYPE(SCHEME_TYPE(t), scheme_weak_box_type))
        t = SCHEME_WEAK_BOX_VAL(t);
      if (t)
	promote_thread((Scheme_Thread *)t, c);
    }
  }
}
6000 
/* Add custodian to_c to thread p's custodian set, merging with p's
   existing main and extra custodians: if an existing custodian is
   already an ancestor of to_c, nothing changes; if to_c is an ancestor
   of an existing one, to_c replaces it; otherwise to_c is added as an
   extra custodian. Recurs over p's transitive-resume targets. */
static void promote_thread(Scheme_Thread *p, Scheme_Custodian *to_c)
{
  Scheme_Custodian *c, *cx;
  Scheme_Custodian_Reference *mref;
  Scheme_Object *l;

  /* This function also handles transitive promotion. Every transitive
     target for p always has at least the custodians of p, so if we don't
     add a custodian to p, we don't need to check the rest. */

  if (!p->mref || !CUSTODIAN_FAM(p->mref)) {
    /* The thread has no running custodian, so fall through to
       just use to_c */
  } else {
    c = CUSTODIAN_FAM(p->mref);

    /* Check whether c is an ancestor of to_c (in which case we do nothing) */
    for (cx = to_c; cx && NOT_SAME_OBJ(cx, c); ) {
      cx = CUSTODIAN_FAM(cx->parent);
    }
    if (cx) return;

    /* Check whether any of the extras are super to to_c.
       If so, do nothing. */
    for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
      mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
      c = CUSTODIAN_FAM(mref);

      for (cx = to_c; cx && NOT_SAME_OBJ(cx, c); ) {
	cx = CUSTODIAN_FAM(cx->parent);
      }
      if (cx) return;
    }

    /* Check whether to_c is super of c: */
    for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
      cx = CUSTODIAN_FAM(cx->parent);
    }

    /* If cx, fall through to replace the main custodian with to_c,
       because it's an ancestor of the current one. Otherwise, they're
       unrelated. */
    if (!cx) {
      /* Check whether any of the extras should be replaced by to_c */
      for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
	/* Is to_c super of c? */
	for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
	  cx = CUSTODIAN_FAM(cx->parent);
	}
	if (cx) {
	  /* Replace this custodian with to_c */
	  mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
	  scheme_remove_managed(mref, (Scheme_Object *)p->mr_hop);
	  mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
	  SCHEME_CAR(l) = (Scheme_Object *)mref;

	  /* It's possible that one of the other custodians is also
	     junior to to_c. Remove it if we find one. */
	  {
	    Scheme_Object *prev;
	    prev = l;
	    for (l = SCHEME_CDR(l); !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
	      mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
	      c = CUSTODIAN_FAM(mref);
	      for (cx = c; cx && NOT_SAME_OBJ(cx, to_c); ) {
		cx = CUSTODIAN_FAM(cx->parent);
	      }
	      if (cx)
		SCHEME_CDR(prev) = SCHEME_CDR(l);
	    }
	  }

	  transitive_promote(p, to_c);

	  return;
	}
      }

      /* Otherwise, this is custodian is unrelated to the existing ones.
	 Add it as an extra custodian. */
      mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
      l = scheme_make_raw_pair((Scheme_Object *)mref, p->extra_mrefs);
      p->extra_mrefs = l;

      transitive_promote(p, to_c);
      return;
    }
  }

  /* Replace p's main custodian (if any) with to_c */
  scheme_remove_managed(p->mref, (Scheme_Object *)p->mr_hop);
  mref = scheme_add_managed(to_c, (Scheme_Object *)p->mr_hop, NULL, NULL, 0);
  p->mref = mref;
#ifdef MZ_PRECISE_GC
  GC_register_thread(p, to_c);
#endif

  transitive_promote(p, to_c);
}
6100 
/* Scheme-level `thread-resume`: resumes a user-suspended thread. The
   optional second argument donates custodians to the target: a thread
   donates its whole custodian set, a custodian donates itself.
   Resuming fires the thread's resume evt and transitively resumes
   threads linked via add_transitive_resume(). */
static Scheme_Object *thread_resume(int argc, Scheme_Object *argv[])
{
  Scheme_Thread *p, *promote_to = NULL;
  Scheme_Custodian *promote_c = NULL;

  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
    scheme_wrong_contract("thread-resume", "thread?", 0, argc, argv);

  p = (Scheme_Thread *)argv[0];

  if (argc > 1) {
    if (SAME_TYPE(SCHEME_TYPE(argv[1]), scheme_thread_type))
      promote_to = (Scheme_Thread *)argv[1];
    else if (SAME_TYPE(SCHEME_TYPE(argv[1]), scheme_custodian_type)) {
      promote_c = (Scheme_Custodian *)argv[1];
      if (promote_c->shut_down)
	promote_c = NULL; /* a shut-down custodian has nothing to donate */
    } else {
      scheme_wrong_contract("thread-resume", "(or/c thread? custodian?)", 1, argc, argv);
      return NULL;
    }
  }

  if (!MZTHREAD_STILL_RUNNING(p->running))
    return scheme_void;

  /* Change/add custodians for p from promote_p */
  if (promote_to) {
    Scheme_Object *l;
    Scheme_Custodian_Reference *mref;

    /* If promote_to doesn't have a working custodian, there's
       nothing to donate */
    if (promote_to->mref && CUSTODIAN_FAM(promote_to->mref)) {
      promote_thread(p, CUSTODIAN_FAM(promote_to->mref));

      for (l = p->extra_mrefs; !SCHEME_NULLP(l); l = SCHEME_CDR(l)) {
	mref = (Scheme_Custodian_Reference *)SCHEME_CAR(l);
	promote_thread(p, CUSTODIAN_FAM(mref));
      }
    }
  }
  if (promote_c)
    promote_thread(p, promote_c);

  /* Set up transitive resume for future resumes of promote_to: */
  if (promote_to
      && MZTHREAD_STILL_RUNNING(promote_to->running)
      && !SAME_OBJ(promote_to, p))
    add_transitive_resume(promote_to, p);

  /* Check whether the thread has a non-shut-down custodian;
     without one, it stays suspended. */
  {
    Scheme_Custodian *c;

    if (p->mref)
      c = CUSTODIAN_FAM(p->mref);
    else
      c = NULL;

    if (!c || c->shut_down)
      return scheme_void;
  }

  if (p->running & MZTHREAD_USER_SUSPENDED) {
    p->suspended_box = NULL;
    if (p->resumed_box) {
      /* Make the resume evt ready and wake anyone syncing on it. */
      SCHEME_PTR2_VAL(p->resumed_box) = (Scheme_Object *)p;
      scheme_post_sema_all(SCHEME_PTR1_VAL(p->resumed_box));
    }

    if (p->running_box && !(p->running & MZTHREAD_SUSPENDED)) {
      /* Make transitive-resume weak: */
      Scheme_Object *wb;
      wb = scheme_make_weak_box((Scheme_Object *)p);
      SCHEME_PTR_VAL(p->running_box) = wb;
    }

    p->running -= MZTHREAD_USER_SUSPENDED;

    scheme_weak_resume_thread(p);

    if (p->transitive_resumes)
      transitive_resume(p->transitive_resumes);
  }

  return scheme_void;
}
6189 
/* Scheme-level `thread-suspend-evt`: returns the suspend event for the
   given thread. */
static Scheme_Object *make_thread_suspend(int argc, Scheme_Object *argv[])
{
  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
    scheme_wrong_contract("thread-suspend-evt", "thread?", 0, argc, argv);

  return scheme_get_thread_suspend((Scheme_Thread *)argv[0]);
}
6201 
scheme_get_thread_suspend(Scheme_Thread * p)6202 Scheme_Object *scheme_get_thread_suspend(Scheme_Thread *p)
6203 {
6204   if (!p->suspended_box) {
6205     Scheme_Object *b;
6206     b = scheme_alloc_object();
6207     b->type = scheme_thread_suspend_type;
6208     if (MZTHREAD_STILL_RUNNING(p->running) && (p->running & MZTHREAD_USER_SUSPENDED))
6209       SCHEME_PTR2_VAL(b) = (Scheme_Object *)p;
6210     else {
6211       Scheme_Object *sema;
6212       sema = scheme_make_sema(0);
6213       SCHEME_PTR1_VAL(b) = sema;
6214     }
6215     p->suspended_box = b;
6216   }
6217 
6218   return p->suspended_box;
6219 }
6220 
/* Scheme-level `thread-resume-evt`: returns (creating on demand) the
   resume event for the given thread. The evt holds either the thread
   itself (currently running, so ready immediately) or a semaphore
   that thread_resume() posts. */
static Scheme_Object *make_thread_resume(int argc, Scheme_Object *argv[])
{
  Scheme_Thread *p;
  Scheme_Object *evt;

  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
    scheme_wrong_contract("thread-resume-evt", "thread?", 0, argc, argv);

  p = (Scheme_Thread *)argv[0];

  evt = p->resumed_box;
  if (!evt) {
    evt = scheme_alloc_object();
    evt->type = scheme_thread_resume_type;
    if (MZTHREAD_STILL_RUNNING(p->running) && !(p->running & MZTHREAD_USER_SUSPENDED)) {
      /* Already running: the evt is ready from the start. */
      SCHEME_PTR2_VAL(evt) = (Scheme_Object *)p;
    } else {
      Scheme_Object *sema;
      sema = scheme_make_sema(0);
      SCHEME_PTR1_VAL(evt) = sema;
    }
    p->resumed_box = evt;
  }

  return evt;
}
6246 
/* Sync-readiness callback shared by thread-suspend and thread-resume
   evts. When the evt's thread slot (PTR2) is set, the evt is ready and
   the thread is the sync result; otherwise the sync is redirected to
   the evt's semaphore (PTR1). */
static int resume_suspend_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo)
{
  Scheme_Object *t;

  t = SCHEME_PTR2_VAL(o);
  if (t) {
    scheme_set_sync_target(sinfo, o, t, NULL, 0, 0, NULL);
    return 1;
  }

  scheme_set_sync_target(sinfo, SCHEME_PTR1_VAL(o), o, NULL, 0, 1, NULL);
  return 0;
}
6260 
/* Scheme-level `thread-dead-evt`: returns the dead event for the given
   thread. */
static Scheme_Object *make_thread_dead(int argc, Scheme_Object *argv[])
{
  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_thread_type))
    scheme_wrong_contract("thread-dead-evt", "thread?", 0, argc, argv);

  return scheme_get_thread_dead((Scheme_Thread *)argv[0]);
}
6268 
scheme_get_thread_dead(Scheme_Thread * p)6269 Scheme_Object *scheme_get_thread_dead(Scheme_Thread *p)
6270 {
6271   if (!p->dead_box) {
6272     Scheme_Object *b;
6273     Scheme_Object *sema;
6274 
6275     b = scheme_alloc_small_object();
6276     b->type = scheme_thread_dead_type;
6277     sema = scheme_make_sema(0);
6278     SCHEME_PTR_VAL(b) = sema;
6279     if (!MZTHREAD_STILL_RUNNING(p->running))
6280       scheme_post_sema_all(sema);
6281 
6282     p->dead_box = b;
6283   }
6284 
6285   return p->dead_box;
6286 }
6287 
/* Sync-readiness callback for thread-dead evts: always redirects the
   sync to the evt's semaphore, which is posted when the thread dies
   (see scheme_get_thread_dead). */
static int dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo)
{
  scheme_set_sync_target(sinfo, SCHEME_PTR_VAL(o), o, NULL, 0, 1, NULL);
  return 0;
}
6293 
scheme_get_thread_sync(Scheme_Thread * p)6294 Scheme_Object *scheme_get_thread_sync(Scheme_Thread *p)
6295 {
6296   if (!p->sync_box) {
6297     Scheme_Object *sema;
6298     sema = scheme_make_sema(0);
6299     p->sync_box = sema;
6300   }
6301 
6302   return p->sync_box;
6303 }
6304 
/* Discard thread p's sync semaphore, if it has one. (The guard looks
   redundant, but presumably avoids an unnecessary store/GC-page dirty
   when the field is already NULL -- confirm before simplifying.) */
void scheme_clear_thread_sync(Scheme_Thread *p)
{
  if (p->sync_box)
    p->sync_box = NULL;
}
6310 
6311 /*========================================================================*/
6312 /*                              syncing                                   */
6313 /*========================================================================*/
6314 
6315 static Evt_Set *make_evt_set(const char *name, int argc, Scheme_Object **argv, int delta, int flatten);
6316 
/* One entry in the evt registry: the callbacks implementing the sync
   protocol for a particular Scheme_Type (registered through
   scheme_add_evt_worker / scheme_add_evt). */
typedef struct Evt {
  MZTAG_IF_REQUIRED
  Scheme_Type sync_type;     /* the object type this entry handles */
  Scheme_Ready_Fun_FPC ready;  /* readiness check for an evt of this type */
  Scheme_Needs_Wakeup_Fun needs_wakeup; /* registers wakeup triggers before sleeping */
  Scheme_Sync_Sema_Fun get_sema;  /* NOTE(review): not set by scheme_add_evt_worker in this chunk */
  Scheme_Sync_Filter_Fun filter;
  int can_redirect;          /* whether sync may be redirected for this type */
} Evt;
6326 
6327 
6328 /* PLACE_THREAD_DECL */
6329 static int evts_array_size;
6330 static Evt **evts;
6331 #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
6332 THREAD_LOCAL_DECL(static int place_evts_array_size);
6333 THREAD_LOCAL_DECL(static Evt **place_evts);
6334 #endif
6335 
/* Installs an Evt record for `type` into the given table (shared or
   per-place), growing the table first if `type` does not fit. The
   table is grown to at least _scheme_last_type_ entries so that most
   later registrations avoid another reallocation. */
void scheme_add_evt_worker(Evt ***evt_array,
                           int *evt_size,
                           Scheme_Type type,
                           Scheme_Ready_Fun ready,
                           Scheme_Needs_Wakeup_Fun wakeup,
                           Scheme_Sync_Filter_Fun filter,
                           int can_redirect)
{
  Evt *naya;

  if (*evt_size <= type) {
    /* Grow the table and copy over existing entries. */
    Evt **nevts;
    int new_size;
    new_size = type + 1;
    if (new_size < _scheme_last_type_)
      new_size = _scheme_last_type_;
    nevts = MALLOC_N(Evt*, new_size);
    if (*evt_size)
      memcpy(nevts, (*evt_array), (*evt_size) * sizeof(Evt*));
    (*evt_array) = nevts;
    (*evt_size) = new_size;
  }

  naya = MALLOC_ONE_RT(Evt);
#ifdef MZTAG_REQUIRED
  naya->type = scheme_rt_evt;
#endif
  naya->sync_type = type;
  naya->ready = (Scheme_Ready_Fun_FPC)ready;
  naya->needs_wakeup = wakeup;
  naya->filter = filter;
  naya->can_redirect = can_redirect;

  (*evt_array)[type] = naya;
}
6371 
/* Public registration entry point: installs an Evt record either in
   the per-place table (when called from within a place) or in the
   shared table, registering the table pointer with the GC on first
   use. */
void scheme_add_evt(Scheme_Type type,
		    Scheme_Ready_Fun ready,
		    Scheme_Needs_Wakeup_Fun wakeup,
		    Scheme_Sync_Filter_Fun filter,
		    int can_redirect)
{
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  if (GC_is_place()) {
    if (!place_evts) {
      REGISTER_SO(place_evts);
    }
    scheme_add_evt_worker(&place_evts, &place_evts_array_size, type, ready, wakeup, filter, can_redirect);
  }
  else {
#endif
    if (!evts) {
      REGISTER_SO(evts);
    }
    scheme_add_evt_worker(&evts, &evts_array_size, type, ready, wakeup, filter, can_redirect);
#if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
  }
#endif
}
6395 
/* Registers an evt type whose synchronization is mediated entirely by
   a semaphore obtained from `get_sema` (no direct ready/wakeup
   functions). NOTE(review): this writes `evts[type]` directly, but
   scheme_add_evt() may have registered the record in the per-place
   `place_evts` table instead — presumably this function is only
   called during non-place startup; verify against callers. */
void scheme_add_evt_through_sema(Scheme_Type type,
				  Scheme_Sync_Sema_Fun get_sema,
				  Scheme_Sync_Filter_Fun filter)
{
  scheme_add_evt(type, NULL, NULL, filter, 0);
  evts[type]->get_sema = get_sema;
}
6403 
find_evt(Scheme_Object * o)6404 static Evt *find_evt(Scheme_Object *o)
6405 {
6406   Scheme_Type t;
6407   Evt *w = NULL;
6408 
6409   t = SCHEME_TYPE(o);
6410   if (t < evts_array_size)
6411     w = evts[t];
6412 #if defined(MZ_PRECISE_GC) && defined(MZ_USE_PLACES)
6413   if (place_evts && w == NULL)
6414     w = place_evts[t];
6415 #endif
6416 
6417   if (w && w->filter) {
6418     Scheme_Sync_Filter_Fun filter;
6419     filter = w->filter;
6420     if (!filter(o))
6421       return NULL;
6422   }
6423   return w;
6424 }
6425 
/* Reports whether `o` can be used with `sync`: either an evt set, or
   a value whose type has a registered (and accepting) Evt record. */
int scheme_is_evt(Scheme_Object *o)
{
  if (SCHEME_EVTSETP(o))
    return 1;
  return (find_evt(o) != NULL);
}
6433 
/* Allocates a Syncing record for the given evt set. A non-negative
   `timeout` (seconds) is converted into an absolute deadline in
   milliseconds relative to `start_time`; a negative timeout means
   "wait forever" (sleep_end stays 0.0). For multi-event sets, a
   random starting position is drawn from the scheduler's random
   state so that no event is systematically favored by poll order. */
static Syncing *make_syncing(Evt_Set *evt_set, float timeout, double start_time)
{
  Syncing *syncing;
  int pos;

  syncing = MALLOC_ONE_RT(Syncing);
#ifdef MZTAG_REQUIRED
  syncing->type = scheme_rt_syncing;
#endif
  syncing->set = evt_set;
  syncing->timeout = timeout;
  if (timeout >= 0)
    syncing->sleep_end = start_time + (timeout * 1000);
  else
    syncing->sleep_end = 0.0;

  if (evt_set->argc > 1) {
    /* Randomize the poll starting position for fairness. */
    Scheme_Config *config;
    Scheme_Object *rand_state;
    config = scheme_current_config();
    rand_state = scheme_get_param(config, MZCONFIG_SCHEDULER_RANDOM_STATE);
    pos = scheme_rand((Scheme_Random_State *)rand_state);
    syncing->start_pos = (pos % evt_set->argc);
  }
  syncing->thread = scheme_current_thread;

  return syncing;
}
6462 
scheme_make_syncing(int argc,Scheme_Object ** argv)6463 Syncing *scheme_make_syncing(int argc, Scheme_Object **argv)
6464 {
6465   Evt_Set *evt_set;
6466 
6467   evt_set = make_evt_set("sync", argc, argv, 0, 1);
6468 
6469   return make_syncing(evt_set, -1.0, 0);
6470 }
6471 
splice_ptr_array(void ** a,int al,void ** b,int bl,int i)6472 static void *splice_ptr_array(void **a, int al, void **b, int bl, int i)
6473 {
6474   void **r;
6475   int j;
6476 
6477   MZ_ASSERT (a != NULL || b != NULL);
6478 
6479   r = MALLOC_N(void*, al + bl - 1);
6480 
6481   if (a)
6482     memcpy(r, a, i * sizeof(void*));
6483   if (b)
6484     memcpy(r + i, b, bl * sizeof(void*));
6485   else {
6486     for (j = 0; j < bl; j++) {
6487       r[i+j] = a[i];
6488     }
6489   }
6490   if (a)
6491     memcpy(r + (i + bl), a + (i + 1), (al - i - 1) * sizeof(void*));
6492 
6493   return r;
6494 }
6495 
/* Replaces position `i` of the syncing's evt set with `target`,
   recording an optional wrap procedure, NACK semaphore, repost flag,
   and accept action for that position. If `target` is itself an evt
   set and `retry` is set, the set is flattened in place into the
   current set, which requires splicing every per-position side array
   as well. */
static void set_sync_target(Syncing *syncing, int i, Scheme_Object *target,
			    Scheme_Object *wrap, Scheme_Object *nack,
			    int repost, int retry, Scheme_Accept_Sync accept)
/* Not ready, deferred to target. */
{
  Evt_Set *evt_set = syncing->set;

  if (wrap) {
    /* Lazily allocate the per-position wrap lists, then push this
       wrap onto position i's list (innermost wrap first). */
    if (!syncing->wrapss) {
      Scheme_Object **wrapss;
      wrapss = MALLOC_N(Scheme_Object*, evt_set->argc);
      syncing->wrapss = wrapss;
    }
    if (!syncing->wrapss[i])
      syncing->wrapss[i] = scheme_null;
    wrap = scheme_make_pair(wrap, syncing->wrapss[i]);
    syncing->wrapss[i] = wrap;
  }

  if (nack) {
    /* Same lazy-allocate-and-push pattern for NACK semaphores. */
    if (!syncing->nackss) {
      Scheme_Object **nackss;
      nackss = MALLOC_N(Scheme_Object*, evt_set->argc);
      syncing->nackss = nackss;
    }
    if (!syncing->nackss[i])
      syncing->nackss[i] = scheme_null;
    nack = scheme_make_pair(nack, syncing->nackss[i]);
    syncing->nackss[i] = nack;
  }

  if (repost) {
    /* Remember that choosing this position must re-post its semaphore. */
    if (!syncing->reposts) {
      char *s;
      s = (char *)scheme_malloc_atomic(evt_set->argc);
      memset(s, 0, evt_set->argc);
      syncing->reposts = s;
    }
    syncing->reposts[i] = 1;
  }

  if (accept) {
    /* Remember the atomic accept action for this position. */
    if (!syncing->accepts) {
      Scheme_Accept_Sync *s;
      s = (Scheme_Accept_Sync *)scheme_malloc_atomic(sizeof(Scheme_Accept_Sync) * evt_set->argc);
      memset(s, 0, evt_set->argc * sizeof(Scheme_Accept_Sync));
      syncing->accepts = s;
    }
    syncing->accepts[i] = accept;
  }

  if (SCHEME_EVTSETP(target) && retry) {
    /* Flatten the set into this one */
    Evt_Set *wts;

    if (SCHEME_EVTSET_UNFLATTENEDP(target)) {
      /* Normalize a nested/unflattened set first. */
      Scheme_Object *a[1];
      a[0] = target;
      wts = make_evt_set("sync", 1, a, 0, 1);
    } else
      wts = (Evt_Set *)target;

    if (wts->argc == 1) {
      /* 1 thing in set? Flattening is easy! */
      evt_set->argv[i] = wts->argv[0];
      evt_set->ws[i] = wts->ws[0];
    } else {
      /* Inline the set (in place) */
      Scheme_Object **argv;
      Evt **ws;

      /* Keep a previously chosen result index pointing at the same
         event after the splice shifts later positions. */
      if (syncing->result > i+1)
        syncing->result += wts->argc-1;

      argv = (Scheme_Object **)splice_ptr_array((void **)evt_set->argv,
						evt_set->argc,
						(void **)wts->argv,
						wts->argc,
						i);
      ws = (Evt **)splice_ptr_array((void **)evt_set->ws,
					 evt_set->argc,
					 (void **)wts->ws,
					 wts->argc,
					 i);

      evt_set->argv = argv;
      evt_set->ws = ws;

      /* Each side array must be spliced the same way, duplicating the
         replaced slot's entry across the new positions (NULL `b`). */
      if (syncing->wrapss) {
	argv = (Scheme_Object **)splice_ptr_array((void **)syncing->wrapss,
						  evt_set->argc,
						  (void **)NULL,
						  wts->argc,
						  i);
	syncing->wrapss = argv;
      }
      if (syncing->nackss) {
	argv = (Scheme_Object **)splice_ptr_array((void **)syncing->nackss,
						  evt_set->argc,
						  (void **)NULL,
						  wts->argc,
						  i);
	syncing->nackss = argv;
      }
      if (syncing->reposts) {
	/* Atomic (char) array: splice by hand, clearing the new slots. */
	char *s;
	int len;

	len = evt_set->argc + wts->argc - 1;

	s = (char *)scheme_malloc_atomic(len);
	memset(s, 0, len);

	memcpy(s, syncing->reposts, i);
	memcpy(s + i + wts->argc, syncing->reposts + i + 1, evt_set->argc - i - 1);
	syncing->reposts = s;
      }
      if (syncing->accepts) {
	/* Same manual splice for the accept-action array. */
	Scheme_Accept_Sync *s;
	int len;

	len = evt_set->argc + wts->argc - 1;

	s = (Scheme_Accept_Sync *)scheme_malloc_atomic(len * sizeof(Scheme_Accept_Sync));
	memset(s, 0, len * sizeof(Scheme_Accept_Sync));

	memcpy(s, syncing->accepts, i * sizeof(Scheme_Accept_Sync));
	memcpy(s + i + wts->argc, syncing->accepts + i + 1, (evt_set->argc - i - 1) * sizeof(Scheme_Accept_Sync));
	syncing->accepts = s;
      }

      evt_set->argc += (wts->argc - 1);

      /* scheme_channel_syncer_type needs to know its location, which
	 might have changed: */
      argv = evt_set->argv;
      for (i = evt_set->argc; i--; ) {
	if (SAME_TYPE(SCHEME_TYPE(argv[i]), scheme_channel_syncer_type)) {
	  ((Scheme_Channel_Syncer *)argv[i])->syncing_i = i;
	}
      }

    }
  } else {
    /* Simple replacement: look up the new target's Evt record. */
    Evt *ww;
    evt_set->argv[i] = target;
    ww = find_evt(target);
    evt_set->ws[i] = ww;
  }
}
6646 
/* Public wrapper for set_sync_target() that uses the scheduling
   info's current syncing record and position. When `retry` is set,
   the polling index is rewound one step so that the newly installed
   target(s) are polled immediately. */
void scheme_set_sync_target(Scheme_Schedule_Info *sinfo, Scheme_Object *target,
			    Scheme_Object *wrap, Scheme_Object *nack,
			    int repost, int retry, Scheme_Accept_Sync accept)
{
  set_sync_target((Syncing *)sinfo->current_syncing, sinfo->w_i,
		  target, wrap, nack, repost, retry, accept);
  if (retry) {
    /* Rewind one step to try new ones (or continue
       if the set was empty). */
    sinfo->w_i--;
  }
}
6659 
/* Polls every event of a syncing record once (starting at the
   randomized start position) and reports whether the sync is done.
   Handles redirections installed by ready callbacks, semaphore
   proxies, timeouts, and — when every event is semaphore-like and
   `can_suspend` permits — blocking the thread in a GC-friendly way.
   Updates sinfo->sleep_end with the earliest relevant wakeup time. */
int scheme_syncing_ready(Syncing *syncing, Scheme_Schedule_Info *sinfo, int can_suspend)
{
  int i, redirections = 0, all_semas = 1, j, result = 0;
  Evt *w;
  Scheme_Object *o;
  Scheme_Schedule_Info r_sinfo;
  Evt_Set *evt_set;
  int is_poll;
  double sleep_end;

  sleep_end = syncing->sleep_end;

  if (syncing->result) {
    /* Already decided (e.g., by a channel/semaphore during a swap). */
    result = 1;
    goto set_sleep_end_and_return;
  }

  /* We must handle target redirections in the objects on which we're
     syncing. We never have to redirect the evt_set itself, but
     a evt_set can show up as a target, and we inline it in
     that case. */

  evt_set = syncing->set;

  is_poll = (syncing->timeout == 0.0);

  /* Anything ready? */
  for (j = 0; j < evt_set->argc; j++) {
    Scheme_Ready_Fun_FPC ready;

    /* Rotate the poll order by the randomized start position. */
    i = (j + syncing->start_pos) % evt_set->argc;

    o = evt_set->argv[i];
    w = evt_set->ws[i];
    ready = w->ready;

    /* Track whether every event is semaphore-like, which enables the
       GCable blocking path below. */
    if (!SCHEME_SEMAP(o)
	&& !SCHEME_CHANNELP(o) && !SCHEME_CHANNEL_PUTP(o)
	&& !SAME_TYPE(SCHEME_TYPE(o), scheme_channel_syncer_type)
	&& !SAME_TYPE(SCHEME_TYPE(o), scheme_never_evt_type))
      all_semas = 0;

    if (ready) {
      int yep;

      /* Poll via a fresh, nested schedule-info record. */
      init_schedule_info(&r_sinfo, sinfo->false_positive_ok, 0, sleep_end);

      r_sinfo.current_syncing = (Scheme_Object *)syncing;
      r_sinfo.w_i = i;
      r_sinfo.is_poll = is_poll;
      r_sinfo.replace_chain = sinfo->replace_chain;

      yep = ready(o, &r_sinfo);

      sleep_end = r_sinfo.sleep_end;
      sinfo->replace_chain = r_sinfo.replace_chain;

      /* Calling a guard can allow thread swap, which might choose a
         semaphore or a channel, so check for a result: */
      if (syncing->result) {
        result = 1;
        goto set_sleep_end_and_return;
      }

      if ((i > r_sinfo.w_i) && sinfo->false_positive_ok) {
	/* There was a redirection. Assert: !yep.
	   Give up if we've chained too much. */
	redirections++;
	if (redirections > 10) {
	  sinfo->potentially_false_positive = 1;
	  result = 1;
	  goto set_sleep_end_and_return;
	}
      }

      /* Account for any w_i adjustment (rewind or splice) made by the
         ready callback. */
      j += (r_sinfo.w_i - i);

      if (yep) {
	/* If it was a potentially false positive,
	   don't set result permanently. Otherwise,
	   propagate the false-positive indicator.*/
	if (!r_sinfo.potentially_false_positive) {
	  syncing->result = i + 1;
	  if (syncing->disable_break)
	    syncing->disable_break->suspend_break++;
	  if (syncing->reposts && syncing->reposts[i])
	    scheme_post_sema(o);
          if (syncing->accepts && syncing->accepts[i])
            scheme_accept_sync(syncing, i);
	  scheme_post_syncing_nacks(syncing);
	  result = 1;
	  goto set_sleep_end_and_return;
	} else {
	  sinfo->potentially_false_positive = 1;
	  result = 1;
	  goto set_sleep_end_and_return;
	}
      } else if (r_sinfo.spin) {
	sinfo->spin = 1;
      }
    } else if (w->get_sema) {
      /* No direct ready function: substitute the event's semaphore
         proxy and re-poll this position. */
      int repost = 0;
      Scheme_Sync_Sema_Fun get_sema = w->get_sema;
      Scheme_Object *sema;

      sema = get_sema(o, &repost);
      set_sync_target(syncing, i, sema, o, NULL, repost, 1, NULL);
      j--; /* try again with this sema */
    }

    if (syncing->result)
      scheme_signal_error("internal error: sync result set unexpectedly");
  }

  if (syncing->timeout >= 0.0) {
    /* Timed sync: report done once the deadline has passed. */
    if (syncing->sleep_end <= scheme_get_inexact_milliseconds())
      result = 1;
  } else if (all_semas && can_suspend) {
    /* Try to block in a GCable way: */
    if (sinfo->false_positive_ok) {
      /* In scheduler. Swap us in so we can suspend. */
      sinfo->potentially_false_positive = 1;
      result = 1;
    } else {
      /* Not in scheduler --- we're allowed to block via suspend,
	 which makes the thread GCable. */
      scheme_wait_semas_chs(syncing->set->argc, syncing->set->argv, 0, syncing);

      /* In case a break appeared after we chose something,
	 check for a break, because scheme_wait_semas_chs() won't: */
      scheme_check_break_now();

      result = 1;
    }
  }

 set_sleep_end_and_return:

  /* Propagate the earliest wakeup deadline to the caller. */
  if (sleep_end
      && (!sinfo->sleep_end
	  || (sinfo->sleep_end > sleep_end)))
    sinfo->sleep_end = sleep_end;

  return result;
}
6805 
/* Scheduler-facing wrapper: polls a syncing record, allowing the
   thread to suspend when all events are semaphore-like. */
static int syncing_ready(Syncing *syncing, Scheme_Schedule_Info *sinfo)
{
  return scheme_syncing_ready(syncing, sinfo, 1);
}
6810 
/* Runs the (atomic) accept action recorded for position `i`, replacing
   the most recently pushed wrap value with the action's result. The
   action is cleared so it cannot run twice. */
void scheme_accept_sync(Syncing *syncing, int i)
{
  /* run atomic accept action to revise the wrap */
  Scheme_Accept_Sync accept;
  Scheme_Object *wraps, *val;

  accept = syncing->accepts[i];
  syncing->accepts[i] = NULL;

  wraps = syncing->wrapss[i];
  val = SCHEME_CAR(wraps);
  wraps = SCHEME_CDR(wraps);

  val = accept(val);

  wraps = scheme_make_pair(val, wraps);
  syncing->wrapss[i] = wraps;
}
6829 
/* Registers wakeup triggers (e.g., file descriptors in `fds`) for
   every event of the syncing record before the thread goes to sleep.
   Nested syncings produced by active replace-evts are processed via
   an explicit worklist rather than recursion, to avoid stack
   overflow. */
void scheme_syncing_needs_wakeup(Syncing *s, void *fds)
{
  int i;
  Scheme_Object *o, *syncs = NULL;
  Syncing *next;
  Evt *w;
  Evt_Set *evt_set;

  do {
    evt_set = s->set;

    for (i = 0; i < evt_set->argc; i++) {
      o = evt_set->argv[i];
      w = evt_set->ws[i];

      if (SAME_TYPE(SCHEME_TYPE(o), scheme_active_replace_evt_type)) {
        /* Handle active_replace_evt specially to avoid stack overflow: */
        next = scheme_replace_evt_needs_wakeup(o);
        if (next)
          syncs = scheme_make_raw_pair((Scheme_Object *)next, syncs);
      } else if (w->needs_wakeup) {
        Scheme_Needs_Wakeup_Fun nw = w->needs_wakeup;

        nw(o, fds);
      }
    }

    /* Pop the next pending nested syncing, if any. */
    if (!syncs)
      s = NULL;
    else {
      s = (Syncing *)SCHEME_CAR(syncs);
      syncs = SCHEME_CDR(syncs);
    }
  } while (s);
}
6865 
/* Implements the `evt?` predicate. */
static Scheme_Object *evt_p(int argc, Scheme_Object *argv[])
{
  if (scheme_is_evt(argv[0]))
    return scheme_true;
  return scheme_false;
}
6872 
/* Writes the non-set elements of the (possibly nested) evt set `e`
   into `args`/`ws` starting at index `pos`, using an explicit stack
   of pending sub-sets instead of recursion. When `args` is NULL,
   only counts elements. Returns the next free position (i.e., `pos`
   plus the number of flattened elements). Note that each set's
   elements are visited from last to first. */
static int evt_set_flatten(Evt_Set *e, int pos, Scheme_Object **args, Evt **ws)
{
  Scheme_Object *stack = scheme_null;
  int i;

  while (1) {
    for (i = e->argc; i--; ) {
      if (!SCHEME_EVTSETP(e->argv[i])) {
        /* Leaf event: emit (or just count). */
        if (args) {
          args[pos] = e->argv[i];
          ws[pos] = e->ws[i];
        }
        pos++;
      } else
        /* Nested set: defer it on the stack. */
        stack = scheme_make_pair(e->argv[i], stack);
    }

    if (!SCHEME_NULLP(stack)) {
      e = (Evt_Set *)SCHEME_CAR(stack);
      stack = SCHEME_CDR(stack);
    } else
      break;
  }

  return pos;
}
6899 
/* Builds an Evt_Set from argv[delta..argc), validating each non-set
   argument as an evt (reporting a contract error against `name`
   otherwise). When `flatten` is set, nested evt sets are expanded
   in place; otherwise they are kept as elements and the result is
   marked unflattened. The per-argument Evt records are computed once
   and reused when the layout allows. */
static Evt_Set *make_evt_set(const char *name, int argc, Scheme_Object **argv, int delta, int flatten)
{
  Evt *w, **iws, **ws;
  Evt_Set *evt_set, *subset;
  Scheme_Object **args;
  int i, j, count = 0, reuse = 1, unflattened = 0;

  iws = MALLOC_N(Evt*, argc-delta);

  /* Find Evt record for each non-set argument, and compute size --- possibly flattened. */
  for (i = 0; i < (argc - delta); i++) {
    if (!SCHEME_EVTSETP(argv[i+delta])) {
      w = find_evt(argv[i+delta]);
      if (!w) {
	scheme_wrong_contract(name, "evt?", i+delta, argc, argv);
	return NULL;
      }
      iws[i] = w;
      count++;
    } else if (flatten) {
      /* Count the elements contributed by a nested set; a multi-element
         set prevents reusing the `iws` array directly. */
      int n;
      if (SCHEME_EVTSET_UNFLATTENEDP(argv[i+delta])) {
        n = evt_set_flatten((Evt_Set *)argv[i+delta], 0, NULL, NULL);
      } else {
        n = ((Evt_Set *)argv[i+delta])->argc;
      }
      if (n != 1)
        reuse = 0;
      count += n;
    } else {
      /* Keep the nested set as a single element. */
      count++;
      unflattened = 1;
    }
  }

  evt_set = MALLOC_ONE_TAGGED(Evt_Set);
  evt_set->iso.so.type = scheme_evt_set_type;
  evt_set->argc = count;
  if (unflattened)
    SCHEME_SET_EVTSET_UNFLATTENED(evt_set);

  if (reuse && (count == (argc - delta)))
    ws = iws;
  else
    ws = MALLOC_N(Evt*, count);

  /* Second pass: fill in the element and Evt-record arrays,
     expanding nested sets when flattening. */
  args = MALLOC_N(Scheme_Object*, count);
  for (i = delta, j = 0; i < argc; i++, j++) {
    if (flatten && SCHEME_EVTSETP(argv[i])) {
      if (SCHEME_EVTSET_UNFLATTENEDP(argv[i])) {
        j = evt_set_flatten((Evt_Set *)argv[i], j, args, ws);
        j--; /* compensate for the loop's j++ */
      } else {
        int k, n;
        subset = (Evt_Set *)argv[i];
        n = subset->argc;
        for (k = 0; k < n; k++, j++) {
          args[j] = subset->argv[k];
          ws[j] = subset->ws[k];
        }
        --j; /* compensate for the loop's j++ */
      }
    } else {
      ws[j] = iws[i-delta];
      args[j] = argv[i];
    }
  }

  evt_set->ws = ws;
  evt_set->argv = args;

  return evt_set;
}
6973 
/* Exported constructor for a flattened evt set. */
Scheme_Object *scheme_make_evt_set(int argc, Scheme_Object **argv)
{
  Evt_Set *s;
  s = make_evt_set("internal-make-evt-set", argc, argv, 0, 1);
  return (Scheme_Object *)s;
}
6978 
/* Normalizes a NACK list for membership testing: NULL becomes the
   empty list, and a list longer than 5 elements is converted to an
   eq-based hash tree for faster lookups. */
static Scheme_Object *get_members(Scheme_Object *skip_nacks)
{
  Scheme_Object *l;
  Scheme_Hash_Tree *ht;

  if (!skip_nacks)
    return scheme_null;

  if (scheme_list_length(skip_nacks) <= 5)
    return skip_nacks;

  ht = scheme_make_hash_tree(SCHEME_hashtr_eq);
  for (l = skip_nacks; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
    ht = scheme_hash_tree_set(ht, SCHEME_CAR(l), scheme_true);
  }
  return (Scheme_Object *)ht;
}
6993 
is_member(Scheme_Object * a,Scheme_Object * l)6994 XFORM_NONGCING static int is_member(Scheme_Object *a, Scheme_Object *l)
6995 {
6996   if (SCHEME_HASHTRP(l)) {
6997     if (scheme_eq_hash_tree_get((Scheme_Hash_Tree *)l, a))
6998       return 1;
6999   } else {
7000     for (; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
7001       if (SAME_OBJ(a, SCHEME_CAR(l)))
7002         return 1;
7003     }
7004   }
7005 
7006   return 0;
7007 }
7008 
/* Posts the NACK semaphores for every event that was not selected,
   skipping NACKs that also belong to the chosen event. With
   `as_escape`, additionally performs abort cleanup: detaches the
   thread, wakes anyone waiting on its sync box, and discards any
   in-flight place-channel message. Nested replace-evt syncings are
   handled via an explicit worklist (with `scheme_true` markers
   carrying the escape flag). */
static void post_syncing_nacks(Syncing *syncing, int as_escape)
/* Also removes channel-syncers. Can be called multiple times. */
{
  int i, c;
  Scheme_Object *l, *syncs = NULL, *skip_nacks = NULL;
  Syncing *next;

  do {
    if (as_escape) {
      /* Escape path: disconnect the thread and release its waiters. */
      Scheme_Thread *p = syncing->thread;

      syncing->thread = NULL;

      if (p && p->sync_box)
        scheme_post_sema_all(p->sync_box);

#ifdef MZ_PRECISE_GC
      /* Discard a place-channel message that was never received. */
      if (p && p->place_channel_msg_in_flight) {
        GC_destroy_orphan_msg_memory(p->place_channel_msg_in_flight);
        p->place_channel_msg_in_flight = NULL;
      }
#endif
    }

    if (syncing->thread && syncing->thread->sync_box)
      syncing->thread->sync_box = NULL;

    if (syncing->set) {
      c = syncing->set->argc;

      for (i = 0; i < c; i++) {
        if (SAME_TYPE(SCHEME_TYPE(syncing->set->argv[i]), scheme_channel_syncer_type))
          scheme_get_outof_line((Scheme_Channel_Syncer *)syncing->set->argv[i]);
        else if (SAME_TYPE(SCHEME_TYPE(syncing->set->argv[i]), scheme_active_replace_evt_type)) {
         /* Handle active_replace_evt specially to avoid stack overflow: */
          next = scheme_replace_evt_nack(syncing->set->argv[i]);
          if (next) {
            syncs = scheme_make_raw_pair((Scheme_Object *)next, syncs);
            /* A scheme_true marker means "treat as escape" for the
               nested syncing that was not the chosen event. */
            if ((i + 1) != syncing->result)
              syncs = scheme_make_raw_pair(scheme_true, syncs);
          }
        }

        if (syncing->nackss) {
          if ((i + 1) != syncing->result) {
            l = syncing->nackss[i];
            if (l) {
              if (!skip_nacks) {
                if (syncing->result) {
                  /* Skip any nacks from the chosen event. If the
                     list of nacks is long, convert to a hash tree. */
                  skip_nacks = get_members(syncing->nackss[syncing->result-1]);
                } else
                  skip_nacks = scheme_null;
              }
              for (; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
                if (!is_member(SCHEME_CAR(l), skip_nacks))
                  scheme_post_sema_all(SCHEME_CAR(l));
              }
            }
            /* Clear so a repeated call does not re-post. */
            syncing->nackss[i] = NULL;
          }
        }
      }
    }

    /* Pop the next pending nested syncing (and its escape marker). */
    if (!syncs)
      syncing = NULL;
    else {
      if (SAME_OBJ(scheme_true, SCHEME_CAR(syncs))) {
        as_escape = 1;
        syncs = SCHEME_CDR(syncs);
      } else
        as_escape = 0;

      syncing = (Syncing *)SCHEME_CAR(syncs);
      syncs = SCHEME_CDR(syncs);
    }
  } while (syncing);
}
7089 
/* Removes (get_out != 0) or re-inserts (get_out == 0) all of this
   syncing's channel syncers from/into their channels' queues — used
   around thread suspension. Nested replace-evt syncings are handled
   via an explicit worklist to avoid recursion. Does nothing if a
   result was already chosen. */
static void get_outof_or_into_lines(Syncing *syncing, int get_out)
{
  int i, c;
  Scheme_Object *syncs = NULL;
  Syncing *next;

  if (syncing->result) {
    /* already done, so no need to adjust lines */
    return;
  }

  do {
    if (syncing->set) {
      c = syncing->set->argc;

      for (i = 0; i < c; i++) {
        if (SAME_TYPE(SCHEME_TYPE(syncing->set->argv[i]), scheme_channel_syncer_type)) {
          if (get_out)
            scheme_get_outof_line((Scheme_Channel_Syncer *)syncing->set->argv[i]);
          else
            scheme_get_back_into_line((Scheme_Channel_Syncer *)syncing->set->argv[i]);
        }
        else if (SAME_TYPE(SCHEME_TYPE(syncing->set->argv[i]), scheme_active_replace_evt_type)) {
         /* Handle active_replace_evt specially to avoid stack overflow: */
          next = scheme_replace_evt_get(syncing->set->argv[i]);
          if (next)
            syncs = scheme_make_raw_pair((Scheme_Object *)next, syncs);
        }
      }
    }

    /* Pop the next pending nested syncing, if any. */
    if (!syncs)
      syncing = NULL;
    else {
      syncing = (Syncing *)SCHEME_CAR(syncs);
      syncs = SCHEME_CDR(syncs);
    }
  } while (syncing);
}
7129 
/* Posts all recorded NACK semaphores for non-chosen events (normal
   completion path; no escape cleanup). */
void scheme_post_syncing_nacks(Syncing *syncing)
{
  post_syncing_nacks(syncing, 0);
}
7134 
/* Posts NACKs and performs escape-specific cleanup when a sync is
   aborted (e.g., by an exception or break). */
void scheme_escape_during_sync(Syncing *syncing)
{
  post_syncing_nacks(syncing, 1);
}
7139 
/* Produces the final value of a completed sync by applying the chosen
   event's accumulated wrap/handle procedures (innermost first) to the
   selected event. Wrap procedures run with breaks disabled; a
   handle-evt procedure (recorded as a boxed wrap) may be applied in
   tail position when `tailok` permits. Returns NULL if the sync has
   no result yet. */
Scheme_Object *scheme_syncing_result(Syncing *syncing, int tailok)
{
  if (syncing->result) {
    /* Apply wrap functions to the selected evt: */
    Scheme_Object *o, *l, *a, *to_call = NULL, *args[1], **mv = NULL;
    int to_call_is_handle = 0, rc = 1;
    Scheme_Cont_Frame_Data cframe;

    o = syncing->set->argv[syncing->result - 1];
    if (SAME_TYPE(SCHEME_TYPE(o), scheme_channel_syncer_type)) {
      /* This is a put that got changed to a syncer, but not changed back */
      o = ((Scheme_Channel_Syncer *)o)->obj;
    }
    if (syncing->wrapss) {
      l = syncing->wrapss[syncing->result - 1];
      if (l) {
	for (; SCHEME_PAIRP(l); l = SCHEME_CDR(l)) {
	  a = SCHEME_CAR(l);
	  if (to_call) {
	    /* Apply the previously collected wrap before moving on. */
            if (rc == 1) {
              mv = args;
              args[0] = o;
            }

	    /* Call wrap proc with breaks disabled */
	    scheme_push_break_enable(&cframe, 0, 0);

	    o = scheme_apply_multi(to_call, rc, mv);

            if (SAME_OBJ(o, SCHEME_MULTIPLE_VALUES)) {
              /* Capture multiple values to thread into the next wrap. */
              rc = scheme_multiple_count;
              mv = scheme_multiple_array;
              scheme_detach_multple_array(mv);
            } else {
              rc = 1;
              mv = NULL;
            }

	    scheme_pop_break_enable(&cframe, 0);

	    to_call = NULL;
	  }
	  if (SCHEME_BOXP(a) || SCHEME_PROCP(a)) {
	    /* A boxed procedure marks a handle-evt's handler. */
	    if (SCHEME_BOXP(a)) {
	      a = SCHEME_BOX_VAL(a);
	      to_call_is_handle = 1;
	    }
	    to_call = a;
	  } else if (SAME_TYPE(scheme_thread_suspend_type, SCHEME_TYPE(a))
		     || SAME_TYPE(scheme_thread_resume_type, SCHEME_TYPE(a))) {
	    /* Suspend/resume evts produce their associated thread. */
	    o = SCHEME_PTR2_VAL(a);
            rc = 1;
          } else {
	    /* A non-procedure wrap is a constant replacement value. */
	    o = a;
            rc = 1;
          }
	}

	if (to_call) {
	  /* Apply the final collected wrap/handle procedure. */
          if (rc == 1) {
            mv = args;
            args[0] = o;
          }

	  /* If to_call is still a wrap-evt (not a handle-evt),
	     then set the config one more time: */
	  if (!to_call_is_handle) {
	    scheme_push_break_enable(&cframe, 0, 0);
	    tailok = 0;
	  }

	  if (tailok) {
	    return _scheme_tail_apply(to_call, rc, mv);
	  } else {
	    o = scheme_apply_multi(to_call, rc, mv);

            if (SAME_OBJ(o, SCHEME_MULTIPLE_VALUES)) {
              rc = scheme_multiple_count;
              mv = scheme_multiple_array;
              scheme_detach_multple_array(mv);
              if (!to_call_is_handle)
                scheme_pop_break_enable(&cframe, 1);
              return scheme_values(rc, mv);
            } else {
              if (!to_call_is_handle)
                scheme_pop_break_enable(&cframe, 1);
              return o;
            }
	  }
	}
      }
    }
    return o;
  } else
    return NULL;
}
7236 
/* Shared implementation of `sync', `sync/timeout', and the
   enable-break/C-API variants.  Blocks until one of the given evts is
   ready and returns the selected evt.  When `with_timeout' is set,
   argv[0] is the timeout: #f (wait forever), a non-negative real
   (seconds), or a 0-arity "fail thunk" to call if nothing is ready.
   `with_break' wraps the wait in a break-enabled frame.  `_tailok'
   distinguishes the Scheme entry (tail calls allowed, #f on timeout)
   from the C entry (NULL on timeout). */
static Scheme_Object *do_sync(const char *name, int argc, Scheme_Object *argv[],
			      int with_break, int with_timeout, int _tailok)
{
  /* locals are `volatile' because BEGIN_ESCAPEABLE below involves a
     non-local jump, and these values must survive it */
  volatile int tailok = _tailok;
  Evt_Set * volatile evt_set;
  Syncing * volatile syncing;
  volatile float timeout = -1.0; /* -1.0 means "no timeout supplied" */
  double start_time;
  Scheme_Cont_Frame_Data cframe;

  if (with_timeout) {
    if (!SCHEME_FALSEP(argv[0])) {
      if (SCHEME_REALP(argv[0]))
	timeout = (float)scheme_real_to_double(argv[0]);
      else if (scheme_check_proc_arity(NULL, 0, 0, argc, argv))
        timeout = 0.0; /* fail thunk: poll once, call the thunk on failure */

      if (timeout < 0.0) {
	scheme_wrong_contract(name, "(>=/c 0.0)", 0, argc, argv);
	return NULL;
      }

      start_time = scheme_get_inexact_milliseconds();
    } else
      start_time = 0;
  } else {
    start_time = 0;
  }

  /* Special case: no timeout, only object is a semaphore */
  if (argc == (with_timeout + 1) && !start_time && SCHEME_SEMAP(argv[with_timeout])) {
    /* -1 enables breaks during the wait; 0 leaves them as-is */
    scheme_wait_sema(argv[with_timeout], with_break ? -1 : 0);
    return argv[with_timeout];
  }

  evt_set = NULL;

  /* Special case: only argument is an immutable evt set: */
  if ((argc == (with_timeout + 1))
      && SCHEME_EVTSETP(argv[with_timeout])
      && !SCHEME_EVTSET_UNFLATTENEDP(argv[with_timeout])) {
    int i;
    evt_set = (Evt_Set *)argv[with_timeout];
    for (i = evt_set->argc; i--; ) {
      if (evt_set->ws[i]->can_redirect) {
	/* Need to copy this set to handle redirections. */
	evt_set = NULL;
	break;
      }
    }
  }

  if (!evt_set)
    evt_set = make_evt_set(name, argc, argv, with_timeout, 1);

  if (with_break) {
    scheme_push_break_enable(&cframe, 1, 1);
  }

  /* Check for another special case: syncing on a set of semaphores
     without a timeout. Use general code for channels.
     (Note that we check for this case after evt-set flattening.) */
  if (timeout < 0.0) {
    int i;
    for (i = evt_set->argc; i--; ) {
      if (!SCHEME_SEMAP(evt_set->argv[i]))
	break;
    }
    if (i < 0) {
      /* Hit the special case. */
      i = scheme_wait_semas_chs(evt_set->argc, evt_set->argv, 0, NULL);

      if (with_break) {
	scheme_pop_break_enable(&cframe, 1);
      } else {
	/* In case a break appeared after we received a post,
	   check for a break, because scheme_wait_semas_chs() won't: */
	scheme_check_break_now();
      }

      /* `i' is the 1-based index of the ready semaphore, or 0 on failure */
      if (i)
	return evt_set->argv[i - 1];
      else
	return (tailok ? scheme_false : NULL);
    }
  }

  syncing = make_syncing(evt_set, timeout, start_time);

  if (timeout < 0.0)
    timeout = 0.0; /* means "no timeout" to block_until */

  if (with_break) {
    /* Suspended breaks when something is selected. */
    syncing->disable_break = scheme_current_thread;
  }

  /* If a continuation jump escapes the wait, scheme_escape_during_sync
     gets a chance to clean up the syncing record: */
  BEGIN_ESCAPEABLE(scheme_escape_during_sync, syncing);
  scheme_block_until((Scheme_Ready_Fun)syncing_ready,
                     (Scheme_Needs_Wakeup_Fun)scheme_syncing_needs_wakeup,
		     (Scheme_Object *)syncing, timeout);
  END_ESCAPEABLE();

  /* No evt was selected (timeout): notify NACK evts */
  if (!syncing->result)
    scheme_post_syncing_nacks(syncing);

  if (with_break) {
    scheme_pop_break_enable(&cframe, 0);
  }

  if (with_break) {
    /* Reverse low-level break disable: */
    --syncing->disable_break->suspend_break;
  }

  if (syncing->result) {
    return scheme_syncing_result(syncing, tailok);
  } else {
    /* Timed out; call the fail thunk if one was supplied as argv[0] */
    if (with_timeout && SCHEME_PROCP(argv[0])) {
      if (tailok)
        return _scheme_tail_apply(argv[0], 0, NULL);
      else
        return _scheme_apply(argv[0], 0, NULL);
    } else if (tailok)
      return scheme_false;
    else
      return NULL;
  }
}
7366 
static Scheme_Object *sch_sync(int argc, Scheme_Object *argv[])
{
  /* Scheme-level `sync': no timeout argument, breaks left as-is,
     and the result may be delivered via a tail call. */
  const int enable_break = 0, has_timeout = 0, tail_ok = 1;
  return do_sync("sync", argc, argv, enable_break, has_timeout, tail_ok);
}
7371 
static Scheme_Object *sch_sync_timeout(int argc, Scheme_Object *argv[])
{
  /* Scheme-level `sync/timeout': argv[0] carries the timeout or fail
     thunk; breaks left as-is; tail calls allowed. */
  const int enable_break = 0, has_timeout = 1, tail_ok = 1;
  return do_sync("sync/timeout", argc, argv, enable_break, has_timeout, tail_ok);
}
7376 
Scheme_Object *scheme_sync(int argc, Scheme_Object *argv[])
{
  /* C-API `sync': like sch_sync, but the result must be returned
     directly (no tail call), so failure yields NULL. */
  const int enable_break = 0, has_timeout = 0, tail_ok = 0;
  return do_sync("sync", argc, argv, enable_break, has_timeout, tail_ok);
}
7381 
Scheme_Object *scheme_sync_timeout(int argc, Scheme_Object *argv[])
{
  /* C-API `sync/timeout': timeout in argv[0]; no tail call, so a
     timeout is reported as NULL. */
  const int enable_break = 0, has_timeout = 1, tail_ok = 0;
  return do_sync("sync/timeout", argc, argv, enable_break, has_timeout, tail_ok);
}
7386 
static Scheme_Object *do_scheme_sync_enable_break(const char *who, int with_timeout, int tailok, int argc, Scheme_Object *argv[])
{
  /* Break-enabled sync entry.  Recognize the common "single semaphore,
     no timeout" shapes and wait on the semaphore directly (with breaks
     enabled via the -1 flag); otherwise fall through to do_sync. */
  Scheme_Object *sema = NULL;

  if (with_timeout) {
    if ((argc == 2) && SCHEME_FALSEP(argv[0]) && SCHEME_SEMAP(argv[1]))
      sema = argv[1];
  } else if ((argc == 1) && SCHEME_SEMAP(argv[0]))
    sema = argv[0];

  if (!sema)
    return do_sync(who, argc, argv, 1, with_timeout, tailok);

  scheme_wait_sema(sema, -1);
  return sema;
}
7405 
Scheme_Object *scheme_sync_enable_break(int argc, Scheme_Object *argv[])
{
  /* C-API `sync/enable-break': no timeout argument, no tail call. */
  const int has_timeout = 0, tail_ok = 0;
  return do_scheme_sync_enable_break("sync/enable-break", has_timeout, tail_ok, argc, argv);
}
7410 
static Scheme_Object *sch_sync_enable_break(int argc, Scheme_Object *argv[])
{
  /* Scheme-level `sync/enable-break': no timeout argument, tail calls
     allowed for the result. */
  const int has_timeout = 0, tail_ok = 1;
  return do_scheme_sync_enable_break("sync/enable-break", has_timeout, tail_ok, argc, argv);
}
7415 
static Scheme_Object *sch_sync_timeout_enable_break(int argc, Scheme_Object *argv[])
{
  /* Scheme-level `sync/timeout/enable-break': timeout in argv[0],
     tail calls allowed. */
  const int has_timeout = 1, tail_ok = 1;
  return do_scheme_sync_enable_break("sync/timeout/enable-break", has_timeout, tail_ok, argc, argv);
}
7420 
static Scheme_Object *evts_to_evt(int argc, Scheme_Object *argv[])
{
  /* `choice-evt': combine the given evts into a single evt set. */
  Evt_Set *combined;
  combined = make_evt_set("choice-evt", argc, argv, 0, 0);
  return (Scheme_Object *)combined;
}
7425 
7426 /*========================================================================*/
7427 /*                         boxes as place locals                          */
7428 /*========================================================================*/
7429 
static Scheme_Object *unsafe_make_place_local(int argc, Scheme_Object **argv)
{
  /* unsafe-make-place-local: a place-local is represented as a box
     holding its current value. */
  Scheme_Object *bx;
  bx = scheme_box(argv[0]);
  return bx;
}
7434 
static Scheme_Object *unsafe_place_local_ref(int argc, Scheme_Object **argv)
{
  /* unsafe-place-local-ref: read the value out of the box. */
  Scheme_Object *pl = argv[0];
  return SCHEME_BOX_VAL(pl);
}
7439 
static Scheme_Object *unsafe_place_local_set(int argc, Scheme_Object **argv)
{
  /* unsafe-place-local-set!: overwrite the boxed value. */
  Scheme_Object *pl = argv[0];
  SCHEME_BOX_VAL(pl) = argv[1];
  return scheme_void;
}
7445 
7446 /*========================================================================*/
7447 /*                             thread cells                               */
7448 /*========================================================================*/
7449 
7450 #define SCHEME_THREAD_CELLP(x) (SAME_TYPE(SCHEME_TYPE(x), scheme_thread_cell_type))
7451 
Scheme_Object *scheme_make_thread_cell(Scheme_Object *def_val, int inherited)
{
  /* Allocate a fresh thread cell whose per-thread default is
     `def_val'; a non-zero `inherited' marks it as preserved across
     thread creation. */
  Thread_Cell *cell;

  cell = MALLOC_ONE_TAGGED(Thread_Cell);
  cell->so.type = scheme_thread_cell_type;
  cell->def_val = def_val;
  cell->inherited = (inherited ? 1 : 0); /* normalize to 0/1 */

  return (Scheme_Object *)cell;
}
7463 
static Scheme_Object *do_thread_cell_get(Scheme_Object *cell, Scheme_Thread_Cell_Table *cells)
{
  /* Slow path: the cell has been assigned somewhere, so consult the
     per-thread table; values are stored via ephemerons keyed on the
     cell.  Fall back to the cell's default value. */
  Thread_Cell *tc = (Thread_Cell *)cell;

  if (tc->assigned) {
    Scheme_Object *eph;
    eph = scheme_lookup_in_table(cells, (const char *)cell);
    if (eph)
      return scheme_ephemeron_value(eph);
  }

  return tc->def_val;
}
7476 
Scheme_Object *scheme_thread_cell_get(Scheme_Object *cell, Scheme_Thread_Cell_Table *cells)
{
  /* Fast path: a never-assigned cell always has its default value,
     so skip the table lookup entirely. */
  if (((Thread_Cell *)cell)->assigned)
    return do_thread_cell_get(cell, cells);
  return ((Thread_Cell *)cell)->def_val;
}
7484 
void scheme_thread_cell_set(Scheme_Object *cell, Scheme_Thread_Cell_Table *cells, Scheme_Object *v)
{
  /* Mark the cell as assigned (enables the table lookup on reads)
     and store the value through an ephemeron keyed on the cell, so
     the table entry does not keep `v' alive once the cell dies. */
  Scheme_Object *eph;

  if (!((Thread_Cell *)cell)->assigned)
    ((Thread_Cell *)cell)->assigned = 1;
  eph = scheme_make_ephemeron(cell, v);
  scheme_add_to_table(cells, (const char *)cell, (void *)eph, 0);
}
7492 
scheme_empty_cell_table(void)7493 Scheme_Thread_Cell_Table *scheme_empty_cell_table(void)
7494 {
7495   return scheme_make_bucket_table(20, SCHEME_hash_weak_ptr);
7496 }
7497 
static Scheme_Thread_Cell_Table *inherit_cells(Scheme_Thread_Cell_Table *cells,
					       Scheme_Thread_Cell_Table *t,
					       int inherited)
{
  /* Copy into `t' every live entry of `cells' whose cell's
     `inherited' flag matches the requested one.  NULL `cells' means
     the current thread's table; NULL `t' means allocate a fresh
     table.  Returns `t'. */
  Scheme_Bucket *b;
  Scheme_Object *cell_key, *val;
  int idx;

  if (!cells)
    cells = scheme_current_thread->cell_values;

  if (!t)
    t = scheme_empty_cell_table();

  for (idx = cells->size; idx--; ) {
    b = cells->buckets[idx];
    if (!b || !b->val || !b->key)
      continue;
    /* bucket keys are weak pointers to the cells themselves */
    cell_key = (Scheme_Object *)HT_EXTRACT_WEAK(b->key);
    if (cell_key && (((Thread_Cell *)cell_key)->inherited == inherited)) {
      val = (Scheme_Object *)b->val;
      scheme_add_to_table(t, (char *)cell_key, val, 0);
    }
  }

  return t;
}
7525 
Scheme_Thread_Cell_Table *scheme_inherit_cells(Scheme_Thread_Cell_Table *cells)
{
  /* Copy just the preserved (inherited) cells into a fresh table. */
  Scheme_Thread_Cell_Table *copy;
  copy = inherit_cells(cells, NULL, 1);
  return copy;
}
7530 
/* Implements `current-preserved-thread-cell-values'.  With no
   arguments, captures the current thread's preserved cells in an
   opaque thread-cell-values object.  With one argument (a previously
   captured object), rebuilds the thread's cell table from the
   thread's own non-preserved cells plus the captured preserved
   cells. */
static Scheme_Object *thread_cell_values(int argc, Scheme_Object *argv[])
{
  if (argc == 1) {
    Scheme_Thread_Cell_Table *naya;

    if (!SAME_TYPE(scheme_thread_cell_values_type, SCHEME_TYPE(argv[0]))) {
      scheme_wrong_contract("current-preserved-thread-cell-values", "thread-cell-values?", 0, argc, argv);
      return NULL;
    }

    /* Start from this thread's non-preserved cells, then overlay the
       captured preserved cells: */
    naya = inherit_cells(NULL, NULL, 0);
    inherit_cells((Scheme_Thread_Cell_Table *)SCHEME_PTR_VAL(argv[0]), naya, 1);

    scheme_current_thread->cell_values = naya;

    return scheme_void;
  } else {
    Scheme_Object *o, *ht;

    /* Capture the current preserved (inherited) cells: */
    ht = (Scheme_Object *)inherit_cells(NULL, NULL, 1);

    /* Wrap the table in an opaque value: */
    o = scheme_alloc_small_object();
    o->type = scheme_thread_cell_values_type;
    SCHEME_PTR_VAL(o) = ht;

    return o;
  }
}
7559 
static Scheme_Object *is_thread_cell_values(int argc, Scheme_Object *argv[])
{
  /* `thread-cell-values?' predicate */
  if (SAME_TYPE(scheme_thread_cell_values_type, SCHEME_TYPE(argv[0])))
    return scheme_true;
  return scheme_false;
}
7566 
static Scheme_Object *make_thread_cell(int argc, Scheme_Object *argv[])
{
  /* `make-thread-cell': optional second argument requests a
     preserved (inherited) cell. */
  int preserved;
  preserved = ((argc > 1) && SCHEME_TRUEP(argv[1]));
  return scheme_make_thread_cell(argv[0], preserved);
}
7571 
static Scheme_Object *thread_cell_p(int argc, Scheme_Object *argv[])
{
  /* `thread-cell?' predicate */
  if (SCHEME_THREAD_CELLP(argv[0]))
    return scheme_true;
  return scheme_false;
}
7578 
static Scheme_Object *thread_cell_get(int argc, Scheme_Object *argv[])
{
  /* `thread-cell-ref': read the cell in the current thread. */
  Scheme_Object *cell = argv[0];
  if (!SCHEME_THREAD_CELLP(cell))
    scheme_wrong_contract("thread-cell-ref", "thread-cell?", 0, argc, argv);
  return scheme_thread_cell_get(cell, scheme_current_thread->cell_values);
}
7585 
static Scheme_Object *thread_cell_set(int argc, Scheme_Object *argv[])
{
  /* `thread-cell-set!': write the cell in the current thread. */
  Scheme_Object *cell = argv[0];
  if (!SCHEME_THREAD_CELLP(cell))
    scheme_wrong_contract("thread-cell-set!", "thread-cell?", 0, argc, argv);
  scheme_thread_cell_set(cell, scheme_current_thread->cell_values, argv[1]);
  return scheme_void;
}
7593 
7594 
7595 /*========================================================================*/
7596 /*                              parameters                                */
7597 /*========================================================================*/
7598 
7599 SHARED_OK static int max_configs = __MZCONFIG_BUILTIN_COUNT__;
7600 static Scheme_Object *do_param(int argc, Scheme_Object *argv[], Scheme_Object *self);
7601 
/* Escape when the current parameterization is unusable: reporting a
   normal error would itself require consulting parameters, so jump
   to the error buffer instead.  (In a separate function to help
   xform.) */
static Scheme_Config *config_fail()
{
  scheme_longjmp(scheme_error_buf, 1);
  return NULL; /* not reached; satisfies the return type */
}
7608 
/* Returns the current parameterization, which is stored as a
   continuation mark under scheme_parameterization_key. */
Scheme_Config *scheme_current_config()
{
  GC_CAN_IGNORE Scheme_Object *v;

  v = scheme_extract_one_cc_mark(NULL, scheme_parameterization_key);

  if (!SAME_TYPE(scheme_config_type, SCHEME_TYPE(v))) {
    /* Someone has grabbed parameterization-key out of #%paramz
       and misused it.
       Printing an error message requires consulting parameters,
       so just escape. */
    return config_fail();
  }

  return (Scheme_Config *)v;
}
7625 
static Scheme_Config *do_extend_config(Scheme_Config *c, Scheme_Object *key, Scheme_Object *val)
{
  /* Functionally extend config `c' with a mapping from `key' to a
     fresh inherited thread cell holding `val'.
     In principle, the key+cell link should be weak, but it's
     difficult to imagine a parameter being GC'ed while an active
     `parameterize' is still on the stack (or, at least, difficult to
     imagine that it matters). */
  Scheme_Config *nc;
  Scheme_Object *cell;
  Scheme_Hash_Tree *map;

  nc = MALLOC_ONE_TAGGED(Scheme_Config);
  nc->so.type = scheme_config_type;
  cell = scheme_make_thread_cell(val, 1);
  map = scheme_hash_tree_set(c->ht, key, cell);
  nc->ht = map;
  nc->root = c->root;

  return nc;
}
7644 
Scheme_Config *scheme_extend_config(Scheme_Config *c, int pos, Scheme_Object *init_val)
{
  /* Extend `c' at a built-in parameter index (fixnum key). */
  Scheme_Object *key;
  key = scheme_make_integer(pos);
  return do_extend_config(c, key, init_val);
}
7649 
/* Makes `config' the current parameterization by installing it as
   the continuation mark under scheme_parameterization_key. */
void scheme_install_config(Scheme_Config *config)
{
  scheme_set_cont_mark(scheme_parameterization_key, (Scheme_Object *)config);
}
7654 
/* Finds the storage for parameter key `k' in config `c': first the
   config's own hash tree (entries added by `parameterize'), then the
   root parameterization.  A fixnum key indexes the built-in `prims'
   array; any other key consults the `extensions' table, which may be
   absent.  The result is normally a thread cell, but extension
   lookups may yield a raw value (see extract_param).
   NOTE(review): `force_cell' is not used in this body — no cell is
   created on demand; confirm whether callers rely on that. */
Scheme_Object *find_param_cell(Scheme_Config *c, Scheme_Object *k, int force_cell)
{
  Scheme_Object *v;
  Scheme_Parameterization *p;

  v = scheme_eq_hash_tree_get(c->ht, k);
  if (v)
    return v;

  p = c->root;
  if (SCHEME_INTP(k))
    return p->prims[SCHEME_INT_VAL(k)];
  else {
    if (p->extensions)
      return scheme_lookup_in_table(p->extensions, (const char *)k);
    else
      return NULL;
  }
}
7674 
scheme_get_thread_param(Scheme_Config * c,Scheme_Thread_Cell_Table * cells,int pos)7675 Scheme_Object *scheme_get_thread_param(Scheme_Config *c, Scheme_Thread_Cell_Table *cells, int pos)
7676 {
7677   Scheme_Object *cell;
7678 
7679   cell = find_param_cell(c, scheme_make_integer(pos), 0);
7680   return scheme_thread_cell_get(cell, cells);
7681 }
7682 
scheme_get_param(Scheme_Config * c,int pos)7683 Scheme_Object *scheme_get_param(Scheme_Config *c, int pos)
7684 {
7685   if (pos == MZCONFIG_ENV)
7686     return (Scheme_Object *)scheme_get_current_namespace_as_env();
7687 
7688   return scheme_get_thread_param(c, scheme_current_thread->cell_values, pos);
7689 }
7690 
void scheme_set_thread_param(Scheme_Config *c, Scheme_Thread_Cell_Table *cells, int pos, Scheme_Object *o)
{
  /* Write built-in parameter `pos' through its cell in `cells'. */
  Scheme_Object *param_cell;
  param_cell = find_param_cell(c, scheme_make_integer(pos), 1);
  scheme_thread_cell_set(param_cell, cells, o);
}
7695 
void scheme_set_param(Scheme_Config *c, int pos, Scheme_Object *o)
{
  /* MZCONFIG_ENV is special: the current namespace is tracked
     separately from the parameterization. */
  Scheme_Object *param_cell;

  if (pos == MZCONFIG_ENV) {
    scheme_set_current_namespace_as_env((Scheme_Env *)o);
    return;
  }

  param_cell = find_param_cell(c, scheme_make_integer(pos), 1);
  scheme_thread_cell_set(param_cell, scheme_current_thread->cell_values, o);
}
7706 
/* Allocates a Scheme_Parameterization with room for `max_configs'
   built-in parameter slots; mzFLEX_DELTA adjusts for the array
   entries already counted in the struct's declared size. */
static Scheme_Parameterization *malloc_paramz()
{
  return (Scheme_Parameterization *)scheme_malloc_tagged(sizeof(Scheme_Parameterization) +
                                                         (max_configs - mzFLEX_DELTA) * sizeof(Scheme_Object*));
}
7712 
/* Intentionally a no-op in the current representation (a hash tree
   plus a root parameterization needs no flattening); retained so
   existing callers keep working. */
void scheme_flatten_config(Scheme_Config *orig_c)
{
}
7716 
static Scheme_Object *parameterization_p(int argc, Scheme_Object **argv)
{
  /* `parameterization?' predicate */
  if (SCHEME_CONFIGP(argv[0]))
    return scheme_true;
  return scheme_false;
}
7725 
7726 
7727 #define SCHEME_PARAMETERP(v) ((SCHEME_PRIMP(v) || SCHEME_CLSD_PRIMP(v)) \
7728                               && ((((Scheme_Primitive_Proc *)v)->pp.flags & SCHEME_PRIM_OTHER_TYPE_MASK) \
7729                                   == SCHEME_PRIM_TYPE_PARAMETER))
7730 
/* Helper behind the `parameterize' expansion: argv[0] is a
   parameterization, followed by alternating parameter/value
   arguments.  Returns a new config extended with a fresh cell for
   each pair.  With no pairs, the given config is returned
   unchanged (after a no-op flatten). */
Scheme_Object *scheme_extend_parameterization(int argc, Scheme_Object *argv[])
{
  Scheme_Object *key, *a[2], *param;
  Scheme_Config *c;
  int i;

  c = (Scheme_Config *)argv[0];

  if (argc < 2) {
    scheme_flatten_config(c);
  } else if (SCHEME_CONFIGP(c) && (argc & 1)) {
    /* odd argc => the config plus complete <param, value> pairs */
    for (i = 1; i < argc; i += 2) {
      param = argv[i];
      if (!SCHEME_PARAMETERP(param)
          && !(SCHEME_CHAPERONEP(param) && SCHEME_PARAMETERP(SCHEME_CHAPERONE_VAL(param)))) {
        a[0] = param;
	/* position -2 => report `param' directly as the bad value */
	scheme_wrong_contract("parameterize", "parameter?", -2, 1, a);
	return NULL;
      }
      key = argv[i + 1];
      if (SCHEME_CHAPERONEP(param)) {
        /* filter the new value through the chaperone, then continue
           with the underlying parameter */
        a[0] = key;
        key = scheme_apply_chaperone(param, 1, a, scheme_void, 0x3);
        param = SCHEME_CHAPERONE_VAL(param);
      }
      a[0] = key;
      a[1] = scheme_false;
      /* Invoke the parameter in its 2-argument "parameterize hook"
         mode: it returns its key and leaves the (guarded) value in
         a[1].  A derived parameter returns the underlying parameter,
         so loop until we bottom out. */
      while (1) {
        if (!(((Scheme_Primitive_Proc *)param)->pp.flags & SCHEME_PRIM_IS_CLOSURE)) {
          Scheme_Prim *proc;
          proc = (Scheme_Prim *)((Scheme_Primitive_Proc *)param)->prim_val;
          key = proc(2, a); /* leads to scheme_param_config to set a[1] */
          break;
        } else {
          /* sets a[1] */
          key = do_param(2, a, param);
          if (SCHEME_PARAMETERP(key)) {
            /* derived: chain to the underlying parameter */
            param = key;
            a[0] = a[1];
          } else
            break;
        }
      }
      c = do_extend_config(c, key, a[1]);
    }
  }

  return (Scheme_Object *)c;
}
7780 
static Scheme_Object *reparameterize(int argc, Scheme_Object **argv)
{
  /* Clones values of all built-in parameters in a new parameterization.
     This could be implemented in Racket by enumerating all built-in parameters,
     but it's easier and faster here. We need this for the Planet resolver. */
  Scheme_Config *c, *naya;
  Scheme_Parameterization *pz, *npz;
  Scheme_Object *v;
  Scheme_Hash_Tree *ht;
  int i;

  if (!SCHEME_CONFIGP(argv[0]))
    scheme_wrong_contract("reparameterize", "parameterization?", 0, argc, argv);

  c = (Scheme_Config *)argv[0];
  scheme_flatten_config(c);

  /* Clone the root parameterization record: */
  pz = c->root;
  npz = malloc_paramz();
  memcpy(npz, pz, sizeof(Scheme_Parameterization));

  /* Fresh config with an empty overlay tree and the cloned root: */
  naya = MALLOC_ONE_TAGGED(Scheme_Config);
  naya->so.type = scheme_config_type;
  ht = scheme_make_hash_tree(SCHEME_hashtr_eq);
  naya->ht = ht;
  naya->root = npz;

  /* Snapshot each built-in parameter's current value (as seen by the
     current thread) into a fresh inherited cell: */
  for (i = 0; i < max_configs; i++) {
    v = scheme_thread_cell_get(pz->prims[i], scheme_current_thread->cell_values);
    v = scheme_make_thread_cell(v, 1);
    npz->prims[i] = v;
  }

  return (Scheme_Object *)naya;
}
7816 
int scheme_is_parameter(Scheme_Object *v)
{
  /* Look through a chaperone/impersonator wrapper before testing. */
  Scheme_Object *o = v;

  if (SCHEME_CHAPERONEP(o))
    o = SCHEME_CHAPERONE_VAL(o);

  return SCHEME_PARAMETERP(o);
}
7823 
static Scheme_Object *parameter_p(int argc, Scheme_Object **argv)
{
  /* `parameter?' predicate (chaperone-aware) */
  if (scheme_is_parameter(argv[0]))
    return scheme_true;
  return scheme_false;
}
7830 
/* The primitive closure behind every parameter procedure.  With no
   arguments, reads the parameter; with one argument, applies the
   guard (if any) and writes it.  A 2-argument call is a private hook
   used by `parameterize' (see scheme_extend_parameterization): it
   leaves the guarded value in argv[1] and returns the parameter's
   key.  `self' is the parameter closure itself, whose first closure
   slot holds the ParamData. */
static Scheme_Object *do_param(int argc, Scheme_Object *argv[], Scheme_Object *self)
{
  Scheme_Object *guard, **argv2, *pos[2];
  ParamData *data = (ParamData *)SCHEME_PRIM_CLOSURE_ELS(self)[0];

  if (argc && argv[0]) {
    guard = data->guard;
    if (guard) {
      Scheme_Object *v;

      v = scheme_apply(guard, 1, argv);

      if (argc == 2) {
	/* Special hook for parameterize: */
	argv[1] = v;
	return data->key;
      }

      /* Copy the argument array so the guarded value replaces argv[0]: */
      argv2 = MALLOC_N(Scheme_Object *, argc);
      memcpy(argv2, argv, argc * sizeof(Scheme_Object *));
      argv2[0] = v;
    } else if (argc == 2) {
      /* Special hook for parameterize: */
      argv[1] = argv[0];
      return data->key;
    } else
      argv2 = argv;
  } else
    argv2 = argv;

  if (data->is_derived) {
    /* Derived parameter: delegate to the underlying parameter
       (stored in data->key); on a read, filter the result through
       the extract guard. */
    if (!argc) {
      Scheme_Object *v;
      v = _scheme_apply(data->key, argc, argv2);
      pos[0] = v;
      return _scheme_tail_apply(data->extract_guard, 1, pos);
    } else {
      return _scheme_tail_apply(data->key, argc, argv2);
    }
  }

  /* Plain parameter: let scheme_param_config do the cell read/write;
     -2 tells it the key/default-cell pair is passed directly. */
  pos[0] = data->key;
  pos[1] = data->defcell;

  return scheme_param_config("parameter-procedure",
			     (Scheme_Object *)(void *)pos,
			     argc, argv2,
			     -2, NULL, NULL, 0);
}
7880 
static Scheme_Object *extract_param(Scheme_Config *config, Scheme_Object *key, Scheme_Object *defcell)
{
  /* Look up the parameter's cell in `config', falling back to the
     parameter's own default cell when the config has no entry. */
  Scheme_Object *c;

  c = find_param_cell(config, key, 0);
  if (!c)
    c = defcell;

  if (!SCHEME_THREAD_CELLP(c))
    return c; /* it's really the value, instead of a cell */

  return scheme_thread_cell_get(c, scheme_current_thread->cell_values);
}
7894 
static Scheme_Object *do_param_fast(int argc, Scheme_Object *argv[], Scheme_Object *self)
{
  /* Fast path for a zero-argument read of a plain (non-derived)
     parameter; anything else goes through the general do_param. */
  ParamData *d = (ParamData *)SCHEME_PRIM_CLOSURE_ELS(self)[0];

  if (argc || d->is_derived)
    return do_param(argc, argv, self);

  return extract_param(scheme_current_config(), d->key, d->defcell);
}
7904 
/* Implements `make-parameter': argv[0] is the initial value,
   optional argv[1] is a guard procedure (or #f), and optional
   argv[2] is a symbol used as the parameter procedure's name. */
static Scheme_Object *make_parameter(int argc, Scheme_Object **argv)
{
  Scheme_Object *p, *cell, *a[1];
  ParamData *data;
  void *k;
  const char *name;

  k = scheme_make_pair(scheme_true, scheme_false); /* generates a key */

  if (argc > 1)
    scheme_check_proc_arity2("make-parameter", 1, 1, argc, argv, 1);
  if (argc > 2) {
    /* The name argument must be a symbol; the contract previously
       read "parameter?", which was wrong for this position. */
    if (!SCHEME_SYMBOLP(argv[2]))
      scheme_wrong_contract("make-parameter", "symbol?", 2, argc, argv);
    name = scheme_symbol_val(argv[2]);
  } else
    name = "parameter-procedure";

  data = MALLOC_ONE_RT(ParamData);
#ifdef MZTAG_REQUIRED
  data->type = scheme_rt_param_data;
#endif
  data->key = (Scheme_Object *)k;
  /* The default value lives in an inherited thread cell: */
  cell = scheme_make_thread_cell(argv[0], 1);
  data->defcell = cell;
  data->guard = (((argc > 1) && SCHEME_TRUEP(argv[1])) ? argv[1] : NULL);

  a[0] = (Scheme_Object *)data;
  p = scheme_make_prim_closure_w_arity(do_param_fast, 1, a,
                                       name, 0, 1);
  ((Scheme_Primitive_Proc *)p)->pp.flags |= SCHEME_PRIM_TYPE_PARAMETER;

  return p;
}
7939 
static Scheme_Object *make_derived_parameter(int argc, Scheme_Object **argv)
{
  /* `make-derived-parameter': wraps an existing parameter (argv[0])
     with a write guard (argv[1]) and a read wrapper (argv[2]). */
  ParamData *d;
  Scheme_Object *proc, *clos[1];

  if (!SCHEME_PARAMETERP(argv[0]))
    scheme_wrong_contract("make-derived-parameter", "(and/c parameter? (not/c impersonator?))", 0, argc, argv);

  /* Both procedures must accept exactly one argument: */
  scheme_check_proc_arity("make-derived-parameter", 1, 1, argc, argv);
  scheme_check_proc_arity("make-derived-parameter", 1, 2, argc, argv);

  d = MALLOC_ONE_RT(ParamData);
#ifdef MZTAG_REQUIRED
  d->type = scheme_rt_param_data;
#endif
  d->is_derived = 1;
  d->key = argv[0];
  d->guard = argv[1];
  d->extract_guard = argv[2];

  clos[0] = (Scheme_Object *)d;
  proc = scheme_make_prim_closure_w_arity(do_param, 1, clos,
                                          "parameter-procedure", 0, 1);
  ((Scheme_Primitive_Proc *)proc)->pp.flags |= SCHEME_PRIM_TYPE_PARAMETER;

  return proc;
}
7967 
static Scheme_Object *parameter_procedure_eq(int argc, Scheme_Object **argv)
{
  /* `parameter-procedure=?': compare the underlying parameter
     procedures, looking through chaperones/impersonators first. */
  Scheme_Object *x = argv[0];
  Scheme_Object *y = argv[1];

  if (SCHEME_CHAPERONEP(x)) x = SCHEME_CHAPERONE_VAL(x);
  if (SCHEME_CHAPERONEP(y)) y = SCHEME_CHAPERONE_VAL(y);

  if (!SCHEME_PARAMETERP(x))
    scheme_wrong_contract("parameter-procedure=?", "parameter?", 0, argc, argv);
  if (!SCHEME_PARAMETERP(y))
    scheme_wrong_contract("parameter-procedure=?", "parameter?", 1, argc, argv);

  return SAME_OBJ(x, y) ? scheme_true : scheme_false;
}
7987 
/* Records the vector later used to initialize the command-line
   arguments parameter; registers it as a GC root the first time. */
void scheme_set_command_line_arguments(Scheme_Object *vec)
{
  if (!initial_cmdline_vec)
    REGISTER_SO(initial_cmdline_vec);
  initial_cmdline_vec = vec;
}
7994 
scheme_new_param(void)7995 int scheme_new_param(void)
7996 {
7997   return max_configs++;
7998 }
7999 
static void init_param(Scheme_Thread_Cell_Table *cells,
		       Scheme_Parameterization *params,
		       int pos,
		       Scheme_Object *v)
{
  /* Install `v' as the default for built-in parameter `pos' via a
     fresh inherited thread cell.  (The `cells' argument is not used
     in this body.) */
  Scheme_Object *c;
  c = scheme_make_thread_cell(v, 1);
  params->prims[pos] = c;
}
8009 
void scheme_set_root_param(int p, Scheme_Object *v)
{
  /* Overwrite the default value of built-in parameter `p' in the
     root parameterization of the current thread's initial config. */
  Thread_Cell *cell;
  cell = (Thread_Cell *)(scheme_current_thread->init_config->root->prims[p]);
  cell->def_val = v;
}
8016 
make_initial_config(Scheme_Thread * p)8017 static void make_initial_config(Scheme_Thread *p)
8018 {
8019   Scheme_Thread_Cell_Table *cells;
8020   Scheme_Parameterization *paramz;
8021   Scheme_Config *config;
8022 
8023   cells = scheme_make_bucket_table(5, SCHEME_hash_weak_ptr);
8024   p->cell_values = cells;
8025 
8026   paramz = malloc_paramz();
8027 #ifdef MZTAG_REQUIRED
8028   paramz->type = scheme_rt_parameterization;
8029 #endif
8030 
8031   config = MALLOC_ONE_TAGGED(Scheme_Config);
8032   config->so.type = scheme_config_type;
8033   config->root = paramz;
8034   {
8035     Scheme_Hash_Tree *ht;
8036     ht = scheme_make_hash_tree(SCHEME_hashtr_eq);
8037     config->ht = ht;
8038   }
8039 
8040   p->init_config = config;
8041 
8042   init_param(cells, paramz, MZCONFIG_CASE_SENS, (scheme_case_sensitive ? scheme_true : scheme_false));
8043   init_param(cells, paramz, MZCONFIG_CAN_READ_PIPE_QUOTE, scheme_true);
8044 
8045   init_param(cells, paramz, MZCONFIG_LOAD_DELAY_ENABLED, scheme_init_load_on_demand ? scheme_true : scheme_false);
8046   init_param(cells, paramz, MZCONFIG_DELAY_LOAD_INFO, scheme_false);
8047 
8048   init_param(cells, paramz, MZCONFIG_PRINT_GRAPH, scheme_false);
8049   init_param(cells, paramz, MZCONFIG_PRINT_STRUCT, scheme_true);
8050   init_param(cells, paramz, MZCONFIG_PRINT_BOX, scheme_true);
8051   init_param(cells, paramz, MZCONFIG_PRINT_VEC_SHORTHAND, scheme_false);
8052   init_param(cells, paramz, MZCONFIG_PRINT_HASH_TABLE, scheme_true);
8053   init_param(cells, paramz, MZCONFIG_PRINT_UNREADABLE, scheme_true);
8054   init_param(cells, paramz, MZCONFIG_PRINT_PAIR_CURLY, scheme_false);
8055   init_param(cells, paramz, MZCONFIG_PRINT_MPAIR_CURLY, scheme_true);
8056   init_param(cells, paramz, MZCONFIG_PRINT_READER, scheme_false);
8057   init_param(cells, paramz, MZCONFIG_PRINT_LONG_BOOLEAN, scheme_false);
8058   init_param(cells, paramz, MZCONFIG_PRINT_AS_QQ, scheme_true);
8059   init_param(cells, paramz, MZCONFIG_PRINT_SYNTAX_WIDTH, scheme_make_integer(256));
8060 
8061   init_param(cells, paramz, MZCONFIG_COMPILE_MODULE_CONSTS, scheme_true);
8062   init_param(cells, paramz, MZCONFIG_USE_JIT, scheme_startup_use_jit ? scheme_true : scheme_false);
8063   init_param(cells, paramz, MZCONFIG_COMPILE_TARGET_MACHINE, scheme_startup_compile_machine_independent ? scheme_false : racket_symbol);
8064 
8065   {
8066     Scheme_Object *s;
8067     s = scheme_make_immutable_sized_utf8_string("", 0);
8068     init_param(cells, paramz, MZCONFIG_LOCALE, s);
8069   }
8070 
8071   init_param(cells, paramz, MZCONFIG_ERROR_PRINT_WIDTH, scheme_make_integer(256));
8072   init_param(cells, paramz, MZCONFIG_ERROR_PRINT_CONTEXT_LENGTH, scheme_make_integer(16));
8073   init_param(cells, paramz, MZCONFIG_ERROR_PRINT_SRCLOC, scheme_true);
8074 
8075   REGISTER_SO(main_custodian);
8076   REGISTER_SO(limited_custodians);
8077   main_custodian = scheme_make_custodian(NULL);
8078 #ifdef MZ_PRECISE_GC
8079   GC_register_root_custodian(main_custodian);
8080 #endif
8081   init_param(cells, paramz, MZCONFIG_CUSTODIAN, (Scheme_Object *)main_custodian);
8082 
8083   REGISTER_SO(initial_plumber);
8084   initial_plumber = (Scheme_Plumber *)make_plumber(0, NULL);
8085   init_param(cells, paramz, MZCONFIG_PLUMBER, (Scheme_Object *)initial_plumber);
8086 
8087   init_param(cells, paramz, MZCONFIG_ALLOW_SET_UNDEFINED, (scheme_allow_set_undefined
8088 							  ? scheme_true
8089 							  : scheme_false));
8090 
8091   {
8092     Scheme_Security_Guard *sg;
8093 
8094     sg = MALLOC_ONE_TAGGED(Scheme_Security_Guard);
8095     sg->so.type = scheme_security_guard_type;
8096     init_param(cells, paramz, MZCONFIG_SECURITY_GUARD, (Scheme_Object *)sg);
8097   }
8098 
8099   {
8100     Scheme_Object *s;
8101     char *pwd;
8102     s = scheme_make_path(scheme_os_getcwd(NULL, 0, NULL, 1));
8103     s = scheme_path_to_directory_path(s);
8104     init_param(cells, paramz, MZCONFIG_CURRENT_DIRECTORY, s);
8105 #ifndef DOS_FILE_SYSTEM
8106     pwd = scheme_getenv("PWD");
8107     if (pwd) {
8108       Scheme_Object *id1, *id2, *a[2];
8109       id1 = scheme_get_fd_identity(NULL, 0, pwd, 1);
8110       if (id1) {
8111         id2 = scheme_get_fd_identity(NULL, 0, SCHEME_PATH_VAL(s), 1);
8112         if (id2 && scheme_eqv(id1, id2)) {
8113           s = scheme_make_path(pwd);
8114           a[0] = s;
8115           a[1] = scheme_true;
8116           s = scheme_simplify_path(2, a);
8117           s = scheme_path_to_directory_path(s);
8118           init_param(cells, paramz, MZCONFIG_CURRENT_DIRECTORY, s);
8119         }
8120       }
8121     }
8122 #endif
8123     init_param(cells, paramz, MZCONFIG_CURRENT_USER_DIRECTORY, s);
8124     scheme_set_original_dir(s);
8125   }
8126 
8127   {
8128     Scheme_Object *ev;
8129     ev = scheme_make_environment_variables(NULL);
8130     init_param(cells, paramz, MZCONFIG_CURRENT_ENV_VARS, ev);
8131   }
8132 
8133   init_param(cells, paramz, MZCONFIG_FORCE_DELETE_PERMS, scheme_true);
8134 
8135   {
8136     Scheme_Object *rs;
8137     rs = scheme_make_random_state(scheme_get_milliseconds());
8138     init_param(cells, paramz, MZCONFIG_RANDOM_STATE, rs);
8139     rs = scheme_make_random_state(scheme_get_milliseconds());
8140     init_param(cells, paramz, MZCONFIG_SCHEDULER_RANDOM_STATE, rs);
8141   }
8142 
8143   {
8144     Scheme_Object *ph;
8145 
8146     ph = scheme_make_prim_w_arity(scheme_default_print_handler,
8147 				  "default-print-handler",
8148 				  1, 1);
8149     init_param(cells, paramz, MZCONFIG_PRINT_HANDLER, ph);
8150 
8151     ph = scheme_make_prim_w_arity(scheme_default_prompt_read_handler,
8152                                   "default-prompt-read-handler",
8153                                   0, 0);
8154     init_param(cells, paramz, MZCONFIG_PROMPT_READ_HANDLER, ph);
8155 
8156     ph = scheme_make_prim_w_arity(scheme_default_read_input_port_handler,
8157                                   "default-get-interaction-input-port",
8158                                   0, 0);
8159     init_param(cells, paramz, MZCONFIG_READ_INPUT_PORT_HANDLER, ph);
8160 
8161     ph = scheme_make_prim_w_arity(scheme_default_read_handler,
8162                                   "default-read-interaction-handler",
8163                                   2, 2);
8164     init_param(cells, paramz, MZCONFIG_READ_HANDLER, ph);
8165   }
8166   init_param(cells, paramz, MZCONFIG_PORT_COUNT_LINES, scheme_false);
8167 
8168   {
8169     Scheme_Object *lh;
8170     lh = scheme_make_prim_w_arity2(scheme_default_load_extension,
8171 				   "default-load-extension-handler",
8172 				   2, 2,
8173 				   0, -1);
8174     init_param(cells, paramz, MZCONFIG_LOAD_EXTENSION_HANDLER, lh);
8175   }
8176 
8177   {
8178     Scheme_Object *ins = initial_inspector;
8179     init_param(cells, paramz, MZCONFIG_INSPECTOR, ins);
8180     init_param(cells, paramz, MZCONFIG_CODE_INSPECTOR, ins);
8181   }
8182 
8183   {
8184     Scheme_Object *zlv;
8185     if (initial_cmdline_vec)
8186       zlv = initial_cmdline_vec;
8187     else
8188       zlv = scheme_make_vector(0, NULL);
8189     init_param(cells, paramz, MZCONFIG_CMDLINE_ARGS, zlv);
8190   }
8191 
8192   {
8193     Scheme_Thread_Set *t_set;
8194     t_set = create_thread_set(NULL);
8195     init_param(cells, paramz, MZCONFIG_THREAD_SET, (Scheme_Object *)t_set);
8196   }
8197 
8198   init_param(cells, paramz, MZCONFIG_THREAD_INIT_STACK_SIZE, scheme_make_integer(DEFAULT_INIT_STACK_SIZE));
8199 
8200   {
8201     int i;
8202     for (i = 0; i < max_configs; i++) {
8203       if (!paramz->prims[i])
8204 	init_param(cells, paramz, i, scheme_false);
8205     }
8206   }
8207 
8208   REGISTER_SO(initial_config);
8209   initial_config = config;
8210 }
8211 
/* Returns the startup-time parameterization, for contexts that need
   a minimal, known-good configuration; `initial_config` is recorded
   at the end of startup-parameter initialization. */
Scheme_Config *scheme_minimal_config(void)
{
  return initial_config;
}
8216 
/* Recognizes a valid compilation target: #f or the `racket` symbol. */
Scheme_Object *scheme_compile_target_check(int argc, Scheme_Object **argv)
{
  Scheme_Object *target = argv[0];

  return ((SCHEME_FALSEP(target) || SAME_OBJ(target, racket_symbol))
          ? scheme_true
          : scheme_false);
}
8224 
/* Records whether on-demand loading should be enabled at startup;
   simply stores the flag in `scheme_init_load_on_demand`. */
void scheme_set_startup_load_on_demand(int on)
{
  scheme_init_load_on_demand = on;
}
8229 
/* Creates (at most once) the primitive procedure for built-in
   parameter index `which`, caching it in `config_map` so repeated
   registration returns the same object. */
Scheme_Object *scheme_register_parameter(Scheme_Prim *function, char *name, int which)
{
  Scheme_Object *o;

  if (!config_map) {
    /* Lazily allocate the table, one slot per built-in parameter: */
    REGISTER_SO(config_map);
    config_map = MALLOC_N(Scheme_Object*, max_configs);
  }

  if (config_map[which])
    return config_map[which];

  o = scheme_make_prim_w_arity(function, name, 0, 1);
  /* Mark the primitive as a parameter procedure: */
  ((Scheme_Primitive_Proc *)o)->pp.flags |= SCHEME_PRIM_TYPE_PARAMETER;

  config_map[which] = o;

  return o;
}
8249 
8250 typedef Scheme_Object *(*PCheck_Proc)(int, Scheme_Object **, Scheme_Config *);
8251 
/* Common implementation for parameter procedures: with zero
   arguments, reads the parameter's current value from the current
   parameterization; with one argument, validates (and possibly
   filters) the value and installs it; with two arguments, cooperates
   with `parameterize` (see the "Special hook" below). The `arity`
   argument selects the mode, as described inline. */
static Scheme_Object *do_param_config(char *name, Scheme_Object *pos,
                                      int argc, Scheme_Object **argv,
                                      int arity,
                                      /* -3 => like -1, plus use check to unmarshall the value
                                         -2 => user parameter; pos is array [key, defcell]
                                         -1 => use check; if isboolorfilter, check is a filter
                                         (and expected is ignored), and if check is NULL,
                                         parameter is boolean-valued
                                         0+ => check argument for this arity */
                                      Scheme_Object *(*check)(int, Scheme_Object **),
                                      /* Actually called with (int, S_O **, Scheme_Config *) */
                                      char *expected,
                                      int isboolorfilter,
                                      int expected_is_contract)
{
  Scheme_Config *config;

  config = scheme_current_config();

  if (argc == 0) {
    /* Read mode: fetch the current value. */
    if (arity == -2) {
      return extract_param(config, ((Scheme_Object **)pos)[0], ((Scheme_Object **)pos)[1]);
    } else {
      Scheme_Object *s;
      s = scheme_get_param(config, SCHEME_INT_VAL(pos));
      if (arity == -3) {
        /* Unmarshall the stored representation via `check`: */
        Scheme_Object *a[1];
        PCheck_Proc checkp = (PCheck_Proc)check;
        a[0] = s;
        s = checkp(1, a, config);
      }
      return s;
    }
  } else {
    Scheme_Object *naya = argv[0];

    if (arity != -2) {
      /* Built-in parameter: validate (and maybe filter) the value. */
      if (arity < 0) {
        if (check) {
          PCheck_Proc checkp = (PCheck_Proc)check;
          Scheme_Object *r;

          r = checkp(1, argv, config);

          /* When `check` is a predicate (not a filter), #f means
             the value was rejected: */
          if (!isboolorfilter && SCHEME_FALSEP(r))
            r = NULL;

          if (!r) {
            if (expected_is_contract)
              scheme_wrong_contract(name, expected, 0, 1, argv);
            else
              scheme_wrong_type(name, expected, 0, 1, argv);
            return NULL;
          }

          if (isboolorfilter)
            naya = r;
        }
      } else
        scheme_check_proc_arity(name, arity, 0, argc, argv);

      /* Boolean-valued parameter: coerce any value to #t/#f. */
      if (isboolorfilter && !check)
        naya = ((SCHEME_TRUEP(naya)) ? scheme_true : scheme_false);

      if (argc == 2) {
	/* Special hook for parameterize: */
        argv[1] = naya;
        return pos;
      } else
        scheme_set_param(config, SCHEME_INT_VAL(pos), naya);
    } else {
      /* User-defined parameter: store into its thread cell; the
         default cell is used when no cell is found in the config. */
      Scheme_Object *cell;

      cell = find_param_cell(config, ((Scheme_Object **)pos)[0], 1);
      if (!cell)
        cell = ((Scheme_Object **)pos)[1];

      scheme_thread_cell_set(cell, scheme_current_thread->cell_values, naya);
    }

    return scheme_void;
  }
}
8335 
/* Entry point for parameters whose `expected` string is a type name
   (not a contract); delegates to do_param_config(). */
Scheme_Object *scheme_param_config(char *name, Scheme_Object *pos,
                                   int argc, Scheme_Object **argv,
                                   int arity,
                                   Scheme_Object *(*check)(int, Scheme_Object **),
                                   char *expected_type,
                                   int isboolorfilter)
{
  return do_param_config(name, pos, argc, argv, arity,
                         check, expected_type, isboolorfilter,
                         0 /* expected string is a type name */);
}
8346 
/* Entry point for parameters whose `expected` string is a contract;
   delegates to do_param_config(). */
Scheme_Object *scheme_param_config2(char *name, Scheme_Object *pos,
                                    int argc, Scheme_Object **argv,
                                    int arity,
                                    Scheme_Object *(*check)(int, Scheme_Object **),
                                    char *expected_contract,
                                    int isboolorfilter)
{
  return do_param_config(name, pos, argc, argv, arity,
                         check, expected_contract, isboolorfilter,
                         1 /* expected string is a contract */);
}
8357 
8358 static Scheme_Object *
exact_positive_integer_p(int argc,Scheme_Object * argv[])8359 exact_positive_integer_p (int argc, Scheme_Object *argv[])
8360 {
8361   Scheme_Object *n = argv[argc-1];
8362   if (SCHEME_INTP(n) && (SCHEME_INT_VAL(n) > 0))
8363     return scheme_true;
8364   if (SCHEME_BIGNUMP(n) && SCHEME_BIGPOS(n))
8365     return scheme_true;
8366 
8367   return scheme_false;
8368 }
8369 
/* current-thread-initial-stack-size parameter procedure. */
static Scheme_Object *current_thread_initial_stack_size(int argc, Scheme_Object *argv[])
{
  Scheme_Object *pos;

  pos = scheme_make_integer(MZCONFIG_THREAD_INIT_STACK_SIZE);
  return scheme_param_config2("current-thread-initial-stack-size", pos,
                              argc, argv, -1,
                              exact_positive_integer_p,
                              "exact-positive-integer?", 0);
}
8377 
/* phantom-bytes? predicate. */
static Scheme_Object *phantom_bytes_p(int argc, Scheme_Object *argv[])
{
  if (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_phantom_bytes_type))
    return scheme_true;
  return scheme_false;
}
8384 
/* make-phantom-bytes: creates a phantom byte string that accounts
   for `argv[0]` bytes of memory with the GC without actually
   allocating that memory. */
static Scheme_Object *make_phantom_bytes(int argc, Scheme_Object *argv[])
{
  Scheme_Phantom_Bytes *pb;

  if (!scheme_nonneg_exact_p(argv[0]))
    scheme_wrong_contract("make-phantom-bytes", "exact-nonnegative-integer?", 0, argc, argv);

  /* A bignum-sized request can't possibly be satisfied: */
  if (!SCHEME_INTP(argv[0]))
    scheme_raise_out_of_memory("make-phantom-bytes", NULL);

  pb = MALLOC_ONE_TAGGED(Scheme_Phantom_Bytes);
  pb->so.type = scheme_phantom_bytes_type;
  pb->size = SCHEME_INT_VAL(argv[0]);

# ifdef MZ_PRECISE_GC
  /* Tell the GC about the claimed bytes; on failure, zero the size
     so the object stays consistent, then report out-of-memory: */
  if (!GC_allocate_phantom_bytes(pb, pb->size)) {
    pb->size = 0;
    scheme_raise_out_of_memory("make-phantom-bytes", NULL);
  }
# endif

  return (Scheme_Object *)pb;
}
8408 
/* set-phantom-bytes!: adjusts the number of bytes accounted for by a
   phantom byte string, informing the GC of the delta.
   Fixes relative to the previous version: the out-of-memory error is
   now reported under this function's own name (it said
   "make-phantom-bytes"), and a bignum size is rejected up front --
   mirroring make_phantom_bytes() -- instead of extracting a bogus
   fixnum value from a bignum. */
static Scheme_Object *set_phantom_bytes(int argc, Scheme_Object *argv[])
{
  Scheme_Phantom_Bytes *pb;
  intptr_t amt;
# ifdef MZ_PRECISE_GC
  intptr_t old_size;
# endif

  if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_phantom_bytes_type))
    scheme_wrong_contract("set-phantom-bytes!", "phantom-bytes?", 0, argc, argv);
  if (!scheme_nonneg_exact_p(argv[1]))
    scheme_wrong_contract("set-phantom-bytes!", "exact-nonnegative-integer?", 1, argc, argv);

  /* A bignum-sized request can't possibly be satisfied: */
  if (!SCHEME_INTP(argv[1]))
    scheme_raise_out_of_memory("set-phantom-bytes!", NULL);

  pb = (Scheme_Phantom_Bytes *)argv[0];
  amt = SCHEME_INT_VAL(argv[1]);

# ifdef MZ_PRECISE_GC
  old_size = pb->size;
#endif

  pb->size = amt;

# ifdef MZ_PRECISE_GC
  /* Report only the delta; on failure, restore the old size so the
     object stays consistent, then report out-of-memory: */
  if (!GC_allocate_phantom_bytes(pb, amt - old_size)) {
    pb->size = old_size;
    scheme_raise_out_of_memory("set-phantom-bytes!", NULL);
  }
# endif

  return scheme_void;
}
8440 
8441 /*========================================================================*/
8442 /*                             environment                                */
8443 /*========================================================================*/
8444 
/* Returns the environment (namespace) recorded in config `c`, or in
   the current thread's config when `c` is NULL. */
Scheme_Env *scheme_get_env(Scheme_Config *c)
  XFORM_SKIP_PROC
{
  Scheme_Object *o;

  if (!c)
    c = scheme_current_config();

  o = scheme_get_param(c, MZCONFIG_ENV);
  return (Scheme_Env *)o;
}
8456 
8457 /*========================================================================*/
8458 /*                           security guards                              */
8459 /*========================================================================*/
8460 
/* make-security-guard: creates a guard chained to parent guard
   `argv[0]`, with file-access procedure `argv[1]`, network-access
   procedure `argv[2]`, and an optional link procedure `argv[3]`
   (where #f means no link procedure). */
static Scheme_Object *make_security_guard(int argc, Scheme_Object *argv[])
{
  Scheme_Security_Guard *sg;

  if (!(SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_security_guard_type)))
    scheme_wrong_contract("make-security-guard", "security-guard?", 0, argc, argv);
  scheme_check_proc_arity("make-security-guard", 3, 1, argc, argv);
  scheme_check_proc_arity("make-security-guard", 4, 2, argc, argv);
  if (argc > 3)
    /* The link procedure is allowed to be #f: */
    scheme_check_proc_arity2("make-security-guard", 3, 3, argc, argv, 1);

  sg = MALLOC_ONE_TAGGED(Scheme_Security_Guard);
  sg->so.type = scheme_security_guard_type;
  sg->parent = (Scheme_Security_Guard *)argv[0];
  sg->file_proc = argv[1];
  sg->network_proc = argv[2];
  if ((argc > 3) && SCHEME_TRUEP(argv[3]))
    sg->link_proc = argv[3];

  return (Scheme_Object *)sg;
}
8482 
/* Like make-security-guard, but the result has no parent (a root
   guard), and any omitted procedure is left NULL. */
static Scheme_Object *unsafe_make_security_guard_at_root(int argc, Scheme_Object *argv[])
{
  Scheme_Security_Guard *sg;
  Scheme_Object *file_proc = NULL, *network_proc = NULL, *link_proc = NULL;

  if (argc > 0) {
    scheme_check_proc_arity("unsafe-make-security-guard-at-root", 3, 0, argc, argv);
    file_proc = argv[0];
  }
  if (argc > 1) {
    scheme_check_proc_arity("unsafe-make-security-guard-at-root", 4, 1, argc, argv);
    network_proc = argv[1];
  }
  if (argc > 2) {
    /* The link procedure is allowed to be #f: */
    scheme_check_proc_arity2("unsafe-make-security-guard-at-root", 3, 2, argc, argv, 1);
    link_proc = argv[2];
  }

  sg = MALLOC_ONE_TAGGED(Scheme_Security_Guard);
  sg->so.type = scheme_security_guard_type;
  sg->parent = NULL;
  sg->file_proc = file_proc;
  sg->network_proc = network_proc;
  sg->link_proc = link_proc;

  return (Scheme_Object *)sg;
}
8503 
/* security-guard? predicate. */
static Scheme_Object *security_guard_p(int argc, Scheme_Object *argv[])
{
  if (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_security_guard_type))
    return scheme_true;
  else
    return scheme_false;
}
8510 
/* current-security-guard parameter procedure. */
static Scheme_Object *current_security_guard(int argc, Scheme_Object *argv[])
{
  Scheme_Object *pos;

  pos = scheme_make_integer(MZCONFIG_SECURITY_GUARD);
  return scheme_param_config2("current-security-guard", pos,
                              argc, argv, -1,
                              security_guard_p, "security-guard?", 0);
}
8518 
/* security-guard-check-file: validates its arguments, folds the
   access-mode symbol list into SCHEME_GUARD_FILE_* bits, coerces the
   path argument, and consults the current security-guard chain. */
static Scheme_Object *security_guard_check_file(int argc, Scheme_Object *argv[])
{
  Scheme_Object *l, *a;
  int guards = 0;

  if (!SCHEME_SYMBOLP(argv[0]))
    scheme_wrong_contract("security-guard-check-file", "symbol?", 0, argc, argv);

  if (!SCHEME_PATH_STRINGP(argv[1]))
    scheme_wrong_contract("security-guard-check-file", "path-string?", 1, argc, argv);

  /* Translate each recognized symbol into its mode bit: */
  l = argv[2];
  while (SCHEME_PAIRP(l)) {
    a = SCHEME_CAR(l);
    if (SAME_OBJ(a, exists_symbol))
      guards |= SCHEME_GUARD_FILE_EXISTS;
    else if (SAME_OBJ(a, delete_symbol))
      guards |= SCHEME_GUARD_FILE_DELETE;
    else if (SAME_OBJ(a, execute_symbol))
      guards |= SCHEME_GUARD_FILE_EXECUTE;
    else if (SAME_OBJ(a, write_symbol))
      guards |= SCHEME_GUARD_FILE_WRITE;
    else if (SAME_OBJ(a, read_symbol))
      guards |= SCHEME_GUARD_FILE_READ;
    else
      break;

    l = SCHEME_CDR(l);
  }

  /* A non-empty tail means an unrecognized element (or improper list): */
  if (!SCHEME_NULLP(l))
    scheme_wrong_contract("security-guard-check-file",
                          "(listof (or/c 'read 'write 'execute 'delete 'exists))",
                          2, argc, argv);

  /* Coerce a string argument to a path: */
  a = argv[1];
  if (!SCHEME_PATHP(a))
    a = scheme_char_string_to_path(a);

  scheme_security_check_file(scheme_symbol_val(argv[0]),
                             SCHEME_PATH_VAL(a),
                             guards);

  return scheme_void;
}
8564 
/* security-guard-check-file-link: validates its arguments, coerces
   the two path arguments, and consults the current security-guard
   chain for a link operation from `argv[1]` to `argv[2]`. */
static Scheme_Object *security_guard_check_file_link(int argc, Scheme_Object *argv[])
{
  Scheme_Object *a, *b;

  if (!SCHEME_SYMBOLP(argv[0]))
    scheme_wrong_contract("security-guard-check-file-link", "symbol?", 0, argc, argv);

  if (!SCHEME_PATH_STRINGP(argv[1]))
    scheme_wrong_contract("security-guard-check-file-link", "path-string?", 1, argc, argv);

  if (!SCHEME_PATH_STRINGP(argv[2]))
    scheme_wrong_contract("security-guard-check-file-link", "path-string?", 2, argc, argv);

  /* Coerce string arguments to paths: */
  a = argv[1];
  if (!SCHEME_PATHP(a))
    a = scheme_char_string_to_path(a);

  b = argv[2];
  if (!SCHEME_PATHP(b))
    b = scheme_char_string_to_path(b);

  scheme_security_check_file_link(scheme_symbol_val(argv[0]), SCHEME_PATH_VAL(a), SCHEME_PATH_VAL(b));

  return scheme_void;
}
8590 
/* security-guard-check-network: validates its arguments (operation
   symbol, optional host string, optional port, 'client/'server mode)
   and consults the current security-guard chain. */
static Scheme_Object *security_guard_check_network(int argc, Scheme_Object *argv[])
{
  Scheme_Object *a;

  if (!SCHEME_SYMBOLP(argv[0]))
    scheme_wrong_contract("security-guard-check-network", "symbol?", 0, argc, argv);

  if (SCHEME_TRUEP(argv[1]) && !SCHEME_CHAR_STRINGP(argv[1]))
    scheme_wrong_contract("security-guard-check-network", "(or/c string? #f)", 1, argc, argv);

  /* Port must be #f or a fixnum in [1, 65535]: */
  if (SCHEME_TRUEP(argv[2])
      && (!SCHEME_INTP(argv[2])
          || (SCHEME_INT_VAL(argv[2]) < 1)
          || (SCHEME_INT_VAL(argv[2]) > 65535)))
    scheme_wrong_contract("security-guard-check-network", "(or/c (integer-in 1 65535) #f)", 2, argc, argv);

  /* Contract message previously read "(or/c 'client'server)" with a
     missing space between the options: */
  if (!SAME_OBJ(argv[3], client_symbol) && !SAME_OBJ(argv[3], server_symbol))
    scheme_wrong_contract("security-guard-check-network", "(or/c 'client 'server)", 3, argc, argv);

  /* Convert the host string to bytes for the low-level check: */
  if (SCHEME_TRUEP(argv[1]))
    a = scheme_char_string_to_byte_string(argv[1]);
  else
    a = NULL;

  scheme_security_check_network(scheme_symbol_val(argv[0]),
                                a ? SCHEME_BYTE_STR_VAL(a) : NULL,
                                SCHEME_TRUEP(argv[2]) ? SCHEME_INT_VAL(argv[2]) : 0,
                                SAME_OBJ(argv[3], client_symbol));

  return scheme_void;
}
8622 
/* Runs the file-access checks of the installed security-guard chain
   for operation `who` on `filename` (which may be NULL) with the
   access modes given as SCHEME_GUARD_FILE_* bits in `guards`. A
   guard procedure vetoes access by raising an exception. */
void scheme_security_check_file(const char *who, const char *filename, int guards)
{
  Scheme_Security_Guard *sg;

  sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);

  if (sg->file_proc) {
    Scheme_Object *l = scheme_null, *a[3];

    /* Convert the mode bits into a list of symbols: */
    if (guards & SCHEME_GUARD_FILE_EXISTS)
      l = scheme_make_pair(exists_symbol, l);
    if (guards & SCHEME_GUARD_FILE_DELETE)
      l = scheme_make_pair(delete_symbol, l);
    if (guards & SCHEME_GUARD_FILE_EXECUTE)
      l = scheme_make_pair(execute_symbol, l);
    if (guards & SCHEME_GUARD_FILE_WRITE)
      l = scheme_make_pair(write_symbol, l);
    if (guards & SCHEME_GUARD_FILE_READ)
      l = scheme_make_pair(read_symbol, l);

    a[0] = scheme_intern_symbol(who);
    a[1] = (filename ? scheme_make_sized_path((char *)filename, -1, 1) : scheme_false);
    a[2] = l;

    /* Apply each guard's procedure up the parent chain; the loop
       stops at the root guard, which has no parent (and, as created
       in the initial config, no procedures): */
    while (sg->parent) {
      scheme_apply(sg->file_proc, 3, a);
      sg = sg->parent;
    }
  }
}
8653 
/* Runs the link checks of the installed security-guard chain for
   operation `who`, creating a link at `filename` to `content`. A
   guard in the chain that lacks a link procedure rejects all link
   operations. */
void scheme_security_check_file_link(const char *who, const char *filename, const char *content)
{
  Scheme_Security_Guard *sg;

  sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);

  if (sg->file_proc) {
    Scheme_Object *a[3];

    a[0] = scheme_intern_symbol(who);
    a[1] = scheme_make_sized_path((char *)filename, -1, 1);
    a[2] = scheme_make_sized_path((char *)content, -1, 1);

    /* Walk the chain; the root guard (no parent) is not consulted: */
    while (sg->parent) {
      if (sg->link_proc)
	scheme_apply(sg->link_proc, 3, a);
      else {
	scheme_signal_error("%s: security guard does not allow any link operation; attempted from: %s to: %s",
			    who,
			    filename,
			    content);
      }
      sg = sg->parent;
    }
  }
}
8680 
/* Runs the network checks of the installed security-guard chain for
   operation `who` on `host` (may be NULL) and `port` (< 1 means
   none), in client mode when `client` is non-zero, server mode
   otherwise. A guard procedure vetoes access by raising an
   exception. */
void scheme_security_check_network(const char *who, const char *host, int port, int client)
{
  Scheme_Security_Guard *sg;

  sg = (Scheme_Security_Guard *)scheme_get_param(scheme_current_config(), MZCONFIG_SECURITY_GUARD);

  if (sg->network_proc) {
    Scheme_Object *a[4];

    a[0] = scheme_intern_symbol(who);
    a[1] = (host ? scheme_make_sized_utf8_string((char *)host, -1) : scheme_false);
    a[2] = ((port < 1) ? scheme_false : scheme_make_integer(port));
    a[3] = (client ? client_symbol : server_symbol);

    /* Walk the chain; the root guard (no parent) is not consulted: */
    while (sg->parent) {
      scheme_apply(sg->network_proc, 4, a);
      sg = sg->parent;
    }
  }
}
8701 
8702 /*========================================================================*/
8703 /*                         wills and will executors                       */
8704 /*========================================================================*/
8705 
/* A will that has been triggered by finalization and is queued on a
   will executor, waiting to be run via will-execute: */
typedef struct ActiveWill {
  MZTAG_IF_REQUIRED
  Scheme_Object *o;        /* the value whose will is ready */
  Scheme_Object *proc;     /* the will procedure to apply to `o` */
  struct WillExecutor *w;  /* Set to will executor when executed */
  struct ActiveWill *next; /* next entry in the executor's queue */
} ActiveWill;
8713 
/* A will executor: a FIFO queue of ready wills plus a semaphore
   whose count tracks the queue length. */
typedef struct WillExecutor {
  Scheme_Object so;
  Scheme_Object *sema;      /* posted once per queued will */
  ActiveWill *first, *last; /* FIFO queue of ready wills */
  int is_late;              /* 1 => created by scheme_make_late_will_executor() */
} WillExecutor;
8720 
/* Finalization callback for a value with a registered will: queues
   an ActiveWill on the will's executor and posts the executor's
   semaphore. `data` is either a pair (executor . proc), used for
   late executors, or an ephemeron mapping the executor to the proc;
   in the ephemeron case the executor may already have been
   collected, leaving `w` NULL with nothing to do. */
static void activate_will(void *o, void *data)
{
  ActiveWill *a;
  WillExecutor *w;
  Scheme_Object *proc;

  if (SCHEME_PAIRP(data)) {
    w = (WillExecutor *)SCHEME_CAR(data);
    proc = SCHEME_CDR(data);
  } else {
    w = (WillExecutor *)scheme_ephemeron_key(data);
    proc = scheme_ephemeron_value(data);
  }

  if (w) {
    a = MALLOC_ONE_RT(ActiveWill);
#ifdef MZTAG_REQUIRED
    a->type = scheme_rt_will;
#endif
    a->o = (Scheme_Object *)o;
    a->proc = proc;

    /* Append to the executor's FIFO queue: */
    if (w->last)
      w->last->next = a;
    else
      w->first = a;
    w->last = a;
    scheme_post_sema(w->sema);

    if (w->is_late) {
      /* Ensure that a late will executor stays live in this place
         as long as there are wills to execute. */
      if (!late_will_executors_with_pending) {
        REGISTER_SO(late_will_executors_with_pending);
        late_will_executors_with_pending = scheme_make_hash_table(SCHEME_hash_ptr);
      }
      scheme_hash_set(late_will_executors_with_pending, (Scheme_Object *)w, scheme_true);
    }
  }
}
8761 
/* Dequeues and runs the executor's oldest will, returning the will
   procedure's result. The caller must have successfully waited on
   `w->sema`, so the queue is known to be non-empty. */
static Scheme_Object *do_next_will(WillExecutor *w)
{
  ActiveWill *a;
  Scheme_Object *o[1];

  a = w->first;
  w->first = a->next;
  if (!w->first) {
    w->last = NULL;
    if (w->is_late)
      /* Queue is now empty, so the late executor no longer needs to
         be forced live: */
      scheme_hash_set(late_will_executors_with_pending, (Scheme_Object *)w, NULL);
  }

  o[0] = a->o;
  a->o = NULL; /* drop the reference so the value can be collected */

  return scheme_apply_multi(a->proc, 1, o);
}
8780 
/* make-will-executor: creates an empty (non-late) will executor;
   the semaphore's count mirrors the number of queued wills. */
static Scheme_Object *make_will_executor(int argc, Scheme_Object **argv)
{
  WillExecutor *exec;
  Scheme_Object *sema;

  exec = MALLOC_ONE_TAGGED(WillExecutor);
  sema = scheme_make_sema(0);

  exec->so.type = scheme_will_executor_type;
  exec->sema = sema;
  exec->first = NULL;
  exec->last = NULL;
  exec->is_late = 0;

  return (Scheme_Object *)exec;
}
8797 
scheme_make_late_will_executor()8798 Scheme_Object *scheme_make_late_will_executor()
8799 {
8800   WillExecutor *w;
8801 
8802   w = (WillExecutor *)make_will_executor(0, NULL);
8803   w->is_late = 1;
8804 
8805   return (Scheme_Object *)w;
8806 }
8807 
/* will-executor? predicate. */
static Scheme_Object *will_executor_p(int argc, Scheme_Object **argv)
{
  if (SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
    return scheme_true;
  else
    return scheme_false;
}
8814 
/* will-register: registers `argv[2]` as the will procedure for value
   `argv[1]` on will executor `argv[0]`. */
static Scheme_Object *register_will(int argc, Scheme_Object **argv)
{
  Scheme_Object *e;

  if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
    scheme_wrong_contract("will-register", "will-executor?", 0, argc, argv);
  scheme_check_proc_arity("will-register", 1, 2, argc, argv);

  if (((WillExecutor *)argv[0])->is_late) {
    /* A late executor is kept reachable through the finalizer data
       (a strong pair) until the will runs: */
    e = scheme_make_pair(argv[0], argv[2]);
    scheme_add_finalizer(argv[1], activate_will, e);
  } else {
    /* If we lose track of the will executor, then drop the finalizer. */
    e = scheme_make_ephemeron(argv[0], argv[2]);
    scheme_add_scheme_finalizer(argv[1], activate_will, e);
  }


  return scheme_void;
}
8835 
/* will-try-execute: runs the next ready will if one is queued;
   otherwise returns the supplied default (or #f) without blocking. */
static Scheme_Object *will_executor_try(int argc, Scheme_Object **argv)
{
  WillExecutor *exec;

  if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
    scheme_wrong_contract("will-try-execute", "will-executor?", 0, argc, argv);

  exec = (WillExecutor *)argv[0];

  /* Non-blocking poll of the queue semaphore: */
  if (scheme_wait_sema(exec->sema, 1))
    return do_next_will(exec);

  return ((argc > 1) ? argv[1] : scheme_false);
}
8852 
/* will-execute: blocks until a will is ready on the executor, then
   runs it and returns its result. */
static Scheme_Object *will_executor_go(int argc, Scheme_Object **argv)
{
  WillExecutor *exec;

  if (NOT_SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_will_executor_type))
    scheme_wrong_contract("will-execute", "will-executor?", 0, argc, argv);

  exec = (WillExecutor *)argv[0];

  /* Blocking wait for a queued will: */
  scheme_wait_sema(exec->sema, 0);

  return do_next_will(exec);
}
8866 
/* Sync support: exposes the executor's queue semaphore, asking that
   a successful wait be re-posted (so syncing does not consume it). */
static Scheme_Object *will_executor_sema(Scheme_Object *w, int *repost)
{
  WillExecutor *exec = (WillExecutor *)w;

  *repost = 1;
  return exec->sema;
}
8872 
/*========================================================================*/
/*                             memory ordering                            */
/*========================================================================*/
8876 
/* We don't currently support threads on a platform with a weaker
   memory model than x86, and no memory-order operations are needed on
   x86. */
8880 
/* No-op: no memory-order operation is needed on the supported (x86)
   memory model; see the comment above. */
static Scheme_Object *memory_order(int argc, Scheme_Object *args[])
{
  return scheme_void;
}
8885 
8886 /*========================================================================*/
8887 /*                         GC preparation and timing                      */
8888 /*========================================================================*/
8889 
/* Registration record for Scheme-level pre/post-GC callbacks; an
   entry is dropped when the key held by its weak box (see
   scheme_add_gc_callback) is collected. */
typedef struct Scheme_GC_Pre_Post_Callback_Desc {
  /* All pointer fields => allocate with GC_malloc() */
  Scheme_Object *boxed_key;  /* weak box holding the registration key */
  Scheme_Object *pre_desc;   /* actions to run before a collection */
  Scheme_Object *post_desc;  /* actions to run after a collection */
  struct Scheme_GC_Pre_Post_Callback_Desc *prev;
  struct Scheme_GC_Pre_Post_Callback_Desc *next;
} Scheme_GC_Pre_Post_Callback_Desc;
8898 
8899 
/* Registers descriptions of pre-GC and post-GC actions (vectors of
   protocol/procedure/argument vectors; see run_gc_callbacks()).
   Returns a key that both identifies the registration for
   scheme_remove_gc_callback() and keeps it alive: only a weak box
   refers to the key, so the registration is dropped once the key is
   unreachable. */
Scheme_Object *scheme_add_gc_callback(Scheme_Object *pre, Scheme_Object *post)
{
  Scheme_GC_Pre_Post_Callback_Desc *desc;
  Scheme_Object *key, *boxed;

  desc = (Scheme_GC_Pre_Post_Callback_Desc *)GC_malloc(sizeof(Scheme_GC_Pre_Post_Callback_Desc));
  desc->pre_desc = pre;
  desc->post_desc = post;

  key = scheme_make_vector(1, scheme_false);
  boxed = scheme_make_weak_box(key);
  desc->boxed_key = boxed;

  /* Push onto the registration list. NOTE(review): `prev` links are
     not maintained here (the old head's `prev` is left unchanged);
     traversal in the remove/run paths appears to rely on `next`
     only -- confirm before depending on `prev`. */
  desc->next = gc_prepost_callback_descs;
  gc_prepost_callback_descs = desc;

  return key;
}
8918 
/* Unlinks the registration(s) whose weakly-held key matches `key`
   from the callback list. */
void scheme_remove_gc_callback(Scheme_Object *key)
{
  Scheme_GC_Pre_Post_Callback_Desc *prev = NULL, *desc;

  desc = gc_prepost_callback_descs;
  while (desc) {
    if (SAME_OBJ(SCHEME_WEAK_BOX_VAL(desc->boxed_key), key)) {
      /* Unlink `desc`: */
      if (prev)
        prev->next = desc->next;
      else
        gc_prepost_callback_descs = desc->next;
      if (desc->next)
        desc->next->prev = desc->prev;
    }
    /* NOTE(review): the scan continues after a match, and `prev`
       advances onto the just-unlinked node; if two adjacent entries
       matched the same key, the second unlink would write to the
       removed node -- confirm whether duplicate keys can occur. */
    prev = desc;
    desc = desc->next;
  }
}
8937 
/* unsafe-add-collect-callbacks: thin wrapper over
   scheme_add_gc_callback(); returns the registration key. */
static Scheme_Object *unsafe_add_collect_callbacks(int argc, Scheme_Object *argv[])
{
  Scheme_Object *key;

  key = scheme_add_gc_callback(argv[0], argv[1]);
  return key;
}
8942 
/* unsafe-remove-collect-callbacks: thin wrapper over
   scheme_remove_gc_callback(). */
static Scheme_Object *unsafe_remove_collect_callbacks(int argc, Scheme_Object *argv[])
{
  Scheme_Object *key = argv[0];

  scheme_remove_gc_callback(key);
  return scheme_void;
}
8948 
/* Calling convention for OS-API-style callbacks (stdcall on Windows): */
#if defined(_MSC_VER) || defined(__MINGW32__)
# define mzOSAPI WINAPI
#else
# define mzOSAPI /* empty */
#endif

/* Function-pointer types for the GC-callback protocols dispatched in
   run_gc_callbacks(); each name encodes the argument/result shape: */
typedef void (*gccb_Int_to_Void)(int);
typedef void (*gccb_Ptr_Ptr_Ptr_Int_to_Void)(void*, void*, void*, int);
typedef void (*gccb_Ptr_Ptr_Ptr_to_Void)(void*, void*, void*);
typedef void* (*gccb_Ptr_Ptr_to_Ptr)(void*, void*);
typedef void (*gccb_Ptr_Ptr_to_Void)(void*, void*);
typedef void (*gccb_Ptr_Ptr_Float_to_Void)(void*, void*, float);
typedef void (*gccb_Ptr_Ptr_Double_to_Void)(void*, void*, double);
typedef void (*gccb_Float_Float_Float_Float_to_Void)(float, float, float, float);
typedef void (*gccb_Ptr_Ptr_Ptr_Nine_Ints)(void*,void*,void*,int,int,int,int,int,int,int,int,int);
typedef void (mzOSAPI *gccb_OSapi_Ptr_Int_to_Void)(void*, int);
typedef void (mzOSAPI *gccb_OSapi_Ptr_Ptr_to_Void)(void*, void*);
typedef void (mzOSAPI *gccb_OSapi_Ptr_Four_Ints_Ptr_Int_Int_Long_to_Void)(void*, int, int, int, int,
                                                                          void*, int, int, long);

/* Without the foreign-function interface, pointer extraction from
   Scheme values is unavailable: */
#ifdef DONT_USE_FOREIGN
# define scheme_extract_pointer(x) NULL
#endif
8972 
run_gc_callbacks(int pre)8973 static void run_gc_callbacks(int pre)
8974   XFORM_SKIP_PROC
8975 {
8976   Scheme_GC_Pre_Post_Callback_Desc *prev = NULL, *desc;
8977   Scheme_Object *acts, *act, *protocol;
8978   void *save = NULL;
8979   int j;
8980 
8981   desc = gc_prepost_callback_descs;
8982   while (desc) {
8983     if (!SCHEME_WEAK_BOX_VAL(desc->boxed_key)) {
8984       if (prev)
8985         prev->next = desc->next;
8986       else
8987         gc_prepost_callback_descs = desc->next;
8988       if (desc->next)
8989         desc->next->prev = desc->prev;
8990     } else {
8991       if (pre)
8992         acts = desc->pre_desc;
8993       else
8994         acts = desc->post_desc;
8995       for (j = 0; j < SCHEME_VEC_SIZE(acts); j++) {
8996         act = SCHEME_VEC_ELS(acts)[j];
8997         protocol = SCHEME_VEC_ELS(act)[0];
8998         /* The set of supported protocols is arbitrary, based on what we've needed
8999            so far. */
9000         if (!strcmp(SCHEME_SYM_VAL(protocol), "int->void")) {
9001           gccb_Int_to_Void proc;
9002           int i;
9003 
9004           proc = (gccb_Int_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9005           i = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[2]);
9006 
9007           proc(i);
9008         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_ptr_int->void")) {
9009           gccb_Ptr_Ptr_Ptr_Int_to_Void proc;
9010           void *a, *b, *c;
9011           int i;
9012 
9013           proc = (gccb_Ptr_Ptr_Ptr_Int_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9014           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9015           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
9016           c = scheme_extract_pointer(SCHEME_VEC_ELS(act)[4]);
9017           i = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[5]);
9018 
9019           proc(a, b, c, i);
9020         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr->save")) {
9021           gccb_Ptr_Ptr_to_Ptr proc;
9022           void *a, *b;
9023 
9024           proc = (gccb_Ptr_Ptr_to_Ptr)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9025           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9026           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
9027 
9028           save = proc(a, b);
9029         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "save!_ptr->void")) {
9030           if (save) {
9031             gccb_Ptr_Ptr_to_Void proc;
9032             void *b;
9033 
9034             proc = (gccb_Ptr_Ptr_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9035             b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9036 
9037             proc(save, b);
9038           }
9039         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_ptr->void")) {
9040           gccb_Ptr_Ptr_Ptr_to_Void proc;
9041           void *a, *b, *c;
9042 
9043           proc = (gccb_Ptr_Ptr_Ptr_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9044           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9045           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
9046           c = scheme_extract_pointer(SCHEME_VEC_ELS(act)[4]);
9047 
9048           proc(a, b, c);
9049         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_float->void")) {
9050           gccb_Ptr_Ptr_Float_to_Void proc;
9051           void *a, *b;
9052           float f;
9053 
9054           proc = (gccb_Ptr_Ptr_Float_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9055           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9056           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
9057           f = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[4]);
9058 
9059           proc(a, b, f);
9060         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_double->void")) {
9061           gccb_Ptr_Ptr_Double_to_Void proc;
9062           void *a, *b;
9063           double d;
9064 
9065           proc = (gccb_Ptr_Ptr_Double_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9066           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9067           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
9068           d = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[4]);
9069 
9070           proc(a, b, d);
9071         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "float_float_float_float->void")) {
9072           gccb_Float_Float_Float_Float_to_Void proc;
9073           double d1, d2, d3, d4;
9074 
9075           proc = (gccb_Float_Float_Float_Float_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9076           d1 = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[2]);
9077           d2 = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[3]);
9078           d3 = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[4]);
9079           d4 = SCHEME_DBL_VAL(SCHEME_VEC_ELS(act)[5]);
9080 
9081           proc(d1, d2, d3, d4);
9082         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "ptr_ptr_ptr_int_int_int_int_int_int_int_int_int->void")) {
9083           gccb_Ptr_Ptr_Ptr_Nine_Ints proc;
9084           void *a, *b, *c;
9085           int i1, i2, i3, i4, i5, i6, i7, i8, i9;
9086 
9087           proc = (gccb_Ptr_Ptr_Ptr_Nine_Ints)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9088           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9089           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
9090           c = scheme_extract_pointer(SCHEME_VEC_ELS(act)[4]);
9091           i1 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[5]);
9092           i2 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[6]);
9093           i3 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[7]);
9094           i4 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[8]);
9095           i5 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[9]);
9096           i6 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[10]);
9097           i7 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[11]);
9098           i8 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[12]);
9099           i9 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[13]);
9100 
9101           proc(a, b, c, i1, i2, i3, i4, i5, i6, i7, i8, i9);
9102         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "osapi_ptr_ptr->void")) {
9103           gccb_OSapi_Ptr_Ptr_to_Void proc;
9104           void *a, *b;
9105 
9106           proc = (gccb_OSapi_Ptr_Ptr_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9107           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9108           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[3]);
9109 
9110           proc(a, b);
9111         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "osapi_ptr_int->void")) {
9112           gccb_OSapi_Ptr_Int_to_Void proc;
9113           void *a;
9114           int i;
9115 
9116           proc = (gccb_OSapi_Ptr_Int_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9117           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9118           i = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[3]);
9119 
9120           proc(a, i);
9121         } else if (!strcmp(SCHEME_SYM_VAL(protocol), "osapi_ptr_int_int_int_int_ptr_int_int_long->void")) {
9122           gccb_OSapi_Ptr_Four_Ints_Ptr_Int_Int_Long_to_Void proc;
9123           void *a, *b;
9124           int i1, i2, i3, i4, i5, i6;
9125           long l1;
9126 
9127           proc = (gccb_OSapi_Ptr_Four_Ints_Ptr_Int_Int_Long_to_Void)scheme_extract_pointer(SCHEME_VEC_ELS(act)[1]);
9128           a = scheme_extract_pointer(SCHEME_VEC_ELS(act)[2]);
9129           i1 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[3]);
9130           i2 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[4]);
9131           i3 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[5]);
9132           i4 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[6]);
9133           b = scheme_extract_pointer(SCHEME_VEC_ELS(act)[7]);
9134           i5 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[8]);
9135           i6 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[9]);
9136           l1 = SCHEME_INT_VAL(SCHEME_VEC_ELS(act)[10]);
9137 
9138           proc(a, i1, i2, i3, i4, b, i5, i6, l1);
9139         }
9140         prev = desc;
9141       }
9142     }
9143     desc = desc->next;
9144   }
9145 }
9146 
9147 #ifdef MZ_XFORM
9148 START_XFORM_SKIP;
9149 #endif
9150 
void scheme_zero_unneeded_rands(Scheme_Thread *p)
{
  /* Call this procedure before GC or before copying out
     a thread's stack. Intentionally a no-op in the current
     implementation; the call sites remain so dead argument slots
     could be cleared here if that becomes necessary. */
}
9156 
static void prepare_thread_for_GC(Scheme_Object *t)
/* Shrinks and zeros the unused parts of thread `t`'s stacks
   (runstack, tail-call buffer, continuation-mark stack, values
   buffer) so the GC neither retains dead values nor scans garbage.
   A thread that is not running is left untouched. */
{
  Scheme_Thread *p = (Scheme_Thread *)t;

  if (!p->running) return;

  /* zero unused part of env stack in each thread */

  if (!p->nestee) {
    Scheme_Saved_Stack *saved;
# define RUNSTACK_TUNE(x) /* x   - Used for performance tuning */
    RUNSTACK_TUNE( intptr_t size; );

    /* Adjust runstack limits only when this thread owns its runstack
       (or has a private one): */
    if ((!p->runstack_owner
         || (p == *p->runstack_owner))
        && p->runstack_start) {
      intptr_t rs_end;
      Scheme_Object **rs_start;

      /* If there's a meta-prompt, we can also zero out past the unused part */
      if (p->meta_prompt && (scheme_prompt_runstack_boundary_start(p->meta_prompt) == p->runstack_start)) {
        rs_end = p->meta_prompt->runstack_boundary_offset;
      } else {
        rs_end = p->runstack_size;
      }

      /* `runstack_tmp_keep` marks a region just below the current
         runstack pointer that must survive the GC: */
      if ((p->runstack_tmp_keep >= p->runstack_start)
          && (p->runstack_tmp_keep < p->runstack))
        rs_start = p->runstack_tmp_keep;
      else
        rs_start = p->runstack;

      scheme_set_runstack_limits(p->runstack_start,
                                 p->runstack_size,
                                 rs_start - p->runstack_start,
                                 rs_end);

      RUNSTACK_TUNE( size = p->runstack_size - (p->runstack - p->runstack_start); );

      /* Apply the same limits to each saved runstack segment: */
      for (saved = p->runstack_saved; saved; saved = saved->prev) {
        RUNSTACK_TUNE( size += saved->runstack_size; );

        if (p->meta_prompt && (scheme_prompt_runstack_boundary_start(p->meta_prompt) == saved->runstack_start)) {
          rs_end = p->meta_prompt->runstack_boundary_offset;
        } else {
          rs_end = saved->runstack_size;
        }

        if (saved->runstack_start)
          scheme_set_runstack_limits(saved->runstack_start,
                                     saved->runstack_size,
                                     saved->runstack_offset,
                                     rs_end);
      }
    }

    RUNSTACK_TUNE( printf("%ld\n", size); );

    /* Clear the tail-call argument buffer (unless it is doubling as
       the protected `runstack_tmp_keep` region): */
    if (p->tail_buffer && (p->tail_buffer != p->runstack_tmp_keep)) {
      int i;
      for (i = 0; i < p->tail_buffer_size; i++) {
        p->tail_buffer[i] = NULL;
      }
    }
  }

  if ((!p->cont_mark_stack_owner
       || (p == *p->cont_mark_stack_owner))
      && p->cont_mark_stack) {
    int segcount, i, segpos;

    /* release unused cont mark stack segments */
    if (p->cont_mark_stack)
      segcount = ((intptr_t)(p->cont_mark_stack - 1) >> SCHEME_LOG_MARK_SEGMENT_SIZE) + 1;
    else
      segcount = 0;
    for (i = segcount; i < p->cont_mark_seg_count; i++) {
      p->cont_mark_stack_segments[i] = NULL;
    }
    if (segcount < p->cont_mark_seg_count)
      p->cont_mark_seg_count = segcount;

    /* zero unused part of last mark stack segment */
    segpos = ((intptr_t)p->cont_mark_stack >> SCHEME_LOG_MARK_SEGMENT_SIZE);

    if (segpos < p->cont_mark_seg_count) {
      Scheme_Cont_Mark *seg = p->cont_mark_stack_segments[segpos];
      int stackpos = ((intptr_t)p->cont_mark_stack & SCHEME_MARK_SEGMENT_MASK);
      if (seg) {
        for (i = stackpos; i < SCHEME_MARK_SEGMENT_SIZE; i++) {
          if (seg[i].key) {
            seg[i].key = NULL;
            seg[i].val = NULL;
            seg[i].cache = NULL;
          } else {
            /* NULL means we already cleared from here on. */
            break;
          }
        }
      }
    }

    {
      MZ_MARK_STACK_TYPE pos;
      /* also zero out slots before the current bottom */
      for (pos = 0; pos < p->cont_mark_stack_bottom; pos++) {
        Scheme_Cont_Mark *seg;
        int stackpos;
        segpos = ((intptr_t)pos >> SCHEME_LOG_MARK_SEGMENT_SIZE);
        seg = p->cont_mark_stack_segments[segpos];
        if (seg) {
          stackpos = ((intptr_t)pos & SCHEME_MARK_SEGMENT_MASK);
          seg[stackpos].key = NULL;
          seg[stackpos].val = NULL;
          seg[stackpos].cache = NULL;
        }
      }
    }
  }

  /* Drop a large multiple-values buffer entirely so it doesn't pin
     memory; just zero a small one: */
  if (p->values_buffer) {
    if (p->values_buffer_size > 128)
      p->values_buffer = NULL;
    else {
      memset(p->values_buffer, 0, sizeof(Scheme_Object*) * p->values_buffer_size);
    }
  }

  p->spare_runstack = NULL;
}
9287 
void scheme_prepare_this_thread_for_GC(Scheme_Thread *p)
/* Like prepare_thread_for_GC(), but when `p` is the currently running
   thread, first copies the global VM stack registers into the thread
   record so the preparation sees up-to-date values. */
{
#ifdef RUNSTACK_IS_GLOBAL
  if (p == scheme_current_thread) {
    p->runstack = MZ_RUNSTACK;
    p->runstack_start = MZ_RUNSTACK_START;
    p->cont_mark_stack = MZ_CONT_MARK_STACK;
    p->cont_mark_pos = MZ_CONT_MARK_POS;
  }
#endif
  prepare_thread_for_GC((Scheme_Object *)p);
}
9300 
static void get_ready_for_GC()
/* Called just before a collection: records timing/accounting, pauses
   futures, runs the "pre" GC callbacks, flushes caches, saves the
   current thread's stack registers, and prepares every thread that
   has run since the last GC. Paired with done_with_GC(). */
{
  start_this_gc_real_time = scheme_get_inexact_milliseconds();
  start_this_gc_time = scheme_get_process_milliseconds();

#ifndef MZ_PRECISE_GC
  {
    /* Track peak heap size for the conservative collector: */
    gc_pre_used_bytes = GC_get_memory_use();
    if (max_gc_pre_used_bytes < gc_pre_used_bytes)
      max_gc_pre_used_bytes = gc_pre_used_bytes;
  }
#endif

#ifdef MZ_USE_FUTURES
  /* Future threads must pause while the GC runs: */
  scheme_future_block_until_gc();
#endif

  run_gc_callbacks(1);

  scheme_zero_unneeded_rands(scheme_current_thread);

  /* Drop caches that would otherwise retain garbage across the GC: */
  scheme_clear_prompt_cache();
  scheme_clear_rx_buffers();
  scheme_clear_bignum_cache();
  scheme_clear_delayed_load_cache();
#ifdef MZ_USE_PLACES
  scheme_clear_place_ifs_stack();
#endif

#ifdef RUNSTACK_IS_GLOBAL
  /* Copy the global stack registers into the current thread record so
     the GC sees up-to-date values: */
  if (scheme_current_thread->running) {
    scheme_current_thread->runstack = MZ_RUNSTACK;
    scheme_current_thread->runstack_start = MZ_RUNSTACK_START;
    scheme_current_thread->cont_mark_stack = MZ_CONT_MARK_STACK;
    scheme_current_thread->cont_mark_pos = MZ_CONT_MARK_POS;
  }
#endif

  /* Prepare each thread that has run: */
  if (gc_prep_thread_chain) {
    Scheme_Thread *p, *next;
    p = gc_prep_thread_chain;
    /* The chain's last thread links to itself as a terminator: */
    while (p != p->gc_prep_chain) {
      prepare_thread_for_GC((Scheme_Object *)p);
      next = p->gc_prep_chain;
      p->gc_prep_chain = NULL;
      p = next;
    }
    prepare_thread_for_GC((Scheme_Object *)p);
    p->gc_prep_chain = NULL;
    gc_prep_thread_chain = NULL;
  }

#ifdef MZ_PRECISE_GC
  scheme_flush_stack_copy_cache();
#endif

  /* Force the scheduler and JIT-generated code to re-check state
     after the collection: */
  scheme_fuel_counter = 0;
  scheme_jit_stack_boundary = (uintptr_t)-1;

#ifdef WINDOWS_PROCESSES
  scheme_suspend_remembered_threads();
#endif

  {
    /* Attach GMP's allocation state to the current thread record so
       it is seen by the GC (see the "gmp allocation" section): */
    GC_CAN_IGNORE void *data;
    data = scheme_gmp_tls_load(scheme_current_thread->gmp_tls);
    scheme_current_thread->gmp_tls_data = data;
  }

#ifdef MZ_PRECISE_GC
# ifdef MZ_USE_JIT
  scheme_clean_native_symtab();
# endif
#endif

  scheme_did_gc_count++;
}
9379 
9380 extern int GC_words_allocd;
9381 
static void done_with_GC()
/* Called just after a collection: restores state saved by
   get_ready_for_GC(), updates GC timing/statistics, restarts futures,
   runs the "post" GC callbacks, and (for the conservative GC) logs a
   collection summary. */
{
  scheme_gmp_tls_unload(scheme_current_thread->gmp_tls, scheme_current_thread->gmp_tls_data);
  scheme_current_thread->gmp_tls_data = NULL;

#ifdef RUNSTACK_IS_GLOBAL
# ifdef MZ_PRECISE_GC
  /* The precise GC may have moved the runstack; reload the globals: */
  if (scheme_current_thread->running) {
    MZ_RUNSTACK = scheme_current_thread->runstack;
    MZ_RUNSTACK_START = scheme_current_thread->runstack_start;
  }
# endif
#endif
#ifdef WINDOWS_PROCESSES
  scheme_resume_remembered_threads();
#endif

  end_this_gc_time = scheme_get_process_milliseconds();
  end_this_gc_real_time = scheme_get_inexact_milliseconds();
  scheme_total_gc_time += (end_this_gc_time - start_this_gc_time);

  /* Restart the GC-prep chain with just the current thread; other
     threads are re-added as they run (self-link = chain terminator): */
  gc_prep_thread_chain = scheme_current_thread;
  scheme_current_thread->gc_prep_chain = scheme_current_thread;

  run_gc_callbacks(0);

#ifdef MZ_USE_FUTURES
  scheme_future_continue_after_gc();
#endif

#ifndef MZ_PRECISE_GC
  {
    /* Conservative-GC logging; the precise GC reports via inform_GC()
       instead, and every conservative collection counts as major: */
    Scheme_Logger *logger = scheme_get_gc_logger();
    int debug_gc = 0, debug_gc_major = 0;

    if (logger && scheme_log_level_topic_p(logger, SCHEME_LOG_DEBUG, gc_symbol))
      debug_gc = 1;
    if (logger && scheme_log_level_topic_p(logger, SCHEME_LOG_DEBUG, gc_major_symbol))
      debug_gc_major = 1;

    if (debug_gc || debug_gc_major) {
      char buf[128], nums[128];
      intptr_t buflen;
      intptr_t post_use = GC_get_memory_use();

      memset(nums, 0, sizeof(nums));

      sprintf(buf,
              "GC: MAJ @ %sK; free %sK %" PRIdPTR "ms @ %" PRIdPTR,
              gc_num(nums, gc_pre_used_bytes), gc_num(nums, gc_pre_used_bytes - post_use),
              end_this_gc_time - start_this_gc_time,
              start_this_gc_time);
      buflen = strlen(buf);

      if (debug_gc)
        scheme_log_name_pfx_message(logger, SCHEME_LOG_DEBUG, gc_symbol, buf, buflen, NULL, 0);
      if (debug_gc_major)
        scheme_log_name_pfx_message(logger, SCHEME_LOG_DEBUG, gc_major_symbol, buf, buflen, NULL, 0);


    }
    num_major_garbage_collections++;
    if (scheme_code_page_total > max_code_page_total)
      max_code_page_total = scheme_code_page_total;
  }
#endif
}
9449 
9450 #ifdef MZ_USE_PLACES
9451 # define PLACE_ID_FORMAT "%d:"
9452 #else
9453 # define PLACE_ID_FORMAT ""
9454 #endif
9455 
9456 #ifdef MZ_PRECISE_GC
9457 
9458 #ifdef MZ_XFORM
9459 END_XFORM_SKIP;
9460 #endif
9461 
static void inform_GC(int master_gc, int major_gc, int inc_gc,
                      intptr_t pre_used, intptr_t post_used,
                      intptr_t pre_admin, intptr_t post_admin,
                      intptr_t post_child_places_used)
/* Precise-GC logging/statistics hook, called by the collector after
   each collection. `master_gc` indicates the places master GC;
   otherwise `major_gc`/`inc_gc` select a major, incremental, or minor
   pass. The pre/post arguments give heap and administrative byte
   counts around the collection. When the "GC" logger is listening at
   debug level, emits a one-line summary plus a `gc-info` prefab
   instance as the log event's data. */
{
  Scheme_Logger *logger;
  int debug_gc = 0, debug_gc_major = 0;

  if (!master_gc) {
    /* Track the peak heap size; a negative value means the peak was
       already logged at exit and must no longer be updated: */
    if ((pre_used > max_gc_pre_used_bytes)
        && (max_gc_pre_used_bytes >= 0))
      max_gc_pre_used_bytes = pre_used;
    if (scheme_code_page_total > max_code_page_total)
      max_code_page_total = scheme_code_page_total;
  }

  if (major_gc)
    num_major_garbage_collections++;
  else
    num_minor_garbage_collections++;

  logger = scheme_get_gc_logger();
  if (logger && scheme_log_level_topic_p(logger, SCHEME_LOG_DEBUG, gc_symbol))
    debug_gc = 1;
  if (logger && major_gc && scheme_log_level_topic_p(logger, SCHEME_LOG_DEBUG, gc_major_symbol))
    debug_gc_major = 1;

  if (debug_gc || debug_gc_major) {
    /* Don't use scheme_log(), because it wants to allocate a buffer
       based on the max value-print width, and we may not be at a
       point where parameters are available. */
    char buf[256], nums[128];
    intptr_t buflen, delta, admin_delta;
    Scheme_Object *vec, *v;

    vec = scheme_false;
    if (!master_gc && gc_info_prefab) {
      /* Build the `gc-info` prefab instance attached to the event: */
      vec = scheme_make_vector(11, scheme_false);
      SCHEME_VEC_ELS(vec)[1] = (major_gc
                                ? major_symbol
                                : (inc_gc ? incremental_symbol : minor_symbol));
      SCHEME_VEC_ELS(vec)[2] = scheme_make_integer(pre_used);
      SCHEME_VEC_ELS(vec)[3] = scheme_make_integer(pre_admin);
      SCHEME_VEC_ELS(vec)[4] = scheme_make_integer(scheme_code_page_total);
      SCHEME_VEC_ELS(vec)[5] = scheme_make_integer(post_used);
      SCHEME_VEC_ELS(vec)[6] = scheme_make_integer(post_admin);
      v = scheme_make_integer_value(start_this_gc_time);
      SCHEME_VEC_ELS(vec)[7] = v;
      v = scheme_make_integer_value(end_this_gc_time);
      SCHEME_VEC_ELS(vec)[8] = v;
      v = scheme_make_double(start_this_gc_real_time);
      SCHEME_VEC_ELS(vec)[9] = v;
      v = scheme_make_double(end_this_gc_real_time);
      SCHEME_VEC_ELS(vec)[10] = v;
      vec = scheme_make_prefab_struct_instance(gc_info_prefab, vec);
    }

    START_XFORM_SKIP;

    memset(nums, 0, sizeof(nums));

    delta = pre_used - post_used;
    admin_delta = (pre_admin - post_admin) - delta;
    sprintf(buf,
            "GC: " PLACE_ID_FORMAT "%s @ %sK(+%sK)[+%sK];"
            " free %sK(%s%sK) %" PRIdPTR "ms @ %" PRIdPTR,
#ifdef MZ_USE_PLACES
            scheme_current_place_id,
#endif
            (master_gc ? "MST" : (major_gc ? "MAJ" : (inc_gc ? "mIn" : "min"))),
            gc_num(nums, pre_used), gc_num(nums, pre_admin - pre_used),
            gc_num(nums, scheme_code_page_total),
            gc_num(nums, delta), ((admin_delta < 0) ? "" : "+"),  gc_num(nums, admin_delta),
            (master_gc ? 0 : (end_this_gc_time - start_this_gc_time)),
            start_this_gc_time);
    buflen = strlen(buf);

    END_XFORM_SKIP;

    if (debug_gc)
      scheme_log_name_pfx_message(logger, SCHEME_LOG_DEBUG, gc_symbol, buf, buflen, vec, 0);
    if (debug_gc_major)
      scheme_log_name_pfx_message(logger, SCHEME_LOG_DEBUG, gc_major_symbol, buf, buflen, vec, 0);
  }

#ifdef MZ_USE_PLACES
  if (!master_gc) {
    /* Report this place's memory use (including child places): */
    scheme_place_set_memory_use(post_used + post_child_places_used);
  }
#endif
}
9553 #endif
9554 
static void log_peak_memory_use()
/* Logs the peak memory use, allocation totals, GC counts, and total
   GC time to the "GC" logger at the `info` level. Afterward,
   max_gc_pre_used_bytes is set negative so the peak is logged only
   once (inform_GC() then stops updating it). */
{
  if (max_gc_pre_used_bytes > 0) {
    Scheme_Logger *logger;
    int debug_gc = 0, debug_gc_major = 0;

    logger = scheme_get_gc_logger();
    if (logger && scheme_log_level_topic_p(logger, SCHEME_LOG_INFO, gc_symbol))
      debug_gc = 1;
    if (logger && scheme_log_level_topic_p(logger, SCHEME_LOG_INFO, gc_major_symbol))
      debug_gc_major = 1;

    if (debug_gc || debug_gc_major) {
      char buf[256], nums[128], *num, *numc, *numt, *num2, *numa;
      intptr_t buflen, allocated_bytes, max_bytes;
      /* Cumulative bytes ever allocated: */
#ifdef MZ_PRECISE_GC
      allocated_bytes = GC_get_memory_ever_used();
#else
      allocated_bytes = GC_get_total_bytes();
#endif
      /* Peak bytes held at any one time: */
#ifdef MZ_PRECISE_GC
      max_bytes = GC_get_memory_max_allocated();
#else
      max_bytes = GC_get_memory_peak_use();
#endif
      /* All the numbers share the `nums` scratch buffer; it must be
         zero-filled before the first gc_num() call: */
      memset(nums, 0, sizeof(nums));
      num = gc_num(nums, max_gc_pre_used_bytes);
      numa = gc_num(nums, max_bytes - max_gc_pre_used_bytes);
      numc = gc_num(nums, max_code_page_total);
      numt = gc_num(nums, allocated_bytes);
      num2 = gc_unscaled_num(nums, scheme_total_gc_time);
      sprintf(buf,
              "GC: " PLACE_ID_FORMAT "atexit peak %sK(+%sK)[+%sK]; alloc %sK; major %d; minor %d; %sms",
#ifdef MZ_USE_PLACES
              scheme_current_place_id,
#endif
              num,
              numa,
              numc,
              numt,
              num_major_garbage_collections,
              num_minor_garbage_collections,
              num2);
      buflen = strlen(buf);

      if (debug_gc)
        scheme_log_name_pfx_message(logger, SCHEME_LOG_INFO, gc_symbol, buf, buflen, scheme_false, 0);
      if (debug_gc_major)
        scheme_log_name_pfx_message(logger, SCHEME_LOG_INFO, gc_major_symbol, buf, buflen, scheme_false, 0);

      /* Setting to a negative value ensures that we log the peak only once: */
      max_gc_pre_used_bytes = -1;
    }
  }
}
9610 
static char *gc_unscaled_num(char *nums, intptr_t v)
/* Formats `v` with a comma every three digits. The result is appended
   after any strings already in `nums` (the caller zero-fills the
   buffer once before the first call), so several formatted numbers
   can share one scratch buffer; returns a pointer to the new string. */
{
  int start, k, digits, total, run, shift;

  /* Skip past previously written strings: the free area begins at the
     first position followed by two consecutive NULs. */
  for (start = 0; nums[start] || nums[start + 1]; start++) {
  }
  start++;

  /* Print the raw digits, then widen in place to make room for
     commas, copying from right to left. */
  sprintf(nums + start, "%" PRIdPTR, v);
  for (digits = 0; nums[start + digits]; digits++) {
  }
  /* A leading '-' does not count toward the first digit group: */
  total = digits + ((digits + ((nums[start] == '-') ? -2 : -1)) / 3);

  run = 0;                 /* digits emitted since the last comma */
  shift = total - digits;  /* how far right the remaining digits move */
  for (k = start + total - 1; k > start; k--) {
    if (run == 3) {
      nums[k] = ',';
      shift--;
      run = 0;
    } else {
      nums[k] = nums[k - shift];
      run++;
    }
  }

  return nums + start;
}
9638 
static char *gc_num(char *nums, intptr_t v)
/* Formats a byte count `v` as comma-separated kilobytes, appending
   into the shared `nums` scratch buffer. */
{
  intptr_t kb = v / 1024; /* bytes => kbytes */
  return gc_unscaled_num(nums, kb);
}
9643 
9644 /*========================================================================*/
9645 /*                                 stats                                  */
9646 /*========================================================================*/
9647 
/* Stores `a` at index `i` of the stats vector: directly into `v` when
   the caller's argument was a plain vector (v == ov), or through the
   chaperone protocol when `ov` is a chaperone wrapping `v`. */
static void set_perf_vector(Scheme_Object *v, Scheme_Object *ov, int i, Scheme_Object *a)
{
  if (!SAME_OBJ(v, ov))
    scheme_chaperone_vector_set(ov, i, a);
  else
    SCHEME_VEC_ELS(v)[i] = a;
}
9655 
static Scheme_Object *current_stats(int argc, Scheme_Object *argv[])
/* Implements `vector-set-performance-stats!`: fills the mutable
   vector argv[0] with performance counters. Given a thread as
   argv[1], fills per-thread stats (running?, dead?, blocked?, stack
   size); given #f or no second argument, fills process-wide stats.
   Only as many slots as the vector has are written: the switches run
   from the highest supported index down via deliberate fallthrough.
   Returns void. */
{
  Scheme_Object *v, *ov;
  Scheme_Thread *t = NULL;

  v = argv[0];

  ov = v;
  /* Accept a chaperoned vector; writes then go through the chaperone: */
  if (SCHEME_CHAPERONEP(v))
    v = SCHEME_CHAPERONE_VAL(v);

  if (!SCHEME_MUTABLE_VECTORP(v))
    scheme_wrong_contract("vector-set-performance-stats!", "(and/c vector? (not/c immutable?))", 0, argc, argv);
  if (argc > 1) {
    if (!SCHEME_FALSEP(argv[1])) {
      if (!SCHEME_THREADP(argv[1]))
	scheme_wrong_contract("vector-set-performance-stats!", "(or/c thread? #f)", 0, argc, argv);
      t = (Scheme_Thread *)argv[1];
    }
  }

  if (t) {
    switch (SCHEME_VEC_SIZE(v)) {
    default:
    case 4:
      {
	/* Stack size: */
	intptr_t sz = 0;

	if (MZTHREAD_STILL_RUNNING(t->running)) {
	  Scheme_Overflow *overflow;
	  Scheme_Saved_Stack *runstack_saved;

	  /* C stack */
	  if (t == scheme_current_thread) {
	    void *stk_start, *stk_end;
	    stk_start = t->stack_start;
	    stk_end = (void *)&stk_end;
#         ifdef STACK_GROWS_UP
	    sz = (intptr_t)stk_end XFORM_OK_MINUS (intptr_t)stk_start;
#         endif
#         ifdef STACK_GROWS_DOWN
	    sz = (intptr_t)stk_start XFORM_OK_MINUS (intptr_t)stk_end;
#         endif
	  } else {
	    /* Swapped-out thread: count its copied-out C stack */
	    if (t->jmpup_buf.stack_copy)
	      sz = t->jmpup_buf.stack_size;
	  }
	  for (overflow = t->overflow; overflow; overflow = overflow->prev) {
	    sz += overflow->jmp->cont.stack_size;
	  }

	  /* Scheme stack */
	  {
	    int ssz;
	    if (t == scheme_current_thread) {
	      ssz = (MZ_RUNSTACK_START + t->runstack_size) - MZ_RUNSTACK;
	    } else {
	      ssz = (t->runstack_start + t->runstack_size) - t->runstack;
	    }
	    for (runstack_saved = t->runstack_saved; runstack_saved; runstack_saved = runstack_saved->prev) {
	      ssz += runstack_saved->runstack_size;
	    }
	    sz += sizeof(Scheme_Object *) * ssz;
	  }

	  /* Mark stack */
	  if (t == scheme_current_thread) {
	    sz += ((intptr_t)scheme_current_cont_mark_pos >> 1) * sizeof(Scheme_Cont_Mark);
	  } else {
	    sz += ((intptr_t)t->cont_mark_pos >> 1) * sizeof(Scheme_Cont_Mark);
	  }
	}

        set_perf_vector(v, ov, 3, scheme_make_integer(sz));
      }
      /* fallthrough */
    case 3:
      set_perf_vector(v, ov, 2, (t->block_descriptor
                                 ? scheme_true
                                 : ((t->running & MZTHREAD_SUSPENDED)
                                    ? scheme_true
                                    : scheme_false)));
      /* fallthrough */
    case 2:
      {
	Scheme_Object *dp;
	dp = thread_dead_p(1, (Scheme_Object **) mzALIAS &t);
	set_perf_vector(v, ov, 1, dp);
      }
      /* fallthrough */
    case 1:
      {
	Scheme_Object *rp;
	rp = thread_running_p(1, (Scheme_Object **) mzALIAS &t);
	set_perf_vector(v, ov, 0, rp);
      }
      /* fallthrough */
    case 0:
      break;
    }
  } else {
    intptr_t cpuend, end, gcend;

    cpuend = scheme_get_process_milliseconds();
    end = scheme_get_milliseconds();
    gcend = scheme_total_gc_time;

    switch (SCHEME_VEC_SIZE(v)) {
    default:
    case 12:
      set_perf_vector(v, ov, 11, scheme_make_integer(max_gc_pre_used_bytes));
      /* fallthrough */
    case 11:
      set_perf_vector(v, ov, 10, scheme_make_integer(scheme_jit_malloced));
      /* fallthrough */
    case 10:
      set_perf_vector(v, ov, 9, scheme_make_integer(scheme_hash_iteration_count));
      /* fallthrough */
    case 9:
      set_perf_vector(v, ov, 8, scheme_make_integer(scheme_hash_request_count));
      /* fallthrough */
    case 8:
      set_perf_vector(v, ov, 7, scheme_make_integer(scheme_num_read_syntax_objects));
      /* fallthrough */
    case 7:
      set_perf_vector(v, ov, 6, scheme_make_integer(num_running_threads+1));
      /* fallthrough */
    case 6:
      set_perf_vector(v, ov, 5, scheme_make_integer(scheme_overflow_count));
      /* fallthrough */
    case 5:
      set_perf_vector(v, ov, 4, scheme_make_integer(thread_swap_count));
      /* fallthrough */
    case 4:
      set_perf_vector(v, ov, 3, scheme_make_integer(scheme_did_gc_count));
      /* fallthrough */
    case 3:
      set_perf_vector(v, ov, 2, scheme_make_integer(gcend));
      /* fallthrough */
    case 2:
      set_perf_vector(v, ov, 1, scheme_make_integer(end));
      /* fallthrough */
    case 1:
      set_perf_vector(v, ov, 0, scheme_make_integer(cpuend));
      /* fallthrough */
    case 0:
      break;
    }
  }

  return scheme_void;
}
9793 
9794 /*========================================================================*/
9795 /*                             gmp allocation                             */
9796 /*========================================================================*/
9797 
9798 /* Allocate atomic, immobile memory for GMP. Although we have set up
9799    GMP to reliably free anything that it allocates, we allocate via
9800    the GC to get accounting with 3m. The set of allocated blocks are
9801    stored in a "mem_pool" variable, which is a linked list; GMP
9802    allocates with a stack discipline, so maintaining the list is easy.
   Meanwhile, scheme_gmp_tls_unload, etc., attach the pool to the
9804    owning thread as needed for GC. */
9805 
void *scheme_malloc_gmp(uintptr_t amt, void **mem_pool)
/* GMP allocation hook: obtains atomic, interior-pointer-friendly
   memory from the GC and records the block by consing it onto the
   *mem_pool list (see the section comment above for why). */
{
  void *block, *link;

  block = scheme_malloc_atomic_allow_interior(amt);

  link = scheme_make_raw_pair(block, *mem_pool);
  *mem_pool = link;

  return block;
}
9817 
scheme_free_gmp(void * p,void ** mem_pool)9818 void scheme_free_gmp(void *p, void **mem_pool)
9819 {
9820   if (p != SCHEME_CAR(*mem_pool))
9821     scheme_log(NULL,
9822                SCHEME_LOG_FATAL,
9823                0,
9824                "bad GMP memory free");
9825   *mem_pool = SCHEME_CDR(*mem_pool);
9826 }
9827 
9828 /*========================================================================*/
9829 /*                               precise GC                               */
9830 /*========================================================================*/
9831 
scheme_new_jmpupbuf_holder(void)9832 Scheme_Jumpup_Buf_Holder *scheme_new_jmpupbuf_holder(void)
9833 /* Scheme_Jumpup_Buf_Holder exists for precise GC, and for external
9834    programs that want to store Jumpup_Bufs, because the GC interaction
9835    is tricky. For example, we use it above for a special trampoline
9836    implementation. */
9837 {
9838   Scheme_Jumpup_Buf_Holder *h;
9839 
9840   h = MALLOC_ONE_RT(Scheme_Jumpup_Buf_Holder);
9841 #ifdef MZ_PRECISE_GC
9842   h->type = scheme_rt_buf_holder;
9843 #endif
9844 
9845   return h;
9846 }
9847 
9848 #ifdef MZ_PRECISE_GC
scheme_get_current_thread_stack_start(void)9849 uintptr_t scheme_get_current_thread_stack_start(void)
9850 {
9851   Scheme_Thread *p;
9852   p = scheme_current_thread;
9853   return (uintptr_t)p->stack_start;
9854 }
9855 #endif
9856 
9857 #ifdef MZ_PRECISE_GC
9858 
9859 START_XFORM_SKIP;
9860 
9861 #include "mzmark_thread.inc"
9862 
static void register_traversers(void)
/* Registers the precise-GC mark/fixup procedures (defined in
   "mzmark_thread.inc") for the object types and runtime-record types
   owned by this file. */
{
  /* First-class object types: */
  GC_REG_TRAV(scheme_will_executor_type, mark_will_executor_val);
  GC_REG_TRAV(scheme_custodian_type, mark_custodian_val);
  GC_REG_TRAV(scheme_cust_box_type, mark_custodian_box_val);
  GC_REG_TRAV(scheme_thread_hop_type, mark_thread_hop);
  GC_REG_TRAV(scheme_evt_set_type, mark_evt_set);
  GC_REG_TRAV(scheme_thread_set_type, mark_thread_set);
  GC_REG_TRAV(scheme_config_type, mark_config);
  GC_REG_TRAV(scheme_thread_cell_type, mark_thread_cell);
  GC_REG_TRAV(scheme_plumber_type, mark_plumber);

  /* Internal runtime-record types: */
  GC_REG_TRAV(scheme_rt_param_data, mark_param_data);
  GC_REG_TRAV(scheme_rt_will, mark_will);
  GC_REG_TRAV(scheme_rt_evt, mark_evt);
  GC_REG_TRAV(scheme_rt_syncing, mark_syncing);
  GC_REG_TRAV(scheme_rt_parameterization, mark_parameterization);
}
9881 
9882 END_XFORM_SKIP;
9883 
9884 #endif
9885