1 #include "schpriv.h"
2 #include "schrktio.h"
3 static Scheme_Object* scheme_place_enabled(int argc, Scheme_Object *args[]);
4 static Scheme_Object* scheme_place_shared(int argc, Scheme_Object *args[]);
5
6 THREAD_LOCAL_DECL(int scheme_current_place_id);
7
8 SHARED_OK static intptr_t embedded_load_len;
9 SHARED_OK static const char *embedded_load;
10
11 #ifdef MZ_USE_PLACES
12
13 #include "schmach.h"
14
15 READ_ONLY static Scheme_Object *scheme_def_place_exit_proc;
16 SHARED_OK static int scheme_places_enabled = 1;
17
18 ROSYM static Scheme_Object *quote_symbol;
19
20 static int id_counter;
21 static mzrt_mutex *id_counter_mutex;
22
23 SHARED_OK mz_proc_thread *scheme_master_proc_thread;
24 THREAD_LOCAL_DECL(static struct Scheme_Place_Object *place_object);
25 THREAD_LOCAL_DECL(static Scheme_Place *all_child_places);
26 THREAD_LOCAL_DECL(static uintptr_t force_gc_for_place_accounting);
27 THREAD_LOCAL_DECL(static Scheme_Struct_Type *place_event_prefab);
28 static Scheme_Object *scheme_place(int argc, Scheme_Object *args[]);
29 static Scheme_Object *place_pumper_threads(int argc, Scheme_Object *args[]);
30 static Scheme_Object *place_wait(int argc, Scheme_Object *args[]);
31 static Scheme_Object *place_kill(int argc, Scheme_Object *args[]);
32 static Scheme_Object *place_break(int argc, Scheme_Object *args[]);
33 static Scheme_Object *place_p(int argc, Scheme_Object *args[]);
34 static Scheme_Object *place_send(int argc, Scheme_Object *args[]);
35 static Scheme_Object *place_receive(int argc, Scheme_Object *args[]);
36 static Scheme_Object *place_channel_p(int argc, Scheme_Object *args[]);
37 static Scheme_Object *def_place_exit_handler_proc(int argc, Scheme_Object *args[]);
38 static Scheme_Object *place_channel(int argc, Scheme_Object *args[]);
39 static Scheme_Object* place_allowed_p(int argc, Scheme_Object *args[]);
40 static void cust_kill_place(Scheme_Object *pl, void *notused);
41 static void resume_one_place_with_lock(Scheme_Place_Object *place_obj);
42
43 static Scheme_Place_Async_Channel *place_async_channel_create();
44 static Scheme_Place_Bi_Channel *place_bi_channel_malloc();
45 static Scheme_Place_Bi_Channel *place_bi_channel_create();
46 static Scheme_Place_Bi_Channel *place_bi_peer_channel_create(Scheme_Place_Bi_Channel *orig);
47 static int place_channel_ready(Scheme_Object *so, Scheme_Schedule_Info *sinfo);
48 static void place_async_send(Scheme_Place_Async_Channel *ch, Scheme_Object *o);
49 static Scheme_Object *place_async_receive(Scheme_Place_Async_Channel *ch);
50 static Scheme_Object *places_deep_copy_to_master(Scheme_Object *so);
51 static Scheme_Object *make_place_dead(int argc, Scheme_Object *argv[]);
52 static int place_dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo);
53 static void* GC_master_malloc_tagged(size_t size);
54 static void destroy_place_object_locks(Scheme_Place_Object *place_obj);
55
56 static void bi_channel_refcount(Scheme_Place_Bi_Channel *ch, int delta);
57 static void bi_channel_set_finalizer(Scheme_Place_Bi_Channel *ch);
58
59 #if defined(MZ_USE_PLACES) && defined(MZ_PRECISE_GC)
60 static Scheme_Object *places_deep_copy_worker(Scheme_Object *so, Scheme_Hash_Table **ht,
61 int mode, int gcable, int can_raise_exn,
62 Scheme_Object **master_chain,
63 Scheme_Object **invalid_object,
64 char **delayed_err, intptr_t *delayed_errno, intptr_t *delayed_errkind);
65 # define mzPDC_CHECK 0
66 # define mzPDC_COPY 1
67 # define mzPDC_UNCOPY 2
68 # define mzPDC_DIRECT_UNCOPY 3
69 # define mzPDC_DESER 4
70 # define mzPDC_CLEAN 5
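/* A rough guide to the mzPDC_* modes, inferred from their uses below:
   CHECK traverses a value only to verify that it is a legal place
   message; COPY builds the serialized, shareable form of a message on
   the sending side; UNCOPY rebuilds live objects from that serialized
   form in the receiving place; DIRECT_UNCOPY copies a live value
   straight into the receiving place with no intermediate serialized
   form; DESER finishes deserializing a message that has already been
   moved into place-local memory (interning symbols, installing
   finalizers); CLEAN discards a serialized message that will never be
   received, closing transferred file descriptors and dropping channel
   refcounts. */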
71
72 static Scheme_Object *strip_chaperones(Scheme_Object *so);
73 #endif
74
75 static Scheme_Object *places_prepare_direct(Scheme_Object *so);
76 static void log_place_event(const char *what, const char *tag, int has_amount, intptr_t amount);
77
78 # ifdef MZ_PRECISE_GC
79 static void register_traversers(void);
80 # endif
81
82 static void *place_start_proc(void *arg);
83 MZ_DO_NOT_INLINE(static void *place_start_proc_after_stack(void *data_arg, void *stack_base));
84
85 # define PLACE_PRIM_W_ARITY(name, func, a1, a2, env) ADD_PRIM_W_ARITY(name, func, a1, a2, env)
86
87 #else
88
89 SHARED_OK static int scheme_places_enabled = 0;
90
91 # define PLACE_PRIM_W_ARITY(name, func, a1, a2, env) ADD_PRIM_W_ARITY(name, not_implemented, a1, a2, env)
92
93 static Scheme_Object *not_implemented(int argc, Scheme_Object **argv)
94 {
95 scheme_raise_exn(MZEXN_FAIL_UNSUPPORTED, "not supported");
96 return NULL;
97 }
98
99 # ifdef MZ_PRECISE_GC
100 static void register_traversers(void) { }
101 # endif
102
103 #endif
104
105 /*========================================================================*/
106 /* initialization */
107 /*========================================================================*/
108
109 void scheme_init_place(Scheme_Startup_Env *env)
110 {
111 #ifdef MZ_PRECISE_GC
112 register_traversers();
113 #endif
114
115 scheme_switch_prim_instance(env, "#%place");
116
117 ADD_PRIM_W_ARITY("place-enabled?", scheme_place_enabled, 0, 0, env);
118 ADD_PRIM_W_ARITY("place-shared?", scheme_place_shared, 1, 1, env);
119 PLACE_PRIM_W_ARITY("dynamic-place", scheme_place, 5, 5, env);
120 PLACE_PRIM_W_ARITY("place-pumper-threads", place_pumper_threads, 1, 2, env);
121 PLACE_PRIM_W_ARITY("place-wait", place_wait, 1, 1, env);
122 PLACE_PRIM_W_ARITY("place-kill", place_kill, 1, 1, env);
123 PLACE_PRIM_W_ARITY("place-break", place_break, 1, 2, env);
124 PLACE_PRIM_W_ARITY("place?", place_p, 1, 1, env);
125 PLACE_PRIM_W_ARITY("place-channel", place_channel, 0, 0, env);
126 PLACE_PRIM_W_ARITY("place-channel-put", place_send, 2, 2, env);
127 PLACE_PRIM_W_ARITY("place-channel-get", place_receive, 1, 1, env);
128 PLACE_PRIM_W_ARITY("place-channel?", place_channel_p, 1, 1, env);
129 PLACE_PRIM_W_ARITY("place-message-allowed?", place_allowed_p, 1, 1, env);
130 PLACE_PRIM_W_ARITY("place-dead-evt", make_place_dead, 1, 1, env);
131
132 scheme_restore_prim_instance(env);
133 }
134
135 void scheme_init_place_per_place()
136 {
137 #ifdef MZ_USE_PLACES
138 REGISTER_SO(all_child_places);
139
140 REGISTER_SO(place_event_prefab);
141 place_event_prefab = scheme_lookup_prefab_type(scheme_intern_symbol("place-event"), 4);
142 #endif
143 }
144
145 static Scheme_Object* scheme_place_enabled(int argc, Scheme_Object *args[]) {
146 return (scheme_places_enabled == 0) ? scheme_false : scheme_true;
147 }
148
149 static Scheme_Object* scheme_place_shared(int argc, Scheme_Object *args[]) {
150 return SHARED_ALLOCATEDP(args[0]) ? scheme_true : scheme_false;
151 }
152
153 void scheme_init_places_once() {
154 #ifdef MZ_USE_PLACES
155 scheme_add_evt(scheme_place_type, (Scheme_Ready_Fun)place_channel_ready, NULL, NULL, 1);
156 scheme_add_evt(scheme_place_bi_channel_type, (Scheme_Ready_Fun)place_channel_ready, NULL, NULL, 1);
157 scheme_add_evt(scheme_place_dead_type, (Scheme_Ready_Fun)place_dead_ready, NULL, NULL, 1);
158 mzrt_mutex_create(&id_counter_mutex);
159 REGISTER_SO(scheme_def_place_exit_proc);
160 scheme_def_place_exit_proc = scheme_make_prim_w_arity(def_place_exit_handler_proc, "default-place-exit-handler", 1, 1);
161
162 REGISTER_SO(quote_symbol);
163 quote_symbol = scheme_intern_symbol("quote");
164 #endif
165 }
166
167 int scheme_get_place_id(void)
168 {
169 #ifdef MZ_USE_PLACES
170 return scheme_current_place_id;
171 #else
172 return 0;
173 #endif
174 }
175
176 void scheme_register_embedded_load(intptr_t len, const char *s)
177 {
178 embedded_load_len = len;
179 embedded_load = s;
180 }
181
182 #ifdef MZ_USE_PLACES
183
184 /************************************************************************/
185 /************************************************************************/
186 /************************************************************************/
187
188 typedef struct Place_Start_Data {
189 /* Allocated as array of objects, so all
190      fields must be pointers */
191 Scheme_Object *module;
192 Scheme_Object *function;
193 Scheme_Object *channel;
194 Scheme_Object *current_library_collection_paths;
195 Scheme_Object *current_library_collection_links;
196 Scheme_Object *compiled_roots;
197 Scheme_Object *current_directory;
198 mzrt_sema *ready; /* malloc'ed item */
199 struct Scheme_Place_Object *place_obj; /* malloc'ed item */
200 struct NewGC *parent_gc;
201 Scheme_Object *cust_limit;
202 rktio_fd_t *in;
203 rktio_fd_t *out;
204 rktio_fd_t *err;
205 Scheme_Object *new_id;
206 } Place_Start_Data;
207
208 static void null_out_runtime_globals() {
209 scheme_current_thread = NULL;
210 scheme_first_thread = NULL;
211 scheme_main_thread = NULL;
212
213 scheme_current_runstack_start = NULL;
214 scheme_current_runstack = NULL;
215 scheme_current_cont_mark_stack = 0;
216 scheme_current_cont_mark_pos = 0;
217 }
218
219 Scheme_Object *scheme_make_place_object() {
220 Scheme_Place_Object *place_obj;
221 place_obj = GC_master_malloc_tagged(sizeof(Scheme_Place_Object));
222 place_obj->so.type = scheme_place_object_type;
223 mzrt_mutex_create(&place_obj->lock);
224 place_obj->die = 0;
225 place_obj->dead = 0;
226 place_obj->refcount = 1;
227 place_obj->pbreak = 0;
228 place_obj->result = 1;
229 return (Scheme_Object *)place_obj;
230 }
231
232 static void close_six_fds(rktio_fd_t **rw) {
233 int i;
234 for (i = 0; i < 6; i++) {
235 if (rw[i])
236 rktio_close_noerr(scheme_rktio, rw[i]);
237 }
238 }
239
240 Scheme_Object *place_pumper_threads(int argc, Scheme_Object *args[]) {
241 Scheme_Place *place;
242 Scheme_Object *tmp;
243
244 place = (Scheme_Place *) args[0];
245 if (!SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type))
246 scheme_wrong_contract("place-pumper-threads", "place?", 0, argc, args);
247
248 if (argc == 2) {
249 tmp = args[1];
250 if (!SCHEME_VECTORP(tmp) || SCHEME_VEC_SIZE(tmp) != 3)
251 scheme_wrong_type("place-pumper-threads", "vector of size 3", 1, argc, args);
252 place->pumper_threads = tmp;
253 }
254 return place->pumper_threads;
255 }
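/* Note on the vector set above: it is expected to hold the three
   threads that pump data between the place's stdin/stdout/stderr pipes
   and the parent's ports; place_wait (below) joins all three before
   reporting the place's result. */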
256
257 Scheme_Object *scheme_place(int argc, Scheme_Object *args[]) {
258 Scheme_Place *place;
259 Place_Start_Data *place_data;
260 mz_proc_thread *proc_thread;
261 Scheme_Object *collection_paths;
262 Scheme_Object *collection_links;
263 Scheme_Object *directory;
264 Scheme_Place_Object *place_obj;
265 mzrt_sema *ready;
266 struct NewGC *parent_gc;
267 Scheme_Custodian *cust;
268 intptr_t mem_limit;
269 Scheme_Object *in_arg;
270 Scheme_Object *out_arg;
271 Scheme_Object *err_arg;
272 rktio_fd_t *rw[6], **rwp;
273
274 rw[0] = NULL;
275 rw[1] = NULL;
276 rw[2] = NULL;
277 rw[3] = NULL;
278 rw[4] = NULL;
279 rw[5] = NULL;
280
281 /* To avoid runaway place creation, check for termination before continuing. */
282 scheme_thread_block(0.0);
283
284 parent_gc = GC_get_current_instance();
285
286 /* create place object */
287 place = MALLOC_ONE_TAGGED(Scheme_Place);
288 place->so.type = scheme_place_type;
289 place_obj = (Scheme_Place_Object *) scheme_make_place_object();
290 place->place_obj = place_obj;
291 {
292 GC_CAN_IGNORE void *handle;
293 handle = scheme_get_signal_handle();
294 place_obj->parent_signal_handle = handle;
295 }
296
297 /* The use_factor partly determines how often a child place notifies
298 a parent place that it is using more memory. If the child
299      notified the parent every time its memory use increased, that
300 would probably be too often. But notifying every time the memory
301 use doubles isn't good enough, because a long chain of places
302 wouldn't alert parents often enough to limit total memory
303 use. Decreasing the factor for each generation means that the
304 alerts become more frequent as nesting gets deeper. */
305 place_obj->use_factor = (place_object ? (place_object->use_factor / 2) : 1.0);
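  /* Illustration only: places created by the initial place start with a
     use factor of 1.0, their children get 0.5, grandchildren 0.25, and
     so on, so more deeply nested places notify their parents about
     memory growth more frequently. */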
306
307 mzrt_sema_create(&ready, 0);
308
309 /* pass critical info to new place */
310 place_data = MALLOC_ONE(Place_Start_Data);
311 place_data->ready = ready;
312 place_data->place_obj = place_obj;
313 place_data->parent_gc = parent_gc;
314
315 {
316 in_arg = args[2];
317 out_arg = args[3];
318 err_arg = args[4];
319
320 if (!scheme_is_module_path(args[0]) && !SCHEME_PATHP(args[0]) && !scheme_is_resolved_module_path(args[0])) {
321 scheme_wrong_contract("dynamic-place", "(or/c module-path? path? resolved-module-path?)", 0, argc, args);
322 }
323 if (!SCHEME_SYMBOLP(args[1])) {
324 scheme_wrong_contract("dynamic-place", "symbol?", 1, argc, args);
325 }
326 if (SCHEME_TRUEP(in_arg) && !SCHEME_TRUEP(scheme_file_stream_port_p(1, &in_arg))) {
327 scheme_wrong_contract("dynamic-place", "(or/c (and/c file-stream-port? input-port?) #f)", 2, argc, args);
328 }
329 if (SCHEME_TRUEP(out_arg) && !SCHEME_TRUEP(scheme_file_stream_port_p(1, &out_arg))) {
330 scheme_wrong_contract("dynamic-place", "(or/c (and/c file-stream-port? output-port?) #f)", 3, argc, args);
331 }
332 if (SCHEME_TRUEP(err_arg) && !SCHEME_TRUEP(scheme_file_stream_port_p(1, &err_arg))) {
333 scheme_wrong_contract("dynamic-place", "(or/c (and/c file-stream-port? output-port?) #f)", 4, argc, args);
334 }
335
336 if (SCHEME_PAIRP(args[0])
337 && SAME_OBJ(SCHEME_CAR(args[0]), quote_symbol)
338 && !scheme_is_predefined_module_path(args[0])) {
339 scheme_contract_error("dynamic-place", "not a filesystem or predefined module-path",
340 "module path", 1, args[0],
341 NULL);
342 }
343
344 place_data->module = args[0];
345 place_data->function = args[1];
346 place_data->ready = ready;
347
348 /* create channel */
349 {
350 Scheme_Place_Bi_Channel *channel;
351 channel = place_bi_channel_create();
352 place->channel = (Scheme_Object *) channel;
353 channel = place_bi_peer_channel_create(channel);
354 place_data->channel = (Scheme_Object *) channel;
355 }
356 }
357
358 collection_paths = scheme_current_library_collection_paths(0, NULL);
359 place_data->current_library_collection_paths = collection_paths;
360
361 collection_links = scheme_current_library_collection_links(0, NULL);
362 place_data->current_library_collection_links = collection_links;
363
364 collection_paths = scheme_compiled_file_roots(0, NULL);
365 place_data->compiled_roots = collection_paths;
366
367 directory = scheme_current_directory(0, NULL);
368 place_data->current_directory = directory;
369
370 cust = scheme_get_current_custodian();
371 mem_limit = GC_get_account_memory_limit(cust);
372 place_data->cust_limit = scheme_make_integer(mem_limit);
373 place_obj->memory_limit = mem_limit;
374 place_obj->parent_need_gc = &force_gc_for_place_accounting;
375
376 {
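    /* Layout of rw[] as used below: slots 0/1 are the read/write ends of
       the child's stdin pipe, 2/3 of its stdout pipe, and 4/5 of its
       stderr pipe. The child receives rw[0], rw[3], and rw[5]; the
       parent keeps rw[1], rw[2], and rw[4] and wraps them as ports in
       the result values. When a caller-supplied port is duplicated
       instead of piped, only the child's slot is filled. */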
377 rktio_fd_t *tmpfd;
378
379 if (SCHEME_TRUEP(in_arg)) {
380 if (scheme_port_closed_p(in_arg)) {
381 close_six_fds(rw);
382 scheme_contract_error("dynamic-place", "port is closed",
383 "port", 1, in_arg,
384 NULL);
385 }
386 scheme_get_port_rktio_file_descriptor(in_arg, &tmpfd);
387 tmpfd = rktio_dup(scheme_rktio, tmpfd);
388 if (!tmpfd) {
389 close_six_fds(rw);
390 scheme_rktio_error("dynamic-place", "stdin dup");
391 }
392 rw[0] = tmpfd;
393 } else {
394 rwp = rktio_make_pipe(scheme_rktio, 0);
395 if (!rwp) {
396 close_six_fds(rw);
397 scheme_rktio_error("dynamic-place", "stdin pipe");
398 } else {
399 rw[0] = rwp[0];
400 rw[1] = rwp[1];
401 free(rwp);
402 }
403 }
404
405 if (SCHEME_TRUEP(out_arg)) {
406 if (scheme_port_closed_p(out_arg)) {
407 close_six_fds(rw);
408 scheme_contract_error("dynamic-place", "port is closed",
409 "port", 1, out_arg,
410 NULL);
411 }
412 scheme_get_port_rktio_file_descriptor(out_arg, &tmpfd);
413 tmpfd = rktio_dup(scheme_rktio, tmpfd);
414 if (!tmpfd) {
415 close_six_fds(rw);
416 scheme_rktio_error("dynamic-place", "stdout dup");
417 }
418 rw[3] = tmpfd;
419 } else {
420 rwp = rktio_make_pipe(scheme_rktio, 0);
421 if (!rwp) {
422 close_six_fds(rw);
423 scheme_rktio_error("dynamic-place", "stdout pipe");
424 } else {
425 rw[2] = rwp[0];
426 rw[3] = rwp[1];
427 free(rwp);
428 }
429 }
430
431 if (SCHEME_TRUEP(err_arg)) {
432 if (scheme_port_closed_p(err_arg)) {
433 close_six_fds(rw);
434 scheme_contract_error("dynamic-place", "port is closed",
435 "port", 1, err_arg,
436 NULL);
437 }
438 scheme_get_port_rktio_file_descriptor(err_arg, &tmpfd);
439 tmpfd = rktio_dup(scheme_rktio, tmpfd);
440 if (!tmpfd) {
441 close_six_fds(rw);
442 scheme_rktio_error("dynamic-place", "stderr dup");
443 }
444 rw[5] = tmpfd;
445 } else {
446 rwp = rktio_make_pipe(scheme_rktio, 0);
447 if (!rwp) {
448 close_six_fds(rw);
449 scheme_rktio_error("dynamic-place", "stderr pipe");
450 } else {
451 rw[4] = rwp[0];
452 rw[5] = rwp[1];
453 free(rwp);
454 }
455 }
456
457 {
458 place_data->in = rw[0];
459 place_data->out = rw[3];
460 place_data->err = rw[5];
461 }
462 }
463
464 {
465 Scheme_Object *tmp;
466 tmp = places_prepare_direct(place_data->current_library_collection_paths);
467 place_data->current_library_collection_paths = tmp;
468 tmp = places_prepare_direct(place_data->current_library_collection_links);
469 place_data->current_library_collection_links = tmp;
470 tmp = places_prepare_direct(place_data->compiled_roots);
471 place_data->compiled_roots = tmp;
472 tmp = places_prepare_direct(place_data->current_directory);
473 place_data->current_directory = tmp;
474 tmp = places_prepare_direct(place_data->channel);
475 place_data->channel = tmp;
476 tmp = places_prepare_direct(place_data->module);
477 place_data->module = tmp;
478 tmp = places_prepare_direct(place_data->function);
479 place_data->function = tmp;
480 }
481
482 /* create new place */
483 proc_thread = mz_proc_thread_create(place_start_proc, place_data);
484
485 if (!proc_thread) {
486 mzrt_sema_destroy(ready);
487 ready = NULL;
488 scheme_signal_error("place: place creation failed");
489 }
490
491 mz_proc_thread_detach(proc_thread);
492 proc_thread = NULL;
493
494 /* wait until the place has started and grabbed the value
495 from `place_data'; it's important that a GC doesn't happen
496      here until the other place is far enough along. */
497 mzrt_sema_wait(ready);
498 mzrt_sema_destroy(ready);
499 ready = NULL;
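  /* At this point the child has posted `ready', presumably from
     place_start_proc after pulling what it needs out of `place_data';
     on a failed startup the child appears to clear
     place_data->place_obj, which the check below detects. */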
500
501 if (!place_data->place_obj)
502 scheme_signal_error("place: place creation failed");
503
504 log_place_event("id %d: create %" PRIdPTR, "create", 1, place_data->place_obj->id);
505
506 place_data->ready = NULL;
507 place_data->place_obj = NULL;
508
509 place->next = all_child_places;
510 if (place->next)
511 place->next->prev = place;
512 all_child_places = place;
513
514 {
515 Scheme_Custodian_Reference *mref;
516 mref = scheme_add_managed(NULL,
517 (Scheme_Object *)place,
518 cust_kill_place,
519 NULL,
520 1);
521 place->mref = mref;
522 }
523
524 #ifdef MZ_PRECISE_GC
525 GC_register_new_thread(place, cust);
526 #endif
527
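  /* Build the four results returned to the caller: the place descriptor
     plus, for each of stdin/stdout/stderr that the caller did not
     supply, a port wrapping the parent's end of the corresponding pipe
     (or #f when the caller's own port was duplicated for the child). */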
528 {
529 Scheme_Object *a[4];
530 Scheme_Object *tmpport;
531 a[0] = (Scheme_Object *) place;
532 if (rw[1]) {
533 tmpport = scheme_make_rktio_fd_output_port(rw[1], scheme_intern_symbol("place-in"), 0);
534 a[1] = tmpport;
535 }
536 else
537 a[1] = scheme_false;
538
539 if (rw[2]) {
540 tmpport = scheme_make_rktio_fd_input_port(rw[2], scheme_intern_symbol("place-out"));
541 a[2] = tmpport;
542 }
543 else
544 a[2] = scheme_false;
545
546 if (rw[4]) {
547 tmpport = scheme_make_rktio_fd_input_port(rw[4], scheme_intern_symbol("place-err"));
548 a[3] = tmpport;
549 }
550 else
551 a[3] = scheme_false;
552 return scheme_values(4, a);
553 }
554 }
555
556 static void do_place_kill(Scheme_Place *place)
557 {
558 Scheme_Place_Object *place_obj;
559 intptr_t refcount;
560 int old_id;
561
562 place_obj = place->place_obj;
563
564 if (!place_obj) return;
565
566 {
567 mzrt_mutex_lock(place_obj->lock);
568
569 if (!place_obj->dead) {
570 if (!place_obj->die)
571 place_obj->die = 1;
572 if (place_obj->signal_handle)
573 scheme_signal_received_at(place_obj->signal_handle);
574 resume_one_place_with_lock(place_obj);
575
576 do {
577 /* We need to block until the place is really finished; that
578 should happen quickly, so block atomically (with a little
579 cooperation for GCing, just in case). */
580 mzrt_mutex_unlock(place_obj->lock);
581 GC_check_master_gc_request();
582 scheme_start_atomic();
583 scheme_thread_block(0.0);
584 scheme_end_atomic_no_swap();
585 mzrt_mutex_lock(place_obj->lock);
586 } while (!place_obj->dead);
587 }
588
589 place_obj->refcount--;
590 refcount = place_obj->refcount;
591
592 if (place_obj->signal_handle)
593 scheme_signal_received_at(place_obj->signal_handle);
594
595 place->result = place_obj->result;
596
597 place_obj->parent_signal_handle = NULL;
598
599 if (refcount)
600 resume_one_place_with_lock(place_obj);
601
602 mzrt_mutex_unlock(place_obj->lock);
603 }
604
605 scheme_remove_managed(place->mref, (Scheme_Object *)place);
606
607 if (place->next)
608 place->next->prev = place->prev;
609 if (place->prev)
610 place->prev->next = place->next;
611 else
612 all_child_places = place->next;
613
614 old_id = place_obj->id;
615
616 if (!refcount) {
617 destroy_place_object_locks(place_obj);
618 }
619 place->place_obj = NULL;
620
621 log_place_event("id %d: reap %" PRIdPTR, "reap", 1, old_id);
622 }
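/* Note on the refcount handling above: the place object is shared with
   the child, so its locks are destroyed only once the reference count
   reaches zero; otherwise the (possibly suspended) child is resumed,
   presumably so that it can finish shutting down and drop its own
   reference. */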
623
624 static int do_place_break(Scheme_Place *place, int kind)
625 {
626 Scheme_Place_Object *place_obj;
627 place_obj = place->place_obj;
628
629 if (place_obj) {
630 mzrt_mutex_lock(place_obj->lock);
631
632 place_obj->pbreak = kind;
633
634 if (place_obj->signal_handle)
635 scheme_signal_received_at(place_obj->signal_handle);
636
637 mzrt_mutex_unlock(place_obj->lock);
638 }
639
640 return 0;
641 }
642
643 static void cust_kill_place(Scheme_Object *pl, void *notused)
644 {
645 do_place_kill((Scheme_Place *)pl);
646 }
647
648 static Scheme_Object *place_kill(int argc, Scheme_Object *args[]) {
649 Scheme_Place *place;
650 place = (Scheme_Place *) args[0];
651
652 if (!SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type))
653 scheme_wrong_contract("place-kill", "place?", 0, argc, args);
654
655 do_place_kill(place);
656 return scheme_void;
657 }
658
659 static Scheme_Object *place_break(int argc, Scheme_Object *args[])
660 {
661 Scheme_Place *place = (Scheme_Place *) args[0];
662 int kind = MZEXN_BREAK;
663
664 if (!SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type))
665 scheme_wrong_contract("place-break", "place?", 0, argc, args);
666
667 if ((argc > 1) && SCHEME_TRUEP(args[1])) {
668 if (SCHEME_SYMBOLP(args[1])
669 && !SCHEME_SYM_WEIRDP(args[1])
670 && !strcmp(SCHEME_SYM_VAL(args[1]), "hang-up"))
671 kind = MZEXN_BREAK_HANG_UP;
672 else if (SCHEME_SYMBOLP(args[1])
673 && !SCHEME_SYM_WEIRDP(args[1])
674 && !strcmp(SCHEME_SYM_VAL(args[1]), "terminate"))
675 kind = MZEXN_BREAK_TERMINATE;
676 else
677 scheme_wrong_contract("place-break", "(or/c #f 'hang-up 'terminate)", 1, argc, args);
678 }
679
680 do_place_break(place, kind);
681
682 return scheme_void;
683 }
684
685 static int place_deadp(Scheme_Object *place) {
686 Scheme_Place_Object *place_obj;
687 int dead = 0;
688 place_obj = (Scheme_Place_Object*) ((Scheme_Place *)place)->place_obj;
689
690 if (place_obj == NULL) {
691 return 1;
692 }
693 else
694 {
695 mzrt_mutex_lock(place_obj->lock);
696
697 dead = place_obj->die | place_obj->dead;
698
699 mzrt_mutex_unlock(place_obj->lock);
700 }
701 if (dead) { return 1; }
702 return 0;
703 }
704
705 static Scheme_Object *make_place_dead(int argc, Scheme_Object *argv[])
706 {
707 Scheme_Object *b;
708
709 if (!SAME_TYPE(SCHEME_TYPE(argv[0]), scheme_place_type))
710 scheme_wrong_contract("place-dead-evt", "place?", 0, argc, argv);
711
712 b = scheme_alloc_small_object();
713 b->type = scheme_place_dead_type;
714 SCHEME_PTR_VAL(b) = argv[0];
715 return b;
716 }
717
718 static int place_dead_ready(Scheme_Object *o, Scheme_Schedule_Info *sinfo) {
719 if (place_deadp(SCHEME_PTR_VAL(o))) {
720 scheme_set_sync_target(sinfo, o, NULL, NULL, 0, 0, NULL);
721 return 1;
722 }
723 return 0;
724 }
725
726 /* ---------------------------------------------------------------------- */
727
728 static int place_wait_ready(Scheme_Object *_p) {
729 Scheme_Place *p = (Scheme_Place *)_p;
730 int done;
731
732 if (!p->place_obj) return 1;
733
734 mzrt_mutex_lock(p->place_obj->lock);
735 done = p->place_obj->dead;
736 mzrt_mutex_unlock(p->place_obj->lock);
737
738 if (done) {
739 do_place_kill(p); /* sets result, frees place */
740 /* wait for pumper threads to finish */
741 return 1;
742 }
743
744 return 0;
745 }
746
747 static Scheme_Object *place_wait(int argc, Scheme_Object *args[]) {
748 Scheme_Place *place;
749 place = (Scheme_Place *) args[0];
750
751 if (!SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type))
752 scheme_wrong_contract("place-wait", "place?", 0, argc, args);
753
754 scheme_block_until(place_wait_ready, NULL, (Scheme_Object*)place, 0);
755
756 if (SCHEME_VECTORP(place->pumper_threads)) {
757 int i;
758 for (i=0; i<3; i++) {
759 Scheme_Object *tmp;
760 tmp = SCHEME_VEC_ELS(place->pumper_threads)[i];
761 if (SCHEME_THREADP(tmp))
762 scheme_thread_wait(tmp);
763 }
764 }
765
766 return scheme_make_integer(place->result);
767 }
768
769 static Scheme_Object *place_p(int argc, Scheme_Object *args[])
770 {
771 return SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type) ? scheme_true : scheme_false;
772 }
773
774 static Scheme_Object *do_places_deep_copy(Scheme_Object *so, int mode, int gcable,
775 Scheme_Object **master_chain,
776 Scheme_Object **invalid_object,
777 char **delayed_err, intptr_t *delayed_errno, intptr_t *delayed_errkind)
778 {
779 #if defined(MZ_USE_PLACES) && defined(MZ_PRECISE_GC)
780 Scheme_Hash_Table *ht = NULL;
781 return places_deep_copy_worker(so, &ht, mode, gcable, gcable, master_chain, invalid_object,
782 delayed_err, delayed_errno, delayed_errkind);
783 #else
784 return so;
785 #endif
786 }
787
788 static Scheme_Object *places_prepare_direct(Scheme_Object *so) {
789 so = strip_chaperones(so);
790 (void)do_places_deep_copy(so, mzPDC_CHECK, 1, NULL, NULL, NULL, NULL, NULL);
791 return so;
792 }
793
794 static Scheme_Object *places_deep_direct_uncopy(Scheme_Object *so) {
795 return do_places_deep_copy(so, mzPDC_DIRECT_UNCOPY, 1, NULL, NULL, NULL, NULL, NULL);
796 }
797
798 static void bad_place_message(Scheme_Object *so,
799 char *delayed_err, intptr_t delayed_errno, intptr_t delayed_errkind) {
800 if (delayed_err) {
801 rktio_set_last_error(scheme_rktio, delayed_errkind, delayed_errno);
802 scheme_rktio_error("place-channel-put", delayed_err);
803 } else
804 scheme_contract_error("place-channel-put",
805 "value not allowed in a message",
806 "value", 1, so,
807 NULL);
808 }
809
810 static void *box_fd(rktio_fd_transfer_t *fd)
811 {
812 rktio_fd_transfer_t **fdp;
813 fdp = scheme_malloc_atomic(sizeof(rktio_fd_transfer_t*));
814 *fdp = fd;
815 return fdp;
816 }
817
818 static rktio_fd_transfer_t *unbox_fd(void *p)
819 {
820 return *(rktio_fd_transfer_t **)p;
821 }
822
823 static void bad_place_message2(Scheme_Object *so, Scheme_Object *o, int can_raise_exn,
824 char *delayed_err, intptr_t delayed_errno, intptr_t delayed_errkind) {
825 Scheme_Object *l;
826 Scheme_Vector *v = (Scheme_Vector *) o;
827 if (v) {
828 if (SCHEME_VEC_ELS(v)[0]) {
829 l = SCHEME_VEC_ELS(v)[0];
830 while (SCHEME_RPAIRP(l)) {
831 rktio_fd_close_transfer(unbox_fd(SCHEME_CAR(l)));
832 l = SCHEME_CDR(l);
833 SCHEME_USE_FUEL(1);
834 }
835 }
836 if (SCHEME_VEC_ELS(v)[1]) {
837 l = SCHEME_VEC_ELS(v)[1];
838 while (SCHEME_RPAIRP(l)) {
839 rktio_fd_close_transfer(unbox_fd(SCHEME_CAR(l)));
840 l = SCHEME_CDR(l);
841 SCHEME_USE_FUEL(1);
842 }
843 }
844 }
845 if (can_raise_exn)
846 bad_place_message(so, delayed_err, delayed_errno, delayed_errkind);
847 }
848
849 static void push_duped_fd(Scheme_Object **fd_accumulators, intptr_t slot, rktio_fd_transfer_t *dupfdt) {
850 Scheme_Object *tmp;
851 Scheme_Vector *v;
852 if (fd_accumulators) {
853 if (!*fd_accumulators) {
854 tmp = scheme_make_vector(2, scheme_null);
855 *fd_accumulators = tmp;
856 }
857 v = (Scheme_Vector*) *fd_accumulators;
858
859 tmp = scheme_make_raw_pair(box_fd(dupfdt), SCHEME_VEC_ELS(v)[slot]);
860 SCHEME_VEC_ELS(v)[slot] = tmp;
861 }
862 }
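/* The accumulator vector built here holds two lists of raw pairs: as
   used in shallow_types_copy below, slot 0 collects duplicated file
   descriptors and slot 1 collects duplicated socket descriptors, so
   that bad_place_message2 can close all of them if the message turns
   out to be invalid. */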
863
864 static Scheme_Object *trivial_copy(Scheme_Object *so, Scheme_Object **master_chain)
865 {
866 switch (SCHEME_TYPE(so)) {
867 case scheme_integer_type:
868 case scheme_true_type:
869 case scheme_false_type:
870 case scheme_null_type:
871 case scheme_void_type:
872 return so;
873 case scheme_byte_string_type:
874 case scheme_flvector_type:
875 #ifdef MZ_LONG_DOUBLE
876 case scheme_extflvector_type:
877 #endif
878 case scheme_fxvector_type:
879 if (SHARED_ALLOCATEDP(so)) {
880 scheme_hash_key(so);
881 if (master_chain) {
882 /* Keep track of all the objects that are in a message that
883 refer to master-allocated objects, so that the
884 corresponding objects can be marked during a master GC,
885 in case one happens before the message is received. */
886 Scheme_Object *mc;
887 mc = scheme_make_raw_pair(so, *master_chain);
888 *master_chain = mc;
889 }
890 return so;
891 }
892 }
893
894 return NULL;
895 }
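/* A NULL result from trivial_copy means "not trivially shareable": the
   value either needs a real copy or is not a legal message, and the
   caller decides which. */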
896
897 static Scheme_Object *shallow_types_copy(Scheme_Object *so, Scheme_Hash_Table *ht,
898 Scheme_Object **fd_accumulators,
899 char **delayed_err, intptr_t *delayed_errno, intptr_t *delayed_errkind,
900 int mode, int can_raise_exn,
901 Scheme_Object **master_chain,
902 Scheme_Object **invalid_object) {
903 Scheme_Object *new_so;
904 int copy_mode = ((mode == mzPDC_COPY) || (mode == mzPDC_UNCOPY) || (mode == mzPDC_DIRECT_UNCOPY));
905
906 new_so = trivial_copy(so, master_chain);
907 if (new_so) return new_so;
908
909 new_so = so;
910
911 switch (SCHEME_TYPE(so)) {
912 case scheme_place_type:
913 so = ((Scheme_Place *) so)->channel;
914 new_so = so;
915     case scheme_place_bi_channel_type: /* ^^^ fall through ^^^ */
916 if (copy_mode) {
917 Scheme_Place_Bi_Channel *ch;
918 ch = place_bi_channel_malloc();
919 ch->link->sendch = ((Scheme_Place_Bi_Channel *)so)->link->sendch;
920 ch->link->recvch = ((Scheme_Place_Bi_Channel *)so)->link->recvch;
921
922 if ((mode == mzPDC_COPY) || (mode == mzPDC_DIRECT_UNCOPY))
923 bi_channel_refcount(ch, 1);
924 if ((mode == mzPDC_UNCOPY) || (mode == mzPDC_DIRECT_UNCOPY))
925 bi_channel_set_finalizer(ch);
926
927 if (master_chain) {
928 /* See setting of master_chain in trivial_copy(): */
929 new_so = scheme_make_raw_pair((Scheme_Object *)ch->link->sendch, *master_chain);
930 new_so = scheme_make_raw_pair((Scheme_Object *)ch->link->recvch, new_so);
931 *master_chain = new_so;
932 }
933 new_so = (Scheme_Object *)ch;
934 } else if (mode == mzPDC_CLEAN) {
935 bi_channel_refcount((Scheme_Place_Bi_Channel *)so, -1);
936 } else if (mode == mzPDC_DESER) {
937 bi_channel_set_finalizer((Scheme_Place_Bi_Channel *)so);
938 }
939 break;
940 case scheme_char_type:
941 if (copy_mode)
942 new_so = scheme_make_char(SCHEME_CHAR_VAL(so));
943 break;
944 case scheme_bignum_type:
945 if (copy_mode)
946 new_so = scheme_bignum_copy(so);
947 break;
948 case scheme_rational_type:
949 if (copy_mode) {
950 Scheme_Object *n;
951 Scheme_Object *d;
952 n = scheme_rational_numerator(so);
953 d = scheme_rational_denominator(so);
954 n = shallow_types_copy(n, NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
955 mode, can_raise_exn, master_chain, invalid_object);
956 d = shallow_types_copy(d, NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
957 mode, can_raise_exn, master_chain, invalid_object);
958 new_so = scheme_make_rational_pre_normalized(n, d);
959 }
960 break;
961 case scheme_float_type:
962 if (copy_mode)
963 new_so = scheme_make_float(SCHEME_FLT_VAL(so));
964 break;
965 case scheme_double_type:
966 if (copy_mode)
967 new_so = scheme_make_double(SCHEME_DBL_VAL(so));
968 break;
969 #ifdef MZ_LONG_DOUBLE
970 case scheme_long_double_type:
971 if (copy_mode)
972 new_so = scheme_make_long_double(SCHEME_LONG_DBL_VAL(so));
973 break;
974 #endif
975 case scheme_complex_type:
976 if (copy_mode) {
977 Scheme_Object *r;
978 Scheme_Object *i;
979 r = scheme_complex_real_part(so);
980 i = scheme_complex_imaginary_part(so);
981 r = shallow_types_copy(r, NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
982 mode, can_raise_exn, master_chain, invalid_object);
983 i = shallow_types_copy(i, NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
984 mode, can_raise_exn, master_chain, invalid_object);
985 new_so = scheme_make_complex(r, i);
986 }
987 break;
988 case scheme_char_string_type:
989 if (copy_mode) {
990 new_so = scheme_make_sized_offset_char_string(SCHEME_CHAR_STR_VAL(so), 0, SCHEME_CHAR_STRLEN_VAL(so), 1);
991 SCHEME_SET_IMMUTABLE(new_so);
992 }
993 break;
994 case scheme_byte_string_type:
995 /* not allocated as shared, since that's covered above */
996 if (copy_mode) {
997 new_so = scheme_make_sized_offset_byte_string(SCHEME_BYTE_STR_VAL(so), 0, SCHEME_BYTE_STRLEN_VAL(so), 1);
998 SCHEME_SET_IMMUTABLE(new_so);
999 }
1000 break;
1001 case scheme_unix_path_type:
1002 case scheme_windows_path_type:
1003 if (copy_mode)
1004 new_so = scheme_make_sized_offset_kind_path(SCHEME_BYTE_STR_VAL(so), 0, SCHEME_BYTE_STRLEN_VAL(so), 1,
1005 SCHEME_TYPE(so));
1006 break;
1007 case scheme_symbol_type:
1008 if (mode == mzPDC_COPY) {
1009 new_so = scheme_make_sized_offset_byte_string((char *)so, SCHEME_SYMSTR_OFFSET(so), SCHEME_SYM_LEN(so), 1);
1010 if (SCHEME_SYM_UNINTERNEDP(so)) {
1011 MZ_OPT_HASH_KEY(&((Scheme_Symbol*)new_so)->iso) = 0x1;
1012 } else if (SCHEME_SYM_PARALLELP(so)) {
1013 MZ_OPT_HASH_KEY(&((Scheme_Symbol*)new_so)->iso) = 0x2;
1014 }
1015 new_so->type = scheme_serialized_symbol_type;
1016 } else if (mode == mzPDC_DIRECT_UNCOPY) {
1017 char *str, buf[64];
1018 intptr_t len;
1019 len = SCHEME_SYM_LEN(so);
1020 if (len < 64)
1021 str = buf;
1022 else
1023 str = (char *)scheme_malloc_atomic(len);
1024 memcpy(str, SCHEME_SYM_VAL(so), len);
1025 if (SCHEME_SYM_UNINTERNEDP(so))
1026 new_so = scheme_make_exact_symbol(str, len);
1027 else if (SCHEME_SYM_PARALLELP(so))
1028 new_so = scheme_intern_exact_parallel_symbol(str, len);
1029 else
1030 new_so = scheme_intern_exact_symbol(str, len);
1031 } else if (mode != mzPDC_CHECK) {
1032 scheme_log_abort("encountered symbol in bad mode");
1033 abort();
1034 }
1035 break;
1036 case scheme_serialized_symbol_type:
1037 if ((mode == mzPDC_UNCOPY) || (mode == mzPDC_DESER)) {
1038 if (SCHEME_SYM_UNINTERNEDP(so)) {
1039 new_so = scheme_make_exact_symbol(SCHEME_BYTE_STR_VAL(so), SCHEME_BYTE_STRLEN_VAL(so));
1040 }
1041 else if (SCHEME_SYM_PARALLELP(so)) {
1042 new_so = scheme_intern_exact_parallel_symbol(SCHEME_BYTE_STR_VAL(so), SCHEME_BYTE_STRLEN_VAL(so));
1043 }
1044 else {
1045 new_so = scheme_intern_exact_symbol(SCHEME_BYTE_STR_VAL(so), SCHEME_BYTE_STRLEN_VAL(so));
1046 }
1047 } else if (mode != mzPDC_CLEAN) {
1048 scheme_log_abort("encountered serialized symbol in bad mode");
1049 abort();
1050 }
1051 break;
1052 case scheme_keyword_type:
1053 if (mode == mzPDC_COPY) {
1054 new_so = scheme_make_sized_offset_byte_string((char *)so, SCHEME_SYMSTR_OFFSET(so), SCHEME_SYM_LEN(so), 1);
1055 new_so->type = scheme_serialized_keyword_type;
1056 } else if (mode == mzPDC_DIRECT_UNCOPY) {
1057 char *str, buf[64];
1058 intptr_t len;
1059 len = SCHEME_SYM_LEN(so);
1060 if (len < 64)
1061 str = buf;
1062 else
1063 str = (char *)scheme_malloc_atomic(len);
1064 memcpy(str, SCHEME_SYM_VAL(so), len);
1065 new_so = scheme_intern_exact_keyword(str, len);
1066 } else if (mode != mzPDC_CHECK) {
1067 scheme_log_abort("encountered keyword in bad mode");
1068 abort();
1069 }
1070 break;
1071 case scheme_serialized_keyword_type:
1072 if ((mode == mzPDC_UNCOPY) || (mode == mzPDC_DESER)) {
1073 new_so = scheme_intern_exact_keyword(SCHEME_BYTE_STR_VAL(so), SCHEME_BYTE_STRLEN_VAL(so));
1074 } else if (mode != mzPDC_CLEAN) {
1075 scheme_log_abort("encountered serialized keyword in bad mode");
1076 abort();
1077 }
1078 break;
1079 case scheme_fxvector_type:
1080 /* not allocated as shared, since that's covered above */
1081 if (copy_mode) {
1082 Scheme_Vector *vec;
1083 intptr_t i;
1084 intptr_t size = SCHEME_FXVEC_SIZE(so);
1085 vec = scheme_alloc_fxvector(size);
1086
1087 for (i = 0; i < size; i++) {
1088 SCHEME_FXVEC_ELS(vec)[i] = SCHEME_FXVEC_ELS(so)[i];
1089 }
1090 new_so = (Scheme_Object *) vec;
1091 }
1092 break;
1093 case scheme_flvector_type:
1094 /* not allocated as shared, since that's covered above */
1095 if (copy_mode) {
1096 Scheme_Double_Vector *vec;
1097 intptr_t i;
1098 intptr_t size = SCHEME_FLVEC_SIZE(so);
1099 vec = scheme_alloc_flvector(size);
1100
1101 for (i = 0; i < size; i++) {
1102 SCHEME_FLVEC_ELS(vec)[i] = SCHEME_FLVEC_ELS(so)[i];
1103 }
1104 new_so = (Scheme_Object *) vec;
1105 }
1106 break;
1107 #ifdef MZ_LONG_DOUBLE
1108 case scheme_extflvector_type:
1109 /* not allocated as shared, since that's covered above */
1110 if (copy_mode) {
1111 Scheme_Long_Double_Vector *vec;
1112 intptr_t i;
1113 intptr_t size = SCHEME_EXTFLVEC_SIZE(so);
1114 vec = scheme_alloc_extflvector(size);
1115
1116 for (i = 0; i < size; i++) {
1117 SCHEME_EXTFLVEC_ELS(vec)[i] = SCHEME_EXTFLVEC_ELS(so)[i];
1118 }
1119 new_so = (Scheme_Object *) vec;
1120 }
1121 break;
1122 #endif
1123 case scheme_cpointer_type:
1124 if (SCHEME_CPTR_FLAGS(so) & 0x1) {
1125 if (copy_mode) {
1126 Scheme_Object *o;
1127 Scheme_Object *o2;
1128 if (SCHEME_CPTR_FLAGS(so) & 0x2) {
1129 o = (Scheme_Object *)scheme_malloc_small_tagged(sizeof(Scheme_Offset_Cptr));
1130 SCHEME_CPTR_FLAGS(o) |= 0x2;
1131 ((Scheme_Offset_Cptr *)o)->offset = ((Scheme_Offset_Cptr *)so)->offset;
1132 }
1133 else
1134 o = (Scheme_Object *)scheme_malloc_small_tagged(sizeof(Scheme_Cptr));
1135
1136 o->type = scheme_cpointer_type;
1137 SCHEME_CPTR_FLAGS(o) |= 0x1;
1138 SCHEME_CPTR_VAL(o) = SCHEME_CPTR_VAL(so);
1139 o2 = SCHEME_CPTR_TYPE(so);
1140 if (o2)
1141 o2 = shallow_types_copy(o2, NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1142 mode, can_raise_exn, master_chain, invalid_object);
1143 SCHEME_CPTR_TYPE(o) = o2;
1144
1145 new_so = o;
1146 } else {
1147 if (SCHEME_CPTR_TYPE(so)) {
1148 (void)shallow_types_copy(SCHEME_CPTR_TYPE(so), NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1149 mode, can_raise_exn, master_chain, invalid_object);
1150 }
1151 }
1152 }
1153 else {
1154 bad_place_message2(so, *fd_accumulators, can_raise_exn, 0, 0, 0);
1155 if (invalid_object) *invalid_object = so;
1156 return NULL;
1157 }
1158 break;
1159 case scheme_input_port_type:
1160 case scheme_output_port_type:
1161 {
1162 rktio_fd_t *fd;
1163 if ((fd = scheme_get_port_rktio_socket(so))) {
1164 if (mode == mzPDC_COPY) {
1165 Scheme_Object *tmp;
1166 Scheme_Object *portname;
1167 Scheme_Serialized_Socket_FD *ssfd;
1168 rktio_fd_t *dupfd;
1169 rktio_fd_transfer_t *dupfdt;
1170 dupfd = rktio_dup(scheme_rktio, fd);
1171 if (!dupfd) {
1172 if (can_raise_exn)
1173 scheme_rktio_error("dynamic-place", "socket dup");
1174 if (delayed_err) {
1175 intptr_t tmp;
1176 *delayed_err = "socket dup";
1177 tmp = rktio_get_last_error(scheme_rktio);
1178 *delayed_errno = tmp;
1179 tmp = rktio_get_last_error_kind(scheme_rktio);
1180 *delayed_errkind = tmp;
1181 }
1182 if (invalid_object) *invalid_object = so;
1183 return NULL;
1184 }
1185 dupfdt = rktio_fd_detach(scheme_rktio, dupfd);
1186 push_duped_fd(fd_accumulators, 1, dupfdt);
1187 ssfd = scheme_malloc_tagged(sizeof(Scheme_Serialized_Socket_FD));
1188 ssfd->so.type = scheme_serialized_tcp_fd_type;
1189 ssfd->type = so->type;
1190 ssfd->fdt = dupfdt;
1191 portname = scheme_port_name(so);
1192 tmp = shallow_types_copy(portname, ht, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1193 mode, can_raise_exn, master_chain, invalid_object);
1194 ssfd->name = tmp;
1195 return (Scheme_Object *)ssfd;
1196 }
1197 }
1198 else if (SCHEME_TRUEP(scheme_file_stream_port_p(1, &so))) {
1199 if (scheme_get_port_rktio_file_descriptor(so, &fd)) {
1200 if (mode == mzPDC_COPY) {
1201 Scheme_Object *tmp;
1202 Scheme_Serialized_File_FD *sffd;
1203 rktio_fd_t *dupfd;
1204 rktio_fd_transfer_t *dupfdt;
1205 sffd = scheme_malloc_tagged(sizeof(Scheme_Serialized_File_FD));
1206 sffd->so.type = scheme_serialized_file_fd_type;
1207 scheme_get_serialized_fd_flags(so, sffd);
1208 tmp = shallow_types_copy(scheme_port_name(so), ht, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1209 mode, can_raise_exn, master_chain, invalid_object);
1210 sffd->name = tmp;
1211 dupfd = rktio_dup(scheme_rktio, fd);
1212 if (!dupfd) {
1213 if (can_raise_exn)
1214 scheme_rktio_error("dynamic-place", "port dup");
1215 if (delayed_err) {
1216 intptr_t tmp;
1217 *delayed_err = "port dup";
1218 tmp = rktio_get_last_error(scheme_rktio);
1219 *delayed_errno = tmp;
1220 tmp = rktio_get_last_error_kind(scheme_rktio);
1221 *delayed_errkind = tmp;
1222 }
1223 if (invalid_object) *invalid_object = so;
1224 return NULL;
1225 }
1226 dupfdt = rktio_fd_detach(scheme_rktio, dupfd);
1227 push_duped_fd(fd_accumulators, 0, dupfdt);
1228 sffd->fdt = dupfdt;
1229 sffd->type = so->type;
1230 new_so = (Scheme_Object *) sffd;
1231 }
1232 }
1233 else {
1234 bad_place_message2(so, *fd_accumulators, can_raise_exn, 0, 0, 0);
1235 if (invalid_object) *invalid_object = so;
1236 return NULL;
1237 }
1238 }
1239 else {
1240 bad_place_message2(so, *fd_accumulators, can_raise_exn, 0, 0, 0);
1241 if (invalid_object) *invalid_object = so;
1242 return NULL;
1243 }
1244 }
1245 break;
1246 case scheme_serialized_tcp_fd_type:
1247 {
1248 if ((mode == mzPDC_UNCOPY) || (mode == mzPDC_DIRECT_UNCOPY) || (mode == mzPDC_DESER)) {
1249 Scheme_Object *in;
1250 Scheme_Object *out;
1251 Scheme_Object *name;
1252 int type = ((Scheme_Serialized_Socket_FD *) so)->type;
1253 rktio_fd_transfer_t *fdt = ((Scheme_Serialized_Socket_FD *) so)->fdt;
1254 rktio_fd_t *fd;
1255
1256 name = ((Scheme_Serialized_Socket_FD *) so)->name;
1257 name = shallow_types_copy(name, NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1258 mode, can_raise_exn, master_chain, invalid_object);
1259 fd = rktio_fd_attach(scheme_rktio, fdt);
1260
1261 /* scheme_socket_to_ports(fd, "tcp-accepted", 1, &in, &out); */
1262 if (type == scheme_input_port_type) {
1263 scheme_rktio_socket_to_input_port(fd, name, 1, &in);
1264 /* scheme_tcp_abandon_port(out); */
1265 new_so = in;
1266 }
1267 else {
1268 scheme_rktio_socket_to_output_port(fd, name, 1, &out);
1269 /* scheme_tcp_abandon_port(in); */
1270 new_so = out;
1271 }
1272 } else if (mode == mzPDC_CLEAN) {
1273 rktio_fd_transfer_t *fdt = ((Scheme_Serialized_Socket_FD *) so)->fdt;
1274 rktio_fd_close_transfer(fdt);
1275 } else {
1276 scheme_log_abort("encountered serialized TCP socket in bad mode");
1277 abort();
1278 }
1279 }
1280 break;
1281 case scheme_serialized_file_fd_type:
1282 {
1283 if ((mode == mzPDC_UNCOPY) || (mode == mzPDC_DIRECT_UNCOPY) || (mode == mzPDC_DESER)) {
1284 Scheme_Serialized_File_FD *ffd;
1285 Scheme_Object *name;
1286 rktio_fd_t *fd;
1287 int type;
1288
1289 ffd = (Scheme_Serialized_File_FD *) so;
1290 fd = rktio_fd_attach(scheme_rktio, ffd->fdt);
1291 name = ffd->name;
1292 type = ffd->type;
1293
1294 name = shallow_types_copy(name, NULL, fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1295 mode, can_raise_exn, master_chain, invalid_object);
1296
1297 if (type == scheme_input_port_type) {
1298 new_so = scheme_make_rktio_fd_input_port(fd, name);
1299 }
1300 else {
1301 new_so = scheme_make_rktio_fd_output_port(fd, name, 0);
1302 }
1303 } else if (mode == mzPDC_CLEAN) {
1304 Scheme_Serialized_File_FD *sffd;
1305 sffd = (Scheme_Serialized_File_FD *) so;
1306 rktio_fd_close_transfer(sffd->fdt);
1307 } else {
1308 scheme_log_abort("encountered serialized fd in bad mode");
1309 abort();
1310 }
1311 }
1312 break;
1313 default:
1314 new_so = NULL;
1315 break;
1316 }
1317 if (ht && new_so) {
1318 scheme_hash_set(ht, so, new_so);
1319 }
1320 return new_so;
1321 }
1322
1323 /* InFinite Stack */
1324 #define IFS_SIZE 512
1325 #define IFS_CACHE_SLOT (IFS_SIZE - 1)
1326 #define IFS_SEGMENT_BOTTOM 1
1327 #define IFS_PREV_SEG_SLOT 0
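/* Segment layout, following the conventions above: each segment is an
   array of IFS_SIZE Scheme_Object* slots. Slot IFS_PREV_SEG_SLOT (0)
   points back to the previous segment, slot IFS_CACHE_SLOT
   (IFS_SIZE - 1) caches an already-allocated next segment, and payload
   entries occupy slots 1 through IFS_SIZE - 2, with IFS_SEGMENT_BOTTOM
   (1) as the first usable position. */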
1328 static Scheme_Object* create_infinite_stack(int gcable) {
1329 Scheme_Object **v;
1330
1331 if (gcable) {
1332 /* If a GC is not possible, then we prefer to malloc() the stack
1333 space so that the space doesn't show up as part of the
1334 message allocation. */
1335 if (reusable_ifs_stack) {
1336 v = reusable_ifs_stack;
1337 reusable_ifs_stack = NULL;
1338 } else {
1339 v = GC_malloc(IFS_SIZE * sizeof(Scheme_Object*));
1340 }
1341 } else {
1342 v = malloc(IFS_SIZE * sizeof(Scheme_Object*));
1343 v[IFS_PREV_SEG_SLOT] = NULL;
1344 v[IFS_CACHE_SLOT] = NULL;
1345 }
1346
1347 return (Scheme_Object *) v;
1348 }
1349 static void free_infinite_stack(Scheme_Object** st, intptr_t max_depth, int gcable) {
1350 Scheme_Object **prev;
1351 if (st[IFS_CACHE_SLOT]) {
1352 if (!gcable) free(st[IFS_CACHE_SLOT]);
1353 st[IFS_CACHE_SLOT] = NULL;
1354 }
1355 prev = (Scheme_Object **) st[IFS_PREV_SEG_SLOT];
1356 if (prev) {
1357 prev[IFS_CACHE_SLOT] = NULL;
1358 }
1359 if (!gcable) free(st);
1360 else if (!reusable_ifs_stack && (max_depth >= 0)) {
1361 if (max_depth > IFS_SIZE)
1362 max_depth = IFS_SIZE;
1363 memset(st, 0, max_depth * sizeof(Scheme_Object*));
1364 reusable_ifs_stack = st;
1365 }
1366 }
1367
1368 void scheme_clear_place_ifs_stack()
1369 {
1370 reusable_ifs_stack = NULL;
1371 }
1372
1373 static MZ_INLINE void inf_push(Scheme_Object **instack, Scheme_Object *item, uintptr_t *indepth,
1374 uintptr_t *maxdepth, int gcable) {
1375 Scheme_Object **stack = (Scheme_Object **) *instack;
1376 if (*indepth == IFS_CACHE_SLOT) {
1377 if (stack[IFS_CACHE_SLOT]) { /* cached */
1378 stack = (Scheme_Object **) stack[IFS_CACHE_SLOT];
1379 }
1380 else { /* no cache */
1381 Scheme_Object *tmp;
1382 tmp = create_infinite_stack(gcable);
1383 stack[IFS_CACHE_SLOT] = tmp;
1384 stack = (Scheme_Object **)stack[IFS_CACHE_SLOT];
1385 stack[IFS_PREV_SEG_SLOT] = (Scheme_Object *) (*instack);
1386 }
1387 *instack = (Scheme_Object *) stack;
1388 *indepth = IFS_SEGMENT_BOTTOM;
1389 }
1390
1391 /* printf("PUSH %p %li %p\n", stack, *indepth, item); */
1392 stack[((*indepth)++)] = item;
1393 if (*indepth > *maxdepth)
1394 *maxdepth = *indepth;
1395 return;
1396 }
1397
1398 static MZ_INLINE Scheme_Object *inf_pop(Scheme_Object **instack, uintptr_t *indepth, int gcable) {
1399 Scheme_Object **stack = (Scheme_Object **) *instack;
1400 Scheme_Object *val;
1401 if (*indepth == IFS_SEGMENT_BOTTOM) {
1402 if (stack[IFS_CACHE_SLOT]) { /* already have cached segment, free it*/
1403 free_infinite_stack((Scheme_Object **) stack[IFS_CACHE_SLOT], -1, gcable);
1404 stack[IFS_CACHE_SLOT] = NULL;
1405 }
1406 if (stack[IFS_PREV_SEG_SLOT]) {
1407 stack = (Scheme_Object **) stack[IFS_PREV_SEG_SLOT];
1408 stack[IFS_CACHE_SLOT] = (Scheme_Object *)(*instack);
1409 *instack = (Scheme_Object*) stack;
1410 *indepth = IFS_CACHE_SLOT;
1411 }
1412 else {
1413 printf("pop beyond start of inf stack\n");
1414 abort();
1415 return NULL;
1416 }
1417 }
1418
1419 val = stack[--(*indepth)];
1420 /* printf("Pop %p %li %p\n", stack, *indepth, val); */
1421 stack[*indepth] = NULL;
1422 return val;
1423 }
1424
1425 static MZ_INLINE Scheme_Object *inf_set(Scheme_Object **instack, int pos, Scheme_Object *item, uintptr_t *indepth) {
1426 Scheme_Object **stack = (Scheme_Object **) *instack;
1427 Scheme_Object *old;
1428 int realpos;
1429 if (*indepth <= pos + 1) {
1430 if (stack[IFS_PREV_SEG_SLOT]) {
1431 stack = (Scheme_Object **) stack[IFS_PREV_SEG_SLOT];
1432 realpos = (IFS_CACHE_SLOT - (pos + 2)) + *indepth;
1433 }
1434 else {
1435 printf("set beyond start of inf stack\n");
1436 abort();
1437 return NULL;
1438 }
1439 }
1440 else { realpos = *indepth - 1 - pos; }
1441
1442 /* printf("Set %p %i %li %i %p\n", stack, pos, *indepth, realpos, item); */
1443 old = stack[realpos];
1444 stack[realpos] = item;
1445 return old;
1446 }
1447
1448 static MZ_INLINE Scheme_Object *inf_get(Scheme_Object **instack, int pos, uintptr_t *indepth) {
1449 Scheme_Object **stack = (Scheme_Object **) *instack;
1450 Scheme_Object *item;
1451 int realpos;
1452 if (*indepth <= pos + 1) {
1453 if (stack[IFS_PREV_SEG_SLOT]) {
1454 stack = (Scheme_Object **) stack[IFS_PREV_SEG_SLOT];
1455 realpos = (IFS_CACHE_SLOT - (pos + 2)) + *indepth;
1456 }
1457 else {
1458 printf("get beyond start of inf stack\n");
1459 abort();
1460 return NULL;
1461 }
1462 }
1463 else { realpos = *indepth - 1 - pos; }
1464
1465 item = stack[realpos];
1466
1467 /* printf("Get %p %i %li %i %p\n", stack, pos, *indepth, realpos, item); */
1468 return item;
1469 }
1470
1471 /* This code often executes with the master GC switched on, so it
1472 cannot use the usual stack overflow mechanism or raise exceptions
1473 in that case. Therefore, it must use its own stack implementation
1474 for recursion. */
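/* A minimal sketch of the control pattern used below (illustrative
   only, not part of the build): recursion is simulated by pushing saved
   operands and a continuation code onto the infinite stack, jumping to
   DEEP_DO to process a sub-value, and then dispatching on the popped
   continuation code when the sub-value's result is ready, e.g.

     IFS_PUSH(so);                           // save state
     SET_R0(SCHEME_CAR(so));                 // argument of the "recursive call"
     GOTO_NEXT_CONT(DEEP_DO, DEEP_DO_CDR);   // call; resume at DEEP_DO_CDR_L

   DEEP_DO_CDR_L:                            // resumed with the result in R0
     so = IFS_GET(0);
*/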
1475 static Scheme_Object *places_deep_copy_worker(Scheme_Object *so, Scheme_Hash_Table **ht,
1476 int mode, int gcable, int can_raise_exn,
1477 Scheme_Object **master_chain,
1478 Scheme_Object **invalid_object,
1479 char **delayed_err, intptr_t *delayed_errno, intptr_t *delayed_errkind) {
1480 Scheme_Object *inf_stack = NULL;
1481 Scheme_Object *reg0 = NULL;
1482 uintptr_t inf_stack_depth = 0, inf_max_depth = 0;
1483
1484 Scheme_Object *fd_accumulators = NULL;
1485
1486 int set_mode = ((mode == mzPDC_COPY)
1487 || (mode == mzPDC_UNCOPY) || (mode == mzPDC_DIRECT_UNCOPY)
1488 || (mode == mzPDC_DESER));
1489
1490 /* lifted variables for xform*/
1491 Scheme_Object *pair;
1492 Scheme_Object *vec;
1493 Scheme_Object *nht;
1494 Scheme_Object *hti;
1495 Scheme_Object *htk;
1496 intptr_t i;
1497 intptr_t size;
1498 Scheme_Structure *st;
1499 Scheme_Serialized_Structure *sst;
1500 Scheme_Struct_Type *stype;
1501
1502 #define DEEP_DO_CDR 1
1503 #define DEEP_DO_FIN_PAIR 2
1504 #define DEEP_VEC1 3
1505 #define DEEP_ST1 4
1506 #define DEEP_ST2 5
1507 #define DEEP_SST1 6
1508 #define DEEP_SST2 7
1509 #define DEEP_HT1 8
1510 #define DEEP_HT2 9
1511 #define DEEP_RETURN 10
1512 #define DEEP_DONE 11
1513 #define RETURN do { goto DEEP_RETURN_L; } while(0);
1514 #define ABORT do { goto DEEP_DONE_L; } while(0);
1515 #define IFS_PUSH(x) inf_push(&inf_stack, x, &inf_stack_depth, &inf_max_depth, gcable)
1516 #define IFS_POP inf_pop(&inf_stack, &inf_stack_depth, gcable)
1517 #define IFS_POPN(n) do { int N = (n); while (N > 0) { IFS_POP; N--;} } while(0);
1518 #define IFS_GET(n) inf_get(&inf_stack, (n), &inf_stack_depth)
1519 #define IFS_SET(n, x) inf_set(&inf_stack, (n), x, &inf_stack_depth)
1520 #define GOTO_NEXT_CONT(dest, cont) do { IFS_PUSH(scheme_make_integer(cont)); goto DEEP_DO; } while(0);
1521 #define SET_R0(x) reg0 = x
1522 #define GET_R0() (reg0)
1523
1524 Scheme_Object *new_so = so;
1525 int ctr = 0;
1526
1527 /* First, check for simple values that don't need to be hashed: */
1528 new_so = shallow_types_copy(so, *ht, &fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1529 mode, can_raise_exn, master_chain, invalid_object);
1530 if (new_so) return new_so;
1531 if (delayed_err && *delayed_err) {
1532     bad_place_message2(so, fd_accumulators, can_raise_exn, *delayed_err, *delayed_errno, *delayed_errkind);
1533 return NULL;
1534 }
1535
1536 if (*ht) {
1537 Scheme_Object *r;
1538 if ((r = scheme_hash_get(*ht, so))) {
1539 return r;
1540 }
1541 }
1542
1543 if (!*ht) {
1544 Scheme_Hash_Table *_ht;
1545 _ht = scheme_make_hash_table(SCHEME_hash_ptr);
1546 *ht = _ht;
1547 }
1548
1549 inf_stack = create_infinite_stack(gcable);
1550 inf_stack_depth = 1;
1551 inf_max_depth = 1;
1552
1553 IFS_PUSH(scheme_make_integer(DEEP_DONE));
1554 SET_R0(so);
1555
1556 DEEP_DO:
1557 ctr++;
1558
1559 so = GET_R0();
1560 new_so = trivial_copy(so, master_chain);
1561 if (new_so) RETURN;
1562
1563 if (*ht) {
1564 if ((new_so = scheme_hash_get(*ht, so))) {
1565 SET_R0(new_so);
1566 RETURN;
1567 }
1568 }
1569
1570 new_so = shallow_types_copy(so, *ht, &fd_accumulators, delayed_err, delayed_errno, delayed_errkind,
1571 mode, can_raise_exn, master_chain, invalid_object);
1572 if (new_so) RETURN;
1573 if (delayed_err && *delayed_err) {
1574     bad_place_message2(so, fd_accumulators, can_raise_exn, *delayed_err, *delayed_errno, *delayed_errkind);
1575 ABORT;
1576 }
1577
1578 if (gcable && (mode == mzPDC_UNCOPY))
1579 SCHEME_USE_FUEL(1);
1580
1581 switch (SCHEME_TYPE(so)) {
1582 /* --------- pair ----------- */
1583 case scheme_pair_type:
1584 /* handle cycles: */
1585 if ((mode == mzPDC_COPY) || (mode == mzPDC_UNCOPY) || (mode == mzPDC_DIRECT_UNCOPY)) {
1586 pair = scheme_make_pair(scheme_false, scheme_false);
1587 SCHEME_PAIR_COPY_FLAGS(pair, so);
1588 } else
1589 pair = so;
1590 scheme_hash_set(*ht, so, pair);
1591
1592 IFS_PUSH(so);
1593 IFS_PUSH(pair);
1594 SET_R0(SCHEME_CAR(so));
1595 GOTO_NEXT_CONT(DEEP_DO, DEEP_DO_CDR);
1596
1597 DEEP_DO_CDR_L:
1598 pair = IFS_GET(0);
1599 so = IFS_GET(1);
1600 if (set_mode) {
1601 SCHEME_CAR(pair) = GET_R0();
1602 }
1603 SET_R0(SCHEME_CDR(so));
1604 GOTO_NEXT_CONT(DEEP_DO, DEEP_DO_FIN_PAIR);
1605
1606 DEEP_DO_FIN_PAIR_L:
1607 pair = IFS_POP;
1608 so = IFS_POP;
1609 if (set_mode) {
1610 SCHEME_CDR(pair) = GET_R0();
1611 new_so = pair;
1612 }
1613 RETURN;
1614 break;
1615
1616 /* --------- vector ----------- */
1617 case scheme_vector_type:
1618 size = SCHEME_VEC_SIZE(so);
1619
1620 if ((mode == mzPDC_COPY) || (mode == mzPDC_UNCOPY) || (mode == mzPDC_DIRECT_UNCOPY))
1621 vec = scheme_make_vector(size, 0);
1622 else
1623 vec = so;
1624
1625 /* handle cycles: */
1626 scheme_hash_set(*ht, so, vec);
1627 i = 0;
1628
1629 IFS_PUSH(vec);
1630 IFS_PUSH(so);
1631 IFS_PUSH(scheme_make_integer(size));
1632 IFS_PUSH(scheme_make_integer(i));
1633
1634 if (i < size) {
1635 SET_R0(SCHEME_VEC_ELS(so)[i]);
1636 GOTO_NEXT_CONT(DEEP_DO, DEEP_VEC1);
1637 }
1638 else {
1639 goto DEEP_VEC2;
1640 }
1641
1642 DEEP_VEC1_L:
1643 /* vector loop*/
1644 i = SCHEME_INT_VAL(IFS_GET(0));
1645 size = SCHEME_INT_VAL(IFS_GET(1));
1646 so = IFS_GET(2);
1647 vec = IFS_GET(3);
1648 if (set_mode) {
1649 SCHEME_VEC_ELS(vec)[i] = GET_R0();
1650 }
1651 i++;
1652 if (i < size) {
1653 IFS_SET(0, scheme_make_integer(i));
1654 SET_R0(SCHEME_VEC_ELS(so)[i]);
1655 GOTO_NEXT_CONT(DEEP_DO, DEEP_VEC1);
1656 }
1657 else {
1658 goto DEEP_VEC2;
1659 }
1660
1661 DEEP_VEC2:
1662 i = SCHEME_INT_VAL(IFS_POP);
1663 size = SCHEME_INT_VAL(IFS_POP);
1664 so = IFS_POP;
1665 vec = IFS_POP;
1666
1667 if (set_mode) {
1668 SCHEME_SET_IMMUTABLE(vec);
1669 new_so = vec;
1670 } else
1671 new_so = vec;
1672 RETURN;
1673 break;
1674
1675 /* --------- structure ----------- */
1676 case scheme_structure_type:
1677 st = (Scheme_Structure*)so;
1678 stype = st->stype;
1679 size = stype->num_slots;
1680
1681 if (!stype->prefab_key) {
1682 bad_place_message2(so, fd_accumulators, can_raise_exn, 0, 0, 0);
1683 if (invalid_object) *invalid_object = so;
1684 new_so = NULL;
1685 ABORT;
1686 }
1687 if (!(MZ_OPT_HASH_KEY(&stype->iso) & STRUCT_TYPE_ALL_IMMUTABLE)) {
1688 bad_place_message2(so, fd_accumulators, can_raise_exn, 0, 0, 0);
1689 if (invalid_object) *invalid_object = so;
1690 new_so = NULL;
1691 ABORT;
1692 }
1693
1694 IFS_PUSH((Scheme_Object *)st);
1695 SET_R0(SCHEME_CDR(stype->prefab_key));
1696 GOTO_NEXT_CONT(DEEP_DO, DEEP_ST1);
1697
1698 DEEP_ST1_L:
1699 st = (Scheme_Structure*) IFS_GET(0);
1700 so = (Scheme_Object *) st;
1701 size = st->stype->num_slots;
1702 if (mode == mzPDC_COPY) {
1703 new_so = scheme_make_serialized_struct_instance(GET_R0(), size);
1704 sst = (Scheme_Serialized_Structure*)new_so;
1705 } else if (mode == mzPDC_CHECK) {
1706 sst = NULL;
1707 } else {
1708 scheme_log_abort("encountered structure in bad mode");
1709 abort();
1710 }
1711
1712 /* handle cycles: */
1713 scheme_hash_set(*ht, so, new_so);
1714
1715 i = 0;
1716 if (i < size) {
1717 IFS_PUSH(scheme_make_integer(size));
1718 IFS_PUSH(scheme_make_integer(i));
1719 IFS_PUSH((Scheme_Object *)sst);
1720 SET_R0(st->slots[i]);
1721 GOTO_NEXT_CONT(DEEP_DO, DEEP_ST2);
1722 }
1723 else {
1724 IFS_POP;
1725 RETURN;
1726 }
1727
1728 DEEP_ST2_L:
1729 i = SCHEME_INT_VAL(IFS_GET(1));
1730 size = SCHEME_INT_VAL(IFS_GET(2));
1731 st = (Scheme_Structure*) IFS_GET(3);
1732 so = (Scheme_Object *) st;
1733 if (mode == mzPDC_COPY) {
1734 sst = (Scheme_Serialized_Structure *)IFS_GET(0);
1735 sst->slots[i] = GET_R0();
1736 }
1737 i++;
1738 if (i < size) {
1739 IFS_SET(1, scheme_make_integer(i));
1740 SET_R0(st->slots[i]);
1741 GOTO_NEXT_CONT(DEEP_DO, DEEP_ST2);
1742 }
1743 else {
1744 if (mode == mzPDC_COPY)
1745 new_so = (Scheme_Object *)sst;
1746 IFS_POPN(4);
1747 RETURN;
1748 }
1749 break;
1750
1751 /* --------- serialized structure ----------- */
1752 case scheme_serialized_structure_type:
1753 sst = (Scheme_Serialized_Structure*)so;
1754
1755 IFS_PUSH((Scheme_Object *)sst);
1756 SET_R0(sst->prefab_key);
1757 GOTO_NEXT_CONT(DEEP_DO, DEEP_SST1);
1758
1759 DEEP_SST1_L:
1760 sst = (Scheme_Serialized_Structure*) IFS_GET(0);
1761 so = (Scheme_Object *) sst;
1762 size = sst->num_slots;
1763 if ((mode == mzPDC_UNCOPY) || (mode == mzPDC_DESER)) {
1764 stype = scheme_lookup_prefab_type(GET_R0(), size);
1765 new_so = scheme_make_blank_prefab_struct_instance(stype);
1766
1767 st = (Scheme_Structure*)new_so;
1768 } else if (mode == mzPDC_CLEAN) {
1769 st = NULL;
1770 } else {
1771 scheme_log_abort("encountered serialized structure in bad mode");
1772 abort();
1773 }
1774
1775 /* handle cycles: */
1776 scheme_hash_set(*ht, so, new_so);
1777
1778 i = 0;
1779 if (i < size) {
1780 IFS_PUSH(scheme_make_integer(size));
1781 IFS_PUSH(scheme_make_integer(i));
1782 IFS_PUSH((Scheme_Object *)st);
1783 SET_R0(sst->slots[i]);
1784 GOTO_NEXT_CONT(DEEP_DO, DEEP_SST2);
1785 }
1786 else {
1787 IFS_POP;
1788 RETURN;
1789 }
1790
1791 DEEP_SST2_L:
1792 i = SCHEME_INT_VAL(IFS_GET(1));
1793 size = SCHEME_INT_VAL(IFS_GET(2));
1794 sst = (Scheme_Serialized_Structure*) IFS_GET(3);
1795 so = (Scheme_Object *) sst;
1796 if ((mode == mzPDC_UNCOPY) || (mode == mzPDC_DESER)) {
1797 st = (Scheme_Structure *) IFS_GET(0);
1798 st->slots[i] = GET_R0();
1799 }
1800 i++;
1801 if (i < size) {
1802 IFS_SET(1, scheme_make_integer(i));
1803 SET_R0(sst->slots[i]);
1804 GOTO_NEXT_CONT(DEEP_DO, DEEP_SST2);
1805 }
1806 else {
1807 new_so = (Scheme_Object *)st;
1808 IFS_POPN(4);
1809 RETURN;
1810 }
1811 break;
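 /* --------- hash tables ----------- */
 /* Immutable hash tables are rebuilt entry by entry using the hash
    iteration API; each key and value is itself copied via DEEP_DO. */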
1812 case scheme_hash_table_type:
1813 case scheme_hash_tree_type:
1814 case scheme_eq_hash_tree_type:
1815 case scheme_eqv_hash_tree_type:
1816 if (set_mode) {
1817 if (scheme_true == scheme_hash_eq_p(1, &so)) {
1818 nht = scheme_make_immutable_hasheq(0, NULL);
1819 }
1820 else if ( scheme_true == scheme_hash_eqv_p(1, &so)) {
1821 nht = scheme_make_immutable_hasheqv(0, NULL);
1822 }
1823 else if ( scheme_true == scheme_hash_equal_p(1, &so)) {
1824 nht = scheme_make_immutable_hash(0, NULL);
1825 }
1826 }
1827 else
1828 nht = so;
1829
1830 /* handle cycles: */
1831 scheme_hash_set(*ht, so, nht);
1832 hti = scheme_hash_table_iterate_start(1,&so);
1833 i = 0;
1834
1835 IFS_PUSH(nht);
1836 IFS_PUSH(so);
1837 IFS_PUSH(hti);
1838
1839 if (SCHEME_INTP(hti)) {
1840 Scheme_Object *a[2];
1841 a[0] = so;
1842 a[1] = hti;
1843 SET_R0(scheme_hash_table_iterate_key(2, a));
1844 GOTO_NEXT_CONT(DEEP_DO, DEEP_HT1);
1845 }
1846 else {
1847 goto DEEP_HT3;
1848 }
1849
1850 DEEP_HT1_L:
1851 /* hash table loop*/
1852 hti = IFS_GET(0);
1853 so = IFS_GET(1);
1854 nht = IFS_GET(2);
1855 IFS_PUSH(GET_R0());
1856
1857 {
1858 Scheme_Object *a[2];
1859 a[0] = so;
1860 a[1] = hti;
1861 SET_R0(scheme_hash_table_iterate_value(2, a));
1862 GOTO_NEXT_CONT(DEEP_DO, DEEP_HT2);
1863 }
1864
1865 DEEP_HT2_L:
1866 htk = IFS_POP;
1867 hti = IFS_GET(0);
1868 so = IFS_GET(1);
1869 nht = IFS_GET(2);
1870
1871 if (set_mode) {
1872 Scheme_Object *a[3];
1873 a[0] = nht;
1874 a[1] = htk;
1875 a[2] = GET_R0();
1876 nht = scheme_hash_table_put(3, a);
1877 IFS_SET(2, nht);
1878 }
1879 {
1880 Scheme_Object *a[3];
1881 a[0] = so;
1882 a[1] = hti;
1883 hti = scheme_hash_table_iterate_next(2, a);
1884 }
1885
1886 if (SCHEME_INTP(hti)) {
1887 Scheme_Object *a[2];
1888 IFS_SET(0, hti);
1889 a[0] = so;
1890 a[1] = hti;
1891 SET_R0(scheme_hash_table_iterate_key(2, a));
1892 GOTO_NEXT_CONT(DEEP_DO, DEEP_HT1);
1893 }
1894 else {
1895 goto DEEP_HT3;
1896 }
1897
1898 DEEP_HT3:
1899 hti = IFS_POP;
1900 so = IFS_POP;
1901 nht = IFS_POP;
1902
1903 if (set_mode)
1904 new_so = nht;
1905 else
1906 new_so = so;
1907 RETURN;
1908 break;
1909
1910 default:
1911 bad_place_message2(so, fd_accumulators, can_raise_exn, 0, 0, 0);
1912 if (invalid_object) *invalid_object = so;
1913 new_so = NULL;
1914 ABORT;
1915 break;
1916 }
1917
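 /* DEEP_RETURN_L pops the continuation tag that was pushed by
    GOTO_NEXT_CONT and jumps to the matching resume label, with the
    completed copy passed back in R0. */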
1918 DEEP_RETURN_L:
1919 {
1920 ctr--;
1921 SET_R0(new_so);
1922 switch(SCHEME_INT_VAL(IFS_POP)) {
1923 case DEEP_DO_CDR: goto DEEP_DO_CDR_L;
1924 case DEEP_DO_FIN_PAIR: goto DEEP_DO_FIN_PAIR_L;
1925 case DEEP_VEC1: goto DEEP_VEC1_L;
1926 case DEEP_ST1: goto DEEP_ST1_L;
1927 case DEEP_ST2: goto DEEP_ST2_L;
1928 case DEEP_SST1: goto DEEP_SST1_L;
1929 case DEEP_SST2: goto DEEP_SST2_L;
1930 case DEEP_HT1: goto DEEP_HT1_L;
1931 case DEEP_HT2: goto DEEP_HT2_L;
1932 case DEEP_RETURN: goto DEEP_RETURN_L;
1933 case DEEP_DONE: goto DEEP_DONE_L;
1934 default:
1935 printf("Invalid places_deep_copy_worker state\n");
1936 abort();
1937 }
1938 }
1939
1940 DEEP_DONE_L:
1941 free_infinite_stack((Scheme_Object **) inf_stack, inf_max_depth, gcable);
1942 return new_so;
1943
1944 #undef DEEP_DO_CDR
1945 #undef DEEP_DO_FIN_PAIR
1946 #undef DEEP_VEC1
1947 #undef DEEP_ST1
1948 #undef DEEP_ST2
1949 #undef DEEP_SST1
1950 #undef DEEP_SST2
1951 #undef DEEP_HT1
1952 #undef DEEP_HT2
1953 #undef DEEP_RETURN
1954 #undef DEEP_DONE
1955 #undef RETURN
1956 #undef IFS_PUSH
1957 #undef IFS_POP
1958 #undef IFS_POPN
1959 #undef IFS_GET
1960 #undef IFS_SET
1961 #undef GOTO_NEXT_CONT
1962 #undef GOTO_NEXT
1963
1964 }
1965
1966 static Scheme_Object *strip_chaperones_k(void);
1967
1968 /* Recognizes the same shapes as places_deep_copy_worker, but also
1969 allows chaperones and impersonators. The result is an
1970 impersonator-free copy of `so`. */
1971 static Scheme_Object *strip_chaperones(Scheme_Object *so)
1972 {
1973 Scheme_Object *o;
1974
1975 #ifdef DO_STACK_CHECK
1976 {
1977 # include "mzstkchk.h"
1978 {
1979 Scheme_Thread *p = scheme_current_thread;
1980 p->ku.k.p1 = (void *)so;
1981 return scheme_handle_stack_overflow(strip_chaperones_k);
1982 }
1983 }
1984 #endif
1985
1986 if (SCHEME_CHAPERONEP(so))
1987 o = SCHEME_CHAPERONE_VAL(so);
1988 else
1989 o = so;
1990
1991 if (SCHEME_PAIRP(o)) {
1992 Scheme_Object *a, *d;
1993 a = strip_chaperones(SCHEME_CAR(o));
1994 if (!a) return NULL;
1995 d = strip_chaperones(SCHEME_CDR(o));
1996 if (!d) return NULL;
1997 return scheme_make_pair(a, d);
1998 } else if (SCHEME_VECTORP(o)) {
1999 Scheme_Object *v, *e;
2000 intptr_t len = SCHEME_VEC_SIZE(o), i;
2001 v = scheme_make_vector(len, NULL);
2002 for (i = 0; i < len; i++) {
2003 if (SAME_OBJ(o, so))
2004 e = SCHEME_VEC_ELS(so)[i];
2005 else
2006 e = scheme_chaperone_vector_ref(so, i);
2007 e = strip_chaperones(e);
2008 if (!e) return NULL;
2009 SCHEME_VEC_ELS(v)[i] = e;
2010 }
2011 return v;
2012 } else if (SCHEME_HASHTP(o) || SCHEME_HASHTRP(o)) {
2013 return scheme_chaperone_hash_table_filtered_copy(so, strip_chaperones);
2014 } else if (SCHEME_STRUCTP(o)) {
2015 Scheme_Structure *s = (Scheme_Structure *)(o), *s2;
2016 Scheme_Object *e;
2017 intptr_t i, len = s->stype->num_slots;
2018 if (!s->stype->prefab_key)
2019 return NULL;
2020 s2 = (Scheme_Structure *)scheme_make_blank_prefab_struct_instance(s->stype);
2021 for (i = 0; i < len; i++) {
2022 if (SAME_OBJ(o, so))
2023 e = s->slots[i];
2024 else
2025 e = scheme_struct_ref(so, i);
2026 e = strip_chaperones(e);
2027 if (!e) return NULL;
2028 s2->slots[i] = e;
2029 }
2030 return (Scheme_Object *)s2;
2031 } else
2032 return so;
2033 }
2034
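 /* Continuation for strip_chaperones when it must restart after a C-stack
    overflow check: recovers the saved argument and retries. */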
2035 static Scheme_Object *strip_chaperones_k(void)
2036 {
2037 Scheme_Thread *p = scheme_current_thread;
2038 Scheme_Object *so = (Scheme_Object *)p->ku.k.p1;
2039
2040 p->ku.k.p1 = NULL;
2041
2042 return strip_chaperones(so);
2043 }
2044
2045 #if 0
2046 /* unused code, may be useful when/if we revive shared symbol and prefab key tables */
2047 Scheme_Struct_Type *scheme_make_prefab_struct_type_in_master(Scheme_Object *base,
2048 Scheme_Object *parent,
2049 int num_fields,
2050 int num_uninit_fields,
2051 Scheme_Object *uninit_val,
2052 char *immutable_array)
2053 {
2054 # ifdef MZ_PRECISE_GC
2055 void *original_gc;
2056 # endif
2057 Scheme_Object *cname;
2058 Scheme_Object *cuninit_val;
2059 char *cimm_array = NULL;
2060 int local_slots = num_fields + num_uninit_fields;
2061 Scheme_Struct_Type *stype;
2062
2063 # ifdef MZ_PRECISE_GC
2064 original_gc = GC_switch_to_master_gc();
2065 scheme_start_atomic();
2066 # endif
2067
2068 cname = places_deep_uncopy(base);
2069 cuninit_val = places_deep_uncopy(uninit_val);
2070 if (local_slots) {
2071 cimm_array = (char *)scheme_malloc_atomic(local_slots);
2072 memcpy(cimm_array, immutable_array, local_slots);
2073 }
2074 stype = scheme_make_prefab_struct_type_raw(cname, parent, num_fields, num_uninit_fields, cuninit_val, cimm_array);
2075
2076 # ifdef MZ_PRECISE_GC
2077 scheme_end_atomic_no_swap();
2078 GC_switch_back_from_master(original_gc);
2079 # endif
2080
2081 return stype;
2082 }
2083 #endif
2084
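 /* Emit a debug-level event on the place logger; `data` is a prefab struct
    of (place-id, tag, amount-or-#f, timestamp) so tools can parse place
    events. */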
2085 static void log_place_event(const char *what, const char *tag, int has_amount, intptr_t amount)
2086 {
2087 int id;
2088 Scheme_Logger *pl;
2089 Scheme_Object *data, *tag_sym, *t;
2090
2091 pl = scheme_get_place_logger();
2092 if (!scheme_log_level_p(pl, SCHEME_LOG_DEBUG))
2093 return;
2094
2095 id = scheme_current_place_id;
2096 tag_sym = scheme_intern_symbol(tag);
2097
2098 data = scheme_make_blank_prefab_struct_instance(place_event_prefab);
2099 ((Scheme_Structure *)data)->slots[0] = scheme_make_integer(id);
2100 ((Scheme_Structure *)data)->slots[1] = tag_sym;
2101 ((Scheme_Structure *)data)->slots[2] = (has_amount
2102 ? scheme_make_integer(amount)
2103 : scheme_false);
2104 t = scheme_make_double(scheme_get_inexact_milliseconds());
2105 ((Scheme_Structure *)data)->slots[3] = t;
2106
2107 if (has_amount)
2108 scheme_log_w_data(pl, SCHEME_LOG_DEBUG, 0, data,
2109 what, id, amount);
2110 else
2111 scheme_log_w_data(pl, SCHEME_LOG_DEBUG, 0, data,
2112 what, id);
2113 }
2114
2115 static void *place_start_proc(void *data_arg) {
2116 void *stack_base;
2117 void *rc;
2118 stack_base = PROMPT_STACK(stack_base);
2119 rc = place_start_proc_after_stack(data_arg, stack_base);
2120 stack_base = NULL;
2121 return rc;
2122 }
2123
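 /* Pausing a place: a fresh semaphore is installed in place_obj->pause; the
    target place notices it in scheme_place_check_for_interruption() and
    blocks on it until resume_one_place() posts or destroys it. */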
2124 static void pause_one_place(Scheme_Place *p)
2125 {
2126 Scheme_Place_Object *place_obj = p->place_obj;
2127
2128 if (place_obj) {
2129 mzrt_mutex_lock(place_obj->lock);
2130 if (!place_obj->pause) {
2131 mzrt_sema *s;
2132 mzrt_sema_create(&s, 0);
2133 place_obj->pause = s;
2134 }
2135 mzrt_mutex_unlock(place_obj->lock);
2136 }
2137 }
2138
2139 static void resume_one_place_with_lock(Scheme_Place_Object *place_obj)
2140 {
2141 if (place_obj->pause) {
2142 mzrt_sema *s = place_obj->pause;
2143 place_obj->pause = NULL;
2144 if (!place_obj->pausing) {
2145 mzrt_sema_destroy(s);
2146 } else {
2147 mzrt_sema_post(s);
2148 }
2149 }
2150 }
2151
2152 static void resume_one_place(Scheme_Place *p)
2153 {
2154 Scheme_Place_Object *place_obj = p->place_obj;
2155
2156 if (place_obj) {
2157 mzrt_mutex_lock(place_obj->lock);
2158 resume_one_place_with_lock(place_obj);
2159 mzrt_mutex_unlock(place_obj->lock);
2160 }
2161 }
2162
2163 static void pause_all_child_places()
2164 {
2165 Scheme_Place *p = all_child_places;
2166 while (p) {
2167 pause_one_place(p);
2168 p = p->next;
2169 }
2170 }
2171
2172 static void resume_all_child_places()
2173 {
2174 Scheme_Place *p = all_child_places;
2175 while (p) {
2176 resume_one_place(p);
2177 p = p->next;
2178 }
2179 }
2180
2181 void destroy_place_object_locks(Scheme_Place_Object *place_obj) {
2182 mzrt_mutex_destroy(place_obj->lock);
2183 if (place_obj->pause)
2184 mzrt_sema_destroy(place_obj->pause);
2185 place_obj->lock = NULL;
2186 place_obj->pause = NULL;
2187 }
2188
2189 void scheme_place_check_for_interruption()
2190 {
2191 Scheme_Place_Object *place_obj;
2192 char local_die;
2193 char local_break;
2194 mzrt_sema *local_pause;
2195
2196 place_obj = place_object;
2197 if (!place_obj)
2198 return;
2199
2200 while (1) {
2201 mzrt_mutex_lock(place_obj->lock);
2202
2203 local_die = place_obj->die;
2204 local_break = place_obj->pbreak;
2205 local_pause = place_obj->pause;
2206 if (local_die)
2207 place_obj->die = -1;
2208 place_obj->pbreak = 0;
2209 if (local_pause)
2210 place_obj->pausing = 1;
2211
2212 mzrt_mutex_unlock(place_obj->lock);
2213
2214 if (local_pause) {
2215 pause_all_child_places();
2216 mzrt_sema_wait(local_pause);
2217 mzrt_sema_destroy(local_pause);
2218 local_pause = NULL;
2219 resume_all_child_places();
2220 } else
2221 break;
2222 }
2223
2224 if (local_die > 0)
2225 scheme_kill_thread(scheme_main_thread);
2226 if (local_break)
2227 scheme_break_kind_thread(NULL, local_break);
2228 }
2229
2230 void scheme_place_set_memory_use(intptr_t mem_use)
2231 {
2232 Scheme_Place_Object *place_obj;
2233
2234 place_obj = place_object;
2235 if (!place_obj)
2236 return;
2237
2238 mzrt_mutex_lock(place_obj->lock);
2239 place_obj->memory_use = mem_use;
2240 mzrt_mutex_unlock(place_obj->lock);
2241
2242 if (place_obj->parent_signal_handle && place_obj->memory_limit) {
2243 if (mem_use > place_obj->memory_limit) {
2244 /* tell the parent place to force a GC, and therefore check
2245 custodian limits that will kill this place; pause this
2246 place and its children to give the original place time
2247 to kill this one */
2248 pause_all_child_places();
2249 mzrt_ensure_max_cas(place_obj->parent_need_gc, 1);
2250 scheme_signal_received_at(place_obj->parent_signal_handle);
2251 } else if (mem_use > (1 + place_obj->use_factor) * place_obj->prev_notify_memory_use) {
2252 /* make sure the parent notices that we're using more memory: */
2253 if (place_obj->parent_signal_handle)
2254 scheme_signal_received_at(place_obj->parent_signal_handle);
2255 place_obj->prev_notify_memory_use = mem_use;
2256 } else if (mem_use < place_obj->prev_notify_memory_use) {
2257 place_obj->prev_notify_memory_use = mem_use;
2258 }
2259 }
2260 }
2261
2262 void scheme_place_check_memory_use()
2263 {
2264 intptr_t m;
2265
2266 m = GC_propagate_hierarchy_memory_use();
2267 scheme_place_set_memory_use(m);
2268
2269 if (force_gc_for_place_accounting) {
2270 force_gc_for_place_accounting = 0;
2271 scheme_collect_garbage();
2272 resume_all_child_places();
2273 }
2274 }
2275
2276 static void place_set_result(struct Scheme_Place_Object *place_obj, Scheme_Object *result)
2277 /* always called as a place terminates */
2278 {
2279 intptr_t status;
2280
2281 if (SCHEME_INTP(result)) {
2282 status = SCHEME_INT_VAL(result);
2283 if (status < 1 || status > 255)
2284 status = 0;
2285 } else
2286 status = 0;
2287
2288 mzrt_mutex_lock(place_obj->lock);
2289 place_obj->result = status;
2290 if (place_obj->parent_signal_handle) {
2291 scheme_signal_received_at(place_obj->parent_signal_handle);
2292 place_obj->parent_signal_handle = NULL;
2293 }
2294 place_obj->signal_handle = NULL;
2295 place_obj->dead = 1;
2296 mzrt_mutex_unlock(place_obj->lock);
2297 }
2298
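 /* Tear down the current place: flush custodian-managed output (unless the
    place was killed), destroy the place instance, record the exit status for
    the parent, and drop this side's reference on the shared place object,
    freeing its locks if this was the last reference. */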
2299 static void terminate_current_place(Scheme_Object *result)
2300 {
2301 intptr_t place_obj_die;
2302 intptr_t refcount;
2303 Scheme_Place_Object *place_obj;
2304
2305 place_obj = place_object;
2306
2307 mzrt_mutex_lock(place_obj->lock);
2308 place_obj_die = place_obj->die;
2309 mzrt_mutex_unlock(place_obj->lock);
2310
2311 if (!place_obj_die) {
2312 if (scheme_flush_managed(NULL, 1))
2313 result = scheme_make_integer(1);
2314 }
2315
2316 place_object = NULL;
2317
2318 /*printf("Leavin place: proc thread id%u\n", ptid);*/
2319
2320 /* Beware that the destroy operation might trigger a GC to cooperate
2321 with the master GC: */
2322 scheme_place_instance_destroy(place_obj_die);
2323
2324 place_set_result(place_obj, result);
2325
2326 mzrt_mutex_lock(place_obj->lock);
2327
2328 place_obj->refcount--;
2329 refcount = place_obj->refcount;
2330
2331 mzrt_mutex_unlock(place_obj->lock);
2332
2333 if (!refcount)
2334 destroy_place_object_locks(place_obj);
2335 }
2336
2337 static Scheme_Object *def_place_exit_handler_proc(int argc, Scheme_Object *argv[])
2338 {
2339 log_place_event("id %d: exit (via `exit')", "exit", 0, 0);
2340
2341 terminate_current_place(argv[0]);
2342
2343 mz_proc_thread_exit(NULL);
2344
2345 return scheme_void; /* Never get here */
2346 }
2347
2348 static int do_embedded_load()
2349 {
2350 if (embedded_load) {
2351 Scheme_Thread * volatile p;
2352 mz_jmp_buf * volatile save, newbuf;
2353 volatile int rc;
2354
2355 p = scheme_get_current_thread();
2356 save = p->error_buf;
2357 p->error_buf = &newbuf;
2358
2359 if (!scheme_setjmp(newbuf)) {
2360 scheme_embedded_load(embedded_load_len, embedded_load, 1);
2361 rc = 1;
2362 } else {
2363 rc = 0;
2364 }
2365
2366 p->error_buf = save;
2367
2368 return rc;
2369 }
2370
2371 return 1;
2372 }
2373
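 /* Body of a new place's OS thread, called once the stack base is known:
    initializes a fresh runtime instance, uncopies the startup data passed
    from the parent, installs the place's standard ports, and then
    dynamic-requires the target module's start procedure, applying it to the
    place channel. */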
2374 static void *place_start_proc_after_stack(void *data_arg, void *stack_base) {
2375 Place_Start_Data *place_data;
2376 Scheme_Place_Object *place_obj;
2377 Scheme_Object *place_main;
2378 Scheme_Object *a[2], *channel, *result;
2379 intptr_t mem_limit;
2380
2381 place_data = (Place_Start_Data *) data_arg;
2382 data_arg = NULL;
2383
2384 /* printf("Startin place: proc thread id%u\n", ptid); */
2385
2386 /* create pristine THREAD_LOCAL variables*/
2387 null_out_runtime_globals();
2388
2389 mzrt_mutex_lock(id_counter_mutex);
2390 scheme_current_place_id = ++id_counter;
2391 mzrt_mutex_unlock(id_counter_mutex);
2392
2393 mem_limit = SCHEME_INT_VAL(place_data->cust_limit);
2394
2395 /* scheme_make_thread behaves differently if the above global vars are not null */
2396 if (!scheme_place_instance_init(stack_base, place_data->parent_gc, mem_limit)) {
2397 /* setup failed (because we're out of some resource?); try to exit gracefully */
2398 place_data->place_obj = NULL; /* reports failure */
2399 mzrt_sema_post(place_data->ready);
2400 return NULL;
2401 }
2402
2403 a[0] = places_deep_direct_uncopy(place_data->current_library_collection_paths);
2404 scheme_current_library_collection_paths(1, a);
2405 a[0] = places_deep_direct_uncopy(place_data->current_library_collection_links);
2406 scheme_current_library_collection_links(1, a);
2407 a[0] = places_deep_direct_uncopy(place_data->compiled_roots);
2408 scheme_compiled_file_roots(1, a);
2409 a[0] = places_deep_direct_uncopy(place_data->current_directory);
2410 scheme_current_directory(1, a);
2411 scheme_seal_parameters();
2412
2413 a[0] = places_deep_direct_uncopy(place_data->module);
2414 a[1] = places_deep_direct_uncopy(place_data->function);
2415 a[1] = scheme_intern_exact_symbol(SCHEME_SYM_VAL(a[1]), SCHEME_SYM_LEN(a[1]));
2416 channel = places_deep_direct_uncopy(place_data->channel);
2417 place_obj = place_data->place_obj;
2418 REGISTER_SO(place_object);
2419 place_object = place_obj;
2420 place_obj->refcount++;
2421
2422 place_obj->id = scheme_current_place_id;
2423
2424 {
2425 void *signal_handle;
2426 signal_handle = scheme_get_signal_handle();
2427 place_obj->signal_handle = signal_handle;
2428 }
2429
2430 {
2431 Scheme_Object *tmp;
2432 if (place_data->in) {
2433 tmp = scheme_make_rktio_fd_input_port(place_data->in, scheme_intern_symbol("place-in"));
2434 if (scheme_orig_stdin_port) {
2435 scheme_close_input_port(scheme_orig_stdin_port);
2436 }
2437 scheme_orig_stdin_port = tmp;
2438 }
2439 if (place_data->out >= 0) {
2440 tmp = scheme_make_rktio_fd_output_port(place_data->out, scheme_intern_symbol("place-out"), 0);
2441 if (scheme_orig_stdout_port) {
2442 scheme_close_output_port(scheme_orig_stdout_port);
2443 }
2444 scheme_orig_stdout_port = tmp;
2445 }
2446 if (place_data->err >= 0) {
2447 tmp = scheme_make_rktio_fd_output_port(place_data->err, scheme_intern_symbol("place-err"), 0);
2448 if (scheme_orig_stderr_port) {
2449 scheme_close_output_port(scheme_orig_stderr_port);
2450 }
2451 scheme_orig_stderr_port = tmp;
2452 }
2453 scheme_init_port_config();
2454 }
2455
2456 mzrt_sema_post(place_data->ready);
2457 place_data = NULL;
2458 # ifdef MZ_PRECISE_GC
2459 /* this prevents a master collection attempt from deadlocking with the
2460 place_data->ready semaphore above */
2461 GC_allow_master_gc_check();
2462 # endif
2463
2464 /* at this point, don't refer to place_data or its content
2465 anymore, because it's allocated in the other place */
2466
2467 scheme_set_root_param(MZCONFIG_EXIT_HANDLER, scheme_def_place_exit_proc);
2468
2469 log_place_event("id %d: enter", "enter", 0, 0);
2470
2471 if (do_embedded_load()) {
2472 Scheme_Thread * volatile p;
2473 mz_jmp_buf * volatile saved_error_buf;
2474 mz_jmp_buf new_error_buf;
2475 Scheme_Object * volatile rc = scheme_false;
2476
2477 p = scheme_get_current_thread();
2478 saved_error_buf = p->error_buf;
2479 p->error_buf = &new_error_buf;
2480 if (!scheme_setjmp(new_error_buf)) {
2481 if (!scheme_rktio)
2482 scheme_signal_error("place: I/O manager initialization failed");
2483
2484 place_main = scheme_dynamic_require(2, a);
2485 a[0] = channel;
2486 (void)scheme_apply(place_main, 1, a);
2487 rc = scheme_make_integer(0);
2488 } else {
2489 rc = scheme_make_integer(1);
2490 }
2491 p->error_buf = saved_error_buf;
2492
2493 result = rc;
2494 } else {
2495 result = scheme_make_integer(1);
2496 }
2497
2498 log_place_event("id %d: exit", "exit", 0, 0);
2499
2500 terminate_current_place(result);
2501
2502 return NULL;
2503 }
2504
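 /* Copy `so` into a fresh message allocator so it can be handed to another
    place. If the copy fails because of a chaperone, retry with a
    chaperone-stripped copy of the value. */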
2505 static Scheme_Object *places_serialize(Scheme_Object *so, void **msg_memory, Scheme_Object **master_chain,
2506 Scheme_Object **invalid_object,
2507 char **delayed_err, intptr_t *delayed_errno, intptr_t *delayed_errkind) {
2508 #if defined(MZ_USE_PLACES) && defined(MZ_PRECISE_GC)
2509 Scheme_Object *new_so;
2510 Scheme_Object *tmp;
2511
2512 new_so = trivial_copy(so, NULL);
2513 if (new_so) return new_so;
2514
2515 while (1) {
2516 GC_create_message_allocator();
2517 new_so = do_places_deep_copy(so, mzPDC_COPY, 0, master_chain, invalid_object,
2518 delayed_err, delayed_errno, delayed_errkind);
2519 tmp = GC_finish_message_allocator();
2520 if (!new_so)
2521 GC_destroy_orphan_msg_memory(tmp);
2522 else
2523 (*msg_memory) = tmp;
2524
2525 if (!new_so && !*delayed_err && SCHEME_CHAPERONEP(*invalid_object)) {
2526 /* try again after removing chaperones */
2527 so = strip_chaperones(so);
2528 if (!so)
2529 break;
2530 } else
2531 break;
2532 }
2533
2534 return new_so;
2535 #else
2536 return so;
2537 #endif
2538 }
2539
2540 static Scheme_Object *places_deserialize(Scheme_Object *so, void *msg_memory, Scheme_Thread *from_p)
2541 /* The caller must immediately drop any reference to `so' and
2542 `msg_memory' after this function returns; otherwise, since the
2543 `msg_memory' page may be deallocated, a GC could crash.
2544 Also, we have to clear out the in-flight references in `from_p`
2545 before the pages are discarded or adopted (where the latter
2546 can trigger a GC, which creates the main problem) */
2547 {
2548 #if defined(MZ_USE_PLACES) && defined(MZ_PRECISE_GC)
2549 Scheme_Object *new_so = so;
2550
2551 new_so = trivial_copy(so, NULL);
2552 if (new_so) return new_so;
2553
2554 /* small messages are deemed to be < 1k; this threshold could be tuned in either direction */
2555 if (GC_message_small_objects_size(msg_memory, 1024)) {
2556 new_so = do_places_deep_copy(so, mzPDC_UNCOPY, 1, NULL, NULL, NULL, NULL, NULL);
2557 from_p->place_channel_msg_in_flight = NULL;
2558 from_p->place_channel_msg_chain_in_flight = NULL;
2559 GC_dispose_short_message_allocator(msg_memory);
2560 /* from this point, we must return immediately, so that any
2561 reference to `so' can be dropped before GC. */
2562 msg_memory = NULL;
2563 }
2564 else {
2565 from_p->place_channel_msg_in_flight = NULL;
2566 from_p->place_channel_msg_chain_in_flight = NULL;
2567 GC_adopt_message_allocator(msg_memory);
2568 msg_memory = NULL;
2569 #if !defined(SHARED_TABLES)
2570 new_so = do_places_deep_copy(so, mzPDC_DESER, 1, NULL, NULL, NULL, NULL, NULL);
2571 #endif
2572 }
2573 return new_so;
2574 #else
2575 return so;
2576 #endif
2577 }
2578
2579 Scheme_Object *place_send(int argc, Scheme_Object *args[])
2580 {
2581 Scheme_Place_Bi_Channel *ch;
2582 if (SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type)) {
2583 ch = (Scheme_Place_Bi_Channel *) ((Scheme_Place *) args[0])->channel;
2584 }
2585 else if (SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_bi_channel_type)) {
2586 ch = (Scheme_Place_Bi_Channel *) args[0];
2587 }
2588 else {
2589 ch = NULL;
2590 scheme_wrong_contract("place-channel-put", "place-channel?", 0, argc, args);
2591 }
2592 place_async_send((Scheme_Place_Async_Channel *) ch->link->sendch, args[1]);
2593 return scheme_void;
2594 }
2595
2596 Scheme_Object *place_receive(int argc, Scheme_Object *args[]) {
2597 Scheme_Place_Bi_Channel *ch;
2598 if (SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type)) {
2599 ch = (Scheme_Place_Bi_Channel *) ((Scheme_Place *) args[0])->channel;
2600 }
2601 else if (SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_bi_channel_type)) {
2602 ch = (Scheme_Place_Bi_Channel *) args[0];
2603 }
2604 else {
2605 ch = NULL;
2606 scheme_wrong_contract("place-channel-get", "place-channel?", 0, argc, args);
2607 }
2608 return place_async_receive((Scheme_Place_Async_Channel *)ch->link->recvch);
2609 }
2610
2611 static Scheme_Object* place_allowed_p(int argc, Scheme_Object *args[])
2612 {
2613 Scheme_Hash_Table *ht = NULL;
2614 Scheme_Object *v, *invalid_object = NULL;
2615
2616 v = args[0];
2617
2618 if (places_deep_copy_worker(v, &ht, mzPDC_CHECK, 1, 0, NULL, &invalid_object, NULL, NULL, NULL))
2619 return scheme_true;
2620 else {
2621 if (invalid_object && SCHEME_CHAPERONEP(invalid_object)) {
2622 v = strip_chaperones(v);
2623 if (v && places_deep_copy_worker(v, &ht, mzPDC_CHECK, 1, 0, NULL, NULL, NULL, NULL, NULL))
2624 return scheme_true;
2625 }
2626 return scheme_false;
2627 }
2628 }
2629
2630 # ifdef MZ_PRECISE_GC
2631 void scheme_spawn_master_place() {
2632 mzrt_proc_first_thread_init();
2633
2634
2635 /* scheme_master_proc_thread = mz_proc_thread_create(master_scheme_place, NULL); */
2636 scheme_master_proc_thread = (void*) ~0;
2637
2638 }
2639 # endif
2640
2641 /*========================================================================*/
2642 /* places async channels */
2643 /*========================================================================*/
2644
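 /* Allocate in the master GC's space so the result can be shared across
    places; the calling thread temporarily switches to the master GC. */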
2645 static void* GC_master_malloc(size_t size) {
2646 void *ptr;
2647 #ifdef MZ_PRECISE_GC
2648 void *original_gc;
2649 original_gc = GC_switch_to_master_gc();
2650 #endif
2651 ptr = GC_malloc(size);
2652 #ifdef MZ_PRECISE_GC
2653 GC_switch_back_from_master(original_gc);
2654 #endif
2655 return ptr;
2656 }
2657
2658 static void* GC_master_malloc_tagged(size_t size) {
2659 void *ptr;
2660 #ifdef MZ_PRECISE_GC
2661 void *original_gc;
2662 original_gc = GC_switch_to_master_gc();
2663 #endif
2664 ptr = scheme_malloc_small_tagged(size);
2665 #ifdef MZ_PRECISE_GC
2666 GC_switch_back_from_master(original_gc);
2667 #endif
2668 return ptr;
2669 }
2670
2671 static void maybe_report_message_size(Scheme_Place_Async_Channel *ch)
2672 {
2673 #ifdef MZ_PRECISE_GC
2674 if ((ch->reported_size > (2 * ch->mem_size))
2675 || (((ch->reported_size * 2) < ch->mem_size)
2676 && ((ch->mem_size - ch->reported_size) > (1 << (LOG_APAGE_SIZE + 1))))) {
2677 intptr_t delta = ch->mem_size - ch->reported_size;
2678 ch->reported_size = ch->mem_size;
2679 GC_report_unsent_message_delta(delta);
2680 }
2681 #endif
2682 }
2683
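 /* Finalizer for an async channel: reports its memory as no longer in
    flight, releases any queued (orphaned) messages, and drops the references
    that the wakeup signal holds on waiting place objects. */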
2684 static void async_channel_finalize(void *p, void* data) {
2685 Scheme_Place_Async_Channel *ch;
2686 int i;
2687 Scheme_Hash_Table *ht = NULL;
2688 ch = (Scheme_Place_Async_Channel*)p;
2689
2690 ch->mem_size = 0;
2691 maybe_report_message_size(ch);
2692
2693 mzrt_mutex_destroy(ch->lock);
2694 ch->lock = NULL;
2695 for (i = 0; i < ch->size ; i++) {
2696 ht = NULL;
2697 if (ch->msgs[i]) {
2698 (void)places_deep_copy_worker(ch->msgs[i], &ht, mzPDC_CLEAN, 0, 0, NULL, NULL, NULL, NULL, NULL);
2699 ch->msgs[i] = NULL;
2700 }
2701 #ifdef MZ_PRECISE_GC
2702 if (ch->msg_memory[i]) {
2703 GC_destroy_orphan_msg_memory(ch->msg_memory[i]);
2704 }
2705 #endif
2706 ch->msg_memory[i] = NULL;
2707 ch->msg_chains[i] = NULL;
2708 }
2709 ch->in = 0;
2710 ch->out = 0;
2711 ch->count = 0;
2712
2713 if (ch->wakeup_signal) {
2714 /*release single receiver */
2715 if (SCHEME_PLACE_OBJECTP(ch->wakeup_signal)) {
2716 int refcount = 0;
2717 Scheme_Place_Object *place_obj;
2718 place_obj = ((Scheme_Place_Object *) ch->wakeup_signal);
2719
2720 mzrt_mutex_lock(place_obj->lock);
2721 place_obj->refcount--;
2722 refcount = place_obj->refcount;
2723 mzrt_mutex_unlock(place_obj->lock);
2724 if (!refcount) {
2725 destroy_place_object_locks(place_obj);
2726 }
2727 }
2728 /*release multiple receiver */
2729 else if (SCHEME_VECTORP(ch->wakeup_signal)) {
2730 Scheme_Object *v = ch->wakeup_signal;
2731 int i;
2732 int size = SCHEME_VEC_SIZE(v);
2733 for (i = 0; i < size; i++) {
2734 Scheme_Place_Object *o3;
2735 o3 = (Scheme_Place_Object *)SCHEME_VEC_ELS(v)[i];
2736 if (o3) {
2737 int refcount = 0;
2738 mzrt_mutex_lock(o3->lock);
2739 SCHEME_VEC_ELS(v)[i] = NULL;
2740 o3->refcount--;
2741 refcount = o3->refcount;
2742 mzrt_mutex_unlock(o3->lock);
2743
2744 if (!refcount) {
2745 destroy_place_object_locks(o3);
2746 }
2747 }
2748 }
2749 }
2750 }
2751 }
2752
2753 Scheme_Place_Async_Channel *place_async_channel_create() {
2754 Scheme_Object **msgs, **msg_chains;
2755 Scheme_Place_Async_Channel *ch;
2756 void **msg_memory;
2757 #ifdef MZ_PRECISE_GC
2758 void *original_gc;
2759 #endif
2760
2761 ch = GC_master_malloc_tagged(sizeof(Scheme_Place_Async_Channel));
2762 ch->so.type = scheme_place_async_channel_type;
2763
2764 msgs = GC_master_malloc(sizeof(Scheme_Object*) * 8);
2765 msg_memory = GC_master_malloc(sizeof(void*) * 8);
2766 msg_chains = GC_master_malloc(sizeof(Scheme_Object*) * 8);
2767
2768 ch->in = 0;
2769 ch->out = 0;
2770 ch->count = 0;
2771 ch->size = 8;
2772 mzrt_mutex_create(&ch->lock);
2773 ch->msgs = msgs;
2774 ch->msg_memory = msg_memory;
2775 ch->msg_chains = msg_chains;
2776 ch->wakeup_signal = NULL;
2777
2778 #ifdef MZ_PRECISE_GC
2779 original_gc = GC_switch_to_master_gc();
2780 GC_set_finalizer(ch, 1, 1, async_channel_finalize, NULL, NULL, NULL);
2781 GC_switch_back_from_master(original_gc);
2782 #endif
2783 /* FIXME? Need finalizer for non-precise GC if places become supported
2784 in that mode. */
2785
2786 return ch;
2787 }
2788
2789 static void async_channel_refcount(Scheme_Place_Async_Channel *ch, int for_send, int delta)
2790 {
2791 if (!ch->lock) {
2792 /* can happen via finalization, where the channel is already finalized
2793 (due to the lack of ordering on finalization) */
2794 return;
2795 }
2796 mzrt_mutex_lock(ch->lock);
2797 if (for_send)
2798 ch->wr_ref += delta;
2799 else
2800 ch->rd_ref += delta;
2801 if ((ch->wr_ref < 0) || (ch->rd_ref < 0)) {
2802 scheme_log_abort("internal error: bad reference count on async channel");
2803 abort();
2804 }
2805 mzrt_mutex_unlock(ch->lock);
2806 }
2807
2808 Scheme_Object *scheme_place_make_async_channel()
2809 {
2810 Scheme_Place_Async_Channel *ch;
2811 ch = place_async_channel_create();
2812
2813 /* we don't allocate a bi channel, so claim an implicit sender and receiver: */
2814 async_channel_refcount(ch, 0, 1);
2815 async_channel_refcount(ch, 1, 1);
2816
2817 return (Scheme_Object *)ch;
2818 }
2819
2820 static void bi_channel_refcount(Scheme_Place_Bi_Channel *ch, int delta)
2821 {
2822 async_channel_refcount(ch->link->sendch, 1, delta);
2823 async_channel_refcount(ch->link->recvch, 0, delta);
2824 }
2825
2826 static void bi_channel_refcount_down(void *_ch, void *data)
2827 {
2828 Scheme_Place_Bi_Channel *ch = (Scheme_Place_Bi_Channel *)_ch;
2829
2830 if (!ch->link->sendch) {
2831 /* released by scheme_free_place_bi_channels() already */
2832 return;
2833 }
2834
2835 if (ch->link->prev)
2836 ch->link->prev->next = ch->link->next;
2837 else
2838 place_channel_links = ch->link->next;
2839 if (ch->link->next)
2840 ch->link->next->prev = ch->link->prev;
2841
2842 bi_channel_refcount(ch, -1);
2843 }
2844
2845 void scheme_free_place_bi_channels()
2846 {
2847 Scheme_Place_Bi_Channel_Link *link;
2848
2849 for (link = place_channel_links; link; link = link->next) {
2850 async_channel_refcount(link->sendch, 1, -1);
2851 async_channel_refcount(link->recvch, 0, -1);
2852 /* It's possible that a GC will run after this: */
2853 link->sendch = NULL;
2854 link->recvch = NULL;
2855 }
2856 place_channel_links = NULL;
2857 }
2858
2859 static void bi_channel_set_finalizer(Scheme_Place_Bi_Channel *ch)
2860 {
2861 ch->link->next = place_channel_links;
2862 if (place_channel_links)
2863 place_channel_links->prev = ch->link;
2864 place_channel_links = ch->link;
2865
2866 scheme_add_finalizer(ch, bi_channel_refcount_down, NULL);
2867 }
2868
2869
2870 Scheme_Place_Bi_Channel *place_bi_channel_malloc() {
2871 Scheme_Place_Bi_Channel *ch;
2872 Scheme_Place_Bi_Channel_Link *link;
2873
2874 ch = MALLOC_ONE_TAGGED(Scheme_Place_Bi_Channel);
2875 ch->so.type = scheme_place_bi_channel_type;
2876
2877 link = (Scheme_Place_Bi_Channel_Link*)scheme_malloc(sizeof(Scheme_Place_Bi_Channel_Link));
2878 ch->link = link;
2879
2880 return ch;
2881 }
2882
2883 Scheme_Place_Bi_Channel *place_bi_channel_create() {
2884 Scheme_Place_Async_Channel *tmp;
2885 Scheme_Place_Bi_Channel *ch;
2886
2887 ch = place_bi_channel_malloc();
2888
2889 tmp = place_async_channel_create();
2890 ch->link->sendch = tmp;
2891 tmp = place_async_channel_create();
2892 ch->link->recvch = tmp;
2893
2894 bi_channel_refcount(ch, 1);
2895 bi_channel_set_finalizer(ch);
2896
2897 return ch;
2898 }
2899
2900 Scheme_Place_Bi_Channel *place_bi_peer_channel_create(Scheme_Place_Bi_Channel *orig) {
2901 Scheme_Place_Bi_Channel *ch;
2902
2903 ch = place_bi_channel_malloc();
2904
2905 ch->link->sendch = orig->link->recvch;
2906 ch->link->recvch = orig->link->sendch;
2907
2908 bi_channel_refcount(ch, 1);
2909 bi_channel_set_finalizer(ch);
2910
2911 return ch;
2912 }
2913
2914 static Scheme_Object *place_channel(int argc, Scheme_Object *args[]) {
2915 Scheme_Place_Bi_Channel *ch;
2916 Scheme_Object *a[2];
2917 ch = place_bi_channel_create();
2918 a[0] = (Scheme_Object *) ch;
2919 a[1] = (Scheme_Object *) place_bi_peer_channel_create(ch);
2920 return scheme_values(2, a);
2921 }
2922
2923 static Scheme_Object *place_channel_p(int argc, Scheme_Object *args[])
2924 {
2925 return (SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_bi_channel_type) ||
2926 SAME_TYPE(SCHEME_TYPE(args[0]), scheme_place_type)) ? scheme_true : scheme_false;
2927 }
2928
2929 static Scheme_Object *GC_master_make_vector(int size) {
2930 Scheme_Object *v;
2931 #ifdef MZ_PRECISE_GC
2932 void *original_gc;
2933 original_gc = GC_switch_to_master_gc();
2934 #endif
2935 v = scheme_make_vector(size, NULL);
2936 #ifdef MZ_PRECISE_GC
2937 GC_switch_back_from_master(original_gc);
2938 #endif
2939 return v;
2940 }
2941
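 /* Serialize `uo` and enqueue it on the async channel, growing the circular
    message queue if needed; if the queue was empty, signal any place(s)
    currently blocked on the receiving end. */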
2942 static void place_async_send(Scheme_Place_Async_Channel *ch, Scheme_Object *uo) {
2943 void *msg_memory = NULL;
2944 Scheme_Object *o, *master_chain = NULL, *invalid_object = NULL;
2945 intptr_t sz, cnt;
2946 char *delayed_err = NULL;
2947 intptr_t delayed_errno = 0;
2948 intptr_t delayed_errkind = 0;
2949
2950
2951 o = places_serialize(uo, &msg_memory, &master_chain, &invalid_object,
2952 &delayed_err, &delayed_errno, &delayed_errkind);
2953 /* uo needs to stay live until `master_chain` is registered in `ch` */
2954
2955 if (!o) {
2956 if (invalid_object && !delayed_err) {
2957 scheme_contract_error("place-channel-put",
2958 "value not allowed in a message",
2959 "value", 1, invalid_object,
2960 "message", 1, uo,
2961 NULL);
2962 } else
2963 bad_place_message(uo, delayed_err, delayed_errno, delayed_errkind);
2964 }
2965
2966 {
2967 intptr_t msg_size;
2968 msg_size = GC_message_allocator_size(msg_memory);
2969 log_place_event("id %d: put message of %" PRIdPTR " bytes", "put", 1, msg_size);
2970 }
2971
2972 mzrt_mutex_lock(ch->lock);
2973 {
2974 cnt = ch->count;
2975 if (ch->count == ch->size) { /* GROW QUEUE */
2976 Scheme_Object **new_msgs = NULL, **new_chains = NULL;
2977 void **new_msg_memory = NULL;
2978 intptr_t sz = 0;
2979
2980 /* Can't allocate while holding the lock, so release lock and loop: */
2981 while (ch->count == ch->size) {
2982 if ((sz == ch->size) && new_msgs) {
2983 if (ch->out < ch->in) {
2984 memcpy(new_msgs, ch->msgs + ch->out, sizeof(Scheme_Object *) * (ch->in - ch->out));
2985 memcpy(new_msg_memory, ch->msg_memory + ch->out, sizeof(void*) * (ch->in - ch->out));
2986 memcpy(new_chains, ch->msg_chains + ch->out, sizeof(void*) * (ch->in - ch->out));
2987 }
2988 else {
2989 int s1 = (ch->size - ch->out);
2990 memcpy(new_msgs, ch->msgs + ch->out, sizeof(Scheme_Object *) * s1);
2991 memcpy(new_msgs + s1, ch->msgs, sizeof(Scheme_Object *) * ch->in);
2992
2993 memcpy(new_msg_memory, ch->msg_memory + ch->out, sizeof(void*) * s1);
2994 memcpy(new_msg_memory + s1, ch->msg_memory, sizeof(void*) * ch->in);
2995
2996 memcpy(new_chains, ch->msg_chains + ch->out, sizeof(Scheme_Object *) * s1);
2997 memcpy(new_chains + s1, ch->msg_chains, sizeof(Scheme_Object *) * ch->in);
2998 }
2999
3000 ch->msgs = new_msgs;
3001 ch->msg_memory = new_msg_memory;
3002 ch->msg_chains = new_chains;
3003 ch->in = ch->size;
3004 ch->out = 0;
3005 ch->size *= 2;
3006
3007 break;
3008 } else {
3009 sz = ch->size;
3010 mzrt_mutex_unlock(ch->lock);
3011
3012 new_msgs = GC_master_malloc(sizeof(Scheme_Object*) * sz * 2);
3013 new_msg_memory = GC_master_malloc(sizeof(void*) * sz * 2);
3014 new_chains = GC_master_malloc(sizeof(Scheme_Object*) * sz * 2);
3015
3016 mzrt_mutex_lock(ch->lock);
3017 }
3018 }
3019 }
3020
3021 ch->msgs[ch->in] = o;
3022 ch->msg_memory[ch->in] = msg_memory;
3023 ch->msg_chains[ch->in] = master_chain;
3024 ++ch->count;
3025 ch->in = ((ch->in + 1) % ch->size);
3026
3027 sz = GC_message_allocator_size(msg_memory);
3028 ch->mem_size += sz;
3029
3030 maybe_report_message_size(ch);
3031 }
3032
3033 /* make sure `uo` is treated as live until here: */
3034 if (!uo) scheme_signal_error("?");
3035
3036 if (!cnt && ch->wakeup_signal) {
3037 /*wake up possibly sleeping single receiver */
3038 if (SCHEME_PLACE_OBJECTP(ch->wakeup_signal)) {
3039 Scheme_Place_Object *place_obj;
3040 place_obj = ((Scheme_Place_Object *) ch->wakeup_signal);
3041
3042 mzrt_mutex_lock(place_obj->lock);
3043 if (place_obj->signal_handle) {
3044 scheme_signal_received_at(place_obj->signal_handle);
3045 }
3046 mzrt_mutex_unlock(place_obj->lock);
3047 }
3048 /*wake up possibly sleeping multiple receiver */
3049 else if (SCHEME_VECTORP(ch->wakeup_signal)) {
3050 Scheme_Object *v = ch->wakeup_signal;
3051 int i, j, delta;
3052 int size = SCHEME_VEC_SIZE(v);
3053 int alive = 0;
3054 /* Try to be fair by cycling through the available places
3055 starting at `delta'. */
3056 delta = ch->delta++;
3057 if (delta < 0) delta = -delta;
3058 for (j = 0; j < size; j++) {
3059 Scheme_Place_Object *o3;
3060 i = (j + delta) % size;
3061 o3 = (Scheme_Place_Object *)SCHEME_VEC_ELS(v)[i];
3062 if (o3) {
3063 int refcount = 0;
3064 mzrt_mutex_lock(o3->lock);
3065 if (o3->signal_handle != NULL) {
3066 scheme_signal_received_at(o3->signal_handle);
3067 alive++;
3068 }
3069 else {
3070 SCHEME_VEC_ELS(v)[i] = NULL;
3071 o3->refcount--;
3072 }
3073 refcount = o3->refcount;
3074 mzrt_mutex_unlock(o3->lock);
3075
3076 if (!refcount) {
3077 destroy_place_object_locks(o3);
3078 }
3079 }
3080 }
3081 /* shrink if more than half are unused */
3082 if (alive < (size / 2)) {
3083 if (alive == 1) {
3084 ch->wakeup_signal = NULL;
3085 for (i = 0; i < size; i++) {
3086 Scheme_Place_Object *o2 = (Scheme_Place_Object *)SCHEME_VEC_ELS(v)[i];
3087 if (o2) {
3088 ch->wakeup_signal = (Scheme_Object *)o2;
3089 break;
3090 }
3091 }
3092 }
3093 else {
3094 Scheme_Object *nv;
3095 int ncnt = 0;
3096 nv = GC_master_make_vector(size/2);
3097 for (i = 0; i < size; i++) {
3098 Scheme_Place_Object *o2 = (Scheme_Place_Object *)SCHEME_VEC_ELS(v)[i];
3099 if (o2) {
3100 SCHEME_VEC_ELS(nv)[ncnt] = (Scheme_Object *)o2;
3101 ncnt++;
3102 }
3103 }
3104 ch->wakeup_signal = nv;
3105 }
3106 }
3107 }
3108 else {
3109 printf("Oops not a valid ch->wakeup_signal\n");
3110 exit(1);
3111 }
3112 }
3113 mzrt_mutex_unlock(ch->lock);
3114 }
3115
3116 void scheme_place_async_channel_send(Scheme_Object *ch, Scheme_Object *uo) {
3117 place_async_send((Scheme_Place_Async_Channel *)ch, uo);
3118 }
3119
3120 static void place_object_inc_refcount(Scheme_Object *o) {
3121 Scheme_Place_Object *place_obj;
3122 place_obj = (Scheme_Place_Object *) o;
3123
3124 mzrt_mutex_lock(place_obj->lock);
3125 place_obj->refcount++;
3126 mzrt_mutex_unlock(place_obj->lock);
3127 }
3128
3129 static void place_object_dec_refcount(Scheme_Object *o) {
3130 int refcount;
3131 Scheme_Place_Object *place_obj;
3132 place_obj = (Scheme_Place_Object *) o;
3133
3134 mzrt_mutex_lock(place_obj->lock);
3135 place_obj->refcount--;
3136 refcount = place_obj->refcount;
3137 mzrt_mutex_unlock(place_obj->lock);
3138
3139 if (!refcount) {
3140 destroy_place_object_locks(place_obj);
3141 }
3142 }
3143
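 /* Take the channel lock and, if no message is available, register `o`
    (the current place object) in the channel's wakeup signal so a future
    sender can wake it. Returns with the lock still held; the caller is
    responsible for unlocking. */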
3144 static void lock_and_register_place_object_with_channel(Scheme_Place_Async_Channel *ch, Scheme_Object *o)
3145 {
3146 Scheme_Object *avail_vector = NULL; /* allocated (without the lock) when the wakeup vector must grow */
3147
3148 mzrt_mutex_lock(ch->lock);
3149
3150 if (ch->count)
3151 return; /* no need for a wakeup signal, since data is available */
3152
3153 /* loop in case we need to release the lock temporarily to allocate: */
3154 while (1) {
3155 if (ch->wakeup_signal == o) {
3156 return;
3157 }
3158 else if (!ch->wakeup_signal) {
3159 place_object_inc_refcount(o);
3160 ch->wakeup_signal = o;
3161 return;
3162 }
3163 else if (SCHEME_PLACE_OBJECTP(ch->wakeup_signal)
3164 && ( (Scheme_Place_Object *) ch->wakeup_signal)->signal_handle == NULL) {
3165 place_object_dec_refcount(ch->wakeup_signal);
3166 place_object_inc_refcount(o);
3167 ch->wakeup_signal = o;
3168 return;
3169 }
3170 else if (SCHEME_VECTORP(ch->wakeup_signal)) {
3171 int i = 0;
3172 Scheme_Object *v = ch->wakeup_signal;
3173 int size = SCHEME_VEC_SIZE(v);
3174 /* already registered? */
3175 for (i = 0; i < size; i++) {
3176 Scheme_Object *vo = SCHEME_VEC_ELS(v)[i];
3177 if (vo == o)
3178 return;
3179 }
3180 /* look for unused slot in wakeup vector */
3181 for (i = 0; i < size; i++) {
3182 Scheme_Object *vo = SCHEME_VEC_ELS(v)[i];
3183 if (!vo) {
3184 place_object_inc_refcount(o);
3185 SCHEME_VEC_ELS(v)[i] = o;
3186 return;
3187 }
3188 else if (SCHEME_PLACE_OBJECTP(vo) &&
3189 ((Scheme_Place_Object *)vo)->signal_handle == NULL) {
3190 place_object_dec_refcount(vo);
3191 place_object_inc_refcount(o);
3192 SCHEME_VEC_ELS(v)[i] = o;
3193 return;
3194 }
3195 }
3196 /* fall through to here, need to grow wakeup vector;
3197 must do so without the lock */
3198 {
3199 if (avail_vector && (SCHEME_VEC_SIZE(avail_vector) == size*2)) {
3200 Scheme_Object *nv;
3201 nv = avail_vector;
3202 for (i = 0; i < size; i++) {
3203 SCHEME_VEC_ELS(nv)[i] = SCHEME_VEC_ELS(v)[i];
3204 }
3205 place_object_inc_refcount(o);
3206 SCHEME_VEC_ELS(nv)[size+1] = o;
3207 ch->wakeup_signal = nv;
3208 } else {
3209 mzrt_mutex_unlock(ch->lock);
3210 avail_vector = GC_master_make_vector(size*2);
3211 mzrt_mutex_lock(ch->lock);
3212 }
3213 }
3214 }
3215 /* grow from single wakeup to multiple wakeups */
3216 else if (SCHEME_PLACE_OBJECTP(ch->wakeup_signal)) {
3217 if (avail_vector && (SCHEME_VEC_SIZE(avail_vector) == 2)) {
3218 Scheme_Object *v;
3219 v = avail_vector;
3220 SCHEME_VEC_ELS(v)[0] = ch->wakeup_signal;
3221 place_object_inc_refcount(o);
3222 SCHEME_VEC_ELS(v)[1] = o;
3223 ch->wakeup_signal = v;
3224 } else {
3225 mzrt_mutex_unlock(ch->lock);
3226 avail_vector = GC_master_make_vector(2);
3227 mzrt_mutex_lock(ch->lock);
3228 }
3229 }
3230 else {
3231 printf("Oops not a valid ch->wakeup_signal\n");
3232 exit(1);
3233 }
3234 }
3235 }
3236
3237 static Scheme_Object *place_async_try_receive_raw(Scheme_Place_Async_Channel *ch,
3238 void **msg_memory_ptr,
3239 void **msg_chain_ptr,
3240 int *_no_writers)
3241 /* The result must not be retained past extraction from `*msg_memory_ptr'! */
3242 {
3243 Scheme_Object *msg = NULL;
3244 void *msg_memory = NULL, *msg_chain = NULL;
3245 intptr_t sz;
3246
3247 lock_and_register_place_object_with_channel(ch, (Scheme_Object *) place_object);
3248 {
3249 if (ch->count > 0) { /* GET MSG */
3250 msg = ch->msgs[ch->out];
3251 msg_memory = ch->msg_memory[ch->out];
3252 msg_chain = ch->msg_chains[ch->out];
3253
3254 ch->msgs[ch->out] = NULL;
3255 ch->msg_memory[ch->out] = NULL;
3256 ch->msg_chains[ch->out] = NULL;
3257
3258 /* No GCs from here until msg_chain is registered */
3259
3260 --ch->count;
3261 ch->out = ((ch->out + 1) % ch->size);
3262
3263 sz = GC_message_allocator_size(msg_memory);
3264 ch->mem_size -= sz;
3265
3266 maybe_report_message_size(ch);
3267 }
3268 }
3269 if (!msg && !ch->wr_ref && _no_writers)
3270 *_no_writers = 1;
3271 mzrt_mutex_unlock(ch->lock);
3272
3273 *msg_memory_ptr = msg_memory;
3274 *msg_chain_ptr = msg_chain;
3275
3276 return msg;
3277 }
3278
3279 static void cleanup_msg_memmory(void *thread) {
3280 Scheme_Thread *p = thread;
3281 if (p->place_channel_msg_in_flight) {
3282 p->place_channel_msg_chain_in_flight = NULL;
3283 GC_destroy_orphan_msg_memory(p->place_channel_msg_in_flight);
3284 p->place_channel_msg_in_flight = NULL;
3285 }
3286 }
3287
3288 static void log_received_msg(Scheme_Object *msg, void *msg_memory)
3289 {
3290 if (msg) {
3291 intptr_t msg_size;
3292 msg_size = GC_message_allocator_size(msg_memory);
3293 log_place_event("id %d: get message of %" PRIdPTR " bytes", "get", 1, msg_size);
3294 }
3295 }
3296
3297 static Scheme_Object *place_async_try_receive(Scheme_Place_Async_Channel *ch, int *_no_writers) {
3298 Scheme_Object *msg = NULL;
3299 Scheme_Thread *p = scheme_current_thread;
3300 GC_CAN_IGNORE void *msg_memory, *msg_chain;
3301 BEGIN_ESCAPEABLE(cleanup_msg_memmory, p);
3302 msg = place_async_try_receive_raw(ch, &msg_memory, &msg_chain, _no_writers);
3303 /* no GCs until msg_chain is registered */
3304 if (msg) {
3305 p->place_channel_msg_in_flight = msg_memory;
3306 p->place_channel_msg_chain_in_flight = msg_chain;
3307 log_received_msg(msg, msg_memory);
3308 msg = places_deserialize(msg, msg_memory, p);
3309 }
3310 END_ESCAPEABLE();
3311 return msg;
3312 }
3313
3314 static int scheme_place_async_ch_ready(Scheme_Place_Async_Channel *ch) {
3315 int ready = 0;
3316 lock_and_register_place_object_with_channel(ch, (Scheme_Object *) place_object);
3317 {
3318 if (ch->count > 0) ready = 1;
3319 if (!ch->wr_ref) ready = 1;
3320 }
3321 mzrt_mutex_unlock(ch->lock);
3322 return ready;
3323 }
3324
3325 static Scheme_Object *place_channel_finish_ready(void *d, int argc, struct Scheme_Object *argv[])
3326 {
3327 Scheme_Object *msg;
3328 Scheme_Thread *p = scheme_current_thread;
3329
3330 msg = *(Scheme_Object **)d;
3331
3332 BEGIN_ESCAPEABLE(cleanup_msg_memmory, p);
3333 msg = places_deserialize(msg, p->place_channel_msg_in_flight, p);
3334 END_ESCAPEABLE();
3335
3336 return msg;
3337 }
3338
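 /* Sync/evt readiness callback for a place channel: tries a raw receive;
    on success, stores the not-yet-deserialized message in atomic memory and
    installs a wrapper that finishes deserialization in the syncing thread. */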
3339 static int place_channel_ready(Scheme_Object *so, Scheme_Schedule_Info *sinfo) {
3340 Scheme_Place_Bi_Channel *ch;
3341 Scheme_Object *msg = NULL;
3342 Scheme_Object *wrapper;
3343 GC_CAN_IGNORE void *msg_memory = NULL, *msg_chain = NULL;
3344 int no_writers = 0;
3345
3346 if (SAME_TYPE(SCHEME_TYPE(so), scheme_place_type)) {
3347 ch = (Scheme_Place_Bi_Channel *) ((Scheme_Place *) so)->channel;
3348 }
3349 else {
3350 ch = (Scheme_Place_Bi_Channel *)so;
3351 }
3352
3353 msg = place_async_try_receive_raw((Scheme_Place_Async_Channel *) ch->link->recvch,
3354 &msg_memory, &msg_chain, &no_writers);
3355 /* no GCs until msg_chain is registered */
3356 if (msg != NULL) {
3357 Scheme_Object **msg_holder;
3358 Scheme_Thread *p = ((Syncing *)(sinfo->current_syncing))->thread;
3359
3360 p->place_channel_msg_in_flight = msg_memory;
3361 p->place_channel_msg_chain_in_flight = msg_chain;
3362
3363 log_received_msg(msg, msg_memory);
3364
3365 /* Hold `msg' in atomic memory, because we're not allowed to hold onto
3366 it beyond release of msg_memory, and `wrapper' and the result
3367 flow into the evt system in general. */
3368 msg_holder = (Scheme_Object **)scheme_malloc_atomic(sizeof(Scheme_Object*));
3369 *msg_holder = msg;
3370
3371 wrapper = scheme_make_closed_prim(place_channel_finish_ready, msg_holder);
3372 scheme_set_sync_target(sinfo, scheme_void, wrapper, NULL, 0, 0, NULL);
3373
3374 return 1;
3375 }
3376
3377 if (no_writers) {
3378 /* block on a semaphore that is not accessible, which may allow the thread
3379 to be GCed */
3380 scheme_set_sync_target(sinfo, scheme_make_sema(0), scheme_void, NULL, 0, 0, NULL);
3381 return 0;
3382 }
3383
3384 return 0;
3385 }
3386
3387 static Scheme_Object *place_async_receive(Scheme_Place_Async_Channel *ch) {
3388 Scheme_Object *msg = NULL;
3389 int no_writers = 0;
3390
3391 while (1) {
3392 msg = place_async_try_receive(ch, &no_writers);
3393 if (msg)
3394 break;
3395 else {
3396 if (no_writers) {
3397 /* No writers are left for this channel, so suspend the thread */
3398 scheme_wait_sema(scheme_make_sema(0), 0);
3399 }
3400 scheme_thread_block(0);
3401 scheme_block_until((Scheme_Ready_Fun) scheme_place_async_ch_ready, NULL, (Scheme_Object *) ch, 0);
3402 }
3403 }
3404
3405 return msg;
3406 }
3407
3408 Scheme_Object *scheme_place_async_channel_receive(Scheme_Object *ch) {
3409 return place_async_receive((Scheme_Place_Async_Channel *)ch);
3410 }
3411
3412 int scheme_place_can_receive()
3413 {
3414 return !!place_object;
3415 }
3416
3417 /*========================================================================*/
3418 /* precise GC traversers */
3419 /*========================================================================*/
3420
3421 #ifdef MZ_PRECISE_GC
3422
3423 START_XFORM_SKIP;
3424
3425 #include "mzmark_place.inc"
3426
3427 static void register_traversers(void)
3428 {
3429 GC_REG_TRAV(scheme_place_type, place_val);
3430 GC_REG_TRAV(scheme_place_object_type, place_object_val);
3431 GC_REG_TRAV(scheme_place_async_channel_type, place_async_channel_val);
3432 GC_REG_TRAV(scheme_place_bi_channel_type, place_bi_channel_val);
3433 GC_REG_TRAV(scheme_serialized_file_fd_type, serialized_file_fd_val);
3434 GC_REG_TRAV(scheme_serialized_tcp_fd_type, serialized_socket_fd_val);
3435 }
3436
3437 END_XFORM_SKIP;
3438
3439 #endif
3440
3441 /************************************************************************/
3442 /************************************************************************/
3443 /************************************************************************/
3444
3445 #endif
3446