/* Copyright (C) 2001-2006 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied, modified
   or distributed except as expressly authorized under the terms of that
   license.  Refer to licensing information at http://www.artifex.com/
   or contact Artifex Software, Inc., 7 Mt. Lassen Drive - Suite A-134,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861, for further information.
*/

/* $Id: zcontext.c 9043 2008-08-28 22:48:19Z giles $ */
/* Display PostScript context operators */
#include "memory_.h"
#include "ghost.h"
#include "gp.h"                 /* for usertime */
#include "oper.h"
#include "gsexit.h"
#include "gsgc.h"
#include "gsstruct.h"
#include "gsutil.h"
#include "gxalloc.h"
#include "gxstate.h"            /* for copying gstate stack */
#include "stream.h"             /* for files.h */
#include "files.h"
#include "idict.h"
#include "igstate.h"
#include "icontext.h"
#include "interp.h"
#include "isave.h"
#include "istruct.h"
#include "dstack.h"
#include "estack.h"
#include "ostack.h"
#include "store.h"

/*
 * Define the rescheduling interval.  A value of max_int effectively
 * disables scheduling.  The only reason not to make this const is to
 * allow it to be changed during testing.
 */
static int reschedule_interval = 100;

/* Scheduling hooks in interp.c */
extern int (*gs_interp_reschedule_proc)(i_ctx_t **);
extern int (*gs_interp_time_slice_proc)(i_ctx_t **);
extern int gs_interp_time_slice_ticks;
/* Context structure */
typedef enum {
    cs_active,
    cs_done
} ctx_status_t;
typedef long ctx_index_t;       /* >= 0 */
typedef struct gs_context_s gs_context_t;
typedef struct gs_scheduler_s gs_scheduler_t;

/*
 * If several contexts share local VM, then if any one of them has done an
 * unmatched save, the others are not allowed to run.  We handle this by
 * maintaining the following invariant:
 *      When control reaches the point in the scheduler that decides
 *      what context to run next, then for each group of contexts
 *      sharing local VM, if the save level for that VM is non-zero,
 *      saved_local_vm is only set in the context that has unmatched
 *      saves.
 * We maintain this invariant as follows: when control enters the
 * scheduler, if a context was running, we set its saved_local_vm flag
 * to (save_level > 0).  When selecting a context to run, we ignore
 * contexts where saved_local_vm is false and the local VM save_level > 0.
 */
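/*
 * Illustrative sketch (not part of the original code): the invariant
 * reduces to the runnability test that ctx_reschedule applies to each
 * candidate context below:
 *
 *      if (ctx->state.memory.space_local->saved != 0 &&
 *          !ctx->saved_local_vm)
 *          continue;   (skip: another context sharing this local VM
 *                       holds the unmatched saves)
 */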
struct gs_context_s {
    gs_context_state_t state;   /* (must be first for subclassing) */
    /* Private state */
    gs_scheduler_t *scheduler;
    ctx_status_t status;
    ctx_index_t index;          /* > 0 */
    bool detach;                /* true if a detach has been */
                                /* executed for this context */
    bool saved_local_vm;        /* (see above) */
    bool visible;               /* during GC, true if visible; */
                                /* otherwise, always true */
    ctx_index_t next_index;     /* next context with same status */
                                /* (active, waiting on same lock, */
                                /* waiting on same condition, */
                                /* waiting to be destroyed) */
    ctx_index_t joiner_index;   /* context waiting on a join */
                                /* for this one */
    gs_context_t *table_next;   /* hash table chain -- this must be a real */
                                /* pointer, for looking up indices */
};
static inline bool
context_is_visible(const gs_context_t *pctx)
{
    return (pctx && pctx->visible);
}
static inline gs_context_t *
visible_context(gs_context_t *pctx)
{
    return (pctx && pctx->visible ? pctx : (gs_context_t *)0);
}

/* GC descriptor */
static
CLEAR_MARKS_PROC(context_clear_marks)
{
    gs_context_t *const pctx = vptr;

    (*st_context_state.clear_marks)
        (cmem, &pctx->state, sizeof(pctx->state), &st_context_state);
}
static
ENUM_PTRS_WITH(context_enum_ptrs, gs_context_t *pctx)
ENUM_PREFIX(st_context_state, 2);
case 0: return ENUM_OBJ(pctx->scheduler);
case 1: {
    /* Return the next *visible* context. */
    const gs_context_t *next = pctx->table_next;

    while (next && !next->visible)
        next = next->table_next;
    return ENUM_OBJ(next);
}
ENUM_PTRS_END
static RELOC_PTRS_WITH(context_reloc_ptrs, gs_context_t *pctx)
    RELOC_PREFIX(st_context_state);
    RELOC_VAR(pctx->scheduler);
    /* Don't relocate table_next -- the scheduler object handles that. */
RELOC_PTRS_END
gs_private_st_complex_only(st_context, gs_context_t, "gs_context_t",
	    context_clear_marks, context_enum_ptrs, context_reloc_ptrs, 0);

/*
 * Context list structure.  Note that this uses context indices, not
 * pointers, to avoid having to worry about pointers between local VMs.
 */
typedef struct ctx_list_s {
    ctx_index_t head_index;
    ctx_index_t tail_index;
} ctx_list_t;
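/*
 * A minimal traversal sketch (for illustration only; visit() is a
 * hypothetical callback, and index_context is defined below):
 *
 *      ctx_index_t i;
 *
 *      for (i = pl->head_index; i != 0;
 *           i = index_context(psched, i)->next_index)
 *          visit(index_context(psched, i));
 */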

/* Condition structure */
typedef struct gs_condition_s {
    ctx_list_t waiting;         /* contexts waiting on this condition */
} gs_condition_t;
gs_private_st_simple(st_condition, gs_condition_t, "conditiontype");

/* Lock structure */
typedef struct gs_lock_s {
    ctx_list_t waiting;         /* contexts waiting for this lock, */
                                /* must be first for subclassing */
    ctx_index_t holder_index;   /* context holding the lock, if any */
    gs_scheduler_t *scheduler;
} gs_lock_t;
gs_private_st_ptrs1(st_lock, gs_lock_t, "locktype",
                    lock_enum_ptrs, lock_reloc_ptrs, scheduler);

/* Global state */
/*typedef struct gs_scheduler_s gs_scheduler_t; */      /* (above) */
struct gs_scheduler_s {
    gs_context_t *current;
    long usertime_initial;      /* usertime when current started running */
    ctx_list_t active;
    vm_reclaim_proc((*save_vm_reclaim));
    ctx_index_t dead_index;
#define CTX_TABLE_SIZE 19
    gs_context_t *table[CTX_TABLE_SIZE];
};

/* Convert a context index to a context pointer. */
static gs_context_t *
index_context(const gs_scheduler_t *psched, long index)
{
    gs_context_t *pctx;

    if (index == 0)
        return 0;
    pctx = psched->table[index % CTX_TABLE_SIZE];
    while (pctx != 0 && pctx->index != index)
        pctx = pctx->table_next;
    return pctx;
}

/* Structure definition */
gs_private_st_composite(st_scheduler, gs_scheduler_t, "gs_scheduler",
                        scheduler_enum_ptrs, scheduler_reloc_ptrs);
/*
 * The only cross-local-VM pointers in the context machinery are the
 * table_next pointers in contexts, and the current and table[] pointers
 * in the scheduler.  We need to handle all of these specially.
 */
static ENUM_PTRS_WITH(scheduler_enum_ptrs, gs_scheduler_t *psched)
{
    index -= 1;
    if (index < CTX_TABLE_SIZE) {
        gs_context_t *pctx = psched->table[index];

        while (pctx && !pctx->visible)
            pctx = pctx->table_next;
        return ENUM_OBJ(pctx);
    }
    return 0;
}
case 0: return ENUM_OBJ(visible_context(psched->current));
ENUM_PTRS_END
static RELOC_PTRS_WITH(scheduler_reloc_ptrs, gs_scheduler_t *psched)
{
    if (psched->current->visible)
        RELOC_VAR(psched->current);
    {
        int i;

        for (i = 0; i < CTX_TABLE_SIZE; ++i) {
            gs_context_t **ppctx = &psched->table[i];
            gs_context_t **pnext;

            for (; *ppctx; ppctx = pnext) {
                pnext = &(*ppctx)->table_next;
                if ((*ppctx)->visible)
                    RELOC_VAR(*ppctx);
            }
        }
    }
}
RELOC_PTRS_END

/*
 * The context scheduler requires special handling during garbage
 * collection, since it is the only structure that can legitimately
 * reference objects in multiple local VMs.  To deal with this, we wrap the
 * interpreter's garbage collector with code that prevents it from seeing
 * contexts in other than the current local VM.  ****** WORKS FOR LOCAL GC,
 * NOT FOR GLOBAL ******
 */
static void
context_reclaim(vm_spaces * pspaces, bool global)
{
    /*
     * Search through the registered roots to find the current context.
     * (This is a hack so we can find the scheduler.)
     */
    int i;
    gs_context_t *pctx = 0;     /* = 0 is bogus to pacify compilers */
    gs_scheduler_t *psched = 0;
    gs_ref_memory_t *lmem = 0;  /* = 0 is bogus to pacify compilers */
    chunk_locator_t loc;

    for (i = countof(pspaces->memories.indexed) - 1; psched == 0 && i > 0; --i) {
        gs_ref_memory_t *mem = pspaces->memories.indexed[i];
        const gs_gc_root_t *root = mem->roots;

        for (; root; root = root->next) {
            if (gs_object_type((gs_memory_t *)mem, *root->p) == &st_context) {
                pctx = *root->p;
                psched = pctx->scheduler;
                lmem = mem;
                break;
            }
        }
    }

    /* Hide all contexts in other (local) VMs. */
    /*
     * See context_create below for why we look for the context
     * in stable memory.
     */
    loc.memory = (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)lmem);
    loc.cp = 0;
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
        for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
            pctx->visible = chunk_locate_ptr(pctx, &loc);

#ifdef DEBUG
    if (!psched->current->visible) {
        lprintf("Current context is invisible!\n");
        gs_abort((gs_memory_t *)lmem);
    }
#endif

    /* Do the actual garbage collection. */
    psched->save_vm_reclaim(pspaces, global);

    /* Make all contexts visible again. */
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
        for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
            pctx->visible = true;
}

/* Forward references */
static int context_create(gs_scheduler_t *, gs_context_t **,
                          const gs_dual_memory_t *,
                          const gs_context_state_t *, bool);
static long context_usertime(void);
static int context_param(const gs_scheduler_t *, os_ptr, gs_context_t **);
static void context_destroy(gs_context_t *);
static void stack_copy(ref_stack_t *, const ref_stack_t *, uint, uint);
static int lock_acquire(os_ptr, gs_context_t *);
static int lock_release(ref *);

/* Internal procedures */
static void
context_load(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]loading %ld\n", pctx->index);
    if (pctx->state.keep_usertime)
        psched->usertime_initial = context_usertime();
    context_state_load(&pctx->state);
}
static void
context_store(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]storing %ld\n", pctx->index);
    context_state_store(&pctx->state);
    if (pctx->state.keep_usertime)
        pctx->state.usertime_total +=
            context_usertime() - psched->usertime_initial;
}

/* List manipulation */
static void
add_last(const gs_scheduler_t *psched, ctx_list_t *pl, gs_context_t *pc)
{
    pc->next_index = 0;
    if (pl->head_index == 0)
        pl->head_index = pc->index;
    else
        index_context(psched, pl->tail_index)->next_index = pc->index;
    pl->tail_index = pc->index;
}

/* ------ Initialization ------ */

static int ctx_initialize(i_ctx_t **);
static int ctx_reschedule(i_ctx_t **);
static int ctx_time_slice(i_ctx_t **);
static int
zcontext_init(i_ctx_t *i_ctx_p)
{
    /* Complete initialization after the interpreter is entered. */
    gs_interp_reschedule_proc = ctx_initialize;
    gs_interp_time_slice_proc = ctx_initialize;
    gs_interp_time_slice_ticks = 0;
    return 0;
}
/*
 * The interpreter calls this procedure at the first reschedule point.
 * It completes context initialization.
 */
static int
ctx_initialize(i_ctx_t **pi_ctx_p)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;       /* for gs_imemory */
    gs_ref_memory_t *imem = iimemory_system;
    gs_scheduler_t *psched =
        gs_alloc_struct_immovable((gs_memory_t *)imem, gs_scheduler_t,
                                  &st_scheduler, "gs_scheduler");

    psched->current = 0;
    psched->active.head_index = psched->active.tail_index = 0;
    psched->save_vm_reclaim = i_ctx_p->memory.spaces.vm_reclaim;
    i_ctx_p->memory.spaces.vm_reclaim = context_reclaim;
    psched->dead_index = 0;
    memset(psched->table, 0, sizeof(psched->table));
    /* Create an initial context. */
    if (context_create(psched, &psched->current, &gs_imemory, *pi_ctx_p, true) < 0) {
        lprintf("Can't create initial context!");
        gs_abort(imemory);
    }
    psched->current->scheduler = psched;
    /* Hook into the interpreter. */
    *pi_ctx_p = &psched->current->state;
    gs_interp_reschedule_proc = ctx_reschedule;
    gs_interp_time_slice_proc = ctx_time_slice;
    gs_interp_time_slice_ticks = reschedule_interval;
    return 0;
}

/* ------ Interpreter interface to scheduler ------ */

/* When an operator decides it is time to run a new context, */
/* it returns o_reschedule.  The interpreter saves all its state in */
/* memory, calls ctx_reschedule, and then loads the state from memory. */
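/*
 * Schematically, the interpreter side of this hand-off looks like the
 * following (a sketch only; the real loop lives in interp.c):
 *
 *      code = interpret(...);
 *      if (code == o_reschedule) {
 *          ...interpreter state is already saved in memory...
 *          code = (*gs_interp_reschedule_proc)(&i_ctx_p);
 *          ...state is reloaded from the (possibly new) i_ctx_p...
 *      }
 */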
static int
ctx_reschedule(i_ctx_t **pi_ctx_p)
{
    gs_context_t *current = (gs_context_t *)*pi_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

#ifdef DEBUG
    if (*pi_ctx_p != &current->state) {
        lprintf2("current->state = 0x%lx, != i_ctx_p = 0x%lx!\n",
                 (ulong)&current->state, (ulong)*pi_ctx_p);
    }
#endif
    /* If there are any dead contexts waiting to be released, */
    /* take care of that now. */
    while (psched->dead_index != 0) {
        gs_context_t *dead = index_context(psched, psched->dead_index);
        long next_index = dead->next_index;

        if (current == dead) {
            if_debug1('"', "[\"]storing dead %ld\n", current->index);
            context_state_store(&current->state);
            current = 0;
        }
        context_destroy(dead);
        psched->dead_index = next_index;
    }
    /* Update saved_local_vm.  See above for the invariant. */
    if (current != 0)
        current->saved_local_vm =
            current->state.memory.space_local->saved != 0;
    /* Run the first ready context, taking the 'save' lock into account. */
    {
        gs_context_t *prev = 0;
        gs_context_t *ready;

        for (ready = index_context(psched, psched->active.head_index);;
             prev = ready, ready = index_context(psched, ready->next_index)
            ) {
            if (ready == 0) {
                if (current != 0)
                    context_store(psched, current);
                lprintf("No context to run!");
                return_error(e_Fatal);
            }
            /* See above for an explanation of the following test. */
            if (ready->state.memory.space_local->saved != 0 &&
                !ready->saved_local_vm
                )
                continue;
            /* Found a context to run. */
            {
                ctx_index_t next_index = ready->next_index;

                if (prev)
                    prev->next_index = next_index;
                else
                    psched->active.head_index = next_index;
                if (!next_index)
                    psched->active.tail_index = (prev ? prev->index : 0);
            }
            break;
        }
        if (ready == current)
            return 0;           /* no switch */
        /*
         * Save the state of the current context in psched->current,
         * if any context is current.
         */
        if (current != 0)
            context_store(psched, current);
        psched->current = ready;
        /* Load the state of the new current context. */
        context_load(psched, ready);
        /* Switch the interpreter's context state pointer. */
        *pi_ctx_p = &ready->state;
    }
    return 0;
}

/* If the interpreter wants to time-slice, it saves its state, */
/* calls ctx_time_slice, and reloads its state. */
static int
ctx_time_slice(i_ctx_t **pi_ctx_p)
{
    gs_scheduler_t *psched = ((gs_context_t *)*pi_ctx_p)->scheduler;

    if (psched->active.head_index == 0)
        return 0;
    if_debug0('"', "[\"]time-slice\n");
    add_last(psched, &psched->active, psched->current);
    return ctx_reschedule(pi_ctx_p);
}

/* ------ Context operators ------ */

/* - currentcontext <context> */
static int
zcurrentcontext(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_context_t *current = (const gs_context_t *)i_ctx_p;

    push(1);
    make_int(op, current->index);
    return 0;
}

/* <context> detach - */
static int
zdetach(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_scheduler_t *psched = ((gs_context_t *)i_ctx_p)->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
        return code;
    if_debug2('\'', "[']detach %ld, status = %d\n",
              pctx->index, pctx->status);
    if (pctx->joiner_index != 0 || pctx->detach)
        return_error(e_invalidcontext);
    switch (pctx->status) {
        case cs_active:
            pctx->detach = true;
            break;
        case cs_done:
            context_destroy(pctx);
    }
    pop(1);
    return 0;
}

static int
    do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin,
            const ref * pstdout, uint mcount, bool local),
    values_older_than(const ref_stack_t * pstack, uint first, uint last,
                      int max_space);
static int
    fork_done(i_ctx_t *),
    fork_done_with_error(i_ctx_t *),
    finish_join(i_ctx_t *),
    reschedule_now(i_ctx_t *);

/* <mark> <obj1> ... <objN> <proc> .fork <context> */
/* <mark> <obj1> ... <objN> <proc> <stdin|null> <stdout|null> */
/*   .localfork <context> */
static int
zfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    ref rnull;

    if (mcount == 0)
        return_error(e_unmatchedmark);
    make_null(&rnull);
    return do_fork(i_ctx_p, op, &rnull, &rnull, mcount, false);
}
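
/*
 * Example usage from PostScript (a sketch only, assuming the Display
 * PostScript context extensions are available in the build):
 *
 *      mark 3 4 { add } .fork  % returns a context id; the new context
 *                              % computes 3 4 add
 *      join                    % => mark 7 (the forked context's operand
 *                              % stack, pushed above a mark)
 */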
static int
zlocalfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    int code;

    if (mcount == 0)
        return_error(e_unmatchedmark);
    code = values_older_than(&o_stack, 1, mcount - 1, avm_local);
    if (code < 0)
        return code;
    code = do_fork(i_ctx_p, op - 2, op - 1, op, mcount - 2, true);
    if (code < 0)
        return code;
    op = osp;
    op[-2] = *op;
    pop(2);
    return code;
}

/* Internal procedure to actually do the fork operation. */
static int
do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin, const ref * pstdout,
        uint mcount, bool local)
{
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;
    stream *s;
    gs_dual_memory_t dmem;
    gs_context_t *pctx;
    ref old_userdict, new_userdict;
    int code;

    check_proc(*op);
    if (iimemory_local->save_level)
        return_error(e_invalidcontext);
    if (r_has_type(pstdout, t_null)) {
        code = zget_stdout(i_ctx_p, &s);
        if (code < 0)
            return code;
        pstdout = &ref_stdio[1];
    } else
        check_read_file(s, pstdout);
    if (r_has_type(pstdin, t_null)) {
        code = zget_stdin(i_ctx_p, &s);
        if (code < 0)
            return code;
        pstdin = &ref_stdio[0];
    } else
        check_read_file(s, pstdin);
    dmem = gs_imemory;
    if (local) {
        /* Share global VM, private local VM. */
        ref *puserdict;
        uint userdict_size;
        gs_memory_t *parent = iimemory_local->non_gc_memory;
        gs_ref_memory_t *lmem;
        gs_ref_memory_t *lmem_stable;

        if (dict_find_string(systemdict, "userdict", &puserdict) <= 0 ||
            !r_has_type(puserdict, t_dictionary)
            )
            return_error(e_Fatal);
        old_userdict = *puserdict;
        userdict_size = dict_maxlength(&old_userdict);
        lmem = ialloc_alloc_state(parent, iimemory_local->chunk_size);
        lmem_stable = ialloc_alloc_state(parent, iimemory_local->chunk_size);
        if (lmem == 0 || lmem_stable == 0) {
            gs_free_object(parent, lmem_stable, "do_fork");
            gs_free_object(parent, lmem, "do_fork");
            return_error(e_VMerror);
        }
        lmem->space = avm_local;
        lmem_stable->space = avm_local;
        lmem->stable_memory = (gs_memory_t *)lmem_stable;
        dmem.space_local = lmem;
        code = context_create(psched, &pctx, &dmem, &pcur->state, false);
        if (code < 0) {
            /****** FREE lmem ******/
            return code;
        }
        /*
         * Create a new userdict.  PostScript code will take care of
         * the rest of the initialization of the new context.
         */
        code = dict_alloc(lmem, userdict_size, &new_userdict);
        if (code < 0) {
            context_destroy(pctx);
            /****** FREE lmem ******/
            return code;
        }
    } else {
        /* Share global and local VM. */
        code = context_create(psched, &pctx, &dmem, &pcur->state, false);
        if (code < 0) {
            /****** FREE lmem ******/
            return code;
        }
        /*
         * Copy the gstate stack.  The current method is not elegant;
         * in fact, I'm not entirely sure it works.
         */
        {
            int n;
            const gs_state *old;
            gs_state *new;

            for (n = 0, old = igs; old != 0; old = gs_state_saved(old))
                ++n;
            for (old = pctx->state.pgs; old != 0; old = gs_state_saved(old))
                --n;
            for (; n > 0 && code >= 0; --n)
                code = gs_gsave(pctx->state.pgs);
            if (code < 0) {
                /****** FREE lmem & GSTATES ******/
                return code;
            }
            for (old = igs, new = pctx->state.pgs;
                 old != 0 /* (== new != 0) */ && code >= 0;
                 old = gs_state_saved(old), new = gs_state_saved(new)
                )
                code = gs_setgstate(new, old);
            if (code < 0) {
                /****** FREE lmem & GSTATES ******/
                return code;
            }
        }
    }
    pctx->state.language_level = i_ctx_p->language_level;
    pctx->state.dict_stack.min_size = idict_stack.min_size;
    pctx->state.dict_stack.userdict_index = idict_stack.userdict_index;
    pctx->state.stdio[0] = *pstdin;
    pctx->state.stdio[1] = *pstdout;
    pctx->state.stdio[2] = pcur->state.stdio[2];
    /* Initialize the interpreter stacks. */
    {
        ref_stack_t *dstack = (ref_stack_t *)&pctx->state.dict_stack;
        uint count = ref_stack_count(&d_stack);
        uint copy = (local ? min_dstack_size : count);

        ref_stack_push(dstack, copy);
        stack_copy(dstack, &d_stack, copy, count - copy);
        if (local) {
            /* Substitute the new userdict for the old one. */
            long i;

            for (i = 0; i < copy; ++i) {
                ref *pdref = ref_stack_index(dstack, i);

                if (obj_eq(imemory, pdref, &old_userdict))
                    *pdref = new_userdict;
            }
        }
    }
    {
        ref_stack_t *estack = (ref_stack_t *)&pctx->state.exec_stack;

        ref_stack_push(estack, 3);
        /* fork_done must be executed in both normal and error cases. */
        make_mark_estack(estack->p - 2, es_other, fork_done_with_error);
        make_oper(estack->p - 1, 0, fork_done);
        *estack->p = *op;
    }
    {
        ref_stack_t *ostack = (ref_stack_t *)&pctx->state.op_stack;
        uint count = mcount - 2;

        ref_stack_push(ostack, count);
        stack_copy(ostack, &o_stack, count, osp - op + 1);
    }
    pctx->state.binary_object_format = pcur->state.binary_object_format;
    add_last(psched, &psched->active, pctx);
    pop(mcount - 1);
    op = osp;
    make_int(op, pctx->index);
    return 0;
}

/*
 * Check that all values being passed by fork or join are old enough
 * to be valid in the environment to which they are being transferred.
 */
static int
values_older_than(const ref_stack_t * pstack, uint first, uint last,
                  int max_space)
{
    uint i;

    for (i = first; i <= last; ++i)
        if (r_space(ref_stack_index(pstack, (long)i)) >= max_space)
            return_error(e_invalidaccess);
    return 0;
}
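
/*
 * For example, zlocalfork above calls
 * values_older_than(&o_stack, 1, mcount - 1, avm_local) to reject any
 * argument allocated in local VM: the forked context receives a private
 * local VM, so only values in global (or more senior) VM may safely
 * cross the boundary.
 */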

/* This gets executed when a context terminates normally. */
/****** MUST DO ALL RESTORES ******/
/****** WHAT IF invalidrestore? ******/
static int
fork_done(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;

    if_debug2('\'', "[']done %ld%s\n", pcur->index,
              (pcur->detach ? ", detached" : ""));
    /*
     * Clear the context's dictionary, execution and graphics stacks
     * now, to retain as little as possible in case of a garbage
     * collection or restore.  We know that fork_done is the
     * next-to-bottom entry on the execution stack.
     */
    ref_stack_pop_to(&d_stack, min_dstack_size);
    pop_estack(&pcur->state, ref_stack_count(&e_stack) - 1);
    gs_grestoreall(igs);
    /*
     * If there are any unmatched saves, we need to execute restores
     * until there aren't.  An invalidrestore is possible and will
     * result in an error termination.
     */
    if (iimemory_local->save_level) {
        ref *prestore;

        if (dict_find_string(systemdict, "restore", &prestore) <= 0) {
            lprintf("restore not found in systemdict!");
            return_error(e_Fatal);
        }
        if (pcur->detach) {
            ref_stack_clear(&o_stack);  /* help avoid invalidrestore */
            op = osp;
        }
        push(1);
        make_tv(op, t_save, saveid, alloc_save_current_id(&gs_imemory));
        push_op_estack(fork_done);
        ++esp;
        ref_assign(esp, prestore);
        return o_push_estack;
    }
    if (pcur->detach) {
        /*
         * We would like to free the context's memory, but we can't do
         * it yet, because the interpreter still has references to it.
         * Instead, queue the context to be freed the next time we
         * reschedule.  We can, however, clear its operand stack now.
         */
        ref_stack_clear(&o_stack);
        context_store(psched, pcur);
        pcur->next_index = psched->dead_index;
        psched->dead_index = pcur->index;
        psched->current = 0;
    } else {
        gs_context_t *pctx = index_context(psched, pcur->joiner_index);

        pcur->status = cs_done;
        /* Schedule the context waiting to join this one, if any. */
        if (pctx != 0)
            add_last(psched, &psched->active, pctx);
    }
    return o_reschedule;
}
/*
 * This gets executed when the stack is being unwound for an error
 * termination.
 */
static int
fork_done_with_error(i_ctx_t *i_ctx_p)
{
    /****** WHAT TO DO? ******/
    return fork_done(i_ctx_p);
}

/* <context> join <mark> <obj1> ... <objN> */
static int
zjoin(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
        return code;
    if_debug2('\'', "[']join %ld, status = %d\n",
              pctx->index, pctx->status);
    /*
     * It doesn't seem logically necessary, but the Red Book says that
     * the context being joined must share both global and local VM with
     * the current context.
     */
    if (pctx->joiner_index != 0 || pctx->detach || pctx == current ||
        pctx->state.memory.space_global !=
            current->state.memory.space_global ||
        pctx->state.memory.space_local !=
            current->state.memory.space_local ||
        iimemory_local->save_level != 0
        )
        return_error(e_invalidcontext);
    switch (pctx->status) {
        case cs_active:
            /*
             * We need to re-execute the join after the joined
             * context is done.  Since we can't return both
             * o_push_estack and o_reschedule, we push a call on
             * reschedule_now, which accomplishes the latter.
             */
            check_estack(2);
            push_op_estack(finish_join);
            push_op_estack(reschedule_now);
            pctx->joiner_index = current->index;
            return o_push_estack;
        case cs_done:
            {
                const ref_stack_t *ostack =
                    (ref_stack_t *)&pctx->state.op_stack;
                uint count = ref_stack_count(ostack);

                push(count);
                {
                    ref *rp = ref_stack_index(&o_stack, count);

                    make_mark(rp);
                }
                stack_copy(&o_stack, ostack, count, 0);
                context_destroy(pctx);
            }
    }
    return 0;
}

/* Finish a deferred join. */
static int
finish_join(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
        return code;
    if_debug2('\'', "[']finish_join %ld, status = %d\n",
              pctx->index, pctx->status);
    if (pctx->joiner_index != current->index)
        return_error(e_invalidcontext);
    pctx->joiner_index = 0;
    return zjoin(i_ctx_p);
}

/* Reschedule now. */
static int
reschedule_now(i_ctx_t *i_ctx_p)
{
    return o_reschedule;
}

/* - yield - */
static int
zyield(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

    if (psched->active.head_index == 0)
        return 0;
    if_debug0('"', "[\"]yield\n");
    add_last(psched, &psched->active, current);
    return o_reschedule;
}

/* ------ Condition and lock operators ------ */

static int
    monitor_cleanup(i_ctx_t *),
    monitor_release(i_ctx_t *),
    await_lock(i_ctx_t *);
static void
    activate_waiting(gs_scheduler_t *, ctx_list_t * pcl);

/* - condition <condition> */
static int
zcondition(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_condition_t *pcond =
        ialloc_struct(gs_condition_t, &st_condition, "zcondition");

    if (pcond == 0)
        return_error(e_VMerror);
    pcond->waiting.head_index = pcond->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, pcond);
    return 0;
}

/* - lock <lock> */
static int
zlock(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_lock_t *plock = ialloc_struct(gs_lock_t, &st_lock, "zlock");

    if (plock == 0)
        return_error(e_VMerror);
    plock->holder_index = 0;
    plock->waiting.head_index = plock->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, plock);
    return 0;
}

/* <lock> <proc> monitor - */
static int
zmonitor(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    gs_lock_t *plock;
    gs_context_t *pctx;
    int code;

    check_stype(op[-1], st_lock);
    check_proc(*op);
    plock = r_ptr(op - 1, gs_lock_t);
    pctx = index_context(current->scheduler, plock->holder_index);
    if_debug1('\'', "[']monitor 0x%lx\n", (ulong)plock);
    if (pctx != 0) {
        if (pctx == current ||
            (iimemory_local->save_level != 0 &&
             pctx->state.memory.space_local ==
                 current->state.memory.space_local)
            )
            return_error(e_invalidcontext);
    }
    /*
     * We push on the e-stack:
     *      The lock object
     *      An e-stack mark with monitor_cleanup, to release the lock
     *        in case of an error
     *      monitor_release, to release the lock in the normal case
     *      The procedure to execute
     */
    check_estack(4);
    code = lock_acquire(op - 1, current);
    if (code != 0) {    /* We didn't acquire the lock.  Re-execute this later. */
        push_op_estack(zmonitor);
        return code;    /* o_reschedule */
    }
    *++esp = op[-1];
    push_mark_estack(es_other, monitor_cleanup);
    push_op_estack(monitor_release);
    *++esp = *op;
    pop(2);
    return o_push_estack;
}
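
/*
 * Example usage from PostScript (a sketch; /l and shareddict are
 * hypothetical names): serialize access to a shared dictionary with a
 * lock created once by the parent context.
 *
 *      /l lock def
 *      ...
 *      l { shareddict /count 2 copy get 1 add put } monitor
 *
 * monitor acquires l, runs the procedure, and releases l even if the
 * procedure terminates with an error (via monitor_cleanup below).
 */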
/* Release the monitor lock when unwinding for an error or exit. */
static int
monitor_cleanup(i_ctx_t *i_ctx_p)
{
    int code = lock_release(esp);

    if (code < 0)
        return code;
    --esp;
    return o_pop_estack;
}
/* Release the monitor lock when the procedure completes. */
static int
monitor_release(i_ctx_t *i_ctx_p)
{
    int code = lock_release(esp - 1);

    if (code < 0)
        return code;
    esp -= 2;
    return o_pop_estack;
}

/* <condition> notify - */
static int
znotify(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_condition_t *pcond;

    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug1('"', "[\"]notify 0x%lx\n", (ulong)pcond);
    pop(1);
    op--;
    if (pcond->waiting.head_index == 0)         /* nothing to do */
        return 0;
    activate_waiting(current->scheduler, &pcond->waiting);
    return zyield(i_ctx_p);
}

/* <lock> <condition> wait - */
static int
zwait(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_lock_t *plock;
    gs_context_t *pctx;
    gs_condition_t *pcond;

    check_stype(op[-1], st_lock);
    plock = r_ptr(op - 1, gs_lock_t);
    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug2('"', "[\"]wait lock 0x%lx, condition 0x%lx\n",
              (ulong)plock, (ulong)pcond);
    pctx = index_context(psched, plock->holder_index);
    if (pctx == 0 || pctx != psched->current ||
        (iimemory_local->save_level != 0 &&
         (r_space(op - 1) == avm_local || r_space(op) == avm_local))
        )
        return_error(e_invalidcontext);
    check_estack(1);
    lock_release(op - 1);
    add_last(psched, &pcond->waiting, pctx);
    push_op_estack(await_lock);
    return o_reschedule;
}
/* When the condition is signaled, wait to reacquire the lock. */
static int
await_lock(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    int code = lock_acquire(op - 1, current);

    if (code == 0) {
        pop(2);
        return 0;
    }
    /* We didn't acquire the lock.  Re-execute the wait. */
    push_op_estack(await_lock);
    return code;                /* o_reschedule */
}
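
/*
 * wait and notify combine into the usual condition-variable pattern
 * (a PostScript sketch; /l, /c, and ready? are hypothetical names):
 *
 *      % consumer:
 *      l { { ready? { exit } if l c wait } loop ... } monitor
 *      % producer:
 *      l { ... make ready? true ... c notify } monitor
 *
 * As with any condition variable, the consumer re-tests its predicate
 * in a loop after each wait, since notify wakes all waiting contexts.
 */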

/* Activate a list of waiting contexts, and reset the list. */
static void
activate_waiting(gs_scheduler_t *psched, ctx_list_t * pcl)
{
    gs_context_t *pctx = index_context(psched, pcl->head_index);
    gs_context_t *next;

    for (; pctx != 0; pctx = next) {
        next = index_context(psched, pctx->next_index);
        add_last(psched, &psched->active, pctx);
    }
    pcl->head_index = pcl->tail_index = 0;
}

/* ------ Miscellaneous operators ------ */

/* - usertime <int> */
static int
zusertime_context(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    os_ptr op = osp;
    long utime = context_usertime();

    push(1);
    if (!current->state.keep_usertime) {
        /*
         * This is the first time this context has executed usertime:
         * we must track its execution time from now on.
         */
        psched->usertime_initial = utime;
        current->state.keep_usertime = true;
    }
    make_int(op, current->state.usertime_total + utime -
             psched->usertime_initial);
    return 0;
}

/* ------ Internal procedures ------ */

/* Create a context. */
static int
context_create(gs_scheduler_t * psched, gs_context_t ** ppctx,
               const gs_dual_memory_t * dmem,
               const gs_context_state_t *i_ctx_p, bool copy_state)
{
    /*
     * Contexts are always created at the outermost save level, so they do
     * not need to be allocated in stable memory for the sake of
     * save/restore.  However, context_reclaim needs to be able to test
     * whether a given context belongs to a given local VM, and allocating
     * contexts in stable local VM avoids the need to scan multiple save
     * levels when making this test.
     */
    gs_memory_t *mem = gs_memory_stable((gs_memory_t *)dmem->space_local);
    gs_context_t *pctx;
    int code;
    long ctx_index;
    gs_context_t **pte;

    pctx = gs_alloc_struct(mem, gs_context_t, &st_context, "context_create");
    if (pctx == 0)
        return_error(e_VMerror);
    if (copy_state) {
        pctx->state = *i_ctx_p;
    } else {
        gs_context_state_t *pctx_st = &pctx->state;

        code = context_state_alloc(&pctx_st, systemdict, dmem);
        if (code < 0) {
            gs_free_object(mem, pctx, "context_create");
            return code;
        }
    }
    ctx_index = gs_next_ids(mem, 1);
    pctx->scheduler = psched;
    pctx->status = cs_active;
    pctx->index = ctx_index;
    pctx->detach = false;
    pctx->saved_local_vm = false;
    pctx->visible = true;
    pctx->next_index = 0;
    pctx->joiner_index = 0;
    pte = &psched->table[ctx_index % CTX_TABLE_SIZE];
    pctx->table_next = *pte;
    *pte = pctx;
    *ppctx = pctx;
    if (gs_debug_c('\'') | gs_debug_c('"'))
        dlprintf2("[']create %ld at 0x%lx\n", ctx_index, (ulong)pctx);
    return 0;
}

/* Check a context ID.  Note that we do not check for context validity. */
static int
context_param(const gs_scheduler_t * psched, os_ptr op, gs_context_t ** ppctx)
{
    gs_context_t *pctx;

    check_type(*op, t_integer);
    pctx = index_context(psched, op->value.intval);
    if (pctx == 0)
        return_error(e_invalidcontext);
    *ppctx = pctx;
    return 0;
}

/* Read the usertime as a single value, in milliseconds. */
static long
context_usertime(void)
{
    long secs_ns[2];

    gp_get_usertime(secs_ns);
    return secs_ns[0] * 1000 + secs_ns[1] / 1000000;
}

/* Destroy a context. */
static void
context_destroy(gs_context_t * pctx)
{
    gs_ref_memory_t *mem = pctx->state.memory.space_local;
    gs_scheduler_t *psched = pctx->scheduler;
    gs_context_t **ppctx = &psched->table[pctx->index % CTX_TABLE_SIZE];

    while (*ppctx != pctx)
        ppctx = &(*ppctx)->table_next;
    *ppctx = (*ppctx)->table_next;
    if (gs_debug_c('\'') | gs_debug_c('"'))
        dlprintf3("[']destroy %ld at 0x%lx, status = %d\n",
                  pctx->index, (ulong)pctx, pctx->status);
    if (!context_state_free(&pctx->state))
        gs_free_object((gs_memory_t *)mem, pctx, "context_destroy");
}

/* Copy the top elements of one stack to another. */
/* Note that this does not push the elements: */
/* the destination stack must have enough space preallocated. */
static void
stack_copy(ref_stack_t * to, const ref_stack_t * from, uint count,
           uint from_index)
{
    long i;

    for (i = (long)count - 1; i >= 0; --i)
        *ref_stack_index(to, i) = *ref_stack_index(from, i + from_index);
}

/* Acquire a lock.  Return 0 if acquired, o_reschedule if not. */
static int
lock_acquire(os_ptr op, gs_context_t * pctx)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);

    if (plock->holder_index == 0) {
        plock->holder_index = pctx->index;
        plock->scheduler = pctx->scheduler;
        return 0;
    }
    add_last(pctx->scheduler, &plock->waiting, pctx);
    return o_reschedule;
}

/* Release a lock.  Return 0 if OK, e_invalidcontext if not. */
static int
lock_release(ref * op)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);
    gs_scheduler_t *psched = plock->scheduler;
    gs_context_t *pctx = index_context(psched, plock->holder_index);

    if (pctx != 0 && pctx == psched->current) {
        plock->holder_index = 0;
        activate_waiting(psched, &plock->waiting);
        return 0;
    }
    return_error(e_invalidcontext);
}

/* ------ Initialization procedure ------ */

/* We need to split the table because of the 16-element limit. */
const op_def zcontext1_op_defs[] = {
    {"0condition", zcondition},
    {"0currentcontext", zcurrentcontext},
    {"1detach", zdetach},
    {"2.fork", zfork},
    {"1join", zjoin},
    {"4.localfork", zlocalfork},
    {"0lock", zlock},
    {"2monitor", zmonitor},
    {"1notify", znotify},
    {"2wait", zwait},
    {"0yield", zyield},
        /* Note that the following replace prior definitions */
        /* in the indicated files: */
    {"0usertime", zusertime_context},   /* zmisc.c */
    op_def_end(0)
};
const op_def zcontext2_op_defs[] = {
        /* Internal operators */
    {"0%fork_done", fork_done},
    {"1%finish_join", finish_join},
    {"0%monitor_cleanup", monitor_cleanup},
    {"0%monitor_release", monitor_release},
    {"2%await_lock", await_lock},
    op_def_end(zcontext_init)
};