/* Copyright (C) 1991, 2000 artofcode LLC.  All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the
  Free Software Foundation; either version 2 of the License, or (at your
  option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License along
  with this program; if not, write to the Free Software Foundation, Inc.,
  59 Temple Place, Suite 330, Boston, MA, 02111-1307.

*/

/*$Id: zcontext.c,v 1.7.2.1.2.1 2003/01/17 00:49:05 giles Exp $ */
/* Display PostScript context operators */
#include "memory_.h"
#include "ghost.h"
#include "gp.h"			/* for usertime */
#include "oper.h"
#include "gsexit.h"
#include "gsgc.h"
#include "gsstruct.h"
#include "gsutil.h"
#include "gxalloc.h"
#include "gxstate.h"		/* for copying gstate stack */
#include "stream.h"		/* for files.h */
#include "files.h"
#include "idict.h"
#include "igstate.h"
#include "icontext.h"
#include "interp.h"
#include "isave.h"
#include "istruct.h"
#include "dstack.h"
#include "estack.h"
#include "ostack.h"
#include "store.h"

/*
 * Define the rescheduling interval.  A value of max_int effectively
 * disables scheduling.  The only reason not to make this const is to
 * allow it to be changed during testing.
 */
private int reschedule_interval = 100;

/* Scheduling hooks in interp.c */
extern int (*gs_interp_reschedule_proc)(P1(i_ctx_t **));
extern int (*gs_interp_time_slice_proc)(P1(i_ctx_t **));
extern int gs_interp_time_slice_ticks;

/* Context structure */
typedef enum {
    cs_active,
    cs_done
} ctx_status_t;
typedef long ctx_index_t;	/* >= 0 */
typedef struct gs_context_s gs_context_t;
typedef struct gs_scheduler_s gs_scheduler_t;

/*
 * If several contexts share local VM, then if any one of them has done an
 * unmatched save, the others are not allowed to run.  We handle this by
 * maintaining the following invariant:
 *      When control reaches the point in the scheduler that decides
 *      what context to run next, then for each group of contexts
 *      sharing local VM, if the save level for that VM is non-zero,
 *      saved_local_vm is only set in the context that has unmatched
 *      saves.
 * We maintain this invariant as follows: when control enters the
 * scheduler, if a context was running, we set its saved_local_vm flag
 * to (save_level > 0).  When selecting a context to run, we ignore
 * contexts where saved_local_vm is false and the local VM save_level > 0.
 */
struct gs_context_s {
    gs_context_state_t state;	/* (must be first for subclassing) */
    /* Private state */
    gs_scheduler_t *scheduler;
    ctx_status_t status;
    ctx_index_t index;		/* > 0 */
    bool detach;		/* true if a detach has been */
				/* executed for this context */
    bool saved_local_vm;	/* (see above) */
    bool visible;		/* during GC, true if visible; */
				/* otherwise, always true */
    ctx_index_t next_index;	/* next context with same status */
				/* (active, waiting on same lock, */
				/* waiting on same condition, */
				/* waiting to be destroyed) */
    ctx_index_t joiner_index;	/* context waiting on a join */
				/* for this one */
    gs_context_t *table_next;	/* hash table chain -- this must be a real */
				/* pointer, for looking up indices */
};
inline private bool
context_is_visible(const gs_context_t *pctx)
{
    return (pctx && pctx->visible);
}
inline private gs_context_t *
visible_context(gs_context_t *pctx)
{
    return (pctx && pctx->visible ? pctx : (gs_context_t *)0);
}

/* GC descriptor */
private
CLEAR_MARKS_PROC(context_clear_marks)
{
    gs_context_t *const pctx = vptr;

    (*st_context_state.clear_marks)
	(&pctx->state, sizeof(pctx->state), &st_context_state);
}
private
ENUM_PTRS_WITH(context_enum_ptrs, gs_context_t *pctx)
ENUM_PREFIX(st_context_state, 2);
case 0: return ENUM_OBJ(pctx->scheduler);
case 1: {
    /* Return the next *visible* context. */
    const gs_context_t *next = pctx->table_next;

    while (next && !next->visible)
	next = next->table_next;
    return ENUM_OBJ(next);
}
ENUM_PTRS_END
private RELOC_PTRS_WITH(context_reloc_ptrs, gs_context_t *pctx)
    RELOC_PREFIX(st_context_state);
    RELOC_VAR(pctx->scheduler);
    /* Don't relocate table_next -- the scheduler object handles that. */
RELOC_PTRS_END
gs_private_st_complex_only(st_context, gs_context_t, "gs_context_t",
	     context_clear_marks, context_enum_ptrs, context_reloc_ptrs, 0);

/*
 * Context list structure.  Note that this uses context indices, not
 * pointers, to avoid having to worry about pointers between local VMs.
 */
typedef struct ctx_list_s {
    ctx_index_t head_index;
    ctx_index_t tail_index;
} ctx_list_t;

/* Condition structure */
typedef struct gs_condition_s {
    ctx_list_t waiting;	/* contexts waiting on this condition */
} gs_condition_t;
gs_private_st_simple(st_condition, gs_condition_t, "conditiontype");

/* Lock structure */
typedef struct gs_lock_s {
    ctx_list_t waiting;		/* contexts waiting for this lock, */
				/* must be first for subclassing */
    ctx_index_t holder_index;	/* context holding the lock, if any */
    gs_scheduler_t *scheduler;
} gs_lock_t;
gs_private_st_ptrs1(st_lock, gs_lock_t, "locktype",
		    lock_enum_ptrs, lock_reloc_ptrs, scheduler);

/* Global state */
/*typedef struct gs_scheduler_s gs_scheduler_t; *//* (above) */
struct gs_scheduler_s {
    gs_context_t *current;
    long usertime_initial;	/* usertime when current started running */
    ctx_list_t active;
    vm_reclaim_proc((*save_vm_reclaim));
    ctx_index_t dead_index;
#define CTX_TABLE_SIZE 19
    gs_context_t *table[CTX_TABLE_SIZE];
};

/* Convert a context index to a context pointer. */
private gs_context_t *
index_context(const gs_scheduler_t *psched, long index)
{
    gs_context_t *pctx;

    if (index == 0)
	return 0;
    pctx = psched->table[index % CTX_TABLE_SIZE];
    while (pctx != 0 && pctx->index != index)
	pctx = pctx->table_next;
    return pctx;
}

/* Structure definition */
gs_private_st_composite(st_scheduler, gs_scheduler_t, "gs_scheduler",
			scheduler_enum_ptrs, scheduler_reloc_ptrs);
/*
 * The only cross-local-VM pointers in the context machinery are the
 * table_next pointers in contexts, and the current and table[] pointers
 * in the scheduler.  We need to handle all of these specially.
 */
private ENUM_PTRS_WITH(scheduler_enum_ptrs, gs_scheduler_t *psched)
{
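    /* Index 0 (case 0 below) enumerates psched->current; indices 1 through
       CTX_TABLE_SIZE enumerate the hash table slots, hence the index -= 1. */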
    index -= 1;
    if (index < CTX_TABLE_SIZE) {
	gs_context_t *pctx = psched->table[index];

	while (pctx && !pctx->visible)
	    pctx = pctx->table_next;
	return ENUM_OBJ(pctx);
    }
    return 0;
}
case 0: return ENUM_OBJ(visible_context(psched->current));
ENUM_PTRS_END
private RELOC_PTRS_WITH(scheduler_reloc_ptrs, gs_scheduler_t *psched)
{
    if (psched->current->visible)
	RELOC_VAR(psched->current);
    {
	int i;

	for (i = 0; i < CTX_TABLE_SIZE; ++i) {
	    gs_context_t **ppctx = &psched->table[i];
	    gs_context_t **pnext;

	    for (; *ppctx; ppctx = pnext) {
		pnext = &(*ppctx)->table_next;
		if ((*ppctx)->visible)
		    RELOC_VAR(*ppctx);
	    }
	}
    }
}
RELOC_PTRS_END

/*
 * The context scheduler requires special handling during garbage
 * collection, since it is the only structure that can legitimately
 * reference objects in multiple local VMs.  To deal with this, we wrap the
 * interpreter's garbage collector with code that prevents it from seeing
 * contexts in other than the current local VM.  ****** WORKS FOR LOCAL GC,
 * NOT FOR GLOBAL ******
 */
private void
context_reclaim(vm_spaces * pspaces, bool global)
{
    /*
     * Search through the registered roots to find the current context.
     * (This is a hack so we can find the scheduler.)
     */
    int i;
    gs_context_t *pctx = 0;	/* = 0 is bogus to pacify compilers */
    gs_scheduler_t *psched = 0;
    gs_ref_memory_t *lmem = 0;	/* = 0 is bogus to pacify compilers */
    chunk_locator_t loc;

    for (i = countof(pspaces->memories.indexed) - 1; psched == 0 && i > 0; --i) {
	gs_ref_memory_t *mem = pspaces->memories.indexed[i];
	const gs_gc_root_t *root = mem->roots;

	for (; root; root = root->next) {
	    if (gs_object_type((gs_memory_t *)mem, *root->p) == &st_context) {
		pctx = *root->p;
		psched = pctx->scheduler;
		lmem = mem;
		break;
	    }
	}
    }

    /* Hide all contexts in other (local) VMs. */
    /*
     * See context_create below for why we look for the context
     * in stable memory.
     */
    loc.memory = (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)lmem);
    loc.cp = 0;
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
	for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
	    pctx->visible = chunk_locate_ptr(pctx, &loc);

#ifdef DEBUG
    if (!psched->current->visible) {
	lprintf("Current context is invisible!\n");
	gs_abort();
    }
#endif

    /* Do the actual garbage collection. */
    psched->save_vm_reclaim(pspaces, global);

    /* Make all contexts visible again. */
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
	for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
	    pctx->visible = true;
}


/* Forward references */
private int context_create(P5(gs_scheduler_t *, gs_context_t **,
			      const gs_dual_memory_t *,
			      const gs_context_state_t *, bool));
private long context_usertime(P0());
private int context_param(P3(const gs_scheduler_t *, os_ptr, gs_context_t **));
private void context_destroy(P1(gs_context_t *));
private void stack_copy(P4(ref_stack_t *, const ref_stack_t *, uint, uint));
private int lock_acquire(P2(os_ptr, gs_context_t *));
private int lock_release(P1(ref *));

/* Internal procedures */
private void
context_load(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]loading %ld\n", pctx->index);
    if ( pctx->state.keep_usertime )
      psched->usertime_initial = context_usertime();
    context_state_load(&pctx->state);
}
private void
context_store(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]storing %ld\n", pctx->index);
    context_state_store(&pctx->state);
    if ( pctx->state.keep_usertime )
      pctx->state.usertime_total +=
        context_usertime() - psched->usertime_initial;
}

/* List manipulation */
private void
add_last(const gs_scheduler_t *psched, ctx_list_t *pl, gs_context_t *pc)
{
    pc->next_index = 0;
    if (pl->head_index == 0)
	pl->head_index = pc->index;
    else
	index_context(psched, pl->tail_index)->next_index = pc->index;
    pl->tail_index = pc->index;
}

/* ------ Initialization ------ */

private int ctx_initialize(P1(i_ctx_t **));
private int ctx_reschedule(P1(i_ctx_t **));
private int ctx_time_slice(P1(i_ctx_t **));
private int
zcontext_init(i_ctx_t *i_ctx_p)
{
    /* Complete initialization after the interpreter is entered. */
    gs_interp_reschedule_proc = ctx_initialize;
    gs_interp_time_slice_proc = ctx_initialize;
    gs_interp_time_slice_ticks = 0;
    return 0;
}
/*
 * The interpreter calls this procedure at the first reschedule point.
 * It completes context initialization.
 */
private int
ctx_initialize(i_ctx_t **pi_ctx_p)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p; /* for gs_imemory */
    gs_ref_memory_t *imem = iimemory_system;
    gs_scheduler_t *psched =
	gs_alloc_struct_immovable((gs_memory_t *) imem, gs_scheduler_t,
				  &st_scheduler, "gs_scheduler");

    psched->current = 0;
    psched->active.head_index = psched->active.tail_index = 0;
    psched->save_vm_reclaim = i_ctx_p->memory.spaces.vm_reclaim;
    i_ctx_p->memory.spaces.vm_reclaim = context_reclaim;
    psched->dead_index = 0;
    memset(psched->table, 0, sizeof(psched->table));
    /* Create an initial context. */
    if (context_create(psched, &psched->current, &gs_imemory, *pi_ctx_p, true) < 0) {
	lprintf("Can't create initial context!");
	gs_abort();
    }
    psched->current->scheduler = psched;
    /* Hook into the interpreter. */
    *pi_ctx_p = &psched->current->state;
    gs_interp_reschedule_proc = ctx_reschedule;
    gs_interp_time_slice_proc = ctx_time_slice;
    gs_interp_time_slice_ticks = reschedule_interval;
    return 0;
}

/* ------ Interpreter interface to scheduler ------ */

/* When an operator decides it is time to run a new context, */
/* it returns o_reschedule.  The interpreter saves all its state in */
/* memory, calls ctx_reschedule, and then loads the state from memory. */
private int
ctx_reschedule(i_ctx_t **pi_ctx_p)
{
    gs_context_t *current = (gs_context_t *)*pi_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

#ifdef DEBUG
    if (*pi_ctx_p != &current->state) {
	lprintf2("current->state = 0x%lx, != i_ctx_p = 0x%lx!\n",
		 (ulong)&current->state, (ulong)*pi_ctx_p);
    }
#endif
    /* If there are any dead contexts waiting to be released, */
    /* take care of that now. */
    while (psched->dead_index != 0) {
	gs_context_t *dead = index_context(psched, psched->dead_index);
	long next_index = dead->next_index;

	if (current == dead) {
	    if_debug1('"', "[\"]storing dead %ld\n", current->index);
	    context_state_store(&current->state);
	    current = 0;
	}
	context_destroy(dead);
	psched->dead_index = next_index;
    }
    /* Update saved_local_vm.  See above for the invariant. */
    if (current != 0)
	current->saved_local_vm =
	    current->state.memory.space_local->saved != 0;
    /* Run the first ready context, taking the 'save' lock into account. */
    {
	gs_context_t *prev = 0;
	gs_context_t *ready;

	for (ready = index_context(psched, psched->active.head_index);;
	     prev = ready, ready = index_context(psched, ready->next_index)
	    ) {
	    if (ready == 0) {
		if (current != 0)
		    context_store(psched, current);
		lprintf("No context to run!");
		return_error(e_Fatal);
	    }
	    /* See above for an explanation of the following test. */
	    if (ready->state.memory.space_local->saved != 0 &&
		!ready->saved_local_vm
		)
		continue;
	    /* Found a context to run. */
	    {
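		/* Unlink the chosen context from the active list. */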
		ctx_index_t next_index = ready->next_index;

		if (prev)
		    prev->next_index = next_index;
		else
		    psched->active.head_index = next_index;
		if (!next_index)
		    psched->active.tail_index = (prev ? prev->index : 0);
	    }
	    break;
	}
	if (ready == current)
	    return 0;		/* no switch */
	/*
	 * Save the state of the current context in psched->current,
	 * if any context is current.
	 */
	if (current != 0)
	    context_store(psched, current);
	psched->current = ready;
	/* Load the state of the new current context. */
	context_load(psched, ready);
	/* Switch the interpreter's context state pointer. */
	*pi_ctx_p = &ready->state;
    }
    return 0;
}

/* If the interpreter wants to time-slice, it saves its state, */
/* calls ctx_time_slice, and reloads its state. */
private int
ctx_time_slice(i_ctx_t **pi_ctx_p)
{
    gs_scheduler_t *psched = ((gs_context_t *)*pi_ctx_p)->scheduler;

    if (psched->active.head_index == 0)
	return 0;
    if_debug0('"', "[\"]time-slice\n");
    add_last(psched, &psched->active, psched->current);
    return ctx_reschedule(pi_ctx_p);
}

/* ------ Context operators ------ */

/* - currentcontext <context> */
private int
zcurrentcontext(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_context_t *current = (const gs_context_t *)i_ctx_p;

    push(1);
    make_int(op, current->index);
    return 0;
}

/* <context> detach - */
private int
zdetach(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_scheduler_t *psched = ((gs_context_t *)i_ctx_p)->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
	return code;
    if_debug2('\'', "[']detach %ld, status = %d\n",
	      pctx->index, pctx->status);
    if (pctx->joiner_index != 0 || pctx->detach)
	return_error(e_invalidcontext);
    switch (pctx->status) {
	case cs_active:
	    pctx->detach = true;
	    break;
	case cs_done:
	    context_destroy(pctx);
    }
    pop(1);
    return 0;
}

private int
    do_fork(P6(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin,
	       const ref * pstdout, uint mcount, bool local)),
    values_older_than(P4(const ref_stack_t * pstack, uint first, uint last,
			 int max_space));
private int
    fork_done(P1(i_ctx_t *)),
    fork_done_with_error(P1(i_ctx_t *)),
    finish_join(P1(i_ctx_t *)),
    reschedule_now(P1(i_ctx_t *));

/* <mark> <obj1> ... <objN> <proc> .fork <context> */
/* <mark> <obj1> ... <objN> <proc> <stdin|null> <stdout|null> */
/*   .localfork <context> */
private int
zfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    ref rnull;

    if (mcount == 0)
	return_error(e_unmatchedmark);
    make_null(&rnull);
    return do_fork(i_ctx_p, op, &rnull, &rnull, mcount, false);
}
private int
zlocalfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    int code;

    if (mcount == 0)
	return_error(e_unmatchedmark);
    code = values_older_than(&o_stack, 1, mcount - 1, avm_local);
    if (code < 0)
	return code;
    code = do_fork(i_ctx_p, op - 2, op - 1, op, mcount - 2, true);
    if (code < 0)
	return code;
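    /*
     * do_fork was given op - 2 as its notional stack top, so the new
     * context index is left two slots higher than it should be: move it
     * down to where the mark was and discard the two leftover slots.
     */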
    op = osp;
    op[-2] = *op;
    pop(2);
    return code;
}

/* Internal procedure to actually do the fork operation. */
private int
do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin, const ref * pstdout,
	uint mcount, bool local)
{
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;
    stream *s;
    gs_dual_memory_t dmem;
    gs_context_t *pctx;
    ref old_userdict, new_userdict;
    int code;

    check_proc(*op);
    if (iimemory_local->save_level)
	return_error(e_invalidcontext);
    if (r_has_type(pstdout, t_null)) {
	code = zget_stdout(i_ctx_p, &s);
	if (code < 0)
	    return code;
	pstdout = &ref_stdio[1];
    } else
	check_read_file(s, pstdout);
    if (r_has_type(pstdin, t_null)) {
	code = zget_stdin(i_ctx_p, &s);
	if (code < 0)
	    return code;
	pstdin = &ref_stdio[0];
    } else
	check_read_file(s, pstdin);
    dmem = gs_imemory;
    if (local) {
	/* Share global VM, private local VM. */
	ref *puserdict;
	uint userdict_size;
	gs_raw_memory_t *parent = iimemory_local->parent;
	gs_ref_memory_t *lmem;
	gs_ref_memory_t *lmem_stable;

	if (dict_find_string(systemdict, "userdict", &puserdict) <= 0 ||
	    !r_has_type(puserdict, t_dictionary)
	    )
	    return_error(e_Fatal);
	old_userdict = *puserdict;
	userdict_size = dict_maxlength(&old_userdict);
	lmem = ialloc_alloc_state(parent, iimemory_local->chunk_size);
	lmem_stable = ialloc_alloc_state(parent, iimemory_local->chunk_size);
	if (lmem == 0 || lmem_stable == 0) {
	    gs_free_object(parent, lmem_stable, "do_fork");
	    gs_free_object(parent, lmem, "do_fork");
	    return_error(e_VMerror);
	}
	lmem->space = avm_local;
	lmem_stable->space = avm_local;
	lmem->stable_memory = (gs_memory_t *)lmem_stable;
	dmem.space_local = lmem;
	code = context_create(psched, &pctx, &dmem, &pcur->state, false);
	if (code < 0) {
	    /****** FREE lmem ******/
	    return code;
	}
	/*
	 * Create a new userdict.  PostScript code will take care of
	 * the rest of the initialization of the new context.
	 */
	code = dict_alloc(lmem, userdict_size, &new_userdict);
	if (code < 0) {
	    context_destroy(pctx);
	    /****** FREE lmem ******/
	    return code;
	}
    } else {
	/* Share global and local VM. */
	code = context_create(psched, &pctx, &dmem, &pcur->state, false);
	if (code < 0) {
	    /****** FREE lmem ******/
	    return code;
	}
	/*
	 * Copy the gstate stack.  The current method is not elegant;
	 * in fact, I'm not entirely sure it works.
	 */
	{
	    int n;
	    const gs_state *old;
	    gs_state *new;

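	    /*
	     * Count how many more gstates the parent has than the new
	     * context, gsave the new context until the depths match,
	     * then copy each level with gs_setgstate.
	     */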
	    for (n = 0, old = igs; old != 0; old = gs_state_saved(old))
		++n;
	    for (old = pctx->state.pgs; old != 0; old = gs_state_saved(old))
		--n;
	    for (; n > 0 && code >= 0; --n)
		code = gs_gsave(pctx->state.pgs);
	    if (code < 0) {
/****** FREE lmem & GSTATES ******/
		return code;
	    }
	    for (old = igs, new = pctx->state.pgs;
		 old != 0 /* (== new != 0) */  && code >= 0;
		 old = gs_state_saved(old), new = gs_state_saved(new)
		)
		code = gs_setgstate(new, old);
	    if (code < 0) {
/****** FREE lmem & GSTATES ******/
		return code;
	    }
	}
    }
    pctx->state.language_level = i_ctx_p->language_level;
    pctx->state.dict_stack.min_size = idict_stack.min_size;
    pctx->state.dict_stack.userdict_index = idict_stack.userdict_index;
    pctx->state.stdio[0] = *pstdin;
    pctx->state.stdio[1] = *pstdout;
    pctx->state.stdio[2] = pcur->state.stdio[2];
    /* Initialize the interpreter stacks. */
    {
	ref_stack_t *dstack = (ref_stack_t *)&pctx->state.dict_stack;
	uint count = ref_stack_count(&d_stack);
	uint copy = (local ? min_dstack_size : count);

	ref_stack_push(dstack, copy);
	stack_copy(dstack, &d_stack, copy, count - copy);
	if (local) {
	    /* Substitute the new userdict for the old one. */
	    long i;

	    for (i = 0; i < copy; ++i) {
		ref *pdref = ref_stack_index(dstack, i);

		if (obj_eq(pdref, &old_userdict))
		    *pdref = new_userdict;
	    }
	}
    }
    {
	ref_stack_t *estack = (ref_stack_t *)&pctx->state.exec_stack;

	ref_stack_push(estack, 3);
	/* fork_done must be executed in both normal and error cases. */
	make_mark_estack(estack->p - 2, es_other, fork_done_with_error);
	make_oper(estack->p - 1, 0, fork_done);
	*estack->p = *op;
    }
    {
	ref_stack_t *ostack = (ref_stack_t *)&pctx->state.op_stack;
	uint count = mcount - 2;

	ref_stack_push(ostack, count);
	stack_copy(ostack, &o_stack, count, osp - op + 1);
    }
    pctx->state.binary_object_format = pcur->state.binary_object_format;
    add_last(psched, &psched->active, pctx);
    pop(mcount - 1);
    op = osp;
    make_int(op, pctx->index);
    return 0;
}

/*
 * Check that all values being passed by fork or join are old enough
 * to be valid in the environment to which they are being transferred.
 */
private int
values_older_than(const ref_stack_t * pstack, uint first, uint last,
		  int next_space)
{
    uint i;

    for (i = first; i <= last; ++i)
	if (r_space(ref_stack_index(pstack, (long)i)) >= next_space)
	    return_error(e_invalidaccess);
    return 0;
}

/* This gets executed when a context terminates normally. */
/****** MUST DO ALL RESTORES ******/
/****** WHAT IF invalidrestore? ******/
private int
fork_done(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;

    if_debug2('\'', "[']done %ld%s\n", pcur->index,
	      (pcur->detach ? ", detached" : ""));
    /*
     * Clear the context's dictionary, execution and graphics stacks
     * now, to retain as little as possible in case of a garbage
     * collection or restore.  We know that fork_done is the
     * next-to-bottom entry on the execution stack.
     */
    ref_stack_pop_to(&d_stack, min_dstack_size);
    pop_estack(&pcur->state, ref_stack_count(&e_stack) - 1);
    gs_grestoreall(igs);
    /*
     * If there are any unmatched saves, we need to execute restores
     * until there aren't.  An invalidrestore is possible and will
     * result in an error termination.
     */
    if (iimemory_local->save_level) {
	ref *prestore;

	if (dict_find_string(systemdict, "restore", &prestore) <= 0) {
	    lprintf("restore not found in systemdict!");
	    return_error(e_Fatal);
	}
	if (pcur->detach) {
	    ref_stack_clear(&o_stack);	/* help avoid invalidrestore */
	    op = osp;
	}
	push(1);
	make_tv(op, t_save, saveid, alloc_save_current_id(&gs_imemory));
	push_op_estack(fork_done);
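	/*
	 * The save object just pushed on the o-stack is the operand for
	 * 'restore'; pushing 'restore' above the re-pushed fork_done makes
	 * the interpreter run the restore first and then re-enter fork_done
	 * to re-check the save level.
	 */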
	++esp;
	ref_assign(esp, prestore);
	return o_push_estack;
    }
    if (pcur->detach) {
	/*
	 * We would like to free the context's memory, but we can't do
	 * it yet, because the interpreter still has references to it.
	 * Instead, queue the context to be freed the next time we
	 * reschedule.  We can, however, clear its operand stack now.
	 */
	ref_stack_clear(&o_stack);
	context_store(psched, pcur);
	pcur->next_index = psched->dead_index;
	psched->dead_index = pcur->index;
	psched->current = 0;
    } else {
	gs_context_t *pctx = index_context(psched, pcur->joiner_index);

	pcur->status = cs_done;
	/* Schedule the context waiting to join this one, if any. */
	if (pctx != 0)
	    add_last(psched, &psched->active, pctx);
    }
    return o_reschedule;
}
/*
 * This gets executed when the stack is being unwound for an error
 * termination.
 */
private int
fork_done_with_error(i_ctx_t *i_ctx_p)
{
/****** WHAT TO DO? ******/
    return fork_done(i_ctx_p);
}

/* <context> join <mark> <obj1> ... <objN> */
private int
zjoin(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
	return code;
    if_debug2('\'', "[']join %ld, status = %d\n",
	      pctx->index, pctx->status);
    /*
     * It doesn't seem logically necessary, but the Red Book says that
     * the context being joined must share both global and local VM with
     * the current context.
     */
    if (pctx->joiner_index != 0 || pctx->detach || pctx == current ||
	pctx->state.memory.space_global !=
	  current->state.memory.space_global ||
	pctx->state.memory.space_local !=
	  current->state.memory.space_local ||
	iimemory_local->save_level != 0
	)
	return_error(e_invalidcontext);
    switch (pctx->status) {
	case cs_active:
	    /*
	     * We need to re-execute the join after the joined
	     * context is done.  Since we can't return both
	     * o_push_estack and o_reschedule, we push a call on
	     * reschedule_now, which accomplishes the latter.
	     */
	    check_estack(2);
	    push_op_estack(finish_join);
	    push_op_estack(reschedule_now);
	    pctx->joiner_index = current->index;
	    return o_push_estack;
	case cs_done:
	    {
		const ref_stack_t *ostack =
		    (ref_stack_t *)&pctx->state.op_stack;
		uint count = ref_stack_count(ostack);

		push(count);
		{
		    ref *rp = ref_stack_index(&o_stack, count);

		    make_mark(rp);
		}
		stack_copy(&o_stack, ostack, count, 0);
		context_destroy(pctx);
	    }
    }
    return 0;
}

/* Finish a deferred join. */
private int
finish_join(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
	return code;
    if_debug2('\'', "[']finish_join %ld, status = %d\n",
	      pctx->index, pctx->status);
    if (pctx->joiner_index != current->index)
	return_error(e_invalidcontext);
    pctx->joiner_index = 0;
    return zjoin(i_ctx_p);
}

/* Reschedule now. */
private int
reschedule_now(i_ctx_t *i_ctx_p)
{
    return o_reschedule;
}

/* - yield - */
private int
zyield(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

    if (psched->active.head_index == 0)
	return 0;
    if_debug0('"', "[\"]yield\n");
    add_last(psched, &psched->active, current);
    return o_reschedule;
}

/* ------ Condition and lock operators ------ */

private int
    monitor_cleanup(P1(i_ctx_t *)),
    monitor_release(P1(i_ctx_t *)),
    await_lock(P1(i_ctx_t *));
private void
     activate_waiting(P2(gs_scheduler_t *, ctx_list_t * pcl));

/* - condition <condition> */
private int
zcondition(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_condition_t *pcond =
	ialloc_struct(gs_condition_t, &st_condition, "zcondition");

    if (pcond == 0)
	return_error(e_VMerror);
    pcond->waiting.head_index = pcond->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, pcond);
    return 0;
}

/* - lock <lock> */
private int
zlock(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_lock_t *plock = ialloc_struct(gs_lock_t, &st_lock, "zlock");

    if (plock == 0)
	return_error(e_VMerror);
    plock->holder_index = 0;
    plock->waiting.head_index = plock->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, plock);
    return 0;
}

/* <lock> <proc> monitor - */
private int
zmonitor(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    gs_lock_t *plock;
    gs_context_t *pctx;
    int code;

    check_stype(op[-1], st_lock);
    check_proc(*op);
    plock = r_ptr(op - 1, gs_lock_t);
    pctx = index_context(current->scheduler, plock->holder_index);
    if_debug1('\'', "[']monitor 0x%lx\n", (ulong) plock);
    if (pctx != 0) {
	if (pctx == current ||
	    (iimemory_local->save_level != 0 &&
	     pctx->state.memory.space_local ==
	     current->state.memory.space_local)
	    )
	    return_error(e_invalidcontext);
    }
    /*
     * We push on the e-stack:
     *      The lock object
     *      An e-stack mark with monitor_cleanup, to release the lock
     *        in case of an error
     *      monitor_release, to release the lock in the normal case
     *      The procedure to execute
     */
    check_estack(4);
    code = lock_acquire(op - 1, current);
    if (code != 0) {		/* We didn't acquire the lock.  Re-execute this later. */
	push_op_estack(zmonitor);
	return code;		/* o_reschedule */
    }
    *++esp = op[-1];
    push_mark_estack(es_other, monitor_cleanup);
    push_op_estack(monitor_release);
    *++esp = *op;
    pop(2);
    return o_push_estack;
}
/* Release the monitor lock when unwinding for an error or exit. */
private int
monitor_cleanup(i_ctx_t *i_ctx_p)
{
    int code = lock_release(esp);

    if (code < 0)
	return code;
    --esp;
    return o_pop_estack;
}
/* Release the monitor lock when the procedure completes. */
private int
monitor_release(i_ctx_t *i_ctx_p)
{
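    /*
     * By the time this operator runs it has been popped from the e-stack,
     * so esp points at the monitor_cleanup mark and the lock ref saved by
     * zmonitor sits just below it; release the lock and pop both entries.
     */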
    int code = lock_release(esp - 1);

    if (code < 0)
	return code;
    esp -= 2;
    return o_pop_estack;
}

/* <condition> notify - */
private int
znotify(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_condition_t *pcond;

    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug1('"', "[\"]notify 0x%lx\n", (ulong) pcond);
    pop(1);
    op--;
    if (pcond->waiting.head_index == 0)	/* nothing to do */
	return 0;
    activate_waiting(current->scheduler, &pcond->waiting);
    return zyield(i_ctx_p);
}

/* <lock> <condition> wait - */
private int
zwait(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_lock_t *plock;
    gs_context_t *pctx;
    gs_condition_t *pcond;

    check_stype(op[-1], st_lock);
    plock = r_ptr(op - 1, gs_lock_t);
    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug2('"', "[\"]wait lock 0x%lx, condition 0x%lx\n",
	      (ulong) plock, (ulong) pcond);
    pctx = index_context(psched, plock->holder_index);
    if (pctx == 0 || pctx != psched->current ||
	(iimemory_local->save_level != 0 &&
	 (r_space(op - 1) == avm_local || r_space(op) == avm_local))
	)
	return_error(e_invalidcontext);
    check_estack(1);
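    /* Release the lock, queue this context on the condition's waiting
       list, and arrange to reacquire the lock when it is reactivated. */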
    lock_release(op - 1);
    add_last(psched, &pcond->waiting, pctx);
    push_op_estack(await_lock);
    return o_reschedule;
}
/* When the condition is signaled, wait to acquire the lock. */
private int
await_lock(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    int code = lock_acquire(op - 1, current);

    if (code == 0) {
	pop(2);
	return 0;
    }
    /* We didn't acquire the lock.  Re-execute the wait. */
    push_op_estack(await_lock);
    return code;		/* o_reschedule */
}

/* Activate a list of waiting contexts, and reset the list. */
private void
activate_waiting(gs_scheduler_t *psched, ctx_list_t * pcl)
{
    gs_context_t *pctx = index_context(psched, pcl->head_index);
    gs_context_t *next;

    for (; pctx != 0; pctx = next) {
	next = index_context(psched, pctx->next_index);
	add_last(psched, &psched->active, pctx);
    }
    pcl->head_index = pcl->tail_index = 0;
}

/* ------ Miscellaneous operators ------ */

/* - usertime <int> */
private int
zusertime_context(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    os_ptr op = osp;
    long utime = context_usertime();

    push(1);
    if (!current->state.keep_usertime) {
	/*
	 * This is the first time this context has executed usertime:
	 * we must track its execution time from now on.
	 */
	psched->usertime_initial = utime;
	current->state.keep_usertime = true;
    }
    make_int(op, current->state.usertime_total + utime -
	     psched->usertime_initial);
    return 0;
}

/* ------ Internal procedures ------ */

/* Create a context. */
private int
context_create(gs_scheduler_t * psched, gs_context_t ** ppctx,
	       const gs_dual_memory_t * dmem,
	       const gs_context_state_t *i_ctx_p, bool copy_state)
{
    /*
     * Contexts are always created at the outermost save level, so they do
     * not need to be allocated in stable memory for the sake of
     * save/restore.  However, context_reclaim needs to be able to test
     * whether a given context belongs to a given local VM, and allocating
     * contexts in stable local VM avoids the need to scan multiple save
     * levels when making this test.
     */
    gs_memory_t *mem = gs_memory_stable((gs_memory_t *)dmem->space_local);
    gs_context_t *pctx;
    int code;
    long ctx_index;
    gs_context_t **pte;

    pctx = gs_alloc_struct(mem, gs_context_t, &st_context, "context_create");
    if (pctx == 0)
	return_error(e_VMerror);
    if (copy_state) {
	pctx->state = *i_ctx_p;
    } else {
	gs_context_state_t *pctx_st = &pctx->state;

	code = context_state_alloc(&pctx_st, systemdict, dmem);
	if (code < 0) {
	    gs_free_object(mem, pctx, "context_create");
	    return code;
	}
    }
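    /* Context indices come from the interpreter's unique-id counter,
       so they are non-zero and not reused during this session. */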
    ctx_index = gs_next_ids(1);
    pctx->scheduler = psched;
    pctx->status = cs_active;
    pctx->index = ctx_index;
    pctx->detach = false;
    pctx->saved_local_vm = false;
    pctx->visible = true;
    pctx->next_index = 0;
    pctx->joiner_index = 0;
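    /* Link the new context at the head of its hash chain. */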
    pte = &psched->table[ctx_index % CTX_TABLE_SIZE];
    pctx->table_next = *pte;
    *pte = pctx;
    *ppctx = pctx;
    if (gs_debug_c('\'') | gs_debug_c('"'))
	dlprintf2("[']create %ld at 0x%lx\n", ctx_index, (ulong) pctx);
    return 0;
}

/* Check a context ID.  Note that we do not check for context validity. */
private int
context_param(const gs_scheduler_t * psched, os_ptr op, gs_context_t ** ppctx)
{
    gs_context_t *pctx;

    check_type(*op, t_integer);
    pctx = index_context(psched, op->value.intval);
    if (pctx == 0)
	return_error(e_invalidcontext);
    *ppctx = pctx;
    return 0;
}

/* Read the usertime as a single value. */
private long
context_usertime(void)
{
    long secs_ns[2];

    gp_get_usertime(secs_ns);
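    /* gp_get_usertime fills in {seconds, nanoseconds}; fold them into a
       single count of milliseconds. */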
    return secs_ns[0] * 1000 + secs_ns[1] / 1000000;
}

/* Destroy a context. */
private void
context_destroy(gs_context_t * pctx)
{
    gs_ref_memory_t *mem = pctx->state.memory.space_local;
    gs_scheduler_t *psched = pctx->scheduler;
    gs_context_t **ppctx = &psched->table[pctx->index % CTX_TABLE_SIZE];

    while (*ppctx != pctx)
	ppctx = &(*ppctx)->table_next;
    *ppctx = (*ppctx)->table_next;
    if (gs_debug_c('\'') | gs_debug_c('"'))
	dlprintf3("[']destroy %ld at 0x%lx, status = %d\n",
		  pctx->index, (ulong) pctx, pctx->status);
    if (!context_state_free(&pctx->state))
	gs_free_object((gs_memory_t *) mem, pctx, "context_destroy");
}

/* Copy the top elements of one stack to another. */
/* Note that this does not push the elements: */
/* the destination stack must have enough space preallocated. */
private void
stack_copy(ref_stack_t * to, const ref_stack_t * from, uint count,
	   uint from_index)
{
    long i;

    for (i = (long)count - 1; i >= 0; --i)
	*ref_stack_index(to, i) = *ref_stack_index(from, i + from_index);
}

/* Acquire a lock.  Return 0 if acquired, o_reschedule if not. */
private int
lock_acquire(os_ptr op, gs_context_t * pctx)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);

    if (plock->holder_index == 0) {
	plock->holder_index = pctx->index;
	plock->scheduler = pctx->scheduler;
	return 0;
    }
    add_last(pctx->scheduler, &plock->waiting, pctx);
    return o_reschedule;
}

/* Release a lock.  Return 0 if OK, e_invalidcontext if not. */
private int
lock_release(ref * op)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);
    gs_scheduler_t *psched = plock->scheduler;
    gs_context_t *pctx = index_context(psched, plock->holder_index);

    if (pctx != 0 && pctx == psched->current) {
	plock->holder_index = 0;
	activate_waiting(psched, &plock->waiting);
	return 0;
    }
    return_error(e_invalidcontext);
}

/* ------ Initialization procedure ------ */

/* We need to split the table because of the 16-element limit. */
const op_def zcontext1_op_defs[] = {
    {"0condition", zcondition},
    {"0currentcontext", zcurrentcontext},
    {"1detach", zdetach},
    {"2.fork", zfork},
    {"1join", zjoin},
    {"4.localfork", zlocalfork},
    {"0lock", zlock},
    {"2monitor", zmonitor},
    {"1notify", znotify},
    {"2wait", zwait},
    {"0yield", zyield},
		/* Note that the following replace prior definitions */
		/* in the indicated files: */
    {"0usertime", zusertime_context},	/* zmisc.c */
    op_def_end(0)
};
const op_def zcontext2_op_defs[] = {
		/* Internal operators */
    {"0%fork_done", fork_done},
    {"1%finish_join", finish_join},
    {"0%monitor_cleanup", monitor_cleanup},
    {"0%monitor_release", monitor_release},
    {"2%await_lock", await_lock},
    op_def_end(zcontext_init)
};