1 /******************************** -*- C -*- ****************************
2  *
3  *	The Smalltalk Virtual Machine itself.
4  *
5  *	This, together with oop.c, is the `bridge' between Smalltalk and
6  *	the underlying machine
7  *
8  *
9  ***********************************************************************/
10 
11 /***********************************************************************
12  *
13  * Copyright 1988,89,90,91,92,94,95,99,2000,2001,2002,2006,2007,2008,2009
14  * Free Software Foundation, Inc.
15  * Written by Steve Byrne.
16  *
17  * This file is part of GNU Smalltalk.
18  *
19  * GNU Smalltalk is free software; you can redistribute it and/or modify it
20  * under the terms of the GNU General Public License as published by the Free
21  * Software Foundation; either version 2, or (at your option) any later
22  * version.
23  *
24  * Linking GNU Smalltalk statically or dynamically with other modules is
25  * making a combined work based on GNU Smalltalk.  Thus, the terms and
26  * conditions of the GNU General Public License cover the whole
27  * combination.
28  *
29  * In addition, as a special exception, the Free Software Foundation
30  * give you permission to combine GNU Smalltalk with free software
31  * programs or libraries that are released under the GNU LGPL and with
32  * independent programs running under the GNU Smalltalk virtual machine.
33  *
34  * You may copy and distribute such a system following the terms of the
35  * GNU GPL for GNU Smalltalk and the licenses of the other code
36  * concerned, provided that you include the source code of that other
37  * code when and as the GNU GPL requires distribution of source code.
38  *
39  * Note that people who make modified versions of GNU Smalltalk are not
40  * obligated to grant this special exception for their modified
41  * versions; it is their choice whether to do so.  The GNU General
42  * Public License gives permission to release a modified version without
43  * this exception; this exception also makes it possible to release a
44  * modified version which carries forward this exception.
45  *
46  * GNU Smalltalk is distributed in the hope that it will be useful, but WITHOUT
47  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
48  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
49  * more details.
50  *
51  * You should have received a copy of the GNU General Public License along with
52  * GNU Smalltalk; see the file COPYING.	 If not, write to the Free Software
53  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
54  *
55  ***********************************************************************/
56 
57 #include "gstpriv.h"
58 #include "lock.h"
59 
60 /* The local regs concept hopes, by caching the values of IP and SP in
61    local register variables, to increase performance.  You only need
62    to export the variables when calling out to routines that might
63    change them and that create objects.  This is because creating
64    objects may trigger the GC, which can change the values of IP and
65    SP (since they point into the object space).  It's easy to deal
66    with that, however: it's just a matter of importing and exporting
67    the registers at the correct places.  For example, stack operations
68    are innocuous, while message sends can result in a GC (because
69    stack chunks are exhausted or because primitive #new is invoked),
70    so they export the registers before the send and import them
71    (possibly with their values changed by the GC) after it.  The code
72    that treats IP and SP as local registers is conditionally compiled
73    in, so that you can disable it easily if necessary; that should
74    hardly ever be needed except for debugging.  */
75 #define LOCAL_REGS
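
/* A minimal sketch of the discipline described above (illustrative
   only; the actual import/export macros are defined together with the
   interpreter plug-ins and their names may differ):

       EXPORT_REGS ();   -- copy the cached ip/sp back to the globals
       ... allocate an object here; this may trigger a GC ...
       IMPORT_REGS ();   -- reload ip/sp, possibly moved by the GC

   Plain pushes and pops between two such points need no import/export
   because they cannot allocate anything and thus cannot trigger a GC.  */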
76 
77 /* By "hard wiring" the definitions of the special math operators
78    (bytecodes 176-191), we get a performance boost of more than 50%.
79    Yes, it means that we cannot redefine + et al for SmallInteger and
80    Float, but I think the trade is worth it.  Besides, the Blue Book
81    does it.  */
82 #define OPEN_CODE_MATH
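
/* Sketch of what open-coding means here (illustrative, not the exact
   interpreter code): the special-selector bytecode for #+ first checks
   whether both operands are SmallIntegers and, if so, computes the sum
   inline, checking for overflow; only when that check or the primitive
   fails does it fall back to a real message send, so user-defined
   methods still apply to non-SmallInteger receivers.  */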
83 
84 /* Pipelining uses separate fetch-decode-execute stages, which is a
85    nice choice for VLIW machines.  It also enables more aggressive
86    caching of global variables.  It is currently enabled for the IA-64
87    only, because it is a win only where we would have had lots of
88    unused instruction scheduling slots and an awful lot of registers. */
89 #if REG_AVAILABILITY == 3
90 #define PIPELINING
91 #endif
92 
93 /* Answer the quantum assigned to each Smalltalk process (in
94    milliseconds) before it is preempted.  Setting this to zero
95    disables preemption until gst_processor_scheduler>>#timeSlice: is
96    invoked.  */
97 #define DEFAULT_PREEMPTION_TIMESLICE 40
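
/* The quantum can also be changed at run time from Smalltalk, e.g.
   with something like "Processor timeSlice: 100" to request a 100 ms
   quantum (illustrative; see ProcessorScheduler>>#timeSlice: for the
   exact behavior).  */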
98 
99 /* This symbol does not control execution speed.  Instead, it causes
100    SEND_MESSAGE to print every message that is ever sent in the
101    SmallInteger(Object)>>#printString form.  Can be useful to find out
102    the last method sent before an error, if the context stack is
103    trashed when the debugger gets control and printing a backtrace is
104    impossible.  */
105 /* #define DEBUG_CODE_FLOW */
106 
107 /* The method cache is a hash table used to cache the most commonly
108    used methods.  Its size is determined by this preprocessor
109    constant.  It is currently 2048, a mostly random choice; you can
110    modify it, but be sure it is a power of two.  Additionally,
111    separately from this, the interpreter caches the last primitive
112    numbers used for sends of #at:, #at:put: and #size, in an attempt
113    to speed up these messages for Arrays, Strings, and ByteArrays.  */
114 #define METHOD_CACHE_SIZE		(1 << 11)
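
/* The power-of-two requirement exists because the cache index is
   obtained by masking a hash value with (METHOD_CACHE_SIZE - 1) rather
   than by a modulo operation (see METHOD_CACHE_HASH below).  With the
   default size the mask is 0x7FF, i.e. only the low 11 bits of the
   hash select the cache entry.  */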
115 
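/* One of these structures is pushed for each nesting of the
   interpreter within C call-outs and call-ins; reentrancy_jmp_buf
   (declared below) points to the innermost one.  See push_jmp_buf,
   pop_jmp_buf and stop_execution for how the fields are used.  */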
116 typedef struct interp_jmp_buf
117 {
118   jmp_buf jmpBuf;
119   struct interp_jmp_buf *next;
120   unsigned short suspended;
121   unsigned char interpreter;
122   unsigned char interrupted;
123   OOP processOOP;
124 }
125 interp_jmp_buf;
126 
127 
128 
129 /* If this is true, for each byte code that is executed, we print on
130    stdout the byte index within the current gst_compiled_method and a
131    decoded interpretation of the byte code.  */
132 int _gst_execution_tracing = 0;
133 
134 /* When this is true, and an interrupt occurs (such as SIGABRT),
135    Smalltalk will terminate itself by dumping core (normally it just
136    prints a backtrace).  */
137 mst_Boolean _gst_make_core_file = false;
138 
139 /* When true, this indicates that there is no top level loop for
140    control to return to, so it causes the system to exit.  */
141 mst_Boolean _gst_non_interactive = true;
142 
143 /* The table of functions that implement the primitives.  */
144 prim_table_entry _gst_primitive_table[NUM_PRIMITIVES];
145 prim_table_entry _gst_default_primitive_table[NUM_PRIMITIVES];
146 
147 /* Some performance counters from the interpreter: these
148    count the number of special returns.  */
149 unsigned long _gst_literal_returns = 0;
150 unsigned long _gst_inst_var_returns = 0;
151 unsigned long _gst_self_returns = 0;
152 
153 /* The number of primitives executed.  */
154 unsigned long _gst_primitives_executed = 0;
155 
156 /* The number of bytecodes executed.  */
157 unsigned long _gst_bytecode_counter = 0;
158 
159 /* The number of method cache misses */
160 unsigned long _gst_cache_misses = 0;
161 
162 /* The number of cache lookups - either hits or misses */
163 unsigned long _gst_sample_counter = 0;
164 
165 /* The OOP for an IdentityDictionary that stores the raw profile.  */
166 OOP _gst_raw_profile = NULL;
167 
168 /* A bytecode counter value used while profiling. */
169 unsigned long _gst_saved_bytecode_counter = 0;
170 
171 #ifdef ENABLE_JIT_TRANSLATION
172 #define method_base		0
173 char *native_ip = NULL;
174 #else /* plain bytecode interpreter */
175 static ip_type method_base;
176 #endif
177 
178 /* Global state
179    The following variables constitute the interpreter's state:
180 
181    ip -- the real memory address of the next byte code to be executed.
182 
183    sp -- the real memory address of the stack that's stored in the
184    currently executing block or method context.
185 
186    _gst_this_method -- a gst_compiled_method or gst_compiled_block
187    that is the currently executing method.
188 
189    _gst_this_context_oop -- a gst_block_context or gst_method_context
190    that indicates the context that the interpreter is currently
191    running in.
192 
193    _gst_temporaries -- physical address of the base of the method
194    temporary variables.  Typically a small number of bytes (a multiple
195    of sizeof (OOP), since it points to OOPs) lower than sp.
196 
197    _gst_literals -- physical address of the base of the method
198    literals.
199 
200    _gst_self -- an OOP that is the current receiver of the current
201    message.  */
202 
203 /* The virtual machine's stack and instruction pointers.  */
204 OOP *sp = NULL;
205 ip_type ip;
206 
207 OOP *_gst_temporaries = NULL;
208 OOP *_gst_literals = NULL;
209 OOP _gst_self = NULL;
210 OOP _gst_this_context_oop = NULL;
211 OOP _gst_this_method = NULL;
212 
213 /* Signal this semaphore at the following instruction.  */
214 static OOP single_step_semaphore = NULL;
215 
216 /* CompiledMethod cache which memoizes the methods and some more
217    information for each class->selector pair.  */
218 static method_cache_entry method_cache[METHOD_CACHE_SIZE] CACHELINE_ALIGNED;
219 
220 /* The number of the last primitive called.  */
221 static int last_primitive;
222 
223 /* A special cache that tries to skip method lookup when #at:, #at:put:
224    and #size are implemented by a class through a primitive and are
225    repeatedly sent to instances of the same class.  Since this is a
226    mini-inline cache it makes no sense when JIT translation is
227    enabled.  */
228 #ifndef ENABLE_JIT_TRANSLATION
229 static OOP at_cache_class;
230 static intptr_t at_cache_spec;
231 
232 static OOP at_put_cache_class;
233 static intptr_t at_put_cache_spec;
234 
235 static OOP size_cache_class;
236 static int size_cache_prim;
237 
238 static OOP class_cache_class;
239 static int class_cache_prim;
240 #endif
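
/* A sketch of how the cache above is meant to work (illustrative, not
   the exact interpreter code): when a #at: send finds that the
   receiver's class is at_cache_class, the interpreter calls
   cached_index_oop_primitive directly with at_cache_spec, skipping the
   method lookup entirely; on a class mismatch it falls back to the
   normal send path and refills the cache.  */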
241 
242 /* Queue for async (outside the interpreter) semaphore signals */
243 static mst_Boolean async_queue_enabled = true;
244 static async_queue_entry queued_async_signals_tail;
245 static async_queue_entry *queued_async_signals = &queued_async_signals_tail;
246 static async_queue_entry *queued_async_signals_sig = &queued_async_signals_tail;
247 
248 /* When not NULL, this causes the byte code interpreter to immediately
249    send the message whose selector is here to the current stack
250    top.  */
251 const char *_gst_abort_execution = NULL;
252 
253 /* Set to non-nil if a process must preempt the current one.  */
254 static OOP switch_to_process;
255 
256 /* Set to true if it is time to switch process in a round-robin
257    time-sharing fashion.  */
258 static mst_Boolean time_to_preempt;
259 
260 /* Used to bail out of a C callout and back to the interpreter.  */
261 static interp_jmp_buf *reentrancy_jmp_buf = NULL;
262 
263 /* When this flag is on and execution tracing is in effect, the top of
264    the stack is printed as well as the byte code.  */
265 static int verbose_exec_tracing = false;
266 
267 /* This is the bridge to the primitive operations in the GNU Smalltalk
268    system.  This function invokes the proper primitive_func with the
269    correct id and the same NUMARGS with which it was invoked.  */
271 static inline intptr_t execute_primitive_operation (int primitive,
272 						    volatile int numArgs);
273 
274 /* Execute a #at: primitive, with arguments REC and IDX, knowing that
275    the receiver's class has an instance specification SPEC.  */
276 static inline mst_Boolean cached_index_oop_primitive (OOP rec,
277 						      OOP idx,
278 						      intptr_t spec);
279 
280 /* Execute a #at:put: primitive, with arguments REC/IDX/VAL, knowing that
281    the receiver's class has an instance specification SPEC.  */
282 static inline mst_Boolean cached_index_oop_put_primitive (OOP rec,
283 							  OOP idx,
284 							  OOP val,
285 							  intptr_t spec);
286 
287 /* Empty the queue of asynchronous calls.  */
288 static void empty_async_queue (void);
289 
290 /* Try to find another process with priority higher than or equal to
291    the active one's.  Return whether there is one.  */
292 static mst_Boolean would_reschedule_process (void);
293 
294 /* Search the ProcessorScheduler's process lists and return the
295    highest priority process different from the current process.  */
296 static OOP highest_priority_process (void);
297 
298 /* Remove the head of the given list (a Semaphore is a subclass of
299    LinkedList) and answer it.  */
300 static OOP remove_first_link (OOP semaphoreOOP);
301 
302 /* Add PROCESSOOP as the head of the given list (a Semaphore is a
303    subclass of LinkedList).  */
304 static void add_first_link (OOP semaphoreOOP,
305 			   OOP processOOP);
306 
307 /* Add PROCESSOOP as the tail of the given list (a Semaphore is a
308    subclass of LinkedList).  */
309 static void add_last_link (OOP semaphoreOOP,
310 			   OOP processOOP);
311 
312 /* Answer the highest priority process different from the current one.
313    Answer nil if there is no other process than the current one.
314    Create a new process that terminates execution if there is no
315    runnable process (which should never happen, because there is always the
316    idle process).  */
317 static OOP next_scheduled_process (void);
318 
319 /* Create a Process that is running at userSchedulingPriority on the
320    CONTEXTOOP context, and answer it.  */
321 static OOP create_callin_process (OOP contextOOP);
322 
323 /* Set a timer at the end of which we'll preempt the current process.  */
324 static void set_preemption_timer (void);
325 
326 /* Same as _gst_parse_stream, but creating a reentrancy_jmp_buf.  Returns
327    true if interrupted. */
328 static mst_Boolean parse_stream_with_protection (mst_Boolean method);
329 
330 /* Put the given process to sleep by rotating the list of processes for
331    PROCESSOOP's priority (i.e. it was the head of the list and becomes
332    the tail).  */
333 static void sleep_process (OOP processOOP);
334 
335 /* Yield control from the active process.  */
336 static void active_process_yield (void);
337 
338 /* Sets flags so that the interpreter switches to PROCESSOOP at the
339    next sequence point.  Unless PROCESSOOP is already active, in which
340    case nothing happens, the process is made the head of the list of
341    processes for PROCESSOOP's priority.  Return PROCESSOOP.  */
342 static OOP activate_process (OOP processOOP);
343 
344 /* Restore the virtual machine's state from the ContextPart OOP.  */
345 static void resume_suspended_context (OOP oop);
346 
347 /* Save the virtual machine's state into the suspended Process and
348    ContextPart objects, and load them from NEWPROCESS and from
349    NEWPROCESS's suspendedContext.  The Processor (the only instance
350    of ProcessorScheduler) is also updated accordingly.  */
351 static void change_process_context (OOP newProcess);
352 
353 /* Mark the semaphores attached to the process system (asynchronous
354    events, the signal queue, and if any the process which we'll
355    switch to at the next sequence point).  */
356 static void mark_semaphore_oops (void);
357 
358 /* Copy the semaphores attached to the process system (asynchronous
359    events, the signal queue, and if any the process which we'll
360    switch to at the next sequence point).  */
361 static void copy_semaphore_oops (void);
362 
363 /* Suspend execution of PROCESSOOP.  */
364 static void suspend_process (OOP processOOP);
365 
366 /* Resume execution of PROCESSOOP.  If it must preempt the currently
367    running process, or if ALWAYSPREEMPT is true, put to sleep the
368    active process and activate PROCESSOOP instead; if it must not,
369    make it the head of the process list for its priority, so that
370    it will be picked once higher priority processes all go to sleep.
371 
372    If PROCESSOOP is terminating, answer false.  If PROCESSOOP can
373    be restarted or at least put back in the process list for its
374    priority, answer true.  */
375 static mst_Boolean resume_process (OOP processOOP,
376 				   mst_Boolean alwaysPreempt);
377 
378 /* Answer whether PROCESSOOP is ready to execute (neither terminating,
379    nor suspended, nor waiting on a semaphore).  */
380 static mst_Boolean is_process_ready (OOP processOOP) ATTRIBUTE_PURE;
381 
382 /* Answer whether any processes are queued in the PROCESSLISTOOP
383    (which can be a LinkedList or a Semaphore).  */
384 static inline mst_Boolean is_empty (OOP processListOOP) ATTRIBUTE_PURE;
385 
386 /* Answer whether the process is terminating, that is, it does not
387    have an execution context to resume execution from.  */
388 static inline mst_Boolean is_process_terminating (OOP processOOP) ATTRIBUTE_PURE;
389 
390 /* Answer the process that is scheduled to run (that is, the
391    executing process or, if any, the process that is scheduled
392    to start execution at the next sequence point).  */
393 static inline OOP get_scheduled_process (void) ATTRIBUTE_PURE;
394 
395 /* Answer the active process (that is, the process that executed
396    the last bytecode).  */
397 static inline OOP get_active_process (void) ATTRIBUTE_PURE;
398 
399 /* Create a new Semaphore OOP with SIGNALS signals on it and return it.  */
400 static inline OOP semaphore_new (int signals);
401 
402 /* Pop NUMARGS items from the stack and put them into a newly
403    created Array object, which is then returned.  */
404 static inline OOP create_args_array (int numArgs);
405 
406 /* This is the equivalent of SEND_MESSAGE, but is for blocks.  The
407    block context that is to be the receiver of the "value" message
408    should be the NUMARGS-th into the stack.  SP is set to the top of
409    the arguments in the block context, which have been copied out of
410    the caller's context.
411 
412    The block should accept between NUMARGS - CULL_UP_TO and
413    NUMARGS arguments.  If this is not true (failure) return true;
414    on success return false.  */
415 static mst_Boolean send_block_value (int numArgs, int cull_up_to);
416 
417 /* This is a kind of simplified _gst_send_message_internal that,
418    instead of setting up a context for a particular receiver, stores
419    information on the lookup into METHODDATA.  Unlike
420    _gst_send_message_internal, this function is generic and valid for
421    both the interpreter and the JIT compiler.  */
422 static mst_Boolean lookup_method (OOP sendSelector,
423 				  method_cache_entry *methodData,
424 				  int sendArgs,
425 				  OOP method_class);
426 
427 /* This tenures context objects from the stack to the context pools
428    (see below for a description).  */
429 static void empty_context_stack (void);
430 
431 /* This allocates a new context pool, eventually triggering a GC once
432    no more pools are available.  */
433 static void alloc_new_chunk (void);
434 
435 /* This allocates a context object which is SIZE words big from
436    a pool, allocating one if the current pool is full.  */
437 static inline gst_method_context alloc_stack_context (int size);
438 
439 /* This frees the most recently allocated stack context from the
440    current context pool.  It is called when unwinding.  */
441 static inline void dealloc_stack_context (gst_context_part context);
442 
443 /* This allocates a new context of SIZE, prepares an OOP for it
444    (taking it from the LIFO_CONTEXTS array that is defined below),
445    and pops SENDARGS arguments from the current context.  Only the
446    parentContext field of the newly-allocated context is initialized,
447    because the other fields can be deduced from the execution state:
448    these other fields instead are filled in the parent context since
449    the execution state will soon be overwritten.  */
450 static inline gst_method_context activate_new_context (int size,
451 						       int sendArgs);
452 
453 /* Push the ARGS topmost words below the stack pointer, and then TEMPS
454    nil objects, onto the stack of CONTEXT.  */
455 static inline void prepare_context (gst_context_part context,
456 				    int args,
457 				    int temps);
458 
459 /* Return from the current context and restore the virtual machine's
460    status (ip, sp, _gst_this_method, _gst_self, ...).  */
461 static void __attribute__ ((__always_inline__)) unwind_context (void);
462 
463 /* Check whether it is true that sending SENDSELECTOR to RECEIVER
464    accepts NUMARGS arguments.  Note that the RECEIVER is only used to
465    do a quick check in the method cache before examining the selector
466    itself; in other words, true is returned even if a message is not
467    understood by the receiver, provided that NUMARGS matches the
468    number of arguments expected by the selector (1 if binary, else the
469    number of colons).  If you don't know a receiver you can just pass
470    _gst_nil_oop or directly call _gst_selector_num_args.  */
471 static inline mst_Boolean check_send_correctness (OOP receiver,
472 						  OOP sendSelector,
473 						  int numArgs);
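
/* For example, #printString expects 0 arguments, a binary selector
   such as #+ expects 1, and #at:put: expects 2 (one per colon).  */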
474 
475 /* Unwind the contexts up until the caller of the method that
476    created the block context, no matter how many levels of message
477    sending are between where we currently are and the context that
478    we are going to return from.
479 
480    Note that unwind_method is only called inside `dirty' (or `full')
481    block closures, hence the context we return from can be found by
482    following OUTERCONTEXT links starting from the currently executing
483    context, and until we reach a MethodContext.  */
484 static mst_Boolean unwind_method (void);
485 
486 /* Unwind up to context returnContextOOP, carefully examining the
487    method call stack.  That is, we examine each context and we only
488    deallocate those that, during their execution, did not create a
489    block context; the others need to be marked as returned.  We
490    continue up the call chain until we finally reach methodContextOOP
491    or an unwind method.  In this case the non-unwind contexts between
492    the unwind method and the returnContextOOP must be removed from the
493    chain.  */
494 static mst_Boolean unwind_to (OOP returnContextOOP);
495 
496 /* Arrange things so that all the non-unwinding contexts up to
497    returnContextOOP aren't executed.  For block contexts this can
498    be done simply by removing them from the chain, but method
499    context must stay there so that we can do non-local returns
500    from them!  For this reason, method contexts are flagged as
501    disabled and unwind_context takes care of skipping them when
502    doing a local return.  */
503 static mst_Boolean disable_non_unwind_contexts (OOP returnContextOOP);
504 
505 /* Called to preempt the current process after a specified amount
506    of time has been spent in the GNU Smalltalk interpreter.  */
507 #ifdef ENABLE_PREEMPTION
508 static RETSIGTYPE preempt_smalltalk_process (int sig);
509 #endif
510 
511 /* Push an execution state for process PROCESSOOP.  The process is
512    used for two reasons: 1) it is suspended if there is a call-in
513    while the execution state is on the top of the stack; 2) it is
514    sent #userInterrupt if the user presses Ctrl-C.  */
515 static void push_jmp_buf (interp_jmp_buf *jb,
516 			  int for_interpreter,
517 			  OOP processOOP);
518 
519 /* Pop an execution state.  Return true if the interruption has to
520    be propagated up.  */
521 static mst_Boolean pop_jmp_buf (void);
522 
523 /* Jump out of the top execution state.  This is used by C call-out
524    primitives to jump out repeatedly until a Smalltalk process is
525    encountered and terminated.  */
526 static void stop_execution (void);
527 
528 /* Pick a process that is the highest-priority process different from
529    the currently executing one, and schedule it for execution after
530    the next sequence point.  */
531 #define ACTIVE_PROCESS_YIELD() \
532   activate_process(next_scheduled_process())
533 
534 /* Answer an OOP for a Smalltalk object of class Array, holding the
535    different process lists for each priority.  */
536 #define GET_PROCESS_LISTS() \
537   (((gst_processor_scheduler)OOP_TO_OBJ(_gst_processor_oop))->processLists)
538 
539 /* Tell the interpreter that special actions are needed as soon as a
540    sequence point is reached.  */
541 #ifdef ENABLE_JIT_TRANSLATION
542 mst_Boolean _gst_except_flag = false;
543 #define SET_EXCEPT_FLAG(x) \
544   do { _gst_except_flag = (x); __sync_synchronize (); } while (0)
545 
546 #else
547 static void * const *global_monitored_bytecodes;
548 static void * const *global_normal_bytecodes;
549 static void * const *dispatch_vec;
550 
551 #define SET_EXCEPT_FLAG(x) do { \
552   dispatch_vec = (x) ? global_monitored_bytecodes : global_normal_bytecodes; \
553   __sync_synchronize (); \
554 } while (0)
555 #endif
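
/* The plain interpreter dispatches bytecodes through dispatch_vec, so
   pointing it at the "monitored" table makes every bytecode check for
   pending actions (process switches, asynchronous signals, ...) before
   executing, while pointing it back at the normal table removes that
   overhead completely.  This is only a summary of the mechanism; the
   details live in interp-bc.inl.  */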
556 
557 /* Answer a hash value for a send of the SENDSELECTOR message, when
558    the CompiledMethod is found in class METHODCLASS.  */
559 #define METHOD_CACHE_HASH(sendSelector, methodClass)			 \
560     (( ((intptr_t)(sendSelector)) ^ ((intptr_t)(methodClass)) / (2 * sizeof (PTR))) \
561       & (METHOD_CACHE_SIZE - 1))
562 
563 /* Answer the flags of CONTEXT.  A context is a MethodContext whenever
564    it has some SmallInteger flags (and not the pointer to the outer
565    context) in its last instance variable.  */
566 #define CONTEXT_FLAGS(context) \
567   ( ((gst_method_context)(context)) ->flags)
568 
569 /* Answer the sender of CONTEXTOOP.  */
570 #define PARENT_CONTEXT(contextOOP) \
571   ( ((gst_method_context) OOP_TO_OBJ (contextOOP)) ->parentContext)
572 
573 /* Set whether the old context was a trusted one.  Untrusted contexts
574    are those whose receiver or sender is untrusted.  */
575 #define UPDATE_CONTEXT_TRUSTFULNESS(contextOOP, parentContextOOP) \
576   MAKE_OOP_UNTRUSTED (contextOOP, \
577     IS_OOP_UNTRUSTED (_gst_self) | \
578     IS_OOP_UNTRUSTED (parentContextOOP));
579 
580 /* Set whether the current context is an untrusted one.  Untrusted contexts
581    are those whose receiver or sender is untrusted.  */
582 #define IS_THIS_CONTEXT_UNTRUSTED() \
583   (UPDATE_CONTEXT_TRUSTFULNESS(_gst_this_context_oop, \
584 			       PARENT_CONTEXT (_gst_this_context_oop)) \
585      & F_UNTRUSTED)
586 
587 
588 /* Context management
589 
590    The contexts make up a linked list.  Their structure is:
591 
592       +-----------------------------------+
593       | parentContext			  |
594       +-----------------------------------+	THESE ARE CONTEXT'S
595       | misc. information		  |	FIXED INSTANCE VARIABLES
596       | ...				  |
597       +-----------------------------------+-------------------------------
598       | args				  |
599       | ...				  |	THESE ARE THE CONTEXT'S
600       +-----------------------------------+	INDEXED INSTANCE VARIABLES
601       | temps				  |
602       | ...				  |
603       +-----------------------------------+
604       | stack				  |
605       | ...				  |
606       +-----------------------------------+
607 
608    The space labeled "misc. information" is initialized when
609    thisContext is pushed or when the method becomes the parent context
610    of a newly activated context.  It contains, among other things, the
611    pointer to the CompiledMethod or CompiledBlock for the context.
612    That's comparable to leaf procedure optimization in RISC
613    processors.
614 
615    Contexts are special in that they are not created immediately in
616    the main heap.  Instead they have three life phases:
617 
618    a) their OOPs are allocated on a stack, and their object data is
619    allocated outside of the main heap.  This state lasts until the
620    context returns (in which case the OOP can be reused) or until a
621    reference to the context is made (in which case we swiftly move all
622    the OOPs to the OOP table, leaving the object data outside the
623    heap).
624 
625    b) their OOPs are allocated in the main OOP table, their object
626    data still resides outside of the main heap.  Unlike the main heap,
627    this area grows more slowly, but like the main heap, a GC is
628    triggered when it's full.  Upon GC, most context objects (which are
629    generated by `full' or `dirty' blocks) that could not be discarded
630    when they were returned from are reclaimed, and the others are
631    tenured, moving them to the main heap.
632 
633    c) their OOPs are allocated in the main OOP table, their object
634    data stays in the main heap.  And in this state they will remain
635    until they become garbage and are reclaimed.  */
636 
637 /* I made CHUNK_SIZE a nice power of two.  Allocate 64KB at a time,
638    never use more than 3 MB; anyway these are here so behavior can be
639    fine tuned.  MAX_LIFO_DEPTH is enough to have room for an entire
640    stack chunk and avoid testing for overflows in lifo_contexts.  */
641 #define CHUNK_SIZE			16384
642 #define MAX_CHUNKS_IN_MEMORY		48
643 #define MAX_LIFO_DEPTH			(CHUNK_SIZE / CTX_SIZE(0))
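
/* Illustrative arithmetic, assuming 4-byte OOPs (a 32-bit build):
   CHUNK_SIZE is expressed in words, so each chunk takes
   SIZE_TO_BYTES (CHUNK_SIZE) = 16384 * 4 = 64KB, and the pools are
   capped at MAX_CHUNKS_IN_MEMORY * 64KB = 48 * 64KB = 3MB.  On a
   64-bit build both figures double.  */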
644 
645 /* CHUNK points to an item of CHUNKS.  CUR_CHUNK_BEGIN is equal
646    to *CHUNK (i.e. points to the base of the current chunk) and
647    CUR_CHUNK_END is equal to CUR_CHUNK_BEGIN + CHUNK_SIZE.  */
648 static gst_context_part cur_chunk_begin = NULL, cur_chunk_end = NULL;
649 static gst_context_part chunks[MAX_CHUNKS_IN_MEMORY] CACHELINE_ALIGNED;
650 static gst_context_part *chunk = chunks - 1;
651 
652 /* These are used for OOPs allocated in a LIFO manner.  A context is
653    kept on this stack as long as it generates only clean blocks, as
654    long as it resides in the same chunk as the newest object created,
655    and as long as no context switches happen since the time the
656    process was created.  FREE_LIFO_CONTEXT points to just after the
657    top of the stack.  */
658 static struct oop_s lifo_contexts[MAX_LIFO_DEPTH] CACHELINE_ALIGNED;
659 static OOP free_lifo_context = lifo_contexts;
660 
661 /* Include `plug-in' modules for the appropriate interpreter.
662 
663    A plug-in must define
664    - _gst_send_message_internal
665    - _gst_send_method
666    - send_block_value
667    - _gst_interpret
668    - GET_CONTEXT_IP
669    - SET_THIS_METHOD
670    - _gst_validate_method_cache_entries
671    - any others that are needed by the particular implementation (e.g.
672      lookup_native_ip for the JIT plugin)
673 
674    They are included rather than linked to for speed (they need access
675    to lots of inlines and macros).  */
676 
677 #include "prims.inl"
678 
679 #ifdef ENABLE_JIT_TRANSLATION
680 #include "interp-jit.inl"
681 #else
682 #include "interp-bc.inl"
683 #endif
684 
685 
686 void
687 _gst_empty_context_pool (void)
688 {
689   if (*chunks)
690     {
691       chunk = chunks;
692       cur_chunk_begin = *chunk;
693       cur_chunk_end = (gst_context_part) (
694         ((char *) cur_chunk_begin) + SIZE_TO_BYTES(CHUNK_SIZE));
695     }
696   else
697     {
698       chunk = chunks - 1;
699       cur_chunk_begin = cur_chunk_end = NULL;
700     }
701 }
702 
703 void
704 empty_context_stack (void)
705 {
706   OOP contextOOP, last, oop;
707   gst_method_context context;
708 
709   /* printf("[[[[ Gosh, not lifo anymore! (free = %p, base = %p)\n",
710      free_lifo_context, lifo_contexts); */
711   if COMMON (free_lifo_context != lifo_contexts)
712     for (free_lifo_context = contextOOP = lifo_contexts,
713          last = _gst_this_context_oop,
714          context = (gst_method_context) OOP_TO_OBJ (contextOOP);;)
715       {
716 	oop = alloc_oop (context, contextOOP->flags | _gst_mem.active_flag);
717 
718         /* Fill the object's uninitialized fields. */
719         context->objClass = CONTEXT_FLAGS (context) & MCF_IS_METHOD_CONTEXT
720           ? _gst_method_context_class
721 	  : _gst_block_context_class;
722 
723 #ifndef ENABLE_JIT_TRANSLATION
724 	/* This field is unused without the JIT compiler, but it must
725 	   be initialized when a context becomes a fully formed
726 	   Smalltalk object.  We do that here.  Note that we need the
727 	   field so that the same image is usable with or without the
728 	   JIT compiler.  */
729 	context->native_ip = DUMMY_NATIVE_IP;
730 #endif
731 
732 	/* The last context is not referenced anywhere, so we're done
733 	   with it.  */
734 	if (contextOOP++ == last)
735 	  {
736             _gst_this_context_oop = oop;
737 	    break;
738 	  }
739 
740 	/* Else we redirect its sender field to the main OOP table */
741 	context = (gst_method_context) OOP_TO_OBJ (contextOOP);
742 	context->parentContext = oop;
743       }
744   else
745     {
746       if (IS_NIL (_gst_this_context_oop))
747 	return;
748 
749       context = (gst_method_context) OOP_TO_OBJ (_gst_this_context_oop);
750     }
751 
752   /* When a context gets out of the context stack it must be a fully
753      formed Smalltalk object.  These fields were left uninitialized in
754      _gst_send_message_internal and send_block_value -- set them here.  */
755   context->method = _gst_this_method;
756   context->receiver = _gst_self;
757   context->spOffset = FROM_INT (sp - context->contextStack);
758   context->ipOffset = FROM_INT (ip - method_base);
759 
760   UPDATE_CONTEXT_TRUSTFULNESS (_gst_this_context_oop, context->parentContext);
761 
762   /* Even if the JIT is active, the current context might have no
763      attached native_ip -- in fact it has one only if we are being
764      called from activate_new_context -- so we have to `invent'
765      one. We test for a valid native_ip first, though; this test must
766      have no false positives, i.e. it won't ever overwrite a valid
767      native_ip, and won't leave a bogus OOP for the native_ip.  */
768   if (!IS_INT (context->native_ip))
769     context->native_ip = DUMMY_NATIVE_IP;
770 }
771 
772 void
773 alloc_new_chunk (void)
774 {
775   if UNCOMMON (++chunk >= &chunks[MAX_CHUNKS_IN_MEMORY])
776     {
777       /* No more chunks available - GC */
778       _gst_scavenge ();
779       return;
780     }
781 
782   empty_context_stack ();
783 
784   /* Allocate memory only the first time we're using the chunk.
785      _gst_empty_context_pool resets the status but doesn't free
786      the memory.  */
787   if UNCOMMON (!*chunk)
788     *chunk = (gst_context_part) xcalloc (1, SIZE_TO_BYTES (CHUNK_SIZE));
789 
790   cur_chunk_begin = *chunk;
791   cur_chunk_end = (gst_context_part) (
792     ((char *) cur_chunk_begin) + SIZE_TO_BYTES(CHUNK_SIZE));
793 }
794 
795 gst_method_context
796 alloc_stack_context (int size)
797 {
798   gst_method_context newContext;
799 
800   size = CTX_SIZE (size);
801   for (;;)
802     {
803       newContext = (gst_method_context) cur_chunk_begin;
804       cur_chunk_begin += size;
805       if COMMON (cur_chunk_begin < cur_chunk_end)
806         {
807 	  newContext->objSize = FROM_INT (size);
808 	  return (newContext);
809 	}
810 
811       /* Not enough room in the current chunk */
812       alloc_new_chunk ();
813     }
814 }
815 
816 gst_method_context
817 activate_new_context (int size,
818 		      int sendArgs)
819 {
820   OOP oop;
821   gst_method_context newContext;
822   gst_method_context thisContext;
823 
824 #ifndef OPTIMIZE
825   if (IS_NIL (_gst_this_context_oop))
826     {
827       printf ("Somebody forgot _gst_prepare_execution_environment!\n");
828       abort ();
829     }
830 #endif
831 
832   /* We cannot overflow lifo_contexts, because it is designed to
833      contain all of the contexts in a chunk, and we empty lifo_contexts
834      when we exhaust a chunk.  So we can get the oop the easy way.  */
835   newContext = alloc_stack_context (size);
836   oop = free_lifo_context++;
837 
838   /* printf("[[[[ Context (size %d) allocated at %p (oop = %p)\n",
839      size, newContext, oop); */
840   SET_OOP_OBJECT (oop, newContext);
841 
842   newContext->parentContext = _gst_this_context_oop;
843 
844   /* save old context information */
845   /* leave sp pointing to receiver, which is replaced on return with
846      value */
847   thisContext = (gst_method_context) OOP_TO_OBJ (_gst_this_context_oop);
848   thisContext->method = _gst_this_method;
849   thisContext->receiver = _gst_self;
850   thisContext->spOffset =
851     FROM_INT ((sp - thisContext->contextStack) - sendArgs);
852   thisContext->ipOffset = FROM_INT (ip - method_base);
853 
854   UPDATE_CONTEXT_TRUSTFULNESS (_gst_this_context_oop, thisContext->parentContext);
855   _gst_this_context_oop = oop;
856 
857   return (newContext);
858 }
859 
860 void
861 dealloc_stack_context (gst_context_part context)
862 {
863 #ifndef OPTIMIZE
864   if (free_lifo_context == lifo_contexts
865       || (OOP_TO_OBJ (free_lifo_context - 1) != (gst_object) context))
866     {
867       _gst_errorf ("Deallocating a non-LIFO context!!!");
868       abort ();
869     }
870 #endif
871 
872   cur_chunk_begin = context;
873   free_lifo_context--;
874 }
875 
876 void
877 prepare_context (gst_context_part context,
878 		 int args,
879 		 int temps)
880 {
881   REGISTER (1, OOP *stackBase);
882   _gst_temporaries = stackBase = context->contextStack;
883   if (args)
884     {
885       REGISTER (2, OOP * src);
886       src = &sp[1 - args];
887       stackBase[0] = src[0];
888       if (args > 1)
889         {
890           stackBase[1] = src[1];
891           if (args > 2)
892             {
893               stackBase[2] = src[2];
894               if (args > 3)
895                 memcpy (&stackBase[3], &src[3], (args - 3) * sizeof (OOP));
896             }
897         }
898       stackBase += args;
899     }
900   if (temps)
901     {
902       REGISTER (2, OOP src);
903       src = _gst_nil_oop;
904       stackBase[0] = src;
905       if (temps > 1)
906         {
907           stackBase[1] = src;
908           if (temps > 2)
909             {
910               int n = 2;
911               do
912                 stackBase[n] = src;
913               while UNCOMMON (n++ < temps);
914             }
915         }
916       stackBase += temps;
917     }
918   sp = stackBase - 1;
919 }
920 
921 mst_Boolean
922 lookup_method (OOP sendSelector,
923 	       method_cache_entry *methodData,
924 	       int sendArgs,
925 	       OOP method_class)
926 {
927   inc_ptr inc;
928   OOP argsArrayOOP;
929 
930   if (_gst_find_method (method_class, sendSelector, methodData))
931     return (true);
932 
933   inc = INC_SAVE_POINTER ();
934   argsArrayOOP = create_args_array (sendArgs);
935   INC_ADD_OOP (argsArrayOOP);
936   PUSH_OOP (_gst_message_new_args (sendSelector, argsArrayOOP));
937   INC_RESTORE_POINTER (inc);
938   return (false);
939 }
940 
941 mst_Boolean
942 _gst_find_method (OOP receiverClass,
943 	          OOP sendSelector,
944 	          method_cache_entry *methodData)
945 {
946   OOP method_class = receiverClass;
947   for (; !IS_NIL (method_class);
948        method_class = SUPERCLASS (method_class))
949     {
950       OOP methodOOP =
951 	_gst_find_class_method (method_class, sendSelector);
952       if (!IS_NIL (methodOOP))
953 	{
954 	  methodData->startingClassOOP = receiverClass;
955 	  methodData->selectorOOP = sendSelector;
956 	  methodData->methodOOP = methodOOP;
957 	  methodData->methodClassOOP = method_class;
958 	  methodData->methodHeader = GET_METHOD_HEADER (methodOOP);
959 
960 #ifdef ENABLE_JIT_TRANSLATION
961 	  /* Force the translation to be looked up the next time
962 	     this entry is used for a message send.  */
963 	  methodData->receiverClass = NULL;
964 #endif
965 	  _gst_cache_misses++;
966 	  return (true);
967 	}
968     }
969 
970   return (false);
971 }
972 
973 OOP
974 create_args_array (int numArgs)
975 {
976   gst_object argsArray;
977   OOP argsArrayOOP;
978   int i;
979 
980   argsArray = new_instance_with (_gst_array_class, numArgs, &argsArrayOOP);
981   for (i = 0; i < numArgs; i++)
982     argsArray->data[i] = STACK_AT (numArgs - i - 1);
983 
984   POP_N_OOPS (numArgs);
985   return argsArrayOOP;
986 }
987 
988 mst_Boolean
989 check_send_correctness (OOP receiver,
990 			OOP sendSelector,
991 			int numArgs)
992 {
993   int hashIndex;
994   method_cache_entry *methodData;
995   OOP receiverClass;
996 
997   receiverClass = OOP_INT_CLASS (receiver);
998   hashIndex = METHOD_CACHE_HASH (sendSelector, receiverClass);
999   methodData = &method_cache[hashIndex];
1000 
1001   if (methodData->selectorOOP != sendSelector
1002       || methodData->startingClassOOP != receiverClass)
1003     {
1004       /* If we do not find the method, don't worry and fire
1005 	 #doesNotUnderstand:  */
1006       if (!_gst_find_method (receiverClass, sendSelector, methodData))
1007 	return (true);
1008 
1009       methodData = &method_cache[hashIndex];
1010     }
1011 
1012   return (methodData->methodHeader.numArgs == numArgs);
1013 }
1014 
1015 void
1016 unwind_context (void)
1017 {
1018   gst_method_context oldContext, newContext;
1019   OOP newContextOOP;
1020 
1021   newContextOOP = _gst_this_context_oop;
1022   newContext = (gst_method_context) OOP_TO_OBJ (newContextOOP);
1023 
1024   do
1025     {
1026       oldContext = newContext;
1027 
1028       /* Descend in the chain...  */
1029       newContextOOP = oldContext->parentContext;
1030 
1031       if COMMON (free_lifo_context > lifo_contexts)
1032         dealloc_stack_context ((gst_context_part) oldContext);
1033 
1034       /* This context cannot be deallocated in a LIFO way.  We must
1035          keep it around so that the blocks it created can reference
1036          arguments and temporaries in it. Method contexts, however,
1037          need to be marked as non-returnable so that attempts to
1038          return from them to an undefined place will lose; doing
1039          that for block contexts too, we skip a test and are also
1040          able to garbage collect more context objects.  And doing
1041          that for _all_ method contexts is more icache-friendly.  */
1042       oldContext->parentContext = _gst_nil_oop;
1043 
1044       newContext = (gst_method_context) OOP_TO_OBJ (newContextOOP);
1045     }
1046   while UNCOMMON (CONTEXT_FLAGS (newContext)
1047 		  == (MCF_IS_METHOD_CONTEXT | MCF_IS_DISABLED_CONTEXT));
1048 
1049   /* Clear the bit so that we return here just once.
1050      This makes this absurd snippet work:
1051 
1052 	^[ [ 12 ] ensure: [ ^34 ] ] ensure: [ 56 ]!
1053 
1054      If it were not for this statement, the inner #ensure:
1055      would resume after the ^34 block exited, and would answer
1056      12 (the result of the evaluation of the receiver of the
1057      inner #ensure:).
1058 
1059      HACK ALERT!!  This is actually valid only for method contexts
1060      but I carefully put the modified bits in the low bits so that
1061      they are already zero for block contexts.  */
1062   CONTEXT_FLAGS (newContext) &= ~(MCF_IS_DISABLED_CONTEXT |
1063 				  MCF_IS_UNWIND_CONTEXT);
1064 
1065   _gst_this_context_oop = newContextOOP;
1066   _gst_temporaries = newContext->contextStack;
1067   sp = newContext->contextStack + TO_INT (newContext->spOffset);
1068   _gst_self = newContext->receiver;
1069 
1070   SET_THIS_METHOD (newContext->method, GET_CONTEXT_IP (newContext));
1071 }
1072 
1073 
1074 
1075 mst_Boolean
1076 unwind_method (void)
1077 {
1078   OOP oldContextOOP, newContextOOP;
1079   gst_block_context newContext;
1080 
1081   /* We're executing in a block context and an explicit return is
1082      encountered.  This means that we are to return from the caller of
1083      the method that created the block context, no matter how many
1084      levels of message sending are between where we currently are and
1085      our parent method context.  */
1086 
1087   newContext = (gst_block_context) OOP_TO_OBJ (_gst_this_context_oop);
1088   do
1089     {
1090       newContextOOP = newContext->outerContext;
1091       newContext = (gst_block_context) OOP_TO_OBJ (newContextOOP);
1092     }
1093   while UNCOMMON (!(CONTEXT_FLAGS (newContext) & MCF_IS_METHOD_CONTEXT));
1094 
1095   /* test for block return in a dead method */
1096   if UNCOMMON (IS_NIL (newContext->parentContext))
1097     {
1098       /* We are to create a reference to thisContext, so empty the
1099          stack.  */
1100       empty_context_stack ();
1101       oldContextOOP = _gst_this_context_oop;
1102 
1103       /* Just unwind to the caller, and prepare to send a message to
1104          the context */
1105       unwind_context ();
1106       SET_STACKTOP (oldContextOOP);
1107 
1108       return (false);
1109     }
1110 
1111   return unwind_to (newContext->parentContext);
1112 }
1113 
1114 
1115 mst_Boolean
1116 unwind_to (OOP returnContextOOP)
1117 {
1118   OOP oldContextOOP, newContextOOP;
1119   gst_method_context oldContext, newContext;
1120 
1121   empty_context_stack ();
1122 
1123   newContextOOP = _gst_this_context_oop;
1124   newContext = (gst_method_context) OOP_TO_OBJ (newContextOOP);
1125 
1126   while (newContextOOP != returnContextOOP)
1127     {
1128       oldContextOOP = newContextOOP;
1129       oldContext = newContext;
1130 
1131       /* Descend in the chain...  */
1132       newContextOOP = oldContext->parentContext;
1133       newContext = (gst_method_context) OOP_TO_OBJ (newContextOOP);
1134 
1135       /* Check if we got to an unwinding context (#ensure:).  */
1136       if UNCOMMON (CONTEXT_FLAGS (newContext) & MCF_IS_UNWIND_CONTEXT)
1137         {
1138 	  mst_Boolean result;
1139 	  _gst_this_context_oop = oldContextOOP;
1140 
1141 	  /* _gst_this_context_oop is the context above the
1142 	     one we return to.   We only unwind up to the #ensure:
1143 	     context.  */
1144 	  result = disable_non_unwind_contexts (returnContextOOP);
1145 
1146 	  unwind_context ();
1147 	  return result;
1148 	}
1149 
1150       /* This context cannot be deallocated in a LIFO way.  We must
1151          keep it around so that the blocks it created can reference
1152          arguments and temporaries in it. Method contexts, however,
1153          need to be marked as non-returnable so that attempts to
1154          return from them to an undefined place will lose; doing
1155          that for block contexts too, we skip a test and are also
1156          able to garbage collect more context objects.  */
1157       oldContext->parentContext = _gst_nil_oop;
1158     }
1159 
1160   /* Clear the bit so that we return here just once.
1161      This makes this absurd snippet work:
1162 
1163         ^[ [ 12 ] ensure: [ ^34 ] ] ensure: [ 56 ]!
1164 
1165      If it were not for this statement, the inner #ensure:
1166      would resume after the ^34 block exited, and would answer
1167      12 (the result of the evaluation of the receiver of the
1168      inner #ensure:).
1169 
1170      HACK ALERT!!  This is actually valid only for method contexts
1171      but I carefully put the modified bits in the low bits so that
1172      they are already zero for block contexts.  */
1173   CONTEXT_FLAGS (newContext) &= ~(MCF_IS_DISABLED_CONTEXT |
1174                                   MCF_IS_UNWIND_CONTEXT);
1175 
1176   _gst_this_context_oop = newContextOOP;
1177   _gst_temporaries = newContext->contextStack;
1178   sp = newContext->contextStack + TO_INT (newContext->spOffset);
1179   _gst_self = newContext->receiver;
1180 
1181   SET_THIS_METHOD (newContext->method, GET_CONTEXT_IP (newContext));
1182   return (true);
1183 }
1184 
1185 mst_Boolean
1186 disable_non_unwind_contexts (OOP returnContextOOP)
1187 {
1188   OOP newContextOOP, *chain;
1189   gst_method_context oldContext, newContext;
1190 
1191   newContextOOP = _gst_this_context_oop;
1192   newContext = (gst_method_context) OOP_TO_OBJ (newContextOOP);
1193   chain = &newContext->parentContext;
1194 
1195   for (;;)
1196     {
1197       oldContext = newContext;
1198 
1199       /* Descend in the chain...  */
1200       newContextOOP = oldContext->parentContext;
1201       newContext = (gst_method_context) OOP_TO_OBJ (newContextOOP);
1202 
1203       if (!(CONTEXT_FLAGS (oldContext) & MCF_IS_METHOD_CONTEXT))
1204         /* This context cannot be deallocated in a LIFO way.  Setting
1205 	   its parent context field to nil makes us able to garbage
1206 	   collect more context objects.  */
1207         oldContext->parentContext = _gst_nil_oop;
1208 
1209       if (IS_NIL (newContextOOP))
1210 	{
1211 	  *chain = newContextOOP;
1212 	  return (false);
1213 	}
1214 
1215       if (newContextOOP == returnContextOOP)
1216 	{
1217 	  *chain = newContextOOP;
1218 	  chain = &newContext->parentContext;
1219 	  break;
1220 	}
1221 
1222       if (CONTEXT_FLAGS (newContext) & MCF_IS_METHOD_CONTEXT)
1223 	{
1224 	  CONTEXT_FLAGS (newContext) |= MCF_IS_DISABLED_CONTEXT;
1225 	  *chain = newContextOOP;
1226 	  chain = &newContext->parentContext;
1227 	}
1228     }
1229 
1230   /* Skip any disabled methods.  */
1231   while UNCOMMON (CONTEXT_FLAGS (newContext)
1232                   == (MCF_IS_METHOD_CONTEXT | MCF_IS_DISABLED_CONTEXT))
1233     {
1234       oldContext = newContext;
1235 
1236       /* Descend in the chain...  */
1237       newContextOOP = oldContext->parentContext;
1238       newContext = (gst_method_context) OOP_TO_OBJ (newContextOOP);
1239 
1240       /* This context cannot be deallocated in a LIFO way.  We must
1241          keep it around so that the blocks it created can reference
1242          arguments and temporaries in it. Method contexts, however,
1243          need to be marked as non-returnable so that attempts to
1244          return from them to an undefined place will lose; doing
1245          that for block contexts too, we skip a test and are also
1246          able to garbage collect more context objects.  */
1247       oldContext->parentContext = _gst_nil_oop;
1248     }
1249 
1250   *chain = newContext->parentContext;
1251   return (true);
1252 }
1253 
1254 
1255 OOP
1256 _gst_make_block_closure (OOP blockOOP)
1257 {
1258   gst_block_closure closure;
1259   gst_compiled_block block;
1260   OOP closureOOP;
1261 
1262   closure = (gst_block_closure) new_instance (_gst_block_closure_class,
1263                                               &closureOOP);
1264 
1265   /* Check how clean the block is: if it only accesses self,
1266      we can afford not moving the context chain to the heap
1267      and setting the outerContext to nil.  */
1268   block = (gst_compiled_block) OOP_TO_OBJ (blockOOP);
1269 
1270   if (block->header.clean > 1)
1271     {
1272       empty_context_stack ();
1273       closure->outerContext = _gst_this_context_oop;
1274     }
1275   else
1276     closure->outerContext = _gst_nil_oop;
1277 
1278   closure->block = blockOOP;
1279   closure->receiver = _gst_self;
1280   return (closureOOP);
1281 }
1282 
1283 
1284 void
1285 change_process_context (OOP newProcess)
1286 {
1287   OOP processOOP;
1288   gst_process process;
1289   gst_processor_scheduler processor;
1290   mst_Boolean enable_async_queue;
1291 
1292   switch_to_process = _gst_nil_oop;
1293 
1294   /* save old context information */
1295   if (!IS_NIL (_gst_this_context_oop))
1296     empty_context_stack ();
1297 
1298   /* printf("Switching to process %#O at priority %#O\n",
1299     ((gst_process) OOP_TO_OBJ (newProcess))->name,
1300     ((gst_process) OOP_TO_OBJ (newProcess))->priority); */
1301 
1302   processor = (gst_processor_scheduler) OOP_TO_OBJ (_gst_processor_oop);
1303   processOOP = processor->activeProcess;
1304   if (processOOP != newProcess)
1305     {
1306       process = (gst_process) OOP_TO_OBJ (processOOP);
1307 
1308       if (!IS_NIL (processOOP) && !is_process_terminating (processOOP))
1309         process->suspendedContext = _gst_this_context_oop;
1310 
1311       processor->activeProcess = newProcess;
1312       process = (gst_process) OOP_TO_OBJ (newProcess);
1313       enable_async_queue = IS_NIL (process->interrupts)
1314 		           || TO_INT (process->interrupts) >= 0;
1315 
1316       resume_suspended_context (process->suspendedContext);
1317 
1318       /* Interrupt-enabling cannot be controlled globally from Smalltalk,
1319          but only on a per-Process basis.  You might think that this leaves
1320          much to be desired, because you could actually reenter a Process
1321          with interrupts disabled, if it yields control to another which
1322          has interrupts enabled.  In principle, this is true, but consider
1323          that when interrupts are disabled you can yield in three ways only:
1324          - by doing Process>>#suspend -- and then your process will not
1325            be scheduled
1326          - by doing ProcessorScheduler>>#yield -- and then I'll tell you that
1327            I gave you enough rope to shoot yourself on your feet, and that's
1328            what you did
1329          - by doing Semaphore>>#wait -- and then most likely your blocking
1330            section has terminated (see RecursionLock>>#critical: for an
1331            example).  */
1332 
1333       async_queue_enabled = enable_async_queue;
1334     }
1335 }
1336 
1337 void
1338 resume_suspended_context (OOP oop)
1339 {
1340   gst_method_context thisContext;
1341 
1342   _gst_this_context_oop = oop;
1343   thisContext = (gst_method_context) OOP_TO_OBJ (oop);
1344   sp = thisContext->contextStack + TO_INT (thisContext->spOffset);
1345   SET_THIS_METHOD (thisContext->method, GET_CONTEXT_IP (thisContext));
1346 
1347 #if ENABLE_JIT_TRANSLATION
1348   ip = TO_INT (thisContext->ipOffset);
1349 #endif
1350 
1351   _gst_temporaries = thisContext->contextStack;
1352   _gst_self = thisContext->receiver;
1353   free_lifo_context = lifo_contexts;
1354 }
1355 
1356 
1357 
1358 OOP
1359 get_active_process (void)
1360 {
1361   if (!IS_NIL (switch_to_process))
1362     return (switch_to_process);
1363   else
1364     return (get_scheduled_process ());
1365 }
1366 
1367 OOP
1368 get_scheduled_process (void)
1369 {
1370   gst_processor_scheduler processor;
1371 
1372   processor =
1373     (gst_processor_scheduler) OOP_TO_OBJ (_gst_processor_oop);
1374 
1375   return (processor->activeProcess);
1376 }
1377 
1378 static void
1379 remove_process_from_list (OOP processOOP)
1380 {
1381   gst_semaphore sem;
1382   gst_process process, lastProcess;
1383   OOP lastProcessOOP;
1384 
1385   if (IS_NIL (processOOP))
1386     return;
1387 
1388   process = (gst_process) OOP_TO_OBJ (processOOP);
1389   if (!IS_NIL (process->myList))
1390     {
1391       /* Disconnect the process from its list.  */
1392       sem = (gst_semaphore) OOP_TO_OBJ (process->myList);
1393       if (sem->firstLink == processOOP)
1394         {
1395           sem->firstLink = process->nextLink;
1396           if (sem->lastLink == processOOP)
1397             /* It was the only process in the list */
1398             sem->lastLink = _gst_nil_oop;
1399         }
1400       else
1401         {
1402           /* Find the new prev node */
1403           lastProcessOOP = sem->firstLink;
1404           lastProcess = (gst_process) OOP_TO_OBJ (lastProcessOOP);
1405           while (lastProcess->nextLink != processOOP)
1406             {
1407               lastProcessOOP = lastProcess->nextLink;
1408               lastProcess = (gst_process) OOP_TO_OBJ (lastProcessOOP);
1409             }
1410 
1411           lastProcess->nextLink = process->nextLink;
1412 	  if (sem->lastLink == processOOP)
1413             sem->lastLink = lastProcessOOP;
1414         }
1415 
1416       process->myList = _gst_nil_oop;
1417     }
1418 
1419   process->nextLink = _gst_nil_oop;
1420 }
1421 
1422 void
1423 add_first_link (OOP semaphoreOOP,
1424 		OOP processOOP)
1425 {
1426   gst_semaphore sem;
1427   gst_process process;
1428 
1429   process = (gst_process) OOP_TO_OBJ (processOOP);
1430   remove_process_from_list (processOOP);
1431 
1432   sem = (gst_semaphore) OOP_TO_OBJ (semaphoreOOP);
1433   process->myList = semaphoreOOP;
1434   process->nextLink = sem->firstLink;
1435 
1436   sem->firstLink = processOOP;
1437   if (IS_NIL (sem->lastLink))
1438     sem->lastLink = processOOP;
1439 }
1440 
1441 void
1442 suspend_process (OOP processOOP)
1443 {
1444   remove_process_from_list (processOOP);
1445   if (get_scheduled_process() == processOOP)
1446     ACTIVE_PROCESS_YIELD ();
1447 }
1448 
1449 void
1450 _gst_terminate_process (OOP processOOP)
1451 {
1452   gst_process process;
1453 
1454   process = (gst_process) OOP_TO_OBJ (processOOP);
1455   process->suspendedContext = _gst_nil_oop;
1456   suspend_process (processOOP);
1457 }
1458 
1459 void
1460 add_last_link (OOP semaphoreOOP,
1461 	       OOP processOOP)
1462 {
1463   gst_semaphore sem;
1464   gst_process process, lastProcess;
1465   OOP lastProcessOOP;
1466 
1467   process = (gst_process) OOP_TO_OBJ (processOOP);
1468   remove_process_from_list (processOOP);
1469 
1470   sem = (gst_semaphore) OOP_TO_OBJ (semaphoreOOP);
1471   process->myList = semaphoreOOP;
1472   process->nextLink = _gst_nil_oop;
1473 
1474   if (IS_NIL (sem->lastLink))
1475     sem->firstLink = sem->lastLink = processOOP;
1476   else
1477     {
1478       lastProcessOOP = sem->lastLink;
1479       lastProcess = (gst_process) OOP_TO_OBJ (lastProcessOOP);
1480       lastProcess->nextLink = processOOP;
1481       sem->lastLink = processOOP;
1482     }
1483 }
1484 
1485 mst_Boolean
1486 is_empty (OOP processListOOP)
1487 {
1488   gst_semaphore processList;
1489 
1490   processList = (gst_semaphore) OOP_TO_OBJ (processListOOP);
1491   return (IS_NIL (processList->firstLink));
1492 }
1493 
1494 /* TODO: this was taken from VMpr_Processor_yield.  Try to use
1495    the macro ACTIVE_PROCESS_YIELD instead?  */
1496 
1497 void
1498 active_process_yield (void)
1499 {
1500   OOP activeProcess = get_active_process ();
1501   OOP newProcess = highest_priority_process();
1502 
1503   if (is_process_ready (activeProcess))
1504     sleep_process (activeProcess);	/* move to the end of the list */
1505 
1506   activate_process (IS_NIL (newProcess) ? activeProcess : newProcess);
1507 }
1508 
1509 
1510 mst_Boolean
1511 _gst_sync_signal (OOP semaphoreOOP, mst_Boolean incr_if_empty)
1512 {
1513   gst_semaphore sem;
1514   gst_process process;
1515   gst_method_context suspendedContext;
1516   OOP processOOP;
1517   int spOffset;
1518 
1519   sem = (gst_semaphore) OOP_TO_OBJ (semaphoreOOP);
1520   do
1521     {
1522       /* printf ("signal %O %O\n", semaphoreOOP, sem->firstLink); */
1523       if (is_empty (semaphoreOOP))
1524 	{
1525 	  if (incr_if_empty)
1526 	    sem->signals = INCR_INT (sem->signals);
1527 	  return false;
1528 	}
1529 
1530       processOOP = remove_first_link (semaphoreOOP);
1531 
1532       /* If they terminated this process, well, try another */
1533     }
1534   while (!resume_process (processOOP, false));
1535 
1536   /* Put the semaphore at the stack top as a marker that the
1537      wait was not interrupted.  This assumes that _gst_sync_wait
1538      is only called from primitives.  */
1539   process = (gst_process) OOP_TO_OBJ (processOOP);
1540   suspendedContext = (gst_method_context) OOP_TO_OBJ (process->suspendedContext);
1541   spOffset = TO_INT (suspendedContext->spOffset);
1542   suspendedContext->contextStack[spOffset] = semaphoreOOP;
1543   return true;
1544 }
1545 
1546 void
1547 _gst_do_async_signal (OOP semaphoreOOP)
1548 {
1549   _gst_sync_signal (semaphoreOOP, true);
1550 }
1551 
1552 void
1553 _gst_do_async_signal_and_unregister (OOP semaphoreOOP)
1554 {
1555   _gst_sync_signal (semaphoreOOP, true);
1556   _gst_unregister_oop (semaphoreOOP);
1557 }
1558 
1559 /* Async-signal-safe version, does no allocation.  Using an atomic operation
1560    is still the simplest choice, but on top of that we check that the entry
1561    is not already in the list.  Also, the datum and next fields are NULLed
1562    automatically when the call is made.  */
1563 void
1564 _gst_async_call_internal (async_queue_entry *e)
1565 {
1566   /* For async-signal safety, we need to check that the entry is not
1567      already in the list.  Checking that atomically with CAS is the
1568      simplest way.  */
1569   do
1570     if (__sync_val_compare_and_swap(&e->next, NULL, queued_async_signals_sig))
1571       return;
1572   while (!__sync_bool_compare_and_swap (&queued_async_signals_sig, e->next, e));
1573   SET_EXCEPT_FLAG (true);
1574 }
1575 
1576 void
1577 _gst_async_call (void (*func) (OOP), OOP arg)
1578 {
1579   /* Thread-safe version for the masses.  This lockless stack
1580      is reversed in the interpreter loop to get FIFO behavior.  */
1581   async_queue_entry *sig = xmalloc (sizeof (async_queue_entry));
1582   sig->func = func;
1583   sig->data = arg;
1584 
1585   do
1586     sig->next = queued_async_signals;
1587   while (!__sync_bool_compare_and_swap (&queued_async_signals,
1588                                         sig->next, sig));
1589   _gst_wakeup ();
1590   SET_EXCEPT_FLAG (true);
1591 }
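
/* Typical usage, as in _gst_async_signal below: from another thread or an
   event callback, queue a semaphore signal that the interpreter delivers
   when it drains the queue (see empty_async_queue below):

       _gst_async_call (_gst_do_async_signal, semaphoreOOP);  */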
1592 
1593 mst_Boolean
1594 _gst_have_pending_async_calls ()
1595 {
1596   return (queued_async_signals != &queued_async_signals_tail
1597           || queued_async_signals_sig != &queued_async_signals_tail);
1598 }
1599 
1600 void
1601 empty_async_queue ()
1602 {
1603   async_queue_entry *sig, *sig_reversed;
1604 
1605   /* Process a batch of asynchronous requests.  These are pushed
1606      in LIFO order by _gst_async_call.  By reversing the list
1607      in place before walking it, we get FIFO order.  */
1608   sig = __sync_swap (&queued_async_signals, &queued_async_signals_tail);
1609   sig_reversed = &queued_async_signals_tail;
1610   while (sig != &queued_async_signals_tail)
1611     {
1612       async_queue_entry *next = sig->next;
1613       sig->next = sig_reversed;
1614       sig_reversed = sig;
1615       sig = next;
1616     }
1617 
1618   sig = sig_reversed;
1619   while (sig != &queued_async_signals_tail)
1620     {
1621       async_queue_entry *next = sig->next;
1622       sig->func (sig->data);
1623       free (sig);
1624       sig = next;
1625     }
1626 
1627   /* For async-signal-safe processing, we need to avoid entering
1628      the same item twice into the list.  So we use NEXT to mark
1629      items that have been added...  */
1630   sig = __sync_swap (&queued_async_signals_sig, &queued_async_signals_tail);
1631   sig_reversed = &queued_async_signals_tail;
1632   while (sig != &queued_async_signals_tail)
1633     {
1634       async_queue_entry *next = sig->next;
1635       sig->next = sig_reversed;
1636       sig_reversed = sig;
1637       sig = next;
1638     }
1639 
1640   sig = sig_reversed;
1641   while (sig != &queued_async_signals_tail)
1642     {
1643       async_queue_entry *next = sig->next;
1644       void (*func) (OOP) = sig->func;
1645       OOP data = sig->data;
1646       barrier ();
1647 
1648       sig->data = NULL;
1649       barrier ();
1650 
1651       /* ... and we only NULL it after a signal handler can start
1652          writing to it.  */
1653       sig->next = NULL;
1654       barrier ();
1655       func (data);
1656       sig = next;
1657     }
1658 }
1659 
1660 void
1661 _gst_async_signal (OOP semaphoreOOP)
1662 {
1663   _gst_async_call (_gst_do_async_signal, semaphoreOOP);
1664 }
1665 
1666 void
1667 _gst_async_signal_and_unregister (OOP semaphoreOOP)
1668 {
1669   _gst_async_call (_gst_do_async_signal_and_unregister, semaphoreOOP);
1670 }
1671 
1672 void
1673 _gst_sync_wait (OOP semaphoreOOP)
1674 {
1675   gst_semaphore sem;
1676 
1677   sem = (gst_semaphore) OOP_TO_OBJ (semaphoreOOP);
1678   if (TO_INT (sem->signals) <= 0)
1679     {
1680       /* Have to suspend.  Prepare return value for #wait and move
1681          this process to the end of the list.
1682 
1683          Tweaking the stack top means that this function should only
1684 	 be called from a primitive.  */
1685       SET_STACKTOP (_gst_nil_oop);
1686       add_last_link (semaphoreOOP, get_active_process ());
1687       if (IS_NIL (ACTIVE_PROCESS_YIELD ()))
1688         {
1689 	  printf ("No runnable process");
1690 	  activate_process (_gst_prepare_execution_environment ());
1691 	}
1692     }
1693   else
1694     sem->signals = DECR_INT (sem->signals);
1695 
1696   /* printf ("wait %O %O\n", semaphoreOOP, sem->firstLink); */
1697 }
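
/* Taken together, _gst_sync_wait and _gst_sync_signal implement the #wait
   primitive's return convention: the stack top of the suspending process is
   set to nil here before it is queued on the semaphore, and is overwritten
   with the semaphore itself by _gst_sync_signal when the wait completes
   normally, so a nil result marks an interrupted wait.  */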
1698 
1699 OOP
1700 remove_first_link (OOP semaphoreOOP)
1701 {
1702   gst_semaphore sem;
1703   gst_process process;
1704   OOP processOOP;
1705 
1706   sem = (gst_semaphore) OOP_TO_OBJ (semaphoreOOP);
1707   processOOP = sem->firstLink;
1708   process = (gst_process) OOP_TO_OBJ (processOOP);
1709 
1710   sem = (gst_semaphore) OOP_TO_OBJ (semaphoreOOP);
1711   sem->firstLink = process->nextLink;
1712   if (IS_NIL (sem->firstLink))
1713     sem->lastLink = _gst_nil_oop;
1714 
1715   /* Unlink the process from any list it was in! */
1716   process->myList = _gst_nil_oop;
1717   process->nextLink = _gst_nil_oop;
1718   return (processOOP);
1719 }
1720 
1721 mst_Boolean
1722 resume_process (OOP processOOP,
1723 		mst_Boolean alwaysPreempt)
1724 {
1725   int priority;
1726   OOP activeOOP;
1727   OOP processLists;
1728   OOP processList;
1729   gst_process process, active;
1730   mst_Boolean ints_enabled;
1731 
1732   /* 2002-12-19: tried get_active_process instead of get_scheduled_process.  */
1733   activeOOP = get_active_process ();
1734   active = (gst_process) OOP_TO_OBJ (activeOOP);
1735   process = (gst_process) OOP_TO_OBJ (processOOP);
1736   priority = TO_INT (process->priority);
1737 
1738   /* As a special exception, don't preempt a process that has disabled
1739      interrupts. ### this behavior is currently disabled.  */
1740   ints_enabled = IS_NIL (active->interrupts)
1741 	         || TO_INT(active->interrupts) <= 0;
1742 
1743   /* resume_process is also used when changing the priority of a ready/active
1744      process.  In this case, first remove the process from its current list.  */
1745   if (processOOP == activeOOP)
1746     {
1747       assert (!alwaysPreempt);
1748       remove_process_from_list (processOOP);
1749     }
1750   else if (priority >= TO_INT (active->priority) /* && ints_enabled */ )
1751     alwaysPreempt = true;
1752 
1753   if (IS_NIL (processOOP) || is_process_terminating (processOOP))
1754     /* The process was terminated - nothing to resume, fail */
1755     return (false);
1756 
1757   /* We have no active process, activate this guy instantly.  */
1758   if (IS_NIL (activeOOP))
1759     {
1760       activate_process (processOOP);
1761       return (true);
1762     }
1763 
1764   processLists = GET_PROCESS_LISTS ();
1765   processList = ARRAY_AT (processLists, priority);
1766 
1767   if (alwaysPreempt)
1768     {
1769       /* We're resuming a process with an *equal or higher* priority, so put
1770          the current one to sleep and activate the new one */
1771       sleep_process (activeOOP);
1772       activate_process (processOOP);
1773     }
1774   else
1775     {
1776       /* This process has a lower priority than the active one, so the
1777          policy is that it doesn't preempt the currently running one.
1778          It must, however, be the first in its priority queue -- so don't
1779          put it to sleep.  */
1780       add_first_link (processList, processOOP);
1781     }
1782 
1783   return (true);
1784 }
1785 
1786 OOP
1787 activate_process (OOP processOOP)
1788 {
1789   gst_process process;
1790   int priority;
1791   OOP processLists;
1792   OOP processList;
1793 
1794   if (IS_NIL (processOOP))
1795     return processOOP;
1796 
1797   /* 2002-12-19: tried get_active_process instead of get_scheduled_process.  */
1798   if (processOOP != get_active_process ())
1799     {
1800       process = (gst_process) OOP_TO_OBJ (processOOP);
1801       priority = TO_INT (process->priority);
1802       processLists = GET_PROCESS_LISTS ();
1803       processList = ARRAY_AT (processLists, priority);
1804       add_first_link (processList, processOOP);
1805     }
1806 
1807   SET_EXCEPT_FLAG (true);
1808   switch_to_process = processOOP;
1809   return processOOP;
1810 }
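
/* Note that activate_process does not switch contexts itself: it records
   the chosen process in switch_to_process and raises the exception flag,
   and the interpreter performs the actual switch when it next notices that
   flag.  get_active_process checks switch_to_process first so that the
   pending switch is already visible to callers.  */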
1811 
1812 #ifdef ENABLE_PREEMPTION
1813 RETSIGTYPE
1814 preempt_smalltalk_process (int sig)
1815 {
1816   time_to_preempt = true;
1817   SET_EXCEPT_FLAG (true);
1818 }
1819 #endif
1820 
1821 mst_Boolean
1822 is_process_terminating (OOP processOOP)
1823 {
1824   gst_process process;
1825 
1826   process = (gst_process) OOP_TO_OBJ (processOOP);
1827   return (IS_NIL (process->suspendedContext));
1828 }
1829 
1830 mst_Boolean
1831 is_process_ready (OOP processOOP)
1832 {
1833   gst_process process;
1834   int priority;
1835   OOP processLists;
1836   OOP processList;
1837 
1838   process = (gst_process) OOP_TO_OBJ (processOOP);
1839   priority = TO_INT (process->priority);
1840   processLists = GET_PROCESS_LISTS ();
1841   processList = ARRAY_AT (processLists, priority);
1842 
1843   /* check if process is in the priority queue */
1844   return (process->myList == processList);
1845 }
1846 
1847 void
1848 sleep_process (OOP processOOP)
1849 {
1850   gst_process process;
1851   int priority;
1852   OOP processLists;
1853   OOP processList;
1854 
1855   process = (gst_process) OOP_TO_OBJ (processOOP);
1856   priority = TO_INT (process->priority);
1857   processLists = GET_PROCESS_LISTS ();
1858   processList = ARRAY_AT (processLists, priority);
1859 
1860   /* add process to end of priority queue */
1861   add_last_link (processList, processOOP);
1862 }
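
/* "Sleeping" a process therefore just requeues it at the tail of the ready
   list for its priority; combined with highest_priority_process below, this
   is what gives active_process_yield its round-robin behaviour among
   processes of equal priority.  */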
1863 
1864 
1865 mst_Boolean
1866 would_reschedule_process ()
1867 {
1868   OOP processLists, processListOOP;
1869   int priority, activePriority;
1870   OOP processOOP;
1871   gst_process process;
1872   gst_semaphore processList;
1873 
1874   if (!IS_NIL (switch_to_process))
1875     return false;
1876 
1877   processOOP = get_scheduled_process ();
1878   process = (gst_process) OOP_TO_OBJ (processOOP);
1879   activePriority = TO_INT (process->priority);
1880   processLists = GET_PROCESS_LISTS ();
1881   priority = NUM_OOPS (OOP_TO_OBJ (processLists));
1882   do
1883     {
1884       assert (priority > 0);
1885       processListOOP = ARRAY_AT (processLists, priority);
1886     }
1887   while (is_empty (processListOOP) && --priority >= activePriority);
1888 
1889   processList = (gst_semaphore) OOP_TO_OBJ (processListOOP);
1890   return (priority < activePriority
1891 	  || (priority == activePriority
1892 	      /* If the same priority, check if the list has the
1893 		 current process as the sole element.  */
1894 	      && processList->firstLink == processList->lastLink
1895 	      && processList->firstLink == processOOP));
1896 }
1897 
1898 OOP
1899 highest_priority_process (void)
1900 {
1901   OOP processLists, processListOOP;
1902   int priority;
1903   OOP processOOP;
1904   gst_semaphore processList;
1905 
1906   processLists = GET_PROCESS_LISTS ();
1907   priority = NUM_OOPS (OOP_TO_OBJ (processLists));
1908   for (; priority > 0; priority--)
1909     {
1910       processListOOP = ARRAY_AT (processLists, priority);
1911       if (!is_empty (processListOOP))
1912 	{
1913 	  processOOP = remove_first_link (processListOOP);
1914 	  if (processOOP == get_scheduled_process ())
1915 	    {
1916 	      add_last_link (processListOOP, processOOP);
1917 	      _gst_check_process_state ();
1918 
1919 	      /* If there's only one element in the list, discard this
1920 	         priority.  */
1921 	      processList = (gst_semaphore) OOP_TO_OBJ (processListOOP);
1922 	      if (processList->firstLink == processList->lastLink)
1923 		continue;
1924 
1925 	      processOOP = remove_first_link (processListOOP);
1926 	    }
1927 
1928 	  return (processOOP);
1929 	}
1930     }
1931   return (_gst_nil_oop);
1932 }
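
/* The process returned above has been unlinked from its ready list; the
   callers (active_process_yield, _gst_init_process_system) are expected to
   pass it to activate_process, which links it back in at the front.  */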
1933 
1934 OOP
1935 next_scheduled_process (void)
1936 {
1937   OOP processOOP;
1938   gst_processor_scheduler processor;
1939 
1940   processOOP = highest_priority_process ();
1941 
1942   if (!IS_NIL (processOOP))
1943     return (processOOP);
1944 
1945   if (is_process_ready (get_scheduled_process ()))
1946     return (_gst_nil_oop);
1947 
1948   processor = (gst_processor_scheduler) OOP_TO_OBJ (_gst_processor_oop);
1949   processor->activeProcess = _gst_nil_oop;
1950 
1951   return (_gst_nil_oop);
1952 }
1953 
1954 void
1955 _gst_check_process_state (void)
1956 {
1957   OOP processLists, processListOOP, processOOP;
1958   int priority, n;
1959   gst_semaphore processList;
1960   gst_process process;
1961 
1962   processLists = GET_PROCESS_LISTS ();
1963   priority = NUM_OOPS (OOP_TO_OBJ (processLists));
1964   for (n = 0; priority > 0; --priority)
1965     {
1966       processListOOP = ARRAY_AT (processLists, priority);
1967       processList = (gst_semaphore) OOP_TO_OBJ (processListOOP);
1968 
1969       if (IS_NIL (processList->firstLink) && IS_NIL (processList->lastLink))
1970         continue;
1971 
1972       /* Sanity check the first and last link in the process list.  */
1973       if (IS_NIL (processList->firstLink) || IS_NIL (processList->lastLink))
1974         abort ();
1975 
1976       for (processOOP = processList->firstLink;
1977 	   !IS_NIL (processOOP);
1978 	   processOOP = process->nextLink, n++)
1979 	{
1980 	  process = (gst_process) OOP_TO_OBJ (processOOP);
1981 	  if (process->myList != processListOOP)
1982 	    abort ();
1983 
1984 	  if (process->priority != FROM_INT (priority))
1985 	    abort ();
1986 
1987           /* Sanity check the last link in the process list.  */
1988 	  if (IS_NIL (process->nextLink) && processOOP != processList->lastLink)
1989 	    abort ();
1990 
1991 	  /* Check (rather brutally) for loops in the process lists.  */
1992 	  if (++n > _gst_mem.ot_size)
1993 	    abort ();
1994 	}
1995     }
1996 }
1997 
1998 /* Mainly for being invoked from a debugger */
1999 void
2000 _gst_print_process_state (void)
2001 {
2002   OOP processLists, processListOOP, processOOP;
2003   int priority;
2004   gst_semaphore processList;
2005   gst_process process;
2006 
2007   processLists = GET_PROCESS_LISTS ();
2008   priority = NUM_OOPS (OOP_TO_OBJ (processLists));
2009 
2010   processOOP = get_scheduled_process ();
2011   process = (gst_process) OOP_TO_OBJ (processOOP);
2012   if (processOOP == _gst_nil_oop)
2013     printf ("No active process\n");
2014   else
2015     printf ("Active process: <Proc %p prio: %td next %p context %p>\n",
2016 	    processOOP, TO_INT (process->priority),
2017 	    process->nextLink, process->suspendedContext);
2018 
2019   for (; priority > 0; priority--)
2020     {
2021       processListOOP = ARRAY_AT (processLists, priority);
2022       processList = (gst_semaphore) OOP_TO_OBJ (processListOOP);
2023 
2024       if (IS_NIL (processList->firstLink))
2025         continue;
2026 
2027       printf ("  Priority %d: First %p last %p ",
2028 	      priority, processList->firstLink,
2029 	      processList->lastLink);
2030 
2031       for (processOOP = processList->firstLink; !IS_NIL (processOOP);
2032 	   processOOP = process->nextLink)
2033 	{
2034 	  process = (gst_process) OOP_TO_OBJ (processOOP);
2035 	  printf ("\n    <Proc %p prio: %td context %p> ",
2036 		  processOOP, TO_INT (process->priority),
2037 		  process->suspendedContext);
2038 	}
2039 
2040 
2041       printf ("\n");
2042     }
2043 }
2044 
2045 OOP
2046 semaphore_new (int signals)
2047 {
2048   gst_semaphore sem;
2049   OOP semaphoreOOP;
2050 
2051   sem = (gst_semaphore) instantiate (_gst_semaphore_class, &semaphoreOOP);
2052   sem->signals = FROM_INT (signals);
2053 
2054   return (semaphoreOOP);
2055 }
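
/* Besides real Semaphores, this is also how the per-priority ready lists
   are created (see _gst_init_process_system below); those lists only use
   the firstLink/lastLink fields, with signals left at zero.  */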
2056 
2057 void
2058 _gst_init_process_system (void)
2059 {
2060   gst_processor_scheduler processor;
2061   int i;
2062 
2063   processor = (gst_processor_scheduler) OOP_TO_OBJ (_gst_processor_oop);
2064   if (IS_NIL (processor->processLists))
2065     {
2066       gst_object processLists;
2067 
2068       processLists = instantiate_with (_gst_array_class, NUM_PRIORITIES,
2069 				       &processor->processLists);
2070 
2071       for (i = 0; i < NUM_PRIORITIES; i++)
2072 	processLists->data[i] = semaphore_new (0);
2073     }
2074 
2075   if (IS_NIL (processor->processTimeslice))
2076     processor->processTimeslice =
2077       FROM_INT (DEFAULT_PREEMPTION_TIMESLICE);
2078 
2079   /* No process is active -- so highest_priority_process() need not
2080      worry about discarding an active process.  */
2081   processor->activeProcess = _gst_nil_oop;
2082   switch_to_process = _gst_nil_oop;
2083   activate_process (highest_priority_process ());
2084   set_preemption_timer ();
2085 }
2086 
2087 OOP
2088 create_callin_process (OOP contextOOP)
2089 {
2090   OOP processListsOOP;
2091   gst_processor_scheduler processor;
2092   gst_process initialProcess;
2093   OOP initialProcessOOP, initialProcessListOOP, nameOOP;
2094   inc_ptr inc = INC_SAVE_POINTER ();
2095 
2096   processor = (gst_processor_scheduler) OOP_TO_OBJ (_gst_processor_oop);
2097   processListsOOP = processor->processLists;
2098   initialProcessListOOP = ARRAY_AT (processListsOOP, 4);
2099 
2100   nameOOP = _gst_string_new ("call-in process");
2101   INC_ADD_OOP (nameOOP);
2102 
2103   initialProcess = (gst_process)
2104     instantiate (_gst_callin_process_class, &initialProcessOOP);
2105 
2106   INC_ADD_OOP (initialProcessOOP);
2107   initialProcess->priority = FROM_INT (USER_SCHEDULING_PRIORITY);
2108   initialProcess->interruptLock = _gst_nil_oop;
2109   initialProcess->suspendedContext = contextOOP;
2110   initialProcess->name = nameOOP;
2111   INC_RESTORE_POINTER (inc);
2112 
2113   /* Put initialProcessOOP in the root set */
2114   add_first_link (initialProcessListOOP, initialProcessOOP);
2115 
2116   _gst_invalidate_method_cache ();
2117   return (initialProcessOOP);
2118 }
2119 
2120 int
2121 _gst_get_var (enum gst_var_index index)
2122 {
2123   switch (index)
2124     {
2125     case GST_DECLARE_TRACING:
2126       return (_gst_declare_tracing);
2127     case GST_EXECUTION_TRACING:
2128       return (_gst_execution_tracing);
2129     case GST_EXECUTION_TRACING_VERBOSE:
2130       return (verbose_exec_tracing);
2131     case GST_GC_MESSAGE:
2132       return (_gst_gc_message);
2133     case GST_VERBOSITY:
2134       return (_gst_verbosity);
2135     case GST_MAKE_CORE_FILE:
2136       return (_gst_make_core_file);
2137     case GST_REGRESSION_TESTING:
2138       return (_gst_regression_testing);
2139     default:
2140       return (-1);
2141     }
2142 }
2143 
2144 int
2145 _gst_set_var (enum gst_var_index index, int value)
2146 {
2147   int old = _gst_get_var (index);
2148   if (value < 0)
2149     return -1;
2150 
2151   switch (index)
2152     {
2153     case GST_DECLARE_TRACING:
2154       _gst_declare_tracing = value;
2155       break;
2156     case GST_EXECUTION_TRACING:
2157       _gst_execution_tracing = value;
2158       break;
2159     case GST_EXECUTION_TRACING_VERBOSE:
2160       verbose_exec_tracing = value;
2161       break;
2162     case GST_GC_MESSAGE:
2163       _gst_gc_message = value;
2164       break;
2165     case GST_VERBOSITY:
2166       _gst_verbosity = value;
2167       break;
2168     case GST_MAKE_CORE_FILE:
2169       _gst_make_core_file = value;
2170       break;
2171     case GST_REGRESSION_TESTING:
2172       _gst_regression_testing = true;
2173       break;
2174     default:
2175       return (-1);
2176     }
2177 
2178   return old;
2179 }
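
/* A minimal usage sketch for the two accessors above (the indices are the
   gst_var_index values handled in the switches; everything else here is
   illustrative only):

       int old = _gst_set_var (GST_VERBOSITY, 3);
       ... run some code with increased verbosity ...
       if (old >= 0)
         _gst_set_var (GST_VERBOSITY, old);  */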
2180 
2181 
2182 void
2183 _gst_init_interpreter (void)
2184 {
2185   unsigned int i;
2186 
2187 #ifdef ENABLE_JIT_TRANSLATION
2188   _gst_init_translator ();
2189   ip = 0;
2190 #else
2191   ip = NULL;
2192 #endif
2193 
2194   _gst_this_context_oop = _gst_nil_oop;
2195   for (i = 0; i < MAX_LIFO_DEPTH; i++)
2196     lifo_contexts[i].flags = F_POOLED | F_CONTEXT;
2197 
2198   _gst_init_async_events ();
2199   _gst_init_process_system ();
2200 }
2201 
2202 OOP
2203 _gst_prepare_execution_environment (void)
2204 {
2205   gst_method_context dummyContext;
2206   OOP dummyContextOOP, processOOP;
2207   inc_ptr inc = INC_SAVE_POINTER ();
2208 
2209   empty_context_stack ();
2210   dummyContext = alloc_stack_context (4);
2211   dummyContext->objClass = _gst_method_context_class;
2212   dummyContext->parentContext = _gst_nil_oop;
2213   dummyContext->method = _gst_get_termination_method ();
2214   dummyContext->flags = MCF_IS_METHOD_CONTEXT
2215 	 | MCF_IS_EXECUTION_ENVIRONMENT
2216 	 | MCF_IS_UNWIND_CONTEXT;
2217   dummyContext->receiver = _gst_nil_oop;
2218   dummyContext->ipOffset = FROM_INT (0);
2219   dummyContext->spOffset = FROM_INT (-1);
2220 
2221 #ifdef ENABLE_JIT_TRANSLATION
2222   dummyContext->native_ip = GET_NATIVE_IP ((char *) _gst_return_from_native_code);
2223 #else
2224   dummyContext->native_ip = DUMMY_NATIVE_IP;	/* See empty_context_stack */
2225 #endif
2226 
2227   dummyContextOOP = alloc_oop (dummyContext,
2228 			       _gst_mem.active_flag | F_POOLED | F_CONTEXT);
2229 
2230 
2231   INC_ADD_OOP (dummyContextOOP);
2232   processOOP = create_callin_process (dummyContextOOP);
2233 
2234   INC_RESTORE_POINTER (inc);
2235   return (processOOP);
2236 }
2237 
2238 OOP
2239 _gst_nvmsg_send (OOP receiver,
2240                  OOP sendSelector,
2241                  OOP *args,
2242                  int sendArgs)
2243 {
2244   inc_ptr inc = INC_SAVE_POINTER ();
2245 #if 0
2246   OOP dirMessageOOP;
2247 #endif
2248   OOP processOOP, currentProcessOOP;
2249   OOP result;
2250   gst_process process;
2251   int i;
2252 
2253   processOOP = _gst_prepare_execution_environment ();
2254   INC_ADD_OOP (processOOP);
2255 
2256   _gst_check_process_state ();
2257   /* _gst_print_process_state (); */
2258   /* _gst_show_backtrace (stdout); */
2259 
2260   if (reentrancy_jmp_buf && !reentrancy_jmp_buf->suspended++)
2261     suspend_process (reentrancy_jmp_buf->processOOP);
2262 
2263   currentProcessOOP = get_active_process ();
2264   change_process_context (processOOP);
2265 
2266   PUSH_OOP (receiver);
2267   for (i = 0; i < sendArgs; i++)
2268     PUSH_OOP (args[i]);
2269 
2270   if (!sendSelector)
2271     send_block_value (sendArgs, sendArgs);
2272   else if (OOP_CLASS (sendSelector) == _gst_symbol_class)
2273     SEND_MESSAGE (sendSelector, sendArgs);
2274   else
2275     _gst_send_method (sendSelector);
2276 
2277   process = (gst_process) OOP_TO_OBJ (currentProcessOOP);
2278 
2279   if (!IS_NIL (currentProcessOOP)
2280       && TO_INT (process->priority) > USER_SCHEDULING_PRIORITY)
2281     ACTIVE_PROCESS_YIELD ();
2282 
2283   result = _gst_interpret (processOOP);
2284   INC_ADD_OOP (result);
2285 
2286   /* Re-enable the previously executing process *now*, because a
2287      primitive might expect the current stack pointer to be that
2288      of the process that was executing.  */
2289   if (reentrancy_jmp_buf && !--reentrancy_jmp_buf->suspended
2290       && !is_process_terminating (reentrancy_jmp_buf->processOOP))
2291     {
2292       resume_process (reentrancy_jmp_buf->processOOP, true);
2293       if (!IS_NIL (switch_to_process))
2294         change_process_context (switch_to_process);
2295     }
2296 
2297   INC_RESTORE_POINTER (inc);
2298   return (result);
2299 }
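
/* A hedged usage sketch for _gst_nvmsg_send: given a receiver OOP and a
   selector Symbol OOP obtained elsewhere (the variable names below are
   illustrative only),

       OOP args[1] = { argOOP };
       OOP result = _gst_nvmsg_send (receiverOOP, selectorOOP, args, 1);

   sends the selector with one argument, while passing a NULL selector
   instead evaluates the receiver as a block with the given arguments.  */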
2300 
2301 void
2302 set_preemption_timer (void)
2303 {
2304 #ifdef ENABLE_PREEMPTION
2305   gst_processor_scheduler processor;
2306   int timeSlice;
2307 
2308   processor = (gst_processor_scheduler) OOP_TO_OBJ (_gst_processor_oop);
2309   timeSlice = TO_INT (processor->processTimeslice);
2310 
2311   time_to_preempt = false;
2312   if (timeSlice > 0)
2313     _gst_sigvtalrm_every (timeSlice, preempt_smalltalk_process);
2314 #endif
2315 }
2316 
2317 void
2318 _gst_invalidate_method_cache (void)
2319 {
2320   int i;
2321 
2322   /* Only do this if some code was run since the last cache cleanup,
2323      as it is quite expensive.  */
2324   if (!_gst_sample_counter)
2325     return;
2326 
2327 #ifdef ENABLE_JIT_TRANSLATION
2328   _gst_reset_inline_caches ();
2329 #else
2330   at_cache_class = at_put_cache_class =
2331     size_cache_class = class_cache_class = NULL;
2332 #endif
2333 
2334   _gst_cache_misses = _gst_sample_counter = 0;
2335 
2336   for (i = 0; i < METHOD_CACHE_SIZE; i++)
2337     {
2338       method_cache[i].selectorOOP = NULL;
2339 #ifdef ENABLE_JIT_TRANSLATION
2340       method_cache[i].receiverClass = NULL;
2341 #endif
2342     }
2343 }
2344 
2345 
2346 void
2347 _gst_copy_processor_registers (void)
2348 {
2349   copy_semaphore_oops ();
2350 
2351   /* Get everything into the main OOP table first.  */
2352   if (_gst_this_context_oop)
2353     MAYBE_COPY_OOP (_gst_this_context_oop);
2354 
2355   /* everything else is pointed to by _gst_this_context_oop, either
2356      directly or indirectly, or has been copied when scanning the
2357      registered roots.  */
2358 }
2359 
2360 void
2361 copy_semaphore_oops (void)
2362 {
2363   async_queue_entry *sig;
2364 
2365   for (sig = queued_async_signals; sig != &queued_async_signals_tail;
2366        sig = sig->next)
2367     MAYBE_COPY_OOP (sig->data);
2368   for (sig = queued_async_signals_sig; sig != &queued_async_signals_tail;
2369        sig = sig->next)
2370     MAYBE_COPY_OOP (sig->data);
2371 
2372   /* there does seem to be a window where this is not valid */
2373   if (single_step_semaphore)
2374     MAYBE_COPY_OOP (single_step_semaphore);
2375 
2376   /* there does seem to be a window where this is not valid */
2377   MAYBE_COPY_OOP (switch_to_process);
2378 }
2379 
2380 
2381 
2382 void
2383 _gst_mark_processor_registers (void)
2384 {
2385   mark_semaphore_oops ();
2386   if (_gst_this_context_oop)
2387     MAYBE_MARK_OOP (_gst_this_context_oop);
2388 
2389   /* everything else is pointed to by _gst_this_context_oop, either
2390      directly or indirectly, or has been marked when scanning the
2391      registered roots.  */
2392 }
2393 
2394 void
2395 mark_semaphore_oops (void)
2396 {
2397   async_queue_entry *sig;
2398 
2399   for (sig = queued_async_signals; sig != &queued_async_signals_tail;
2400        sig = sig->next)
2401     MAYBE_MARK_OOP (sig->data);
2402   for (sig = queued_async_signals_sig; sig != &queued_async_signals_tail;
2403        sig = sig->next)
2404     MAYBE_MARK_OOP (sig->data);
2405 
2406   /* there does seem to be a window where this is not valid */
2407   if (single_step_semaphore)
2408     MAYBE_MARK_OOP (single_step_semaphore);
2409 
2410   /* there does seem to be a window where this is not valid */
2411   MAYBE_MARK_OOP (switch_to_process);
2412 }
2413 
2414 
2415 
2416 
2417 void
2418 _gst_fixup_object_pointers (void)
2419 {
2420   gst_method_context thisContext;
2421 
2422   if (!IS_NIL (_gst_this_context_oop))
2423     {
2424       /* Create real OOPs for the contexts here.  If we do it while copying,
2425          the newly created OOPs are in to-space and are never scanned! */
2426       empty_context_stack ();
2427 
2428       thisContext =
2429 	(gst_method_context) OOP_TO_OBJ (_gst_this_context_oop);
2430 #ifdef DEBUG_FIXUP
2431       fflush (stderr);
2432       printf
2433 	("\nF sp %x %d    ip %x %d	_gst_this_method %x  thisContext %x",
2434 	 sp, sp - thisContext->contextStack, ip, ip - method_base,
2435 	 _gst_this_method->object, thisContext);
2436       fflush (stdout);
2437 #endif
2438       thisContext->method = _gst_this_method;
2439       thisContext->receiver = _gst_self;
2440       thisContext->spOffset = FROM_INT (sp - thisContext->contextStack);
2441       thisContext->ipOffset = FROM_INT (ip - method_base);
2442     }
2443 }
2444 
2445 void
2446 _gst_restore_object_pointers (void)
2447 {
2448   gst_context_part thisContext;
2449 
2450   /* !!! The objects can move after the growing or compaction phase.  But
2451      all this information is re-computable, so we pick up
2452      _gst_this_method to adjust the ip and _gst_literals accordingly,
2453      and we also pick up the context to adjust sp and the temps
2454      accordingly.  */
2455 
2456   if (!IS_NIL (_gst_this_context_oop))
2457     {
2458       thisContext =
2459 	(gst_context_part) OOP_TO_OBJ (_gst_this_context_oop);
2460       _gst_temporaries = thisContext->contextStack;
2461 
2462 #ifndef OPTIMIZE		/* Mon Jul 3 01:21:06 1995 */
2463       /* these should not be necessary */
2464       if (_gst_this_method != thisContext->method)
2465 	{
2466 	  printf ("$$$$$$$$$$$$$$$$$$$ GOT ONE!!!!\n");
2467 	  printf ("this method %O\n", _gst_this_method);
2468 	  printf ("this context %O\n", thisContext->receiver);
2469 	  abort ();
2470 	}
2471       if (_gst_self != thisContext->receiver)
2472 	{
2473 	  printf ("$$$$$$$$$$$$$$$$$$$ GOT ONE!!!!\n");
2474 	  printf ("self %O\n", _gst_self);
2475 	  printf ("this context %O\n", thisContext->receiver);
2476 	  abort ();
2477 	}
2478 #endif /* OPTIMIZE Mon Jul 3 01:21:06 1995 */
2479 
2480       SET_THIS_METHOD (_gst_this_method, GET_CONTEXT_IP (thisContext));
2481       sp = TO_INT (thisContext->spOffset) + thisContext->contextStack;
2482 
2483 #ifdef DEBUG_FIXUP
2484       fflush (stderr);
2485       printf
2486 	("\nR sp %x %d    ip %x %d	_gst_this_method %x  thisContext %x\n",
2487 	 sp, sp - thisContext->contextStack, ip, ip - method_base,
2488 	 _gst_this_method->object, thisContext);
2489       fflush (stdout);
2490 #endif
2491     }
2492 
2493   SET_EXCEPT_FLAG (true);	/* force to import registers */
2494 }
2495 
2496 static RETSIGTYPE
2497 interrupt_on_signal (int sig)
2498 {
2499   if (reentrancy_jmp_buf)
2500     stop_execution ();
2501   else
2502     {
2503       _gst_set_signal_handler (sig, SIG_DFL);
2504       raise (sig);
2505     }
2506 }
2507 
2508 static void
2509 backtrace_on_signal_1 (mst_Boolean is_serious_error, mst_Boolean c_backtrace)
2510 {
2511   static int reentering = -1;
2512 
2513   /* Avoid recursive signals */
2514   reentering++;
2515 
2516   if ((reentrancy_jmp_buf && reentrancy_jmp_buf->interpreter)
2517       && !reentering
2518       && ip
2519       && !_gst_gc_running)
2520     _gst_show_backtrace (stderr);
2521   else
2522     {
2523       if (is_serious_error)
2524         _gst_errorf ("Error occurred while not in byte code interpreter!!");
2525 
2526 #ifdef HAVE_EXECINFO_H
2527       /* Don't print a backtrace, for example, if exiting during a
2528 	 compilation.  */
2529       if (c_backtrace && !reentering)
2530 	{
2531           PTR array[11];
2532           size_t size = backtrace (array, 11);
2533           backtrace_symbols_fd (array + 1, size - 1, STDERR_FILENO);
2534         }
2535 #endif
2536     }
2537 
2538   reentering--;
2539 }
2540 
2541 static RETSIGTYPE
2542 backtrace_on_signal (int sig)
2543 {
2544   _gst_errorf ("%s", strsignal (sig));
2545   _gst_set_signal_handler (sig, backtrace_on_signal);
2546   backtrace_on_signal_1 (sig != SIGTERM, sig != SIGTERM);
2547   _gst_set_signal_handler (sig, SIG_DFL);
2548   raise (sig);
2549 }
2550 
2551 #ifdef SIGUSR1
2552 static RETSIGTYPE
2553 user_backtrace_on_signal (int sig)
2554 {
2555   _gst_set_signal_handler (sig, user_backtrace_on_signal);
2556   backtrace_on_signal_1 (false, true);
2557 }
2558 #endif
2559 
2560 void
2561 _gst_init_signals (void)
2562 {
2563   if (!_gst_make_core_file)
2564     {
2565 #ifdef ENABLE_JIT_TRANSLATION
2566       _gst_set_signal_handler (SIGILL, backtrace_on_signal);
2567 #endif
2568       _gst_set_signal_handler (SIGABRT, backtrace_on_signal);
2569     }
2570   _gst_set_signal_handler (SIGTERM, backtrace_on_signal);
2571   _gst_set_signal_handler (SIGINT, interrupt_on_signal);
2572 #ifdef SIGUSR1
2573   _gst_set_signal_handler (SIGUSR1, user_backtrace_on_signal);
2574 #endif
2575 }
2576 
2577 
2578 void
2579 _gst_show_backtrace (FILE *fp)
2580 {
2581   OOP contextOOP;
2582   gst_method_context context;
2583   gst_compiled_block block;
2584   gst_compiled_method method;
2585   gst_method_info methodInfo;
2586 
2587   empty_context_stack ();
2588   for (contextOOP = _gst_this_context_oop; !IS_NIL (contextOOP);
2589        contextOOP = context->parentContext)
2590     {
2591       context = (gst_method_context) OOP_TO_OBJ (contextOOP);
2592       if (CONTEXT_FLAGS (context)
2593 	  == (MCF_IS_METHOD_CONTEXT | MCF_IS_DISABLED_CONTEXT))
2594 	continue;
2595 
2596       /* printf ("(OOP %p)", context->method); */
2597       fprintf (fp, "(ip %d)", TO_INT (context->ipOffset));
2598       if (CONTEXT_FLAGS (context) & MCF_IS_METHOD_CONTEXT)
2599 	{
2600 	  OOP receiver, receiverClass;
2601 
2602           if (CONTEXT_FLAGS (context) & MCF_IS_EXECUTION_ENVIRONMENT)
2603 	    {
2604 	      if (IS_NIL(context->parentContext))
2605 	        fprintf (fp, "<bottom>\n");
2606 	      else
2607 	        fprintf (fp, "<unwind point>\n");
2608 	      continue;
2609 	    }
2610 
2611           if (CONTEXT_FLAGS (context) & MCF_IS_UNWIND_CONTEXT)
2612 	    fprintf (fp, "<unwind> ");
2613 
2614 	  /* a method context */
2615 	  method = (gst_compiled_method) OOP_TO_OBJ (context->method);
2616 	  methodInfo =
2617 	    (gst_method_info) OOP_TO_OBJ (method->descriptor);
2618 	  receiver = context->receiver;
2619 	  if (IS_INT (receiver))
2620 	    receiverClass = _gst_small_integer_class;
2621 
2622 	  else
2623 	    receiverClass = OOP_CLASS (receiver);
2624 
2625 	  if (receiverClass == methodInfo->class)
2626 	    fprintf (fp, "%O", receiverClass);
2627 	  else
2628 	    fprintf (fp, "%O(%O)", receiverClass, methodInfo->class);
2629 	}
2630       else
2631 	{
2632 	  /* a block context */
2633 	  block = (gst_compiled_block) OOP_TO_OBJ (context->method);
2634 	  method = (gst_compiled_method) OOP_TO_OBJ (block->method);
2635 	  methodInfo =
2636 	    (gst_method_info) OOP_TO_OBJ (method->descriptor);
2637 
2638 	  fprintf (fp, "[] in %O", methodInfo->class);
2639 	}
2640       fprintf (fp, ">>%O\n", methodInfo->selector);
2641     }
2642 }
2643 
2644 void
2645 _gst_show_stack_contents (void)
2646 {
2647   gst_method_context context;
2648   OOP *walk;
2649   mst_Boolean first;
2650 
2651   if (IS_NIL (_gst_this_context_oop))
2652     return;
2653 
2654   context = (gst_method_context) OOP_TO_OBJ (_gst_this_context_oop);
2655   for (first = true, walk = context->contextStack;
2656        walk <= sp; first = false, walk++)
2657     {
2658       if (!first)
2659 	printf (", ");
2660 
2661       printf ("%O", *walk);
2662     }
2663   printf ("\n\n");
2664 }
2665 
2666 
2667 static inline mst_Boolean
2668 cached_index_oop_primitive (OOP rec, OOP idx, intptr_t spec)
2669 {
2670   OOP result;
2671   if (!IS_INT (idx))
2672     return (true);
2673 
2674   result = index_oop_spec (rec, OOP_TO_OBJ (rec), TO_INT (idx), spec);
2675   if UNCOMMON (!result)
2676     return (true);
2677 
2678   POP_N_OOPS (1);
2679   SET_STACKTOP (result);
2680   return (false);
2681 }
2682 
2683 static inline mst_Boolean
2684 cached_index_oop_put_primitive (OOP rec, OOP idx, OOP val, intptr_t spec)
2685 {
2686   if (!IS_INT (idx))
2687     return (true);
2688 
2689   if UNCOMMON (!index_oop_put_spec (rec, OOP_TO_OBJ (rec), TO_INT (idx),
2690 				    val, spec))
2691     return (true);
2692 
2693   POP_N_OOPS (2);
2694   SET_STACKTOP (val);
2695   return (false);
2696 }
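
/* Both helpers above follow the same return convention: false means the
   operation was performed (the operands have been popped and the result
   left on the stack top), while true means the fast path could not be
   taken and the caller must fall back to the full message send.  */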
2697 
2698 static inline intptr_t
2699 execute_primitive_operation (int primitive, volatile int numArgs)
2700 {
2701   prim_table_entry *pte = &_gst_primitive_table[primitive];
2702 
2703   intptr_t result = pte->func (pte->id, numArgs);
2704   last_primitive = primitive;
2705   return result;
2706 }
2707 
2708 prim_table_entry *
2709 _gst_get_primitive_attributes (int primitive)
2710 {
2711   return &_gst_default_primitive_table[primitive];
2712 }
2713 
2714 void
2715 _gst_set_primitive_attributes (int primitive, prim_table_entry *pte)
2716 {
2717   if (pte)
2718     _gst_primitive_table[primitive] = *pte;
2719   else
2720     _gst_primitive_table[primitive] = _gst_default_primitive_table[0];
2721 }
2722 
2723 void
2724 push_jmp_buf (interp_jmp_buf *jb, int for_interpreter, OOP processOOP)
2725 {
2726   jb->next = reentrancy_jmp_buf;
2727   jb->processOOP = processOOP;
2728   jb->suspended = 0;
2729   jb->interpreter = for_interpreter;
2730   jb->interrupted = false;
2731   _gst_register_oop (processOOP);
2732   reentrancy_jmp_buf = jb;
2733 }
2734 
2735 mst_Boolean
2736 pop_jmp_buf (void)
2737 {
2738   interp_jmp_buf *jb = reentrancy_jmp_buf;
2739   reentrancy_jmp_buf = jb->next;
2740 
2741   if (jb->interpreter && !is_process_terminating (jb->processOOP))
2742     _gst_terminate_process (jb->processOOP);
2743 
2744   _gst_unregister_oop (jb->processOOP);
2745   return jb->interrupted && reentrancy_jmp_buf;
2746 }
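
/* parse_stream_with_protection below shows the intended usage pattern for
   this pair: push a buffer for the active process, setjmp on jb.jmpBuf,
   run the protected code, and finally pop_jmp_buf to clean up.  */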
2747 
2748 void
2749 stop_execution (void)
2750 {
2751   reentrancy_jmp_buf->interrupted = true;
2752 
2753   if (reentrancy_jmp_buf->interpreter
2754       && !is_process_terminating (reentrancy_jmp_buf->processOOP))
2755     {
2756       _gst_abort_execution = "userInterrupt";
2757       SET_EXCEPT_FLAG (true);
2758       if (get_active_process () != reentrancy_jmp_buf->processOOP)
2759 	resume_process (reentrancy_jmp_buf->processOOP, true);
2760     }
2761   else
2762     longjmp (reentrancy_jmp_buf->jmpBuf, 1);
2763 }
2764 
2765 mst_Boolean
2766 parse_stream_with_protection (mst_Boolean method)
2767 {
2768   interp_jmp_buf jb;
2769 
2770   push_jmp_buf (&jb, false, get_active_process ());
2771   if (setjmp (jb.jmpBuf) == 0)
2772     _gst_parse_stream (method);
2773 
2774   return pop_jmp_buf ();
2775 }
2776