1 /* VM library: main header file.
2 
3    Copyright (C) 2016, 2017, 2018, 2019, 2020 Luca Saiu
4    Written by Luca Saiu
5 
6    This file is part of Jitter.
7 
8    Jitter is free software: you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation, either version 3 of the License, or
11    (at your option) any later version.
12 
13    Jitter is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with Jitter.  If not, see <http://www.gnu.org/licenses/>. */
20 
21 
22 /* Generated file warning.
23  * ************************************************************************** */
24 
25 /* Unless this file is named exactly "vm.h" , without any prefix, you are
26    looking at a machine-generated derived file.  The original source is the vm.h
27    template from Jitter, with added code implementing the vmprefix VM. */
28 
29 
30 
31 
32 /* This multiple-inclusion guard is opened here in the template, and will be
33    closed at the end of the generated code.  It is normal to find no matching
34    #endif in the template file.  */
35 #ifndef VMPREFIX_VM_H_
36 #define VMPREFIX_VM_H_
37 
38 
39 /* This is the main VM header to use from hand-written code.
40  * ************************************************************************** */
41 
42 #include <stdio.h>
43 #include <stdbool.h>
44 
45 #include <jitter/jitter.h>
46 #include <jitter/jitter-hash.h>
47 #include <jitter/jitter-stack.h>
48 #include <jitter/jitter-instruction.h>
49 #include <jitter/jitter-mutable-routine.h>
50 #include <jitter/jitter-print.h>
51 #include <jitter/jitter-routine.h>
52 //#include <jitter/jitter-specialize.h> // FIXME: what about only declaring jitter_specialize in another header, and not including this?
53 #include <jitter/jitter-disassemble.h>
54 #include <jitter/jitter-vm.h>
55 #include <jitter/jitter-profile.h>
56 #include <jitter/jitter-data-locations.h>
57 #include <jitter/jitter-arithmetic.h>
58 #include <jitter/jitter-bitwise.h>
59 #include <jitter/jitter-signals.h>
60 #include <jitter/jitter-list.h>
61 
62 
63 
64 
65 /* Initialization and finalization.
66  * ************************************************************************** */
67 
68 /* Initialize the runtime state for the vmprefix VM.  This needs to be called
69    before using VM routines or VM states in any way. */
void
vmprefix_initialize (void);
/* (No argument: this initializes global VM runtime state, as opposed to the
   per-state vmprefix_state_initialize below.) */

/* Finalize the runtime state, freeing some resources.  After calling this no
   use of VM routines or states is allowed.  It is possible to re-initialize
   after finalizing; these later re-initializations might be more efficient than
   the first initialization. */
void
vmprefix_finalize (void);
79 
80 
81 
82 
83 /* State data structure initialization and finalization.
84  * ************************************************************************** */
85 
86 /* The machine state is separated into the backing and the more compact runtime
87    data structures, to be allocated in registers as far as possible.  These are
   just forward-declarations: the actual definitions are machine-generated. */
89 struct vmprefix_state_backing;
90 struct vmprefix_state_runtime;
91 
92 /* A data structure containing both the backing and the runtime state.  This is
93    a forward-declaration: the actual definition will come after both are
94    defined. */
95 struct vmprefix_state;
96 
97 /* Initialize the pointed VM state data structure, or fail fatally.  The
98    function definition is machine-generated, even if it may include user code.
99    The state backing and runtime are initialized at the same time, and in fact
100    the distinction between them is invisible to the VM user. */
void
vmprefix_state_initialize (struct vmprefix_state *state)
  __attribute__ ((nonnull (1)));

/* Finalize the pointed VM state data structure, or fail fatally.  The function
   definition is machine-generated, even if it may include user code.  The state
   backing and runtime are finalized at the same time. */
/* The nonnull attribute declares that passing a NULL state is invalid. */
void
vmprefix_state_finalize (struct vmprefix_state *state)
  __attribute__ ((nonnull (1)));
111 
112 
113 
114 
115 /* State data structure: iteration.
116  * ************************************************************************** */
117 
118 /* The header of a doubly-linked list linking every state for the vmprefix VM
119    together.  This global is automatically wrapped, and therefore also
120    accessible from VM instruction code. */
extern struct jitter_list_header * const
vmprefix_states;

/* A pointer to the current state, only accessible from VM code.  This is usable
   for pointer comparison when iterating over states. */
/* NOTE(review): jitter_original_state is presumably only in scope within VM
   instruction code, consistently with the comment above — confirm. */
#define VMPREFIX_OWN_STATE                           \
  ((struct vmprefix_state *) jitter_original_state)
128 
129 /* Given an l-value of type struct vmprefix_state * (usually a variable name)
130    expand to a for loop statement iterating over every existing vmprefix state
131    using the l-value as iteration variable.  The expansion will execute the
132    statement immediately following the macro call with the l-value in scope;
   in other words the loop body is not a macro argument, but follows the macro
134    use.
135    The l-value may be evaluated an unspecified number of times.
136    This macro is safe to use within VM instruction code.
137    For example:
138      struct vmprefix_state *s;
139      VMPREFIX_FOR_EACH_STATE (s)
140        printf ("This is a state: %p\n", s); // (but printf unsafe in VM code) */
/* Implementation note: the expansion is a bare for header, so the statement
   following the macro use becomes the loop body.  Iteration starts from the
   first element of the global vmprefix_states list and follows links.next
   until NULL. */
#define VMPREFIX_FOR_EACH_STATE(jitter_state_iteration_lvalue)     \
  for ((jitter_state_iteration_lvalue)                             \
          = vmprefix_states->first;                                \
       (jitter_state_iteration_lvalue)                             \
          != NULL;                                                 \
       (jitter_state_iteration_lvalue)                             \
         = (jitter_state_iteration_lvalue)->links.next)            \
    /* Here comes the body supplied by the user: no semicolon. */
149 
150 
151 
152 
153 /* Mutable routine initialization.
154  * ************************************************************************** */
155 
156 /* Return a freshly-allocated empty mutable routine for the vmprefix VM. */
/* The returns_nonnull attribute declares that the result is never NULL. */
struct jitter_mutable_routine*
vmprefix_make_mutable_routine (void)
  __attribute__ ((returns_nonnull));
160 
161 /* Mutable routine finalization is actually VM-independent, but a definition of
162    vmprefix_destroy_mutable_routine is provided below as a macro, for cosmetic
163    reasons. */
164 
165 
166 /* Mutable routines: code generation C API.
167  * ************************************************************************** */
168 
169 /* This is the preferred way of adding a new VM instruction to a pointed
170    routine, more efficient than vmprefix_mutable_routine_append_instruction_name
171    even if only usable when the VM instruction opcode is known at compile time.
172    The unspecialized instruction name must be explicitly mangled by the user as
173    per the rules in jitterc_mangle.c .  For example an instruction named foo_bar
174    can be added to the routine pointed by p with any one of
175      vmprefix_mutable_routine_append_instruction_name (p, "foo_bar");
176    ,
177      VMPREFIX_MUTABLE_ROUTINE_APPEND_INSTRUCTION (p, foo_ubar);
178    , and
179      VMPREFIX_MUTABLE_ROUTINE_APPEND_INSTRUCTION_ID
180         (p, vmprefix_meta_instruction_id_foo_ubar);
181    .
182    The string "foo_bar" is not mangled, but the token foo_ubar is. */
/* Implementation note: the meta-instruction is located by token-pasting the
   mangled name root onto vmprefix_meta_instruction_id_ , and using the result
   as an index into the vmprefix_meta_instructions array.  The do .. while
   (false) wrapper makes the expansion usable as a single statement. */
#define VMPREFIX_MUTABLE_ROUTINE_APPEND_INSTRUCTION(                 \
          routine_p, instruction_mangled_name_root)                  \
  do                                                                 \
    {                                                                \
      jitter_mutable_routine_append_meta_instruction                 \
         ((routine_p),                                               \
          vmprefix_meta_instructions                                 \
          + JITTER_CONCATENATE_TWO(vmprefix_meta_instruction_id_,    \
                                   instruction_mangled_name_root));  \
    }                                                                \
  while (false)
194 
195 /* Append the unspecialized instruction whose id is given to the pointed routine.
196    The id must be a case of enum vmprefix_meta_instruction_id ; such cases have
197    a name starting with vmprefix_meta_instruction_id_ .
198    This is slightly less convenient to use than VMPREFIX_MUTABLE_ROUTINE_APPEND_INSTRUCTION
199    but more general, as the instruction id is allowed to be a non-constant C
200    expression. */
/* Implementation note: this delegates to
   jitter_mutable_routine_append_instruction_id , passing the meta-instruction
   table along with its size.  The underscore-prefixed parameter names are
   presumably chosen to avoid clashing with caller identifiers. */
#define VMPREFIX_MUTABLE_ROUTINE_APPEND_INSTRUCTION_ID(_jitter_routine_p,       \
                                                       _jitter_instruction_id)  \
  do                                                                            \
    {                                                                           \
      jitter_mutable_routine_append_instruction_id                              \
         ((_jitter_routine_p),                                                  \
          vmprefix_meta_instructions,                                           \
          VMPREFIX_META_INSTRUCTION_NO,                                         \
          (_jitter_instruction_id));                                            \
    }                                                                           \
  while (false)
212 
213 /* This is the preferred way of appending a register argument to the instruction
214    being added to the pointed routine, more convenient than directly using
215    vmprefix_mutable_routine_append_register_id_parameter , even if only usable
216    when the register class is known at compile time.  Here the register class is
217    only provided as a letter, but both the routine pointer and the register
218    index are arbitrary C expressions.
219    For example, in
220      VMPREFIX_MUTABLE_ROUTINE_APPEND_REGISTER_PARAMETER (p, r,
221                                                          variable_to_index (x));
222    the second macro argument "r" represents the register class named "r", and
223    not the value of a variable named r. */
/* Implementation note: the register-class descriptor is obtained by
   token-pasting the class letter onto vmprefix_register_class_ , and its
   address is passed to the appending function. */
#define VMPREFIX_MUTABLE_ROUTINE_APPEND_REGISTER_PARAMETER(routine_p,     \
                                                           class_letter,  \
                                                           index)         \
  do                                                                      \
    {                                                                     \
      vmprefix_mutable_routine_append_register_parameter                  \
         ((routine_p),                                                    \
          & JITTER_CONCATENATE_TWO(vmprefix_register_class_,              \
                                   class_letter),                         \
          (index));                                                       \
    }                                                                     \
  while (false)
236 
237 
238 
239 
240 /* Routine unified API: initialization.
241  * ************************************************************************** */
242 
243 /* See the comments above in "Mutable routines: initialization", and the
244    implementation of the unified routine API in <jitter/jitter-routine.h> . */
245 
/* The unified-API constructor is simply the mutable-routine constructor. */
#define vmprefix_make_routine vmprefix_make_mutable_routine
247 
248 
249 
250 
251 /* Routine unified API: code generation C API.
252  * ************************************************************************** */
253 
254 /* See the comments above in "Mutable routines: code generation C API". */
255 
/* Each unified-API macro is an object-like alias expanding to the name of the
   corresponding mutable-routine macro above. */
#define VMPREFIX_ROUTINE_APPEND_INSTRUCTION  \
  VMPREFIX_MUTABLE_ROUTINE_APPEND_INSTRUCTION
#define VMPREFIX_ROUTINE_APPEND_INSTRUCTION_ID  \
  VMPREFIX_MUTABLE_ROUTINE_APPEND_INSTRUCTION_ID
#define VMPREFIX_ROUTINE_APPEND_REGISTER_PARAMETER  \
  VMPREFIX_MUTABLE_ROUTINE_APPEND_REGISTER_PARAMETER
262 
263 
264 
265 
266 /* Array: special-purpose data.
267  * ************************************************************************** */
268 
269 /* The Array is a convenient place to store special-purpose data, accessible in
270    an efficient way from a VM routine.
271    Every item in special-purpose data is thread-local. */
272 
273 /* The special-purpose data struct.  Every Array contains one of these at unbiased
274    offset VMPREFIX_SPECIAL_PURPOSE_STATE_DATA_UNBIASED_OFFSET from the unbiased
275    beginning of the array.
276    This entire struct is aligned to at least sizeof (jitter_int) bytes.  The
277    entire struct is meant to be always accessed through a pointer-to-volatile,
278    as its content may be altered from signal handlers and from different
   threads.  In particular the user should use the macro
280      VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA
281    defined below and the macros defined from it as accessors.
282    VM code accessing special-purpose data for its own state should use
283      VMPREFIX_SPECIAL_PURPOSE_STATE_DATA
284    and the macros defined from it. */
struct jitter_special_purpose_state_data
{
  /* Notification fields.
   * ***************************************************************** */

  /* This is a Boolean flag, held as a word-sized datum so as to ensure
     atomicity in access.  It is also aligned to at least sizeof (jitter_int)
     bytes.
     Non-zero means that there is at least one notification pending, zero means
     that there are no notifications.  The flag specifies no other details: it
     is meant to be fast to check, with detailed information about each pending
     notification available elsewhere.
     It is the receiver's responsibility to periodically poll for notifications
     in application-specific "safe-points":
     A check can be inserted, for example, in all of these program points:
     a) at every backward branch;
     b) at every procedure entry;
     c) right after a call to each blocking primitive (as long as primitives
       can be interrupted).
     Safe-point checks are designed to be short and fast in the common case.  In
     the common case no action is required, and the VM routine should simply
     fall through.  If an action is required then control should branch off to a
     handler, where the user may implement the required behavior.
     It is mandatory that, as long as notifications can arrive, this field
     is reset to zero (when handling pending notifications) only by a thread
     running VM code in the state containing this struct.
     Other threads are allowed to set this to non-zero, in order to send a
     notification.  */
  jitter_int pending_notifications;

  /* Information about pending signal notifications.  If any signal is pending
     then pending_notifications must also be set, so that a notification check
     can always just quickly check pending_notifications, and then look at more
     details (including in pending_signal_notifications) only in the rare case
     of pending_notifications being true. */
  /* Used as an array indexed by signal id: see
     VMPREFIX_STATE_AND_SIGNAL_TO_PENDING_SIGNAL_NOTIFICATION below. */
  struct jitter_signal_notification *pending_signal_notifications;


  /* Profiling instrumentation fields.
   * ***************************************************************** */
  /* Per-state profiling data; the type is declared in
     <jitter/jitter-profile.h> , included above. */
  struct jitter_profile_runtime profile_runtime;
};
327 
328 
329 
330 
331 /* The Array and volatility.
332  * ************************************************************************** */
333 
334 /* Some fields of The Array, seen from VM code, are meant to be volatile, since
335    they can be set by signal handlers or by other threads.  However it is
336    acceptable to not see such changes immediately after they occur (notifications
337    will get delayed, but not lost) and always accessing such data through a
338    volatile struct is suboptimal.
339 
340    Non-VM code does need a volatile qualifier.
341 
342    Advanced dispatches already need a trick using inline assembly to make the
343    base pointer (a biased pointer to The Array beginning) appear to
   spontaneously change between instructions.  That is sufficient to express the
345    degree of volatility required for this purpose.
346    Simple dispatches, on targets where inline assembly may not be available at
347    all, will use an actual volatile qualifier. */
#if defined (JITTER_DISPATCH_SWITCH)               \
    || defined (JITTER_DISPATCH_DIRECT_THREADING)
  /* Simple dispatches: use a real volatile qualifier, as per the comment
     above. */
# define VMPREFIX_ARRAY_VOLATILE_QUALIFIER volatile
#elif defined (JITTER_DISPATCH_MINIMAL_THREADING)  \
      || defined (JITTER_DISPATCH_NO_THREADING)
  /* Advanced dispatches: the inline-assembly trick described above already
     provides the needed degree of volatility, so no qualifier is used. */
# define VMPREFIX_ARRAY_VOLATILE_QUALIFIER /* nothing */
#else
# error "unknown dispatch: this should not happen"
#endif /* dispatch conditional */
357 
358 
359 
360 
361 /* Array element access: residuals, transfers, slow registers, and more.
362  * ************************************************************************** */
363 
364 /* In order to cover a wider range of addresses with simple base + register
365    addressing the base does not necessarily point to the beginning of the Array;
366    instead the base points to the beginning of the Array plus JITTER_ARRAY_BIAS
367    bytes.
368    FIXME: define the bias as a value appropriate to each architecture.  I think
369    I should just move the definition to jitter-machine.h and provide a default
370    here, in case the definition is missing on some architecture. */
371 
372 /* FIXME: Horrible, horrible, horrible temporary workaround!
373 
374    This is a temporary workaround, very ugly and fragile, to compensate
375    a limitation in jitter-specialize.c , which I will need to rewrite anyway.
376    The problem is that jitter-specialize.c patches snippets to load non-label
377    residuals in a VM-independent way based only on slow-register/memory residual
378    indices, which is incorrect.  By using this particular bias I am cancelling
379    that error.
380    Test case, on a machine having only one register residual and a VM having just
381      one fast register:
382      [luca@moore ~/repos/jitter/_build/native-gcc-9]$ Q=bin/uninspired--no-threading; make $Q && echo 'mov 2, %r1' | libtool --mode=execute valgrind $Q --disassemble - --print-locations
383    If this bias is wrong the slow-register accesses in mov/nR/%rR will use two
384    different offsets, one for reading and another for writing.  With this
385    workaround they will be the same.
   Good, with workaround (biased offset 0x0 from the base in %rbx):
387     # 0x4a43d38: mov/nR/%rR 0x2, 0x20 (21 bytes):
388         0x0000000004effb30 41 bc 02 00 00 00    	movl   $0x2,%r12d
389         0x0000000004effb36 48 c7 43 00 20 00 00 00 	movq   $0x20,0x0(%rbx)
390         0x0000000004effb3e 48 8b 13             	movq   (%rbx),%rdx
391         0x0000000004effb41 4c 89 24 13          	movq   %r12,(%rbx,%rdx,1)
392    Bad, with JITTER_ARRAY_BIAS defined as zero: first write at 0x0(%rbx)
393                                                 then read at 0x10(%rbx):
394     # 0x4a43d38: mov/nR/%rR 0x2, 0x30 (22 bytes):
395         0x0000000004effb30 41 bc 02 00 00 00    	movl   $0x2,%r12d
396         0x0000000004effb36 48 c7 43 00 30 00 00 00 	movq   $0x30,0x0(%rbx)
397         0x0000000004effb3e 48 8b 53 10          	movq   0x10(%rbx),%rdx
398         0x0000000004effb42 4c 89 24 13          	movq   %r12,(%rbx,%rdx,1) */
/* The bias: the distance in bytes between the Array beginning and the address
   actually held in the base pointer.  Making it equal to the size of the
   special-purpose data keeps read and write slow-register offsets consistent,
   compensating for the jitter-specialize.c limitation described above.
   (Previously commented-out experimental values have been removed.) */
#define JITTER_ARRAY_BIAS \
  (sizeof (struct jitter_special_purpose_state_data))
402 
/* Array-based globals are not implemented yet.  For the purpose of computing
   Array offsets I will say they are zero. */
#define VMPREFIX_GLOBAL_NO 0

/* Transfer registers are not implemented yet.  For the purpose of computing
   Array offsets I will say they are zero. */
#define VMPREFIX_TRANSFER_REGISTER_NO 0

/* NOTE(review): the unbiased-offset computations below depend on these two
   counts; update them together when either feature is implemented. */
410 
411 /* Define macros holding offsets in bytes for the first global, memory residual
412    and transfer register, from an initial Array pointer.
413    In general we have to keep into account:
414    - globals (word-sized);
415    - special-purpose state data;
416    - memory residuals (word-sized);
417    - transfer registers (word-sized);
418    - slow registers (vmprefix_any_register-sized and aligned).
419    Notice that memory
420    residuals (meaning residuals stored in The Array) are zero on dispatching
421    modes different from no-threading.  This relies on
422    VMPREFIX_MAX_MEMORY_RESIDUAL_ARITY , defined below, which in its turn depends
423    on VMPREFIX_MAX_RESIDUAL_ARITY, which is machine-generated. */
/* Globals come first in the Array, at unbiased offset zero. */
#define VMPREFIX_FIRST_GLOBAL_UNBIASED_OFFSET  \
  0
/* Special-purpose data follow the globals. */
#define VMPREFIX_SPECIAL_PURPOSE_STATE_DATA_UNBIASED_OFFSET  \
  (VMPREFIX_FIRST_GLOBAL_UNBIASED_OFFSET                     \
   + sizeof (jitter_int) * VMPREFIX_GLOBAL_NO)
/* Memory residuals follow the special-purpose data. */
#define VMPREFIX_FIRST_MEMORY_RESIDUAL_UNBIASED_OFFSET   \
  (VMPREFIX_SPECIAL_PURPOSE_STATE_DATA_UNBIASED_OFFSET   \
   + sizeof (struct jitter_special_purpose_state_data))
/* Transfer registers follow the memory residuals. */
#define VMPREFIX_FIRST_TRANSFER_REGISTER_UNBIASED_OFFSET        \
  (VMPREFIX_FIRST_MEMORY_RESIDUAL_UNBIASED_OFFSET               \
   + sizeof (jitter_int) * VMPREFIX_TRANSFER_REGISTER_NO)
/* Slow registers come last, rounded up to the alignment of
   union vmprefix_any_register . */
#define VMPREFIX_FIRST_SLOW_REGISTER_UNBIASED_OFFSET          \
  JITTER_NEXT_MULTIPLE_OF_POSITIVE                            \
     (VMPREFIX_FIRST_TRANSFER_REGISTER_UNBIASED_OFFSET        \
      + sizeof (jitter_int) * VMPREFIX_TRANSFER_REGISTER_NO,  \
      sizeof (union vmprefix_any_register))
440 
441 /* Expand to the offset of the special-purpose data struct from the Array
442    biased beginning. */
/* (The result is what must be added to the biased base pointer, hence the
   bias subtraction.) */
#define VMPREFIX_SPECIAL_PURPOSE_STATE_DATA_OFFSET       \
  (VMPREFIX_SPECIAL_PURPOSE_STATE_DATA_UNBIASED_OFFSET   \
   - JITTER_ARRAY_BIAS)
446 
447 /* Given an expression evaluating to the Array unbiased beginning, expand to
448    an expression evaluating to a pointer to its special-purpose data.
449    This is convenient for accessing special-purpose data from outside the
450    state -- for example, to set the pending notification flag for another
451    thread.
452    There are two versions of this feature:
453      VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA
454    is meant to be used to access state data for some other thread, or in
455    general out of VM code.
456      VMPREFIX_OWN_SPECIAL_PURPOSE_STATE_DATA
457    is for VM code accessing its own special-purpose data. */
/* Common implementation: qualifier is spliced textually into the cast;
   array_address must be the unbiased Array beginning. */
#define VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA_PRIVATE(qualifier,      \
                                                             array_address)  \
  ((qualifier struct jitter_special_purpose_state_data *)                    \
   (((char *) (array_address))                                               \
    + VMPREFIX_SPECIAL_PURPOSE_STATE_DATA_UNBIASED_OFFSET))
/* Out-of-VM-code accessor: always volatile, as per the section above. */
#define VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA(array_address)       \
  VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA_PRIVATE (volatile,         \
                                                        (array_address))
/* In-VM-code accessor: jitter_array_base holds the biased base, so the bias
   is subtracted back to recover the unbiased beginning. */
#define VMPREFIX_OWN_SPECIAL_PURPOSE_STATE_DATA          \
  VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA_PRIVATE   \
     (VMPREFIX_ARRAY_VOLATILE_QUALIFIER,                 \
      ((char *) jitter_array_base) - JITTER_ARRAY_BIAS)
470 
471 /* Given a state pointer, expand to an expression evaluating to a pointer to
472    the state's special-purpose data.  This is meant for threads accessing
473    other threads' special-purpose data, typically to set notifications. */
/* Given a state pointer, expand to an expression evaluating to a pointer to
   the state's special-purpose data.  This is meant for threads accessing
   other threads' special-purpose data, typically to set notifications. */
#define VMPREFIX_STATE_TO_SPECIAL_PURPOSE_STATE_DATA(state_p)  \
  (VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA                \
     ((state_p)->vmprefix_state_backing.jitter_array))

/* Given a state pointer, expand to an expression evaluating to the
   pending_notification field for the state as an l-value.  This is meant for
   threads sending notifications to other threads. */
#define VMPREFIX_STATE_TO_PENDING_NOTIFICATIONS(state_p)   \
  (VMPREFIX_STATE_TO_SPECIAL_PURPOSE_STATE_DATA (state_p)  \
     ->pending_notifications)

/* Given a state pointer and a signal, expand to an l-value evaluating to the
   pending field of the struct jitter_signal_notification element for the given
   signal in the pointed state.  This is meant for threads sending signal
   notifications to other threads and for C handler functions. */
/* (pending_signal_notifications is used here as an array indexed by
   signal_id.) */
#define VMPREFIX_STATE_AND_SIGNAL_TO_PENDING_SIGNAL_NOTIFICATION(state_p,    \
                                                                 signal_id)  \
  (((VMPREFIX_STATE_TO_SPECIAL_PURPOSE_STATE_DATA (state_p)                   \
       ->pending_signal_notifications)                                        \
    + (signal_id))->pending)
494 
495 
496 /* Expand to the offset of the i-th register of class c in bytes from the Array
497    beginning.
498    The c argument must be a literal C (one-character) identifier.
499    The i argument should always be a compile-time constant for performance, and
500    it is in generated code.
501    The i-th c-class register must be slow, otherwise the offset will be
502    incorrect -- in fact fast registers are, hopefully, not in memory at all.
503 
504    Slow registers come in the Array ordered first by index, then by class.  For
505    example if there are three classes "r" with 4 fast registers, "f" with 7 fast
506    registers and "q" with 2 fast registers, slow registers can be accessed in
507    this order:
508      r4, f7, q2, r5, r8, q3, r6, r9, q4, and so on.
509    Each contiguous group of slow registers spanning every class and starting
510    from the first class (here for example <r5, r6, q3>) is called a "rank".
511    This organization is convenient since changing the number of slow registers
512    doesn't invalidate any offset computed in the past: the Array can simply be
513    resized and its base pointer updated, without changing the code accessing it.
514 
515    This relies on macro such as VMPREFIX_REGISTER_CLASS_NO and
516    VMPREFIX_REGISTER_?_FAST_REGISTER_NO and , defined below in machine-generated
517    code. */
/* Offset = slow-register area base
            + rank size (one any-register slot per class) * slow index
            + the class's position within each rank. */
#define VMPREFIX_SLOW_REGISTER_UNBIASED_OFFSET(c, i)                     \
  (VMPREFIX_FIRST_SLOW_REGISTER_UNBIASED_OFFSET                          \
   + (sizeof (union vmprefix_any_register)                               \
      * (VMPREFIX_REGISTER_CLASS_NO                                      \
         * ((i) - JITTER_CONCATENATE_THREE(VMPREFIX_REGISTER_, c,        \
                                           _FAST_REGISTER_NO))           \
         + JITTER_CONCATENATE_THREE(VMPREFIX_REGISTER_, c, _CLASS_ID))))

/* Expand to the offset of the i-th register of class c in bytes from the base,
   keeping the bias into account. */
#define VMPREFIX_SLOW_REGISTER_OFFSET(c, i)                              \
  (VMPREFIX_SLOW_REGISTER_UNBIASED_OFFSET(c, i) - JITTER_ARRAY_BIAS)

/* Expand to the Array size in bytes, assuming the given number of slow
   registers per class.  This is an allocation size, ignoring the bias. */
#define VMPREFIX_ARRAY_SIZE(slow_register_per_class_no)                  \
  (VMPREFIX_FIRST_SLOW_REGISTER_UNBIASED_OFFSET                          \
   + (sizeof (union vmprefix_any_register)                               \
      * VMPREFIX_REGISTER_CLASS_NO                                       \
      * (slow_register_per_class_no)))
538 
539 
540 
541 
542 /* Residual access.
543  * ************************************************************************** */
544 
545 /* How many residuals we can have at most in memory, which is to say,
546    without counting residuals kept in reserved registers.
547 
548    Implementation note: it would be wrong here to use a CPP conditional based on
549    the value of VMPREFIX_MAX_RESIDUAL_ARITY , as I was doing in a preliminary
   version.  That led to a tricky bug, since VMPREFIX_MAX_RESIDUAL_ARITY ,
551    which is defined below but is not yet available here, simply counted as 0
   for the purposes of evaluating the CPP conditional. */
#ifdef JITTER_DISPATCH_NO_THREADING
  /* No-threading dispatch: any residual in excess of the reserved residual
     registers must live in memory.  When every residual fits in a reserved
     register no memory slot is needed at all; otherwise we need one slot per
     residual beyond the reserved registers. */
# define VMPREFIX_MAX_MEMORY_RESIDUAL_ARITY                         \
    ((VMPREFIX_MAX_RESIDUAL_ARITY > JITTER_RESIDUAL_REGISTER_NO)    \
     ? (VMPREFIX_MAX_RESIDUAL_ARITY - JITTER_RESIDUAL_REGISTER_NO)  \
     : 0)
#else // Not no-threading.
  /* Under every other dispatching model residuals are not kept in The Array
     at all, so no reserved registers exist for them and no Array space is
     set aside: the count is simply zero. */
# define VMPREFIX_MAX_MEMORY_RESIDUAL_ARITY  \
  0
#endif // #ifdef JITTER_DISPATCH_NO_THREADING
570 
571 #ifdef JITTER_DISPATCH_NO_THREADING
572 /* Expand to the offset from the base, in bytes, of the i-th residual.  The
573    given index must be greater than or equal to JITTER_RESIDUAL_REGISTER_NO;
574    residuals with indices lower than that number are not stored in The Array
575    at all.
576    This is not useful with any of the other dispatching modes, where residuals
577    directly follow each VM instruction opcode or thread.  For good performance i
578    should always be a compile-time constant, as it is in machine-generated
579    code.
580    Residuals always have the size of a jitter word, even if some register class
581    may be wider. */
582 /* FIXME: if later I use a different policy than simply checking
583    JITTER_RESIDUAL_REGISTER_NO to decide how many residuals to keep in
584    registers, then I have to change this or meet very nasty bugs. */
/* Note: the argument is parenthesized in the expansion so that non-trivial
   expressions, for example conditionals, work correctly as arguments. */
# define VMPREFIX_RESIDUAL_UNBIASED_OFFSET(i)                        \
    (VMPREFIX_FIRST_MEMORY_RESIDUAL_UNBIASED_OFFSET                  \
     + (sizeof (jitter_int) * ((i) - JITTER_RESIDUAL_REGISTER_NO)))
/* The same offset, corrected for the bias held in the base pointer. */
# define VMPREFIX_RESIDUAL_OFFSET(i)  \
    (VMPREFIX_RESIDUAL_UNBIASED_OFFSET(i) - JITTER_ARRAY_BIAS)
590 #endif // #ifdef JITTER_DISPATCH_NO_THREADING
591 
592 
593 
594 /* Mutable routine text frontend.
595  * ************************************************************************** */
596 
597 /* Parse VM code from the given file or string into the pointed VM routine,
598    which is allowed but not required to be empty.
599    These are simple wrappers around functions implemented in the Bison file. */
/* Wrapper parsing from an already-open stream. */
void
vmprefix_parse_mutable_routine_from_file_star (FILE *input_file,
                                               struct jitter_mutable_routine *p)
  __attribute__ ((nonnull (1, 2)));
/* Wrapper parsing from a file given its name. */
void
vmprefix_parse_mutable_routine_from_file (const char *input_file_name,
                                          struct jitter_mutable_routine *p)
  __attribute__ ((nonnull (1, 2)));
/* Wrapper parsing from a C string. */
void
vmprefix_parse_mutable_routine_from_string (const char *string,
                                            struct jitter_mutable_routine *p)
  __attribute__ ((nonnull (1, 2)));
612 
613 
614 
615 
616 /* Unified routine text frontend.
617  * ************************************************************************** */
618 
/* The C wrappers for the ordinary API can be reused for the unified API, since
   it internally works with mutable routines.  These are plain macro aliases:
   the unified parsing entry points are the very same C functions declared
   above. */
#define vmprefix_parse_routine_from_file_star  \
  vmprefix_parse_mutable_routine_from_file_star
#define vmprefix_parse_routine_from_file  \
  vmprefix_parse_mutable_routine_from_file
#define vmprefix_parse_routine_from_string  \
  vmprefix_parse_mutable_routine_from_string
627 
628 
629 
630 
631 /* Machine-generated data structures.
632  * ************************************************************************** */
633 
634 /* Declare a few machine-generated data structures, which together define a VM. */
635 
/* Threads or pointers to native code blocks of course don't exist with
   switch-dispatching. */
#ifndef JITTER_DISPATCH_SWITCH
/* Every possible thread, indexed by enum jitter_specialized_instruction_opcode .
   This is used at specialization time, and the user shouldn't need to touch
   it. */
extern const jitter_thread *
vmprefix_threads;

/* VM instruction end labels, in the same order as vmprefix_threads.  These are
   not all reachable at run time, but having them in a global array might
   prevent older GCCs from being too clever in reordering blocks. */
extern const jitter_thread *
vmprefix_thread_ends;

/* The size, in chars, of each thread's native code.  The elements are in the
   same order of vmprefix_threads.  Sizes could conceptually be of type size_t ,
   but in order to be defensive I'm storing pointer differences as signed
   values, so that we may catch compilation problems: if any VM instruction end
   *precedes* its VM instruction beginning, then the compiler has reordered
   labels, which would have disastrous effects with replicated code. */
extern const long *
vmprefix_thread_sizes;
#endif // #ifndef JITTER_DISPATCH_SWITCH
660 
/* A hash table mapping meta-instruction names to meta-instruction descriptors.
   This is defined in the machine-generated vm/meta-instructions.c . */
extern struct jitter_hash_table
vmprefix_meta_instruction_hash;

/* An array specifying every existing meta-instruction, defined in the order of
   enum vmprefix_meta_instruction_id .  This is defined in vm/meta-instructions.c ,
   which is machine-generated. */
extern const struct jitter_meta_instruction
vmprefix_meta_instructions [];

/* An array whose indices are specialised instruction opcodes, and whose
   elements are the corresponding unspecialised instruction opcodes -- or -1
   when there is no such mapping. */
extern const int
vmprefix_specialized_instruction_to_unspecialized_instruction [];

/* How many residual parameters each specialized instruction has.  The
   actual array definition is machine-generated. */
extern const size_t
vmprefix_specialized_instruction_residual_arities [];

/* An array of bitmasks, one per specialized instruction.  Each bitmask holds
   one bit per residual argument, counting from the least significant (the first
   residual arg maps to element & (1 << 0), the second to element & (1 << 1),
   and so on).
   Each bit is 1 if and only if the corresponding residual argument is a label
   or a fast label.
   Only residual arguments are counted: for example a specialized instruction
   foo_n1_lR_r2 would have a mask with the *first* bit set. */
extern const unsigned long // FIXME: possibly use a shorter type when possible
vmprefix_specialized_instruction_label_bitmasks [];

/* Like vmprefix_specialized_instruction_label_bitmasks , but for fast labels
   only.
   The actual definition is conditionalized so as to appear only when
   needed according to the dispatching model. */
extern const unsigned long // FIXME: possibly use a shorter type when possible
vmprefix_specialized_instruction_fast_label_bitmasks [];

/* An array of booleans in which each element is true iff the specialized
   instruction whose opcode is the index is relocatable. */
extern const bool
vmprefix_specialized_instruction_relocatables [];

/* An array of booleans in which each element is true iff the specialized
   instruction whose opcode is the index is a caller. */
extern const bool
vmprefix_specialized_instruction_callers [];

/* An array of booleans in which each element is true iff the specialized
   instruction whose opcode is the index is a callee. */
extern const bool
vmprefix_specialized_instruction_callees [];

/* This big array of strings contains the name of each specialized instruction,
   in the order of enum vmprefix_specialized_instruction_opcode . */
extern const char* const
vmprefix_specialized_instruction_names [];
719 
720 
/* A pointer to a struct containing const pointers to the structures above, plus
   sizes; there will be only one instance of this per VM, machine-generated.
   Each program data structure contains a pointer to that instance, so that
   VM-independent functions, given a program, will have everything needed to
   work.  The one instance of struct jitter_vm for the vmprefix VM. */
extern struct jitter_vm * const
vmprefix_vm;

/* A pointer to a struct containing VM-specific parameters set in part when
   calling jitterc and in part when compiling the generated C code, such as the
   dispatching model and the number of fast registers.  The data is fully
   initialized only after a call to vmprefix_initialize . */
extern const
struct jitter_vm_configuration * const
vmprefix_vm_configuration;
736 
737 
738 
739 
740 /* Compatibility macros.
741  * ************************************************************************** */
742 
/* It is convenient, for future extensibility, to expose an interface in which
   some VM-independent functions and data structures actually look as if they
   were specific to the user VM.  Each alias below simply renames a
   VM-independent identifier into the vmprefix namespace. */

/* What the user refers to as struct vmprefix_mutable_routine is actually a
   struct jitter_mutable_routine , whose definition is VM-independent. */
#define vmprefix_mutable_routine jitter_mutable_routine

/* Same for executable routines. */
#define vmprefix_executable_routine jitter_executable_routine

/* Same for unified routines. */
#define vmprefix_routine jitter_routine

/* Destroy a non-executable routine (routine initialization is actually
   VM-specific). */
#define vmprefix_destroy_mutable_routine jitter_destroy_mutable_routine

/* Destroy a unified routine. */
#define vmprefix_destroy_routine jitter_destroy_routine

/* Pin a unified routine. */
#define vmprefix_pin_routine jitter_pin_routine

/* Unpin a unified routine. */
#define vmprefix_unpin_routine jitter_unpin_routine

/* Print VM configuration. */
#define vmprefix_print_vm_configuration jitter_print_vm_configuration

/* Generic routine construction API. */
#define vmprefix_label \
  jitter_label
#define vmprefix_fresh_label \
  jitter_fresh_label

/* Mutable routine option API. */
#define vmprefix_set_mutable_routine_option_slow_literals_only \
  jitter_set_mutable_routine_option_slow_literals_only
#define vmprefix_set_mutable_routine_option_slow_registers_only \
  jitter_set_mutable_routine_option_slow_registers_only
#define vmprefix_set_mutable_routine_option_slow_literals_and_registers_only \
  jitter_set_mutable_routine_option_slow_literals_and_registers_only
#define vmprefix_set_mutable_routine_option_add_final_exitvm \
  jitter_set_mutable_routine_option_add_final_exitvm
#define vmprefix_set_mutable_routine_option_optimization_rewriting \
  jitter_set_mutable_routine_option_optimization_rewriting
790 
/* Printing and disassembling: ordinary API. */
#define vmprefix_mutable_routine_print \
  jitter_mutable_routine_print
#define vmprefix_executable_routine_disassemble \
  jitter_executable_routine_disassemble

/* Mutable routine construction API. */
#define vmprefix_mutable_routine_append_instruction_name \
  jitter_mutable_routine_append_instruction_name
#define vmprefix_mutable_routine_append_meta_instruction \
  jitter_mutable_routine_append_meta_instruction
#define vmprefix_mutable_routine_append_label \
  jitter_mutable_routine_append_label
#define vmprefix_mutable_routine_append_symbolic_label \
  jitter_mutable_routine_append_symbolic_label
#define vmprefix_mutable_routine_append_register_parameter \
  jitter_mutable_routine_append_register_parameter
#define vmprefix_mutable_routine_append_literal_parameter \
  jitter_mutable_routine_append_literal_parameter
#define vmprefix_mutable_routine_append_signed_literal_parameter \
  jitter_mutable_routine_append_signed_literal_parameter
#define vmprefix_mutable_routine_append_unsigned_literal_parameter \
  jitter_mutable_routine_append_unsigned_literal_parameter
#define vmprefix_mutable_routine_append_pointer_literal_parameter \
  jitter_mutable_routine_append_pointer_literal_parameter
#define vmprefix_mutable_routine_append_label_parameter \
  jitter_mutable_routine_append_label_parameter
#define vmprefix_mutable_routine_append_symbolic_label_parameter \
  jitter_mutable_routine_append_symbolic_label_parameter

/* Executable routine destruction. */
#define vmprefix_destroy_executable_routine \
  jitter_destroy_executable_routine

/* Making executable routines from mutable routines. */
#define vmprefix_make_executable_routine \
  jitter_make_executable_routine
828 
/* Unified routine option API.  These reuse the mutable-routine option
   functions, since a unified routine internally contains a mutable routine. */
#define vmprefix_set_routine_option_slow_literals_only \
  jitter_set_mutable_routine_option_slow_literals_only
#define vmprefix_set_routine_option_slow_registers_only \
  jitter_set_mutable_routine_option_slow_registers_only
#define vmprefix_set_routine_option_slow_literals_and_registers_only \
  jitter_set_mutable_routine_option_slow_literals_and_registers_only
#define vmprefix_set_routine_option_add_final_exitvm \
  jitter_set_mutable_routine_option_add_final_exitvm
#define vmprefix_set_routine_option_optimization_rewriting \
  jitter_set_mutable_routine_option_optimization_rewriting

/* Printing and disassembling: unified API.  These do not follow the pattern of
   the rest: wrapped identifiers here are the names of C functions specific to
   the unified API */
#define vmprefix_routine_print \
  jitter_routine_print
#define vmprefix_routine_disassemble \
  jitter_routine_disassemble

/* Unified routine construction API. */
#define vmprefix_routine_append_instruction_name \
  jitter_mutable_routine_append_instruction_name
#define vmprefix_routine_append_meta_instruction \
  jitter_mutable_routine_append_meta_instruction
#define vmprefix_routine_append_label \
  jitter_mutable_routine_append_label
#define vmprefix_routine_append_symbolic_label \
  jitter_mutable_routine_append_symbolic_label
#define vmprefix_routine_append_register_parameter \
  jitter_mutable_routine_append_register_parameter
#define vmprefix_routine_append_literal_parameter \
  jitter_mutable_routine_append_literal_parameter
#define vmprefix_routine_append_signed_literal_parameter \
  jitter_mutable_routine_append_signed_literal_parameter
#define vmprefix_routine_append_unsigned_literal_parameter \
  jitter_mutable_routine_append_unsigned_literal_parameter
#define vmprefix_routine_append_pointer_literal_parameter \
  jitter_mutable_routine_append_pointer_literal_parameter
#define vmprefix_routine_append_label_parameter \
  jitter_mutable_routine_append_label_parameter
#define vmprefix_routine_append_symbolic_label_parameter \
  jitter_mutable_routine_append_symbolic_label_parameter

/* Unified routine destruction. */
#define vmprefix_destroy_routine                                           \
  /* This does not follow the pattern of the rest: the wrapped identifier  \
     here is the name of a C function specific to the unified API. */      \
  jitter_destroy_routine

/* The unified API has no facility to explicitly make executable routines: their
   very existence is hidden.  For this reason some of the macros above, such as
   vmprefix_make_executable_routine, have no unified counterpart here. */
882 
/* Profiling.  Apart from vmprefix_state_profile, which returns a pointer to
   the profile within a pointed state structure, everything else here has the
   same API as the functionality in jitter/jitter-profile.h , without the VM
   pointer.
   Notice that this API does nothing useful unless one of the CPP macros
   JITTER_PROFILE_COUNT or JITTER_PROFILE_SAMPLE is defined. */
#define vmprefix_profile_runtime  \
  jitter_profile_runtime /* the struct name */
#define vmprefix_profile  \
  jitter_profile /* the struct name */
// FIXME: no: distinguish between struct jitter_profile_runtime and its user-friendly variant

/* Return a pointer to the profile runtime held within the pointed state. */
struct jitter_profile_runtime *
vmprefix_state_profile_runtime (struct vmprefix_state *s)
  __attribute__ ((returns_nonnull, nonnull (1)));

/* Make a fresh profile runtime; the caller is responsible for destroying it
   with vmprefix_profile_destroy . */
struct vmprefix_profile_runtime*
vmprefix_profile_runtime_make (void)
  __attribute__ ((returns_nonnull));
#define vmprefix_profile_destroy jitter_profile_destroy

/* Reset the pointed profile runtime to its pristine state. */
void
vmprefix_profile_runtime_clear (struct vmprefix_profile_runtime *p)
  __attribute__ ((nonnull (1)));

/* Merge the data from the second profile runtime into the first. */
void
vmprefix_profile_runtime_merge_from (struct vmprefix_profile_runtime *to,
                                     const struct vmprefix_profile_runtime *from)
  __attribute__ ((nonnull (1, 2)));

/* Like vmprefix_profile_runtime_merge_from , taking the source data from the
   pointed state. */
void
vmprefix_profile_runtime_merge_from_state (struct vmprefix_profile_runtime *to,
                                   const struct vmprefix_state *from_state)
  __attribute__ ((nonnull (1, 2)));

/* Make user-friendly profiles from a profile runtime, keyed by unspecialized
   or specialized instruction respectively. */
struct vmprefix_profile *
vmprefix_profile_unspecialized_from_runtime
   (const struct vmprefix_profile_runtime *p)
  __attribute__ ((returns_nonnull, nonnull (1)));
struct vmprefix_profile *
vmprefix_profile_specialized_from_runtime (const struct vmprefix_profile_runtime
                                           *p)
  __attribute__ ((returns_nonnull, nonnull (1)));

/* Print a human-readable version of the pointed profile runtime to the given
   print context, by unspecialized or specialized instruction respectively. */
void
vmprefix_profile_runtime_print_unspecialized
   (jitter_print_context ct,
    const struct vmprefix_profile_runtime *p)
  __attribute__ ((nonnull (1, 2)));
void
vmprefix_profile_runtime_print_specialized (jitter_print_context ct,
                                            const struct vmprefix_profile_runtime
                                            *p)
  __attribute__ ((nonnull (1, 2)));
930 
931 
932 
933 
934 /* Register class types.
935  * ************************************************************************** */
936 
/* Return a pointer to a statically allocated register class descriptor, given
   the register class character, or NULL if the character does not represent a
   valid register class.  Declared pure: the result depends only on the
   argument and on constant data.

   A constant array indexed by a character would have been more efficient, but
   relying on character ordering is not portable, at least in theory.  A
   non-constant array could be initialized in a portable way, but that would
   probably not be worth the trouble. */
const struct jitter_register_class *
vmprefix_register_class_character_to_register_class (char c)
  __attribute__ ((pure));
948 
949 
/* A constant array of constant pointers to every existing register class
   descriptor, ordered by class id; each pointer within the array refers the
   only existing class descriptor for its class.  The number of elements is
   VMPREFIX_REGISTER_CLASS_NO , but that is not declared because the definition
   of VMPREFIX_REGISTER_CLASS_NO comes later in generated code.

   This is useful when the user code enumerates every existing register class,
   particularly for debugging.
   FIXME: the identifier is misspelled ("regiter" for "register"); it cannot be
   renamed here without also renaming the machine-generated definition, since
   both sides must agree for linking to succeed. */
extern const struct jitter_register_class * const
vmprefix_regiter_classes [];
960 
961 
962 
963 
964 /* Array re-allocation.
965  * ************************************************************************** */
966 
/* Make the Array in the pointed state large enough to accommodate the given
   number of slow registers per class, adjusting the Array pointer as needed
   and recording information about the new size in the state; change nothing
   if the array is already large enough.  Return the new base.
   For example passing 3 as the value of slow_register_no_per_class would make
   place for three slow registers per register class: if the current VM had two
   classes 'r' and 'f' then the function would ensure that the Array can hold
   three 'r' and three 'f' slow registers, independently from the number
   of fast 'r' or 'f' registers.
   Any new elements allocated in the Array are left uninitialized, but its old
   content remains valid. */
char *
vmprefix_make_place_for_slow_registers (struct vmprefix_state *s,
                                        jitter_int slow_register_no_per_class)
  __attribute__ ((noinline));
982 
983 
984 
985 
986 /* **************************************************************************
987  * Evrything following this point is for internal use only.
988  * ************************************************************************** */
989 
990 
991 
992 
993 /* Defect tables.
994  * ************************************************************************** */
995 
/* It is harmless to declare these unconditionally, even if they are only used
   when patch-ins are available.  See jitter/jitter-defect.h . */

/* The worst-case defect table.  This is a global constant array, having one
   element per specialized instruction. */
extern const jitter_uint
vmprefix_worst_case_defect_table [];

/* The actual defect table, to be filled at initialization time. */
extern jitter_uint
vmprefix_defect_table [];
1007 
1008 
1009 
1010 
1011 /* Instruction rewriter.
1012  * ************************************************************************** */
1013 
/* Try to apply each rewrite rule in order and run the first one that matches,
   if any, on the pointed program.  When a rule fires the following ones are not
   checked but if a rule, after removing the last few instructions, adds another
   one, the addition will trigger another rewrite in its turn, and so on until
   no more rewriting is possible.  The rewriting process is inherently
   recursive.

   The implementation of this function is machine-generated, but the user can
   add her own code in the rewriter-c block, which ends up near the beginning of
   this function body, right after JITTTER_REWRITE_FUNCTION_PROLOG_ .  The
   formal argument seen from the body is named jitter_mutable_routine_p .

   Rationale: the argument is named differently in the body in order to keep
   the namespace conventions and, more importantly, to encourage the user to
   read this comment.

   The user must *not* append labels to the VM routines during rewriting: that
   would break it.  The user is responsible for destroying any instruction she
   removes, including their arguments.  The user can assume that
   jitter_rewritable_instruction_no is strictly greater than zero. */
void
vmprefix_rewrite (struct jitter_mutable_routine *jitter_mutable_routine_p);
1036 
1037 
1038 
1039 
1040 /* Program points at run time in executable routines.
1041  * ************************************************************************** */
1042 
/* Provide a nice name for a program point type which looks VM-dependent. */
typedef jitter_program_point
vmprefix_program_point;

/* Again, provide a VM-dependent alias for an actually VM-independent macro. */
#define VMPREFIX_EXECUTABLE_ROUTINE_BEGINNING(_jitter_executable_routine_ptr)  \
  JITTER_EXECUTABLE_ROUTINE_BEGINNING(_jitter_executable_routine_ptr)
1050 
1051 
1052 
1053 
1054 /* Program points at run time in routines: unified routine API.
1055  * ************************************************************************** */
1056 
/* Like VMPREFIX_EXECUTABLE_ROUTINE_BEGINNING for the unified routine API.
   This makes the routine executable first, if that has not happened yet. */
#define VMPREFIX_ROUTINE_BEGINNING(_jitter_routine)                \
  JITTER_EXECUTABLE_ROUTINE_BEGINNING                              \
     (jitter_routine_make_executable_if_needed (_jitter_routine))
1061 
1062 
1063 
1064 /* Executing code from an executable routine.
1065  * ************************************************************************** */
1066 
/* Make sure that the pointed state has enough slow registers to run the pointed
   executable routine; if that is not the case, allocate more slow registers. */
void
vmprefix_ensure_enough_slow_registers_for_executable_routine
   (const struct jitter_executable_routine *er, struct vmprefix_state *s)
  __attribute__ ((nonnull (1, 2)));

/* Run VM code starting from the given program point (which must belong to some
   executable routine), in the pointed VM state.

   Since no executable routine is given this cannot automatically guarantee that
   the slow registers in the pointed state are in sufficient number; it is the
   user's responsibility to check, if needed.

   This function is also usable with the unified routine API. */
void
vmprefix_branch_to_program_point (vmprefix_program_point p,
                                  struct vmprefix_state *s)
  __attribute__ ((nonnull (1, 2)));

/* Run VM code starting from the beginning of the pointed executable routine,
   in the pointed VM state.  This does ensure that the slow registers in
   the pointed state are in sufficient number, by calling
   vmprefix_ensure_enough_slow_registers_for_executable_routine .
   This function is slightly less efficient than
   vmprefix_branch_to_program_point , and vmprefix_branch_to_program_point
   should be preferred in contexts where C code repeatedly calls VM code. */
void
vmprefix_execute_executable_routine (const struct jitter_executable_routine *er,
                                     struct vmprefix_state *s)
  __attribute__ ((nonnull (1, 2)));
1098 
1099 
1100 
1101 
1102 /* Executing code: unified routine API.
1103  * ************************************************************************** */
1104 
/* Like vmprefix_ensure_enough_slow_registers_for_executable_routine , with the
   unified API. */
void
vmprefix_ensure_enough_slow_registers_for_routine
   (jitter_routine r, struct vmprefix_state *s)
  __attribute__ ((nonnull (1, 2)));

/* vmprefix_branch_to_program_point , declared above, is also usable with the
   unified routine API. */

/* Like vmprefix_execute_executable_routine, for a unified routine. */
void
vmprefix_execute_routine (jitter_routine r,
                          struct vmprefix_state *s)
  __attribute__ ((nonnull (1, 2)));
1120 
1121 
1122 
1123 
1124 /* Low-level debugging features relying on assembly: data locations.
1125  * ************************************************************************** */
1126 
/* Dump human-readable information about data locations to the given print
   context.
   This is a trivial VM-dependent wrapper around jitter_dump_data_locations,
   which does not require a struct jitter_vm pointer as input. */
void
vmprefix_dump_data_locations (jitter_print_context output)
  __attribute__ ((nonnull (1)));
1134 
1135 
1136 
1137 
1138 /* Sample profiling: internal API.
1139  * ************************************************************************** */
1140 
/* The functions in this section are used internally by vm2.c, only when
   sample-profiling is enabled.  In fact these functions are not defined at all
   otherwise. */

/* Initialise global sampling-related structures. */
// FIXME: no: distinguish struct jitter_profile_runtime and struct jitter_profile
void
vmprefix_profile_sample_initialize (void);

/* Begin sampling, attributing samples to the pointed state. */
void
vmprefix_profile_sample_start (struct vmprefix_state *state)
  __attribute__ ((nonnull (1)));

/* Stop sampling. */
void
vmprefix_profile_sample_stop (void);
1158 
1159 
1160 
1161 
1162 /* Machine-generated code.
1163  * ************************************************************************** */
1164 
1165 /* What follows could be conceptually split into several generated header files,
1166    but having too many files would be inconvenient for the user to compile and
1167    link.  For this reason we generate a single header. */
1168 
1169