/* VM library: main VM C file template.

   Copyright (C) 2016, 2017, 2018, 2019, 2020 Luca Saiu
   Written by Luca Saiu

   This file is part of Jitter.

   Jitter is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Jitter is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Jitter.  If not, see <http://www.gnu.org/licenses/>. */


/* Generated file warning.
 * ************************************************************************** */

/* Unless this file is named exactly "vm1.c", without any prefix, you are
   looking at a machine-generated derived file.  The original source is the vm.c
   template from Jitter, with added code implementing the vmprefix VM. */




#include <assert.h>
#include <string.h>

#include <jitter/jitter.h>

#if defined (JITTER_PROFILE_SAMPLE)
#include <sys/time.h>
#endif // #if defined (JITTER_PROFILE_SAMPLE)

#include <jitter/jitter-hash.h>
#include <jitter/jitter-instruction.h>
#include <jitter/jitter-mmap.h>
#include <jitter/jitter-mutable-routine.h>
#include <jitter/jitter-print.h>
#include <jitter/jitter-rewrite.h>
#include <jitter/jitter-routine.h>
#include <jitter/jitter-routine-parser.h>
#include <jitter/jitter-specialize.h>
#include <jitter/jitter-defect.h>
#include <jitter/jitter-patch-in.h>

/* I don't need to include <jitter/jitter-executor.h> here, nor to define
   JITTER_THIS_CAN_INCLUDE_JITTER_EXECUTOR_H; doing so carelessly might
   lead to subtle bugs that are better prevented.
   Of course I can reconsider this decision in the future. */

#include <jitter/jitter-data-locations.h>

#include "vmprefix-vm.h"
//#include "vmprefix-specialized-instructions.h"
//#include "vmprefix-meta-instructions.h"
#include <jitter/jitter-fatal.h>




/* Check requirements for particular features.
 * ************************************************************************** */

/* VM sample-profiling is only supported with GCC.  It is not worth activating
   with other compilers, since the resulting numbers would be unreliable. */
#if  defined (JITTER_PROFILE_SAMPLE)        \
     && ! defined (JITTER_HAVE_ACTUAL_GCC)
# error "Sample-profiling is only reliable with GCC: it requires (machine-independent)"
# error "GNU C extended asm, and it is not worth supporting other compilers if"
# error "the numbers turn out to be unreliable in the end."
#endif




/* Machine-generated data structures.
 * ************************************************************************** */

/* Machine-generated data structures defining this VM.  Initializing a static
   struct is problematic, as it requires constant expressions for each field --
   and const pointers don't qualify.  This is why we initialize the struct
   fields below in vmprefix_initialize. */
static struct jitter_vm
the_vmprefix_vm;

struct jitter_vm * const
vmprefix_vm = & the_vmprefix_vm;

struct jitter_list_header * const
vmprefix_states = & the_vmprefix_vm.states;

/* It is convenient to have this initialised at start up, even before calling
   any initialisation function.  This makes it reliable to read this when, for
   example, handling --version . */
static const struct jitter_vm_configuration
vmprefix_vm_the_configuration
  = {
      VMPREFIX_LOWER_CASE_PREFIX /* lower_case_prefix */,
      VMPREFIX_UPPER_CASE_PREFIX /* upper_case_prefix */,
      VMPREFIX_MAX_FAST_REGISTER_NO_PER_CLASS
        /* max_fast_register_no_per_class */,
      VMPREFIX_MAX_NONRESIDUAL_LITERAL_NO /* max_nonresidual_literal_no */,
      VMPREFIX_DISPATCH_HUMAN_READABLE /* dispatch_human_readable */,
      /* The instrumentation field can be seen as a bit map.  See the comment
         in jitter/jitter-vm.h . */
      (jitter_vm_instrumentation_none
#if defined (JITTER_PROFILE_COUNT)
       | jitter_vm_instrumentation_count
#endif
#if defined (JITTER_PROFILE_SAMPLE)
       | jitter_vm_instrumentation_sample
#endif
       ) /* instrumentation */
    };

const struct jitter_vm_configuration * const
vmprefix_vm_configuration
  = & vmprefix_vm_the_configuration;
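
/* As an example of how this can be consumed, user code handling an option such
   as --version might read fields directly thru the public pointer.  This is
   only an illustrative sketch, not code from this file; the fields accessed
   are the ones initialised above, and the output format is arbitrary:

     const struct jitter_vm_configuration *c = vmprefix_vm_configuration;
     printf ("%s VM, dispatch: %s\n",
             c->lower_case_prefix, c->dispatch_human_readable);
*/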




/* Initialization and finalization: internal functions, not for the user.
 * ************************************************************************** */

/* Initialize threads.  This only needs to be called once at initialization, and
   the user doesn't need to bother with it.  Defined along with the executor. */
void
vmprefix_initialize_threads (void);

/* Check that the encodings in enum jitter_specialized_instruction_opcode (as
   used in the specializer) are coherent with machine-generated code.  Making a
   mistake here would introduce subtle bugs, so it's better to be defensive. */
static void
vmprefix_check_specialized_instruction_opcode_once (void)
{
  static bool already_checked = false;
  if (already_checked)
    return;

  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eINVALID)
          == jitter_specialized_instruction_opcode_INVALID);
  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eBEGINBASICBLOCK)
          == jitter_specialized_instruction_opcode_BEGINBASICBLOCK);
  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eEXITVM)
          == jitter_specialized_instruction_opcode_EXITVM);
  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eDATALOCATIONS)
          == jitter_specialized_instruction_opcode_DATALOCATIONS);
  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eNOP)
          == jitter_specialized_instruction_opcode_NOP);
  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eUNREACHABLE0)
          == jitter_specialized_instruction_opcode_UNREACHABLE0);
  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eUNREACHABLE1)
          == jitter_specialized_instruction_opcode_UNREACHABLE1);
  assert (((enum jitter_specialized_instruction_opcode)
           vmprefix_specialized_instruction_opcode__eUNREACHABLE2)
          == jitter_specialized_instruction_opcode_UNREACHABLE2);

  already_checked = true;
}

/* A prototype for a machine-generated function not needing a public
   declaration, only called thru a pointer within struct jitter_vm . */
int
vmprefix_specialize_instruction (struct jitter_mutable_routine *p,
                                 const struct jitter_instruction *ins);

/* Initialize the pointed special-purpose data structure. */
static void
vmprefix_initialize_special_purpose_data
   (volatile struct jitter_special_purpose_state_data *d)
{
  d->pending_notifications = 0;
  jitter_initialize_pending_signal_notifications
     (& d->pending_signal_notifications);

  /* Initialise profiling fields. */
  jitter_profile_runtime_initialize (vmprefix_vm,
                                     (struct jitter_profile_runtime *)
                                     & d->profile_runtime);
}

/* Finalize the pointed special-purpose data structure. */
static void
vmprefix_finalize_special_purpose_data
   (volatile struct jitter_special_purpose_state_data *d)
{
  jitter_finalize_pending_signal_notifications
     (d->pending_signal_notifications);

  jitter_profile_runtime_finalize (vmprefix_vm,
                                   (struct jitter_profile_runtime *)
                                   & d->profile_runtime);
}




/* Check that we link with the correct Jitter library.
 * ************************************************************************** */

/* It is possible to make a mistake at link time, and link a VM compiled with
   some threading model with the Jitter runtime for a different model.  That
   would cause crashes that are better prevented.  This is a way to detect such
   mistakes very early, by causing a link-time failure in case of mismatch. */
extern volatile const bool
JITTER_DISPATCH_DEPENDENT_GLOBAL_NAME;




/* Low-level debugging features relying on assembly: data locations.
 * ************************************************************************** */

#if defined (JITTER_HAVE_KNOWN_BINARY_FORMAT) && ! defined (JITTER_DISPATCH_SWITCH)
/* A declaration for data locations, as visible from C.  The global is defined in
   assembly in its own separate section thru the machinery in
   jitter/jitter-sections.h . */
extern const char
JITTER_DATA_LOCATION_NAME(vmprefix) [];
#endif // #if ...

void
vmprefix_dump_data_locations (jitter_print_context output)
{
#ifndef JITTER_DISPATCH_SWITCH
  jitter_dump_data_locations (output, & the_vmprefix_vm);
#else
  jitter_print_char_star (output,
                          "VM data location information unavailable\n");
#endif // #ifndef JITTER_DISPATCH_SWITCH
}




/* Initialization and finalization.
 * ************************************************************************** */

#ifdef JITTER_HAVE_PATCH_IN
JITTER_DEFECT_DESCRIPTOR_DECLARATIONS_(vmprefix)
JITTER_PATCH_IN_DESCRIPTOR_DECLARATIONS_(vmprefix)
#endif // #ifdef JITTER_HAVE_PATCH_IN

#ifndef JITTER_DISPATCH_SWITCH
/* True iff thread sizes are all non-negative and non-huge.  We refuse to
   disassemble otherwise, and when replication is enabled we refuse to run
   altogether.  See the comment right below. */
static bool
vmprefix_threads_validated = false;
#endif // #ifndef JITTER_DISPATCH_SWITCH

/* Omit vmprefix_validate_thread_sizes_once for switch-dispatching, as threads
   don't exist at all in that case. */
#ifndef JITTER_DISPATCH_SWITCH
/* Check that VM instruction sizes are all non-negative, and that no thread
   starts before the end of the previous one.  Even one violation of these
   conditions is a symptom that the code has not been compiled with
   -fno-reorder-blocks, whose absence would have disastrous effects with
   replication.  It's better to validate threads at startup and fail
   immediately than to crash at run time.

   If even one thread appears to be wrong then refuse to disassemble when
   replication is disabled, and refuse to run altogether if replication is
   enabled. */
static void
vmprefix_validate_threads_once (void)
{
  /* Return if this is not the first time we got here. */
  static bool already_validated = false;
  if (already_validated)
    return;

#ifdef JITTER_REPLICATE
# define JITTER_FAIL(error_text)                                             \
    do                                                                       \
      {                                                                      \
        fprintf (stderr,                                                     \
                 "About specialized instruction %i (%s) at %p, size %liB\n", \
                 i, vmprefix_specialized_instruction_names [i],              \
                 vmprefix_threads [i],                                       \
                 vmprefix_thread_sizes [i]);                                 \
        jitter_fatal ("%s: you are not compiling with -fno-reorder-blocks",  \
                      error_text);                                           \
      }                                                                      \
    while (false)
#else
# define JITTER_FAIL(ignored_error_text)  \
    do                                    \
      {                                   \
        everything_valid = false;         \
        goto out;                         \
      }                                   \
    while (false)
#endif // #ifdef JITTER_REPLICATE

  /* The minimum address the next instruction code has to start at.

     This relies on NULL being zero, or in general lower in magnitude than any
     valid pointer.  It is not worth the trouble to be pedantic, as this will be
     true on every architecture where I can afford low-level tricks. */
  jitter_thread lower_bound = NULL;

  /* Check every thread.  We rely on the order here, following specialized
     instruction opcodes. */
  int i;
  bool everything_valid = true;
  for (i = 0; i < VMPREFIX_SPECIALIZED_INSTRUCTION_NO; i ++)
    {
      jitter_thread thread = vmprefix_threads [i];
      long size = vmprefix_thread_sizes [i];

      /* Check that the current thread has non-negative non-huge size and
         doesn't start before the end of the previous one.  If this is true for
         all threads we can conclude that they are non-overlapping as well. */
      if (__builtin_expect (size < 0, false))
        JITTER_FAIL("a specialized instruction has negative code size");
      if (__builtin_expect (size > (1 << 24), false))
        JITTER_FAIL("a specialized instruction has huge code size");
      if (__builtin_expect (lower_bound > thread, false))
        JITTER_FAIL("non-sequential thread");

      /* The next thread cannot start before the end of the current one. */
      lower_bound = ((char*) thread) + size;
    }

#undef JITTER_FAIL

#ifndef JITTER_REPLICATE
 out:
#endif // #ifndef JITTER_REPLICATE

  /* If we have validated every thread size then disassembling appears safe. */
  if (everything_valid)
    vmprefix_threads_validated = true;

  /* We have checked the thread sizes, once and for all.  If this function gets
     called again, thru a second vmprefix initialization, it will immediately
     return. */
  already_validated = true;
}
#endif // #ifndef JITTER_DISPATCH_SWITCH

#ifdef JITTER_HAVE_PATCH_IN
/* The actual defect table.  We only need it when patch-ins are in use. */
jitter_uint
vmprefix_defect_table [VMPREFIX_SPECIALIZED_INSTRUCTION_NO];
#endif // #ifdef JITTER_HAVE_PATCH_IN

void
vmprefix_initialize (void)
{
  /* Check that the Jitter library we linked is the right one.  This check is
     actually only useful to force the global to be used.  I prefer not to use
     an assert, because assertions can be disabled. */
  if (! JITTER_DISPATCH_DEPENDENT_GLOBAL_NAME)
    jitter_fatal ("impossible to reach: the thing should fail at link time");

#ifdef JITTER_REPLICATE
  /* Initialize the executable-memory subsystem. */
  jitter_initialize_executable ();
#endif // #ifdef JITTER_REPLICATE

  /* Initialise the print-context machinery. */
  jitter_print_initialize ();

  /* Perform some sanity checks which only need to be run once. */
  vmprefix_check_specialized_instruction_opcode_once ();

  /* We have to initialize threads before vmprefix_threads, since the struct
     needs threads. */
  vmprefix_initialize_threads ();

#ifndef JITTER_DISPATCH_SWITCH
  /* Validate threads, to make sure the generated code was not compiled with
     incorrect options.  This only needs to be done once. */
  vmprefix_validate_threads_once ();
#endif // ifndef JITTER_DISPATCH_SWITCH

  /* Initialize the object pointed by vmprefix_vm (see the comment above as to
     why we do it here).  Before actually setting the fields to valid data, fill
     the whole struct with a -- hopefully -- invalid pattern, just to catch
     bugs. */
  static bool vm_struct_initialized = false;
  if (! vm_struct_initialized)
    {
      memset (& the_vmprefix_vm, 0xff, sizeof (struct jitter_vm));

      /* Make the configuration struct reachable from the VM struct. */
      the_vmprefix_vm.configuration = vmprefix_vm_configuration;
      //vmprefix_print_vm_configuration (stdout, & the_vmprefix_vm.configuration);

      /* Initialize meta-instruction pointers for implicit instructions.
         VM-independent program specialization relies on those, so they have to
         be accessible to the Jitter library, out of generated code.  Since
         meta-instructions are sorted alphabetically in the array, the index
         is not fixed. */
      the_vmprefix_vm.exitvm_meta_instruction
        = (vmprefix_meta_instructions + vmprefix_meta_instruction_id_exitvm);
      the_vmprefix_vm.unreachable_meta_instruction
        = (vmprefix_meta_instructions
           + vmprefix_meta_instruction_id_unreachable);

      /* Threads or pointers to native code blocks of course don't exist with
         switch-dispatching. */
#ifndef JITTER_DISPATCH_SWITCH
      the_vmprefix_vm.threads = (jitter_thread *) vmprefix_threads;
      the_vmprefix_vm.thread_sizes = (long *) vmprefix_thread_sizes;
      the_vmprefix_vm.threads_validated = vmprefix_threads_validated;
#if defined (JITTER_HAVE_KNOWN_BINARY_FORMAT)
      the_vmprefix_vm.data_locations = JITTER_DATA_LOCATION_NAME(vmprefix);
#else
      the_vmprefix_vm.data_locations = NULL;
#endif // #if defined (JITTER_HAVE_KNOWN_BINARY_FORMAT)
#endif // #ifndef JITTER_DISPATCH_SWITCH

      the_vmprefix_vm.specialized_instruction_residual_arities
        = vmprefix_specialized_instruction_residual_arities;
      the_vmprefix_vm.specialized_instruction_label_bitmasks
        = vmprefix_specialized_instruction_label_bitmasks;
#ifdef JITTER_HAVE_PATCH_IN
      the_vmprefix_vm.specialized_instruction_fast_label_bitmasks
        = vmprefix_specialized_instruction_fast_label_bitmasks;
      the_vmprefix_vm.patch_in_descriptors
        = JITTER_PATCH_IN_DESCRIPTORS_NAME(vmprefix);
      const size_t patch_in_descriptor_size
        = sizeof (struct jitter_patch_in_descriptor);
      the_vmprefix_vm.patch_in_descriptor_no
        = (JITTER_PATCH_IN_DESCRIPTORS_SIZE_IN_BYTES_NAME(vmprefix)
           / patch_in_descriptor_size);
      /* Cheap sanity check: if the size in bytes is not a multiple of
         the element size, we're doing something very wrong. */
      if (JITTER_PATCH_IN_DESCRIPTORS_SIZE_IN_BYTES_NAME(vmprefix)
          % patch_in_descriptor_size != 0)
        jitter_fatal ("patch-in descriptors total size not a multiple "
                      "of the element size");
      /* Initialize the patch-in table for this VM. */
      the_vmprefix_vm.patch_in_table
        = jitter_make_patch_in_table (the_vmprefix_vm.patch_in_descriptors,
                                      the_vmprefix_vm.patch_in_descriptor_no,
                                      VMPREFIX_SPECIALIZED_INSTRUCTION_NO);
#else
      the_vmprefix_vm.specialized_instruction_fast_label_bitmasks = NULL;
#endif // #ifdef JITTER_HAVE_PATCH_IN

      /* FIXME: I might want to conditionalize this. */
      the_vmprefix_vm.specialized_instruction_relocatables
        = vmprefix_specialized_instruction_relocatables;

      the_vmprefix_vm.specialized_instruction_callers
        = vmprefix_specialized_instruction_callers;
      the_vmprefix_vm.specialized_instruction_callees
        = vmprefix_specialized_instruction_callees;

      the_vmprefix_vm.specialized_instruction_names
        = vmprefix_specialized_instruction_names;
      the_vmprefix_vm.specialized_instruction_no
        = VMPREFIX_SPECIALIZED_INSTRUCTION_NO;

      the_vmprefix_vm.meta_instruction_string_hash
        = & vmprefix_meta_instruction_hash;
      the_vmprefix_vm.meta_instructions
        = (struct jitter_meta_instruction *) vmprefix_meta_instructions;
      the_vmprefix_vm.meta_instruction_no = VMPREFIX_META_INSTRUCTION_NO;
      the_vmprefix_vm.max_meta_instruction_name_length
        = VMPREFIX_MAX_META_INSTRUCTION_NAME_LENGTH;
      the_vmprefix_vm.specialized_instruction_to_unspecialized_instruction
        = vmprefix_specialized_instruction_to_unspecialized_instruction;
      the_vmprefix_vm.register_class_character_to_register_class
        = vmprefix_register_class_character_to_register_class;
      the_vmprefix_vm.specialize_instruction = vmprefix_specialize_instruction;
      the_vmprefix_vm.rewrite = vmprefix_rewrite;

#ifdef JITTER_HAVE_PATCH_IN
      /* Fill the defect table.  Since the array in question is a global with a
         fixed size, this needs to be done only once. */
      jitter_fill_defect_table (vmprefix_defect_table,
                                & the_vmprefix_vm,
                                vmprefix_worst_case_defect_table,
                                JITTER_DEFECT_DESCRIPTORS_NAME (vmprefix),
                                (JITTER_DEFECT_DESCRIPTORS_SIZE_IN_BYTES_NAME
                                    (vmprefix)
                                 / sizeof (struct jitter_defect_descriptor)));
#endif // #ifdef JITTER_HAVE_PATCH_IN

      /* Initialize the empty list of states. */
      JITTER_LIST_INITIALIZE_HEADER (& the_vmprefix_vm.states);

      vm_struct_initialized = true;
    }

  jitter_initialize_meta_instructions (& vmprefix_meta_instruction_hash,
                                       vmprefix_meta_instructions,
                                       VMPREFIX_META_INSTRUCTION_NO);

#ifdef JITTER_HAVE_PATCH_IN
  jitter_dump_defect_table (stderr, vmprefix_defect_table, & the_vmprefix_vm);
#endif // #ifdef JITTER_HAVE_PATCH_IN
}

void
vmprefix_finalize (void)
{
  /* There's no need to touch the_vmprefix_vm ; we can keep it as it is, as it
     contains no dynamically-allocated fields. */
  /* Threads need no finalization. */
  jitter_finalize_meta_instructions (& vmprefix_meta_instruction_hash);

#ifdef JITTER_HAVE_PATCH_IN
  /* Destroy the patch-in table for this VM. */
  jitter_destroy_patch_in_table (the_vmprefix_vm.patch_in_table,
                                 VMPREFIX_SPECIALIZED_INSTRUCTION_NO);
#endif // #ifdef JITTER_HAVE_PATCH_IN

#ifdef JITTER_REPLICATE
  /* Finalize the executable-memory subsystem. */
  jitter_finalize_executable ();
#endif // #ifdef JITTER_REPLICATE

  /* Finalize the state list.  If it is not empty then something has gone
     wrong earlier. */
  if (the_vmprefix_vm.states.first != NULL
      || the_vmprefix_vm.states.last != NULL)
    jitter_fatal ("not every state structure was destroyed before VMPREFIX "
                  "finalisation.");
}




/* VM-dependent mutable routine initialization.
 * ************************************************************************** */

struct jitter_mutable_routine*
vmprefix_make_mutable_routine (void)
{
  return jitter_make_mutable_routine (vmprefix_vm);
}
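
/* As an illustration of how the functions in this file combine, a user program
   might drive a complete routine life cycle as sketched below.  This is a
   hedged sketch, not authoritative: vmprefix_state_initialize,
   vmprefix_state_finalize and jitter_destroy_mutable_routine are assumptions
   about the machine-generated and library API, not functions defined in this
   file; the sketch also assumes that a struct jitter_mutable_routine pointer
   can be passed as a jitter_routine to the unified routine API below.

     vmprefix_initialize ();
     struct vmprefix_state state;
     vmprefix_state_initialize (& state);          // assumed name
     struct jitter_mutable_routine *r = vmprefix_make_mutable_routine ();
     vmprefix_parse_mutable_routine_from_string ("...", r);  // VM code text
     vmprefix_execute_routine (r, & state);        // specialise, then execute
     jitter_destroy_mutable_routine (r);           // assumed name
     vmprefix_state_finalize (& state);            // assumed name
     vmprefix_finalize ();
*/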




/* Sample profiling: internal API.
 * ************************************************************************** */

#if defined (JITTER_PROFILE_SAMPLE)

/* Sample profiling depends on some system features: fail immediately if they
   are not available. */
#if ! defined (JITTER_HAVE_SIGACTION) || ! defined (JITTER_HAVE_SETITIMER)
# error "sample-profiling depends on sigaction and setitimer"
#endif

static struct itimerval
vmprefix_timer_interval;

static struct itimerval
vmprefix_timer_disabled_interval;

/* The sampling data, currently global.  The current implementation does not
   play well with threads, but it can be changed later keeping the same user
   API. */
struct vmprefix_sample_profile_state
{
  /* The state currently being sample-profiled.  Since at most one state can be
     sample-profiled at any given time, this field is useful for printing error
     messages in case the user mistakenly sets up sample-profiling from two
     states at the same time.
     This field is also useful for temporarily suspending and then re-enabling
     sampling, when The Array is being resized: if the signal handler sees that
     this field is NULL it will not touch the fields. */
  struct vmprefix_state *state_p;

  /* A pointer to the counts field within the sample_profile_runtime struct. */
  uint32_t *counts;

  /* A pointer to the current specialised instruction opcode within the
     sample_profile_runtime struct. */
  volatile jitter_int * specialized_opcode_p;

  /* A pointer to the field counting the number of samples, again within the
     sample_profile_runtime struct. */
  unsigned int *sample_no_p;
};

/* The (currently) one and only global state for sample-profiling. */
static struct vmprefix_sample_profile_state
vmprefix_sample_profile_state;

static void
vmprefix_sigprof_handler (int signal)
{
#if 0
  assert (vmprefix_sample_profile_state.state_p != NULL);
#endif

  jitter_int specialized_opcode
    = * vmprefix_sample_profile_state.specialized_opcode_p;
  if (__builtin_expect ((specialized_opcode >= 0
                         && (specialized_opcode
                             < VMPREFIX_SPECIALIZED_INSTRUCTION_NO)),
                        true))
    vmprefix_sample_profile_state.counts [specialized_opcode] ++;

  (* vmprefix_sample_profile_state.sample_no_p) ++;
}

void
vmprefix_profile_sample_initialize (void)
{
  /* Perform a sanity check over the sampling period. */
  if (JITTER_PROFILE_SAMPLE_PERIOD_IN_MILLISECONDS <= 0 ||
      JITTER_PROFILE_SAMPLE_PERIOD_IN_MILLISECONDS >= 1000)
    jitter_fatal ("invalid JITTER_PROFILE_SAMPLE_PERIOD_IN_MILLISECONDS: %f",
                  (double) JITTER_PROFILE_SAMPLE_PERIOD_IN_MILLISECONDS);
  struct sigaction action;
  sigaction (SIGPROF, NULL, & action);
  action.sa_handler = vmprefix_sigprof_handler;
  sigaction (SIGPROF, & action, NULL);

  long microseconds
    = (long) (JITTER_PROFILE_SAMPLE_PERIOD_IN_MILLISECONDS * 1000);
  vmprefix_timer_interval.it_interval.tv_sec = 0;
  vmprefix_timer_interval.it_interval.tv_usec = microseconds;
  vmprefix_timer_interval.it_value = vmprefix_timer_interval.it_interval;

  vmprefix_sample_profile_state.state_p = NULL;
  vmprefix_timer_disabled_interval.it_interval.tv_sec = 0;
  vmprefix_timer_disabled_interval.it_interval.tv_usec = 0;
  vmprefix_timer_disabled_interval.it_value
    = vmprefix_timer_disabled_interval.it_interval;
}

void
vmprefix_profile_sample_start (struct vmprefix_state *state_p)
{
  struct jitter_sample_profile_runtime *spr
    = ((struct jitter_sample_profile_runtime *)
       & VMPREFIX_STATE_TO_SPECIAL_PURPOSE_STATE_DATA (state_p)
           ->profile_runtime.sample_profile_runtime);

  if (vmprefix_sample_profile_state.state_p != NULL)
    {
      if (state_p != vmprefix_sample_profile_state.state_p)
        jitter_fatal ("currently it is only possible to sample-profile from "
                      "one state at a time: trying to sample-profile from "
                      "the state %p when already sample-profiling from the "
                      "state %p",
                      state_p, vmprefix_sample_profile_state.state_p);
      else
        {
          /* This situation is a symptom of a bug, but does not need to lead
             to a fatal error. */
          printf ("WARNING: starting profile on the state %p when profiling "
                  "was already active in the same state.\n"
                  "Did you call longjmp from VM code?\n", state_p);
          fflush (stdout);
        }
    }
  vmprefix_sample_profile_state.state_p = state_p;
  vmprefix_sample_profile_state.sample_no_p = & spr->sample_no;
  vmprefix_sample_profile_state.counts = spr->counts;
  vmprefix_sample_profile_state.specialized_opcode_p
    = & spr->current_specialized_instruction_opcode;
  //fprintf (stderr, "SAMPLE START\n"); fflush (NULL);
  if (setitimer (ITIMER_PROF, & vmprefix_timer_interval, NULL) != 0)
    jitter_fatal ("setitimer failed when establishing a timer");
}

void
vmprefix_profile_sample_stop (void)
{
  if (setitimer (ITIMER_PROF, & vmprefix_timer_disabled_interval, NULL) != 0)
    jitter_fatal ("setitimer failed when disabling a timer");

  vmprefix_sample_profile_state.state_p = NULL;

  /* The rest is just for defensiveness' sake. */
  * vmprefix_sample_profile_state.specialized_opcode_p = -1;
  vmprefix_sample_profile_state.sample_no_p = NULL;
  vmprefix_sample_profile_state.counts = NULL;
  vmprefix_sample_profile_state.specialized_opcode_p = NULL;
}
#endif // #if defined (JITTER_PROFILE_SAMPLE)




/* Array re-allocation.
 * ************************************************************************** */

char *
vmprefix_make_place_for_slow_registers (struct vmprefix_state *s,
                                        jitter_int new_slow_register_no_per_class)
{
  if (new_slow_register_no_per_class < 0)
    jitter_fatal ("vmprefix_make_place_for_slow_registers: negative slow "
                  "register number");
  jitter_int old_slow_register_no_per_class
    = s->vmprefix_state_backing.jitter_slow_register_no_per_class;
  /* Change nothing if we already have enough space for the required number of
     slow registers.  The no-change case will be the most common one, and
     this function might be worth optimizing. */
  if (__builtin_expect (new_slow_register_no_per_class
                        > old_slow_register_no_per_class,
                        false))
    {
#if defined (JITTER_PROFILE_SAMPLE)
      /* If sample-profiling is currently in progress on this state suspend it
         temporarily. */
      bool suspending_sample_profiling
        = (vmprefix_sample_profile_state.state_p == s);
      if (suspending_sample_profiling)
        vmprefix_profile_sample_stop ();
#endif // #if defined (JITTER_PROFILE_SAMPLE)

#if 0
      printf ("Increasing slow register-no (per class) from %li to %li\n",
              (long) old_slow_register_no_per_class,
              (long) new_slow_register_no_per_class);
      printf ("Array size %li -> %li\n",
              (long) VMPREFIX_ARRAY_SIZE(old_slow_register_no_per_class),
              (long) VMPREFIX_ARRAY_SIZE(new_slow_register_no_per_class));
#endif
      /* Save the new value for new_slow_register_no_per_class in the state
         structure; reallocate the Array. */
      s->vmprefix_state_backing.jitter_slow_register_no_per_class
        = new_slow_register_no_per_class;
      s->vmprefix_state_backing.jitter_array
        = jitter_xrealloc ((void *) s->vmprefix_state_backing.jitter_array,
                           VMPREFIX_ARRAY_SIZE(new_slow_register_no_per_class));

      /* Initialise the slow registers we have just added, for every class. */
      union vmprefix_any_register *first_slow_register
        = ((union vmprefix_any_register *)
           ((char *) s->vmprefix_state_backing.jitter_array
            + VMPREFIX_FIRST_SLOW_REGISTER_UNBIASED_OFFSET));
      jitter_int i;
      for (i = old_slow_register_no_per_class;
           i < new_slow_register_no_per_class;
           i ++)
        {
          /* A pointer to the i-th rank of slow registers.  Every register
             in the rank is new and in general (according to its class) may
             need initialisation. */
          union vmprefix_any_register *rank
            = first_slow_register + (i * VMPREFIX_REGISTER_CLASS_NO);
          VMPREFIX_INITIALIZE_SLOW_REGISTER_RANK (rank);
        }
#if defined (JITTER_PROFILE_SAMPLE)
      /* Now we can resume sample-profiling on this state if we suspended it. */
      if (suspending_sample_profiling)
        vmprefix_profile_sample_start (s);
#endif // #if defined (JITTER_PROFILE_SAMPLE)
#if 0
      printf ("Done resizing The Array\n");
#endif
    }

  /* Return the new (or unchanged) base, by simply adding the bias to the
     Array as it is now. */
  return s->vmprefix_state_backing.jitter_array + JITTER_ARRAY_BIAS;
}

void
vmprefix_ensure_enough_slow_registers_for_executable_routine
   (const struct jitter_executable_routine *er, struct vmprefix_state *s)
{
  vmprefix_make_place_for_slow_registers (s, er->slow_register_per_class_no);
}
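
/* For example, code holding an executable routine can make a state ready and
   then run it.  This is only a sketch: vmprefix_execute_executable_routine is
   part of the machine-generated API declared elsewhere, not in this file.

     vmprefix_ensure_enough_slow_registers_for_executable_routine (er, & state);
     vmprefix_execute_executable_routine (er, & state);
*/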




/* Program text frontend.
 * ************************************************************************** */

void
vmprefix_parse_mutable_routine_from_file_star (FILE *input_file,
                                               struct jitter_mutable_routine *p)
{
  jitter_parse_mutable_routine_from_file_star (input_file, p, vmprefix_vm);
}

void
vmprefix_parse_mutable_routine_from_file (const char *input_file_name,
                                          struct jitter_mutable_routine *p)
{
  jitter_parse_mutable_routine_from_file (input_file_name, p, vmprefix_vm);
}

void
vmprefix_parse_mutable_routine_from_string (const char *string,
                                            struct jitter_mutable_routine *p)
{
  jitter_parse_mutable_routine_from_string (string, p, vmprefix_vm);
}




/* Executing code: unified routine API.
 * ************************************************************************** */

void
vmprefix_ensure_enough_slow_registers_for_routine
   (jitter_routine r, struct vmprefix_state *s)
{
  struct jitter_executable_routine *e
    = jitter_routine_make_executable_if_needed (r);
  vmprefix_ensure_enough_slow_registers_for_executable_routine (e, s);
}

void
vmprefix_execute_routine (jitter_routine r,
                          struct vmprefix_state *s)
{
  struct jitter_executable_routine *e
    = jitter_routine_make_executable_if_needed (r);
  vmprefix_execute_executable_routine (e, s);
}




/* Profiling: user API.
 * ************************************************************************** */

/* These functions are all trivial wrappers around the functionality declared
   in jitter/jitter-profile.h, hiding the VM pointer. */

struct vmprefix_profile_runtime *
vmprefix_state_profile_runtime (struct vmprefix_state *s)
{
  volatile struct jitter_special_purpose_state_data *spd
    = VMPREFIX_ARRAY_TO_SPECIAL_PURPOSE_STATE_DATA
        (s->vmprefix_state_backing.jitter_array);
  return (struct vmprefix_profile_runtime *) & spd->profile_runtime;
}

struct vmprefix_profile_runtime *
vmprefix_profile_runtime_make (void)
{
  return jitter_profile_runtime_make (vmprefix_vm);
}

void
vmprefix_profile_runtime_clear (struct vmprefix_profile_runtime *p)
{
  jitter_profile_runtime_clear (vmprefix_vm, p);
}

void
vmprefix_profile_runtime_merge_from (struct vmprefix_profile_runtime *to,
                                     const struct vmprefix_profile_runtime *from)
{
  jitter_profile_runtime_merge_from (vmprefix_vm, to, from);
}

void
vmprefix_profile_runtime_merge_from_state (struct vmprefix_profile_runtime *to,
                                           const struct vmprefix_state *from_state)
{
  const struct vmprefix_profile_runtime *from
    = vmprefix_state_profile_runtime ((struct vmprefix_state *) from_state);
  jitter_profile_runtime_merge_from (vmprefix_vm, to, from);
}

void
vmprefix_profile_runtime_print_unspecialized
   (jitter_print_context ct,
    const struct vmprefix_profile_runtime *p)
{
  jitter_profile_runtime_print_unspecialized (ct, vmprefix_vm, p);
}

void
vmprefix_profile_runtime_print_specialized (jitter_print_context ct,
                                            const struct vmprefix_profile_runtime
                                            *p)
{
  jitter_profile_runtime_print_specialized (ct, vmprefix_vm, p);
}

struct vmprefix_profile *
vmprefix_profile_unspecialized_from_runtime
   (const struct vmprefix_profile_runtime *p)
{
  return jitter_profile_unspecialized_from_runtime (vmprefix_vm, p);
}

struct vmprefix_profile *
vmprefix_profile_specialized_from_runtime (const struct vmprefix_profile_runtime
                                           *p)
{
  return jitter_profile_specialized_from_runtime (vmprefix_vm, p);
}
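
/* As a sketch of how these wrappers combine in user code: after executing VM
   code on a state one can print the per-specialised-instruction profile.  The
   print-context constructor and destructor named below are assumptions about
   the Jitter print API, not functions defined in this file; the numbers are
   only meaningful when the VM is instrumented (see the instrumentation field
   in the configuration above).

     struct vmprefix_profile_runtime *pr
       = vmprefix_state_profile_runtime (& state);
     jitter_print_context ctx
       = jitter_print_context_make_file_star (stdout);    // assumed name
     vmprefix_profile_runtime_print_specialized (ctx, pr);
     jitter_print_context_destroy (ctx);                  // assumed name
*/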




/* Everything following this point is machine-generated.
 * ************************************************************************** */

/* What follows could be conceptually split into several generated C files, but
   having too many of them would be inconvenient for the user to compile and
   link.  For this reason we currently generate just three files: this one,
   which also contains the specializer, another for the executor, and a
   header -- a main module is optional.  The executor is potentially very
   large, so it is best compiled separately.  The specializer might be large as
   well at this stage, even if its compilation is usually much less
   expensive. */
