1 #ifdef RCSID
2 static char RCSid[] =
3 "$Header: d:/cvsroot/tads/tads3/VMRUN.CPP,v 1.4 1999/07/11 00:46:58 MJRoberts Exp $";
4 #endif
5
6 /*
7 * Copyright (c) 1998, 2002 Michael J. Roberts. All Rights Reserved.
8 *
9 * Please see the accompanying license file, LICENSE.TXT, for information
10 * on using and copying this software.
11 */
12 /*
13 Name
14 vmrun.cpp - VM Execution
15 Function
16
17 Notes
18
19 Modified
20 11/12/98 MJRoberts - Creation
21 */
22
23 #include <stdio.h>
24
25 #include "t3std.h"
26 #include "os.h"
27 #include "vmrun.h"
28 #include "vmdbg.h"
29 #include "vmop.h"
30 #include "vmstack.h"
31 #include "vmerr.h"
32 #include "vmerrnum.h"
33 #include "vmobj.h"
34 #include "vmlst.h"
35 #include "vmstr.h"
36 #include "vmtobj.h"
37 #include "vmfunc.h"
38 #include "vmmeta.h"
39 #include "vmbif.h"
40 #include "vmpredef.h"
41 #include "vmfile.h"
42 #include "vmsave.h"
43 #include "vmprof.h"
44 #include "vmhash.h"
45
46
47 /* ------------------------------------------------------------------------ */
48 /*
49 * Initialize
50 */
CVmRun()51 CVmRun::CVmRun()
52 {
53 init();
54 }
55
init()56 void CVmRun::init()
57 {
58 /* start out with 'nil' in R0 */
59 r0_.set_nil();
60
61 /* there's no frame yet */
62 frame_ptr_ = 0;
63
64 /* there's no entry pointer yet */
65 entry_ptr_ = 0;
66 entry_ptr_native_ = 0;
67
68 /* function header size is not yet known */
69 funchdr_size_ = 0;
70
71 /* we have no 'say' function yet */
72 say_func_ = 0;
73
74 /* no default 'say' method */
75 say_method_ = VM_INVALID_PROP;
76
77 /* no debugger halt requested yet */
78 halt_vm_ = FALSE;
79
80 /* we have no program counter yet */
81 pc_ptr_ = 0;
82
83 /*
84 * If we're including the profiler in the build, allocate and
85 * initialize its memory structures.
86 */
87 #ifdef VM_PROFILER
88
89 /*
90 * Allocate the profiler stack. This stack will contain one record per
91 * activation frame in the regular VM stack.
92 */
93 prof_stack_max_ = 250;
94 (prof_stack_ = (vm_profiler_rec *)
95 t3malloc(prof_stack_max_ * sizeof(prof_stack_[0])));
96
97 /* we don't have anything on the profiler stack yet */
98 prof_stack_idx_ = 0;
99
100 /* create the profiler master hash table */
101 prof_master_table_ = new CVmHashTable(512, new CVmHashFuncCI(), TRUE);
102
103 /* we're not running the profiler yet */
104 profiling_ = FALSE;
105
106 #endif /* VM_PROFILER */
107 }
108
109 /* ------------------------------------------------------------------------ */
110 /*
111 * Terminate
112 */
~CVmRun()113 CVmRun::~CVmRun()
114 {
115 terminate();
116 }
117
terminate()118 void CVmRun::terminate()
119 {
120 /*
121 * If we're including the profiler in the build, delete its memory
122 * structures.
123 */
124 #ifdef VM_PROFILER
125
126 /* delete the profiler stack */
127 t3free(prof_stack_);
128 prof_stack_ = 0;
129
130 /* delete the profiler master hash table */
131 delete prof_master_table_;
132 prof_master_table_ = 0;
133
134 #endif /* VM_PROFILER */
135 }
136
137 /* ------------------------------------------------------------------------ */
138 /*
139 * Set the function header size
140 */
set_funchdr_size(size_t siz)141 void CVmRun::set_funchdr_size(size_t siz)
142 {
143 /* remember the new size */
144 funchdr_size_ = siz;
145
146 /*
147 * ensure that the size is at least as large as our required
148 * function header block - if it's not, this version of the VM can't
149 * run this image file
150 */
151 if (siz < VMFUNC_HDR_MIN_SIZE)
152 err_throw(VMERR_IMAGE_INCOMPAT_HDR_FMT);
153 }
154
155 /* ------------------------------------------------------------------------ */
156 /*
157 * Add two values, leaving the result in *val1
158 */
compute_sum(VMG_ vm_val_t * val1,vm_val_t * val2)159 void CVmRun::compute_sum(VMG_ vm_val_t *val1, vm_val_t *val2)
160 {
161 /* the meaning of "add" depends on the type of the first operand */
162 switch(val1->typ)
163 {
164 case VM_SSTRING:
165 /*
166 * string constant - add the second value to the string, using
167 * the static string add method
168 */
169 CVmObjString::add_to_str(vmg_ val1, VM_INVALID_OBJ,
170 get_const_ptr(vmg_ val1->val.ofs), val2);
171 break;
172
173 case VM_LIST:
174 /*
175 * list constant - add the second value to the list, using the
176 * static list add method
177 */
178 CVmObjList::add_to_list(vmg_ val1, VM_INVALID_OBJ,
179 get_const_ptr(vmg_ val1->val.ofs), val2);
180 break;
181
182 case VM_OBJ:
183 /*
184 * object - add the second value to the object, using the
185 * object's virtual metaclass add method
186 */
187 vm_objp(vmg_ val1->val.obj)->add_val(vmg_ val1, val1->val.obj, val2);
188 break;
189
190 case VM_INT:
191 /* make sure the other value is a number as well */
192 if (!val2->is_numeric())
193 err_throw(VMERR_NUM_VAL_REQD);
194
195 /* compute the sum */
196 val1->val.intval += val2->num_to_int();
197 break;
198
199 default:
200 /* other types cannot be added */
201 err_throw(VMERR_BAD_TYPE_ADD);
202 break;
203 }
204 }
205
206
207 /* ------------------------------------------------------------------------ */
208 /*
209 * Compute the difference of two values, leaving the result in *val1
210 */
compute_diff(VMG_ vm_val_t * val1,vm_val_t * val2)211 void CVmRun::compute_diff(VMG_ vm_val_t *val1, vm_val_t *val2)
212 {
213 /* the meaning of "subtract" depends on the type of the first operand */
214 switch(val1->typ)
215 {
216 case VM_LIST:
217 /*
218 * list constant - remove the second value from the list, using
219 * the static list subtraction method
220 */
221 CVmObjList::sub_from_list(vmg_ val1, val1,
222 get_const_ptr(vmg_ val1->val.ofs), val2);
223 break;
224
225 case VM_OBJ:
226 /* object - use the object's virtual subtraction method */
227 vm_objp(vmg_ val1->val.obj)->sub_val(vmg_ val1, val1->val.obj, val2);
228 break;
229
230 case VM_INT:
231 /* make sure the other value is a number as well */
232 if (!val2->is_numeric())
233 err_throw(VMERR_NUM_VAL_REQD);
234
235 /* compute the difference */
236 val1->val.intval -= val2->num_to_int();
237 break;
238
239 default:
240 /* other types cannot be subtracted */
241 err_throw(VMERR_BAD_TYPE_SUB);
242 }
243
244 }
245
246 /* ------------------------------------------------------------------------ */
247 /*
248 * Compute the product val1 * val2, leaving the result in val1
249 */
compute_product(VMG_ vm_val_t * val1,vm_val_t * val2)250 void CVmRun::compute_product(VMG_ vm_val_t *val1, vm_val_t *val2)
251 {
252 switch(val1->typ)
253 {
254 case VM_OBJ:
255 /* use the object's virtual multiplication method */
256 vm_objp(vmg_ val1->val.obj)->mul_val(vmg_ val1, val1->val.obj, val2);
257 break;
258
259 case VM_INT:
260 /* make sure the other value is a number as well */
261 if (!val2->is_numeric())
262 err_throw(VMERR_NUM_VAL_REQD);
263
264 /* compute the product */
265 val1->val.intval *= val2->num_to_int();
266 break;
267
268 default:
269 /* other types are invalid */
270 err_throw(VMERR_BAD_TYPE_MUL);
271 }
272 }
273
274 /* ------------------------------------------------------------------------ */
275 /*
276 * Compute the quotient val1/val2, leaving the result in val1.
277 */
compute_quotient(VMG_ vm_val_t * val1,vm_val_t * val2)278 void CVmRun::compute_quotient(VMG_ vm_val_t *val1, vm_val_t *val2)
279 {
280 switch(val1->typ)
281 {
282 case VM_OBJ:
283 /* use the object's virtual division method */
284 vm_objp(vmg_ val1->val.obj)->div_val(vmg_ val1, val1->val.obj, val2);
285 break;
286
287 case VM_INT:
288 /* make sure the other value is a number as well */
289 if (!val2->is_numeric())
290 err_throw(VMERR_NUM_VAL_REQD);
291
292 /* check for divide by zero */
293 if (val2->num_to_int() == 0)
294 err_throw(VMERR_DIVIDE_BY_ZERO);
295
296 /* compute the product */
297 val1->val.intval = os_divide_long(val1->val.intval,
298 val2->num_to_int());
299 break;
300
301 default:
302 /* other types are invalid */
303 err_throw(VMERR_BAD_TYPE_DIV);
304 }
305 }
306
307 /* ------------------------------------------------------------------------ */
308 /*
309 * XOR two values and push the result. The values can be numeric or
310 * logical. If either value is logical, the result will be logical;
311 * otherwise, the result will be a bitwise XOR of the integers.
312 */
xor_and_push(VMG_ vm_val_t * val1,vm_val_t * val2)313 void CVmRun::xor_and_push(VMG_ vm_val_t *val1, vm_val_t *val2)
314 {
315 /* if either value is logical, compute the logical XOR */
316 if (val1->is_logical() && val2->is_logical())
317 {
318 /* both values are logical - compute the logical XOR */
319 val1->set_logical(val1->get_logical() ^ val2->get_logical());
320 }
321 else if (val1->is_logical() || val2->is_logical())
322 {
323 /*
324 * one value is logical, but not both - convert the other value
325 * from a number to a logical and compute the result as a
326 * logical value
327 */
328 if (!val1->is_logical())
329 val1->num_to_logical();
330 else if (!val2->is_logical())
331 val2->num_to_logical();
332
333 /* compute the logical xor */
334 val1->set_logical(val1->get_logical() ^ val2->get_logical());
335 }
336 else if (val1->typ == VM_INT && val2->typ == VM_INT)
337 {
338 /* compute and store the bitwise XOR */
339 val1->val.intval = val1->val.intval ^ val2->val.intval;
340 }
341 else
342 {
343 /* no logical conversion */
344 err_throw(VMERR_NO_LOG_CONV);
345 }
346
347 /* push the result */
348 pushval(vmg_ val1);
349 }
350
351
352 /* ------------------------------------------------------------------------ */
353 /*
354 * Index a value and push the result.
355 */
apply_index(VMG_ vm_val_t * result,const vm_val_t * container_val,const vm_val_t * index_val)356 void CVmRun::apply_index(VMG_ vm_val_t *result,
357 const vm_val_t *container_val,
358 const vm_val_t *index_val)
359 {
360 /* check the type of the value we're indexing */
361 switch(container_val->typ)
362 {
363 case VM_LIST:
364 /* list constant - use the static list indexing method */
365 CVmObjList::index_list(vmg_ result,
366 get_const_ptr(vmg_ container_val->val.ofs),
367 index_val);
368 break;
369
370 case VM_OBJ:
371 /* object - use the object's virtual indexing method */
372 vm_objp(vmg_ container_val->val.obj)
373 ->index_val(vmg_ result, container_val->val.obj, index_val);
374 break;
375
376 default:
377 /* other values cannot be indexed */
378 err_throw(VMERR_CANNOT_INDEX_TYPE);
379 }
380 }
381
382 /* ------------------------------------------------------------------------ */
383 /*
384 * Set an indexed value. Updates *container_val with the modified
385 * container, if the operation requires this. (For example, setting an
386 * indexed element of a list will create a new list, and return the new
387 * list in *container_val. Setting an element of a vector simply modifies
388 * the vector in place, hence the container reference is unchanged.)
389 */
set_index(VMG_ vm_val_t * container_val,const vm_val_t * index_val,const vm_val_t * new_val)390 void CVmRun::set_index(VMG_ vm_val_t *container_val,
391 const vm_val_t *index_val,
392 const vm_val_t *new_val)
393 {
394 switch(container_val->typ)
395 {
396 case VM_LIST:
397 /* list constant - use the static list set-index method */
398 CVmObjList::set_index_list(vmg_ container_val,
399 get_const_ptr(vmg_ container_val->val.ofs),
400 index_val, new_val);
401 break;
402
403 case VM_OBJ:
404 /* object - use the object's virtual set-index method */
405 vm_objp(vmg_ container_val->val.obj)
406 ->set_index_val(vmg_ container_val,
407 container_val->val.obj, index_val, new_val);
408 break;
409
410 default:
411 /* other values cannot be indexed */
412 err_throw(VMERR_CANNOT_INDEX_TYPE);
413 }
414 }
415
416 /* ------------------------------------------------------------------------ */
417 /*
418 * Create a new object and store it in R0
419 */
new_and_store_r0(VMG_ const uchar * pc,uint metaclass_idx,uint argc,int is_transient)420 const uchar *CVmRun::new_and_store_r0(VMG_ const uchar *pc,
421 uint metaclass_idx, uint argc,
422 int is_transient)
423 {
424 vm_obj_id_t obj;
425
426 /* create the object */
427 obj = G_meta_table->create_from_stack(vmg_ &pc, metaclass_idx, argc);
428
429 /* if we got a valid object, store a reference to it in R0 */
430 if (obj != VM_INVALID_OBJ)
431 {
432 /* set the object return value */
433 r0_.set_obj(obj);
434
435 /* make the object transient if desired */
436 if (is_transient)
437 G_obj_table->set_obj_transient(obj);
438 }
439 else
440 {
441 /* failed - return nil */
442 r0_.set_nil();
443 }
444
445 /* return the new instruction pointer */
446 return pc;
447 }
448
449 /* ------------------------------------------------------------------------ */
450 /*
451 * Execute byte code
452 */
run(VMG_ const uchar * start_pc)453 void CVmRun::run(VMG_ const uchar *start_pc)
454 {
455 /*
456 * If you're concerned about a compiler warning on the following
457 * 'register' declaration, refer to the footnote at the bottom of this
458 * file (search for [REGISTER_P_FOOTNOTE]). Executive summary: you can
459 * safely ignore the warning, and I'm keeping the code as it is.
460 */
461 register const uchar *p = start_pc;
462 const uchar *last_pc;
463 const uchar **old_pc_ptr;
464 vm_val_t *valp;
465 vm_val_t *valp2;
466 vm_val_t val;
467 vm_val_t val2;
468 vm_val_t val3;
469 int done;
470 vm_obj_id_t obj;
471 vm_prop_id_t prop;
472 uint argc;
473 uint idx;
474 uint set_idx;
475 pool_ofs_t ofs;
476 uint cnt;
477 vm_obj_id_t unhandled_exc;
478 int level;
479 int trans;
480
481 /* save the enclosing program counter pointer, and remember the new one */
482 old_pc_ptr = pc_ptr_;
483 pc_ptr_ = &last_pc;
484
485 /* we're not done yet */
486 done = FALSE;
487
488 /* no unhandled exception yet */
489 unhandled_exc = VM_INVALID_OBJ;
490
491 /*
492 * Come back here whenever we catch a run-time exception and find a
493 * byte-code error handler to process it in the stack. We'll
494 * re-enter our exception handler and resume byte-code execution at
495 * the handler.
496 */
497 resume_execution:
498
499 /*
500 * Execute all code within an exception frame. If any routine we
501 * call throws an exception, we'll catch the exception and process
502 * it as a run-time error.
503 */
504 err_try
505 {
506 /* execute code until something makes us stop */
507 for (;;)
508 {
509 VM_IF_DEBUGGER(static int brkchk = 0);
510
511 /*
512 * check for user-requested break, and step into the debugger
513 * if we find it
514 */
515 VM_IF_DEBUGGER(
516 /* check for break every so often */
517 if (++brkchk > 10000)
518 {
519 /* reset the break counter */
520 brkchk = 0;
521
522 /* check for break, and step into debugger if found */
523 if (os_break())
524 G_debugger->set_break_stop();
525 }
526 );
527
528 /* if we're single-stepping, break into the debugger */
529 VM_IF_DEBUGGER(if (G_debugger->is_single_step())
530 G_debugger->step(vmg_ &p, entry_ptr_, FALSE, 0));
531
532 /* check for a halt request from the debugger */
533 VM_IF_DEBUGGER(if (halt_vm_) { done = TRUE; goto exit_loop; });
534
535 exec_instruction:
536 /*
537 * Remember the location of this instruction in a non-register
538 * variable, in case there's an exception. (We know that
539 * last_pc is guaranteed to be a non-register variable because
540 * we take its address and store it in our pc_ptr_ member.)
541 *
542 * We need to know the location of the last instruction when
543 * an exception occurs so that we can find the exception
544 * handler. We want to encourage the compiler to enregister
545 * 'p', since we access it so frequently in this routine; but
546 * if it's in a register, there's a risk we'd get the
547 * setjmp-time value in our exception handler. To handle both
548 * needs, simply copy the value to our non-register variable
549 * last_pc; this will still let the vast majority of our
550 * access to 'p' use fast register operations if the compiler
551 * allows this, while ensuring we have a safe copy around in
552 * case of exceptions.
553 */
554 last_pc = p;
555
556 /* execute the current instruction */
557 switch(*p++)
558 {
559 case OPC_PUSH_0:
560 /* push the constant value 0 */
561 push_int(vmg_ 0);
562 break;
563
564 case OPC_PUSH_1:
565 /* push the constant value 1 */
566 push_int(vmg_ 1);
567 break;
568
569 case OPC_PUSHINT8:
570 /* push an SBYTE operand value */
571 push_int(vmg_ get_op_int8(&p));
572 break;
573
574 case OPC_PUSHINT:
575 /* push a UINT4 operand value */
576 push_int(vmg_ get_op_int32(&p));
577 break;
578
579 case OPC_PUSHENUM:
580 /* push a UINT4 operand value */
581 push_enum(vmg_ get_op_uint32(&p));
582 break;
583
584 case OPC_PUSHSTR:
585 /* push UINT4 offset operand as a string */
586 G_stk->push()->set_sstring(get_op_uint32(&p));
587 break;
588
589 case OPC_PUSHSTRI:
590 /* inline string - get the length prefix */
591 cnt = get_op_uint16(&p);
592
593 /* create the new string from the inline data */
594 obj = CVmObjString::create(vmg_ FALSE, (const char *)p, cnt);
595
596 /* skip past the string's bytes */
597 p += cnt;
598
599 /* push the new string */
600 push_obj(vmg_ obj);
601 break;
602
603 case OPC_PUSHLST:
604 /* push UINT4 offset operand as a list */
605 G_stk->push()->set_list(get_op_uint32(&p));
606 break;
607
608 case OPC_PUSHOBJ:
609 /* push UINT4 object ID operand */
610 G_stk->push()->set_obj(get_op_uint32(&p));
611 break;
612
613 case OPC_PUSHNIL:
614 /* push nil */
615 push_nil(vmg0_);
616 break;
617
618 case OPC_PUSHTRUE:
619 /* push true */
620 G_stk->push()->set_true();
621 break;
622
623 case OPC_PUSHPROPID:
624 /* push UINT2 property ID operand */
625 G_stk->push()->set_propid(get_op_uint16(&p));
626 break;
627
628 case OPC_PUSHFNPTR:
629 /* push a function pointer operand */
630 G_stk->push()->set_fnptr(get_op_uint32(&p));
631 break;
632
633 case OPC_PUSHPARLST:
634 /* get the number of fixed parameters */
635 cnt = *p++;
636
637 /* allocate the list from the parameters */
638 obj = CVmObjList::create_from_params(
639 vmg_ cnt, get_cur_argc(vmg0_) - cnt);
640
641 /* push the new list */
642 push_obj(vmg_ obj);
643 break;
644
645 case OPC_MAKELSTPAR:
646 {
647 const char *lstp;
648 uint i;
649 uint hdr_depth;
650 CVmFuncPtr hdr_ptr;
651
652 /* pop the value */
653 popval(vmg_ &val);
654
655 /* pop the argument counter so far */
656 pop_int(vmg_ &val2);
657
658 /* if it's not a list, just push it again unchanged */
659 if ((lstp = val.get_as_list(vmg0_)) == 0)
660 {
661 /* put it back on the stack */
662 pushval(vmg_ &val);
663
664 /* increment the argument count and push it */
665 ++val2.val.intval;
666 pushval(vmg_ &val2);
667
668 /* our work here is done */
669 break;
670 }
671
672 /* set up a pointer to the current function header */
673 hdr_ptr.set(entry_ptr_native_);
674
675 /* get the depth required for the header */
676 hdr_depth = hdr_ptr.get_stack_depth();
677
678 /*
                 * deduct the amount of stack space we've already used
680 * from the amount noted in the header, because
681 * that's the amount more that we could need for the
682 * fixed stuff
683 */
684 hdr_depth -= (G_stk->get_depth_rel(frame_ptr_) - 1);
685
686 /* get the number of elements in the list */
687 cnt = vmb_get_len(lstp);
688
689 /* make sure we have enough stack space available */
690 if (!G_stk->check_space(cnt + hdr_depth))
691 err_throw(VMERR_STACK_OVERFLOW);
692
693 /* push the elements of the list from last to first */
694 for (i = cnt ; i != 0 ; --i)
695 {
696 /* get this element's value */
697 CVmObjList::index_and_push(vmg_ lstp, i);
698 }
699
700 /* increment and push the argument count */
701 val2.val.intval += cnt;
702 pushval(vmg_ &val2);
703 }
704 break;
705
706 case OPC_NEG:
707 /* check for an object */
708 if ((valp = G_stk->get(0))->typ == VM_OBJ)
709 {
710 /* call the object's negate method */
711 vm_objp(vmg_ valp->val.obj)
712 ->neg_val(vmg_ &val2, valp->val.obj);
713
714 /* replace TOS with the result */
715 *valp = val2;
716 }
717 else
718 {
719 /* make sure it's a number */
720 if (!valp->is_numeric())
721 err_throw(VMERR_NUM_VAL_REQD);
722
723 /* negate number in place */
724 valp->val.intval = -valp->val.intval;
725 }
726 break;
727
728 case OPC_BNOT:
729 /* ensure we have an integer */
730 if ((valp = G_stk->get(0))->typ != VM_INT)
731 err_throw(VMERR_INT_VAL_REQD);
732
733 /* bitwise NOT the integer on top of stack */
734 valp->val.intval = ~valp->val.intval;
735 break;
736
737 case OPC_ADD:
738 /* if they're both integers, add them the quick way */
739 valp = G_stk->get(0);
740 valp2 = G_stk->get(1);
741 if (valp->typ == VM_INT && valp2->typ == VM_INT)
742 {
743 /* add the two values */
744 valp2->val.intval += valp->val.intval;
745
746 /* discard the second value */
747 G_stk->discard();
748 }
749 else
750 {
751 /*
752 * compute the sum of (TOS-1) + (TOS), leaving the
753 * result in (TOS-1)
754 */
755 compute_sum(vmg_ valp2, valp);
756
757 /* discard TOS */
758 G_stk->discard();
759 }
760 break;
761
762 case OPC_INC:
763 /*
764 * Increment the value at top of stack. We must perform
765 * the same type conversions as the ADD instruction
766 * does. As an optimization, check to see if we have an
767 * integer on top of the stack, and if so simply
768 * increment its value without popping and repushing.
769 */
770 if ((valp = G_stk->get(0))->typ == VM_INT)
771 {
772 /* it's an integer - increment it, and we're done */
773 ++(valp->val.intval);
774 }
775 else
776 {
777 /* add 1 to the value at TOS, leaving it on the stack */
778 val2.set_int(1);
779 compute_sum(vmg_ valp, &val2);
780 }
781 break;
782
783 case OPC_DEC:
784 /*
785 * Decrement the value at top of stack. We must perform
786 * the same type conversions as the SUB instruction
787 * does. As an optimization, check to see if we have an
788 * integer on top of the stack, and if so simply
789 * decrement its value without popping and repushing.
790 */
791 if ((valp = G_stk->get(0))->typ == VM_INT)
792 {
793 /* it's an integer - decrement it, and we're done */
794 --(valp->val.intval);
795 }
796 else
797 {
798 /* compute TOS - 1, leaving the result in TOS */
799 val2.set_int(1);
800 compute_diff(vmg_ valp, &val2);
801 }
802 break;
803
804 case OPC_SUB:
805 /* if they're both integers, subtract them the quick way */
806 valp = G_stk->get(0);
807 valp2 = G_stk->get(1);
808 if (valp->typ == VM_INT && valp2->typ == VM_INT)
809 {
810 /* compute the difference */
811 valp2->val.intval -= valp->val.intval;
812
813 /* discard the second value */
814 G_stk->discard();
815 }
816 else
817 {
818 /*
819 * compute the difference (TOS-1) - (TOS), leaving the
820 * result in (TOS-1)
821 */
822 compute_diff(vmg_ valp2, valp);
823
824 /* discard TOS */
825 G_stk->discard();
826 }
827 break;
828
829 case OPC_MUL:
830 /* if they're both integers, this is easy */
831 valp = G_stk->get(0);
832 valp2 = G_stk->get(1);
833 if (valp->typ == VM_INT && valp2->typ == VM_INT)
834 {
835 /* compute the difference */
836 valp2->val.intval *= valp->val.intval;
837
838 /* discard the second value */
839 G_stk->discard();
840 }
841 else
842 {
843 /*
844 * compute the product (TOS-1) * (TOS), leaving the
845 * result in (TOS-1)
846 */
847 compute_product(vmg_ valp2, valp);
848
849 /* discard TOS */
850 G_stk->discard();
851 }
852 break;
853
854 case OPC_DIV:
855 /* if they're both integers, divide them the quick way */
856 valp = G_stk->get(0);
857 valp2 = G_stk->get(1);
858 if (valp->typ == VM_INT && valp2->typ == VM_INT)
859 {
860 /* check for division by zero */
861 if (valp->val.intval == 0)
862 err_throw(VMERR_DIVIDE_BY_ZERO);
863
864 /* compute the result of the division */
865 valp2->val.intval = os_divide_long(
866 valp2->val.intval, valp->val.intval);
867
868 /* discard the second value */
869 G_stk->discard();
870 }
871 else
872 {
873 /*
874 * compute (TOS-1) / (TOS), leaving the result in
875 * (TOS-1)
876 */
877 compute_quotient(vmg_ valp2, valp);
878
879 /* discard TOS */
880 G_stk->discard();
881 }
882 break;
883
884 case OPC_MOD:
885 /* remainder number at (TOS-1) by number at top of stack */
886 valp = G_stk->get(0);
887 valp2 = G_stk->get(1);
888
889 /* make sure the values are numeric */
890 if (!valp->is_numeric() || !valp2->is_numeric())
891 err_throw(VMERR_NUM_VAL_REQD);
892
893 /*
             * compute the remainder (TOS-1) % (TOS), leaving the
895 * result at (TOS-1), and discard the second operand
896 */
897 valp2->val.intval = os_remainder_long(
898 valp2->val.intval, valp->val.intval);
899 G_stk->discard();
900 break;
901
902 case OPC_BAND:
903 /* bitwise AND two integers on top of stack */
904 valp = G_stk->get(0);
905 valp2 = G_stk->get(1);
906
907 /* ensure we have two integers */
908 if (valp->typ != VM_INT || valp2->typ != VM_INT)
909 err_throw(VMERR_INT_VAL_REQD);
910
911 /* compute the result and discard the second operand */
912 valp2->val.intval &= valp->val.intval;
913 G_stk->discard();
914 break;
915
916 case OPC_BOR:
917 /* bitwise OR two integers on top of stack */
918 valp = G_stk->get(0);
919 valp2 = G_stk->get(1);
920
921 /* ensure we have two integers */
922 if (valp->typ != VM_INT || valp2->typ != VM_INT)
923 err_throw(VMERR_INT_VAL_REQD);
924
925 /* compute the result and discard the second operand */
926 valp2->val.intval |= valp->val.intval;
927 G_stk->discard();
928 break;
929
930 case OPC_SHL:
931 /*
932 * bit-shift left integer at (TOS-1) by integer at top
933 * of stack
934 */
935 valp = G_stk->get(0);
936 valp2 = G_stk->get(1);
937
938 /* ensure we have two integers */
939 if (valp->typ != VM_INT || valp2->typ != VM_INT)
940 err_throw(VMERR_INT_VAL_REQD);
941
942 /* compute the result and discard the second operand */
943 valp2->val.intval <<= valp->val.intval;
944 G_stk->discard();
945 break;
946
947 case OPC_SHR:
948 /*
949 * bit-shift right integer at (TOS-1) by integer at top
950 * of stack
951 */
952 valp = G_stk->get(0);
953 valp2 = G_stk->get(1);
954
955 /* ensure we have two integers */
956 if (valp->typ != VM_INT || valp2->typ != VM_INT)
957 err_throw(VMERR_INT_VAL_REQD);
958
959 /* compute the result and discard the second operand */
960 valp2->val.intval >>= valp->val.intval;
961 G_stk->discard();
962 break;
963
964 case OPC_XOR:
965 /* XOR two values at top of stack */
966 popval_2(vmg_ &val, &val2);
967 xor_and_push(vmg_ &val, &val2);
968 break;
969
970 case OPC_NOT:
971 /*
972 * invert the logic value; if the value is a number,
973 * treat 0 as nil and non-zero as true
974 */
975 valp = G_stk->get(0);
976 switch(valp->typ)
977 {
978 case VM_NIL:
979 /* !nil -> true */
980 valp->set_true();
981 break;
982
983 case VM_OBJ:
984 /* !obj -> true if obj is nil, nil otherwise */
985 valp->set_logical(valp->val.obj == VM_INVALID_OBJ);
986 break;
987
988 case VM_TRUE:
989 case VM_PROP:
990 case VM_SSTRING:
991 case VM_LIST:
992 case VM_CODEOFS:
993 case VM_FUNCPTR:
994 case VM_ENUM:
995 /* these are all considered true, so !them -> nil */
996 valp->set_nil();
997 break;
998
999 case VM_INT:
1000 /* !int -> true if int is 0, nil otherwise */
1001 valp->set_logical(valp->val.intval == 0);
1002 break;
1003
1004 default:
1005 err_throw(VMERR_NO_LOG_CONV);
1006 }
1007 break;
1008
1009 case OPC_BOOLIZE:
1010 /* set to a boolean value */
1011 valp = G_stk->get(0);
1012 switch(valp->typ)
1013 {
1014 case VM_NIL:
1015 case VM_TRUE:
1016 /* it's already a logical value - leave it alone */
1017 break;
1018
1019 case VM_INT:
1020 /* integer: 0 -> nil, non-zero -> true */
1021 valp->set_logical(valp->val.intval);
1022 break;
1023
1024 case VM_ENUM:
1025 /* an enum is always non-nil */
1026 valp->set_true();
1027 break;
1028
1029 default:
1030 err_throw(VMERR_NO_LOG_CONV);
1031 }
1032 break;
1033
1034 case OPC_EQ:
1035 /* compare two values at top of stack for equality */
1036 push_bool(vmg_ pop2_equal(vmg0_));
1037 break;
1038
1039 case OPC_NE:
1040 /* compare two values at top of stack for inequality */
1041 push_bool(vmg_ !pop2_equal(vmg0_));
1042 break;
1043
1044 case OPC_LT:
1045 /* compare values at top of stack - true if (TOS-1) < TOS */
1046 push_bool(vmg_ pop2_compare_lt(vmg0_));
1047 break;
1048
1049 case OPC_LE:
1050 /* compare values at top of stack - true if (TOS-1) <= TOS */
1051 push_bool(vmg_ pop2_compare_le(vmg0_));
1052 break;
1053
1054 case OPC_GT:
1055 /* compare values at top of stack - true if (TOS-1) > TOS */
1056 push_bool(vmg_ pop2_compare_gt(vmg0_));
1057 break;
1058
1059 case OPC_GE:
1060 /* compare values at top of stack - true if (TOS-1) >= TOS */
1061 push_bool(vmg_ pop2_compare_ge(vmg0_));
1062 break;
1063
1064 case OPC_VARARGC:
1065 {
1066 uchar opc;
1067
1068 /* get the modified opcode */
1069 opc = *p++;
1070
1071 /*
1072 * skip the immediate data argument count - this is
1073 * superseded by our dynamic argument counter
1074 */
1075 ++p;
1076
1077 /* pop the argument counter */
1078 pop_int(vmg_ &val);
1079 argc = val.val.intval;
1080
1081 /* execute the appropriate next opcode */
1082 switch(opc)
1083 {
1084 case OPC_CALL:
1085 goto do_opc_call;
1086
1087 case OPC_PTRCALL:
1088 goto do_opc_ptrcall;
1089
1090 case OPC_CALLPROP:
1091 goto do_opc_callprop;
1092
1093 case OPC_PTRCALLPROP:
1094 goto do_opc_ptrcallprop;
1095
1096 case OPC_CALLPROPSELF:
1097 goto do_opc_callpropself;
1098
1099 case OPC_PTRCALLPROPSELF:
1100 goto do_opc_ptrcallpropself;
1101
1102 case OPC_OBJCALLPROP:
1103 goto do_opc_objcallprop;
1104
1105 case OPC_CALLPROPLCL1:
1106 goto do_opc_callproplcl1;
1107
1108 case OPC_CALLPROPR0:
1109 goto do_opc_callpropr0;
1110
1111 case OPC_INHERIT:
1112 goto do_opc_inherit;
1113
1114 case OPC_PTRINHERIT:
1115 goto do_opc_ptrinherit;
1116
1117 case OPC_EXPINHERIT:
1118 goto do_opc_expinherit;
1119
1120 case OPC_PTREXPINHERIT:
1121 goto do_opc_ptrexpinherit;
1122
1123 case OPC_DELEGATE:
1124 goto do_opc_delegate;
1125
1126 case OPC_PTRDELEGATE:
1127 goto do_opc_ptrdelegate;
1128
1129 case OPC_BUILTIN_A:
1130 goto do_opc_builtin_a;
1131
1132 case OPC_BUILTIN_B:
1133 goto do_opc_builtin_b;
1134
1135 case OPC_BUILTIN_C:
1136 goto do_opc_builtin_c;
1137
1138 case OPC_BUILTIN_D:
1139 goto do_opc_builtin_d;
1140
1141 case OPC_BUILTIN1:
1142 goto do_opc_builtin1;
1143
1144 case OPC_BUILTIN2:
1145 goto do_opc_builtin2;
1146
1147 case OPC_NEW1:
1148 trans = FALSE;
1149 goto do_opc_new1_argc;
1150
1151 case OPC_TRNEW1:
1152 trans = TRUE;
1153 goto do_opc_new1_argc;
1154
1155 case OPC_NEW2:
1156 trans = FALSE;
1157 goto do_opc_new2_argc;
1158
1159 case OPC_TRNEW2:
1160 trans = TRUE;
1161 goto do_opc_new2_argc;
1162
1163 default:
1164 err_throw(VMERR_INVALID_OPCODE_MOD);
1165 break;
1166 }
1167 }
1168 break;
1169
1170 case OPC_CALL:
1171 /* get the argument count */
1172 argc = get_op_uint8(&p);
1173
1174 do_opc_call:
1175 /* get the code offset to invoke */
1176 ofs = get_op_int32(&p);
1177
1178 /* call it */
1179 p = do_call_func_nr(vmg_ p - entry_ptr_native_, ofs, argc);
1180 break;
1181
1182 case OPC_PTRCALL:
1183 /* get the argument count */
1184 argc = get_op_uint8(&p);
1185
1186 do_opc_ptrcall:
1187 /* retrieve the target of the call */
1188 popval(vmg_ &val);
1189
1190 /*
1191 * if it's a prop ID, and there's a valid "self" object,
1192 * treat it as a PTRCALLPROPSELF
1193 */
1194 if (val.typ == VM_PROP && get_self(vmg0_) != VM_INVALID_OBJ)
1195 goto do_opc_ptrcallpropself_val;
1196
1197 /* call the function */
1198 p = call_func_ptr(vmg_ &val, argc, 0, p - entry_ptr_native_);
1199 break;
1200
1201 case OPC_RETVAL:
1202 /* pop the return value into R0 */
1203 popval(vmg_ &r0_);
1204
1205 /* return */
1206 if ((p = do_return(vmg0_)) == 0)
1207 goto exit_loop;
1208 break;
1209
1210 case OPC_RET:
1211 /* return, leaving R0 unchanged */
1212 if ((p = do_return(vmg0_)) == 0)
1213 goto exit_loop;
1214 break;
1215
1216 case OPC_RETNIL:
1217 /* store nil in R0 */
1218 r0_.set_nil();
1219
1220 /* return */
1221 if ((p = do_return(vmg0_)) == 0)
1222 goto exit_loop;
1223 break;
1224
1225 case OPC_RETTRUE:
1226 /* store true in R0 */
1227 r0_.set_true();
1228
1229 /* return */
1230 if ((p = do_return(vmg0_)) == 0)
1231 goto exit_loop;
1232 break;
1233
1234 case OPC_GETPROP:
1235 /* get the object whose property we're fetching */
1236 G_stk->pop(&val);
1237
1238 /* evaluate the property given by the immediate data */
1239 prop = get_op_uint16(&p);
1240 p = get_prop(vmg_ p - entry_ptr_native_, &val, prop, &val, 0);
1241 break;
1242
1243 case OPC_GETPROPLCL1:
1244 /* get the local whose property we're evaluating */
1245 valp = get_local(vmg_ get_op_uint8(&p));
1246
1247 /* evaluate the property of the local variable */
1248 prop = get_op_uint16(&p);
1249 p = get_prop(vmg_ p - entry_ptr_native_, valp, prop, valp, 0);
1250 break;
1251
1252 case OPC_GETPROPR0:
1253 /* evaluate the property of R0 */
1254 valp = &r0_;
1255 prop = get_op_uint16(&p);
1256 p = get_prop(vmg_ p - entry_ptr_native_, valp, prop, valp, 0);
1257 break;
1258
1259 case OPC_CALLPROP:
1260 /* get the argument count */
1261 argc = get_op_uint8(&p);
1262
1263 do_opc_callprop:
1264 /* pop the object whose property we're fetching */
1265 G_stk->pop(&val);
1266
1267 /* evaluate the property given by the immediate data */
1268 prop = get_op_uint16(&p);
1269 p = get_prop(vmg_ p - entry_ptr_native_, &val,
1270 prop, &val, argc);
1271 break;
1272
1273 case OPC_CALLPROPLCL1:
1274 /* get the argument count */
1275 argc = get_op_uint8(&p);
1276
1277 do_opc_callproplcl1:
1278 /* get the local whose property we're calling */
1279 valp = get_local(vmg_ get_op_uint8(&p));
1280
1281 /* call the property of the local */
1282 prop = get_op_uint16(&p);
1283 p = get_prop(vmg_ p - entry_ptr_native_, valp,
1284 prop, valp, argc);
1285 break;
1286
1287 case OPC_CALLPROPR0:
1288 /* get the argument count */
1289 argc = get_op_uint8(&p);
1290
1291 do_opc_callpropr0:
1292 /* call the property of R0 */
1293 val = r0_;
1294 prop = get_op_uint16(&p);
1295 p = get_prop(vmg_ p - entry_ptr_native_, &val,
1296 prop, &val, argc);
1297 break;
1298
1299 case OPC_PTRCALLPROP:
1300 /* get the argument count */
1301 argc = get_op_uint8(&p);
1302
1303 do_opc_ptrcallprop:
1304 /*
1305 * pop the property to be evaluated, and the object
1306 * whose property we're evaluating
1307 */
1308 pop_prop(vmg_ &val);
1309 G_stk->pop(&val2);
1310
1311 /* evaluate the property */
1312 p = get_prop(vmg_ p - entry_ptr_native_, &val2,
1313 val.val.prop, &val2, argc);
1314 break;
1315
1316 case OPC_GETPROPSELF:
1317 /* evaluate the property of 'self' */
1318 val.set_obj(get_self(vmg0_));
1319 prop = get_op_uint16(&p);
1320 p = get_prop(vmg_ p - entry_ptr_native_, &val, prop, &val, 0);
1321 break;
1322
1323 case OPC_CALLPROPSELF:
1324 /* get the argument count */
1325 argc = get_op_uint8(&p);
1326
1327 do_opc_callpropself:
1328 /* evaluate the property of 'self' */
1329 val.set_obj(get_self(vmg0_));
1330 prop = get_op_uint16(&p);
1331 p = get_prop(vmg_ p - entry_ptr_native_, &val,
1332 prop, &val, argc);
1333 break;
1334
1335 case OPC_PTRCALLPROPSELF:
1336 /* get the argument count */
1337 argc = get_op_uint8(&p);
1338
1339 do_opc_ptrcallpropself:
1340 /* get the property to be evaluated */
1341 pop_prop(vmg_ &val);
1342
1343 do_opc_ptrcallpropself_val:
1344 /* evaluate the property of 'self' */
1345 val2.set_obj(get_self(vmg0_));
1346 p = get_prop(vmg_ p - entry_ptr_native_,
1347 &val2, val.val.prop, &val2, argc);
1348 break;
1349
1350 case OPC_OBJGETPROP:
1351 /* get the object */
1352 val.set_obj((vm_obj_id_t)get_op_uint32(&p));
1353
1354 /* evaluate the property */
1355 prop = get_op_uint16(&p);
1356 p = get_prop(vmg_ p - entry_ptr_native_, &val, prop, &val, 0);
1357 break;
1358
1359 case OPC_OBJCALLPROP:
1360 /* get the argument count */
1361 argc = get_op_uint8(&p);
1362
1363 do_opc_objcallprop:
1364 /* get the object */
1365 val.set_obj((vm_obj_id_t)get_op_uint32(&p));
1366
1367 /* evaluate the property */
1368 prop = get_op_uint16(&p);
1369 p = get_prop(vmg_ p - entry_ptr_native_,
1370 &val, prop, &val, argc);
1371 break;
1372
1373 case OPC_GETPROPDATA:
1374 /* get the object whose property we're fetching */
1375 G_stk->pop(&val);
1376
1377 /*
1378 * if the object is not an object, it's one of the
1379 * native types, in which case we'll definitely run
1380 * native code to evaluate the property, in which case
1381 * it's not valid for speculative evaluation
1382 */
1383 if (val.typ != VM_OBJ)
1384 err_throw(VMERR_BAD_SPEC_EVAL);
1385
1386 /* get the property */
1387 prop = (vm_prop_id_t)get_op_uint16(&p);
1388
1389 /* check validity for speculative evaluation */
1390 check_prop_spec_eval(vmg_ val.val.obj, prop);
1391
1392 /* evaluate the property given by the immediate data */
1393 p = get_prop(vmg_ p - entry_ptr_native_, &val, prop, &val, 0);
1394 break;
1395
1396 case OPC_PTRGETPROPDATA:
1397 /* get the property and object to evaluate */
1398 pop_prop(vmg_ &val);
1399 G_stk->pop(&val2);
1400
1401 /*
1402 * if the object is not an object, it's one of the
1403 * native types, in which case we'll definitely run
1404 * native code to evaluate the property, in which case
1405 * it's not valid for speculative evaluation
1406 */
1407 if (val2.typ != VM_OBJ)
1408 err_throw(VMERR_BAD_SPEC_EVAL);
1409
1410 /* check validity for speculative evaluation */
1411 check_prop_spec_eval(vmg_ val2.val.obj, val.val.prop);
1412
1413 /* evaluate it */
1414 p = get_prop(vmg_ p - entry_ptr_native_,
1415 &val2, val.val.prop, &val2, 0);
1416 break;
1417
1418 case OPC_GETLCL1:
1419 /* push the local */
1420 pushval(vmg_ get_local(vmg_ get_op_uint8(&p)));
1421 break;
1422
1423 case OPC_GETLCL2:
1424 /* push the local */
1425 pushval(vmg_ get_local(vmg_ get_op_uint16(&p)));
1426 break;
1427
1428 case OPC_GETARG1:
1429 /* push the argument */
1430 pushval(vmg_ get_param(vmg_ get_op_uint8(&p)));
1431 break;
1432
1433 case OPC_GETARG2:
1434 /* push the argument */
1435 pushval(vmg_ get_param(vmg_ get_op_uint16(&p)));
1436 break;
1437
1438 case OPC_PUSHSELF:
1439 /* push 'self' */
1440 push_obj(vmg_ get_self(vmg0_));
1441 break;
1442
1443 case OPC_SETSELF:
1444 /* retrieve the 'self' object */
1445 G_stk->pop(&val);
1446
1447 /* set 'self' */
1448 set_self(vmg_ &val);
1449 break;
1450
1451 case OPC_STORECTX:
1452 {
1453 char buf[VMB_LEN + 4*VMB_DATAHOLDER];
1454
1455 /* our list has four elements */
1456 vmb_put_len(buf, 4);
1457
1458 /*
1459 * put the list elements: 'self', targetprop, original
1460 * target object, and defining object
1461 */
1462 vmb_put_dh_obj(buf + VMB_LEN, get_self(vmg0_));
1463 vmb_put_dh_prop(buf + VMB_LEN + VMB_DATAHOLDER,
1464 get_target_prop(vmg0_));
1465 vmb_put_dh_obj(buf + VMB_LEN + 2*VMB_DATAHOLDER,
1466 get_orig_target_obj(vmg0_));
1467 vmb_put_dh_obj(buf + VMB_LEN + 3*VMB_DATAHOLDER,
1468 get_defining_obj(vmg0_));
1469
1470 /* push a new list copied from our prepared buffer */
1471 push_obj(vmg_ CVmObjList::create(vmg_ FALSE, buf));
1472 }
1473 break;
1474
1475 case OPC_LOADCTX:
1476 {
1477 const char *lstp;
1478
1479 /*
1480 * convert the context object (at top of stack) to a
1481 * list pointer
1482 */
1483 lstp = G_stk->get(0)->get_as_list(vmg0_);
1484
1485 /* throw an error if it's not what we're expecting */
1486 if (lstp == 0 || vmb_get_len(lstp) < 4)
1487 err_throw(VMERR_LIST_VAL_REQD);
1488
1489 /* retrieve and store the context elements */
1490 set_method_ctx(
1491 vmg_ vmb_get_dh_obj(lstp + VMB_LEN),
1492 vmb_get_dh_prop(lstp + VMB_LEN
1493 + VMB_DATAHOLDER),
1494 vmb_get_dh_obj(lstp + VMB_LEN
1495 + 2*VMB_DATAHOLDER),
1496 vmb_get_dh_obj(lstp + VMB_LEN
1497 + 3*VMB_DATAHOLDER));
1498
1499 /* discard the context object at top of stack */
1500 G_stk->discard();
1501 }
1502 break;
1503
1504 case OPC_PUSHCTXELE:
1505 /* check our context element type */
1506 switch(*p++)
1507 {
1508 case PUSHCTXELE_TARGPROP:
1509 /* push the target property ID */
1510 push_prop(vmg_ get_target_prop(vmg0_));
1511 break;
1512
1513 case PUSHCTXELE_TARGOBJ:
1514 /* push the original target object ID */
1515 push_obj(vmg_ get_orig_target_obj(vmg0_));
1516 break;
1517
1518 case PUSHCTXELE_DEFOBJ:
1519 /* push the defining object */
1520 push_obj(vmg_ get_defining_obj(vmg0_));
1521 break;
1522
1523 default:
1524 /* the opcode is not valid in this VM version */
1525 err_throw(VMERR_INVALID_OPCODE);
1526 }
1527 break;
1528
1529 case OPC_GETARGC:
1530 /* push the argument counter */
1531 push_int(vmg_ get_cur_argc(vmg0_));
1532 break;
1533
1534 case OPC_DUP:
1535 /* re-push the item at top of stack */
1536 pushval(vmg_ G_stk->get(0));
1537 break;
1538
1539 case OPC_SWAP:
1540 /* swap the top two elements on the stack */
1541 valp = G_stk->get(0);
1542 valp2 = G_stk->get(1);
1543
1544 /* make a working copy of TOS */
1545 val = *valp;
1546
1547 /* copy TOS-1 over TOS */
1548 *valp = *valp2;
1549
1550 /* copy the working copy of TOS over TOS-1 */
1551 *valp2 = val;
1552 break;
1553
1554 case OPC_DISC:
1555 /* discard the item at the top of the stack */
1556 G_stk->discard();
1557 break;
1558
1559 case OPC_DISC1:
1560 /* discard n items */
1561 G_stk->discard(get_op_uint8(&p));
1562 break;
1563
1564 case OPC_GETR0:
1565 /* push the contents of R0 */
1566 pushval(vmg_ &r0_);
1567 break;
1568
1569 case OPC_GETDBARGC:
1570 /* push the argument count from the selected frame */
1571 push_int(vmg_ get_argc_at_level(vmg_ get_op_uint16(&p) + 1));
1572 break;
1573
1574 case OPC_GETDBLCL:
1575 /* get the local variable number and stack level */
1576 idx = get_op_uint16(&p);
1577 level = get_op_uint16(&p);
1578
1579 /* push the value */
1580 pushval(vmg_ get_local_at_level(vmg_ idx, level + 1));
1581 break;
1582
1583 case OPC_GETDBARG:
1584 /* get the parameter variable number and stack level */
1585 idx = get_op_uint16(&p);
1586 level = get_op_uint16(&p);
1587
1588 /* push the value */
1589 pushval(vmg_ get_param_at_level(vmg_ idx, level + 1));
1590 break;
1591
1592 case OPC_SETDBLCL:
1593 /* get the local variable number and stack level */
1594 idx = get_op_uint16(&p);
1595 level = get_op_uint16(&p);
1596
1597 /* get the local pointer */
1598 valp = get_local_at_level(vmg_ idx, level + 1);
1599
1600 /* pop the value into the local */
1601 popval(vmg_ valp);
1602 break;
1603
1604 case OPC_SETDBARG:
1605 /* get the parameter variable number and stack level */
1606 idx = get_op_uint16(&p);
1607 level = get_op_uint16(&p);
1608
1609 /* get the parameter pointer */
1610 valp = get_param_at_level(vmg_ idx, level + 1);
1611
1612 /* pop the value into the local */
1613 popval(vmg_ valp);
1614 break;
1615
            case OPC_SWITCH:
                /* get the control value (leave it on the stack for now) */
                valp = G_stk->get(0);

                /* get the case count */
                cnt = get_op_uint16(&p);

                /*
                 *   Iterate through the case table.  Each table entry is
                 *   7 bytes: a portable DATAHOLDER (presumably
                 *   VMB_DATAHOLDER == 5 bytes - confirm against the vmb_
                 *   definitions) giving the case's match value, followed
                 *   by a signed 16-bit branch offset.
                 */
                for ( ; cnt != 0 ; p += 7, --cnt)
                {
                    /* get this case's match value */
                    vmb_get_dh((const char *)p, &val2);

                    /* check if the control value matches this case */
                    if (valp->equals(vmg_ &val2))
                    {
                        /*
                         *   it matches - skip the match value and jump to
                         *   this case's branch offset
                         */
                        p += VMB_DATAHOLDER;
                        p += osrp2s(p);

                        /* no need to look any further */
                        break;
                    }
                }

                /* discard the control value */
                G_stk->discard();

                /*
                 *   If we exhausted the table without a match, 'p' now
                 *   points just past the last table entry, which is where
                 *   the default case's branch offset is stored - jump to
                 *   the default case.
                 */
                if (cnt == 0)
                    p += osrp2s(p);
                break;
1648
1649 case OPC_JMP:
1650 /* unconditionally jump to the given offset */
1651 p += osrp2s(p);
1652 break;
1653
1654 case OPC_JT:
1655 /* get the value */
1656 valp = G_stk->get(0);
1657
1658 /*
1659 * if it's true, or a non-zero numeric value, or any
1660 * non-numeric and non-boolean value, jump
1661 */
1662 if (valp->typ == VM_NIL
1663 || (valp->typ == VM_INT && valp->val.intval == 0))
1664 {
1665 /* it's zero or nil - do not jump */
1666 p += 2;
1667 }
1668 else
1669 {
1670 /* it's non-zero and non-nil - jump */
1671 p += osrp2s(p);
1672 }
1673
1674 /* discard the value */
1675 G_stk->discard();
1676 break;
1677
1678 case OPC_JR0T:
1679 /*
1680 * if R0 is true, or it's a non-zero numeric value, or any
1681 * non-numeric and non-boolean value, jump
1682 */
1683 if (r0_.typ == VM_NIL
1684 || (r0_.typ == VM_INT && r0_.val.intval == 0))
1685 {
1686 /* it's zero or nil - do not jump */
1687 p += 2;
1688 }
1689 else
1690 {
1691 /* it's non-zero and non-nil - jump */
1692 p += osrp2s(p);
1693 }
1694 break;
1695
1696 case OPC_JF:
1697 /* get the value */
1698 valp = G_stk->get(0);
1699
1700 /*
1701 * if it's true, or a non-zero numeric value, or any
1702 * non-numeric and non-boolean value, do not jump;
1703 * otherwise, jump
1704 */
1705 if (valp->typ == VM_NIL
1706 || (valp->typ == VM_INT && valp->val.intval == 0))
1707 {
1708 /* it's zero or nil - jump */
1709 p += osrp2s(p);
1710 }
1711 else
1712 {
1713 /* it's non-zero and non-nil - do not jump */
1714 p += 2;
1715 }
1716
1717 /* discard the value */
1718 G_stk->discard();
1719 break;
1720
1721 case OPC_JR0F:
1722 /*
1723 * if R0 is true, or it's a non-zero numeric value, or any
1724 * non-numeric and non-boolean value, stay put; otherwise,
1725 * jump
1726 */
1727 if (r0_.typ == VM_NIL
1728 || (r0_.typ == VM_INT && r0_.val.intval == 0))
1729 {
1730 /* it's zero or nil - jump */
1731 p += osrp2s(p);
1732 }
1733 else
1734 {
1735 /* it's non-zero and non-nil - do not jump */
1736 p += 2;
1737 }
1738 break;
1739
1740 case OPC_JE:
1741 /* jump if the two values at top of stack are equal */
1742 p += (pop2_equal(vmg0_) ? osrp2s(p) : 2);
1743 break;
1744
1745 case OPC_JNE:
1746 /* jump if the two values at top of stack are not equal */
1747 p += (!pop2_equal(vmg0_) ? osrp2s(p) : 2);
1748 break;
1749
1750 case OPC_JGT:
1751 /* jump if greater */
1752 p += (pop2_compare_gt(vmg0_) ? osrp2s(p) : 2);
1753 break;
1754
1755 case OPC_JGE:
1756 /* jump if greater or equal */
1757 p += (pop2_compare_ge(vmg0_) ? osrp2s(p) : 2);
1758 break;
1759
1760 case OPC_JLT:
1761 /* jump if less */
1762 p += (pop2_compare_lt(vmg0_) ? osrp2s(p) : 2);
1763 break;
1764
1765 case OPC_JLE:
1766 /* jump if less or equal */
1767 p += (pop2_compare_le(vmg0_) ? osrp2s(p) : 2);
1768 break;
1769
            case OPC_JST:
                /* get (do not remove) the element at top of stack */
                valp = G_stk->get(0);

                /*
                 *   If it's true, an enum, or a non-zero number, jump,
                 *   saving the value (i.e., leaving it on the stack);
                 *   otherwise, require that it be a logical value, pop
                 *   it, and proceed.  (Note that '&&' binds tighter than
                 *   '||' here, so the last term reads as
                 *   (typ == VM_INT && !num_is_zero()).)
                 */
                if (valp->typ == VM_TRUE
                    || valp->typ == VM_ENUM
                    || valp->typ == VM_INT && !valp->num_is_zero())
                {
                    /* it's true - save it and jump */
                    p += osrp2s(p);
                }
                else
                {
                    /*
                     *   it's not true - discard the value, but first
                     *   require that it be a valid logical value
                     */
                    if (valp->typ != VM_NIL && valp->typ != VM_INT)
                        err_throw(VMERR_LOG_VAL_REQD);
                    G_stk->discard();

                    /* skip past the 2-byte branch offset operand */
                    p += 2;
                }
                break;
1800
1801 case OPC_JSF:
1802 /* get (do not remove) the element at top of stack */
1803 valp = G_stk->get(0);
1804
1805 /*
1806 * if it's nil or zero, jump, saving the value;
1807 * otherwise, discard the value and proceed
1808 */
1809 if (valp->typ == VM_NIL
1810 || valp->typ == VM_INT && valp->num_is_zero())
1811 {
1812 /* it's nil or zero - save it and jump */
1813 p += osrp2s(p);
1814 }
1815 else
1816 {
1817 /* it's something non-false - discard it */
1818 G_stk->discard();
1819
1820 /* skip to the next instruction */
1821 p += 2;
1822 }
1823 break;
1824
1825 case OPC_LJSR:
1826 /*
1827 * compute and push the offset of the next instruction
1828 * (at +2 because of the branch offset operand) from our
1829 * method header - this will be the return address,
1830 * which in this offset format will survive any code
1831 * swapping that might occur in subsequent execution
1832 */
1833 push_int(vmg_ pc_to_method_ofs(p + 2));
1834
1835 /* jump to the target address */
1836 p += osrp2s(p);
1837 break;
1838
1839 case OPC_LRET:
1840 /* get the indicated local variable */
1841 valp = get_local(vmg_ get_op_uint16(&p));
1842
1843 /* the value must be an integer */
1844 if (valp->typ != VM_INT)
1845 err_throw(VMERR_INT_VAL_REQD);
1846
1847 /*
1848 * jump to the code address obtained from adding the
1849 * integer value in the given local variable to the
1850 * current method header pointer
1851 */
1852 p = entry_ptr_native_ + valp->val.intval;
1853 break;
1854
1855 case OPC_JNIL:
1856 /* jump if top of stack is nil */
1857 valp = G_stk->get(0);
1858 p += (valp->typ == VM_NIL ? osrp2s(p) : 2);
1859
1860 /* discard the top value, regardless of what happened */
1861 G_stk->discard();
1862 break;
1863
1864 case OPC_JNOTNIL:
1865 /* jump if top of stack is not nil */
1866 valp = G_stk->get(0);
1867 p += (valp->typ != VM_NIL ? osrp2s(p) : 2);
1868
1869 /* discard the top value, regardless of what happened */
1870 G_stk->discard();
1871 break;
1872
1873 case OPC_SAY:
1874 /* get the string offset */
1875 ofs = get_op_int32(&p);
1876
1877 /* display it */
1878 p = disp_dstring(vmg_ ofs, p - entry_ptr_native_,
1879 get_self(vmg0_));
1880 break;
1881
1882 case OPC_SAYVAL:
1883 /* invoke the default string display function */
1884 p = disp_string_val(vmg_ p - entry_ptr_native_,
1885 get_self(vmg0_));
1886 break;
1887
1888 case OPC_THROW:
1889 /* pop the exception object */
1890 pop_obj(vmg_ &val);
1891
1892 /*
1893 * Throw it. Note that we pass the start of the current
1894 * instruction as the program counter, since we want to
1895 * find the exception handler (if any) for the current
1896 * instruction, not for the next instruction.
1897 */
1898 if ((p = do_throw(vmg_ p - 1, val.val.obj)) == 0)
1899 {
1900 /* remember the unhandled exception for re-throwing */
1901 unhandled_exc = val.val.obj;
1902
1903 /* terminate execution */
1904 goto exit_loop;
1905 }
1906 break;
1907
1908 case OPC_INHERIT:
1909 /* get the argument count */
1910 argc = get_op_uint8(&p);
1911
1912 do_opc_inherit:
1913 /* inherit the property */
1914 prop = (vm_prop_id_t)get_op_uint16(&p);
1915 p = inh_prop(vmg_ p - entry_ptr_native_, prop, argc);
1916 break;
1917
1918 case OPC_PTRINHERIT:
1919 /* get the argument count */
1920 argc = get_op_uint8(&p);
1921
1922 do_opc_ptrinherit:
1923 /* pop the property to be inherited */
1924 pop_prop(vmg_ &val);
1925
1926 /* inherit it */
1927 p = inh_prop(vmg_ p - entry_ptr_native_, val.val.prop, argc);
1928 break;
1929
1930 case OPC_EXPINHERIT:
1931 /* get the argument count */
1932 argc = get_op_uint8(&p);
1933
1934 do_opc_expinherit:
1935 /* get the property to inherit */
1936 prop = (vm_prop_id_t)get_op_uint16(&p);
1937
1938 /* get the superclass to inherit it from */
1939 val.set_obj((vm_obj_id_t)get_op_uint32(&p));
1940
1941 /*
1942 * inherit it -- process this essentially the same way
1943 * as a normal CALLPROP, since we're going to evaluate
1944 * the given property of the given object, but retain
1945 * the current 'self' object
1946 */
1947 val2.set_obj(get_self(vmg0_));
1948 p = get_prop(vmg_ p - entry_ptr_native_,
1949 &val, prop, &val2, argc);
1950 break;
1951
1952 case OPC_PTREXPINHERIT:
1953 /* get the argument count */
1954 argc = get_op_uint8(&p);
1955
1956 do_opc_ptrexpinherit:
1957 /* pop the property to inherit */
1958 pop_prop(vmg_ &val);
1959
1960 /* get the superclass to inherit it from */
1961 val3.set_obj((vm_obj_id_t)get_op_uint32(&p));
1962
1963 /* inherit it */
1964 val2.set_obj(get_self(vmg0_));
1965 p = get_prop(vmg_ p - entry_ptr_native_,
1966 &val3, val.val.prop, &val2, argc);
1967 break;
1968
1969 case OPC_DELEGATE:
1970 /* get the argument count */
1971 argc = get_op_uint8(&p);
1972
1973 do_opc_delegate:
1974 /* get the property to inherit */
1975 prop = (vm_prop_id_t)get_op_uint16(&p);
1976
1977 /* get the object to delegate to */
1978 G_stk->pop(&val);
1979
1980 /* delegate it */
1981 val2.set_obj(get_self(vmg0_));
1982 p = get_prop(vmg_ p - entry_ptr_native_,
1983 &val, prop, &val2, argc);
1984 break;
1985
1986 case OPC_PTRDELEGATE:
1987 /* get the argument count */
1988 argc = get_op_uint8(&p);
1989
1990 do_opc_ptrdelegate:
1991 /* pop the property to delegate to */
1992 pop_prop(vmg_ &val);
1993
1994 /* pop the object to delegate to */
1995 G_stk->pop(&val2);
1996
1997 /* delegate it */
1998 val3.set_obj(get_self(vmg0_));
1999 p = get_prop(vmg_ p - entry_ptr_native_,
2000 &val2, val.val.prop, &val3, argc);
2001 break;
2002
2003 case OPC_BUILTIN_A:
2004 /* get the function index and argument count */
2005 argc = get_op_uint8(&p);
2006
2007 do_opc_builtin_a:
2008 idx = get_op_uint8(&p);
2009
2010 /* call the function in set #0 */
2011 call_bif(vmg_ 0, idx, argc);
2012 break;
2013
2014 case OPC_BUILTIN_B:
2015 /* get the function index and argument count */
2016 argc = get_op_uint8(&p);
2017
2018 do_opc_builtin_b:
2019 idx = get_op_uint8(&p);
2020
2021 /* call the function in set #1 */
2022 call_bif(vmg_ 1, idx, argc);
2023 break;
2024
2025 case OPC_BUILTIN_C:
2026 /* get the function index and argument count */
2027 argc = get_op_uint8(&p);
2028
2029 do_opc_builtin_c:
2030 idx = get_op_uint8(&p);
2031
2032 /* call the function in set #2 */
2033 call_bif(vmg_ 2, idx, argc);
2034 break;
2035
2036 case OPC_BUILTIN_D:
2037 /* get the function index and argument count */
2038 argc = get_op_uint8(&p);
2039
2040 do_opc_builtin_d:
2041 idx = get_op_uint8(&p);
2042
2043 /* call the function in set #3 */
2044 call_bif(vmg_ 3, idx, argc);
2045 break;
2046
2047 case OPC_BUILTIN1:
2048 /* get the function index and argument count */
2049 argc = get_op_uint8(&p);
2050
2051 do_opc_builtin1:
2052 idx = get_op_uint8(&p);
2053
2054 /* get the function set ID */
2055 set_idx = get_op_uint8(&p);
2056
2057 /* call the function in set #0 */
2058 call_bif(vmg_ set_idx, idx, argc);
2059 break;
2060
2061 case OPC_BUILTIN2:
2062 /* get the function index and argument count */
2063 argc = get_op_uint8(&p);
2064
2065 do_opc_builtin2:
2066 idx = get_op_uint16(&p);
2067
2068 /* get the function set ID */
2069 set_idx = get_op_uint8(&p);
2070
2071 /* call the function in set #0 */
2072 call_bif(vmg_ set_idx, idx, argc);
2073 break;
2074
            case OPC_CALLEXT:
                /*
                 *   External native-code calls are not implemented in
                 *   this VM version - throw an error unconditionally.
                 *   TODO ($$$): implement if/when an external call
                 *   mechanism is designed.
                 */
                err_throw(VMERR_CALLEXT_NOT_IMPL);
                break;
2079
2080 case OPC_INDEX:
2081 /*
2082 * make a safe copy of the object to index, as we're going
2083 * to store the result directly over that stack slot
2084 */
2085 val = *(valp = G_stk->get(1));
2086
2087 /* index val by TOS, storing the result at TOS-1 */
2088 apply_index(vmg_ valp, &val, G_stk->get(0));
2089
2090 /* discard the index value */
2091 G_stk->discard();
2092 break;
2093
2094 case OPC_IDXLCL1INT8:
2095 /* get the local */
2096 valp = get_local(vmg_ get_op_uint8(&p));
2097
2098 /* get the index value */
2099 val2.set_int(get_op_uint8(&p));
2100
2101 /*
2102 * look up the indexed value of the local, storing the
2103 * result in a newly-pushed stack element
2104 */
2105 apply_index(vmg_ G_stk->push(), valp, &val2);
2106 break;
2107
2108 case OPC_IDXINT8:
2109 /*
2110 * make a copy of the value to index, so we can overwrite
2111 * the stack slot with the result
2112 */
2113 val = *(valp = G_stk->get(0));
2114
2115 /* set up the index value */
2116 val2.set_int(get_op_uint8(&p));
2117
2118 /* apply the index, storing the result at TOS */
2119 apply_index(vmg_ valp, &val, &val2);
2120 break;
2121
2122 case OPC_BP:
2123 /* step back to the breakpoint location itself */
2124 VM_IF_DEBUGGER(--p);
2125
2126 /* let the debugger take control */
2127 VM_IF_DEBUGGER(G_debugger
2128 ->step(vmg_ &p, entry_ptr_, TRUE, 0));
2129
2130 /* if there's no debugger, it's an error */
2131 VM_IF_NOT_DEBUGGER(err_throw(VMERR_BREAKPOINT));
2132
2133 /*
2134 * go back and execute the current instruction - bypass
2135 * single-step tracing into the debugger in this case,
2136 * since the debugger expects when it returns that one
2137 * instruction will always be traced before the debugger
2138 * is re-entered
2139 */
2140 goto exec_instruction;
2141
2142 case OPC_NOP:
2143 /* NO OP - no effect */
2144 break;
2145
2146 case OPC_TRNEW1:
2147 trans = TRUE;
2148 goto do_opc_new1;
2149
2150 case OPC_NEW1:
2151 trans = FALSE;
2152 /* fall through to do_opc_new1 */
2153
2154 do_opc_new1:
2155 /* get the argument count */
2156 argc = get_op_uint8(&p);
2157
2158 /* fall through to do_opc_new1_argc */
2159
2160 do_opc_new1_argc:
2161 /* get the metaclass ID */
2162 idx = get_op_uint8(&p);
2163
2164 /* create the new object */
2165 p = new_and_store_r0(vmg_ p, idx, argc, trans);
2166 break;
2167
2168 case OPC_TRNEW2:
2169 trans = TRUE;
2170 goto do_opc_new2;
2171
2172 case OPC_NEW2:
2173 trans = FALSE;
2174 /* fall through to do_opc_new2 */
2175
2176 do_opc_new2:
2177 /* get the argument count */
2178 argc = get_op_uint16(&p);
2179
2180 /* fall through to do_opc_new2_argc */
2181
2182 do_opc_new2_argc:
2183 /* get the metaclass ID */
2184 idx = get_op_uint16(&p);
2185
2186 /* create the new object */
2187 p = new_and_store_r0(vmg_ p, idx, argc, trans);
2188 break;
2189
2190 case OPC_INCLCL:
2191 /* get the local */
2192 valp = get_local(vmg_ get_op_uint16(&p));
2193
2194 /* check if it's a number */
2195 if (valp->is_numeric())
2196 {
2197 /* it's a number - just increment the value */
2198 ++(valp->val.intval);
2199 }
2200 else
2201 {
2202 /* it's a non-numeric value - do the full addition */
2203 val2.set_int(1);
2204 compute_sum(vmg_ valp, &val2);
2205 }
2206 break;
2207
2208 case OPC_DECLCL:
2209 /* get the local */
2210 valp = get_local(vmg_ get_op_uint16(&p));
2211
2212 /* check for a number */
2213 if (valp->is_numeric())
2214 {
2215 /* it's a number - just decrement the value */
2216 --(valp->val.intval);
2217 }
2218 else
2219 {
2220 /* non-numeric - we must do the full subtraction work */
2221 val2.set_int(1);
2222 compute_diff(vmg_ valp, &val2);
2223 }
2224 break;
2225
2226 case OPC_ADDILCL1:
2227 /* get the local */
2228 valp = get_local(vmg_ get_op_uint8(&p));
2229
2230 /* if it's numeric, handle it in-line */
2231 if (valp->is_numeric())
2232 {
2233 /* it's a number - just add the value */
2234 valp->val.intval += get_op_int8(&p);
2235 }
2236 else
2237 {
2238 /* get the number to add */
2239 val2.set_int(get_op_int8(&p));
2240
2241 /* compute the sum, leaving the result in the local */
2242 compute_sum(vmg_ valp, &val2);
2243 }
2244 break;
2245
2246 case OPC_ADDILCL4:
2247 /* get the local */
2248 valp = get_local(vmg_ get_op_uint16(&p));
2249
2250 /* if it's a number, handle it in-line */
2251 if (valp->is_numeric())
2252 {
2253 /* it's a number - just add the value */
2254 valp->val.intval += get_op_int32(&p);
2255 }
2256 else
2257 {
2258 /* get the number to add */
2259 val2.set_int(get_op_int32(&p));
2260
2261 /* compute the sum, leaving the result in the local */
2262 compute_sum(vmg_ valp, &val2);
2263 }
2264 break;
2265
2266 case OPC_ADDTOLCL:
2267 /* get the local */
2268 valp = get_local(vmg_ get_op_uint16(&p));
2269
2270 /* get the value to add */
2271 valp2 = G_stk->get(0);
2272
2273 /* if they're both numeric, handle in-line */
2274 if (valp->is_numeric() && valp2->is_numeric())
2275 {
2276 /* add the value to the local */
2277 valp->val.intval += valp2->val.intval;
2278 }
2279 else
2280 {
2281 /* compute the sum, leaving the result in the local */
2282 compute_sum(vmg_ valp, valp2);
2283 }
2284
2285 /* discard the addend */
2286 G_stk->discard();
2287 break;
2288
2289 case OPC_SUBFROMLCL:
2290 /* get the local */
2291 valp = get_local(vmg_ get_op_uint16(&p));
2292
2293 /* get the value to add */
2294 valp2 = G_stk->get(0);
2295
2296 /* if they're both numeric, handle in-line */
2297 if (valp->is_numeric() && valp2->is_numeric())
2298 {
2299 /* subtract the value from the local */
2300 valp->val.intval -= valp2->val.intval;
2301 }
2302 else
2303 {
2304 /* subtract the values, leaving the result in the local */
2305 compute_diff(vmg_ valp, valp2);
2306 }
2307
2308 /* discard the value subtracted */
2309 G_stk->discard();
2310 break;
2311
            case OPC_ZEROLCL1:
                /* get the local (8-bit index) and set it to integer zero */
                get_local(vmg_ get_op_uint8(&p))->set_int(0);
                break;

            case OPC_ZEROLCL2:
                /* get the local (16-bit index) and set it to integer zero */
                get_local(vmg_ get_op_uint16(&p))->set_int(0);
                break;

            case OPC_NILLCL1:
                /* get the local (8-bit index) and set it to nil */
                get_local(vmg_ get_op_uint8(&p))->set_nil();
                break;

            case OPC_NILLCL2:
                /* get the local (16-bit index) and set it to nil */
                get_local(vmg_ get_op_uint16(&p))->set_nil();
                break;

            case OPC_ONELCL1:
                /* get the local (8-bit index) and set it to integer 1 */
                get_local(vmg_ get_op_uint8(&p))->set_int(1);
                break;

            case OPC_ONELCL2:
                /* get the local (16-bit index) and set it to integer 1 */
                get_local(vmg_ get_op_uint16(&p))->set_int(1);
                break;
2341
2342 case OPC_SETLCL1:
2343 /* get a pointer to the local */
2344 valp = get_local(vmg_ get_op_uint8(&p));
2345
2346 /* pop the value into the local */
2347 popval(vmg_ valp);
2348 break;
2349
2350 case OPC_SETLCL2:
2351 /* get a pointer to the local */
2352 valp = get_local(vmg_ get_op_uint16(&p));
2353
2354 /* pop the value into the local */
2355 popval(vmg_ valp);
2356 break;
2357
2358 case OPC_SETLCL1R0:
2359 /* store R0 in the specific local */
2360 *get_local(vmg_ get_op_uint8(&p)) = r0_;
2361 break;
2362
2363 case OPC_SETARG1:
2364 /* get a pointer to the parameter */
2365 valp = get_param(vmg_ get_op_uint8(&p));
2366
2367 /* pop the value into the parameter */
2368 popval(vmg_ valp);
2369 break;
2370
2371 case OPC_SETARG2:
2372 /* get a pointer to the parameter */
2373 valp = get_param(vmg_ get_op_uint16(&p));
2374
2375 /* pop the value into the parameter */
2376 popval(vmg_ valp);
2377 break;
2378
2379 case OPC_SETIND:
2380 /* pop the index */
2381 popval(vmg_ &val2);
2382
2383 /* pop the value to be indexed */
2384 popval(vmg_ &val);
2385
2386 /* pop the value to assign */
2387 popval(vmg_ &val3);
2388
2389 /* assign into the index */
2390 set_index(vmg_ &val, &val2, &val3);
2391
2392 /* push the new container value */
2393 pushval(vmg_ &val);
2394 break;
2395
2396 case OPC_SETINDLCL1I8:
2397 /* get the local */
2398 valp = get_local(vmg_ get_op_uint8(&p));
2399
2400 /* get the index value */
2401 val2.set_int(get_op_uint8(&p));
2402
2403 /* pop the value to assign */
2404 popval(vmg_ &val3);
2405
2406 /*
2407 * set the index value - this will update the local
2408 * variable directly if the container value changes
2409 */
2410 set_index(vmg_ valp, &val2, &val3);
2411 break;
2412
2413 case OPC_SETPROP:
2414 /* get the object whose property we're setting */
2415 pop_obj(vmg_ &val);
2416
2417 /* pop the value we're setting */
2418 popval(vmg_ &val2);
2419
2420 /* set the value */
2421 set_prop(vmg_ val.val.obj, get_op_uint16(&p), &val2);
2422 break;
2423
2424 case OPC_PTRSETPROP:
2425 /* get the property and object to set */
2426 pop_prop(vmg_ &val);
2427 pop_obj(vmg_ &val2);
2428
2429 /* get the value to set */
2430 popval(vmg_ &val3);
2431
2432 /* set it */
2433 set_prop(vmg_ val2.val.obj, val.val.prop, &val3);
2434 break;
2435
2436 case OPC_SETPROPSELF:
2437 /* get the value to set */
2438 popval(vmg_ &val);
2439
2440 /* set it */
2441 set_prop(vmg_ get_self(vmg0_), get_op_uint16(&p), &val);
2442 break;
2443
            case OPC_OBJSETPROP:
                /* get the object (32-bit object ID immediate operand) */
                obj = (vm_obj_id_t)get_op_uint32(&p);

                /* pop the new value to assign */
                popval(vmg_ &val);

                /* set the property (16-bit property ID immediate operand) */
                set_prop(vmg_ obj, get_op_uint16(&p), &val);
                break;
2454
2455 #ifdef OS_FILL_OUT_CASE_TABLES
2456 /*
             *   Since this switch is the innermost inner loop of the VM,
2458 * we go to some extra lengths to optimize it where possible.
2459 * See tads2/osifc.h for information on how to use
2460 * OS_FILL_OUT_CASE_TABLES and OS_IMPOSSIBLE_DEFAULT_CASE.
2461 *
2462 * Our controlling expression is an unsigned character value,
2463 * so we know the range of possible values will be limited to
2464 * 0-255. Therefore, we simply need to provide a "case"
2465 * alternative for every invalid opcode. To further encourage
2466 * the compiler to favor speed here, we specifically put
2467 * different code in every one of these case alternatives, to
2468 * force the compiler to generate a separate jump location for
2469 * each one; some compilers will generate a two-level jump
2470 * table if many cases point to shared code, to reduce the size
2471 * of the table, but we don't want that here because this
2472 * switch is critical to VM performance so we want it as fast
2473 * as possible.
2474 */
2475 case 0x00: val.val.intval = 0x00;
2476 case 0x10: val.val.intval = 0x10;
2477 case 0x11: val.val.intval = 0x11;
2478 case 0x12: val.val.intval = 0x12;
2479 case 0x13: val.val.intval = 0x13;
2480 case 0x14: val.val.intval = 0x14;
2481 case 0x15: val.val.intval = 0x15;
2482 case 0x16: val.val.intval = 0x16;
2483 case 0x17: val.val.intval = 0x17;
2484 case 0x18: val.val.intval = 0x18;
2485 case 0x19: val.val.intval = 0x19;
2486 case 0x1A: val.val.intval = 0x1A;
2487 case 0x1B: val.val.intval = 0x1B;
2488 case 0x1C: val.val.intval = 0x1C;
2489 case 0x1D: val.val.intval = 0x1D;
2490 case 0x1E: val.val.intval = 0x1E;
2491 case 0x1F: val.val.intval = 0x1F;
2492 case 0x30: val.val.intval = 0x30;
2493 case 0x31: val.val.intval = 0x31;
2494 case 0x32: val.val.intval = 0x32;
2495 case 0x33: val.val.intval = 0x33;
2496 case 0x34: val.val.intval = 0x34;
2497 case 0x35: val.val.intval = 0x35;
2498 case 0x36: val.val.intval = 0x36;
2499 case 0x37: val.val.intval = 0x37;
2500 case 0x38: val.val.intval = 0x38;
2501 case 0x39: val.val.intval = 0x39;
2502 case 0x3A: val.val.intval = 0x3A;
2503 case 0x3B: val.val.intval = 0x3B;
2504 case 0x3C: val.val.intval = 0x3C;
2505 case 0x3D: val.val.intval = 0x3D;
2506 case 0x3E: val.val.intval = 0x3E;
2507 case 0x3F: val.val.intval = 0x3F;
2508 case 0x46: val.val.intval = 0x46;
2509 case 0x47: val.val.intval = 0x47;
2510 case 0x48: val.val.intval = 0x48;
2511 case 0x49: val.val.intval = 0x49;
2512 case 0x4A: val.val.intval = 0x4A;
2513 case 0x4B: val.val.intval = 0x4B;
2514 case 0x4C: val.val.intval = 0x4C;
2515 case 0x4D: val.val.intval = 0x4D;
2516 case 0x4E: val.val.intval = 0x4E;
2517 case 0x4F: val.val.intval = 0x4F;
2518 case 0x53: val.val.intval = 0x53;
2519 case 0x55: val.val.intval = 0x55;
2520 case 0x56: val.val.intval = 0x56;
2521 case 0x57: val.val.intval = 0x57;
2522 case 0x5A: val.val.intval = 0x5A;
2523 case 0x5B: val.val.intval = 0x5B;
2524 case 0x5C: val.val.intval = 0x5C;
2525 case 0x5D: val.val.intval = 0x5D;
2526 case 0x5E: val.val.intval = 0x5E;
2527 case 0x5F: val.val.intval = 0x5F;
2528 case 0x6E: val.val.intval = 0x6E;
2529 case 0x6F: val.val.intval = 0x6F;
2530 case 0x70: val.val.intval = 0x70;
2531 case 0x71: val.val.intval = 0x71;
2532 case 0x79: val.val.intval = 0x79;
2533 case 0x7A: val.val.intval = 0x7A;
2534 case 0x7B: val.val.intval = 0x7B;
2535 case 0x7C: val.val.intval = 0x7C;
2536 case 0x7D: val.val.intval = 0x7D;
2537 case 0x7E: val.val.intval = 0x7E;
2538 case 0x7F: val.val.intval = 0x7F;
2539 case 0x8F: val.val.intval = 0x8F;
2540 case 0xA2: val.val.intval = 0xA2;
2541 case 0xA3: val.val.intval = 0xA3;
2542 case 0xA4: val.val.intval = 0xA4;
2543 case 0xA5: val.val.intval = 0xA5;
2544 case 0xA6: val.val.intval = 0xA6;
2545 case 0xA7: val.val.intval = 0xA7;
2546 case 0xA8: val.val.intval = 0xA8;
2547 case 0xA9: val.val.intval = 0xA9;
2548 case 0xAA: val.val.intval = 0xAA;
2549 case 0xAB: val.val.intval = 0xAB;
2550 case 0xAC: val.val.intval = 0xAC;
2551 case 0xAD: val.val.intval = 0xAD;
2552 case 0xAE: val.val.intval = 0xAE;
2553 case 0xAF: val.val.intval = 0xAF;
2554 case 0xBD: val.val.intval = 0xBD;
2555 case 0xBE: val.val.intval = 0xBE;
2556 case 0xBF: val.val.intval = 0xBF;
2557 case 0xC4: val.val.intval = 0xC4;
2558 case 0xC5: val.val.intval = 0xC5;
2559 case 0xC6: val.val.intval = 0xC6;
2560 case 0xC7: val.val.intval = 0xC7;
2561 case 0xC8: val.val.intval = 0xC8;
2562 case 0xC9: val.val.intval = 0xC9;
2563 case 0xCA: val.val.intval = 0xCA;
2564 case 0xCB: val.val.intval = 0xCB;
2565 case 0xCC: val.val.intval = 0xCC;
2566 case 0xCD: val.val.intval = 0xCD;
2567 case 0xCE: val.val.intval = 0xCE;
2568 case 0xCF: val.val.intval = 0xCF;
2569 case 0xDC: val.val.intval = 0xDC;
2570 case 0xDD: val.val.intval = 0xDD;
2571 case 0xDE: val.val.intval = 0xDE;
2572 case 0xDF: val.val.intval = 0xDF;
2573 case 0xF0: val.val.intval = 0xF0;
2574 case 0xF3: val.val.intval = 0xF3;
2575 case 0xF4: val.val.intval = 0xF4;
2576 case 0xF5: val.val.intval = 0xF5;
2577 case 0xF6: val.val.intval = 0xF6;
2578 case 0xF7: val.val.intval = 0xF7;
2579 case 0xF8: val.val.intval = 0xF8;
2580 case 0xF9: val.val.intval = 0xF9;
2581 case 0xFA: val.val.intval = 0xFA;
2582 case 0xFB: val.val.intval = 0xFB;
2583 case 0xFC: val.val.intval = 0xFC;
2584 case 0xFD: val.val.intval = 0xFD;
2585 case 0xFE: val.val.intval = 0xFE;
2586 case 0xFF: val.val.intval = 0xFF;
2587 err_throw(VMERR_INVALID_OPCODE);
2588
2589 OS_IMPOSSIBLE_DEFAULT_CASE
2590
2591 #else /* OS_FILL_OUT_CASE_TABLES */
2592 case 0:
2593 /*
2594 * Explicitly call out this invalid instruction case so
2595 * that we can avoid extra work in computing the switch.
2596 * Some compilers will be smart enough to observe that we
2597 * populate the full range of possible values (0-255) for
2598 * the datatype of the switch control expression, and thus
2599 * will build jump tables that can be jumped through
2600 * without range-checking the value. (No range checking
2601 * is necessary, because a uchar simply cannot hold any
2602 * values outside of the 0-255 range.) This doesn't
2603 * guarantee that the compiler will be smart, but it does
2604 * help with some compilers and shouldn't hurt performance
2605 * with those that don't make any use of the situation.
2606 */
2607 err_throw(VMERR_INVALID_OPCODE);
2608
2609 case 0xFF:
2610 /*
2611 * explicitly call out this invalid instruction for the
2612 * same reasons we call out case 0 above
2613 */
2614 err_throw(VMERR_INVALID_OPCODE);
2615
2616 default:
2617 /* unrecognized opcode */
2618 err_throw(VMERR_INVALID_OPCODE);
2619 break;
2620
2621 #endif /* OS_FILL_OUT_CASE_TABLES */
2622 }
2623 }
2624
2625 /*
2626 * We jump to this label when it's time to terminate execution
2627 * and return to the host environment which called us.
2628 */
2629 exit_loop:
2630 /* note that we're ready to return */
2631 done = TRUE;
2632 }
2633 err_catch(err)
2634 {
2635 int i;
2636 volatile int released_reserve = FALSE;
2637
2638 err_try
2639 {
2640 /*
2641 * Return to the start of the most recent instruction - we've
2642 * already at least partially decoded the instruction, so we
2643 * won't be pointing to its first byte. Note that last_pc is
2644 * a non-register variable (because we take its address to
2645 * store in pc_ptr_), so it will correctly indicate the
2646 * current instruction even though we've jumped here via
2647 * longjmp.
2648 */
2649 p = last_pc;
2650
2651 /*
2652 * Create a new exception object to describe the error. The
2653 * arguments to the constructor are the error number and the
2654 * error parameters.
2655 *
2656 * If the error code is "unhandled exception," it means that
2657 * an exception occurred in a recursive interpreter
2658 * invocation, and the exception wasn't handled within the
2659 * code called recursively; in this case, we can simply
2660 * re-throw the original error, and perhaps handle it in the
2661 * context of the current code.
2662 */
2663 if (err->get_error_code() == VMERR_UNHANDLED_EXC)
2664 {
2665 /* get the original exception object from the error stack */
2666 obj = (vm_obj_id_t)err->get_param_ulong(0);
2667 }
2668 else
2669 {
2670 /* step into the debugger, if it's present */
2671 VM_IF_DEBUGGER(
2672 {
2673 const uchar *dbgp;
2674
2675 /*
2676 * If we're in the process of halting the VM, don't
2677 * bother stepping into the debugger. We'll check the
2678 * same thing in a moment, after we get back from
2679 * stepping into the debugger, but this check isn't
2680 * redundant: we could already be halting even before
2681 * we enter the debugger here, because we could be
2682 * unwinding the native (C++) error stack on our way
2683 * out from such a halt.
2684 */
2685 if (halt_vm_)
2686 {
2687 done = TRUE;
2688 goto skip_throw;
2689 }
2690
2691 /* make a copy of the PC for the debugger's use */
2692 dbgp = p;
2693
2694 /* step into the debugger */
2695 G_debugger->step(vmg_ &dbgp, entry_ptr_, FALSE,
2696 err->get_error_code());
2697
2698 /*
2699 * if the VM was halted while in the debugger, stop
2700 * running immediately - do not process the exception
2701 * any further
2702 */
2703 if (halt_vm_)
2704 {
2705 done = TRUE;
2706 goto skip_throw;
2707 }
2708
2709 /*
2710 * if they moved the execution pointer, resume
2711 * execution at the new point, discarding the
2712 * exception
2713 */
2714 if (dbgp != p)
2715 {
2716 /* resume execution at the new location */
2717 p = dbgp;
2718
2719 /* discard the exception and resume execution */
2720 goto skip_throw;
2721 }
2722 }
2723 );
2724
2725 /*
2726 * If this is a stack overflow exception, there's probably
2727 * not enough stack left to create the exception object.
2728 * Fortunately, we have an emergency stack reserve just for
2729 * such conditions, so release it now, hopefully giving us
2730 * enough room to work with to construct the exception.
2731 */
2732 if (err->get_error_code() == VMERR_STACK_OVERFLOW)
2733 released_reserve = G_stk->release_reserve();
2734
2735 /* push the error parameters (in reverse order) */
2736 for (i = err->get_param_count() ; i > 0 ; )
2737 {
2738 /* go to the next parameter */
2739 --i;
2740
2741 /* see what we have and push an appropriate value */
2742 switch(err->get_param_type(i-1))
2743 {
2744 case ERR_TYPE_INT:
2745 /* push the integer value */
2746 push_int(vmg_ err->get_param_int(i));
2747 break;
2748
2749 case ERR_TYPE_ULONG:
2750 /* push the value */
2751 push_int(vmg_ (int32)err->get_param_ulong(i));
2752 break;
2753
2754 case ERR_TYPE_TEXTCHAR:
2755 /* push a new string with the text */
2756 push_obj(vmg_ CVmObjString::create(vmg_ FALSE,
2757 err->get_param_text(i),
2758 get_strlen(err->get_param_text(i))));
2759 break;
2760
2761 case ERR_TYPE_CHAR:
2762 /* push a new string with the text */
2763 push_obj(vmg_ CVmObjString::create(vmg_ FALSE,
2764 err->get_param_char(i),
2765 strlen(err->get_param_char(i))));
2766 break;
2767
2768 default:
2769 /* unrecognized type - push nil for now */
2770 push_nil(vmg0_);
2771 break;
2772 }
2773 }
2774
2775 /*
2776 * if there's a RuntimeError base class defined, create an
2777 * instance; otherwise, create a simple instance of the
2778 * basic object type to throw as a placeholder, since the
2779 * program hasn't made any provision to catch run-time
2780 * errors
2781 */
2782 if (G_predef->rterr != VM_INVALID_OBJ)
2783 {
2784 /* push the error number */
2785 push_int(vmg_ err->get_error_code());
2786
2787 /*
2788 * If we're not in the debugger, set up a recursive
2789 * call frame for the constructor invocation. We'll
2790 * do this on any recursive call into byte code if
2791 * we're running in the debugger, so we only need to
2792 * do this in the non-debug version.
2793 *
2794 * This extra recursive frame is needed in this one
2795 * case when in non-debug mode because the constructor
2796 * to the exception object might want to look at the
2797 * stack trace. In order for the location where the
2798 * error actually occurred to be included in the stack
2799 * trace, we need to push a recursive call frame that
2800 * points back to that location.
2801 */
2802 VM_IF_NOT_DEBUGGER(enter_recursive_frame(
2803 vmg_ err->get_param_count() + 1, &last_pc));
2804
2805 /*
2806 * Create the new RuntimeException instance. Run the
2807 * constructor in a recursive invocation of the
2808 * interpreter (by passing a null PC pointer).
2809 */
2810 vm_objp(vmg_ G_predef->rterr)
2811 ->create_instance(vmg_ G_predef->rterr, 0,
2812 err->get_param_count() + 1);
2813
2814 /* get the object from R0 */
2815 if (r0_.typ != VM_OBJ)
2816 err_throw(VMERR_OBJ_VAL_REQD);
2817 obj = r0_.val.obj;
2818 }
2819 else
2820 {
2821 /*
2822 * There's no RuntimeError object defined by the image
2823 * file, so create a basic object to throw. This
2824 * won't convey any information to the program except
2825 * that it's not one of the errors they're expecting;
2826 * this is fine, since they have made no provisions to
2827 * catch VM errors, as demonstrated by their lack of a
2828 * RuntimeError definition.
2829 */
2830 obj = CVmObjTads::create(vmg_ FALSE, 0, 1);
2831 }
2832
2833 /*
2834 * if possible, set the exceptionMessage property in the
2835 * new exception object to the default error message for
2836 * the run-time error we're processing
2837 */
2838 if (G_predef->rterrmsg_prop != VM_INVALID_PROP)
2839 {
2840 const char *msg;
2841 char buf[256];
2842 vm_obj_id_t str_obj;
2843
2844 /* format the message text */
2845 msg = err_get_msg(vm_messages, vm_message_count,
2846 err->get_error_code(), FALSE);
2847 err_format_msg(buf, sizeof(buf), msg, err);
2848
2849 /*
2850 * momentarily push the new exception object, so we
2851 * don't lose track of it if we run garbage collection
2852 * here
2853 */
2854 push_obj(vmg_ obj);
2855
2856 /* create a string object with the message text */
2857 str_obj =
2858 CVmObjString::create(vmg_ FALSE, buf, strlen(buf));
2859
2860 /*
2861 * before we can build a stack trace, let the debugger
2862 * synchronize its current position information
2863 */
2864 VM_IF_DEBUGGER(
2865 G_debugger->sync_exec_pos(vmg_ p, entry_ptr_));
2866
2867 /* set the property in the new object */
2868 val.set_obj(str_obj);
2869 vm_objp(vmg_ obj)
2870 ->set_prop(vmg_ G_undo, obj,
2871 G_predef->rterrmsg_prop, &val);
2872
2873 /* we don't need gc protection any more */
2874 G_stk->discard();
2875 }
2876 }
2877
2878 /*
2879 * If we released the stack reserve, take it back. We've
2880 * finished creating the exception object, so we don't need the
2881 * emergency stack space any more. We want to put it back now
2882 * that we're done with it so that it'll be there for us if we
2883 * should run into another stack overflow in the future.
2884 */
2885 if (released_reserve)
2886 G_stk->recover_reserve();
2887
2888 /* throw the exception */
2889 if ((p = do_throw(vmg_ p, obj)) == 0)
2890 {
2891 /* remember the unhandled exception for a moment */
2892 unhandled_exc = obj;
2893 }
2894
2895 /* come here to skip throwing the exception */
2896 VM_IF_DEBUGGER(skip_throw: );
2897 }
2898 err_catch(exc2)
2899 {
2900 /*
2901 * we got another exception trying to handle the first
2902 * exception - just throw the error again, but at least clean
2903 * up statics on the way out
2904 */
2905 pc_ptr_ = old_pc_ptr;
2906
2907 /* if we released the stack reserve, take it back */
2908 if (released_reserve)
2909 G_stk->recover_reserve();
2910
2911 /* re-throw the error */
2912 err_rethrow();
2913 }
2914 err_end;
2915 }
2916 err_end;
2917
2918 /*
2919 * If an unhandled exception occurred, re-throw it. This will wrap our
2920 * exception object in a C++ object and throw it through our C++
2921 * err_try/err_catch exception mechanism, so that the exception is
2922 * thrown out of the recursive native-code invoker.
2923 */
2924 if (unhandled_exc != VM_INVALID_OBJ)
2925 {
2926 /* restore the enclosing PC pointer */
2927 pc_ptr_ = old_pc_ptr;
2928
2929 /* re-throw the unhandled exception */
2930 err_throw_a(VMERR_UNHANDLED_EXC, 1,
2931 ERR_TYPE_ULONG, (unsigned long)unhandled_exc);
2932 }
2933
2934 /* if we're not done, go back and resume execution */
2935 if (!done)
2936 goto resume_execution;
2937
2938 /* restore the enclosing PC pointer */
2939 pc_ptr_ = old_pc_ptr;
2940 }
2941
2942
2943 /* ------------------------------------------------------------------------ */
2944 /*
2945 * Throw an exception of the given class, with the constructor arguments
2946 * on the stack.
2947 */
throw_new_class(VMG_ vm_obj_id_t cls,uint argc,const char * fallback_msg)2948 void CVmRun::throw_new_class(VMG_ vm_obj_id_t cls, uint argc,
2949 const char *fallback_msg)
2950 {
2951 /* if the class isn't defined, use the basic run-time exception */
2952 if (cls != VM_INVALID_OBJ)
2953 {
2954 /* create the object */
2955 vm_objp(vmg_ cls)->create_instance(vmg_ cls, 0, argc);
2956
2957 /* make sure we created an object */
2958 if (r0_.typ == VM_OBJ)
2959 {
2960 vm_obj_id_t exc_obj;
2961
2962 /* get the object from R0 */
2963 exc_obj = r0_.val.obj;
2964
2965 /*
2966 * throw an 'unhandled exception' with this object as the
2967 * parameter; the execution loop will catch it and dispatch it
2968 * properly
2969 */
2970 err_throw_a(VMERR_UNHANDLED_EXC, 1,
2971 ERR_TYPE_ULONG, (unsigned long)exc_obj);
2972 }
2973 }
2974
2975 /*
2976 * the imported exception class isn't defined, or we failed to create
2977 * it; throw a generic intrinsic class exception with the fallback
2978 * message string
2979 */
2980 err_throw_a(VMERR_INTCLS_GENERAL_ERROR, 1, ERR_TYPE_CHAR, fallback_msg);
2981 }
2982
2983
2984 /* ------------------------------------------------------------------------ */
/*
 *   Throw an exception.  'pc' is the current program counter, and
 *   'exception_obj' is the exception object being thrown.  Returns the
 *   new program counter (the first byte of the handler) if a handler was
 *   found, in which case execution can proceed; returns null if no
 *   handler was found, in which case the execution loop must throw the
 *   exception out to its (native-code) caller.
 *
 *   Note that unwinding is destructive: each frame we pass through is
 *   popped via do_return(), so by the time we return the VM registers
 *   (frame_ptr_, entry_ptr_, etc.) describe the handler's frame.
 */
const uchar *CVmRun::do_throw(VMG_ const uchar *pc, vm_obj_id_t exception_obj)
{
    /*
     *   Search the stack for a handler for this exception class.  Start
     *   at the current stack frame; if we find a handler here, use it;
     *   otherwise, unwind the stack to the enclosing frame and search for
     *   a handler there; repeat until we exhaust the stack.
     */
    for (;;)
    {
        CVmExcTablePtr tab;
        const uchar *func_start;
        uint ofs;

        /* get a pointer to the start of the current function */
        func_start = entry_ptr_native_;

        /*
         *   set up a pointer to the current function's exception table;
         *   if the function has no exception table, skip straight to
         *   unwinding
         */
        if (tab.set(func_start))
        {
            size_t cnt;
            size_t i;
            CVmExcEntryPtr entry;

            /* calculate our byte-code offset in the current function */
            ofs = pc - func_start;

            /* set up a pointer to the first table entry */
            tab.set_entry_ptr(vmg_ &entry, 0);

            /* loop through the entries */
            for (i = 0, cnt = tab.get_count() ; i < cnt ;
                 ++i, entry.inc(vmg0_))
            {
                /*
                 *   Check to see if we're in the range for this entry.
                 *   If this entry covers the appropriate range, and the
                 *   exception we're handling is of the class handled by
                 *   this exception (or derives from that class), this
                 *   handler handles this exception.  An entry whose
                 *   exception class is VM_INVALID_OBJ is a catch-all
                 *   that matches any exception.
                 */
                if (ofs >= entry.get_start_ofs()
                    && ofs <= entry.get_end_ofs()
                    && (entry.get_exception() == VM_INVALID_OBJ
                        || exception_obj == entry.get_exception()
                        || (vm_objp(vmg_ exception_obj)
                            ->is_instance_of(vmg_ entry.get_exception()))))
                {
                    /*
                     *   this is it - move the program counter to the
                     *   first byte of the handler's code
                     */
                    pc = func_start + entry.get_handler_ofs();

                    /* push the exception so that the handler can get at it */
                    push_obj(vmg_ exception_obj);

                    /* return the new program counter at which to resume */
                    return pc;
                }
            }
        }

        /*
         *   We didn't find a handler in the current function - unwind the
         *   stack one level, using an ordinary RETURN operation (we're not
         *   really returning, though, so we don't need to provide a return
         *   value).  First, though, check to make sure there is an enclosing
         *   frame at all - if there's not, we can simply return immediately.
         */
        if (frame_ptr_ == 0)
        {
            /* there's no enclosing frame, so there's nowhere to go */
            return 0;
        }

        /* try unwinding the stack a level */
        if ((pc = do_return(vmg0_)) == 0)
        {
            /*
             *   The enclosing frame is a recursive invocation, so we cannot
             *   unwind any further at this point.  Return null to indicate
             *   that the exception was not handled and should be thrown out
             *   of the current recursive VM invocation.
             */
            return 0;
        }
    }
}
3080
3081
3082 /* ------------------------------------------------------------------------ */
/*
 *   Call a built-in function.  'set_index' selects the function set and
 *   'func_index' selects the function within that set; 'argc' is the
 *   number of actual parameters already on the stack.  No validation is
 *   performed here - see below.
 */
void CVmRun::call_bif(VMG_ uint set_index, uint func_index, uint argc)
{
    /*
     *   Call the function -- presume the compiler has ensured that the
     *   function set index is valid for the load image, and that the
     *   function index is valid for the function set; all of this can be
     *   determined at compile time, since function sets are statically
     *   defined.
     */
    G_bif_table->call_func(vmg_ set_index, func_index, argc);
}
3097
3098
3099 /* ------------------------------------------------------------------------ */
3100 /*
3101 * Call a function pointer
3102 */
call_func_ptr(VMG_ const vm_val_t * funcptr,uint argc,const char * recurse_name,uint caller_ofs)3103 const uchar *CVmRun::call_func_ptr(VMG_ const vm_val_t *funcptr, uint argc,
3104 const char *recurse_name, uint caller_ofs)
3105 {
3106 /*
3107 * if it's an object, and the predefined property ObjectCallProp is
3108 * defined, and the object defines this property, call this property
3109 * in the object
3110 */
3111 if (funcptr->typ == VM_OBJ
3112 && G_predef->obj_call_prop != VM_INVALID_PROP)
3113 {
3114 vm_val_t prop_val;
3115 vm_obj_id_t srcobj;
3116 int found;
3117 uint objcall_argc = 0;
3118
3119 /* make sure the object defines ObjectCallProp */
3120 found = vm_objp(vmg_ funcptr->val.obj)
3121 ->get_prop(vmg_ G_predef->obj_call_prop, &prop_val,
3122 funcptr->val.obj, &srcobj, &objcall_argc);
3123
3124 /*
3125 * if we didn't find it, this object can't be used in this
3126 * fashion - throw an error
3127 */
3128 if (!found)
3129 err_throw(VMERR_FUNCPTR_VAL_REQD);
3130
3131 /*
3132 * if this is a function pointer, call the function pointer with
3133 * the function object as 'self'
3134 */
3135 if (prop_val.typ == VM_FUNCPTR)
3136 {
3137 /* call the function and return the new program counter */
3138 return do_call(vmg_ caller_ofs, prop_val.val.ofs, argc,
3139 funcptr->val.obj, VM_INVALID_PROP,
3140 funcptr->val.obj, srcobj, recurse_name);
3141 }
3142
3143 /* proceed with the new value */
3144 funcptr = &prop_val;
3145 }
3146
3147 /* if it's not a function pointer, it's an error */
3148 if (funcptr->typ != VM_FUNCPTR)
3149 err_throw(VMERR_FUNCPTR_VAL_REQD);
3150
3151 /* call the function */
3152 return do_call(vmg_ caller_ofs, funcptr->val.ofs, argc,
3153 VM_INVALID_OBJ, VM_INVALID_PROP,
3154 VM_INVALID_OBJ, VM_INVALID_OBJ, recurse_name);
3155 }
3156
3157 /* ------------------------------------------------------------------------ */
/*
 *   Call a function, non-recursively.
 *
 *   This is a separate form of do_call(), but simplified for cases where we
 *   know in advance that we won't need to check for recursion and when we
 *   know in advance that we're calling a function and thus have no 'self'
 *   or other method context objects.  These simplifications reduce the
 *   amount of work we have to do, so that ordinary function calls run a
 *   little faster than they would if we used the full do_call() routine.
 *
 *   'caller_ofs' is the caller's byte-code return offset, 'target_ofs' is
 *   the pool offset of the callee, and 'argc' is the number of actual
 *   parameters already pushed.  Returns the new program counter (the first
 *   byte after the callee's function header).
 *
 *   The frame layout built here (8 control slots plus locals) must match
 *   do_call()'s exactly, since do_return() unwinds both kinds of frame the
 *   same way.
 */
const uchar *CVmRun::do_call_func_nr(VMG_ uint caller_ofs,
                                     pool_ofs_t target_ofs, uint argc)
{
    const uchar *target_ofs_ptr;
    CVmFuncPtr hdr_ptr;
    uint i;
    vm_val_t *fp;
    int lcl_cnt;

    /* store nil in R0 */
    r0_.set_nil();

    /* translate the target address to a physical code pointer */
    target_ofs_ptr = (const uchar *)G_code_pool->get_ptr(target_ofs);

    /* set up a pointer to the new function header */
    hdr_ptr.set(target_ofs_ptr);

    /* get the number of locals from the header */
    lcl_cnt = hdr_ptr.get_local_cnt();

    /*
     *   get the target's stack space needs and check for stack overflow;
     *   the +8 accounts for the control slots we push below
     */
    if (!G_stk->check_space(hdr_ptr.get_stack_depth() + 8))
        err_throw(VMERR_STACK_OVERFLOW);

    /* allocate the stack frame */
    fp = G_stk->push(8 + lcl_cnt);

    /* there's no target property, target object, defining object, or self */
    (fp++)->set_propid(VM_INVALID_PROP);
    (fp++)->set_nil();
    (fp++)->set_nil();
    (fp++)->set_nil();

    /* push the caller's code offset */
    (fp++)->set_codeofs(caller_ofs);

    /* push the current entrypoint code offset */
    (fp++)->set_codeofs(entry_ptr_);

    /* push the actual parameter count */
    (fp++)->set_int((int32)argc);

    /* push the current frame pointer */
    (fp++)->set_stack(frame_ptr_);

    /*
     *   verify the argument count - do this before committing the new
     *   frame/entry registers, so any error is reported in the caller's
     *   frame
     */
    if (!hdr_ptr.argc_ok(argc))
        err_throw(VMERR_WRONG_NUM_OF_ARGS);

    /* set up the new stack frame */
    frame_ptr_ = fp;

    /* load EP with the new code offset */
    entry_ptr_ = target_ofs;
    entry_ptr_native_ = target_ofs_ptr;

    /* push nil for each local */
    for (i = lcl_cnt ; i != 0 ; --i)
        (fp++)->set_nil();

    /* create and activate the new function's profiler frame */
    VM_IF_PROFILER(if (profiling_)
        prof_enter(target_ofs, VM_INVALID_OBJ, VM_INVALID_PROP));

    /* return the new program counter */
    return target_ofs_ptr + get_funchdr_size();
}
3236
3237 /* ------------------------------------------------------------------------ */
/*
 *   Call a function or method.
 *
 *   'caller_ofs' is the caller's byte-code return offset; zero marks a
 *   recursive invocation from native code.  'target_ofs' is the pool
 *   offset of the callee, 'argc' the number of pushed arguments.  'self',
 *   'target_prop', 'orig_target_obj', and 'defining_obj' give the method
 *   context (self == VM_INVALID_OBJ means a plain function call).
 *   'recurse_name' names the native caller for error messages.
 *
 *   Returns the new program counter, or null after a completed recursive
 *   invocation (in which case the byte code already ran to completion via
 *   a nested run() call).
 */
const uchar *CVmRun::do_call(VMG_ uint caller_ofs,
                             pool_ofs_t target_ofs, uint argc,
                             vm_obj_id_t self, vm_prop_id_t target_prop,
                             vm_obj_id_t orig_target_obj,
                             vm_obj_id_t defining_obj,
                             const char *recurse_name)
{
    const uchar *target_ofs_ptr;
    CVmFuncPtr hdr_ptr;
    uint i;
    vm_val_t *fp;
    int lcl_cnt;

    /* store nil in R0 */
    r0_.set_nil();

    /*
     *   If we have a debugger, and this is a recursive call, set up a
     *   frame for the recursive call, so that the debugger can look up
     *   the stack to the byte-code caller of the native code that's
     *   recursing into the VM.
     *
     *   This is unnecessary if there's no debugger; the only reason we
     *   need a special frame on native recursion is to allow the debugger
     *   to traverse the stack correctly through the native call.
     */
    VM_IF_DEBUGGER(if (caller_ofs == 0)
        enter_recursive_frame(vmg_ argc, pc_ptr_));

    /*
     *   We're done with the old code segment now, so we can safely
     *   translate a new address.  Get the physical address we're calling
     *   -- this will swap in the new code segment if necessary.
     */
    target_ofs_ptr = (const uchar *)G_code_pool->get_ptr(target_ofs);

    /* set up a pointer to the new function header */
    hdr_ptr.set(target_ofs_ptr);

    /* get the number of locals from the header */
    lcl_cnt = hdr_ptr.get_local_cnt();

    /*
     *   Get the space needs of the new function, and ensure we have enough
     *   stack space available.  Include the size of the frame that we store
     *   (the original target object, the target property, the defining
     *   object, the 'self' object, the caller's code offset, the caller's
     *   entrypoint offset, the actual parameter count, and the enclosing
     *   frame pointer) in our space needs.
     */
    if (!G_stk->check_space(hdr_ptr.get_stack_depth() + 8))
    {
        /*
         *   If we just entered a recursive frame, remove it.  This will
         *   allow us to stop in the debugger in the byte code that triggered
         *   the recursive call.
         */
        VM_IF_DEBUGGER(if (caller_ofs == 0)
            leave_recursive_frame(vmg0_));

        /* throw the error */
        err_throw(VMERR_STACK_OVERFLOW);
    }

    /* allocate the stack frame (8 control slots plus the locals) */
    fp = G_stk->push(8 + lcl_cnt);

    /* push the target property */
    (fp++)->set_propid(target_prop);

    /*
     *   if there's no 'self' object, push nil's for the object context;
     *   otherwise, push the object context
     */
    if (self == VM_INVALID_OBJ)
    {
        /* push nil for target, defining, and self */
        (fp++)->set_nil();
        (fp++)->set_nil();
        (fp++)->set_nil();
    }
    else
    {
        /* push the original target object */
        (fp++)->set_obj(orig_target_obj);

        /* push the defining object */
        (fp++)->set_obj(defining_obj);

        /* push 'self' */
        (fp++)->set_obj(self);
    }

    /*
     *   Push the caller's code offset.  Note that if the caller's offset is
     *   zero, it indicates that the caller is not the byte-code interpreter
     *   and that this is a recursive invocation; we represent recursive
     *   frames using a zero caller offset, so we can just use the zero
     *   value as given in this case.
     */
    (fp++)->set_codeofs(caller_ofs);

    /* push the current entrypoint code offset */
    (fp++)->set_codeofs(entry_ptr_);

    /* push the actual parameter count */
    (fp++)->set_int((int32)argc);

    /* push the current frame pointer */
    (fp++)->set_stack(frame_ptr_);

    /*
     *   check the argument count - do this before establishing the new
     *   frame and entry pointers, so that if we report a stack traceback in
     *   the debugger, we'll report the error in the calling frame, which is
     *   where it really belongs
     */
    if (!hdr_ptr.argc_ok(argc))
    {
        /* leave the recursive frame, if we entered one */
        VM_IF_DEBUGGER(if (caller_ofs == 0)
            leave_recursive_frame(vmg0_));

        /*
         *   if we're making a recursive call, throw an error indicating
         *   what kind of recursive call we're making
         */
        if (recurse_name != 0)
        {
            /* throw the named generic argument mismatch error */
            err_throw_a(VMERR_WRONG_NUM_OF_ARGS_CALLING, 1,
                        ERR_TYPE_CHAR, recurse_name);
        }
        else
        {
            /* throw the generic argument mismatch error */
            err_throw(VMERR_WRONG_NUM_OF_ARGS);
        }
    }

    /*
     *   set up the new frame so that the frame pointer points to the old
     *   frame pointer stored in the stack
     */
    frame_ptr_ = fp;

    /* load EP with the new code offset */
    entry_ptr_ = target_ofs;
    entry_ptr_native_ = target_ofs_ptr;

    /* push nil for each local */
    for (i = lcl_cnt ; i != 0 ; --i)
        (fp++)->set_nil();

    /* create and activate the new function's profiler frame */
    VM_IF_PROFILER(if (profiling_)
        prof_enter(target_ofs, defining_obj, target_prop));

    /* if desired, make a recursive call into the byte code interpreter */
    if (caller_ofs != 0)
    {
        /*
         *   return the new program counter at the first byte of code in the
         *   new function, which immediately follows the header
         */
        return target_ofs_ptr + get_funchdr_size();
    }
    else
    {
        VM_IF_DEBUGGER(err_try {)

        /* recursively call the interpreter loop */
        run(vmg_ target_ofs_ptr + get_funchdr_size());

        /*
         *   if the debugger is present, always remove our recursive frame on
         *   the way out
         */
        VM_IF_DEBUGGER(
        }
        err_finally
        {
            leave_recursive_frame(vmg0_);
        }
        err_end;)

        /*
         *   this was a recursive call, so there's no program counter to
         *   return - just return null
         */
        return 0;
    }
}
3434
3435 /*
3436 * Determine if we're in a recursive VM invocation. If this frame or
3437 * any enclosing frame other than the outermost has a code offset of
3438 * zero in the return address slot, we are in a recursive VM invocation.
3439 */
3440 int CVmRun::is_recursive_invocation(VMG0_) const
3441 {
3442 vm_val_t *p;
3443
3444 /* start with the current frame */
3445 p = frame_ptr_;
3446
3447 /* if there's no frame pointer, it's obviously not recursive */
3448 if (p == 0)
3449 return FALSE;
3450
3451 /* scan frames until we get to the outermost frame */
3452 for (;;)
3453 {
3454 /*
3455 * If this is the outermost frame, we can stop now. The
3456 * outermost frame has an enclosing frame pointer value of null.
3457 * (A given frame pointer always points directly to the
3458 * enclosing frame pointer stored in the stack frame, so the
3459 * offset from this frame pointer is zero.)
3460 */
3461 if (get_enclosing_frame_ptr(vmg_ p) == 0)
3462 break;
3463
3464 /*
3465 * Check the return address in this frame - if it's at offset
3466 * zero, it means that this method was called directly as a
3467 * recursive VM invocation.
3468 */
3469 if (get_return_addr_from_frame(vmg_ p) == 0)
3470 return TRUE;
3471
3472 /* move to the enclosing frame */
3473 p = get_enclosing_frame_ptr(vmg_ p);
3474 }
3475
3476 /*
3477 * we didn't find any direct invocations after the outermost frame,
3478 * so this is the top-level VM invocation
3479 */
3480 return FALSE;
3481 }
3482
3483
3484 /*
3485 * Return from the current function. Returns true if execution can
3486 * proceed, false if this returns us out of the outermost function, in
3487 * which case the execution loop must terminate and return control to
3488 * the host environment.
3489 */
const uchar *CVmRun::do_return(VMG0_)
{
    int argc;
    pool_ofs_t caller_ofs;

    /*
     *   The frame pointer always points to the location on the stack
     *   where we pushed the enclosing frame pointer.  Reset the stack
     *   pointer to the current frame pointer, then pop the enclosing
     *   frame pointer.
     *   
     *   From frame_ptr_ upward, the fixed frame layout is: slot 0 =
     *   enclosing frame pointer, slot 1 = enclosing argument count,
     *   slot 2 = enclosing entry pointer, slot 3 = enclosing code
     *   offset; above those come target property, original target
     *   object, defining object, 'self', then the actual parameters.
     */
    G_stk->set_sp(frame_ptr_);
    frame_ptr_ = (vm_val_t *)G_stk->get(0)->val.ptr;

    /* restore the enclosing argument count */
    argc = G_stk->get(1)->val.intval;

    /* restore the enclosing entry pointer */
    entry_ptr_ = G_stk->get(2)->val.ofs;

    /* translate the method entry pointer to a physical address */
    entry_ptr_native_ = (const uchar *)G_code_pool->get_ptr(entry_ptr_);

    /* restore the enclosing code offset */
    caller_ofs = G_stk->get(3)->val.ofs;

    /*
     *   Discard the actual parameters, plus the 'self', defining object,
     *   original target object, and target property values.  While we're at
     *   it, also discard the enclosing frame pointer, enclosing argument
     *   count, enclosing entry pointer, and enclosing code offset, which
     *   we've already restored.  (That's the 'argc' arguments plus the
     *   eight fixed frame slots described above.)
     */
    G_stk->discard(argc + 8);

    /* leave the profiler stack level (profiler builds only) */
    VM_IF_PROFILER(if (profiling_)
        prof_leave());

    /*
     *   If the enclosing code offset is invalid, we've returned from the
     *   outermost function invoked by the host environment.  0 is an
     *   invalid offset, since offset 0 in a method never contains valid
     *   code.
     */
    if (caller_ofs == 0)
        return 0;

    /*
     *   return the new program counter - calculate the PC offset by adding
     *   the offset within the method to the entry pointer
     */
    return entry_ptr_native_ + caller_ofs;
}
3544
3545
3546 /* ------------------------------------------------------------------------ */
3547 /*
3548 * Recursive frame routines.
3549 */
3550
3551 /*
3552 * Enter a recursive call frame from a native routine
3553 */
void CVmRun::enter_recursive_frame(VMG_ int argc,
                                   const uchar **pc_ptr)
{
    /*
     *   'argc' is the number of arguments already on the stack for the
     *   routine about to be invoked; 'pc_ptr' is the caller's program
     *   counter register, or null when there's no enclosing byte-code
     *   frame.
     */
    pool_ofs_t old_ofs;
    int i;

    /*
     *   don't bother setting up a recursive frame for a recursive call
     *   from the debugger itself - the only purpose of these frames is to
     *   aid the debugger in tracing the stack, which it obviously won't
     *   need to do when it's the native caller
     */
    VM_IF_DEBUGGER(if (G_debugger->is_in_debugger())
        return);

    /*
     *   if there's no global PC register, we're being called from the
     *   outermost native caller, so there's no need for a native frame
     */
    if (pc_ptr == 0)
        return;

    /* get the return address from the global PC register */
    old_ofs = pc_to_method_ofs(*pc_ptr);

    /* make sure we have space for the native frame */
    if (!G_stk->check_space(6))
        err_throw(VMERR_STACK_OVERFLOW);

    /*
     *   Push the frame slots in the standard order - the pop sequence in
     *   leave_recursive_frame() and the fixed offsets used elsewhere
     *   depend on this exact layout.
     */

    /* there's no target property for a recursive caller */
    push_nil(vmg0_);

    /* there's no original target object */
    push_nil(vmg0_);

    /* there's no defining object */
    push_nil(vmg0_);

    /* there's no 'self' for a recursive caller */
    push_nil(vmg0_);

    /* push the caller's code offset */
    push_codeofs(vmg_ old_ofs);

    /* push the old entrypoint code offset */
    push_codeofs(vmg_ entry_ptr_);

    /*
     *   push the argument count to the routine being invoked from the
     *   native code - this isn't actually the argument count to the
     *   native routine, which we don't know, but we must push it anyway
     *   because the arguments are arranged as though they're to this fake
     *   native frame
     */
    push_int(vmg_ (int32)argc);

    /* push the current frame pointer */
    push_stackptr(vmg_ frame_ptr_);

    /* set up the new frame pointer */
    frame_ptr_ = G_stk->get_sp();

    /* there's no entrypoint address for the native code */
    entry_ptr_ = 0;
    entry_ptr_native_ = 0;

    /*
     *   call the debugger to do a step trace - the debugger obviously
     *   can't really stop here, but what it can do is note that we've
     *   stepped through this native stack level for the purposes of
     *   determining when it should stop next for step-in, step-over, and
     *   step-out modes
     */
    VM_IF_DEBUGGER(if (G_debugger->is_single_step())
        G_debugger->step(vmg_ 0, 0, FALSE, 0));

    /*
     *   Copy the arguments from this frame - this is necessary so that
     *   the recursive frame we'll set up next (after we return) receives
     *   a copy of its arguments, which we hijacked by establishing this
     *   intermediate frame to represent the native caller.  Note that we
     *   must follow the normal convention of pushing arguments in reverse
     *   order.
     */
    for (i = argc ; i > 0 ; --i)
        G_stk->push(get_param(vmg_ i - 1));
}
3641
3642 #ifdef VM_DEBUGGER
3643
3644 /*
3645 * Leave a recursive call frame on our way back out to a native routine
3646 */
void CVmRun::leave_recursive_frame(VMG0_)
{
    vm_val_t val;
    int argc;

    /*
     *   if we're in the debugger, we will not have set up a recursive
     *   call frame, so we will not need to remove one
     */
    if (G_debugger->is_in_debugger())
        return;

    /*
     *   if there's no global PC pointer, it means that we're at the
     *   outermost native frame, which we suppress
     */
    if (pc_ptr_ == 0)
        return;

    /*
     *   Re-activate the enclosing frame.  The pop order below mirrors
     *   exactly the push order in enter_recursive_frame().
     */
    G_stk->set_sp(frame_ptr_);
    G_stk->pop(&val);
    frame_ptr_ = (vm_val_t *)val.val.ptr;

    /* pop the argument count */
    G_stk->pop(&val);
    argc = val.val.intval;

    /* pop the enclosing entry pointer and re-derive its native address */
    G_stk->pop(&val);
    entry_ptr_ = val.val.ofs;
    entry_ptr_native_ = (const uchar *)G_code_pool->get_ptr(entry_ptr_);

    /*
     *   discard the enclosing code offset - since we know this is
     *   actually a native caller, we pushed the enclosing code offset
     *   only to enable the debugger to find the native caller
     */
    G_stk->discard();

    /*
     *   discard the actual parameters, plus the target property, original
     *   target object, defining object, and the 'self' object
     */
    G_stk->discard(argc + 4);
}
3693
3694 /*
3695 * save the execution context
3696 */
3697 void CVmRun::save_context(VMG_ vmrun_save_ctx *ctx)
3698 {
3699 /* save our registers */
3700 ctx->entry_ptr_ = entry_ptr_;
3701 ctx->frame_ptr_ = frame_ptr_;
3702 ctx->pc_ptr_ = pc_ptr_;
3703
3704 /* save the stack depth */
3705 ctx->old_stack_depth_ = G_stk->get_depth();
3706 }
3707
3708 /*
3709 * restore the execution context
3710 */
3711 void CVmRun::restore_context(VMG_ vmrun_save_ctx *ctx)
3712 {
3713 /* restore our registers */
3714 entry_ptr_ = ctx->entry_ptr_;
3715 entry_ptr_native_ = (const uchar *)G_code_pool->get_ptr(entry_ptr_);
3716 frame_ptr_ = ctx->frame_ptr_;
3717 pc_ptr_ = ctx->pc_ptr_;
3718
3719 /* if there's anything extra left on the stack, discard it */
3720 if (G_stk->get_depth() > ctx->old_stack_depth_)
3721 G_stk->discard(G_stk->get_depth() - ctx->old_stack_depth_);
3722 }
3723
3724 #endif /* VM_DEBUGGER */
3725
3726 /* ------------------------------------------------------------------------ */
3727 /*
3728 * Append a stack trace to a string. This is only meaningful in a
3729 * debugger-equipped version.
3730 */
3731 #if VM_DEBUGGER
3732
3733 /*
3734 * callback context for stack trace appender
3735 */
struct append_stack_ctx
{
    /*
     *   The accumulated string so far.  The callback replaces this with
     *   a newly created string object each time it appends a line.
     */
    vm_obj_id_t str_obj;

    /* VM globals, for re-establishing global access inside the callback */
    vm_globals *vmg;

    /* frame pointer where we pushed our string for gc protection */
    vm_val_t *gc_fp;
};
3747
3748 /*
3749 * stack trace callback
3750 */
static void append_stack_cb(void *ctx0, const char *str, int strl)
{
    append_stack_ctx *ctx = (append_stack_ctx *)ctx0;
    size_t new_len;
    size_t old_len;
    const char *old_str;
    char *new_str;

    /* set up access to globals */
    VMGLOB_PTR(ctx->vmg);

    /*
     *   Get the original string text.  Internal strings carry a length
     *   prefix, so read the length and then skip past the prefix to the
     *   text proper.
     */
    old_str = vm_objp(vmg_ ctx->str_obj)->get_as_string(vmg0_);
    old_len = vmb_get_len(old_str);
    old_str += VMB_LEN;

    /*
     *   Allocate a new string, big enough for the old string plus the new
     *   text, plus a newline.  (The old string is protected from garbage
     *   collection by the stack slot at ctx->gc_fp, so 'old_str' remains
     *   valid across this allocation.)
     */
    new_len = old_len + strl + 1;
    ctx->str_obj = CVmObjString::create(vmg_ FALSE, new_len);

    /* get the new string buffer */
    new_str = ((CVmObjString *)vm_objp(vmg_ ctx->str_obj))->cons_get_buf();

    /* build the new string: old text, a newline, then the new text */
    memcpy(new_str, old_str, old_len);
    new_str[old_len] = '\n';
    memcpy(new_str + old_len + 1, str, strl);

    /*
     *   replace our gc-protective stack reference to the old string with
     *   the new string - we're done with the old string now, so it's okay
     *   if it gets collected, but we obviously want to keep the new one
     *   around
     */
    G_stk->get_from_frame(ctx->gc_fp, 0)->set_obj(ctx->str_obj);
}
3790
3791 /*
3792 * append a stack trace to the given string
3793 */
3794 vm_obj_id_t CVmRun::append_stack_trace(VMG_ vm_obj_id_t str_obj)
3795 {
3796 append_stack_ctx ctx;
3797
3798 /* push the string for protection from gc */
3799 push_obj(vmg_ str_obj);
3800
3801 /* call the debugger to set up the stack traceback */
3802 ctx.str_obj = str_obj;
3803 ctx.vmg = VMGLOB_ADDR;
3804 ctx.gc_fp = G_stk->get_sp();
3805 G_debugger->build_stack_listing(vmg_ &append_stack_cb, &ctx, TRUE);
3806
3807 /* discard the gc protection */
3808 G_stk->discard();
3809
3810 /* return the result string */
3811 return ctx.str_obj;
3812 }
3813
3814 #endif /* VM_DEBUGGER */
3815
3816 /* ------------------------------------------------------------------------ */
3817 /*
3818 * Set a property of an object
3819 */
void CVmRun::set_prop(VMG_ vm_obj_id_t obj, vm_prop_id_t prop,
                      const vm_val_t *new_val)
{
    /*
     *   set the property - delegate to the object, passing the global
     *   undo stream so the change can be recorded for undo
     */
    vm_objp(vmg_ obj)->set_prop(vmg_ G_undo, obj, prop, new_val);
}
3826
3827 /* ------------------------------------------------------------------------ */
3828 /*
3829 * Evaluate a property of an object
3830 */
const uchar *CVmRun::get_prop(VMG_ uint caller_ofs,
                              const vm_val_t *target_obj,
                              vm_prop_id_t target_prop,
                              const vm_val_t *self, uint argc)
{
    vm_val_t val;
    vm_obj_id_t srcobj;
    int found;
    vm_val_t new_self;

    /*
     *   Find the property without evaluating it.  Note that this can
     *   redirect 'target_obj' and 'self' to 'new_self' when the target is
     *   a constant list or string whose property turns out to be a method.
     */
    found = get_prop_no_eval(vmg_ &target_obj, target_prop,
                             &argc, &srcobj, &val, &self, &new_self);

    /* if we didn't find it, try propNotDefined */
    if (!found && G_predef->prop_not_defined_prop != VM_INVALID_PROP)
    {
        /*
         *   We didn't find it, so call propNotDefined on the object, with
         *   the property originally called as an additional first argument.
         *   If propNotDefined is not exported by the program, we'll fall
         *   back on the default of evaluating to nil.
         */
        found = get_prop_no_eval(vmg_ &target_obj,
                                 G_predef->prop_not_defined_prop,
                                 &argc, &srcobj, &val, &self, &new_self);

        /*
         *   if we found it, and it's code, push the original property ID as
         *   the new first argument
         */
        if (found && val.typ == VM_CODEOFS)
        {
            /*
             *   add the property argument (we push backwards, so this will
             *   conveniently become the new first argument, since we're
             *   pushing it last)
             */
            push_prop(vmg_ target_prop);

            /* count the additional argument */
            ++argc;

            /* the target property changes to propNotDefined */
            target_prop = G_predef->prop_not_defined_prop;
        }
    }

    /*
     *   evaluate whatever we found or didn't find - this either invokes
     *   the method, displays the dstring, or stores the value in R0, and
     *   yields the program counter at which to resume
     */
    return eval_prop_val(vmg_ found, caller_ofs, &val, self->val.obj,
                         target_prop, target_obj, srcobj, argc);
}
3883
3884 /*
3885 * Look up a property without evaluating it.
3886 */
inline int CVmRun::get_prop_no_eval(VMG_ const vm_val_t **target_obj,
                                    vm_prop_id_t target_prop,
                                    uint *argc, vm_obj_id_t *srcobj,
                                    vm_val_t *val,
                                    const vm_val_t **self,
                                    vm_val_t *new_self)
{
    int found;
    const char *target_ptr;

    /*
     *   We can evaluate properties of regular objects, as well as string
     *   and list constants - see what we have.  For the constant cases,
     *   '*target_obj' and '*self' may be redirected to '*new_self', which
     *   is storage supplied by the caller.
     */
    switch((*target_obj)->typ)
    {
    case VM_LIST:
        /* 'self' must be the same as the target for a constant list */
        if ((*self)->typ != (*target_obj)->typ
            || (*self)->val.ofs != (*target_obj)->val.ofs)
            err_throw(VMERR_OBJ_VAL_REQD);

        /* translate the list offset to a physical pointer */
        target_ptr = G_const_pool->get_ptr((*target_obj)->val.ofs);

        /* evaluate the constant list property */
        found = CVmObjList::const_get_prop(vmg_ val, *target_obj,
                                           target_ptr, target_prop,
                                           srcobj, argc);

        /*
         *   If the result is a method to run, we need an actual object for
         *   'self'.  In this case, create a dynamic list object with the
         *   same contents as the constant list value.
         */
        if (found && val->typ == VM_CODEOFS)
        {
            /* create the list */
            new_self->set_obj(CVmObjListConst::create(vmg_ target_ptr));

            /* use it as the new 'self' and the new effective target */
            *self = new_self;
            *target_obj = new_self;
        }

        /* go evaluate the result as normal */
        break;

    case VM_SSTRING:
        /* 'self' must be the same as the target for a constant string */
        if ((*self)->typ != (*target_obj)->typ
            || (*self)->val.ofs != (*target_obj)->val.ofs)
            err_throw(VMERR_OBJ_VAL_REQD);

        /* translate the string offset to a physical pointer */
        target_ptr = G_const_pool->get_ptr((*target_obj)->val.ofs);

        /* evaluate the constant string property */
        found = CVmObjString::const_get_prop(vmg_ val, *target_obj,
                                             target_ptr, target_prop,
                                             srcobj, argc);

        /*
         *   If the result is a method to run, we need an actual object for
         *   'self'.  In this case, create a dynamic string object with the
         *   same contents as the constant string value.
         */
        if (found && val->typ == VM_CODEOFS)
        {
            /* create the string */
            new_self->set_obj(CVmObjStringConst::create(vmg_ target_ptr));

            /* it's the new 'self' and the new effective target object */
            *self = new_self;
            *target_obj = new_self;
        }

        /* go evaluate the result as normal */
        break;

    case VM_OBJ:
        /* get the property value from the target object */
        found = vm_objp(vmg_ (*target_obj)->val.obj)
                ->get_prop(vmg_ target_prop, val, (*target_obj)->val.obj,
                           srcobj, argc);

        /* 'self' must be an object as well */
        if ((*self)->typ != VM_OBJ)
            err_throw(VMERR_OBJ_VAL_REQD);
        break;

    case VM_NIL:
        /* nil pointer dereferenced */
        err_throw(VMERR_NIL_DEREF);

    default:
        /* we can't evaluate properties of anything else */
        err_throw(VMERR_OBJ_VAL_REQD);
    }

    /* return the 'found' indication */
    return found;
}
3990
3991 /* ------------------------------------------------------------------------ */
3992 /*
3993 * Given a value that has been retrieved from an object property,
3994 * evaluate the value. If the value contains code, we'll execute the
3995 * code; if it contains a self-printing string, we'll display the
3996 * string; otherwise, we'll just store the value in R0.
3997 *
3998 * 'found' indicates whether or not the property value is defined.
3999 * False indicates that the property value is not defined by the object;
4000 * true indicates that it is.
4001 */
4002 inline const uchar *CVmRun::eval_prop_val(VMG_ int found, uint caller_ofs,
4003 const vm_val_t *val,
4004 vm_obj_id_t self,
4005 vm_prop_id_t target_prop,
4006 const vm_val_t *orig_target_obj,
4007 vm_obj_id_t defining_obj,
4008 uint argc)
4009 {
4010 /* check whether or not the property is defined */
4011 if (found)
4012 {
4013 /* take appropriate action based on the datatype of the result */
4014 switch(val->typ)
4015 {
4016 case VM_CODEOFS:
4017 /*
4018 * It's a method - invoke the method. This will set us up
4019 * to start executing this new code, so there's nothing more
4020 * we need to do here.
4021 */
4022 return do_call(vmg_ caller_ofs, val->val.ofs, argc,
4023 self, target_prop, orig_target_obj->val.obj,
4024 defining_obj, 0);
4025
4026 case VM_DSTRING:
4027 /* no arguments are allowed */
4028 if (argc != 0)
4029 err_throw(VMERR_WRONG_NUM_OF_ARGS);
4030
4031 /*
4032 * it's a self-printing string - invoke the default string
4033 * output function (this is effectively a do_call())
4034 */
4035 return disp_dstring(vmg_ val->val.ofs, caller_ofs, self);
4036
4037 default:
4038 /* for any other value, no arguments are allowed */
4039 if (argc != 0)
4040 err_throw(VMERR_WRONG_NUM_OF_ARGS);
4041
4042 /* store the result in R0 */
4043 r0_ = *val;
4044
4045 /* resume execution where we left off */
4046 return entry_ptr_native_ + caller_ofs;
4047 }
4048 }
4049 else
4050 {
4051 /*
4052 * the property or method is not defined - discard arguments and
4053 * set R0 to nil
4054 */
4055 G_stk->discard(argc);
4056 r0_.set_nil();
4057
4058 /* resume execution where we left off */
4059 return entry_ptr_native_ + caller_ofs;
4060 }
4061 }
4062
4063 /* ------------------------------------------------------------------------ */
4064 /*
4065 * Inherit a property or method from the appropriate superclass of the
4066 * object that defines currently executing code.
4067 */
const uchar *CVmRun::inh_prop(VMG_ uint caller_ofs,
                              vm_prop_id_t prop, uint argc)
{
    vm_val_t orig_target_obj;
    vm_obj_id_t defining_obj;
    vm_val_t val;
    vm_obj_id_t srcobj;
    int found;
    vm_obj_id_t self;

    /* get the defining object from the stack frame */
    defining_obj = get_defining_obj(vmg0_);

    /* get the original target object from the stack frame */
    orig_target_obj.set_obj(get_orig_target_obj(vmg0_));

    /* get the 'self' object */
    self = get_self(vmg0_);

    /*
     *   get the inherited property value - the search starts from the
     *   superclasses of the object that defines the currently executing
     *   method, not from 'self'
     */
    found = vm_objp(vmg_ self)->inh_prop(vmg_ prop, &val, self,
                                         orig_target_obj.val.obj,
                                         defining_obj, &srcobj, &argc);

    /* if we didn't find it, try inheriting propNotDefined */
    if (!found && G_predef->prop_not_defined_prop != VM_INVALID_PROP)
    {
        /*
         *   Look up propNotDefined using the same search conditions we used
         *   to find the original inherited property.  This lets us look up
         *   the "inherited" propNotDefined.
         */
        found = vm_objp(vmg_ self)->inh_prop(vmg_
                                             G_predef->prop_not_defined_prop,
                                             &val, self,
                                             orig_target_obj.val.obj,
                                             defining_obj, &srcobj, &argc);

        /*
         *   if we found it, and it's code, push the original property ID we
         *   were attempting to inherit - this becomes the new first
         *   parameter to the propNotDefined method
         */
        if (found && val.typ == VM_CODEOFS)
        {
            /* add the original property pointer argument */
            push_prop(vmg_ prop);

            /* count the additional argument */
            ++argc;

            /* the target property changes to propNotDefined */
            prop = G_predef->prop_not_defined_prop;
        }
    }

    /*
     *   evaluate and store the result - note that "self" remains the
     *   current "self" object, since we're inheriting within the context
     *   of the original method call
     */
    return eval_prop_val(vmg_ found, caller_ofs, &val, self, prop,
                         &orig_target_obj, srcobj, argc);
}
4132
4133
4134 /* ------------------------------------------------------------------------ */
4135 /*
4136 * Display a dstring via the default string display mechanism
4137 */
const uchar *CVmRun::disp_dstring(VMG_ pool_ofs_t ofs, uint caller_ofs,
                                  vm_obj_id_t self)
{
    /* push the string (as a constant-pool sstring reference) */
    G_stk->push()->set_sstring(ofs);

    /*
     *   invoke the default "say" mechanism on the pushed value; this
     *   returns the program counter at which to resume execution
     */
    return disp_string_val(vmg_ caller_ofs, self);
}
4147
4148 /*
4149 * Display the value at top of stack via the default string display
4150 * mechanism
4151 */
const uchar *CVmRun::disp_string_val(VMG_ uint caller_ofs, vm_obj_id_t self)
{
    /*
     *   if there's a valid 'self' object, and there's a default display
     *   method defined, and 'self' defines or inherits that method,
     *   invoke the method in preference to the global 'say' function
     */
    if (say_method_ != VM_INVALID_PROP && self != VM_INVALID_OBJ)
    {
        vm_obj_id_t src_obj;
        vm_val_t val;

        /*
         *   look up the property - if we find it, and it's a regular
         *   method, invoke it
         */
        if (vm_objp(vmg_ self)->get_prop(vmg_ say_method_, &val, self,
                                         &src_obj, 0)
            && val.typ == VM_CODEOFS)
        {
            vm_val_t self_val;

            /* set up a 'self' value - this is the target object */
            self_val.set_obj(self);

            /*
             *   there's a default display method - invoke it, passing the
             *   string at top of stack as its single argument
             */
            return eval_prop_val(vmg_ TRUE, caller_ofs, &val, self,
                                 say_method_, &self_val, src_obj, 1);
        }
    }

    /* if the "say" function isn't initialized, it's an error */
    if (say_func_ == 0 || say_func_->val.typ == VM_NIL)
        err_throw(VMERR_SAY_IS_NOT_DEFINED);

    /* call the "say" function with the argument at top of stack */
    return call_func_ptr(vmg_ &say_func_->val, 1, 0, caller_ofs);
}
4190
4191 /*
4192 * Set the "say" function.
4193 */
void CVmRun::set_say_func(VMG_ const vm_val_t *val)
{
    /*
     *   if we haven't yet allocated a global variable to hold the 'say'
     *   function, allocate one now (the global variable keeps the
     *   function's object reference alive across garbage collections)
     */
    if (say_func_ == 0)
        say_func_ = G_obj_table->create_global_var();

    /* remember the new function */
    say_func_->val = *val;
}
4206
4207 /*
4208 * Get the current "say" function
4209 */
4210 void CVmRun::get_say_func(vm_val_t *val) const
4211 {
4212 /*
4213 * if we ever allocated a global to hold the 'say' function, return its
4214 * value; otherwise, there's no 'say' function, so the result is nil
4215 */
4216 if (say_func_ != 0)
4217 *val = say_func_->val;
4218 else
4219 val->set_nil();
4220 }
4221
4222 /* ------------------------------------------------------------------------ */
4223 /*
4224 * Check a property for speculative evaluation
4225 */
4226 void CVmRun::check_prop_spec_eval(VMG_ vm_obj_id_t obj, vm_prop_id_t prop)
4227 {
4228 vm_val_t val;
4229 vm_obj_id_t srcobj;
4230
4231 /* get the property value */
4232 if (vm_objp(vmg_ obj)->get_prop(vmg_ prop, &val, obj, &srcobj, 0))
4233 {
4234 /* check the type of the value */
4235 switch(val.typ)
4236 {
4237 case VM_CODEOFS:
4238 case VM_DSTRING:
4239 case VM_NATIVE_CODE:
4240 /*
4241 * evaulating these types could result in side effects, so
4242 * this property cannot be evaulated during a speculative
4243 * evaluation
4244 */
4245 err_throw(VMERR_BAD_SPEC_EVAL);
4246 break;
4247
4248 default:
4249 /* evaluating other types causes no side effects, so proceed */
4250 break;
4251 }
4252 }
4253 }
4254
4255 /* ------------------------------------------------------------------------ */
4256 /*
4257 * Set up a function header pointer for the current function
4258 */
void CVmRun::set_current_func_ptr(VMG_ CVmFuncPtr *func_ptr)
{
    /*
     *   set up the pointer based on the current Entry Pointer register,
     *   which holds the native address of the executing method's header
     */
    func_ptr->set(entry_ptr_native_);
}
4264
4265 /*
4266 * Set up a function header pointer for the return address of the given
4267 * stack frame
4268 */
4269 void CVmRun::set_return_funcptr_from_frame(VMG_ CVmFuncPtr *func_ptr,
4270 vm_val_t *frame_ptr)
4271 {
4272 pool_ofs_t ep;
4273
4274 /* get the enclosing entry pointer for the frame */
4275 ep = get_enclosing_entry_ptr_from_frame(vmg_ frame_ptr);
4276
4277 /* set up the function pointer for the entry pointer */
4278 func_ptr->set((const uchar *)G_code_pool->get_ptr(ep));
4279 }
4280
4281 /* ------------------------------------------------------------------------ */
4282 /*
4283 * Get the frame pointer at a given stack level
4284 */
4285 vm_val_t *CVmRun::get_fp_at_level(VMG_ int level) const
4286 {
4287 vm_val_t *fp;
4288
4289 /* walk up the stack to the desired level */
4290 for (fp = frame_ptr_ ; fp != 0 && level != 0 ;
4291 --level, fp = get_enclosing_frame_ptr(vmg_ fp));
4292
4293 /*
4294 * if we ran out of frames before we reached the desired level,
4295 * throw an error
4296 */
4297 if (fp == 0)
4298 err_throw(VMERR_BAD_FRAME);
4299
4300 /* return the frame */
4301 return fp;
4302 }
4303
4304 /* ------------------------------------------------------------------------ */
4305 /*
4306 * Get the message from an exception object
4307 */
void CVmRun::get_exc_message(VMG_ const CVmException *exc,
                             char *buf, size_t buflen, int add_unh_prefix)
{
    CVmException tmpexc;
    const char *tmpmsg;
    const char *msg;

    /* set up our temporary exception object with no parameters by default */
    tmpexc.param_count_ = 0;

    /* check for unhandled program exceptions */
    if (exc->get_error_code() == VMERR_UNHANDLED_EXC)
    {
        size_t msg_len;

        /*
         *   This is not a VM error, but is simply an exception that the
         *   program itself threw but did not handle.  We might be able to
         *   find an informational message in the exception object itself.
         */

        /* get the exception's message, if available */
        msg = get_exc_message(vmg_ exc, &msg_len);
        if (msg != 0)
        {
            /*
             *   we got a message from the exception object - use it
             */

            /* set up our parameters for the formatting */
            tmpexc.param_count_ = 1;
            tmpexc.set_param_str(0, msg, msg_len);

            /*
             *   If they want an "unhandled exception" prefix, get the
             *   message for the prefix; otherwise, just use the message
             *   from the exception without further adornment.
             */
            if (add_unh_prefix)
            {
                /* they want a prefix - get the prefix message */
                tmpmsg = err_get_msg(vm_messages, vm_message_count,
                                     VMERR_UNHANDLED_EXC_PARAM, FALSE);
            }
            else
            {
                /* no prefix desired - just use the message as we got it */
                tmpmsg = "%s";
            }

            /* format the message */
            err_format_msg(buf, buflen, tmpmsg, &tmpexc);
        }
        else
        {
            /* no message - use a generic exception message */
            tmpmsg = err_get_msg(vm_messages, vm_message_count,
                                 VMERR_UNHANDLED_EXC, FALSE);
            err_format_msg(buf, buflen, tmpmsg, &tmpexc);
        }
    }
    else
    {
        /*
         *   It's a VM exception, so we can determine the error's meaning
         *   from the error code.  Look up the message for the error code
         *   in our error message list.
         */
        msg = err_get_msg(vm_messages, vm_message_count,
                          exc->get_error_code(), FALSE);

        /* if that failed, just show the error number */
        if (msg == 0)
        {
            /* no message - just show the error code */
            tmpmsg = err_get_msg(vm_messages, vm_message_count,
                                 VMERR_VM_EXC_CODE, FALSE);

            /* set up our parameters for formatting */
            tmpexc.param_count_ = 1;
            tmpexc.set_param_int(0, exc->get_error_code());

            /* format the message */
            err_format_msg(buf, buflen, tmpmsg, &tmpexc);
        }
        else
        {
            char tmpbuf[256];

            /* format the message from the exception parameters */
            err_format_msg(tmpbuf, sizeof(tmpbuf), msg, exc);

            /* get the "VM exception" prefix message */
            tmpmsg = err_get_msg(vm_messages, vm_message_count,
                                 VMERR_VM_EXC_PARAM, FALSE);

            /* set up our parameters for the formatting */
            tmpexc.param_count_ = 1;
            tmpexc.set_param_str(0, tmpbuf);

            /* format the message */
            err_format_msg(buf, buflen, tmpmsg, &tmpexc);
        }
    }
}
4413
4414 /*
4415 * Get the message from an "unhandled exception" error object
4416 */
4417 const char *CVmRun::get_exc_message(VMG_ const CVmException *exc,
4418 size_t *msg_len)
4419 {
4420 vm_obj_id_t exc_obj;
4421
4422 /*
4423 * if the error isn't "unhandled exception," there's not a stored
4424 * exception object; likewise, if there's no object parameter in the
4425 * exception, there's nothing to use to obtain the message
4426 */
4427 if (exc->get_error_code() != VMERR_UNHANDLED_EXC
4428 || exc->get_param_count() < 1)
4429 return 0;
4430
4431 /* get the exception object */
4432 exc_obj = (vm_obj_id_t)exc->get_param_ulong(0);
4433
4434 /* get the message from the object */
4435 return get_exc_message(vmg_ exc_obj, msg_len);
4436 }
4437
4438 /*
4439 * Get the message from an exception object
4440 */
const char *CVmRun::get_exc_message(VMG_ vm_obj_id_t exc_obj, size_t *msg_len)
{
    vm_val_t val;
    vm_obj_id_t src_obj;
    const char *str;
    uint argc;

    /* if there's no object, there's no message */
    if (exc_obj == VM_INVALID_OBJ)
        return 0;

    /*
     *   get the exceptionMessage property value from the object; if
     *   there's not a valid exceptionMessage property defined, or the
     *   object doesn't have a value for the property, there's no message
     */
    argc = 0;
    if (G_predef->rterrmsg_prop == VM_INVALID_PROP
        || (!vm_objp(vmg_ exc_obj)->get_prop(vmg_ G_predef->rterrmsg_prop,
                                             &val, exc_obj, &src_obj,
                                             &argc)))
        return 0;

    /*
     *   We got the property.  If it's a string or an object containing a
     *   string, retrieve the string.
     */
    switch(val.typ)
    {
    case VM_SSTRING:
        /* get the constant string from the constant pool */
        str = G_const_pool->get_ptr(val.val.ofs);
        break;

    case VM_OBJ:
        /* get the string value of the object, if possible (else null) */
        str = vm_objp(vmg_ val.val.obj)->get_as_string(vmg0_);
        break;

    default:
        /* it's not a string - we can't use it */
        str = 0;
        break;
    }

    /* check to see if we got a string */
    if (str != 0)
    {
        /*
         *   The string is in the standard VM internal format, which means
         *   it has a 2-byte length prefix followed by the bytes of the
         *   string (with no null termination).  Read the length prefix,
         *   then skip past it so the caller doesn't have to.
         */
        *msg_len = osrp2(str);
        str += VMB_LEN;
    }

    /* return the string pointer (*msg_len is set only on success) */
    return str;
}
4502
4503 /* ------------------------------------------------------------------------ */
4504 /*
4505 * Get the boundaries of the current statement, based on debugging
4506 * information. Returns true if valid debugging information was found for
4507 * the given code location, false if not.
4508 */
4509 int CVmRun::get_stm_bounds(VMG_ const CVmFuncPtr *func_ptr,
4510 ulong method_ofs,
4511 CVmDbgLinePtr *caller_line_ptr,
4512 ulong *stm_start, ulong *stm_end)
4513 {
4514 CVmDbgTablePtr dbg_ptr;
4515 int lo;
4516 int hi;
4517 int cur;
4518
4519 /* presume we won't find anything */
4520 *stm_start = *stm_end = 0;
4521
4522 /*
4523 * if the current method has no line records, we can't find the
4524 * boundaries
4525 */
4526 if (!func_ptr->set_dbg_ptr(&dbg_ptr)
4527 || dbg_ptr.get_line_count(vmg0_) == 0)
4528 {
4529 /* indicate that we didn't find debug information */
4530 return FALSE;
4531 }
4532
4533 /*
4534 * We must perform a binary search of the line records for the line
4535 * that contains this program counter offset.
4536 */
4537 lo = 0;
4538 hi = dbg_ptr.get_line_count(vmg0_) - 1;
4539 while (lo <= hi)
4540 {
4541 ulong start_ofs;
4542 ulong end_ofs;
4543 CVmDbgLinePtr line_ptr;
4544
4545 /* split the difference and get the current entry */
4546 cur = lo + (hi - lo)/2;
4547 dbg_ptr.set_line_ptr(vmg_ &line_ptr, cur);
4548
4549 /* get the current statement's start relative to the method header */
4550 start_ofs = line_ptr.get_start_ofs();
4551
4552 /*
4553 * Get the next statement's start offset, which gives us the end
4554 * of this statement. If this is the last statement in the table,
4555 * it runs to the end of the function; use the debug records table
4556 * offset as the upper bound in this case.
4557 */
4558 if (cur == (int)dbg_ptr.get_line_count(vmg0_) - 1)
4559 {
4560 /*
4561 * it's the last record - use the debug table offset as an
4562 * upper bound, since we know the function can't have any
4563 * executable code past this point
4564 */
4565 end_ofs = func_ptr->get_debug_ofs();
4566 }
4567 else
4568 {
4569 CVmDbgLinePtr next_line_ptr;
4570
4571 /* another record follows this one - use it */
4572 next_line_ptr.copy_from(&line_ptr);
4573 next_line_ptr.inc(vmg0_);
4574 end_ofs = next_line_ptr.get_start_ofs();
4575 }
4576
4577 /* see where we are relative to this line record */
4578 if (method_ofs >= end_ofs)
4579 {
4580 /* we need to go higher */
4581 lo = (cur == lo ? cur + 1 : cur);
4582 }
4583 else if (method_ofs < start_ofs)
4584 {
4585 /* we need to go lower */
4586 hi = (cur == hi ? hi - 1 : cur);
4587 }
4588 else
4589 {
4590 /* found it - set the bounds to this record's limits */
4591 *stm_start = start_ofs;
4592 *stm_end = end_ofs;
4593
4594 /* fill in the caller's line pointer if desired */
4595 if (caller_line_ptr != 0)
4596 caller_line_ptr->copy_from(&line_ptr);
4597
4598 /* indicate that we found the line boundaries successfully */
4599 return TRUE;
4600 }
4601 }
4602
4603 /* return failure */
4604 return FALSE;
4605 }
4606
4607 /* ------------------------------------------------------------------------ */
4608 /*
4609 * Profiler functions
4610 */
4611 #ifdef VM_PROFILER
4612
4613 /*
4614 * Profiler master hash table entry
4615 */
4616 class CVmHashEntryProfiler: public CVmHashEntryCI
4617 {
4618 public:
4619 CVmHashEntryProfiler(const char *str, size_t len,
4620 const vm_profiler_rec *rec)
4621 : CVmHashEntryCI(str, len, TRUE)
4622 {
4623 /* copy the profiler record's identifying portion */
4624 rec_.func = rec->func;
4625 rec_.obj = rec->obj;
4626 rec_.prop = rec->prop;
4627
4628 /* initialize the timers and counters to zero */
4629 rec_.sum_direct.hi = rec_.sum_direct.lo = 0;
4630 rec_.sum_chi.hi = rec_.sum_chi.lo = 0;
4631 rec_.call_cnt = 0;
4632 }
4633
4634 /* our profiler record */
4635 vm_profiler_rec rec_;
4636 };
4637
4638 /*
4639 * Begin profiling
4640 */
4641 void CVmRun::start_profiling()
4642 {
4643 /* clear any old profiler data from the master hash table */
4644 prof_master_table_->delete_all_entries();
4645
4646 /* reset the profiler stack */
4647 prof_stack_idx_ = 0;
4648
4649 /* turn on profiling */
4650 profiling_ = TRUE;
4651 }
4652
4653 /*
4654 * End profiling
4655 */
4656 void CVmRun::end_profiling()
4657 {
4658 /* turn off profiling */
4659 profiling_ = FALSE;
4660
4661 /* leave all active profiler stack levels */
4662 while (prof_stack_idx_ != 0)
4663 prof_leave();
4664 }
4665
/*
 * Context for the hash-table enumeration callback (prof_enum_cb) used
 * when reporting profiling data to a client.
 */
struct vmrun_prof_enum
{
    /* interpreter object (the CVmRun whose data we're reporting) */
    CVmRun *terp;

    /* debugger object, used to translate IDs into symbol names */
    CVmDebug *dbg;

    /*
     * Client callback and its opaque context.  The callback receives
     * the function/method name, two timing figures, and a call count.
     */
    void (*cb)(void *, const char *, unsigned long, unsigned long,
               unsigned long);
    void *cb_ctx;
};
4680
4681 /*
4682 * Get the profiling data
4683 */
4684 void CVmRun::get_profiling_data(VMG_
4685 void (*cb)(void *,
4686 const char *,
4687 unsigned long,
4688 unsigned long,
4689 unsigned long),
4690 void *cb_ctx)
4691 {
4692 vmrun_prof_enum our_ctx;
4693
4694 /* if there's no debugger, we can't get symbols, so we can't proceed */
4695 if (G_debugger == 0)
4696 return;
4697
4698 /* set up our callback context */
4699 our_ctx.terp = this;
4700 our_ctx.dbg = G_debugger;
4701 our_ctx.cb = cb;
4702 our_ctx.cb_ctx = cb_ctx;
4703
4704 /* enumerate the master table entries through our callback */
4705 prof_master_table_->enum_entries(&prof_enum_cb, &our_ctx);
4706 }
4707
4708 /*
4709 * Callback for enumerating the profiling data
4710 */
4711 void CVmRun::prof_enum_cb(void *ctx0, CVmHashEntry *entry0)
4712 {
4713 vmrun_prof_enum *ctx = (vmrun_prof_enum *)ctx0;
4714 CVmHashEntryProfiler *entry = (CVmHashEntryProfiler *)entry0;
4715 char namebuf[128];
4716 const char *p;
4717
4718 /* generate the name of the function or method */
4719 if (entry->rec_.obj != VM_INVALID_OBJ)
4720 {
4721 char *dst;
4722
4723 /* look up the object name */
4724 p = ctx->dbg->objid_to_sym(entry->rec_.obj);
4725
4726 /* get the original name, if this is a synthetic 'modify' object */
4727 p = ctx->dbg->get_modifying_sym(p);
4728
4729 /*
4730 * if we got an object name, use it; otherwise, synthesize a name
4731 * using the object number
4732 */
4733 if (p != 0)
4734 strcpy(namebuf, p);
4735 else
4736 sprintf(namebuf, "obj#%lx", (long)entry->rec_.obj);
4737
4738 /* add a period */
4739 dst = namebuf + strlen(namebuf);
4740 *dst++ = '.';
4741
4742 /* look up the property name */
4743 p = ctx->dbg->propid_to_sym(entry->rec_.prop);
4744 if (p != 0)
4745 strcpy(dst, p);
4746 else
4747 sprintf(dst, "prop#%x", (int)entry->rec_.prop);
4748 }
4749 else if (entry->rec_.func != 0)
4750 {
4751 /* look up the function at the code offset */
4752 p = ctx->dbg->funcaddr_to_sym(entry->rec_.func);
4753 if (p != 0)
4754 strcpy(namebuf, p);
4755 else
4756 sprintf(namebuf, "func#%lx", (long)entry->rec_.func);
4757 }
4758 else
4759 {
4760 /* it must be system code */
4761 strcpy(namebuf, "<System>");
4762 }
4763
4764 /* invoke the callback with the data */
4765 (*ctx->cb)(ctx->cb_ctx, namebuf,
4766 os_prof_time_to_ms(&entry->rec_.sum_direct),
4767 os_prof_time_to_ms(&entry->rec_.sum_chi),
4768 entry->rec_.call_cnt);
4769 }
4770
4771
4772 /*
4773 * Profile entry into a new function or method
4774 */
4775 void CVmRun::prof_enter(pool_ofs_t call_ofs,
4776 vm_obj_id_t obj, vm_prop_id_t prop)
4777 {
4778 vm_prof_time cur;
4779
4780 /* get the current time */
4781 os_prof_curtime(&cur);
4782
4783 /* if we have a valid previous entry, suspend it */
4784 if (prof_stack_idx_ > 0 && prof_stack_idx_ - 1 < prof_stack_max_)
4785 {
4786 vm_profiler_rec *p;
4787 vm_prof_time delta;
4788
4789 /* get a pointer to the outgoing entry */
4790 p = &prof_stack_[prof_stack_idx_ - 1];
4791
4792 /*
4793 * add the time since the last start to the cumulative time spent
4794 * in this function
4795 */
4796 prof_calc_elapsed(&delta, &cur, &prof_start_);
4797 prof_add_elapsed(&p->sum_direct, &delta);
4798 }
4799
4800 /* if we have room on the profiler stack, add a new level */
4801 if (prof_stack_idx_ < prof_stack_max_)
4802 {
4803 vm_profiler_rec *p;
4804
4805 /* get a pointer to the new entry */
4806 p = &prof_stack_[prof_stack_idx_];
4807
4808 /* remember the identifying data for the method or function */
4809 p->func = call_ofs;
4810 p->obj = obj;
4811 p->prop = prop;
4812
4813 /* we have no cumulative time yet */
4814 p->sum_direct.hi = p->sum_direct.lo = 0;
4815 p->sum_chi.hi = p->sum_chi.lo = 0;
4816 }
4817
4818 /* count the level */
4819 ++prof_stack_idx_;
4820
4821 /* remember the start time in the new current function */
4822 os_prof_curtime(&prof_start_);
4823 }
4824
4825 /*
4826 * Profile returning from a function or method
4827 */
void CVmRun::prof_leave()
{
    vm_prof_time delta;
    vm_prof_time cur;
    vm_prof_time chi;

    /* get the current time */
    os_prof_curtime(&cur);

    /*
     * Move to the last level.  This mirrors prof_enter's unconditional
     * increment, so the index stays balanced even when recursion ran
     * past the fixed-size profiler stack.
     */
    --prof_stack_idx_;

    /*
     * Presume we won't know the child time.  (If this level was beyond
     * the profiler stack's capacity, no times were ever recorded for
     * it, so zero is the only figure we can report to the caller.)
     */
    chi.hi = chi.lo = 0;

    /* if we're on a valid level, finish the call */
    if (prof_stack_idx_ < prof_stack_max_)
    {
        vm_profiler_rec *p;
        CVmHashEntryProfiler *entry;

        /* get a pointer to the outgoing entry */
        p = &prof_stack_[prof_stack_idx_];

        /*
         * add the time since the last start to the cumulative time spent
         * in this function
         */
        prof_calc_elapsed(&delta, &cur, &prof_start_);
        prof_add_elapsed(&p->sum_direct, &delta);

        /*
         * Find or create the master record for the terminating function or
         * method, and add the cumulative times from this call to the
         * master record's cumulative times.  Also count the invocation in
         * the master record.
         */
        entry = prof_find_master_rec(p);
        prof_add_elapsed(&entry->rec_.sum_direct, &p->sum_direct);
        prof_add_elapsed(&entry->rec_.sum_chi, &p->sum_chi);
        ++(entry->rec_.call_cnt);

        /*
         * Calculate the cumulative time in the outgoing function - this is
         * the total time directly in the function plus the cumulative time
         * in all of its children.  We must add this to the caller's
         * cumulative child time, since this function and all of its
         * children are children of the caller and thus must count in the
         * caller's total child time.
         */
        chi = p->sum_direct;
        prof_add_elapsed(&chi, &p->sum_chi);
    }

    /* if we're leaving to a valid level, re-activate it */
    if (prof_stack_idx_ > 0 && prof_stack_idx_ < prof_stack_max_)
    {
        vm_profiler_rec *p;

        /* get a pointer to the resuming entry */
        p = &prof_stack_[prof_stack_idx_ - 1];

        /*
         * add the time spent in the child and its children to our
         * cumulative child time
         */
        prof_add_elapsed(&p->sum_chi, &chi);
    }

    /*
     * remember the new start time for the function we're resuming - we
     * must reset this to the current time, since we measure deltas from
     * the last call or return on each call or return
     */
    os_prof_curtime(&prof_start_);
}
4904
4905 /*
4906 * Calculate an elapsed 64-bit time value
4907 */
4908 void CVmRun::prof_calc_elapsed(vm_prof_time *diff, const vm_prof_time *a,
4909 const vm_prof_time *b)
4910 {
4911 /* calculate the differences of the low and high parts */
4912 diff->lo = a->lo - b->lo;
4913 diff->hi = a->hi - b->hi;
4914
4915 /*
4916 * if the low part ended up higher than it started, then we
4917 * underflowed, and hence must borrow from the high part
4918 */
4919 if (diff->lo > a->lo)
4920 --(diff->hi);
4921 }
4922
4923 /*
4924 * Add one elapsed time value to another
4925 */
4926 void CVmRun::prof_add_elapsed(vm_prof_time *sum, const vm_prof_time *val)
4927 {
4928 unsigned long orig_lo;
4929
4930 /* remember the original low part */
4931 orig_lo = sum->lo;
4932
4933 /* add the low parts and high parts */
4934 sum->lo += val->lo;
4935 sum->hi += val->hi;
4936
4937 /*
4938 * if the low part of the sum is less than where it started, then it
4939 * overflowed, and we must hence carry to the high part
4940 */
4941 if (sum->lo < orig_lo)
4942 ++(sum->hi);
4943 }
4944
4945 /*
4946 * Find or create a hash table entry for a profiler record
4947 */
4948 CVmHashEntryProfiler *CVmRun::prof_find_master_rec(const vm_profiler_rec *p)
4949 {
4950 const size_t id_siz = sizeof(p->func) + sizeof(p->obj) + sizeof(p->prop);
4951 char id[id_siz];
4952 CVmHashEntryProfiler *entry;
4953
4954 /*
4955 * Build the ID string, which we'll use as our hash key. We never have
4956 * to serialize this, so it doesn't matter that it's dependent on byte
4957 * order and word size.
4958 */
4959 memcpy(id, &p->func, sizeof(p->func));
4960 memcpy(id + sizeof(p->func), &p->obj, sizeof(p->obj));
4961 memcpy(id + sizeof(p->func) + sizeof(p->obj), &p->prop, sizeof(p->prop));
4962
4963 /* try to find an existing entry */
4964 entry = (CVmHashEntryProfiler *)prof_master_table_->find(id, id_siz);
4965
4966 /* if we didn't find an entry, create one */
4967 if (entry == 0)
4968 {
4969 /* create a new entry */
4970 entry = new CVmHashEntryProfiler(id, id_siz, p);
4971
4972 /* add it to the table */
4973 prof_master_table_->add(entry);
4974 }
4975
4976 /* return the entry */
4977 return entry;
4978 }
4979
4980 #endif /* VM_PROFILER */
4981
4982 /* ------------------------------------------------------------------------ */
4983 /*
4984 * Footnote - for the referring code, search the code above for
4985 * [REGISTER_P_FOOTNOTE].
4986 *
4987 * This footnote pertains to a 'register' declaration that causes gcc (and
4988 * probably some other compilers) to generate a warning message. The
4989 * 'register' declaration is useful on some compilers and will be retained.
4990 * Here's a note I sent to Nikos Chantziaras (who asked about the warning)
4991 * explaining why I'm choosing to leave the 'register' declaration in, and
4992 * why I think this 'register' declaration is actually correct and useful
4993 * despite the warning it generates on some compilers.
4994 *
4995 * The basic issue is that the code takes the address of the variable in
4996 * question in expressions passed as parameters to certain function calls.
4997 * These function calls all happen to be in-linable functions, and it
4998 * happens that in each function, the address operator is always canceled
4999 * out by a '*' dereference operator - in other words, we have '*&p', which
5000 * the compiler can turn into just plain 'p' when the calls are in-lined,
5001 * eliminating the need to actually take the address of 'p'.
5002 *
5003 * Nikos:
5004 *. >I'm no expert, but I think GCC barks at this because it isn't possible
5005 *. >at all to store the variable in a register if the code wants its
5006 *. >address, therefore the 'register' in the declaration does nothing.
5007 *
5008 * That's correct, but a compiler is always free to ignore 'register'
5009 * declarations *anyway*, even if enregistration is possible. Therefore a
5010 * warning that it's not possible to obey 'register' is unnecessary,
5011 * because it's explicit in the language definition that 'register' is not
5012 * binding. It simply is not possible for an ignored 'register' attribute
5013 * to cause unexpected behavior. Warnings really should only be generated
5014 * for situations where it is likely that the programmer expects different
5015 * behavior than the compiler will deliver; in the case of an ignored
5016 * 'register' attribute, the programmer is *required* to expect that the
5017 * attribute might be ignored, so a warning to this effect is superfluous.
5018 *
5019 * Now, I understand why they generate the warning - it's because the
5020 * compiler believes that the program code itself makes enregistration
5021 * impossible, not because the compiler has chosen for optimization
5022 * purposes to ignore the 'register' request. However, as we'll see
5023 * shortly, the program code doesn't truly make enregistration impossible;
5024 * it is merely impossible in some interpretations of the code. Therefore
5025 * we really are back to the compiler choosing to ignore the 'register'
5026 * request due to its own optimization decisions; the 'register' request is
5027 * made impossible far downstream of the actual decisions that the compiler
5028 * makes (which have to do with in-line vs out-of-line calls), but it
5029 * really is compiler decisions that make it impossible, not the inherent
5030 * structure of the code.
5031 *
5032 *. >Furthermore, I'm not sure I understand the relationship
5033 *. >between 'register' and inlining; why should "*(&p)" do something
5034 *. >else "in calls to inlines" than its obvious meaning?
5035 *
5036 * When a function is in-lined, the compiler is not required to generate
5037 * the same code it would generate for the most general case of the same
5038 * function call, as long as the meaning is the same.
5039 *
5040 * For example, suppose we have some code that contains a call to a
5041 * function like so:
5042 *
5043 * a = myFunc(a + 7, 3);
5044 *
5045 * In the general out-of-line case, the compiler must generate some
5046 * machine-code instructions like this:
5047 *
5048 *. push #3
5049 *. mov [a], d0
5050 *. add #7, d0
5051 *. push d0
5052 *. call #myFunc
5053 *. mov d0, [a]
5054 *
5055 * The compiler doesn't have access to the inner workings of myFunc, so it
5056 * must generate the appropriate code for the generic interface to an
5057 * external function.
5058 *
5059 * Now, suppose the function is defined like so:
5060 *
5061 * int myFunc(int a, int b) { return a - 6; }
5062 *
5063 * and further suppose that the compiler decides to in-line this function.
5064 * In-lining means the compiler will generate the code that implements the
5065 * function directly in the caller; there will be no call to an external
5066 * linkage point. This means the compiler can implement the linkage to the
5067 * function with a custom one-off interface for this particular invocation
5068 * - every in-line invocation can be customized to the exact context where
5069 * it appears. So, for example, if we call myFunc right now and registers
 * d1 and d2 happen to be available, we can put the parameters in d1 and
5071 * d2, and the generated function will refer to those registers for the
5072 * parameters rather than having to look in the stack. Later on, if we
5073 * generate a separate call to the same function, but registers d3 and d7
5074 * are the ones available, we can use those instead. Each generated copy
5075 * of the function can fit its exact context.
5076 *
5077 * Furthermore, looking at this function and at the arguments passed, we
5078 * can see that the formal parameter 'b' has no effect on the function's
5079 * results, and the actual parameter '3' passed for 'b' has no side
5080 * effects. Therefore, the compiler is free to completely ignore this
5081 * parameter - there's no need to generate any code for it at all, since we
5082 * have sufficient knowledge to see that it has no effect on the meaning of
5083 * the code.
5084 *
5085 * Further still, we can globally optimize the entire function. So, we can
5086 * see that myFunc(a+7, 3) is going to turn into the expression (a+7-6).
5087 * We can fold constants to arrive at (a+1) as the result of the function.
5088 * We can therefore generate the entire code for the function's invocation
5089 * like so:
5090 *
5091 * inc [a]
5092 *
5093 * Okay, now let's look at the &p case. In the specific examples in
5094 * vmrun.cpp, we have a bunch of function invocations like this:
5095 *
5096 * register const char *p;
5097 *. int x = myfunc(&p);
5098 *
5099 * In the most general case, we have to generate code like this:
5100 *
5101 *. lea [p], d0 ; load effective address
5102 *. push d0
5103 *. call #myfunc
5104 *. mov d0, [x]
5105 *
5106 * So, in the most general case of a call with external linkage, we need
5107 * 'p' to have a main memory address so that we can push it on the stack as
5108 * the parameter to this call. Registers don't have main memory addresses,
5109 * so 'p' can't go in a register.
5110 *
5111 * However, we know what myfunc() looks like:
5112 *
5113 *. char myfunc(const char **p)
5114 *. {
5115 *. char c = **p;
5116 *. *p += 1;
5117 *. return c;
5118 *. }
5119 *
5120 * If the compiler chooses to in-line this function, it can globally
5121 * optimize its linkage and implementation as we saw earlier. So, the
5122 * compiler can rewrite the code like so:
5123 *
5124 * register const char *p;
5125 *. int x = **(&p);
5126 *. *(&p) += 1;
5127 *
5128 * which can be further rewritten to:
5129 *
5130 *. register const char *p;
5131 *. int x = *p;
5132 *. p += 1;
5133 *
5134 * Now we can generate the machine code for the final optimized form:
5135 *
5136 *. mov [p], a0 ; get the *value* of p into index register 0
5137 *. mov.byte [a0+0], d0 ; get the value index register 0 points to
5138 *. mov.byte d0, [x] ; store it in x
5139 *. inc [p] ; inc the value of p
5140 *
5141 * Nowhere do we need a main memory address for p. This means the compiler
5142 * can keep p in a register, say d5:
5143 *
5144 *. mov d5, a0
5145 *. mov.byte [a0+0], d0
5146 *. mov.byte d0, [x]
5147 *. inc d5
5148 *
5149 * And this is indeed exactly what the code that comes out of vc++ looks
5150 * like (changed from my abstract machine to 32-bit x86, of course).
5151 *
5152 * So: if the compiler chooses to in-line the functions that are called
5153 * with '&p' as a parameter, and the compiler performs the available
5154 * optimizations on those calls once they're in-lined, then a memory
5155 * address for 'p' is never needed. Thus there is a valid interpretation
5156 * of the code where 'register p' can be obeyed. If the compiler doesn't
5157 * choose to in-line the functions or make those optimizations, then the
5158 * compiler will be unable to satisfy the 'register p' request and will be
5159 * forced to put 'p' in addressable main memory. But it really is entirely
5160 * up to the compiler whether to obey the 'register p' request; the
5161 * program's structure does not make the request impossible to satisfy.
5162 * Therefore there is no reason for the compiler to warn about this, any
5163 * more than there would be if the compiler chose not to obey the 'register
5164 * p' simply because it thought it could make more optimal use of the
5165 * available registers. That gcc warns is understandable, in that a
5166 * superficial reading of the code would not reveal the optimization
5167 * opportunity; but the warning is nonetheless unnecessary, and the
5168 * 'register' does provide useful optimization hinting to at least vc++, so
5169 * I think it's best to leave it in and ignore the warning.
5170 */
5171