1 #ifdef RCSID
2 static char RCSid[] =
3 "$Header: d:/cvsroot/tads/tads3/VMOBJ.CPP,v 1.4 1999/07/11 00:46:58 MJRoberts Exp $";
4 #endif
5
6 /*
7 * Copyright (c) 1998, 2002 Michael J. Roberts. All Rights Reserved.
8 *
9 * Please see the accompanying license file, LICENSE.TXT, for information
10 * on using and copying this software.
11 */
12 /*
13 Name
14 vmobj.cpp - VM object manager
15 Function
16
17 Notes
18
19 Modified
20 10/28/98 MJRoberts - Creation
21 */
22
23 #include <stdlib.h>
24 #include <memory.h>
25 #include <assert.h>
26
27 #include "t3std.h"
28 #include "vmtype.h"
29 #include "vmobj.h"
30 #include "vmstack.h"
31 #include "vmundo.h"
32 #include "vmrun.h"
33 #include "vmfile.h"
34 #include "vmmeta.h"
35 #include "vmlst.h"
36 #include "vmstr.h"
37 #include "vmintcls.h"
38 #include "vmpool.h"
39 #include "vmfunc.h"
40 #include "vmpredef.h"
41 #include "vmhash.h"
42 #include "vmtobj.h"
43
44
45 /* ------------------------------------------------------------------------ */
46 /*
47 * Base fixed-size object entry implementation
48 */
49
/*
 *   Metaclass registration object for the root object class. Note that a
 *   root object can never be instantiated; this entry is purely for the
 *   use of the type system.
 */
static CVmMetaclassRoot metaclass_reg_obj;

/* static class member exposing the root metaclass registration record */
CVmMetaclass *CVmObject::metaclass_reg_ = &metaclass_reg_obj;
57
/*
 *   Function table for the intrinsic methods of the base object
 *   metaclass.  get_prop() and inh_prop() translate a property ID into an
 *   index into this table via the metaclass dependency table
 *   (prop_to_vector_idx), so the order of the entries here must match the
 *   property-to-vector mapping established at registration.  Slot 0 is
 *   the "property not defined" handler (see the func_idx != 0 test in
 *   inh_prop).
 */
int (CVmObject::
     *CVmObject::func_table_[])(VMG_ vm_obj_id_t self,
                                vm_val_t *retval, uint *argc,
                                vm_prop_id_t prop, vm_obj_id_t *source_obj) =
{
    &CVmObject::getp_undef,                 /* slot 0 - undefined property */
    &CVmObject::getp_of_kind,
    &CVmObject::getp_sclist,
    &CVmObject::getp_propdef,
    &CVmObject::getp_proptype,
    &CVmObject::getp_get_prop_list,
    &CVmObject::getp_get_prop_params,
    &CVmObject::getp_is_class,
    &CVmObject::getp_propinh,
    &CVmObject::getp_is_transient
};
75
76 /*
77 * Allocate space for an object from a page table, given the object ID.
78 * The caller must allocate the object ID prior to new'ing the object;
79 * operator new will store the memory for the new object in the object
80 * slot the caller allocated.
81 */
operator new(size_t siz,VMG_ vm_obj_id_t obj_id)82 void *CVmObject::operator new(size_t siz, VMG_ vm_obj_id_t obj_id)
83 {
84 /*
85 * The size must be the size of an object entry. This size never
86 * changes, even for subclasses of the object type, since all
87 * variable-size data must be stored in the variable-size portion of
88 * the object. Here we are only concerned with allocating the
89 * fixed-size object descriptor.
90 */
91 assert(siz == sizeof(CVmObject));
92
93 /* return the memory contained in the object entry */
94 return G_obj_table->get_obj(obj_id);
95 }
96
97 /*
98 * Determine if this object is an instance of the given object. By
99 * default, we will simply check to see if the given object is the
100 * IntrinsicClass instance that represents our metaclass or one of its
101 * superclasses.
102 */
is_instance_of(VMG_ vm_obj_id_t obj)103 int CVmObject::is_instance_of(VMG_ vm_obj_id_t obj)
104 {
105 vm_meta_entry_t *entry;
106
107 /*
108 * we can only be an instance of the object if the object is an
109 * IntrinsicClass instance, since by default we have only intrinsic
110 * classes among our superclasses
111 */
112 if (!CVmObjClass::is_intcls_obj(vmg_ obj))
113 return FALSE;
114
115 /*
116 * look up my metaclass in the metaclass dependency table, and
117 * determine if my dependency table entry's record of the
118 * IntrinsicClass object for the metaclass matches the given object
119 */
120 entry = (G_meta_table
121 ->get_entry_from_reg(get_metaclass_reg()->get_reg_idx()));
122
123 /*
124 * if we have an entry, ask our superclass object if it is an
125 * instance of the given object; otherwise, we must not be an
126 * instance
127 */
128 if (entry != 0)
129 {
130 /* if this is our direct superclass, we're an instance */
131 if (entry->class_obj_ == obj)
132 return TRUE;
133
134 /* if there's no intrinsic class object, return false */
135 if (entry->class_obj_ == VM_INVALID_OBJ)
136 return FALSE;
137
138 /* ask the superclass if it inherits from the given object */
139 return vm_objp(vmg_ entry->class_obj_)->is_instance_of(vmg_ obj);
140 }
141 else
142 {
143 /*
144 * no metaclass table entry - we can't really make any
145 * determination, so indicate that we're not an instance
146 */
147 return FALSE;
148 }
149 }
150
151 /*
152 * Get the nth superclass. By default, an object's superclass is
153 * represented by the intrinsic class object for the metaclass.
154 */
get_superclass(VMG_ vm_obj_id_t,int sc_idx) const155 vm_obj_id_t CVmObject::get_superclass(VMG_ vm_obj_id_t /*self*/,
156 int sc_idx) const
157 {
158 vm_meta_entry_t *entry;
159
160 /* we only have one superclass */
161 if (sc_idx != 0)
162 return VM_INVALID_OBJ;
163
164 /* look up the metaclass entry */
165 entry = (G_meta_table
166 ->get_entry_from_reg(get_metaclass_reg()->get_reg_idx()));
167
168 /* return the IntrinsicClass object that represents this metaclass */
169 return (entry != 0 ? entry->class_obj_ : VM_INVALID_OBJ);
170 }
171
172 /*
173 * Get a property
174 */
get_prop(VMG_ vm_prop_id_t prop,vm_val_t * retval,vm_obj_id_t self,vm_obj_id_t * source_obj,uint * argc)175 int CVmObject::get_prop(VMG_ vm_prop_id_t prop, vm_val_t *retval,
176 vm_obj_id_t self, vm_obj_id_t *source_obj, uint *argc)
177 {
178 ushort func_idx;
179
180 /* translate the property index to an index into our function table */
181 func_idx = G_meta_table
182 ->prop_to_vector_idx(metaclass_reg_->get_reg_idx(), prop);
183
184 /* if we find it, the source object is the 'Object' intrinsic class */
185 *source_obj = metaclass_reg_->get_class_obj(vmg0_);
186
187 /* call the appropriate function */
188 return (this->*func_table_[func_idx])(vmg_ self, retval, argc,
189 prop, source_obj);
190 }
191
192 /*
193 * Inherit a property
194 */
inh_prop(VMG_ vm_prop_id_t prop,vm_val_t * retval,vm_obj_id_t self,vm_obj_id_t orig_target_obj,vm_obj_id_t defining_obj,vm_obj_id_t * source_obj,uint * argc)195 int CVmObject::inh_prop(VMG_ vm_prop_id_t prop, vm_val_t *retval,
196 vm_obj_id_t self, vm_obj_id_t orig_target_obj,
197 vm_obj_id_t defining_obj, vm_obj_id_t *source_obj,
198 uint *argc)
199 {
200 ushort func_idx;
201
202 /*
203 * We're inheriting. This is never called from native code, as native
204 * code does its inheriting directly through C++ calls to base class
205 * native code; hence, we can only be called from a byte-code modifier
206 * object.
207 *
208 * First, try looking for a native implementation. We can reach this
209 * point if a byte-code object overrides an intrinsic method, then
210 * inherits from the byte-code override.
211 */
212 func_idx = G_meta_table
213 ->prop_to_vector_idx(metaclass_reg_->get_reg_idx(), prop);
214 if (func_idx != 0)
215 {
216 /* the source object is the 'Object' intrinsic class */
217 *source_obj = metaclass_reg_->get_class_obj(vmg0_);
218
219 /* call the native implementation */
220 return (this->*func_table_[func_idx])(vmg_ self, retval, argc,
221 prop, source_obj);
222 }
223
224 /*
225 * We didn't find it among the intrinsic methods, so look at the
226 * modifier objects.
227 */
228 return find_modifier_prop(vmg_ prop, retval, self, orig_target_obj,
229 defining_obj, source_obj, argc);
230 }
231
232 /*
233 * Get a property that isn't defined in our property table
234 */
getp_undef(VMG_ vm_obj_id_t self,vm_val_t * retval,uint * argc,vm_prop_id_t prop,vm_obj_id_t * source_obj)235 int CVmObject::getp_undef(VMG_ vm_obj_id_t self,
236 vm_val_t *retval, uint *argc,
237 vm_prop_id_t prop, vm_obj_id_t *source_obj)
238 {
239 /*
240 * We didn't find a native implementation of the method, but there's
241 * still one more place to look: the "modifier" object for our class
242 * tree. Modifier objects are byte-code objects that can provide
243 * implementations of methods that add to intrinsic classes (modifiers
244 * can't override intrinsic methods, but they can add new methods).
245 *
246 * Since we're looking for a property on an initial get-property call
247 * (not an inheritance call), we don't yet have a defining object to
248 * find and skip in the inheritance tree, so use VM_INVALID_OBJ as the
249 * defining object. In addition, we're directly calling the method, so
250 * the target object is the same as the 'self' object.
251 */
252 return find_modifier_prop(vmg_ prop, retval, self, self,
253 VM_INVALID_OBJ, source_obj, argc);
254 }
255
/*
 *   Find a modifier property.  Searches the "modifier" objects attached
 *   to our intrinsic class chain (the byte-code 'modify' objects - see
 *   the notes below) for an implementation of the given property.
 *
 *   'defining_obj' is the object that defined the currently executing
 *   method when this is an inheritance step, or VM_INVALID_OBJ on an
 *   initial lookup; we skip modifier definitions until we've passed the
 *   defining object so that inheritance proceeds strictly up the chain.
 *   On success, returns true with *retval and *source_obj filled in by
 *   the modifier's inh_prop; otherwise returns false.
 */
int CVmObject::find_modifier_prop(VMG_ vm_prop_id_t prop, vm_val_t *retval,
                                  vm_obj_id_t self,
                                  vm_obj_id_t orig_target_obj,
                                  vm_obj_id_t defining_obj,
                                  vm_obj_id_t *source_obj,
                                  uint *argc)
{
    vm_meta_entry_t *entry;
    int found_def_obj;

    /* we haven't yet found the defining superclass */
    found_def_obj = FALSE;

    /* get my metaclass from the dependency table */
    entry = (G_meta_table
             ->get_entry_from_reg(get_metaclass_reg()->get_reg_idx()));

    /*
     *   if there's an associated intrinsic class object, check to see if
     *   it provides a user modifier object for this intrinsic class
     *   (each loop iteration moves one step up the intrinsic superclass
     *   chain)
     */
    while (entry != 0 && entry->class_obj_ != VM_INVALID_OBJ)
    {
        vm_obj_id_t mod_obj;

        /* ask the intrinsic class object for the user modifier object */
        mod_obj = ((CVmObjClass *)vm_objp(vmg_ entry->class_obj_))
                  ->get_mod_obj();

        /*
         *   If we have a defining object, we must ignore objects in the
         *   superclass tree until we find the defining object.  Therefore,
         *   scan up the superclass tree for mod_obj and see if we can find
         *   the defining object; when we find it, we can start looking at
         *   objects for real at the defining object's superclass.
         *
         *   (Superclasses in modifier objects aren't real superclasses,
         *   because modifier objects are classless.  Instead, the superclass
         *   list simply implements the 'modify' chain.)
         */
        if (mod_obj != VM_INVALID_OBJ
            && defining_obj != VM_INVALID_OBJ && !found_def_obj)
        {
            /*
             *   if the defining object isn't among the byte-code
             *   superclasses of the modifier object, we must skip this
             *   entire intrinsic class and move to the intrinsic superclass
             */
            if (mod_obj == defining_obj
                || vm_objp(vmg_ mod_obj)->is_instance_of(vmg_ defining_obj))
            {
                /*
                 *   the defining object is among my modifier family - this
                 *   means that this is the intrinsic superclass where we
                 *   found the modifier method
                 */
                found_def_obj = TRUE;
            }
            else
            {
                /*
                 *   The current defining object is not part of the modifier
                 *   chain for this intrinsic class, so we've already skipped
                 *   past this point in the intrinsic superclass tree on past
                 *   inheritances.  Simply move to the next intrinsic class
                 *   and look at its modifier.
                 */
                goto next_intrinsic_sc;
            }
        }

        /*
         *   If there's a modifier object, send the property request to it.
         *   We are effectively delegating the method call to the modifier
         *   object, so we must use the "inherited property" call, not the
         *   plain get_prop() call: 'self' is the original self, but the
         *   target object is the intrinsic class modifier object.
         */
        if (mod_obj != VM_INVALID_OBJ
            && vm_objp(vmg_ mod_obj)->inh_prop(
                vmg_ prop, retval, self, mod_obj, defining_obj,
                source_obj, argc))
            return TRUE;

        /* we didn't find it in this object, so look at its super-metaclass */
    next_intrinsic_sc:
        if (entry->meta_->get_supermeta_reg() != 0)
        {
            /* get the super-metaclass ID */
            entry = (G_meta_table
                     ->get_entry_from_reg(entry->meta_
                                          ->get_supermeta_reg()
                                          ->get_reg_idx()));

            /*
             *   if we've already found the previous defining intrinsic
             *   class, we can forget about the previous defining modifier
             *   object now: since we're moving to a new intrinsic
             *   superclass, we will have no superclass relation to the
             *   previous defining object in the new modifier family, so we
             *   can simply use the next definition of the property we find
             */
            if (found_def_obj)
                defining_obj = VM_INVALID_OBJ;
        }
        else
        {
            /* no super-metaclass - give up */
            break;
        }
    }

    /* we don't have a modifier object, so the property is undefined */
    return FALSE;
}
374
375 /*
376 * property evaluator - ofKind
377 */
getp_of_kind(VMG_ vm_obj_id_t self,vm_val_t * retval,uint * argc,vm_prop_id_t,vm_obj_id_t *)378 int CVmObject::getp_of_kind(VMG_ vm_obj_id_t self,
379 vm_val_t *retval, uint *argc,
380 vm_prop_id_t, vm_obj_id_t *)
381 {
382 vm_val_t sc;
383 static CVmNativeCodeDesc desc(1);
384
385 /* check arguments */
386 if (get_prop_check_argc(retval, argc, &desc))
387 return TRUE;
388
389 /* pop the superclass, and make sure it's an object */
390 G_stk->pop(&sc);
391 if (sc.typ != VM_OBJ)
392 err_throw(VMERR_OBJ_VAL_REQD);
393
394 /* check for an identity test */
395 if (sc.val.obj == self)
396 {
397 /* x.ofKind(x) == true */
398 retval->set_true();
399 }
400 else
401 {
402 /* check to see if the object is a superclass of ours */
403 retval->set_logical(is_instance_of(vmg_ sc.val.obj));
404 }
405
406 /* handled */
407 return TRUE;
408 }
409
410 /*
411 * property evaluator - isClass
412 */
getp_is_class(VMG_ vm_obj_id_t self,vm_val_t * retval,uint * argc,vm_prop_id_t,vm_obj_id_t *)413 int CVmObject::getp_is_class(VMG_ vm_obj_id_t self,
414 vm_val_t *retval, uint *argc,
415 vm_prop_id_t, vm_obj_id_t *)
416 {
417 static CVmNativeCodeDesc desc(0);
418
419 /* check arguments */
420 if (get_prop_check_argc(retval, argc, &desc))
421 return TRUE;
422
423 /* indicate whether or not we're a class object */
424 retval->set_logical(is_class_object(vmg_ self));
425
426 /* handled */
427 return TRUE;
428 }
429
430 /*
431 * property evaluator - isTransient
432 */
getp_is_transient(VMG_ vm_obj_id_t self,vm_val_t * retval,uint * argc,vm_prop_id_t,vm_obj_id_t *)433 int CVmObject::getp_is_transient(VMG_ vm_obj_id_t self,
434 vm_val_t *retval, uint *argc,
435 vm_prop_id_t, vm_obj_id_t *)
436 {
437 static CVmNativeCodeDesc desc(0);
438
439 /* check arguments */
440 if (get_prop_check_argc(retval, argc, &desc))
441 return TRUE;
442
443 /* indicate whether or not we're transient */
444 retval->set_logical(G_obj_table->is_obj_transient(self));
445
446 /* handled */
447 return TRUE;
448 }
449
450 /*
451 * property evaluator - getSuperclassList
452 */
getp_sclist(VMG_ vm_obj_id_t self,vm_val_t * retval,uint * argc,vm_prop_id_t,vm_obj_id_t *)453 int CVmObject::getp_sclist(VMG_ vm_obj_id_t self,
454 vm_val_t *retval, uint *argc,
455 vm_prop_id_t, vm_obj_id_t *)
456 {
457 size_t sc_cnt;
458 vm_obj_id_t lst_obj;
459 CVmObjList *lstp;
460 size_t i;
461 static CVmNativeCodeDesc desc(0);
462
463 /* check arguments */
464 if (get_prop_check_argc(retval, argc, &desc))
465 return TRUE;
466
467 /* push a self-reference for GC protection */
468 G_interpreter->push_obj(vmg_ self);
469
470 /* get the number of superclasses */
471 sc_cnt = get_superclass_count(vmg_ self);
472
473 /* allocate a list for the results */
474 lst_obj = CVmObjList::create(vmg_ FALSE, sc_cnt);
475 lstp = (CVmObjList *)vm_objp(vmg_ lst_obj);
476
477 /* build the superclass list */
478 for (i = 0 ; i < sc_cnt ; ++i)
479 {
480 vm_val_t ele_val;
481
482 /* get this superclass */
483 ele_val.set_obj(get_superclass(vmg_ self, i));
484
485 /* set the list element */
486 lstp->cons_set_element(i, &ele_val);
487 }
488
489 /* discard our GC protection */
490 G_stk->discard();
491
492 /* return the list */
493 retval->set_obj(lst_obj);
494
495 /* handled */
496 return TRUE;
497 }
498
/*
 *   property evaluator - propDefined: determine whether the given
 *   property is defined on 'self'.
 *
 *   Stack arguments: the property to test, plus an optional flags value
 *   (one of the VMOBJ_PROPDEF_xxx codes) selecting the kind of test; the
 *   default is VMOBJ_PROPDEF_ANY.  The result is a true/nil value, or
 *   the defining class for VMOBJ_PROPDEF_GET_CLASS.
 */
int CVmObject::getp_propdef(VMG_ vm_obj_id_t self,
                            vm_val_t *retval, uint *in_argc,
                            vm_prop_id_t, vm_obj_id_t *)
{
    uint argc = (in_argc != 0 ? *in_argc : 0);
    vm_val_t val;
    int flags;
    int found;
    vm_prop_id_t prop;
    vm_obj_id_t source_obj;
    /* one required argument (the property), one optional (the flags) */
    static CVmNativeCodeDesc desc(1, 1);

    /* check arguments */
    if (get_prop_check_argc(retval, in_argc, &desc))
        return TRUE;

    /* pop the property address to test */
    G_interpreter->pop_prop(vmg_ &val);
    prop = val.val.prop;

    /* if we have the flag argument, get it; otherwise, use the default */
    if (argc >= 2)
    {
        /* get the flag value */
        G_interpreter->pop_int(vmg_ &val);
        flags = (int)val.val.intval;
    }
    else
    {
        /* use the default flags */
        flags = VMOBJ_PROPDEF_ANY;
    }

    /* presume we won't find a valid source object */
    source_obj = VM_INVALID_OBJ;

    /* look up the property */
    found = get_prop(vmg_ prop, &val, self, &source_obj, 0);

    /*
     *   If we found a result, check to see if it's an intrinsic class
     *   modifier object.  If it is, replace it with its intrinsic class:
     *   modifier objects are invisible through the reflection mechanism, and
     *   appear to be the actual intrinsic classes they modify.
     */
    if (found && CVmObjIntClsMod::is_intcls_mod_obj(vmg_ source_obj))
        source_obj = find_intcls_for_mod(vmg_ self, source_obj);

    /* check the flags */
    switch(flags)
    {
    case VMOBJ_PROPDEF_ANY:
        /* return true if the property is defined */
        retval->set_logical(found);
        break;

    case VMOBJ_PROPDEF_DIRECTLY:
        /* return true if the property is defined directly */
        retval->set_logical(found && source_obj == self);
        break;

    case VMOBJ_PROPDEF_INHERITS:
        /* return true if the property is inherited only */
        retval->set_logical(found && source_obj != self);
        break;

    case VMOBJ_PROPDEF_GET_CLASS:
        /* return the defining class, or nil if it's not defined */
        if (found)
        {
            /*
             *   If we got a valid source object, return it.  If we didn't
             *   get a valid source object, but we found the property,
             *   return 'self' as the result; this isn't exactly right, but
             *   this should only be possible when the source object is an
             *   intrinsic class for which no intrinsic class object is
             *   defined, in which case the best we can do is provide 'self'
             *   as the answer.
             */
            retval->set_obj(source_obj != VM_INVALID_OBJ ? source_obj : self);
        }
        else
        {
            /* didn't find it - the return value is nil */
            retval->set_nil();
        }
        break;

    default:
        /* other flags are invalid */
        err_throw(VMERR_BAD_VAL_BIF);
        break;
    }

    /* handled */
    return TRUE;
}
599
600 /*
601 * Find the intrinsic class which the given modifier object modifies. This
602 * can only be used with a modifier that modifies my intrinsic class or one
603 * of its intrinsic superclasses.
604 */
find_intcls_for_mod(VMG_ vm_obj_id_t self,vm_obj_id_t mod_obj)605 vm_obj_id_t CVmObject::find_intcls_for_mod(VMG_ vm_obj_id_t self,
606 vm_obj_id_t mod_obj)
607 {
608 vm_meta_entry_t *entry;
609
610 /* get my metaclass from the dependency table */
611 entry = (G_meta_table
612 ->get_entry_from_reg(get_metaclass_reg()->get_reg_idx()));
613
614 /*
615 * if there's an intrinsic class object for the metaclass, ask it to do
616 * the work
617 */
618 if (entry != 0 && entry->class_obj_ != VM_INVALID_OBJ)
619 return (((CVmObjClass *)vm_objp(vmg_ entry->class_obj_))
620 ->find_mod_src_obj(vmg_ entry->class_obj_, mod_obj));
621
622 /* there's no intrinsic metaclass, so we can't find what we need */
623 return VM_INVALID_OBJ;
624 }
625
/*
 *   property evaluator - propInherited: determine whether the given
 *   property is inherited from a point in the superclass tree above the
 *   given defining object.
 *
 *   Stack arguments: the property to test, the original target object,
 *   the defining object, and an optional flags value (VMOBJ_PROPDEF_xxx;
 *   only ANY and GET_CLASS are valid here).  The result is a true/nil
 *   value, or the defining class for VMOBJ_PROPDEF_GET_CLASS.
 */
int CVmObject::getp_propinh(VMG_ vm_obj_id_t self,
                            vm_val_t *retval, uint *in_argc,
                            vm_prop_id_t, vm_obj_id_t *)
{
    uint argc = (in_argc != 0 ? *in_argc : 0);
    vm_val_t val;
    int flags;
    int found;
    vm_prop_id_t prop;
    vm_obj_id_t source_obj;
    vm_obj_id_t orig_target_obj;
    vm_obj_id_t defining_obj;
    /* three required arguments, one optional (the flags) */
    static CVmNativeCodeDesc desc(3, 1);

    /* check arguments */
    if (get_prop_check_argc(retval, in_argc, &desc))
        return TRUE;

    /* pop the property address to test */
    G_interpreter->pop_prop(vmg_ &val);
    prop = val.val.prop;

    /* get the original target object */
    G_interpreter->pop_obj(vmg_ &val);
    orig_target_obj = val.val.obj;

    /* get the defining object */
    G_interpreter->pop_obj(vmg_ &val);
    defining_obj = val.val.obj;

    /* if we have the flag argument, get it; otherwise, use the default */
    if (argc >= 4)
    {
        /* get the flag value */
        G_interpreter->pop_int(vmg_ &val);
        flags = (int)val.val.intval;
    }
    else
    {
        /* use the default flags */
        flags = VMOBJ_PROPDEF_ANY;
    }

    /* presume we won't find a valid source object */
    source_obj = VM_INVALID_OBJ;

    /* look up the property via the inheritance mechanism */
    found = inh_prop(vmg_ prop, &val, self, orig_target_obj, defining_obj,
                     &source_obj, 0);

    /* check the flags */
    switch(flags)
    {
    case VMOBJ_PROPDEF_ANY:
        /* return true if the property is defined */
        retval->set_logical(found);
        break;

    case VMOBJ_PROPDEF_GET_CLASS:
        /* return the defining class, or nil if it's not defined */
        if (found)
        {
            /* return the source object, or 'self' if we didn't find one */
            retval->set_obj(source_obj != VM_INVALID_OBJ ? source_obj : self);
        }
        else
        {
            /* didn't find it - the return value is nil */
            retval->set_nil();
        }
        break;

    default:
        /* other flags are invalid */
        err_throw(VMERR_BAD_VAL_BIF);
        break;
    }

    /* handled */
    return TRUE;
}
710
711 /*
712 * property evaluator - propType
713 */
getp_proptype(VMG_ vm_obj_id_t self,vm_val_t * retval,uint * argc,vm_prop_id_t,vm_obj_id_t *)714 int CVmObject::getp_proptype(VMG_ vm_obj_id_t self,
715 vm_val_t *retval, uint *argc,
716 vm_prop_id_t, vm_obj_id_t *)
717 {
718 vm_val_t val;
719 vm_prop_id_t prop;
720 vm_obj_id_t source_obj;
721 static CVmNativeCodeDesc desc(1);
722
723 /* check arguments */
724 if (get_prop_check_argc(retval, argc, &desc))
725 return TRUE;
726
727 /* pop the property address to test */
728 G_interpreter->pop_prop(vmg_ &val);
729 prop = val.val.prop;
730
731 /* get the property value */
732 if (!get_prop(vmg_ prop, &val, self, &source_obj, 0))
733 {
734 /* the property isn't defined on the object - the result is nil */
735 retval->set_nil();
736 }
737 else
738 {
739 /* set the return value to the property's datatype value */
740 retval->set_datatype(vmg_ &val);
741 }
742
743 /* handled */
744 return TRUE;
745 }
746
747 /*
748 * property evaluator - getPropList
749 */
getp_get_prop_list(VMG_ vm_obj_id_t self,vm_val_t * retval,uint * argc,vm_prop_id_t,vm_obj_id_t *)750 int CVmObject::getp_get_prop_list(VMG_ vm_obj_id_t self,
751 vm_val_t *retval, uint *argc,
752 vm_prop_id_t, vm_obj_id_t *)
753 {
754 static CVmNativeCodeDesc desc(0);
755
756 /* check arguments */
757 if (get_prop_check_argc(retval, argc, &desc))
758 return TRUE;
759
760 /* push a self-reference for gc protection */
761 G_stk->push()->set_obj(self);
762
763 /* build my property list */
764 build_prop_list(vmg_ self, retval);
765
766 /* discard the gc protection */
767 G_stk->discard();
768
769 /* handled */
770 return TRUE;
771 }
772
773 /*
774 * Build a list of properties directly defined by this instance
775 */
build_prop_list(VMG_ vm_obj_id_t,vm_val_t * retval)776 void CVmObject::build_prop_list(VMG_ vm_obj_id_t /*self*/, vm_val_t *retval)
777 {
778 /*
779 * by default, object instances have no directly defined properties,
780 * so create and return an empty list
781 */
782 retval->set_obj(CVmObjList::create(vmg_ FALSE, (size_t)0));
783 }
784
/*
 *   property evaluator - getPropParams: describe the parameter list of
 *   the given property.
 *
 *   Stack argument: the property to inspect.  Returns a three-element
 *   list [minArgs, optionalArgs, isVarargs] describing the method's
 *   parameters; a non-method (or undefined) property reports zero
 *   arguments and no varargs.
 */
int CVmObject::getp_get_prop_params(VMG_ vm_obj_id_t self,
                                    vm_val_t *retval, uint *argc,
                                    vm_prop_id_t, vm_obj_id_t *)
{
    vm_val_t val;
    vm_prop_id_t prop;
    vm_obj_id_t source_obj;
    int min_args, opt_args, varargs;
    static CVmNativeCodeDesc desc(1);
    CVmObjList *lst;

    /* check arguments */
    if (get_prop_check_argc(retval, argc, &desc))
        return TRUE;

    /* pop the property address to test */
    G_interpreter->pop_prop(vmg_ &val);
    prop = val.val.prop;

    /* push a self-reference while we're working (gc protection) */
    G_stk->push()->set_obj(self);

    /* get the property value */
    if (!get_prop(vmg_ prop, &val, self, &source_obj, 0))
    {
        /* no such method - no arguments */
        min_args = opt_args = 0;
        varargs = FALSE;
    }
    else if (val.typ == VM_CODEOFS)
    {
        /* a byte-code method - read its function header */
        CVmFuncPtr func;

        /* get the function header for the code offset */
        func.set((const uchar *)G_code_pool->get_ptr(val.val.ofs));

        /*
         *   Get the argument information from the function header.  Note
         *   that p-code methods cannot have optional arguments.
         */
        min_args = func.get_min_argc();
        opt_args = 0;
        varargs = func.is_varargs();

        /*
         *   re-evaluate the currently executing method's entry pointer, to
         *   ensure that the current code page is the most recently used
         *   page, in case we're on a swapping system (note that we don't
         *   attempt to retranslate the pointer, since we assume that we
         *   didn't actually swap it out just now - we assume that we have
         *   enough cache space to keep at least two pages in memory at
         *   once, and since the currently-executing code page should have
         *   been the most recently used page before our translation just
         *   above, it should not have been swapped out)
         */
        G_interpreter->touch_entry_ptr_page(vmg0_);
    }
    else if (val.typ == VM_NATIVE_CODE)
    {
        /* get the arguments from the native code descriptor */
        min_args = val.val.native_desc->min_argc_;
        opt_args = val.val.native_desc->opt_argc_;
        varargs = val.val.native_desc->varargs_;
    }
    else
    {
        /* it's not a function - no arguments */
        min_args = opt_args = 0;
        varargs = FALSE;
    }

    /*
     *   Allocate our return list.  We need three elements: [minArgs,
     *   optionalArgs, isVarargs].
     */
    retval->set_obj(CVmObjList::create(vmg_ FALSE, 3));

    /* get the list object, properly cast */
    lst = (CVmObjList *)vm_objp(vmg_ retval->val.obj);

    /* set the minimum argument count */
    val.set_int(min_args);
    lst->cons_set_element(0, &val);

    /* set the optional argument count */
    val.set_int(opt_args);
    lst->cons_set_element(1, &val);

    /* set the varargs flag */
    val.set_logical(varargs);
    lst->cons_set_element(2, &val);

    /* discard our self-reference */
    G_stk->discard();

    /* handled */
    return TRUE;
}
886
/*
 *   Call a static property.  The base object metaclass defines no static
 *   properties, so we simply decline; metaclass subclasses override this
 *   to provide their own class-level intrinsic methods.
 */
int CVmObject::call_stat_prop(VMG_ vm_val_t *retval, const uchar **pc_ptr,
                              uint *argc, vm_prop_id_t prop)
{
    /* not handled */
    return FALSE;
}
896
897
898 /* ------------------------------------------------------------------------ */
899 /*
900 * CVmMetaclass implementation
901 */
902
903 /*
904 * Get a metaclass's super-metaclass. We'll look up our super-metaclass
905 * in the metaclass registration table and return the IntrinsicClass
906 * object we find referenced there.
907 */
get_supermeta(VMG_ int idx) const908 vm_obj_id_t CVmMetaclass::get_supermeta(VMG_ int idx) const
909 {
910 vm_meta_entry_t *entry;
911
912 /* we only have one supermetaclass */
913 if (idx != 0)
914 return VM_INVALID_OBJ;
915
916 /* if I don't have a supermetaclass at all, return nil */
917 if (get_supermeta_reg() == 0)
918 return VM_INVALID_OBJ;
919
920 /* look up my supermetaclass entry */
921 entry = (G_meta_table->get_entry_from_reg(
922 get_supermeta_reg()->get_reg_idx()));
923
924 /* return the IntrinsicClass object that represents this metaclass */
925 return (entry != 0 ? entry->class_obj_ : VM_INVALID_OBJ);
926 }
927
928 /*
929 * Determine if I'm an instance of the given object. Most metaclasses
930 * inherit directly from CVmObject, so we'll return true only if the
931 * object is the CVmObject IntrinsicClass object
932 */
is_meta_instance_of(VMG_ vm_obj_id_t obj) const933 int CVmMetaclass::is_meta_instance_of(VMG_ vm_obj_id_t obj) const
934 {
935 vm_meta_entry_t *entry;
936 CVmMetaclass *sc;
937
938 /* iterate over my supermetaclasses */
939 for (sc = get_supermeta_reg() ; sc != 0 ; sc = sc->get_supermeta_reg())
940 {
941 /* look up the metaclass entry for this supermetaclass */
942 entry = (G_meta_table->get_entry_from_reg(sc->get_reg_idx()));
943
944 /*
945 * if the object matches the current superclass's IntrinsicClass
946 * object, we're a subclass of that object; otherwise we're not
947 */
948 if (entry != 0 && entry->class_obj_ == obj)
949 return TRUE;
950 }
951
952 /* it's not one of my superclasses */
953 return FALSE;
954 }
955
956 /*
957 * Get my intrinsic class object
958 */
get_class_obj(VMG0_) const959 vm_obj_id_t CVmMetaclass::get_class_obj(VMG0_) const
960 {
961 vm_meta_entry_t *entry;
962
963 /* get my metacalss entry */
964 entry = G_meta_table->get_entry_from_reg(get_reg_idx());
965
966 /* if we found our entry, return the class from the entry */
967 return (entry != 0 ? entry->class_obj_ : VM_INVALID_OBJ);
968 }
969
970 /* ------------------------------------------------------------------------ */
971 /*
972 * object table implementation
973 */
974
/*
 *   Initialize the object table: allocate the initial page slot array and
 *   reset all of the bookkeeping state (free list, GC queues, image
 *   pointer list, globals, and the post-load initialization table).
 *   Throws VMERR_OUT_OF_MEMORY if the page slot array can't be allocated.
 */
void CVmObjTable::init()
{
    /* allocate the initial set of page slots */
    page_slots_ = 10;
    pages_ = (CVmObjPageEntry **)t3malloc(page_slots_ * sizeof(*pages_));

    /* if that failed, throw an error */
    if (pages_ == 0)
        err_throw(VMERR_OUT_OF_MEMORY);

    /* no pages are in use yet */
    pages_used_ = 0;

    /* there are no free objects yet */
    first_free_ = 0;

    /* there's nothing in the GC work queue yet */
    gc_queue_head_ = VM_INVALID_OBJ;

    /* nothing in the finalizer work queue yet */
    finalize_queue_head_ = VM_INVALID_OBJ;

    /* we haven't allocated anything yet */
    allocs_since_gc_ = 0;

    /*
     *   Set the upper limit for allocations between garbage collection.
     *   We want to choose this number so that we balance the time it
     *   takes to collect garbage against the memory it consumes to leave
     *   it uncollected.
     */
    max_allocs_between_gc_ = 1000;

    /* enable the garbage collector */
    gc_enabled_ = TRUE;

    /* there are no saved image data pointers yet */
    image_ptr_head_ = 0;
    image_ptr_tail_ = 0;
    image_ptr_last_cnt_ = 0;

    /* no global pages yet */
    globals_ = 0;

    /* no global variables yet */
    global_var_head_ = 0;

    /* create the post_load_init() request table */
    post_load_init_table_ = new CVmHashTable(128, new CVmHashFuncCS(), TRUE);
}
1028
/*
 *   Delete the object table.  The destructor itself does nothing; the
 *   real teardown happens in delete_obj_table(), which is a separate
 *   method so that it can access the VM globals (see the comment there).
 */
CVmObjTable::~CVmObjTable()
{
}
1035
/*
 *   Delete the table.  (We need to separate this out into a method so
 *   that we can get access to the global variables.)
 *
 *   Teardown proceeds in two passes over the pages: first notify every
 *   live object of its deletion, then free the page memory itself - the
 *   notification pass must complete before any page is released, since
 *   notify_delete() handlers may refer to other objects.  After that we
 *   free the image pointer pages, the globals list, any leftover global
 *   variables, and the post-load initialization table.
 */
void CVmObjTable::delete_obj_table(VMG0_)
{
    size_t i;
    vm_image_ptr_page *ip_page;
    vm_image_ptr_page *ip_next;

    /* delete all entries in the post-load initialization table */
    post_load_init_table_->delete_all_entries();

    /* go through the pages and delete the entries */
    for (i = 0 ; i < pages_used_ ; ++i)
    {
        int j;
        CVmObjPageEntry *entry;

        /* delete all of the objects on the page */
        for (j = 0, entry = pages_[i] ; j < VM_OBJ_PAGE_CNT ; ++j, ++entry)
        {
            /* if this entry is still in use, delete it */
            if (!entry->free_)
                entry->get_vm_obj()->notify_delete(vmg_ entry->in_root_set_);
        }
    }

    /* delete each page we've allocated */
    for (i = 0 ; i < pages_used_ ; ++i)
    {
        /* delete this page */
        t3free(pages_[i]);
    }

    /* free the master page table */
    t3free(pages_);

    /* we no longer have any pages */
    pages_ = 0;
    page_slots_ = 0;
    pages_used_ = 0;

    /* delete each object image data pointer page */
    for (ip_page = image_ptr_head_ ; ip_page != 0 ; ip_page = ip_next)
    {
        /* remember the next page (before we delete this one) */
        ip_next = ip_page->next_;

        /* delete this page */
        t3free(ip_page);
    }

    /* delete the linked list of globals */
    if (globals_ != 0)
    {
        delete globals_;
        globals_ = 0;
    }

    /*
     *   delete any left-over global variables (these should always be
     *   deleted by subsystems before we get here, but this is the last
     *   chance, so clean them up manually)
     */
    while (global_var_head_ != 0)
        delete_global_var(global_var_head_);

    /* delete the post-load initialization table */
    delete post_load_init_table_;
    post_load_init_table_ = 0;
}
1108
1109 /*
1110 * Enable or disable garbage collection
1111 */
enable_gc(VMG_ int enable)1112 int CVmObjTable::enable_gc(VMG_ int enable)
1113 {
1114 int old_enable;
1115
1116 /* remember the old status for returning */
1117 old_enable = gc_enabled_;
1118
1119 /* set the new status */
1120 gc_enabled_ = enable;
1121
1122 /*
1123 * if we're enabling GC after it was previously disabled, check to
1124 * see if we should perform a GC pass now (but don't count this as a
1125 * separate allocation)
1126 */
1127 if (!old_enable && enable)
1128 alloc_check_gc(vmg_ FALSE);
1129
1130 /* return the previous status */
1131 return old_enable;
1132 }
1133
1134 /*
1135 * allocate a new object ID
1136 */
alloc_obj(VMG_ int in_root_set,int can_have_refs,int can_have_weak_refs)1137 vm_obj_id_t CVmObjTable::alloc_obj(VMG_ int in_root_set,
1138 int can_have_refs, int can_have_weak_refs)
1139 {
1140 vm_obj_id_t ret;
1141 CVmObjPageEntry *entry;
1142
1143 /* count the allocation and maybe perform garbage collection */
1144 alloc_check_gc(vmg_ TRUE);
1145
1146 /* if the free list is empty, allocate a new page of object entries */
1147 if (first_free_ == VM_INVALID_OBJ)
1148 alloc_new_page();
1149
1150 /* remember the first item in the free list - this is our result */
1151 ret = first_free_;
1152
1153 /* get the object table entry for the ID */
1154 entry = get_entry(ret);
1155
1156 /* unlink new entry from the free list */
1157 first_free_ = entry->next_obj_;
1158 if (entry->next_obj_ != VM_INVALID_OBJ)
1159 get_entry(entry->next_obj_)->ptr_.prev_free_ = entry->ptr_.prev_free_;
1160
1161 /* initialize the entry */
1162 init_entry_for_alloc(ret, entry, in_root_set,
1163 can_have_refs, can_have_weak_refs);
1164
1165 /* return the free object */
1166 return ret;
1167 }
1168
1169 /*
1170 * Run garbage collection before allocating an object
1171 */
gc_before_alloc(VMG0_)1172 void CVmObjTable::gc_before_alloc(VMG0_)
1173 {
1174 /* run a full garbage collection pass */
1175 gc_pass_init(vmg0_);
1176 gc_pass_finish(vmg0_);
1177 }
1178
1179 /*
1180 * Allocate an object with a particular ID
1181 */
alloc_obj_with_id(vm_obj_id_t id,int in_root_set,int can_have_refs,int can_have_weak_refs)1182 void CVmObjTable::alloc_obj_with_id(vm_obj_id_t id, int in_root_set,
1183 int can_have_refs, int can_have_weak_refs)
1184 {
1185 CVmObjPageEntry *entry;
1186
1187 /*
1188 * if the page containing the given object ID hasn't been allocated,
1189 * allocate pages until it's available
1190 */
1191 while (id >= pages_used_ * VM_OBJ_PAGE_CNT)
1192 alloc_new_page();
1193
1194 /* get the object table entry for the ID */
1195 entry = get_entry(id);
1196
1197 /* if the desired object ID is already taken, it's an error */
1198 if (!entry->free_)
1199 err_throw(VMERR_OBJ_IN_USE);
1200
1201 /* unlink the item - set the previous item's forward pointer... */
1202 if (entry->ptr_.prev_free_ == VM_INVALID_OBJ)
1203 first_free_ = entry->next_obj_;
1204 else
1205 get_entry(entry->ptr_.prev_free_)->next_obj_ = entry->next_obj_;
1206
1207 /* ...and the next items back pointer */
1208 if (entry->next_obj_ != VM_INVALID_OBJ)
1209 get_entry(entry->next_obj_)->ptr_.prev_free_ = entry->ptr_.prev_free_;
1210
1211 /* initialize the entry for allocation */
1212 init_entry_for_alloc(id, entry, in_root_set,
1213 can_have_refs, can_have_weak_refs);
1214 }
1215
1216 /*
1217 * Initialize an object table entry that we've just allocated
1218 */
init_entry_for_alloc(vm_obj_id_t id,CVmObjPageEntry * entry,int in_root_set,int can_have_refs,int can_have_weak_refs)1219 void CVmObjTable::init_entry_for_alloc(vm_obj_id_t id,
1220 CVmObjPageEntry *entry,
1221 int in_root_set,
1222 int can_have_refs,
1223 int can_have_weak_refs)
1224 {
1225 /* mark the entry as in use */
1226 entry->free_ = FALSE;
1227
1228 /* no undo savepoint has been created since the object was created */
1229 entry->in_undo_ = FALSE;
1230
1231 /* mark the object as being in the root set if appropriate */
1232 entry->in_root_set_ = in_root_set;
1233
1234 /* presume it's an ordinary persistent object */
1235 entry->transient_ = FALSE;
1236
1237 /* presume it won't need post-load initialization */
1238 entry->requested_post_load_init_ = FALSE;
1239
1240 /* set the GC characteristics as requested */
1241 entry->can_have_refs_ = can_have_refs;
1242 entry->can_have_weak_refs_ = can_have_weak_refs;
1243
1244 /*
1245 * Mark the object as initially unreachable and unfinalizable. It's
1246 * not necessarily really unreachable at this point, but we mark it
1247 * as such because the garbage collector hasn't explicitly traced
1248 * the object to be reachable. The initial conditions for garbage
1249 * collection are that all objects not in the root set and not
1250 * finalizable are marked as unreachable; since we're not in a gc
1251 * pass right now (we can't be - memory cannot be allocated during a
1252 * gc pass), we know that we must establish initial gc conditions
1253 * for the next time we start a gc pass.
1254 */
1255 entry->reachable_ = VMOBJ_UNREACHABLE;
1256 entry->finalize_state_ = VMOBJ_UNFINALIZABLE;
1257
1258 /* add it to the GC work queue for the next GC pass */
1259 if (in_root_set)
1260 add_to_gc_queue(id, entry, VMOBJ_REACHABLE);
1261 }
1262
1263 #if 0 // moved to in-line in header, since it's called *a lot*
1264 /*
1265 * get the page entry for a given ID
1266 */
1267 CVmObjPageEntry *CVmObjTable::get_entry(vm_obj_id_t id) const
1268 {
1269 size_t main_idx;
1270 size_t page_idx;
1271
1272 /* get the index of the page in the main array */
1273 main_idx = (size_t)(id >> VM_OBJ_PAGE_CNT_LOG2);
1274
1275 /* get the index within the page */
1276 page_idx = (size_t)(id & (VM_OBJ_PAGE_CNT - 1));
1277
1278 /* get the object */
1279 return &pages_[main_idx][page_idx];
1280 }
1281 #endif
1282
1283 /*
1284 * Delete an object, given the object table entry.
1285 */
delete_entry(VMG_ vm_obj_id_t id,CVmObjPageEntry * entry)1286 void CVmObjTable::delete_entry(VMG_ vm_obj_id_t id, CVmObjPageEntry *entry)
1287 {
1288 /* mark the object table entry as free */
1289 entry->free_ = TRUE;
1290
1291 /* it's not in the root set if it's free */
1292 entry->in_root_set_ = FALSE;
1293
1294 /*
1295 * notify the object that it's being deleted - this will let the
1296 * object release any additional resources (such as variable-size
1297 * heap space) that it's holding
1298 */
1299 entry->get_vm_obj()->notify_delete(vmg_ FALSE);
1300
1301 /*
1302 * remove any post-load initialization request for the object, if it
1303 * ever requested post-load initialization
1304 */
1305 if (entry->requested_post_load_init_)
1306 remove_post_load_init(id);
1307
1308 /* link this object into the head of the free list */
1309 entry->next_obj_ = first_free_;
1310
1311 /* link the previous head back to this object */
1312 if (first_free_ != VM_INVALID_OBJ)
1313 get_entry(first_free_)->ptr_.prev_free_ = id;
1314
1315 /* this object doesn't have a previous entry */
1316 entry->ptr_.prev_free_ = VM_INVALID_OBJ;
1317
1318 /* it's now the first entry in the list */
1319 first_free_ = id;
1320 }
1321
1322
1323 /*
1324 * allocate a new page of objects
1325 */
alloc_new_page()1326 void CVmObjTable::alloc_new_page()
1327 {
1328 size_t i;
1329 vm_obj_id_t id;
1330 CVmObjPageEntry *entry;
1331
1332 /* first, make sure we have room in the master page list */
1333 if (pages_used_ == page_slots_)
1334 {
1335 /* increase the number of page slots */
1336 page_slots_ += 10;
1337
1338 /* allocate space for the increased number of slots */
1339 pages_ = (CVmObjPageEntry **)t3realloc(
1340 pages_, page_slots_ * sizeof(*pages_));
1341 }
1342
1343 /* allocate a new page */
1344 pages_[pages_used_] =
1345 (CVmObjPageEntry *)t3malloc(VM_OBJ_PAGE_CNT * sizeof(*pages_[0]));
1346
1347 /* if that failed, throw an error */
1348 if (pages_[pages_used_] == 0)
1349 err_throw(VMERR_OUT_OF_MEMORY);
1350
1351 /*
1352 * initialize the new page to be entirely free - add each element of
1353 * the page to the free list
1354 */
1355 entry = pages_[pages_used_];
1356 i = 0;
1357 id = pages_used_ * VM_OBJ_PAGE_CNT;
1358
1359 /*
1360 * if this is the start of the very first page, leave off the first
1361 * object, since its ID is invalid
1362 */
1363 if (id == VM_INVALID_OBJ)
1364 {
1365 /* mark the object as free so we don't try to free it later */
1366 entry->free_ = TRUE;
1367
1368 /* don't put the invalid object in the free list */
1369 ++i;
1370 ++entry;
1371 ++id;
1372 }
1373
1374 /* loop over each object in this page */
1375 for ( ; i < VM_OBJ_PAGE_CNT ; ++i, ++entry, ++id)
1376 {
1377 /* set the forward pointer in this object */
1378 entry->next_obj_ = first_free_;
1379
1380 /* set the back pointer in the previous object in the list */
1381 if (first_free_ != VM_INVALID_OBJ)
1382 get_entry(first_free_)->ptr_.prev_free_ = id;
1383
1384 /* there's nothing before this entry yet */
1385 entry->ptr_.prev_free_ = VM_INVALID_OBJ;
1386
1387 /* this is now the head of the list */
1388 first_free_ = id;
1389
1390 /* mark it free */
1391 entry->free_ = TRUE;
1392
1393 /* presume it's not part of the root set */
1394 entry->in_root_set_ = FALSE;
1395
1396 /*
1397 * mark it initially unreachable and finalized, since it's not
1398 * even allocated yet
1399 */
1400 entry->reachable_ = VMOBJ_UNREACHABLE;
1401 entry->finalize_state_ = VMOBJ_FINALIZED;
1402 }
1403
1404 /* count the new page we've allocated */
1405 ++pages_used_;
1406 }
1407
1408 /*
1409 * Add an object to the list of machine globals.
1410 */
add_to_globals(vm_obj_id_t obj)1411 void CVmObjTable::add_to_globals(vm_obj_id_t obj)
1412 {
1413 CVmObjGlobPage *pg;
1414
1415 /*
1416 * if this is a root set object, there is no need to mark it as
1417 * global, since it is inherently uncollectable as an image file
1418 * object to begin with
1419 */
1420 if (get_entry(obj)->in_root_set_)
1421 return;
1422
1423 /* if we have any global pages allocated, try adding to the head page */
1424 if (globals_ != 0 && globals_->add_entry(obj))
1425 {
1426 /* we successfully added it to the head page - we're done */
1427 return;
1428 }
1429
1430 /*
1431 * either the head page is full, or we haven't allocated any global
1432 * pages at all yet; in either case, allocate a new page and link it
1433 * at the head of our list
1434 */
1435 pg = new CVmObjGlobPage();
1436 pg->nxt_ = globals_;
1437 globals_ = pg;
1438
1439 /*
1440 * add the object to the new page - it must fit, since the new page is
1441 * empty
1442 */
1443 globals_->add_entry(obj);
1444 }
1445
1446 /*
1447 * Collect all garbage. This does a complete garbage collection pass,
1448 * returning only after all unreachable objects have been collected. If
1449 * incremental garbage collection is not required, the caller can simply
1450 * invoke this routine to do the entire operation in a single call.
1451 */
gc_full(VMG0_)1452 void CVmObjTable::gc_full(VMG0_)
1453 {
1454 /*
1455 * run the initial pass to mark globally-reachable objects, then run
1456 * the garbage collector to completion
1457 */
1458 gc_pass_init(vmg0_);
1459 gc_pass_finish(vmg0_);
1460 }
1461
1462 /*
1463 * Garbage collector - initialize. Add all globally-reachable objects
1464 * to the work queue.
1465 *
1466 * We assume that the following initial conditions hold: all objects
1467 * except root set objects are marked as unreferenced, and all root set
1468 * objects are marked as referenced; all root set objects are in the GC
1469 * work queue. So, we don't need to worry about finding root objects or
1470 * initializing the other objects at this point.
1471 */
gc_pass_init(VMG0_)1472 void CVmObjTable::gc_pass_init(VMG0_)
1473 {
1474 /*
1475 * reset the allocations-since-gc counter, since this is now the
1476 * last gc pass, and we obviously haven't performed any allocations
1477 * since this gc pass yet
1478 */
1479 allocs_since_gc_ = 0;
1480
1481 /* trace objects reachable from the stack */
1482 gc_trace_stack(vmg0_);
1483
1484 /* trace objects reachable from imports */
1485 gc_trace_imports(vmg0_);
1486
1487 /* trace objects reachable from machine globals */
1488 gc_trace_globals(vmg0_);
1489
1490 /*
1491 * Process undo records - for each undo record, mark any referenced
1492 * objects as reachable. Undo records are part of the root set.
1493 */
1494 G_undo->gc_mark_refs(vmg0_);
1495 }
1496
1497 /*
1498 * Garbage collection - continue processing the work queue. This
1499 * processes a set of entries from the work queue. This routine can be
1500 * used for incremental garbage collection: after calling
1501 * gc_pass_init(), the caller can repeatedly invoke this routine until
1502 * it returns false. Since this routine will return after a short time
1503 * even if there's more work left to do, other operations (such as
1504 * processing user input) can be interleaved in a single thread with
1505 * garbage collection.
1506 *
1507 * The actual number of entries that we process is configurable at VM
1508 * compile-time via VM_GC_WORK_INCREMENT. The point of running the GC
1509 * incrementally is to allow GC work to be interleaved with long-running
1510 * user I/O operations (such as reading a line of text from the
1511 * keyboard) in the foreground thread, so the work increment should be
1512 * chosen so that each call to this routine completes quickly enough
1513 * that the user will perceive no delay.
1514 *
1515 * Returns true if more work remains to be done, false if not. The
1516 * caller should invoke this routine repeatedly until it returns false.
1517 */
gc_pass_continue(VMG_ int trace_transient)1518 int CVmObjTable::gc_pass_continue(VMG_ int trace_transient)
1519 {
1520 int cnt;
1521
1522 /*
1523 * keep going until we exhaust the queue or run for our maximum
1524 * number of iterations
1525 */
1526 for (cnt = VM_GC_WORK_INCREMENT ;
1527 cnt != 0 && gc_queue_head_ != VM_INVALID_OBJ ; --cnt)
1528 {
1529 vm_obj_id_t cur;
1530 CVmObjPageEntry *entry;
1531
1532 /* get the next item from the work queue */
1533 cur = gc_queue_head_;
1534
1535 /* get this object's entry */
1536 entry = get_entry(cur);
1537
1538 /* remove this entry from the work queue */
1539 gc_queue_head_ = entry->next_obj_;
1540
1541 /*
1542 * Tell this object to mark its references. Mark the referenced
1543 * objects with the same state as this object, if they're not
1544 * already marked with a stronger state.
1545 *
1546 * If we're not tracing transients, do not trace this object if
1547 * it's transient.
1548 */
1549 if (trace_transient || !entry->transient_)
1550 entry->get_vm_obj()->mark_refs(vmg_ entry->reachable_);
1551 }
1552
1553 /*
1554 * return true if there's more work to do; there's more work to do
1555 * if we have any objects left in the gc work queue
1556 */
1557 return gc_queue_head_ != VM_INVALID_OBJ;
1558 }
1559
1560 /*
1561 * Finish garbage collection. We'll finish any work remaining in the
1562 * work queue, so this is safe to call at any time after gc_pass_init(),
1563 * after any number of calls (even zero) to gc_pass_continue().
1564 */
void CVmObjTable::gc_pass_finish(VMG0_)
{
    CVmObjPageEntry **pg;
    CVmObjPageEntry *entry;
    size_t i;
    size_t j;
    vm_obj_id_t id;

    /*
     *   Make sure we're done processing the work queue -- keep calling
     *   gc_pass_continue() (via gc_trace_work_queue) until it indicates
     *   that it's finished.  This trace includes transient objects.
     */
    gc_trace_work_queue(vmg_ TRUE);

    /*
     *   We've now marked everything that's reachable from the root set as
     *   VMOBJ_REACHABLE.  We can therefore determine the set of objects
     *   that are newly 'finalizable' - an object becomes finalizable when
     *   it was previously 'unfinalizable' and is not reachable, because we
     *   can finalize an object any time after it first becomes unreachable.
     *   So, scan all objects for eligibility for the 'finalizable'
     *   transition, and make the transition in those objects.
     */
    for (id = 0, i = pages_used_, pg = pages_ ; i > 0 ; ++pg, --i)
    {
        /* go through each entry on this page */
        for (j = VM_OBJ_PAGE_CNT, entry = *pg ; j > 0 ; --j, ++entry, ++id)
        {
            /*
             *   if this entry is not free and is not in the root set, check
             *   to see if its finalization status is changing
             */
            if (!entry->free_ && !entry->in_root_set_)
            {
                /*
                 *   If the entry is not reachable, and was previously
                 *   unfinalizable, it is now finalizable.
                 *
                 *   Note that an object must be fully VMOBJ_REACHABLE to
                 *   avoid becoming finalizable here: both unreachable
                 *   objects and 'f-reachable' objects make the
                 *   transition, since the latter are reachable only from
                 *   other finalizable objects.
                 */
                if (entry->reachable_ != VMOBJ_REACHABLE
                    && entry->finalize_state_ == VMOBJ_UNFINALIZABLE)
                {
                    /*
                     *   This object is newly finalizable.  If it has no
                     *   finalizer, the object can go directly to the
                     *   'finalized' state; otherwise, add it to the queue
                     *   of objects with pending finalizers.
                     */
                    if (entry->get_vm_obj()->has_finalizer(vmg_ id))
                    {
                        /*
                         *   This object is not reachable from the root set
                         *   and was previously unfinalizable.  Make the
                         *   object finalizable.
                         */
                        entry->finalize_state_ = VMOBJ_FINALIZABLE;
                    }
                    else
                    {
                        /*
                         *   the entry has no finalizer, so we can make this
                         *   object 'finalized' immediately
                         */
                        entry->finalize_state_ = VMOBJ_FINALIZED;
                    }
                }

                /*
                 *   If this object is finalizable, add it to the work queue
                 *   in state "f-reachable."  We must mark everything this
                 *   object references, directly or indirectly, as
                 *   f-reachable, which we'll do with another pass through
                 *   the gc queue momentarily.
                 */
                if (entry->finalize_state_ == VMOBJ_FINALIZABLE)
                {
                    /*
                     *   this object and all of the objects it references
                     *   are "f-reachable"
                     */
                    add_to_gc_queue(id, entry, VMOBJ_F_REACHABLE);
                }
            }
        }
    }

    /*
     *   During the scan above, we put all of the finalizable objects in the
     *   work queue in reachability state 'f-reachable'.  (Actually,
     *   finalizable objects that were fully reachable were not put in the
     *   work queue, because they are in a stronger reachability state that
     *   we've already fully scanned.)  Trace the work queue so that we mark
     *   everything reachable indirectly from an f-reachable object as also
     *   being at least f-reachable.
     */
    gc_trace_work_queue(vmg_ TRUE);

    /*
     *   We have now marked everything that's fully reachable as being in
     *   state 'reachable', and everything that's reachable from a
     *   finalizable object as being in state 'f-reachable'.  Anything that
     *   is still in state 'unreachable' is garbage and can be collected.
     */
    for (id = 0, i = pages_used_, pg = pages_ ; i > 0 ; ++pg, --i)
    {
        /* go through each entry on this page */
        for (j = VM_OBJ_PAGE_CNT, entry = *pg ; j > 0 ; --j, ++entry, ++id)
        {
            /* if it's not already free, process it */
            if (!entry->free_)
            {
                /* if the object is deletable, delete it */
                if (entry->is_deletable())
                {
                    /*
                     *   This object is completely unreachable, and it has
                     *   already been finalized.  This means there is no
                     *   possibility that the object could ever become
                     *   reachable again, hence we can discard the object.
                     */
                    delete_entry(vmg_ id, entry);
                }
                else
                {
                    /*
                     *   The object is reachable, so keep it around.  Since
                     *   it's staying around, and since we know which
                     *   objects we're deleting and which are staying, we
                     *   can ask this object to remove all of its "stale
                     *   weak references" - that is, weak references to
                     *   objects that we're about to delete.  Don't bother
                     *   notifying the object if it's incapable of keeping
                     *   weak references.
                     */
                    if (entry->can_have_weak_refs_)
                        entry->get_vm_obj()->remove_stale_weak_refs(vmg0_);

                    /*
                     *   If this object is finalizable, put it in the
                     *   finalizer queue, so that we can run its finalizer
                     *   when we're done scanning the table.
                     */
                    if (entry->finalize_state_ == VMOBJ_FINALIZABLE)
                        add_to_finalize_queue(id, entry);

                    /*
                     *   restore initial conditions for this object, so that
                     *   we're properly set up for the next GC pass
                     */
                    gc_set_init_conditions(id, entry);
                }
            }
        }
    }

    /*
     *   Go through the undo records and clear any stale weak references
     *   contained in the undo list.
     */
    G_undo->gc_remove_stale_weak_refs(vmg0_);

    /*
     *   All of the finalizable objects are now in the finalizer queue.  Run
     *   through the finalizer queue and run each such object's finalizer.
     */
    run_finalizers(vmg0_);
}
1739
1740 /*
1741 * Trace all objects reachable from the work queue.
1742 */
gc_trace_work_queue(VMG_ int trace_transient)1743 void CVmObjTable::gc_trace_work_queue(VMG_ int trace_transient)
1744 {
1745 /*
1746 * trace everything reachable directly from the work queue, until we
1747 * exhaust the queue
1748 */
1749 while (gc_pass_continue(vmg_ trace_transient)) ;
1750 }
1751
1752 /*
1753 * Garbage collection: trace objects reachable from the stack
1754 */
gc_trace_stack(VMG0_)1755 void CVmObjTable::gc_trace_stack(VMG0_)
1756 {
1757 size_t i;
1758 size_t depth;
1759 vm_val_t *val;
1760
1761 /*
1762 * Process the stack. For each stack element that refers to an
1763 * object, mark the object as referenced and add it to the work
1764 * queue.
1765 *
1766 * Note that it makes no difference in what order we process the
1767 * stack elements; we go from depth down to 0 merely as a trivial
1768 * micro-optimization to avoid evaluating the stack depth on every
1769 * iteration of the loop.
1770 */
1771 for (i = 0, depth = G_stk->get_depth() ; i < depth ; ++i)
1772 {
1773 /*
1774 * If this element refers to an object, and the object hasn't
1775 * already been marked as referenced, mark it as reachable and
1776 * add it to the work queue.
1777 *
1778 * Note that we only have to worry about objects here. We don't
1779 * have to worry about constant lists, even though they can
1780 * contain object references, because any object reference in a
1781 * constant list must be a root set object, and we've already
1782 * processed all root set objects.
1783 */
1784 val = G_stk->get(i);
1785 if (val->typ == VM_OBJ && val->val.obj != VM_INVALID_OBJ)
1786 add_to_gc_queue(val->val.obj, VMOBJ_REACHABLE);
1787 }
1788 }
1789
1790 /*
1791 * Trace objects reachable from imports
1792 */
/*
 *   Trace objects reachable from imports.
 *
 *   This uses the "x-macro" pattern: we define VM_IMPORT_OBJ /
 *   VM_NOIMPORT_OBJ and then #include vmimport.h, which expands one
 *   macro invocation per imported object.  Each expansion queues the
 *   corresponding G_predef member as reachable if it's a valid object.
 *   (NOTE(review): this assumes vmimport.h #undef's or redefines these
 *   macros for each inclusion site - confirm against vmimport.h.)
 */
void CVmObjTable::gc_trace_imports(VMG0_)
{
    /*
     *   generate the list of object imports; for each one, if we have a
     *   valid object for the import, mark it as reachable
     */
#define VM_IMPORT_OBJ(sym, mem) \
    if (G_predef->mem != VM_INVALID_OBJ) \
        add_to_gc_queue(G_predef->mem, VMOBJ_REACHABLE);
#define VM_NOIMPORT_OBJ(sym, mem) VM_IMPORT_OBJ(sym, mem)
#include "vmimport.h"
}
1805
1806 /*
1807 * Garbage collection: trace objects reachable from the machine globals
1808 */
gc_trace_globals(VMG0_)1809 void CVmObjTable::gc_trace_globals(VMG0_)
1810 {
1811 CVmObjGlobPage *pg;
1812 vm_val_t *val;
1813 vm_globalvar_t *var;
1814
1815 /* trace each page of globals */
1816 for (pg = globals_ ; pg != 0 ; pg = pg->nxt_)
1817 {
1818 size_t i;
1819 vm_obj_id_t *objp;
1820
1821 /* trace each item on this page */
1822 for (objp = pg->objs_, i = pg->used_ ; i != 0 ; ++objp, --i)
1823 {
1824 /* trace this global */
1825 add_to_gc_queue(*objp, VMOBJ_REACHABLE);
1826 }
1827 }
1828
1829 /* the return value register (R0) is a machine global */
1830 val = G_interpreter->get_r0();
1831 if (val->typ == VM_OBJ && val->val.obj != VM_INVALID_OBJ)
1832 add_to_gc_queue(val->val.obj, VMOBJ_REACHABLE);
1833
1834 /* trace the global variables defined by other subsystems */
1835 for (var = global_var_head_ ; var != 0 ; var = var->nxt)
1836 {
1837 /* if this global variable contains an object, trace it */
1838 if (var->val.typ == VM_OBJ && var->val.val.obj != VM_INVALID_OBJ)
1839 add_to_gc_queue(var->val.val.obj, VMOBJ_REACHABLE);
1840 }
1841 }
1842
1843 #if 0 // moved inline, as it's small and is called a fair amount
1844 /*
1845 * Set the initial conditions for an object, in preparation for the next
1846 * GC pass.
1847 */
1848 void CVmObjTable::gc_set_init_conditions(vm_obj_id_t id,
1849 CVmObjPageEntry *entry)
1850 {
1851 /*
1852 * Mark the object as unreachable -- at the start of each GC pass,
1853 * all non-root-set objects must be marked unreachable.
1854 */
1855 entry->reachable_ = VMOBJ_UNREACHABLE;
1856
1857 /*
1858 * If it's in the root set, add it to the GC work queue -- all
1859 * root-set objects must be in the work queue and marked as reachable
1860 * at the start of each GC pass.
1861 *
1862 * If the object is not in the root set, check to see if it's
1863 * finalizable. If so, add it to the finalizer queue, so that we
1864 * eventually run its finalizer.
1865 */
1866 if (entry->in_root_set_)
1867 add_to_gc_queue(id, entry, VMOBJ_REACHABLE);
1868 }
1869 #endif
1870
1871 /*
1872 * Run finalizers
1873 */
run_finalizers(VMG0_)1874 void CVmObjTable::run_finalizers(VMG0_)
1875 {
1876 /* keep going until we run out of work or reach our work limit */
1877 while (finalize_queue_head_ != VM_INVALID_OBJ)
1878 {
1879 CVmObjPageEntry *entry;
1880 vm_obj_id_t id;
1881
1882 /* get the next object from the queue */
1883 id = finalize_queue_head_;
1884
1885 /* get the entry */
1886 entry = get_entry(id);
1887
1888 /* remove the entry form the queue */
1889 finalize_queue_head_ = entry->next_obj_;
1890
1891 /* mark the object as finalized */
1892 entry->finalize_state_ = VMOBJ_FINALIZED;
1893
1894 /*
1895 * the entry is no longer in any queue, so we must mark it as
1896 * unreachable -- this ensures that the initial conditions are
1897 * correct for the next garbage collection pass, since all
1898 * objects not in the work queue must be marked as unreachable
1899 * (it doesn't matter whether the object is actually reachable;
1900 * the garbage collector will make that determination when it
1901 * next runs)
1902 */
1903 entry->reachable_ = VMOBJ_UNREACHABLE;
1904
1905 /* invoke its finalizer */
1906 entry->get_vm_obj()->invoke_finalizer(vmg_ id);
1907 }
1908 }
1909
1910
1911 /*
1912 * Receive notification that we're creating a new undo savepoint
1913 */
notify_new_savept()1914 void CVmObjTable::notify_new_savept()
1915 {
1916 CVmObjPageEntry **pg;
1917 CVmObjPageEntry *entry;
1918 size_t i;
1919 size_t j;
1920 vm_obj_id_t id;
1921
1922 /* go through each page of objects */
1923 for (id = 0, i = pages_used_, pg = pages_ ; i > 0 ; ++pg, --i)
1924 {
1925 /* go through each entry on this page */
1926 for (j = VM_OBJ_PAGE_CNT, entry = *pg ; j > 0 ; --j, ++entry, ++id)
1927 {
1928 /* if this entry is active, tell it about the new savepoint */
1929 if (!entry->free_)
1930 {
1931 /*
1932 * the object existed at the start of this savepoint, so
1933 * it must keep undo information throughout the savepoint
1934 */
1935 entry->in_undo_ = TRUE;
1936
1937 /* notify the object of the new savepoint */
1938 entry->get_vm_obj()->notify_new_savept();
1939 }
1940 }
1941 }
1942 }
1943
1944 /*
1945 * Apply undo
1946 */
apply_undo(VMG_ CVmUndoRecord * rec)1947 void CVmObjTable::apply_undo(VMG_ CVmUndoRecord *rec)
1948 {
1949 /* tell the object to apply the undo */
1950 if (rec->obj != VM_INVALID_OBJ)
1951 get_obj(rec->obj)->apply_undo(vmg_ rec);
1952 }
1953
1954
1955 /*
1956 * Scan all objects and add metaclass entries to the metaclass
1957 * dependency table for any metaclasses of which there are existing
1958 * instances.
1959 */
add_metadeps_for_instances(VMG0_)1960 void CVmObjTable::add_metadeps_for_instances(VMG0_)
1961 {
1962 CVmObjPageEntry **pg;
1963 size_t i;
1964 vm_obj_id_t id;
1965
1966 /* go through each page in the object table */
1967 for (id = 0, i = pages_used_, pg = pages_ ; i > 0 ; ++pg, --i)
1968 {
1969 size_t j;
1970 CVmObjPageEntry *entry;
1971
1972 /* start at the start of the page, but skip object ID = 0 */
1973 j = VM_OBJ_PAGE_CNT;
1974 entry = *pg;
1975
1976 /* go through each entry on this page */
1977 for ( ; j > 0 ; --j, ++entry, ++id)
1978 {
1979 /* if this entry is in use, add its metaclass if necessary */
1980 if (!entry->free_)
1981 G_meta_table->add_entry_if_new(
1982 entry->get_vm_obj()->get_metaclass_reg()->get_reg_idx(),
1983 0, VM_INVALID_PROP, VM_INVALID_PROP);
1984 }
1985 }
1986 }
1987
1988
1989 /* ------------------------------------------------------------------------ */
1990 /*
1991 * creation
1992 */
CVmObjFixup(ulong entry_cnt)1993 CVmObjFixup::CVmObjFixup(ulong entry_cnt)
1994 {
1995 uint i;
1996
1997 /* remember the number of entries */
1998 cnt_ = entry_cnt;
1999
2000 /* no entries are used yet */
2001 used_ = 0;
2002
2003 /* if we have no entries, there's nothing to do */
2004 if (cnt_ == 0)
2005 {
2006 arr_ = 0;
2007 pages_ = 0;
2008 return;
2009 }
2010
2011 /* calculate the number of subarrays we need */
2012 pages_ = (entry_cnt + VMOBJFIXUP_SUB_SIZE - 1) / VMOBJFIXUP_SUB_SIZE;
2013
2014 /* allocate the necessary number of subarrays */
2015 arr_ = (obj_fixup_entry **)t3malloc(pages_ * sizeof(arr_[0]));
2016
2017 /* allocate the subarrays */
2018 for (i = 0 ; i < pages_ ; ++i)
2019 {
2020 size_t cur_cnt;
2021
2022 /*
2023 * allocate a full page, except for the last page, which might be
2024 * only partially used
2025 */
2026 cur_cnt = VMOBJFIXUP_SUB_SIZE;
2027 if (i + 1 == pages_)
2028 cur_cnt = ((entry_cnt - 1) % VMOBJFIXUP_SUB_SIZE) + 1;
2029
2030 /* allocate it */
2031 arr_[i] = (obj_fixup_entry *)t3malloc(cur_cnt * sizeof(arr_[i][i]));
2032 }
2033 }
2034
2035 /*
2036 * deletion
2037 */
~CVmObjFixup()2038 CVmObjFixup::~CVmObjFixup()
2039 {
2040 uint i;
2041
2042 /* if we never allocated an array, there's nothing to do */
2043 if (arr_ == 0)
2044 return;
2045
2046 /* delete each subarray */
2047 for (i = 0 ; i < pages_ ; ++i)
2048 t3free(arr_[i]);
2049
2050 /* delete the main array */
2051 t3free(arr_);
2052 }
2053
2054 /*
2055 * add a fixup
2056 */
add_fixup(vm_obj_id_t old_id,vm_obj_id_t new_id)2057 void CVmObjFixup::add_fixup(vm_obj_id_t old_id, vm_obj_id_t new_id)
2058 {
2059 obj_fixup_entry *entry;
2060
2061 /* allocate the next available entry */
2062 entry = get_entry(used_++);
2063
2064 /* store it */
2065 entry->old_id = old_id;
2066 entry->new_id = new_id;
2067 }
2068
2069 /*
2070 * translate an ID
2071 */
get_new_id(VMG_ vm_obj_id_t old_id)2072 vm_obj_id_t CVmObjFixup::get_new_id(VMG_ vm_obj_id_t old_id)
2073 {
2074 obj_fixup_entry *entry;
2075
2076 /*
2077 * if it's a root-set object, don't bother even trying to translate
2078 * it, because root-set objects have stable ID's that never change on
2079 * saving or restoring
2080 */
2081 if (G_obj_table->is_obj_id_valid(old_id)
2082 && G_obj_table->is_obj_in_root_set(old_id))
2083 return old_id;
2084
2085 /* find the entry by the object ID */
2086 entry = find_entry(old_id);
2087
2088 /*
2089 * if we found it, return the new ID; otherwise, return the old ID
2090 * unchanged, since the ID evidently doesn't require mapping
2091 */
2092 return (entry != 0 ? entry->new_id : old_id);
2093 }
2094
2095 /*
2096 * find an entry given the old object ID
2097 */
find_entry(vm_obj_id_t old_id)2098 obj_fixup_entry *CVmObjFixup::find_entry(vm_obj_id_t old_id)
2099 {
2100 ulong lo;
2101 ulong hi;
2102 ulong cur;
2103
2104 /* do a binary search for the entry */
2105 lo = 0;
2106 hi = cnt_ - 1;
2107 while (lo <= hi)
2108 {
2109 obj_fixup_entry *entry;
2110
2111 /* split the difference */
2112 cur = lo + (hi - lo)/2;
2113 entry = get_entry(cur);
2114
2115 /* is it the one we're looking for? */
2116 if (entry->old_id == old_id)
2117 {
2118 /* it's the one - return the entry */
2119 return entry;
2120 }
2121 else if (old_id > entry->old_id)
2122 {
2123 /* we need to go higher */
2124 lo = (cur == lo ? cur + 1 : cur);
2125 }
2126 else
2127 {
2128 /* we need to go lower */
2129 hi = (cur == hi ? cur - 1 : cur);
2130 }
2131 }
2132
2133 /* didn't find it */
2134 return 0;
2135 }
2136
2137 /*
2138 * Fix a DATAHOLDER value
2139 */
fix_dh(VMG_ char * dh)2140 void CVmObjFixup::fix_dh(VMG_ char *dh)
2141 {
2142 /* if it's an object, translate the ID */
2143 if (vmb_get_dh_type(dh) == VM_OBJ)
2144 {
2145 vm_obj_id_t id;
2146
2147 /* get the object value */
2148 id = vmb_get_dh_obj(dh);
2149
2150 /* translate it */
2151 id = get_new_id(vmg_ id);
2152
2153 /*
2154 * if it's invalid, set the dataholder value to nil; otherwise,
2155 * set it to the new object ID
2156 */
2157 if (id == VM_INVALID_OBJ)
2158 vmb_put_dh_nil(dh);
2159 else
2160 vmb_put_dh_obj(dh, id);
2161 }
2162 }
2163
2164 /*
2165 * Fix up an array of DATAHOLDER values.
2166 */
fix_dh_array(VMG_ char * arr,size_t cnt)2167 void CVmObjFixup::fix_dh_array(VMG_ char *arr, size_t cnt)
2168 {
2169 /* scan the array of dataholders */
2170 for ( ; cnt != 0 ; --cnt, arr += VMB_DATAHOLDER)
2171 fix_dh(vmg_ arr);
2172 }
2173
2174 /*
2175 * Fix a portable VMB_OBJECT_ID field
2176 */
fix_vmb_obj(VMG_ char * p)2177 void CVmObjFixup::fix_vmb_obj(VMG_ char *p)
2178 {
2179 vm_obj_id_t id;
2180
2181 /* get the old ID */
2182 id = vmb_get_objid(p);
2183
2184 /* fix it up */
2185 id = get_new_id(vmg_ id);
2186
2187 /* store it back */
2188 vmb_put_objid(p, id);
2189 }
2190
2191 /*
2192 * Fix an array of portable VMB_OBJECT_ID fields
2193 */
fix_vmb_obj_array(VMG_ char * p,size_t cnt)2194 void CVmObjFixup::fix_vmb_obj_array(VMG_ char *p, size_t cnt)
2195 {
2196 /* scan the array */
2197 for ( ; cnt != 0 ; --cnt, p += VMB_OBJECT_ID)
2198 fix_vmb_obj(vmg_ p);
2199 }
2200
2201
2202 /* ------------------------------------------------------------------------ */
2203 /*
2204 * Save/restore
2205 */
2206
2207 /*
2208 * table-of-contents flags
2209 */
2210
2211 /* object is transient (so the object isn't saved to the state file) */
2212 #define VMOBJ_TOC_TRANSIENT 0x0001
2213
2214
2215 /*
2216 * Save state to a file
2217 */
void CVmObjTable::save(VMG_ CVmFile *fp)
{
    CVmObjPageEntry **pg;
    CVmObjPageEntry *entry;
    size_t i;
    size_t j;
    vm_obj_id_t id;
    size_t toc_cnt;
    size_t save_cnt;
    long toc_cnt_pos;
    long end_pos;

    /*
     *   Stream layout written here (and read back by restore()): the
     *   metaclass dependency table; the table of contents (a count, then
     *   an id/flags pair per dynamic object); the saveable-object count;
     *   then one record per saveable object (id, root-set flag, metaclass
     *   dependency index, metaclass-specific data).
     */

    /*
     *   Before we save, perform a full GC pass.  This will ensure that we
     *   have removed all objects that are referenced only weakly, and
     *   cleaned up the weak references to them; this is important because
     *   we don't trace weak references for the purposes of calculating the
     *   set of objects that must be saved, and hence won't save any objects
     *   that are only weakly referenced, which would leave dangling
     *   references in the saved state if those weak references weren't
     *   cleaned up before the objects containing them are saved. 
     */
    gc_full(vmg0_);

    /*
     *   Make sure that all of the metaclasses that we are actually using
     *   are in the metaclass dependency table.  We store the table in the
     *   file, because the table provides the mapping from file-local
     *   metaclass ID's to actual metaclasses; we must make sure that the
     *   table is complete (i.e., contains an entry for each metaclass of
     *   which there is an instance) before storing the table. 
     */
    add_metadeps_for_instances(vmg0_);

    /* save the metaclass table */
    G_meta_table->write_to_file(fp);

    /*
     *   Figure out what objects we need to save.  We only need to save
     *   objects that are directly reachable from the root object set, from
     *   the imports, or from the globals.
     *   
     *   We don't need to save objects that are only accessible from the
     *   undo, because we don't save any undo information in the file.  We
     *   also don't need to save any objects that are reachable only from
     *   the stack, since the stack is inherently transient.
     *   
     *   Note that we don't need to trace from transient objects, since we
     *   won't be saving the transient objects and thus won't need to save
     *   anything referenced only from transient objects.
     *   
     *   So, we merely trace objects reachable from the imports, globals,
     *   and work queue.  At any time between GC passes, the work queue
     *   contains the complete list of root-set objects, hence we can simply
     *   trace from the current work queue. 
     */
    gc_trace_imports(vmg0_);
    gc_trace_globals(vmg0_);
    gc_trace_work_queue(vmg_ FALSE);

    /*
     *   Before we save the objects themselves, save a table of contents of
     *   the dynamically-allocated objects to be saved.  This table of
     *   contents will allow us to fix up references to objects on reloading
     *   the file with the new object numbers we assign them at that time.
     *   First, write a placeholder for the table of contents entry count.
     *   
     *   Note that we must store the table of contents in ascending order of
     *   object ID.  This happens naturally, since we scan the table in
     *   order of object ID. 
     */
    toc_cnt = 0;
    save_cnt = 0;
    /* remember where the count goes so we can patch it after the scan */
    toc_cnt_pos = fp->get_pos();
    fp->write_int4(0);

    /* now scan the object pages and save the table of contents */
    for (id = 0, i = pages_used_, pg = pages_ ; i > 0 ; ++pg, --i)
    {
        /* scan all objects on this page ('id' tracks the absolute slot) */
        for (j = VM_OBJ_PAGE_CNT, entry = *pg ; j > 0 ; -- j, ++entry, ++id)
        {
            /*
             *   If the entry is currently reachable, and it was dynamically
             *   allocated (which means it's not in the root set), then add
             *   it to the table of contents.  Note that we won't
             *   necessarily be saving the object, because the object could
             *   be transient - but if so, then we still want the record of
             *   the transient object, so we'll know on reloading that the
             *   object is no longer available. 
             */
            if (!entry->free_
                && entry->reachable_ == VMOBJ_REACHABLE
                && !entry->in_root_set_)
            {
                ulong flags;

                /* set up the flags */
                flags = 0;
                if (entry->transient_)
                    flags |= VMOBJ_TOC_TRANSIENT;

                /* write the object ID and flags */
                fp->write_int4(id);
                fp->write_int4(flags);

                /* count it */
                ++toc_cnt;
            }

            /* if it's saveable, count it among the saveable objects */
            if (entry->is_saveable())
                ++save_cnt;
        }
    }

    /* go back and fix up the size prefix for the table of contents */
    end_pos = fp->get_pos();
    fp->set_pos(toc_cnt_pos);
    fp->write_int4(toc_cnt);
    fp->set_pos(end_pos);

    /* write the saveable object count, which we calculated above */
    fp->write_int4(save_cnt);

    /* scan all object pages, and save each reachable object */
    for (id = 0, i = pages_used_, pg = pages_ ; i > 0 ; ++pg, --i)
    {
        /* scan all objects on this page */
        for (j = VM_OBJ_PAGE_CNT, entry = *pg ; j > 0 ; -- j, ++entry, ++id)
        {
            /* if this object is saveable, save it */
            if (entry->is_saveable())
            {
                uint idx;
                char buf[2];

                /* write the object ID */
                fp->write_int4(id);

                /* store the root-set flag (byte 0 of the record) */
                buf[0] = (entry->in_root_set_ != 0);

                /* store the dependency table index (byte 1) */
                idx = entry->get_vm_obj()->
                      get_metaclass_reg()->get_reg_idx();
                buf[1] = (char)G_meta_table->get_dependency_index(idx);

                /* write the data */
                fp->write_bytes(buf, 2);

                /* save the metaclass-specific state */
                entry->get_vm_obj()->save_to_file(vmg_ fp);
            }

            /*
             *   restore this object to the appropriate conditions in
             *   preparation for the next GC pass, so that we leave things
             *   as we found them -- saving the VM's state thus has no
             *   effect on the VM's state 
             */
            gc_set_init_conditions(id, entry);
        }
    }
}
2383
2384 /*
2385 * Restore state from a file
2386 */
int CVmObjTable::restore(VMG_ CVmFile *fp, CVmObjFixup **fixups)
{
    int err;
    ulong cnt;

    /*
     *   Reads back exactly the stream that save() writes: metaclass
     *   table, table of contents, saveable count, then per-object
     *   records.  Returns zero on success, or a VMERR_xxx code; on
     *   success, *fixups receives a newly-allocated translation table
     *   mapping saved-file object IDs to live IDs (caller owns it).
     */

    /* presume we won't create a fixup table */
    *fixups = 0;

    /* load the metaclass table */
    if ((err = G_meta_table->read_from_file(fp)) != 0)
        return err;

    /*
     *   Reset all objects to the initial image file load state.  Note that
     *   we wait until after we've read the metaclass table to reset the
     *   objects, because any intrinsic class objects in the root set will
     *   need to re-initialize their presence in the metaclass table, which
     *   they can't do until after the metaclass table has itself been
     *   reloaded. 
     */
    G_obj_table->reset_to_image(vmg0_);

    /* read the size of the table of contents */
    cnt = (ulong)fp->read_int4();

    /* allocate the fixup table */
    *fixups = new CVmObjFixup(cnt);

    /* read the fixup table */
    for ( ; cnt != 0 ; --cnt)
    {
        vm_obj_id_t old_id;
        vm_obj_id_t new_id;
        ulong flags;

        /* read the next entry */
        old_id = (vm_obj_id_t)fp->read_int4();
        flags = (ulong)fp->read_int4();

        /*
         *   Allocate a new object ID for this entry.  If the object was
         *   transient, then it won't actually have been saved, so we must
         *   fix up references to the object to nil. 
         */
        if (!(flags & VMOBJ_TOC_TRANSIENT))
            new_id = vm_new_id(vmg_ FALSE);
        else
            new_id = VM_INVALID_OBJ;

        /*
         *   Add the entry.  Note that the table of contents is stored in
         *   ascending order of old ID (i.e., the ID's in the saved state
         *   file's numbering system); this is the same ordering required by
         *   the fixup table, so we can simply read entries from the file
         *   and add them directly to the fixup table. 
         */
        (*fixups)->add_fixup(old_id, new_id);
    }

    /* read the number of saved objects */
    cnt = (ulong)fp->read_int4();

    /* read the objects */
    for ( ; cnt != 0 ; --cnt)
    {
        char buf[2];
        vm_obj_id_t id;
        int in_root_set;
        uint meta_idx;
        CVmObject *obj;

        /* read the original object ID */
        id = (vm_obj_id_t)fp->read_int4();

        /* read the root-set flag (byte 0) and dependency index (byte 1) */
        fp->read_bytes(buf, 2);
        in_root_set = buf[0];
        meta_idx = (uchar)buf[1];

        /*
         *   if it's not in the root set, we need to create a new object;
         *   otherwise, the object must already exist, since it came from
         *   the object file 
         */
        if (in_root_set)
        {
            /*
             *   make sure the object is valid - since it's supposedly in
             *   the root set, it must already exist 
             */
            if (!is_obj_id_valid(id) || get_entry(id)->free_)
                return VMERR_SAVED_OBJ_ID_INVALID;
        }
        else
        {
            /*
             *   the object was dynamically allocated, so it will have a new
             *   object number - fix up the object ID to the new numbering
             *   system 
             */
            id = (*fixups)->get_new_id(vmg_ id);

            /* create the object */
            G_meta_table->create_for_restore(vmg_ meta_idx, id);
        }

        /* read the object's data */
        obj = get_obj(id);
        obj->restore_from_file(vmg_ id, fp, *fixups);
    }

    /* success */
    return 0;
}
2501
2502 /*
2503 * Save an image data pointer
2504 */
save_image_pointer(vm_obj_id_t obj_id,const char * ptr,size_t siz)2505 void CVmObjTable::save_image_pointer(vm_obj_id_t obj_id, const char *ptr,
2506 size_t siz)
2507 {
2508 vm_image_ptr *slot;
2509
2510 /* allocate a new page if we're out of slots on the current page */
2511 if (image_ptr_head_ == 0)
2512 {
2513 /* no pages yet - allocate the first page */
2514 image_ptr_head_ = (vm_image_ptr_page *)
2515 t3malloc(sizeof(vm_image_ptr_page));
2516 if (image_ptr_head_ == 0)
2517 err_throw(VMERR_OUT_OF_MEMORY);
2518
2519 /* it's also the last page */
2520 image_ptr_tail_ = image_ptr_head_;
2521 image_ptr_tail_->next_ = 0;
2522
2523 /* no slots used on this page yet */
2524 image_ptr_last_cnt_ = 0;
2525 }
2526 else if (image_ptr_last_cnt_ == VM_IMAGE_PTRS_PER_PAGE)
2527 {
2528 /* the last page is full - allocate another one */
2529 image_ptr_tail_->next_ = (vm_image_ptr_page *)
2530 t3malloc(sizeof(vm_image_ptr_page));
2531 if (image_ptr_tail_->next_ == 0)
2532 err_throw(VMERR_OUT_OF_MEMORY);
2533
2534 /* it's the new last page */
2535 image_ptr_tail_ = image_ptr_tail_->next_;
2536 image_ptr_tail_->next_ = 0;
2537
2538 /* no slots used on this page yet */
2539 image_ptr_last_cnt_ = 0;
2540 }
2541
2542 /* get the next available slot */
2543 slot = &image_ptr_tail_->ptrs_[image_ptr_last_cnt_];
2544
2545 /* save the data */
2546 slot->obj_id_ = obj_id;
2547 slot->image_data_ptr_ = ptr;
2548 slot->image_data_len_ = siz;
2549
2550 /* count the new record */
2551 ++image_ptr_last_cnt_;
2552 }
2553
2554 /*
2555 * Reset to initial image file state
2556 */
void CVmObjTable::reset_to_image(VMG0_)
{
    CVmObjPageEntry **pg;
    CVmObjPageEntry *entry;
    size_t i;
    size_t j;
    vm_obj_id_t id;
    vm_image_ptr_page *ip_page;

    /*
     *   Drop all undo information.  Since we're resetting to the initial
     *   state, the undo for our outgoing state will no longer be
     *   relevant. 
     */
    G_undo->drop_undo(vmg0_);

    /* delete all of the globals */
    if (globals_ != 0)
    {
        delete globals_;
        globals_ = 0;
    }

    /*
     *   Go through the object table and reset each non-transient object in
     *   the root set to its initial conditions. 
     */
    for (id = 0, i = pages_used_, pg = pages_ ; i > 0 ; ++pg, --i)
    {
        /* scan all objects on this page ('id' tracks the absolute slot) */
        for (j = VM_OBJ_PAGE_CNT, entry = *pg ; j > 0 ; --j, ++entry, ++id)
        {
            /*
             *   if it's not free, and it's in the root set, and it's not
             *   transient, reset it 
             */
            if (!entry->free_ && entry->in_root_set_ && !entry->transient_)
            {
                /*
                 *   This object is part of the root set, so it's part of
                 *   the state immediately after loading the image.  Reset
                 *   the object to its load file conditions. 
                 */
                entry->get_vm_obj()->reset_to_image(vmg_ id);
            }
        }
    }

    /*
     *   Go through all of the objects for which we've explicitly saved the
     *   original image file location, and ask them to reset using the image
     *   data. 
     */
    for (ip_page = image_ptr_head_ ; ip_page != 0 ; ip_page = ip_page->next_)
    {
        size_t cnt;
        vm_image_ptr *slot;

        /*
         *   get the count for this page - if this is the last page, it's
         *   the last page counter; otherwise, it's a full page, since we
         *   fill up each page before creating a new one 
         */
        if (ip_page->next_ == 0)
            cnt = image_ptr_last_cnt_;
        else
            cnt = VM_IMAGE_PTRS_PER_PAGE;

        /* go through the records on the page */
        for (slot = ip_page->ptrs_ ; cnt != 0 ; --cnt, ++slot)
        {
            /* if this object is non-transient, reload it */
            if (!get_entry(slot->obj_id_)->transient_)
            {
                /* reload it using the saved image data */
                vm_objp(vmg_ slot->obj_id_)
                    ->reload_from_image(vmg_ slot->obj_id_,
                                        slot->image_data_ptr_,
                                        slot->image_data_len_);
            }
        }
    }
}
2640
2641 /*
2642 * Create a global variable
2643 */
create_global_var()2644 vm_globalvar_t *CVmObjTable::create_global_var()
2645 {
2646 vm_globalvar_t *var;
2647
2648 /* create the new variable */
2649 var = new vm_globalvar_t();
2650
2651 /* initialize the variable's value to nil */
2652 var->val.set_nil();
2653
2654 /* link it into our list of globals */
2655 var->nxt = global_var_head_;
2656 var->prv = 0;
2657 if (global_var_head_ != 0)
2658 global_var_head_->prv = var;
2659 global_var_head_ = var;
2660
2661 /* return the variable */
2662 return var;
2663 }
2664
2665 /*
2666 * Delete a global variable
2667 */
delete_global_var(vm_globalvar_t * var)2668 void CVmObjTable::delete_global_var(vm_globalvar_t *var)
2669 {
2670 /* unlink it from the list of globals */
2671 if (var->nxt != 0)
2672 var->nxt->prv = var->prv;
2673
2674 if (var->prv != 0)
2675 var->prv->nxt = var->nxt;
2676 else
2677 global_var_head_ = var->nxt;
2678
2679 /* delete the memory */
2680 delete var;
2681 }
2682
2683
2684 /* ------------------------------------------------------------------------ */
2685 /*
2686 * post-load initialization status
2687 */
enum pli_stat_t
{
    PLI_UNINITED,                                    /* not yet initialized */
    PLI_IN_PROGRESS,    /* initialization in progress (cycles are illegal) */
    PLI_INITED                                  /* initialization completed */
};
2694
2695 /*
2696 * Post-load initialization hash table entry. We use the object ID,
2697 * treating the binary representation as a string of bytes, as the hash
2698 * key.
2699 */
2700 class CVmHashEntryPLI: public CVmHashEntryCS
2701 {
2702 public:
CVmHashEntryPLI(vm_obj_id_t id)2703 CVmHashEntryPLI(vm_obj_id_t id)
2704 : CVmHashEntryCS((char *)&id, sizeof(id), TRUE)
2705 {
2706 /*
2707 * remember our object ID for easy access (technically, it's stored
2708 * as our key value as well, so this is redundant; but it's
2709 * transformed into a block of bytes for the key, so it's easier to
2710 * keep a separate copy of the true type)
2711 */
2712 id_ = id;
2713
2714 /* initialize our status */
2715 status = PLI_UNINITED;
2716 }
2717
2718 /* our object ID */
2719 vm_obj_id_t id_;
2720
2721 /* status for current operation */
2722 pli_stat_t status;
2723 };
2724
2725 /*
2726 * Request post-load initialization.
2727 */
request_post_load_init(vm_obj_id_t obj)2728 void CVmObjTable::request_post_load_init(vm_obj_id_t obj)
2729 {
2730 CVmHashEntryPLI *entry;
2731
2732 /* check for an existing entry - if there's not one already, add one */
2733 entry = (CVmHashEntryPLI *)
2734 post_load_init_table_->find((char *)&obj, sizeof(obj));
2735 if (entry == 0)
2736 {
2737 /* it's not there yet - add a new entry */
2738 post_load_init_table_->add(new CVmHashEntryPLI(obj));
2739
2740 /* mark the object as having requested post-load initialization */
2741 get_entry(obj)->requested_post_load_init_ = TRUE;
2742 }
2743 }
2744
2745 /*
2746 * Explicitly invoke post-load initialization on a given object
2747 */
ensure_post_load_init(VMG_ vm_obj_id_t obj)2748 void CVmObjTable::ensure_post_load_init(VMG_ vm_obj_id_t obj)
2749 {
2750 CVmHashEntryPLI *entry;
2751
2752 /* find the entry */
2753 entry = (CVmHashEntryPLI *)
2754 post_load_init_table_->find((char *)&obj, sizeof(obj));
2755
2756 /* if we found it, invoke its initialization method */
2757 if (entry != 0)
2758 call_post_load_init(vmg_ entry);
2759 }
2760
2761 /*
2762 * Invoke post-load initialization on the given object, if appropriate
2763 */
call_post_load_init(VMG_ CVmHashEntryPLI * entry)2764 void CVmObjTable::call_post_load_init(VMG_ CVmHashEntryPLI *entry)
2765 {
2766 /* check the status */
2767 switch (entry->status)
2768 {
2769 case PLI_UNINITED:
2770 /*
2771 * It's not yet initialized, so we need to initialize it now. Mark
2772 * it as having its initialization in progress.
2773 */
2774 entry->status = PLI_IN_PROGRESS;
2775
2776 /*
2777 * push the entry on the stack to protect it from gc while we're
2778 * initializing it
2779 */
2780 G_stk->push()->set_obj(entry->id_);
2781
2782 /* invoke its initialization */
2783 vm_objp(vmg_ entry->id_)->post_load_init(vmg_ entry->id_);
2784
2785 /* mark the object as having completed initialization */
2786 entry->status = PLI_INITED;
2787
2788 /* discard our GC protection */
2789 G_stk->discard();
2790
2791 /* done */
2792 break;
2793
2794 case PLI_IN_PROGRESS:
2795 /*
2796 * The object is in the process of being initialized. This must
2797 * mean that the object's initializer is calling us indirectly,
2798 * probably through another object's initializer that it invoked
2799 * explicitly as a dependency, which in turn means that we have a
2800 * circular dependency. This is illegal, so abort with an error.
2801 */
2802 err_throw(VMERR_CIRCULAR_INIT);
2803 break;
2804
2805 case PLI_INITED:
2806 /* it's already been initialized, so there's nothing to do */
2807 break;
2808 }
2809 }
2810
2811 /*
2812 * Remove a post-load initialization record
2813 */
remove_post_load_init(vm_obj_id_t obj)2814 void CVmObjTable::remove_post_load_init(vm_obj_id_t obj)
2815 {
2816 CVmHashEntryPLI *entry;
2817
2818 /* find the entry */
2819 entry = (CVmHashEntryPLI *)
2820 post_load_init_table_->find((char *)&obj, sizeof(obj));
2821
2822 /* if we found the entry, remove it from the table and delete it */
2823 if (entry != 0)
2824 {
2825 /* remove it */
2826 post_load_init_table_->remove(entry);
2827
2828 /* delete it */
2829 delete entry;
2830
2831 /* mark the entry as no longer being registered for post-load init */
2832 get_entry(obj)->requested_post_load_init_ = FALSE;
2833 }
2834 }
2835
2836 /*
2837 * post-load initialization enumeration context
2838 */
struct pli_enum_ctx
{
    /* VM globals pointer, so the static enumeration callbacks can
       re-establish the VM context via VMGLOB_PTR */
    vm_globals *globals;
};
2843
2844 /*
2845 * Invoke post-load initialization on all objects that have requested it.
2846 * This must be called after initial program load, restarts, and restore
2847 * operations.
2848 */
do_all_post_load_init(VMG0_)2849 void CVmObjTable::do_all_post_load_init(VMG0_)
2850 {
2851 pli_enum_ctx ctx;
2852
2853 /* set up our context */
2854 ctx.globals = VMGLOB_ADDR;
2855
2856 /* first, mark all entries as having status 'uninitialized' */
2857 post_load_init_table_->enum_entries(&pli_status_cb, &ctx);
2858
2859 /* next, invoke the initializer method for each entry */
2860 post_load_init_table_->enum_entries(&pli_invoke_cb, &ctx);
2861 }
2862
2863 /*
2864 * post-load initialization enumeration callback: mark entries as having
2865 * status 'uninitialized'
2866 */
pli_status_cb(void * ctx0,CVmHashEntry * entry0)2867 void CVmObjTable::pli_status_cb(void *ctx0, CVmHashEntry *entry0)
2868 {
2869 CVmHashEntryPLI *entry = (CVmHashEntryPLI *)entry0;
2870
2871 /* mark the entry as having status 'uninitialized' */
2872 entry->status = PLI_UNINITED;
2873 }
2874
2875 /*
2876 * post-load initialization enumeration callback: mark entries as having
2877 * status 'uninitialized'
2878 */
pli_invoke_cb(void * ctx0,CVmHashEntry * entry0)2879 void CVmObjTable::pli_invoke_cb(void *ctx0, CVmHashEntry *entry0)
2880 {
2881 pli_enum_ctx *ctx = (pli_enum_ctx *)ctx0;
2882 CVmHashEntryPLI *entry = (CVmHashEntryPLI *)entry0;
2883 VMGLOB_PTR(ctx->globals);
2884
2885 /* invoke post-load initialization on the object */
2886 call_post_load_init(vmg_ entry);
2887 }
2888
2889
2890 /* ------------------------------------------------------------------------ */
2891 /*
2892 * Memory manager implementation
2893 */
2894
CVmMemory(VMG_ CVmVarHeap * varheap)2895 CVmMemory::CVmMemory(VMG_ CVmVarHeap *varheap)
2896 {
2897 /* remember our variable-size heap */
2898 varheap_ = varheap;
2899
2900 /* initialize the variable-size heap */
2901 varheap_->init(vmg0_);
2902 }
2903
2904 /* ------------------------------------------------------------------------ */
2905 /*
2906 * Hybrid cell/malloc memory manager - cell sub-block manager
2907 */
2908
2909 /*
2910 * Allocate an object. Since we always allocate blocks of a fixed size,
2911 * we can ignore the size parameter; the heap manager will only route us
2912 * requests that fit in our blocks.
2913 */
CVmVarHeapHybrid_hdr *CVmVarHeapHybrid_head::alloc(size_t)
{
    CVmVarHeapHybrid_hdr *ret;

    /* if there isn't a free entry, allocate another cell array */
    if (first_free_ == 0)
    {
        CVmVarHeapHybrid_array *arr;
        size_t real_cell_size;
        char *p;
        size_t cnt;

        /*
         *   Allocate another page.  We need space for the array header
         *   itself, plus space for all of the cells.  We want page_count_
         *   cells, each of size cell_size_ plus the size of the per-cell
         *   header, rounded to the worst-case alignment size for the
         *   platform. 
         */
        real_cell_size = osrndsz(cell_size_
                                 + osrndsz(sizeof(CVmVarHeapHybrid_hdr)));
        arr = (CVmVarHeapHybrid_array *)
              t3malloc(sizeof(CVmVarHeapHybrid_array)
                       + (page_count_ * real_cell_size));

        /* if that failed, throw an error */
        if (arr == 0)
            err_throw(VMERR_OUT_OF_MEMORY);

        /* link the array into the master list (so the dtor can free it) */
        arr->next_array = mem_mgr_->first_array_;
        mem_mgr_->first_array_ = arr;

        /*
         *   Build the free list.  Each cell goes into the free list; the
         *   'next' pointer is stored in the data area of the cell (the
         *   cell's bytes are reused as the link while the cell is free). 
         */
        for (p = arr->mem, cnt = page_count_ ; cnt > 0 ;
             p += real_cell_size, --cnt)
        {
            /* link this one in at the head of the free list */
            *(void **)p = first_free_;
            first_free_ = (void *)p;
        }
    }

    /* remember the return value: the head of the free list */
    ret = (CVmVarHeapHybrid_hdr *)first_free_;

    /*
     *   when we initialized or last freed this entry, we stored a pointer
     *   to the next item in the free list in the object's data - retrieve
     *   this pointer now, and update our free list head to point to the
     *   next item 
     */
    first_free_ = *(void **)first_free_;

    /* fill in the block's pointer to the allocating heap (i.e., this) */
    ret->block = this;

    /* return the item */
    return ret;
}
2977
2978 /*
2979 * Reallocate
2980 */
realloc(CVmVarHeapHybrid_hdr * mem,size_t siz,CVmObject * obj)2981 void *CVmVarHeapHybrid_head::realloc(CVmVarHeapHybrid_hdr *mem, size_t siz,
2982 CVmObject *obj)
2983 {
2984 void *new_mem;
2985
2986 /*
2987 * if the new block fits in our cell size, return the original
2988 * memory unchanged; note that we must adjust the pointer so that we
2989 * return the client-visible portion
2990 */
2991 if (siz <= cell_size_)
2992 return (void *)(mem + 1);
2993
2994 /*
2995 * The memory won't fit in our cell size, so not only can't we
2996 * re-use the existing cell, but we can't allocate the memory from
2997 * our own sub-block at all. Allocate an entirely new block from
2998 * the heap manager.
2999 */
3000 new_mem = mem_mgr_->alloc_mem(siz, obj);
3001
3002 /*
3003 * Copy the old cell's contents to the new memory. Note that the
3004 * user-visible portion of the old cell starts immediately after the
3005 * header; don't copy the old header, since it's not applicable to
3006 * the new object. Note also that we got a pointer directly to the
3007 * user-visible portion of the new object, so we don't need to make
3008 * any adjustments to the new pointer.
3009 */
3010 memcpy(new_mem, (void *)(mem + 1), cell_size_);
3011
3012 /* free the old memory */
3013 free(mem);
3014
3015 /* return the new memory */
3016 return new_mem;
3017 }
3018
3019 /*
3020 * Release memory
3021 */
void CVmVarHeapHybrid_head::free(CVmVarHeapHybrid_hdr *mem)
{
    /*
     *   push the cell onto the head of the free list - the cell's own
     *   bytes are reused to hold the 'next' link, as alloc() expects 
     */
    *(void **)mem = first_free_;
    first_free_ = (void *)mem;
}
3028
3029 /* ------------------------------------------------------------------------ */
3030 /*
3031 * Hybrid cell/malloc heap manager
3032 */
3033
3034 /*
3035 * construct
3036 */
CVmVarHeapHybrid()3037 CVmVarHeapHybrid::CVmVarHeapHybrid()
3038 {
3039 /* set the cell heap count */
3040 cell_heap_cnt_ = 5;
3041
3042 /* allocate our cell heap pointer array */
3043 cell_heaps_ = (CVmVarHeapHybrid_head **)
3044 t3malloc(cell_heap_cnt_ * sizeof(CVmVarHeapHybrid_head *));
3045
3046 /* if that failed, throw an error */
3047 if (cell_heaps_ == 0)
3048 err_throw(VMERR_OUT_OF_MEMORY);
3049
3050 /*
3051 * Allocate our cell heaps. Set up the heaps so that the pages run
3052 * about 32k each.
3053 */
3054 cell_heaps_[0] = new CVmVarHeapHybrid_head(this, 32, 850);
3055 cell_heaps_[1] = new CVmVarHeapHybrid_head(this, 64, 400);
3056 cell_heaps_[2] = new CVmVarHeapHybrid_head(this, 128, 200);
3057 cell_heaps_[3] = new CVmVarHeapHybrid_head(this, 256, 100);
3058 cell_heaps_[4] = new CVmVarHeapHybrid_head(this, 512, 50);
3059
3060 /* allocate our malloc heap manager */
3061 malloc_heap_ = new CVmVarHeapHybrid_malloc();
3062
3063 /* we haven't allocated any cell array pages yet */
3064 first_array_ = 0;
3065 }
3066
3067 /*
3068 * delete
3069 */
~CVmVarHeapHybrid()3070 CVmVarHeapHybrid::~CVmVarHeapHybrid()
3071 {
3072 size_t i;
3073
3074 /* delete our cell heaps */
3075 for (i = 0 ; i < cell_heap_cnt_ ; ++i)
3076 delete cell_heaps_[i];
3077
3078 /* delete the cell heap pointer array */
3079 t3free(cell_heaps_);
3080
3081 /* delete all of the arrays */
3082 while (first_array_ != 0)
3083 {
3084 CVmVarHeapHybrid_array *nxt;
3085
3086 /* remember the next one */
3087 nxt = first_array_->next_array;
3088
3089 /* delete this one */
3090 t3free(first_array_);
3091
3092 /* move on to the next one */
3093 first_array_ = nxt;
3094 }
3095
3096 /* delete the malloc-based subheap manager */
3097 delete malloc_heap_;
3098 }
3099
3100 /*
3101 * allocate memory
3102 */
alloc_mem(size_t siz,CVmObject *)3103 void *CVmVarHeapHybrid::alloc_mem(size_t siz, CVmObject *)
3104 {
3105 CVmVarHeapHybrid_head **subheap;
3106 size_t i;
3107
3108 /* scan for a cell-based subheap that can handle the request */
3109 for (i = 0, subheap = cell_heaps_ ; i < cell_heap_cnt_ ; ++i, ++subheap)
3110 {
3111 /*
3112 * If it will fit in this one's cell size, allocate it from this
3113 * subheap. Note that we must adjust the return pointer so that
3114 * it points to the caller-visible portion of the block returned
3115 * from the subheap, which immediately follows the internal
3116 * header.
3117 */
3118 if (siz <= (*subheap)->get_cell_size())
3119 return (void *)((*subheap)->alloc(siz) + 1);
3120 }
3121
3122 /*
3123 * We couldn't find a cell-based manager that can handle a block
3124 * this large. Allocate the block from the default malloc heap.
3125 * Note that the caller-visible block is the part that immediately
3126 * follows our internal header, so we must adjust the return pointer
3127 * accordingly.
3128 */
3129 return (void *)(malloc_heap_->alloc(siz) + 1);
3130 }
3131
3132 /*
3133 * reallocate memory
3134 */
realloc_mem(size_t siz,void * mem,CVmObject * obj)3135 void *CVmVarHeapHybrid::realloc_mem(size_t siz, void *mem,
3136 CVmObject *obj)
3137 {
3138 CVmVarHeapHybrid_hdr *hdr;
3139
3140 /*
3141 * get the block header, which immediately precedes the
3142 * caller-visible block
3143 */
3144 hdr = ((CVmVarHeapHybrid_hdr *)mem) - 1;
3145
3146 /*
3147 * read the header to get the block manager that originally
3148 * allocated the memory, and ask it to reallocate the memory
3149 */
3150 return hdr->block->realloc(hdr, siz, obj);
3151 }
3152
3153 /*
3154 * free memory
3155 */
free_mem(void * mem)3156 void CVmVarHeapHybrid::free_mem(void *mem)
3157 {
3158 CVmVarHeapHybrid_hdr *hdr;
3159
3160 /*
3161 * get the block header, which immediately precedes the
3162 * caller-visible block
3163 */
3164 hdr = ((CVmVarHeapHybrid_hdr *)mem) - 1;
3165
3166 /*
3167 * read the header to get the block manager that originally
3168 * allocated the memory, and ask it to free the memory
3169 */
3170 hdr->block->free(hdr);
3171 }
3172
3173