/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * This source file is part of SableVM.                            *
 *                                                                 *
 * See the file "LICENSE" for the copyright information and for    *
 * the terms and conditions for copying, distribution and          *
 * modification of this source file.                               *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if defined (HAS_SYSTEM_CLEAR_CACHE)
/* A clever hack to avoid the time-consuming writing of an optimized
 * cache flush instruction for every architecture in the world.
 * Unfortunately it won't work without GCC. (GBP) */
extern void __clear_cache (char *beg, char *end);
#endif

/* Beware: Architecture-specific pieces are in *alphabetical* order */
#if (( defined (__sparc__) || defined (__ia64__) || defined (__alpha__) \
    || defined (__i386__) || defined (__powerpc__) || defined (__s390__) \
    || defined (__hppa__) || defined (__arm__) || defined (__m68k__) \
    || defined (__mc68000__) || defined (__mips__) || defined (__mipsel__) \
    || defined (__x86_64__) || defined (_POWER)  \
     ) && defined (__GNUC__))

/*
----------------------------------------------------------------------
_svmf_iflush
----------------------------------------------------------------------
*/

/* This function updates the word "*pword" (pointed to by pword) in
   the INSTRUCTION cache, on processors with separate data and
   instruction caches.  This is a requirement for getting the
   inline-threading engine to work on such relatively common
   processors. */

svm_static inline void
_svmf_iflush (_svmt_word *pword SVM_UNUSED)
{
/* *INDENT-OFF* */
#if defined (__alpha__)

  /* 134 is the code for IMB.  I am not sure whether the code below
     needs to be followed by another 44 NOP instructions beginning
     16-byte aligned (whatever that means).  Helpful:
     http://www.atomised.org/docs/XFree86-4.2.1/compiler_8h-source.html
     I was told that "call_pal %0 #imb" costs about 300 CPU cycles and
     that the first memory barrier "mb" is probably not needed. (GBP) */

  __asm__ __volatile__ ("mb; call_pal %0 #imb"::"i" (134):"memory");

#elif defined (__hppa__)

  __asm__ __volatile__ ("fdc 0(%0)\n"
                        "fic 0(%%sr4, %0)\n"
                        "sync\n"
                        :: "r" ((unsigned long) pword & ~63) : "memory");

#elif defined (__i386__)
  /* do nothing */
#elif defined (__ia64__)

  /* also based on the above "helpful" address, so I think it's OK. (GBP) */
  __asm__ __volatile__ ("fc %0;;; sync.i;;; mf;;; srlz.i;;;"::
                        "r" (pword):"memory");

#elif defined (__powerpc__) || defined (_POWER)

  __asm__ __volatile__ ("dcbst 0,%0\n\t"
                        "sync\n\t"
                        "icbi 0,%0\n\t"
                        "isync" : : "r" (pword));

#elif defined (__sparc__)

  /* http://www.wcug.wwu.edu/lists/smpdev/199702/msg00023.html
     http://www.haskell.org/pipermail/glasgow-haskell-bugs/2002-April/002345.html
     In fact the flush instruction flushes *two* words.  It may take up
     to 5 instructions before the flush is visible, but it might work
     without them - for SableVM I doubt the flushed code will be
     executed before any 5 other assembler instructions. */

  __asm__ __volatile__ ("flush %0; nop; nop; nop; nop; nop;" : : "r" (pword));

/* ARM version:
   http://cvs.perl.org/cgi/viewcvs.cgi/parrot/jit/arm/jit_emit.h?rev=1.18
*/

#elif defined (_SABLEVM_INLINED_THREADED_INTERPRETER)
#if defined (HAS_SYSTEM_CLEAR_CACHE)
  __clear_cache ((char *) pword, (char *) (pword + 1));
#else
#error "SableVM is not prepared to run the inlined threaded interpreter on this kind of system, and __clear_cache is not available."
#endif
#endif
/* *INDENT-ON* */
}
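
/* Usage sketch (hypothetical, not part of SableVM proper): the
   inline-threading engine writes or patches words of executable code,
   then calls _svmf_iflush on each modified word so that the store,
   still sitting in the data cache, becomes visible to the instruction
   cache before the code is executed.  The buffer and value names below
   are assumptions for illustration only.

     _svmt_word *code_word = ...;     (freshly written threaded code)
     *code_word = patched_value;     (store goes to the data cache)
     _svmf_iflush (code_word);       (push it to the instruction cache)
*/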

/*
----------------------------------------------------------------------
_svmh_compare_and_swap
----------------------------------------------------------------------
*/

#define _svmm_compare_and_swap(word, old_value, new_value) \
_svmh_compare_and_swap (&word, old_value, new_value)

/* this function ATOMICALLY does the following:

   if (*pword == old_value)
     {
       *pword = new_value;
       return 1;
     }
   else
     {
       return 0;
     }
*/
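
/* Usage sketch (hypothetical caller): because the swap fails when
   another thread changed the word first, callers typically re-read the
   word and retry.  "flags" and "set_flag" below are illustrative
   names, not SableVM identifiers.

     static volatile _svmt_word flags;

     static void
     set_flag (_svmt_word bit)
     {
       _svmt_word old_value;

       do
         {
           old_value = flags;
         }
       while (!_svmm_compare_and_swap (flags, old_value, old_value | bit));
     }
*/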

svm_static inline jboolean
_svmh_compare_and_swap (volatile _svmt_word *pword, _svmt_word old_value,
                        _svmt_word new_value)
{
  /* Yes, some inline assembly source code...  Unfortunately, this
     cannot be expressed in C. */

#if defined (__alpha__)
  register _svmt_word result, tmp;

/* *INDENT-OFF* */
  __asm__ __volatile__ ("1:  mb\n\t"                    /* make sure */
                        "    ldq_l      %1,%4\n\t"      /* load *pword into tmp (reg <= mem) */
                        "    cmpeq      %1,%5,%0\n\t"   /* result = (*pword == old_value) */
                        "    beq        %0,3f\n\t"      /* nothing to do if they differ (0) - jump away */
                        "    mov        %3,%1\n\t"      /* copy tmp <= new_value so that we don't lose it */
                        "    stq_c      %1,%4\n\t"      /* *pword = new_value (reg => mem) */
                        "    beq        %1,2f\n\t"      /* the store could fail! (%1 overwritten!) */
                        "    mb\n\t"                    /* make sure */
                        "    br         3f\n\t"         /* we're done */
                        "2:  br         1b\n\t"         /* goto "again" */
                        "3:  nop"
                        :"=&r" (result), "=&r" (tmp), "=m" (*pword)
                        :"r" (new_value), "m" (*pword), "r" (old_value));
/* *INDENT-ON* */

#elif defined (__arm__)
  /* borrowed from glibc, so it is LGPL - file atomicity.h for arm (GBP) */
  int result, tmp;
/* *INDENT-OFF* */
  __asm__ ("\n"
           "0:\tldr\t%1,[%2]\n\t"
           "mov\t%0,#0\n\t"
           "cmp\t%1,%4\n\t"
           "bne\t1f\n\t"
           "swp\t%0,%3,[%2]\n\t"
           "cmp\t%1,%0\n\t"
           "swpne\t%1,%0,[%2]\n\t"
           "bne\t0b\n\t"
           "mov\t%0,#1\n"
           "1:"
           : "=&r" (result), "=&r" (tmp)
           : "r" (pword), "r" (new_value), "r" (old_value)
           : "cc", "memory");
/* *INDENT-ON* */

#elif defined (__hppa__)

  /* The only atomic instruction available on hppa is "load and clear";
   * locks are initialized to 1 on hppa.  In this simple implementation
   * we use a global lock to make compare-and-swap atomic.
   */

#define __ldcw(a) ({ \
    unsigned int __ret; \
    __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
    __ret; \
  })

  static struct
  {
    volatile unsigned int __attribute__ ((aligned (16))) lock;
  } lock;

  static int lock_initialized = 0;
  int result;

  if (!lock_initialized)
    {
      lock.lock = 1;
      /* mark the spinlock as initialized, so it isn't reset on the
         next call */
      lock_initialized = 1;
    }

  /* spinlock */
  while (__ldcw (&lock.lock) == 0);
  if (*pword == old_value)
    {
      *pword = new_value;
      result = 1;
    }
  else
    {
      result = 0;
    }
  /* unlock */
  lock.lock = 1;
  /* Prevent reordering */
  __asm__ __volatile__ ("":::"memory");

#elif defined (__i386__)

  /* On the ia32, cmpxchgl has a side effect.  When swapping fails,
     the following variable contains the value that is currently in
     *pword (presumably different from old_value). */

  _svmt_word current_value;
  _svmt_u8 result;

/* *INDENT-OFF* */
  __asm__ __volatile__ ("lock\n\t"
                        "cmpxchgl %3, %1\n\t"
                        "sete %0"
                        :"=q" (result), "=m" (*pword), "=a" (current_value)
                        :"r" (new_value), "m" (*pword), "a" (old_value)
                        :"memory");
/* *INDENT-ON* */

#elif defined (__ia64__)

#include <ia64intrin.h>

  jboolean result;
  result = __sync_bool_compare_and_swap (pword, old_value, new_value);

#elif defined (__m68k__) || defined (__mc68000__)

  /* borrowed from glibc, so it is LGPL - file atomicity.h for m68k (GBP) */

  char result;
  long int readval;

/* *INDENT-OFF* */

  __asm__ __volatile__ ("cas%.l %2,%3,%1; seq %0"
                        : "=dm" (result), "=m" (*pword), "=d" (readval)
                        : "d" (new_value), "m" (*pword), "2" (old_value));
/* *INDENT-ON* */

#elif defined (__mips__) || defined (__mipsel__)

  /* borrowed from glibc, so it is LGPL - file atomicity.h for mips (GBP) */

  long int result, temp;

/* *INDENT-OFF* */
  __asm__ __volatile__
    ("/* Inline compare & swap */\n"
     "1:\n\t"
     ".set      push\n\t"
     ".set      mips2\n\t"
     "ll        %1,%5\n\t"
     "move      %0,$0\n\t"
     "bne       %1,%3,2f\n\t"
     "move      %0,%4\n\t"
     "sc        %0,%2\n\t"
     ".set      pop\n\t"
     "beqz      %0,1b\n"
     "2:\n\t"
     "/* End compare & swap */"
     : "=&r" (result), "=&r" (temp), "=m" (*pword)
     : "r" (old_value), "r" (new_value), "m" (*pword)
     : "memory");
/* *INDENT-ON* */

#elif defined (__powerpc__)

  int result;

  /*
   * Adapted from glibc-linuxthreads, file pt-machine.h
   */

/* *INDENT-OFF* */
  __asm__ __volatile__ ("0:    lwarx %0,0,%1\n\t"
                        "      xor. %0,%3,%0\n\t"
                        "      bne 1f\n\t"
                        "      stwcx. %2,0,%1\n\t"
                        "      bne- 0b\n\t"
                        "1:    ":"=&r" (result):"r" (pword), "r" (new_value),
                        "r" (old_value):"cr0", "memory");
  /* This version of __compare_and_swap is to be used when acquiring
     a lock, so we don't need to worry about whether other memory
     operations have completed, but we do need to be sure that any loads
     after this point really occur after we have acquired the lock.  */
  __asm__ __volatile__ ("isync" : : : "memory");
/* *INDENT-ON* */

  /* flip the value of result */
  result = !result;

#elif defined (_POWER)

  int result;

  /*
   * Adapted from glibc-linuxthreads, file pt-machine.h
   */

/* *INDENT-OFF* */
  __asm__ __volatile__ ("SVM0:  lwarx %0,0,%1\n\t"
                        "      xor. %0,%3,%0\n\t"
                        "      bne SVM1\n\t"
                        "      stwcx. %2,0,%1\n\t"
                        "      bne- SVM0\n\t"
                        "SVM1: ":"=&r" (result):"r" (pword), "r" (new_value),
                        "r" (old_value):"cr0", "memory");
  /* This version of __compare_and_swap is to be used when acquiring
     a lock, so we don't need to worry about whether other memory
     operations have completed, but we do need to be sure that any loads
     after this point really occur after we have acquired the lock.  */
  __asm__ __volatile__ ("isync" : : : "memory");
/* *INDENT-ON* */

  /* flip the value of result */
  result = !result;

#elif defined (__s390__)

  /* borrowed from glibc, so it is LGPL - file atomicity.h for s390-32 (GBP) */

  int result;

/* *INDENT-OFF* */
  __asm__ __volatile__ ("  la   1,%1\n"
                        "  lr   0,%2\n"
                        "  cs   0,%3,0(1)\n"
                        "  ipm  %0\n"
                        "  srl  %0,28\n"
                        "0:"
                        : "=&r" (result), "+m" (*pword)
                        : "d" (old_value) , "d" (new_value)
                        : "memory", "0", "1", "cc");
/* *INDENT-ON* */

  result = !result;

#elif defined (__sparc__)

/* Unfortunately we have to use the simplest CPU instruction set if
 * we don't want to see errors like:
 * ... (Requires v9|v9a|v9b; requested architecture is sparclite.)
 * There may be some room for improvement here.
 * Code borrowed from glibc atomicity.h for sparc32 (GBP) */

  register long int result, tmp2;

/* *INDENT-OFF* */
  static unsigned char lock;
  __asm__ __volatile__ ("1:      ldstub  [%1], %0\n\t"
                        "        cmp     %0, 0\n\t"
                        "        bne     1b\n\t"
                        "         nop"
                        : "=&r" (tmp2)
                        : "r" (&lock)
                        : "memory");
  if (*pword != old_value)
    {
      result = 0;
    }
  else
    {
      *pword = new_value;
      result = 1;
    }
  __asm__ __volatile__ ("stb     %%g0, [%0]"
                        : /* no outputs */
                        : "r" (&lock)
                        : "memory");
/* *INDENT-ON* */

#elif defined (__x86_64__)

  _svmt_word current_value;
  _svmt_u8 result;

/* *INDENT-OFF* */
  __asm__ __volatile__ ("lock\n\t"
                        "cmpxchgq %3, %1\n\t"
                        "sete %0"
                        :"=q" (result), "=m" (*pword), "=a" (current_value)
                        :"r" (new_value), "m" (*pword), "a" (old_value)
                        :"memory");
/* *INDENT-ON* */

#else
#error "SableVM is not prepared to run on this kind of system; no atomic compare-and-swap defined."
#endif

  return result ? JNI_TRUE : JNI_FALSE;
}

/*
----------------------------------------------------------------------
_svmf_calloc
----------------------------------------------------------------------
*/

/* must return aligned memory; ISO C guarantees that calloc returns
   memory suitably aligned for any built-in type */

static void *
_svmf_calloc (size_t nmemb, size_t size)
{
  return calloc (nmemb, size);
}

/*
----------------------------------------------------------------------
_svmf_realloc
----------------------------------------------------------------------
*/

/* must return aligned memory */

static void *
_svmf_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

/*
----------------------------------------------------------------------
_svmf_malloc
----------------------------------------------------------------------
*/

/* must return aligned memory */

static void *
_svmf_malloc (size_t size)
{
  return malloc (size);
}

/*
----------------------------------------------------------------------
_svmf_free
----------------------------------------------------------------------
*/

static void
_svmf_free (void *ptr)
{
  free (ptr);
}

/*
----------------------------------------------------------------------
_svmf_prepare_class_lockword
----------------------------------------------------------------------
*/

/* lockword layout (high to low bits):
     1  : fat(1) / thin(0)
     15 :
       thin:
         10 : thread ID (min value = 0x0001, max value = 0x03ff)
              (value = 0 means that the object is unlocked)
         5  : recursion count (min value = 0x000, max value = 0x01f)
       fat:
         15 : lock ID (min value = 0x00000, max value = 0x07fff)

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

     6 : start offset div alignment (overflow value = 0x03f)
     6 : end offset div alignment (overflow value = 0x03f) [add 1 if hash_moved]
     2 : no-hash(0x00) / hash-notmoved(0x01) / hash-moved(0x02)
     1 : non-array(0x0)
     1 : always 1 (to differentiate a header word from a ptr)

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

     1 : unused (0)
     3 : end offset div alignment [add 1 if hash_moved]
     7 : reference fields bit layout
     1 : info in header (0) / overflow (1)
     2 : no-hash(0x00) / hash-notmoved(0x01) / hash-moved(0x02)
     1 : non-array(0x0)
     1 : always 1 (to differentiate a header word from a ptr)

So, if a class type has at most 7 words of instance fields, the layout
of its reference fields is embedded in the object header.

#endif

*/

svm_static void
_svmf_prepare_class_lockword (_svmt_class_info *class)
{
  _svmt_word lockword = 0;

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)
  _svmt_word start = class->data.noninterface.start_offset / SVM_ALIGNMENT;
  _svmt_word end =
    (class->data.noninterface.next_offset_no_hashcode - _svmf_aligned_size_t
     (sizeof (_svmt_object_instance))) / SVM_ALIGNMENT;

  if (start > SVM_LOCKWORD_START_OVERFLOW)
    {
      start = SVM_LOCKWORD_START_OVERFLOW;
    }

  if (end > SVM_LOCKWORD_END_OVERFLOW)
    {
      end = SVM_LOCKWORD_END_OVERFLOW;
    }

  lockword = start << 10;
  lockword |= end << 4;
  lockword |= 0x01;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  _svmt_word end =
    (class->data.noninterface.next_offset_no_hashcode - _svmf_aligned_size_t
     (sizeof (_svmt_object_instance))) / sizeof (void *);
  _svmt_word offsets = 0;
  const size_t head_size =
    _svmf_aligned_size_t (sizeof (_svmt_object_instance));
  jint i;
  jint ref_field_count = class->data.noninterface.ref_field_count;
  size_t *ref_field_offsets = class->data.noninterface.ref_field_offsets;
  jboolean overflow = JNI_FALSE;

  if (end > 7)
    {
      overflow = JNI_TRUE;
    }
  else
    {
      for (i = 0; i < ref_field_count; i++)
        {
          size_t bit = (ref_field_offsets[i] - head_size) / sizeof (void *);

          assert (bit < 7);

          offsets |= ((_svmt_word) 1) << bit;
        }
    }

  if (overflow)
    {
      lockword = 1 << 4;
    }
  else
    {
      lockword = end << 12;
      lockword |= offsets << 5;
    }

  lockword |= 0x01;

#endif /* defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT) */

  assert (!_svmf_is_set_flag (class->access_flags, SVM_ACC_INTERFACE));

  class->data.noninterface.initial_lockword = lockword;
}
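
/* Worked example (illustrative values, assuming the bidirectional
   layout and SVM_ALIGNMENT == 8): for a class with start_offset == 16
   and 24 bytes of instance fields past the aligned header, start == 2
   and end == 3, so the initial lockword is
   (2 << 10) | (3 << 4) | 0x01 == 0x0831, with the hash state bits at
   no-hash (0x00) and the array bit clear.  Both offsets fit well below
   their overflow values (0x3f). */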

/*
----------------------------------------------------------------------
_svmf_prepare_array_lockword
----------------------------------------------------------------------
*/

/* lockword layout (high to low bits):
     1  : fat(1) / thin(0)
     15 :
       thin:
         10 : thread ID
         5  : recursion count
       fat:
         15 : lock ID

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

    10 : unused (0)
     4 : SVM_TYPE_XXX (base type)
     1 : array(0x1)
     1 : always 1 (to differentiate a header word from a ptr)

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

    10 : unused (0)
     4 : SVM_TYPE_XXX (base type)
     1 : array(0x1)
     1 : always 1 (to differentiate a header word from a ptr)

#endif

*/

svm_static void
_svmf_prepare_array_lockword (_svmt_array_info *array)
{
  _svmt_word lockword = 0;
  _svmt_word type =
    (array->dimensions == 1) ? array->base_type : SVM_TYPE_REFERENCE;

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  lockword |= type << 2;
  lockword |= 0x01 << 1;
  lockword |= 0x01;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  lockword |= type << 2;
  lockword |= 0x01 << 1;
  lockword |= 0x01;

#endif /* defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT) */

  array->initial_lockword = lockword;
}
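
/* Worked example: a one-dimensional primitive array records its base
   type, e.g. (SVM_TYPE_INT << 2) | (0x01 << 1) | 0x01 for an int[],
   while any array of dimension > 1 records SVM_TYPE_REFERENCE.  The
   two low bits say "this is an array" and "this is a header word, not
   a forwarding ptr".  (SVM_TYPE_INT is assumed here to be one of the
   SVM_TYPE_XXX base-type codes mentioned above.) */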

/*
----------------------------------------------------------------------
_svmf_initialize_thinlock_id
----------------------------------------------------------------------
*/

static inline void
_svmf_initialize_thinlock_id (_svmt_JNIEnv *env)
{
  assert (env->thread.id < 0x03ff);

  env->thread.thinlock_id = env->thread.id << 21;
}
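
/* Example: a thread with id 5 gets thinlock_id == 5 << 21 ==
   0x00a00000, which is exactly the thread ID field of a thin lockword
   with recursion count 0.  Acquiring an unlocked object then amounts
   to one compare-and-swap of the lockword from "extra bits only"
   (thread ID 0 == unlocked) to "thinlock_id | extra bits". */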

/*
----------------------------------------------------------------------
_svmf_lockword_get_extra_bits
----------------------------------------------------------------------
*/

inline static _svmt_word
_svmf_lockword_get_extra_bits (_svmt_word lockword)
{
  return lockword & 0x0ffff;
}

/*
----------------------------------------------------------------------
_svmf_lockword_is_thin
----------------------------------------------------------------------
*/

inline static jboolean
_svmf_lockword_is_thin (_svmt_word lockword)
{
  return (lockword & 0x80000000) == 0;
}

/*
----------------------------------------------------------------------
_svmf_lockword_is_forward_reference
----------------------------------------------------------------------
*/

inline static jboolean
_svmf_lockword_is_forward_reference (_svmt_word lockword)
{
#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  return (lockword & 0x01) == 0;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  return (lockword & 0x01) == 0;

#endif
}

/*
----------------------------------------------------------------------
_svmf_lockword_get_thinlock_id
----------------------------------------------------------------------
*/

inline static _svmt_word
_svmf_lockword_get_thinlock_id (_svmt_word lockword)
{
  return lockword & 0x7fe00000;
}

/*
----------------------------------------------------------------------
_svmf_lockword_get_thread_id
----------------------------------------------------------------------
*/

inline static jint
_svmf_lockword_get_thread_id (_svmt_word lockword)
{
  return (lockword & 0x7fe00000) >> 21;
}

/*
----------------------------------------------------------------------
_svmf_lockword_get_fatlock_index
----------------------------------------------------------------------
*/

inline static jint
_svmf_lockword_get_fatlock_index (_svmt_word lockword)
{
  return (lockword & 0x7fff0000) >> 16;
}

/*
----------------------------------------------------------------------
_svmf_lockword_get_thinlock_recursive_count
----------------------------------------------------------------------
*/

inline static jint
_svmf_lockword_get_thinlock_recursive_count (_svmt_word lockword)
{
  return (lockword & 0x001f0000) >> 16;
}

/*
----------------------------------------------------------------------
_svmf_lockword_thinlock
----------------------------------------------------------------------
*/

inline static _svmt_word
_svmf_lockword_thinlock (_svmt_word thinlock_id, jint recursive_count,
                         _svmt_word extra_bits)
{
  return thinlock_id | (((_svmt_word) recursive_count) << 16) | extra_bits;
}

/*
----------------------------------------------------------------------
_svmf_lockword_fatlock
----------------------------------------------------------------------
*/

inline static _svmt_word
_svmf_lockword_fatlock (_svmt_word fatlock_id, _svmt_word extra_bits)
{
  /* shift an unsigned word, not a plain int, to avoid signed overflow */
  return (((_svmt_word) 1) << 31) | (fatlock_id << 16) | extra_bits;
}
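
/* Consistency sketch (illustrative values): a thin lockword built with
   the constructor above round-trips through the accessors, assuming
   "extra" fits in the low 16 bits.

     w = _svmf_lockword_thinlock (5 << 21, 3, extra)

     _svmf_lockword_is_thin (w)                       -> true (bit 31 clear)
     _svmf_lockword_get_thread_id (w)                 -> 5
     _svmf_lockword_get_thinlock_recursive_count (w)  -> 3
     _svmf_lockword_get_extra_bits (w)                -> extra
*/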

/*
----------------------------------------------------------------------
_svmf_lockword_is_array
----------------------------------------------------------------------
*/

inline static jboolean
_svmf_lockword_is_array (_svmt_word lockword)
{
#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  return (lockword >> 1) & 0x01;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  return (lockword >> 1) & 0x01;

#endif
}

/*
----------------------------------------------------------------------
_svmf_lockword_get_array_type
----------------------------------------------------------------------
*/

inline static _svmt_word
_svmf_lockword_get_array_type (_svmt_word lockword)
{
  assert (_svmf_lockword_is_array (lockword));

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  return (lockword >> 2) & 0x0f;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  return (lockword >> 2) & 0x0f;

#endif
}

/*
----------------------------------------------------------------------
_svmf_lockword_get_hashstate
----------------------------------------------------------------------
*/

inline static _svmt_word
_svmf_lockword_get_hashstate (_svmt_word lockword)
{
  assert (!_svmf_lockword_is_array (lockword));

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  return (lockword >> 2) & 0x03;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  return (lockword >> 2) & 0x03;

#endif
}

/*
----------------------------------------------------------------------
_svmm_lockword_set_hashstate
----------------------------------------------------------------------
*/

#define _svmm_lockword_set_hashstate(lockword, state) \
_svmh_lockword_set_hashstate (&lockword, state)

inline static void
_svmh_lockword_set_hashstate (_svmt_word *plockword, _svmt_word state)
{
#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  *plockword = (*plockword & 0xFFFFFFF3) | (state << 2);

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  *plockword = (*plockword & 0xFFFFFFF3) | (state << 2);

#endif
}
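
/* Example: when a collector moves an object whose hashcode has already
   been taken, the state goes from hash-notmoved (0x01) to hash-moved
   (0x02), e.g. _svmm_lockword_set_hashstate (lockword, 0x02); per the
   layout comment above, the end offset then accounts for one extra
   hashcode slot ("add 1 if hash_moved"). */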

/*
----------------------------------------------------------------------
_svmf_lockword_object_get_end_offset
----------------------------------------------------------------------
*/

inline static size_t
_svmf_lockword_object_get_end_offset (_svmt_word lockword)
{
  assert (!_svmf_lockword_is_array (lockword));

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  return (((lockword >> 4) & 0x03f) * (size_t) SVM_ALIGNMENT) +
    _svmf_aligned_size_t (sizeof (_svmt_object_instance));

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  return (((lockword >> 12) & 0x07) * (size_t) SVM_ALIGNMENT) +
    _svmf_aligned_size_t (sizeof (_svmt_object_instance));

#endif
}

/*
----------------------------------------------------------------------
_svmf_lockword_object_get_start_offset
----------------------------------------------------------------------
*/

#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

inline static size_t
_svmf_lockword_object_get_start_offset (_svmt_word lockword)
{
  assert (!_svmf_lockword_is_array (lockword));

  return (((lockword >> 10) & 0x03f) * (size_t) SVM_ALIGNMENT);
}

#endif

/*
----------------------------------------------------------------------
_svmf_lockword_object_is_info_in_header
----------------------------------------------------------------------
*/

#if defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

inline static jboolean
_svmf_lockword_object_is_info_in_header (_svmt_word lockword)
{
  assert (!_svmf_lockword_is_array (lockword));

  return ((lockword >> 4) & 0x01) == 0;
}

#endif

/*
----------------------------------------------------------------------
_svmf_lockword_object_get_ref_layout
----------------------------------------------------------------------
*/

#if defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

inline static _svmt_word
_svmf_lockword_object_get_ref_layout (_svmt_word lockword)
{
  assert (!_svmf_lockword_is_array (lockword));

  return (lockword >> 5) & 0x07f;
}

#endif

/*
----------------------------------------------------------------------
_svmf_word_is_reference
----------------------------------------------------------------------
*/

inline static jboolean
_svmf_word_is_reference (_svmt_word word)
{
#if defined (_SABLEVM_BIDIRECTIONAL_OBJECT_LAYOUT)

  return (word & 0x01) == 0;

#elif defined (_SABLEVM_TRADITIONAL_OBJECT_LAYOUT)

  return (word & 0x01) == 0;

#endif
}

#endif /* supported architecture list && defined (__GNUC__) */