1 /*
2  * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "gc/shared/barrierSetAssembler.hpp"
28 #include "interpreter/interp_masm.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "interpreter/interpreterRuntime.hpp"
31 #include "interpreter/templateTable.hpp"
32 #include "memory/universe.hpp"
33 #include "oops/cpCache.hpp"
34 #include "oops/klass.inline.hpp"
35 #include "oops/methodData.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/jvmtiExport.hpp"
39 #include "prims/methodHandles.hpp"
40 #include "runtime/frame.inline.hpp"
41 #include "runtime/sharedRuntime.hpp"
42 #include "runtime/stubRoutines.hpp"
43 #include "runtime/synchronizer.hpp"
44 #include "utilities/powerOfTwo.hpp"
45 
46 #define __ _masm->
47 
48 //----------------------------------------------------------------------------------------------------
49 // Address computation
50 
51 // local variables
52 static inline Address iaddress(int n)            {
53   return Address(Rlocals, Interpreter::local_offset_in_bytes(n));
54 }
55 
56 static inline Address laddress(int n)            { return iaddress(n + 1); }
57 static inline Address haddress(int n)            { return iaddress(n + 0); }
58 
59 static inline Address faddress(int n)            { return iaddress(n); }
60 static inline Address daddress(int n)            { return laddress(n); }
61 static inline Address aaddress(int n)            { return iaddress(n); }
62 
63 
64 void TemplateTable::get_local_base_addr(Register r, Register index) {
65   __ sub(r, Rlocals, AsmOperand(index, lsl, Interpreter::logStackElementSize));
66 }
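// Note on the locals layout assumed by the helpers above and below (a
// descriptive sketch derived from the code, not new behavior): locals grow
// toward lower addresses from Rlocals, so local slot n lives at
//   Rlocals - n * Interpreter::stackElementSize
// For a category-2 value (long/double) starting at slot n, the low word is
// in slot n+1 (the lower address, see laddress) and the high word in slot n
// (see haddress).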
67 
68 Address TemplateTable::load_iaddress(Register index, Register scratch) {
69   return Address(Rlocals, index, lsl, Interpreter::logStackElementSize, basic_offset, sub_offset);
70 }
71 
72 Address TemplateTable::load_aaddress(Register index, Register scratch) {
73   return load_iaddress(index, scratch);
74 }
75 
76 Address TemplateTable::load_faddress(Register index, Register scratch) {
77 #ifdef __SOFTFP__
78   return load_iaddress(index, scratch);
79 #else
80   get_local_base_addr(scratch, index);
81   return Address(scratch);
82 #endif // __SOFTFP__
83 }
84 
85 Address TemplateTable::load_daddress(Register index, Register scratch) {
86   get_local_base_addr(scratch, index);
87   return Address(scratch, Interpreter::local_offset_in_bytes(1));
88 }
89 
90 // At top of the Java expression stack, which may be different from SP.
91 // It isn't for category 1 objects.
92 static inline Address at_tos() {
93   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(0));
94 }
95 
96 static inline Address at_tos_p1() {
97   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(1));
98 }
99 
100 static inline Address at_tos_p2() {
101   return Address(Rstack_top, Interpreter::expr_offset_in_bytes(2));
102 }
103 
104 
105 // Loads double/long local into R0_tos_lo/R1_tos_hi with two
106 // separate ldr instructions (supports nonadjacent values).
107 // Used for longs in all modes, and for doubles in SOFTFP mode.
108 void TemplateTable::load_category2_local(Register Rlocal_index, Register tmp) {
109   const Register Rlocal_base = tmp;
110   assert_different_registers(Rlocal_index, tmp);
111 
112   get_local_base_addr(Rlocal_base, Rlocal_index);
113   __ ldr(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
114   __ ldr(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
115 }
116 
117 
118 // Stores R0_tos_lo/R1_tos_hi to double/long local with two
119 // separate str instructions (supports nonadjacent values).
120 // Used for longs in all modes, and for doubles in SOFTFP mode
121 void TemplateTable::store_category2_local(Register Rlocal_index, Register tmp) {
122   const Register Rlocal_base = tmp;
123   assert_different_registers(Rlocal_index, tmp);
124 
125   get_local_base_addr(Rlocal_base, Rlocal_index);
126   __ str(R0_tos_lo, Address(Rlocal_base, Interpreter::local_offset_in_bytes(1)));
127   __ str(R1_tos_hi, Address(Rlocal_base, Interpreter::local_offset_in_bytes(0)));
128 }
129 
130 // Returns address of Java array element using temp register as address base.
131 Address TemplateTable::get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp) {
132   int logElemSize = exact_log2(type2aelembytes(elemType));
133   __ add_ptr_scaled_int32(temp, array, index, logElemSize);
134   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
135 }
136 
137 // Returns address of Java array element using temp register as offset from array base
138 Address TemplateTable::get_array_elem_addr_same_base(BasicType elemType, Register array, Register index, Register temp) {
139   int logElemSize = exact_log2(type2aelembytes(elemType));
140   if (logElemSize == 0) {
141     __ add(temp, index, arrayOopDesc::base_offset_in_bytes(elemType));
142   } else {
143     __ mov(temp, arrayOopDesc::base_offset_in_bytes(elemType));
144     __ add_ptr_scaled_int32(temp, temp, index, logElemSize);
145   }
146   return Address(array, temp);
147 }
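// Both helpers compute the same effective address; only the use of the temp
// register differs. As a sketch of the arithmetic:
//   element address = array + arrayOopDesc::base_offset_in_bytes(elemType)
//                           + (index << exact_log2(type2aelembytes(elemType)))
// get_array_elem_addr() puts the scaled array+index sum into temp, while
// get_array_elem_addr_same_base() puts only the scaled offset into temp, so
// the returned Address still carries the array oop as its base register.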
148 
149 //----------------------------------------------------------------------------------------------------
150 // Condition conversion
151 AsmCondition convNegCond(TemplateTable::Condition cc) {
152   switch (cc) {
153     case TemplateTable::equal        : return ne;
154     case TemplateTable::not_equal    : return eq;
155     case TemplateTable::less         : return ge;
156     case TemplateTable::less_equal   : return gt;
157     case TemplateTable::greater      : return le;
158     case TemplateTable::greater_equal: return lt;
159   }
160   ShouldNotReachHere();
161   return nv;
162 }
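// Usage note (describing the callers, not adding behavior): convNegCond()
// returns the ARM condition that holds when 'cc' does NOT, e.g.
// convNegCond(less) == ge. The conditional-branch templates later in this
// file use it to branch to the not-taken path and fall through on the taken
// path.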
163 
164 //----------------------------------------------------------------------------------------------------
165 // Miscellaneous helper routines
166 
167 // Store an oop (or NULL) at the address described by obj.
168 // Blows all volatile registers (R0-R3, Rtemp, LR).
169 // Also destroys new_val and obj.base().
170 static void do_oop_store(InterpreterMacroAssembler* _masm,
171                          Address obj,
172                          Register new_val,
173                          Register tmp1,
174                          Register tmp2,
175                          Register tmp3,
176                          bool is_null,
177                          DecoratorSet decorators = 0) {
178 
179   assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
180   if (is_null) {
181     __ store_heap_oop_null(obj, new_val, tmp1, tmp2, tmp3, decorators);
182   } else {
183     __ store_heap_oop(obj, new_val, tmp1, tmp2, tmp3, decorators);
184   }
185 }
186 
187 static void do_oop_load(InterpreterMacroAssembler* _masm,
188                         Register dst,
189                         Address obj,
190                         DecoratorSet decorators = 0) {
191   __ load_heap_oop(dst, obj, noreg, noreg, noreg, decorators);
192 }
193 
194 Address TemplateTable::at_bcp(int offset) {
195   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
196   return Address(Rbcp, offset);
197 }
198 
199 
200 // Blows volatile registers R0-R3, Rtemp, LR.
201 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
202                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
203                                    int byte_no) {
204   assert_different_registers(bc_reg, temp_reg);
205   if (!RewriteBytecodes)  return;
206   Label L_patch_done;
207 
208   switch (bc) {
209   case Bytecodes::_fast_aputfield:
210   case Bytecodes::_fast_bputfield:
211   case Bytecodes::_fast_zputfield:
212   case Bytecodes::_fast_cputfield:
213   case Bytecodes::_fast_dputfield:
214   case Bytecodes::_fast_fputfield:
215   case Bytecodes::_fast_iputfield:
216   case Bytecodes::_fast_lputfield:
217   case Bytecodes::_fast_sputfield:
218     {
219       // We skip bytecode quickening for putfield instructions when
220       // the put_code written to the constant pool cache is zero.
221       // This is required so that every execution of this instruction
222       // calls out to InterpreterRuntime::resolve_get_put to do
223       // additional, required work.
224       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
225       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
226       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1, sizeof(u2));
227       __ mov(bc_reg, bc);
228       __ cbz(temp_reg, L_patch_done);  // test if bytecode is zero
229     }
230     break;
231   default:
232     assert(byte_no == -1, "sanity");
233     // the pair bytecodes have already done the load.
234     if (load_bc_into_bc_reg) {
235       __ mov(bc_reg, bc);
236     }
237   }
238 
239   if (__ can_post_breakpoint()) {
240     Label L_fast_patch;
241     // if a breakpoint is present we can't rewrite the stream directly
242     __ ldrb(temp_reg, at_bcp(0));
243     __ cmp(temp_reg, Bytecodes::_breakpoint);
244     __ b(L_fast_patch, ne);
245     if (bc_reg != R3) {
246       __ mov(R3, bc_reg);
247     }
248     __ mov(R1, Rmethod);
249     __ mov(R2, Rbcp);
250     // Let breakpoint table handling rewrite to quicker bytecode
251     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R1, R2, R3);
252     __ b(L_patch_done);
253     __ bind(L_fast_patch);
254   }
255 
256 #ifdef ASSERT
257   Label L_okay;
258   __ ldrb(temp_reg, at_bcp(0));
259   __ cmp(temp_reg, (int)Bytecodes::java_code(bc));
260   __ b(L_okay, eq);
261   __ cmp(temp_reg, bc_reg);
262   __ b(L_okay, eq);
263   __ stop("patching the wrong bytecode");
264   __ bind(L_okay);
265 #endif
266 
267   // patch bytecode
268   __ strb(bc_reg, at_bcp(0));
269   __ bind(L_patch_done);
270 }
271 
272 //----------------------------------------------------------------------------------------------------
273 // Individual instructions
274 
275 void TemplateTable::nop() {
276   transition(vtos, vtos);
277   // nothing to do
278 }
279 
280 void TemplateTable::shouldnotreachhere() {
281   transition(vtos, vtos);
282   __ stop("shouldnotreachhere bytecode");
283 }
284 
285 
286 
287 void TemplateTable::aconst_null() {
288   transition(vtos, atos);
289   __ mov(R0_tos, 0);
290 }
291 
292 
293 void TemplateTable::iconst(int value) {
294   transition(vtos, itos);
295   __ mov_slow(R0_tos, value);
296 }
297 
298 
299 void TemplateTable::lconst(int value) {
300   transition(vtos, ltos);
301   assert((value == 0) || (value == 1), "unexpected long constant");
302   __ mov(R0_tos, value);
303   __ mov(R1_tos_hi, 0);
304 }
305 
306 
307 void TemplateTable::fconst(int value) {
308   transition(vtos, ftos);
309   const int zero = 0;         // 0.0f
310   const int one = 0x3f800000; // 1.0f
311   const int two = 0x40000000; // 2.0f
312 
313   switch(value) {
314   case 0:   __ mov(R0_tos, zero);   break;
315   case 1:   __ mov(R0_tos, one);    break;
316   case 2:   __ mov(R0_tos, two);    break;
317   default:  ShouldNotReachHere();   break;
318   }
319 
320 #ifndef __SOFTFP__
321   __ fmsr(S0_tos, R0_tos);
322 #endif // !__SOFTFP__
323 }
324 
325 
326 void TemplateTable::dconst(int value) {
327   transition(vtos, dtos);
328   const int one_lo = 0;            // low part of 1.0
329   const int one_hi = 0x3ff00000;   // high part of 1.0
330 
331   if (value == 0) {
332 #ifdef __SOFTFP__
333     __ mov(R0_tos_lo, 0);
334     __ mov(R1_tos_hi, 0);
335 #else
336     __ mov(R0_tmp, 0);
337     __ fmdrr(D0_tos, R0_tmp, R0_tmp);
338 #endif // __SOFTFP__
339   } else if (value == 1) {
340     __ mov(R0_tos_lo, one_lo);
341     __ mov_slow(R1_tos_hi, one_hi);
342 #ifndef __SOFTFP__
343     __ fmdrr(D0_tos, R0_tos_lo, R1_tos_hi);
344 #endif // !__SOFTFP__
345   } else {
346     ShouldNotReachHere();
347   }
348 }
349 
350 
351 void TemplateTable::bipush() {
352   transition(vtos, itos);
353   __ ldrsb(R0_tos, at_bcp(1));
354 }
355 
356 
357 void TemplateTable::sipush() {
358   transition(vtos, itos);
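  // The operand is a signed 16-bit big-endian immediate; in effect:
  //   R0_tos = (int16_t)((bcp[1] << 8) | bcp[2])
  // ldrsb sign-extends the high byte, the orr below shifts it into place.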
359   __ ldrsb(R0_tmp, at_bcp(1));
360   __ ldrb(R1_tmp, at_bcp(2));
361   __ orr(R0_tos, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
362 }
363 
364 
365 void TemplateTable::ldc(bool wide) {
366   transition(vtos, vtos);
367   Label fastCase, Condy, Done;
368 
369   const Register Rindex = R1_tmp;
370   const Register Rcpool = R2_tmp;
371   const Register Rtags  = R3_tmp;
372   const Register RtagType = R3_tmp;
373 
374   if (wide) {
375     __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
376   } else {
377     __ ldrb(Rindex, at_bcp(1));
378   }
379   __ get_cpool_and_tags(Rcpool, Rtags);
380 
381   const int base_offset = ConstantPool::header_size() * wordSize;
382   const int tags_offset = Array<u1>::base_offset_in_bytes();
383 
384   // get const type
385   __ add(Rtemp, Rtags, tags_offset);
386   __ ldrb(RtagType, Address(Rtemp, Rindex));
387   volatile_barrier(MacroAssembler::LoadLoad, Rtemp);
388 
389   // unresolved class - get the resolved class
390   __ cmp(RtagType, JVM_CONSTANT_UnresolvedClass);
391 
392   // unresolved class in error (resolution failed) - call into runtime
393   // so that the same error from the first resolution attempt is thrown.
394   __ cond_cmp(RtagType, JVM_CONSTANT_UnresolvedClassInError, ne);
395 
396   // resolved class - need to call vm to get java mirror of the class
397   __ cond_cmp(RtagType, JVM_CONSTANT_Class, ne);
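  // The cmp/cond_cmp chain above leaves "eq" set iff the tag matched any of
  // UnresolvedClass, UnresolvedClassInError or Class (each cond_cmp only
  // executes while the result is still "ne"), so the branch below is taken
  // only for non-class constants.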
398 
399   __ b(fastCase, ne);
400 
401   // slow case - call runtime
402   __ mov(R1, wide);
403   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R1);
404   __ push(atos);
405   __ b(Done);
406 
407   // int, float, String
408   __ bind(fastCase);
409 
410   __ cmp(RtagType, JVM_CONSTANT_Integer);
411   __ cond_cmp(RtagType, JVM_CONSTANT_Float, ne);
412   __ b(Condy, ne);
413 
414   // itos, ftos
415   __ add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
416   __ ldr_u32(R0_tos, Address(Rtemp, base_offset));
417 
418   // floats and ints are placed on stack in the same way, so
419   // we can use push(itos) to transfer float value without VFP
420   __ push(itos);
421   __ b(Done);
422 
423   __ bind(Condy);
424   condy_helper(Done);
425 
426   __ bind(Done);
427 }
428 
429 // Fast path for caching oop constants.
430 void TemplateTable::fast_aldc(bool wide) {
431   transition(vtos, atos);
432   int index_size = wide ? sizeof(u2) : sizeof(u1);
433   Label resolved;
434 
435   // We are resolved if the resolved reference cache entry contains a
436   // non-null object (CallSite, etc.)
437   assert_different_registers(R0_tos, R2_tmp);
438   __ get_index_at_bcp(R2_tmp, 1, R0_tos, index_size);
439   __ load_resolved_reference_at_index(R0_tos, R2_tmp);
440   __ cbnz(R0_tos, resolved);
441 
442   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
443 
444   // first time invocation - must resolve first
445   __ mov(R1, (int)bytecode());
446   __ call_VM(R0_tos, entry, R1);
447   __ bind(resolved);
448 
449   { // Check for the null sentinel.
450     // If we just called the VM, that already did the mapping for us,
451     // but it's harmless to retry.
452     Label notNull;
453     Register result = R0;
454     Register tmp = R1;
455     Register rarg = R2;
456 
457     // Stash null_sentinel address to get its value later
458     __ mov_slow(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
459     __ ldr(tmp, Address(rarg));
460     __ resolve_oop_handle(tmp);
461     __ cmp(result, tmp);
462     __ b(notNull, ne);
463     __ mov(result, 0);  // NULL object reference
464     __ bind(notNull);
465   }
466 
467   if (VerifyOops) {
468     __ verify_oop(R0_tos);
469   }
470 }
471 
472 void TemplateTable::ldc2_w() {
473   transition(vtos, vtos);
474   const Register Rtags  = R2_tmp;
475   const Register Rindex = R3_tmp;
476   const Register Rcpool = R4_tmp;
477   const Register Rbase  = R5_tmp;
478 
479   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
480 
481   __ get_cpool_and_tags(Rcpool, Rtags);
482   const int base_offset = ConstantPool::header_size() * wordSize;
483   const int tags_offset = Array<u1>::base_offset_in_bytes();
484 
485   __ add(Rbase, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
486 
487   // get type from tags
488   __ add(Rtemp, Rtags, tags_offset);
489   __ ldrb(Rtemp, Address(Rtemp, Rindex));
490 
491   Label Condy, exit;
492 #ifdef __ABI_HARD__
493   Label NotDouble;
494   __ cmp(Rtemp, JVM_CONSTANT_Double);
495   __ b(NotDouble, ne);
496   __ ldr_double(D0_tos, Address(Rbase, base_offset));
497 
498   __ push(dtos);
499   __ b(exit);
500   __ bind(NotDouble);
501 #endif
502 
503   __ cmp(Rtemp, JVM_CONSTANT_Long);
504   __ b(Condy, ne);
505   __ ldr(R0_tos_lo, Address(Rbase, base_offset + 0 * wordSize));
506   __ ldr(R1_tos_hi, Address(Rbase, base_offset + 1 * wordSize));
507   __ push(ltos);
508   __ b(exit);
509 
510   __ bind(Condy);
511   condy_helper(exit);
512 
513   __ bind(exit);
514 }
515 
516 
517 void TemplateTable::condy_helper(Label& Done)
518 {
519   Register obj   = R0_tmp;
520   Register rtmp  = R1_tmp;
521   Register flags = R2_tmp;
522   Register off   = R3_tmp;
523 
524   __ mov(rtmp, (int) bytecode());
525   __ call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rtmp);
526   __ get_vm_result_2(flags, rtmp);
527 
528   // VMr = obj = base address to find primitive value to push
529   // VMr2 = flags = (tos, off) using format of CPCE::_flags
530   __ mov(off, flags);
531 
532   __ logical_shift_left( off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
533   __ logical_shift_right(off, off, 32 - ConstantPoolCacheEntry::field_index_bits);
534 
535   const Address field(obj, off);
536 
537   __ logical_shift_right(flags, flags, ConstantPoolCacheEntry::tos_state_shift);
538   // Make sure we don't need to mask flags after the above shift
539   ConstantPoolCacheEntry::verify_tos_state_shift();
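  // Equivalently (a sketch of the decoding above):
  //   off       = flags & ((1 << ConstantPoolCacheEntry::field_index_bits) - 1)
  //   tos state = flags >> ConstantPoolCacheEntry::tos_state_shift
  // The shift-left/shift-right pair isolates the low field_index_bits bits;
  // verify_tos_state_shift() checks that no extra masking is needed.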
540 
541   switch (bytecode()) {
542     case Bytecodes::_ldc:
543     case Bytecodes::_ldc_w:
544       {
545         // tos in (itos, ftos, stos, btos, ctos, ztos)
546         Label notIntFloat, notShort, notByte, notChar, notBool;
547         __ cmp(flags, itos);
548         __ cond_cmp(flags, ftos, ne);
549         __ b(notIntFloat, ne);
550         __ ldr(R0_tos, field);
551         __ push(itos);
552         __ b(Done);
553 
554         __ bind(notIntFloat);
555         __ cmp(flags, stos);
556         __ b(notShort, ne);
557         __ ldrsh(R0_tos, field);
558         __ push(stos);
559         __ b(Done);
560 
561         __ bind(notShort);
562         __ cmp(flags, btos);
563         __ b(notByte, ne);
564         __ ldrsb(R0_tos, field);
565         __ push(btos);
566         __ b(Done);
567 
568         __ bind(notByte);
569         __ cmp(flags, ctos);
570         __ b(notChar, ne);
571         __ ldrh(R0_tos, field);
572         __ push(ctos);
573         __ b(Done);
574 
575         __ bind(notChar);
576         __ cmp(flags, ztos);
577         __ b(notBool, ne);
578         __ ldrsb(R0_tos, field);
579         __ push(ztos);
580         __ b(Done);
581 
582         __ bind(notBool);
583         break;
584       }
585 
586     case Bytecodes::_ldc2_w:
587       {
588         Label notLongDouble;
589         __ cmp(flags, ltos);
590         __ cond_cmp(flags, dtos, ne);
591         __ b(notLongDouble, ne);
592 
593         __ add(rtmp, obj, wordSize);
594         __ ldr(R0_tos_lo, Address(obj, off));
595         __ ldr(R1_tos_hi, Address(rtmp, off));
596         __ push(ltos);
597         __ b(Done);
598 
599         __ bind(notLongDouble);
600 
601         break;
602       }
603 
604     default:
605       ShouldNotReachHere();
606     }
607 
608     __ stop("bad ldc/condy");
609 }
610 
611 
612 void TemplateTable::locals_index(Register reg, int offset) {
613   __ ldrb(reg, at_bcp(offset));
614 }
615 
616 void TemplateTable::iload() {
617   iload_internal();
618 }
619 
620 void TemplateTable::nofast_iload() {
621   iload_internal(may_not_rewrite);
622 }
623 
624 void TemplateTable::iload_internal(RewriteControl rc) {
625   transition(vtos, itos);
626 
627   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
628     Label rewrite, done;
629     const Register next_bytecode = R1_tmp;
630     const Register target_bytecode = R2_tmp;
631 
632     // get next byte
633     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
634     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
635     // last two iloads in a pair.  Comparing against fast_iload means that
636     // the next bytecode is neither an iload nor a caload, and therefore
637     // an iload pair.
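    // Summary of the rewrite decisions below:
    //   next bytecode    action for the current _iload
    //   _iload           leave as is (rewritten when the next iload is reached)
    //   _fast_iload      rewrite to _fast_iload2
    //   _caload          rewrite to _fast_icaload
    //   anything else    rewrite to _fast_iload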
638     __ cmp(next_bytecode, Bytecodes::_iload);
639     __ b(done, eq);
640 
641     __ cmp(next_bytecode, Bytecodes::_fast_iload);
642     __ mov(target_bytecode, Bytecodes::_fast_iload2);
643     __ b(rewrite, eq);
644 
645     // if _caload, rewrite to fast_icaload
646     __ cmp(next_bytecode, Bytecodes::_caload);
647     __ mov(target_bytecode, Bytecodes::_fast_icaload);
648     __ b(rewrite, eq);
649 
650     // rewrite so iload doesn't check again.
651     __ mov(target_bytecode, Bytecodes::_fast_iload);
652 
653     // rewrite
654     // R2: fast bytecode
655     __ bind(rewrite);
656     patch_bytecode(Bytecodes::_iload, target_bytecode, Rtemp, false);
657     __ bind(done);
658   }
659 
660   // Get the local value into tos
661   const Register Rlocal_index = R1_tmp;
662   locals_index(Rlocal_index);
663   Address local = load_iaddress(Rlocal_index, Rtemp);
664   __ ldr_s32(R0_tos, local);
665 }
666 
667 
668 void TemplateTable::fast_iload2() {
669   transition(vtos, itos);
670   const Register Rlocal_index = R1_tmp;
671 
672   locals_index(Rlocal_index);
673   Address local = load_iaddress(Rlocal_index, Rtemp);
674   __ ldr_s32(R0_tos, local);
675   __ push(itos);
676 
677   locals_index(Rlocal_index, 3);
678   local = load_iaddress(Rlocal_index, Rtemp);
679   __ ldr_s32(R0_tos, local);
680 }
681 
682 void TemplateTable::fast_iload() {
683   transition(vtos, itos);
684   const Register Rlocal_index = R1_tmp;
685 
686   locals_index(Rlocal_index);
687   Address local = load_iaddress(Rlocal_index, Rtemp);
688   __ ldr_s32(R0_tos, local);
689 }
690 
691 
692 void TemplateTable::lload() {
693   transition(vtos, ltos);
694   const Register Rlocal_index = R2_tmp;
695 
696   locals_index(Rlocal_index);
697   load_category2_local(Rlocal_index, R3_tmp);
698 }
699 
700 
701 void TemplateTable::fload() {
702   transition(vtos, ftos);
703   const Register Rlocal_index = R2_tmp;
704 
705   // Get the local value into tos
706   locals_index(Rlocal_index);
707   Address local = load_faddress(Rlocal_index, Rtemp);
708 #ifdef __SOFTFP__
709   __ ldr(R0_tos, local);
710 #else
711   __ ldr_float(S0_tos, local);
712 #endif // __SOFTFP__
713 }
714 
715 
716 void TemplateTable::dload() {
717   transition(vtos, dtos);
718   const Register Rlocal_index = R2_tmp;
719 
720   locals_index(Rlocal_index);
721 
722 #ifdef __SOFTFP__
723   load_category2_local(Rlocal_index, R3_tmp);
724 #else
725   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
726 #endif // __SOFTFP__
727 }
728 
729 
730 void TemplateTable::aload() {
731   transition(vtos, atos);
732   const Register Rlocal_index = R1_tmp;
733 
734   locals_index(Rlocal_index);
735   Address local = load_aaddress(Rlocal_index, Rtemp);
736   __ ldr(R0_tos, local);
737 }
738 
739 
740 void TemplateTable::locals_index_wide(Register reg) {
741   assert_different_registers(reg, Rtemp);
742   __ ldrb(Rtemp, at_bcp(2));
743   __ ldrb(reg, at_bcp(3));
744   __ orr(reg, reg, AsmOperand(Rtemp, lsl, 8));
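  // i.e. reg = (bcp[2] << 8) | bcp[3] -- the wide local index is an
  // unsigned 16-bit big-endian operand.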
745 }
746 
747 
748 void TemplateTable::wide_iload() {
749   transition(vtos, itos);
750   const Register Rlocal_index = R2_tmp;
751 
752   locals_index_wide(Rlocal_index);
753   Address local = load_iaddress(Rlocal_index, Rtemp);
754   __ ldr_s32(R0_tos, local);
755 }
756 
757 
758 void TemplateTable::wide_lload() {
759   transition(vtos, ltos);
760   const Register Rlocal_index = R2_tmp;
761   const Register Rlocal_base = R3_tmp;
762 
763   locals_index_wide(Rlocal_index);
764   load_category2_local(Rlocal_index, R3_tmp);
765 }
766 
767 
768 void TemplateTable::wide_fload() {
769   transition(vtos, ftos);
770   const Register Rlocal_index = R2_tmp;
771 
772   locals_index_wide(Rlocal_index);
773   Address local = load_faddress(Rlocal_index, Rtemp);
774 #ifdef __SOFTFP__
775   __ ldr(R0_tos, local);
776 #else
777   __ ldr_float(S0_tos, local);
778 #endif // __SOFTFP__
779 }
780 
781 
782 void TemplateTable::wide_dload() {
783   transition(vtos, dtos);
784   const Register Rlocal_index = R2_tmp;
785 
786   locals_index_wide(Rlocal_index);
787 #ifdef __SOFTFP__
788   load_category2_local(Rlocal_index, R3_tmp);
789 #else
790   __ ldr_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
791 #endif // __SOFTFP__
792 }
793 
794 
795 void TemplateTable::wide_aload() {
796   transition(vtos, atos);
797   const Register Rlocal_index = R2_tmp;
798 
799   locals_index_wide(Rlocal_index);
800   Address local = load_aaddress(Rlocal_index, Rtemp);
801   __ ldr(R0_tos, local);
802 }
803 
804 void TemplateTable::index_check(Register array, Register index) {
805   // Pop ptr into array
806   __ pop_ptr(array);
807   index_check_without_pop(array, index);
808 }
809 
810 void TemplateTable::index_check_without_pop(Register array, Register index) {
811   assert_different_registers(array, index, Rtemp);
812   // check array
813   __ null_check(array, Rtemp, arrayOopDesc::length_offset_in_bytes());
814   // check index
815   __ ldr_s32(Rtemp, Address(array, arrayOopDesc::length_offset_in_bytes()));
816   __ cmp_32(index, Rtemp);
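  // Note: this is an unsigned comparison, so a negative index shows up as a
  // huge unsigned value and the single "hs" (unsigned >=) test below catches
  // both index < 0 and index >= length.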
817   if (index != R4_ArrayIndexOutOfBounds_index) {
818     // convention with generate_ArrayIndexOutOfBounds_handler()
819     __ mov(R4_ArrayIndexOutOfBounds_index, index, hs);
820   }
821   __ mov(R1, array, hs);
822   __ b(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, hs);
823 }
824 
825 
826 void TemplateTable::iaload() {
827   transition(itos, itos);
828   const Register Rarray = R1_tmp;
829   const Register Rindex = R0_tos;
830 
831   index_check(Rarray, Rindex);
832   Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
833   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
834 }
835 
836 
837 void TemplateTable::laload() {
838   transition(itos, ltos);
839   const Register Rarray = R1_tmp;
840   const Register Rindex = R0_tos;
841 
842   index_check(Rarray, Rindex);
843 
844   Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
845   __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg);
846 }
847 
848 
849 void TemplateTable::faload() {
850   transition(itos, ftos);
851   const Register Rarray = R1_tmp;
852   const Register Rindex = R0_tos;
853 
854   index_check(Rarray, Rindex);
855 
856   Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
857   __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg);
858 }
859 
860 
861 void TemplateTable::daload() {
862   transition(itos, dtos);
863   const Register Rarray = R1_tmp;
864   const Register Rindex = R0_tos;
865 
866   index_check(Rarray, Rindex);
867 
868   Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
869   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg);
870 }
871 
872 
873 void TemplateTable::aaload() {
874   transition(itos, atos);
875   const Register Rarray = R1_tmp;
876   const Register Rindex = R0_tos;
877 
878   index_check(Rarray, Rindex);
879   do_oop_load(_masm, R0_tos, get_array_elem_addr_same_base(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
880 }
881 
882 
883 void TemplateTable::baload() {
884   transition(itos, itos);
885   const Register Rarray = R1_tmp;
886   const Register Rindex = R0_tos;
887 
888   index_check(Rarray, Rindex);
889   Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
890   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
891 }
892 
893 
894 void TemplateTable::caload() {
895   transition(itos, itos);
896   const Register Rarray = R1_tmp;
897   const Register Rindex = R0_tos;
898 
899   index_check(Rarray, Rindex);
900   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
901   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
902 }
903 
904 
905 // iload followed by caload frequent pair
906 void TemplateTable::fast_icaload() {
907   transition(vtos, itos);
908   const Register Rlocal_index = R1_tmp;
909   const Register Rarray = R1_tmp;
910   const Register Rindex = R4_tmp; // index_check prefers index on R4
911   assert_different_registers(Rlocal_index, Rindex);
912   assert_different_registers(Rarray, Rindex);
913 
914   // load index out of locals
915   locals_index(Rlocal_index);
916   Address local = load_iaddress(Rlocal_index, Rtemp);
917   __ ldr_s32(Rindex, local);
918 
919   // get array element
920   index_check(Rarray, Rindex);
921   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
922   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
923 }
924 
925 
926 void TemplateTable::saload() {
927   transition(itos, itos);
928   const Register Rarray = R1_tmp;
929   const Register Rindex = R0_tos;
930 
931   index_check(Rarray, Rindex);
932   Address addr = get_array_elem_addr_same_base(T_SHORT, Rarray, Rindex, Rtemp);
933   __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
934 }
935 
936 
937 void TemplateTable::iload(int n) {
938   transition(vtos, itos);
939   __ ldr_s32(R0_tos, iaddress(n));
940 }
941 
942 
943 void TemplateTable::lload(int n) {
944   transition(vtos, ltos);
945   __ ldr(R0_tos_lo, laddress(n));
946   __ ldr(R1_tos_hi, haddress(n));
947 }
948 
949 
950 void TemplateTable::fload(int n) {
951   transition(vtos, ftos);
952 #ifdef __SOFTFP__
953   __ ldr(R0_tos, faddress(n));
954 #else
955   __ ldr_float(S0_tos, faddress(n));
956 #endif // __SOFTFP__
957 }
958 
959 
960 void TemplateTable::dload(int n) {
961   transition(vtos, dtos);
962 #ifdef __SOFTFP__
963   __ ldr(R0_tos_lo, laddress(n));
964   __ ldr(R1_tos_hi, haddress(n));
965 #else
966   __ ldr_double(D0_tos, daddress(n));
967 #endif // __SOFTFP__
968 }
969 
970 
971 void TemplateTable::aload(int n) {
972   transition(vtos, atos);
973   __ ldr(R0_tos, aaddress(n));
974 }
975 
976 void TemplateTable::aload_0() {
977   aload_0_internal();
978 }
979 
980 void TemplateTable::nofast_aload_0() {
981   aload_0_internal(may_not_rewrite);
982 }
983 
984 void TemplateTable::aload_0_internal(RewriteControl rc) {
985   transition(vtos, atos);
986   // According to bytecode histograms, the pairs:
987   //
988   // _aload_0, _fast_igetfield
989   // _aload_0, _fast_agetfield
990   // _aload_0, _fast_fgetfield
991   //
992   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
993   // bytecode checks if the next bytecode is either _fast_igetfield,
994   // _fast_agetfield or _fast_fgetfield and then rewrites the
995   // current bytecode into a pair bytecode; otherwise it rewrites the current
996   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
997   //
998   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
999   //       otherwise we may miss an opportunity for a pair.
1000   //
1001   // Also rewrite frequent pairs
1002   //   aload_0, aload_1
1003   //   aload_0, iload_1
1004   // These bytecodes with a small amount of code are most profitable to rewrite
1005   if ((rc == may_rewrite) && __ rewrite_frequent_pairs()) {
1006     Label rewrite, done;
1007     const Register next_bytecode = R1_tmp;
1008     const Register target_bytecode = R2_tmp;
1009 
1010     // get next byte
1011     __ ldrb(next_bytecode, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
1012 
1013     // if _getfield then wait with rewrite
1014     __ cmp(next_bytecode, Bytecodes::_getfield);
1015     __ b(done, eq);
1016 
1017     // if _igetfield then rewrite to _fast_iaccess_0
1018     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1019     __ cmp(next_bytecode, Bytecodes::_fast_igetfield);
1020     __ mov(target_bytecode, Bytecodes::_fast_iaccess_0);
1021     __ b(rewrite, eq);
1022 
1023     // if _agetfield then rewrite to _fast_aaccess_0
1024     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1025     __ cmp(next_bytecode, Bytecodes::_fast_agetfield);
1026     __ mov(target_bytecode, Bytecodes::_fast_aaccess_0);
1027     __ b(rewrite, eq);
1028 
1029     // if _fgetfield then rewrite to _fast_faccess_0, else rewrite to _fast_aload_0
1030     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
1031     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
1032 
1033     __ cmp(next_bytecode, Bytecodes::_fast_fgetfield);
1034     __ mov(target_bytecode, Bytecodes::_fast_faccess_0, eq);
1035     __ mov(target_bytecode, Bytecodes::_fast_aload_0, ne);
1036 
1037     // rewrite
1038     __ bind(rewrite);
1039     patch_bytecode(Bytecodes::_aload_0, target_bytecode, Rtemp, false);
1040 
1041     __ bind(done);
1042   }
1043 
1044   aload(0);
1045 }
1046 
1047 void TemplateTable::istore() {
1048   transition(itos, vtos);
1049   const Register Rlocal_index = R2_tmp;
1050 
1051   locals_index(Rlocal_index);
1052   Address local = load_iaddress(Rlocal_index, Rtemp);
1053   __ str_32(R0_tos, local);
1054 }
1055 
1056 
1057 void TemplateTable::lstore() {
1058   transition(ltos, vtos);
1059   const Register Rlocal_index = R2_tmp;
1060 
1061   locals_index(Rlocal_index);
1062   store_category2_local(Rlocal_index, R3_tmp);
1063 }
1064 
1065 
1066 void TemplateTable::fstore() {
1067   transition(ftos, vtos);
1068   const Register Rlocal_index = R2_tmp;
1069 
1070   locals_index(Rlocal_index);
1071   Address local = load_faddress(Rlocal_index, Rtemp);
1072 #ifdef __SOFTFP__
1073   __ str(R0_tos, local);
1074 #else
1075   __ str_float(S0_tos, local);
1076 #endif // __SOFTFP__
1077 }
1078 
1079 
1080 void TemplateTable::dstore() {
1081   transition(dtos, vtos);
1082   const Register Rlocal_index = R2_tmp;
1083 
1084   locals_index(Rlocal_index);
1085 
1086 #ifdef __SOFTFP__
1087   store_category2_local(Rlocal_index, R3_tmp);
1088 #else
1089   __ str_double(D0_tos, load_daddress(Rlocal_index, Rtemp));
1090 #endif // __SOFTFP__
1091 }
1092 
1093 
1094 void TemplateTable::astore() {
1095   transition(vtos, vtos);
1096   const Register Rlocal_index = R1_tmp;
1097 
1098   __ pop_ptr(R0_tos);
1099   locals_index(Rlocal_index);
1100   Address local = load_aaddress(Rlocal_index, Rtemp);
1101   __ str(R0_tos, local);
1102 }
1103 
1104 
1105 void TemplateTable::wide_istore() {
1106   transition(vtos, vtos);
1107   const Register Rlocal_index = R2_tmp;
1108 
1109   __ pop_i(R0_tos);
1110   locals_index_wide(Rlocal_index);
1111   Address local = load_iaddress(Rlocal_index, Rtemp);
1112   __ str_32(R0_tos, local);
1113 }
1114 
1115 
1116 void TemplateTable::wide_lstore() {
1117   transition(vtos, vtos);
1118   const Register Rlocal_index = R2_tmp;
1119   const Register Rlocal_base = R3_tmp;
1120 
1121   __ pop_l(R0_tos_lo, R1_tos_hi);
1122 
1123   locals_index_wide(Rlocal_index);
1124   store_category2_local(Rlocal_index, R3_tmp);
1125 }
1126 
1127 
1128 void TemplateTable::wide_fstore() {
1129   wide_istore();
1130 }
1131 
1132 
1133 void TemplateTable::wide_dstore() {
1134   wide_lstore();
1135 }
1136 
1137 
1138 void TemplateTable::wide_astore() {
1139   transition(vtos, vtos);
1140   const Register Rlocal_index = R2_tmp;
1141 
1142   __ pop_ptr(R0_tos);
1143   locals_index_wide(Rlocal_index);
1144   Address local = load_aaddress(Rlocal_index, Rtemp);
1145   __ str(R0_tos, local);
1146 }
1147 
1148 
1149 void TemplateTable::iastore() {
1150   transition(itos, vtos);
1151   const Register Rindex = R4_tmp; // index_check prefers index in R4
1152   const Register Rarray = R3_tmp;
1153   // R0_tos: value
1154 
1155   __ pop_i(Rindex);
1156   index_check(Rarray, Rindex);
1157   Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
1158   __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1159 }
1160 
1161 
1162 void TemplateTable::lastore() {
1163   transition(ltos, vtos);
1164   const Register Rindex = R4_tmp; // index_check prefers index in R4
1165   const Register Rarray = R3_tmp;
1166   // R0_tos_lo:R1_tos_hi: value
1167 
1168   __ pop_i(Rindex);
1169   index_check(Rarray, Rindex);
1170 
1171   Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
1172   __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg, false);
1173 }
1174 
1175 
1176 void TemplateTable::fastore() {
1177   transition(ftos, vtos);
1178   const Register Rindex = R4_tmp; // index_check prefers index in R4
1179   const Register Rarray = R3_tmp;
1180   // S0_tos/R0_tos: value
1181 
1182   __ pop_i(Rindex);
1183   index_check(Rarray, Rindex);
1184   Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
1185   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg, false);
1186 }
1187 
1188 
1189 void TemplateTable::dastore() {
1190   transition(dtos, vtos);
1191   const Register Rindex = R4_tmp; // index_check prefers index in R4
1192   const Register Rarray = R3_tmp;
1193   // D0_tos / R0_tos_lo:R1_tos_hi: value
1194 
1195   __ pop_i(Rindex);
1196   index_check(Rarray, Rindex);
1197 
1198   Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
1199   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg, false);
1200 }
1201 
1202 
1203 void TemplateTable::aastore() {
1204   transition(vtos, vtos);
1205   Label is_null, throw_array_store, done;
1206 
1207   const Register Raddr_1   = R1_tmp;
1208   const Register Rvalue_2  = R2_tmp;
1209   const Register Rarray_3  = R3_tmp;
1210   const Register Rindex_4  = R4_tmp;   // preferred by index_check_without_pop()
1211   const Register Rsub_5    = R5_tmp;
1212   const Register Rsuper_LR = LR_tmp;
1213 
1214   // stack: ..., array, index, value
1215   __ ldr(Rvalue_2, at_tos());     // Value
1216   __ ldr_s32(Rindex_4, at_tos_p1());  // Index
1217   __ ldr(Rarray_3, at_tos_p2());  // Array
1218 
1219   index_check_without_pop(Rarray_3, Rindex_4);
1220 
1221   // Compute the array base
1222   __ add(Raddr_1, Rarray_3, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1223 
1224   // do array store check - check for NULL value first
1225   __ cbz(Rvalue_2, is_null);
1226 
1227   // Load subklass
1228   __ load_klass(Rsub_5, Rvalue_2);
1229   // Load superklass
1230   __ load_klass(Rtemp, Rarray_3);
1231   __ ldr(Rsuper_LR, Address(Rtemp, ObjArrayKlass::element_klass_offset()));
1232 
1233   __ gen_subtype_check(Rsub_5, Rsuper_LR, throw_array_store, R0_tmp, R3_tmp);
1234   // Come here on success
1235 
1236   // Store value
1237   __ add(Raddr_1, Raddr_1, AsmOperand(Rindex_4, lsl, LogBytesPerHeapOop));
1238 
1239   // Now store using the appropriate barrier
1240   do_oop_store(_masm, Raddr_1, Rvalue_2, Rtemp, R0_tmp, R3_tmp, false, IS_ARRAY);
1241   __ b(done);
1242 
1243   __ bind(throw_array_store);
1244 
1245   // Come here on failure of subtype check
1246   __ profile_typecheck_failed(R0_tmp);
1247 
1248   // object is at TOS
1249   __ b(Interpreter::_throw_ArrayStoreException_entry);
1250 
1251   // Have a NULL in Rvalue_2, store NULL at array[index].
1252   __ bind(is_null);
1253   __ profile_null_seen(R0_tmp);
1254 
1255   // Store a NULL
1256   do_oop_store(_masm, Address::indexed_oop(Raddr_1, Rindex_4), Rvalue_2, Rtemp, R0_tmp, R3_tmp, true, IS_ARRAY);
1257 
1258   // Pop stack arguments
1259   __ bind(done);
1260   __ add(Rstack_top, Rstack_top, 3 * Interpreter::stackElementSize);
1261 }
1262 
1263 
1264 void TemplateTable::bastore() {
1265   transition(itos, vtos);
1266   const Register Rindex = R4_tmp; // index_check prefers index in R4
1267   const Register Rarray = R3_tmp;
1268   // R0_tos: value
1269 
1270   __ pop_i(Rindex);
1271   index_check(Rarray, Rindex);
1272 
1273   // Need to check whether array is boolean or byte
1274   // since both types share the bastore bytecode.
1275   __ load_klass(Rtemp, Rarray);
1276   __ ldr_u32(Rtemp, Address(Rtemp, Klass::layout_helper_offset()));
1277   Label L_skip;
1278   __ tst(Rtemp, Klass::layout_helper_boolean_diffbit());
1279   __ b(L_skip, eq);
1280   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
1281   __ bind(L_skip);
1282   Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
1283   __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1284 }
1285 
1286 
1287 void TemplateTable::castore() {
1288   transition(itos, vtos);
1289   const Register Rindex = R4_tmp; // index_check prefers index in R4
1290   const Register Rarray = R3_tmp;
1291   // R0_tos: value
1292 
1293   __ pop_i(Rindex);
1294   index_check(Rarray, Rindex);
1295   Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
1296   __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
1297 }
1298 
1299 
1300 void TemplateTable::sastore() {
1301   assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) ==
1302            arrayOopDesc::base_offset_in_bytes(T_SHORT),
1303          "base offsets for char and short should be equal");
1304   castore();
1305 }
1306 
1307 
1308 void TemplateTable::istore(int n) {
1309   transition(itos, vtos);
1310   __ str_32(R0_tos, iaddress(n));
1311 }
1312 
1313 
1314 void TemplateTable::lstore(int n) {
1315   transition(ltos, vtos);
1316   __ str(R0_tos_lo, laddress(n));
1317   __ str(R1_tos_hi, haddress(n));
1318 }
1319 
1320 
1321 void TemplateTable::fstore(int n) {
1322   transition(ftos, vtos);
1323 #ifdef __SOFTFP__
1324   __ str(R0_tos, faddress(n));
1325 #else
1326   __ str_float(S0_tos, faddress(n));
1327 #endif // __SOFTFP__
1328 }
1329 
1330 
1331 void TemplateTable::dstore(int n) {
1332   transition(dtos, vtos);
1333 #ifdef __SOFTFP__
1334   __ str(R0_tos_lo, laddress(n));
1335   __ str(R1_tos_hi, haddress(n));
1336 #else
1337   __ str_double(D0_tos, daddress(n));
1338 #endif // __SOFTFP__
1339 }
1340 
1341 
1342 void TemplateTable::astore(int n) {
1343   transition(vtos, vtos);
1344   __ pop_ptr(R0_tos);
1345   __ str(R0_tos, aaddress(n));
1346 }
1347 
1348 
1349 void TemplateTable::pop() {
1350   transition(vtos, vtos);
1351   __ add(Rstack_top, Rstack_top, Interpreter::stackElementSize);
1352 }
1353 
1354 
1355 void TemplateTable::pop2() {
1356   transition(vtos, vtos);
1357   __ add(Rstack_top, Rstack_top, 2*Interpreter::stackElementSize);
1358 }
1359 
1360 
1361 void TemplateTable::dup() {
1362   transition(vtos, vtos);
1363   // stack: ..., a
1364   __ load_ptr(0, R0_tmp);
1365   __ push_ptr(R0_tmp);
1366   // stack: ..., a, a
1367 }
1368 
1369 
1370 void TemplateTable::dup_x1() {
1371   transition(vtos, vtos);
1372   // stack: ..., a, b
1373   __ load_ptr(0, R0_tmp);  // load b
1374   __ load_ptr(1, R2_tmp);  // load a
1375   __ store_ptr(1, R0_tmp); // store b
1376   __ store_ptr(0, R2_tmp); // store a
1377   __ push_ptr(R0_tmp);     // push b
1378   // stack: ..., b, a, b
1379 }
1380 
1381 
1382 void TemplateTable::dup_x2() {
1383   transition(vtos, vtos);
1384   // stack: ..., a, b, c
1385   __ load_ptr(0, R0_tmp);   // load c
1386   __ load_ptr(1, R2_tmp);   // load b
1387   __ load_ptr(2, R4_tmp);   // load a
1388 
1389   __ push_ptr(R0_tmp);      // push c
1390 
1391   // stack: ..., a, b, c, c
1392   __ store_ptr(1, R2_tmp);  // store b
1393   __ store_ptr(2, R4_tmp);  // store a
1394   __ store_ptr(3, R0_tmp);  // store c
1395   // stack: ..., c, a, b, c
1396 }
1397 
1398 
1399 void TemplateTable::dup2() {
1400   transition(vtos, vtos);
1401   // stack: ..., a, b
1402   __ load_ptr(1, R0_tmp);  // load a
1403   __ push_ptr(R0_tmp);     // push a
1404   __ load_ptr(1, R0_tmp);  // load b
1405   __ push_ptr(R0_tmp);     // push b
1406   // stack: ..., a, b, a, b
1407 }
1408 
1409 
1410 void TemplateTable::dup2_x1() {
1411   transition(vtos, vtos);
1412 
1413   // stack: ..., a, b, c
1414   __ load_ptr(0, R4_tmp);  // load c
1415   __ load_ptr(1, R2_tmp);  // load b
1416   __ load_ptr(2, R0_tmp);  // load a
1417 
1418   __ push_ptr(R2_tmp);     // push b
1419   __ push_ptr(R4_tmp);     // push c
1420 
1421   // stack: ..., a, b, c, b, c
1422 
1423   __ store_ptr(2, R0_tmp);  // store a
1424   __ store_ptr(3, R4_tmp);  // store c
1425   __ store_ptr(4, R2_tmp);  // store b
1426 
1427   // stack: ..., b, c, a, b, c
1428 }
1429 
1430 
1431 void TemplateTable::dup2_x2() {
1432   transition(vtos, vtos);
1433   // stack: ..., a, b, c, d
1434   __ load_ptr(0, R0_tmp);  // load d
1435   __ load_ptr(1, R2_tmp);  // load c
1436   __ push_ptr(R2_tmp);     // push c
1437   __ push_ptr(R0_tmp);     // push d
1438   // stack: ..., a, b, c, d, c, d
1439   __ load_ptr(4, R4_tmp);  // load b
1440   __ store_ptr(4, R0_tmp); // store d in b
1441   __ store_ptr(2, R4_tmp); // store b in d
1442   // stack: ..., a, d, c, b, c, d
1443   __ load_ptr(5, R4_tmp);  // load a
1444   __ store_ptr(5, R2_tmp); // store c in a
1445   __ store_ptr(3, R4_tmp); // store a in c
1446   // stack: ..., c, d, a, b, c, d
1447 }
1448 
1449 
1450 void TemplateTable::swap() {
1451   transition(vtos, vtos);
1452   // stack: ..., a, b
1453   __ load_ptr(1, R0_tmp);  // load a
1454   __ load_ptr(0, R2_tmp);  // load b
1455   __ store_ptr(0, R0_tmp); // store a in b
1456   __ store_ptr(1, R2_tmp); // store b in a
1457   // stack: ..., b, a
1458 }
1459 
1460 
1461 void TemplateTable::iop2(Operation op) {
1462   transition(itos, itos);
1463   const Register arg1 = R1_tmp;
1464   const Register arg2 = R0_tos;
1465 
1466   __ pop_i(arg1);
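  // For the shift cases the count is masked with 0x1f first: the JVM uses
  // only the low five bits of the shift distance for 32-bit shifts, whereas
  // an ARM register-specified shift uses the low eight bits (counts >= 32
  // would otherwise produce 0 or all sign bits).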
1467   switch (op) {
1468     case add  : __ add_32 (R0_tos, arg1, arg2); break;
1469     case sub  : __ sub_32 (R0_tos, arg1, arg2); break;
1470     case mul  : __ mul_32 (R0_tos, arg1, arg2); break;
1471     case _and : __ and_32 (R0_tos, arg1, arg2); break;
1472     case _or  : __ orr_32 (R0_tos, arg1, arg2); break;
1473     case _xor : __ eor_32 (R0_tos, arg1, arg2); break;
1474     case shl  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsl, arg2)); break;
1475     case shr  : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, asr, arg2)); break;
1476     case ushr : __ andr(arg2, arg2, 0x1f); __ mov (R0_tos, AsmOperand(arg1, lsr, arg2)); break;
1477     default   : ShouldNotReachHere();
1478   }
1479 }
1480 
1481 
1482 void TemplateTable::lop2(Operation op) {
1483   transition(ltos, ltos);
1484   const Register arg1_lo = R2_tmp;
1485   const Register arg1_hi = R3_tmp;
1486   const Register arg2_lo = R0_tos_lo;
1487   const Register arg2_hi = R1_tos_hi;
1488 
1489   __ pop_l(arg1_lo, arg1_hi);
1490   switch (op) {
1491     case add : __ adds(R0_tos_lo, arg1_lo, arg2_lo); __ adc (R1_tos_hi, arg1_hi, arg2_hi); break;
1492     case sub : __ subs(R0_tos_lo, arg1_lo, arg2_lo); __ sbc (R1_tos_hi, arg1_hi, arg2_hi); break;
1493     case _and: __ andr(R0_tos_lo, arg1_lo, arg2_lo); __ andr(R1_tos_hi, arg1_hi, arg2_hi); break;
1494     case _or : __ orr (R0_tos_lo, arg1_lo, arg2_lo); __ orr (R1_tos_hi, arg1_hi, arg2_hi); break;
1495     case _xor: __ eor (R0_tos_lo, arg1_lo, arg2_lo); __ eor (R1_tos_hi, arg1_hi, arg2_hi); break;
1496     default : ShouldNotReachHere();
1497   }
1498 }
1499 
1500 
1501 void TemplateTable::idiv() {
1502   transition(itos, itos);
1503   __ mov(R2, R0_tos);
1504   __ pop_i(R0);
1505   // R0 - dividend
1506   // R2 - divisor
1507   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1508   // R1 - result
1509   __ mov(R0_tos, R1);
1510 }
1511 
1512 
1513 void TemplateTable::irem() {
1514   transition(itos, itos);
1515   __ mov(R2, R0_tos);
1516   __ pop_i(R0);
1517   // R0 - dividend
1518   // R2 - divisor
1519   __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::none);
1520   // R0 - remainder
1521 }
1522 
1523 
1524 void TemplateTable::lmul() {
1525   transition(ltos, ltos);
1526   const Register arg1_lo = R0_tos_lo;
1527   const Register arg1_hi = R1_tos_hi;
1528   const Register arg2_lo = R2_tmp;
1529   const Register arg2_hi = R3_tmp;
1530 
1531   __ pop_l(arg2_lo, arg2_hi);
1532 
1533   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lmul), arg1_lo, arg1_hi, arg2_lo, arg2_hi);
1534 }
1535 
1536 
1537 void TemplateTable::ldiv() {
1538   transition(ltos, ltos);
1539   const Register x_lo = R2_tmp;
1540   const Register x_hi = R3_tmp;
1541   const Register y_lo = R0_tos_lo;
1542   const Register y_hi = R1_tos_hi;
1543 
1544   __ pop_l(x_lo, x_hi);
1545 
1546   // check if y = 0
1547   __ orrs(Rtemp, y_lo, y_hi);
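  // orrs sets the Z flag only when both halves of the divisor are zero, so
  // the eq-conditional call below throws ArithmeticException exactly for a
  // zero divisor and is skipped otherwise.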
1548   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1549   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), y_lo, y_hi, x_lo, x_hi);
1550 }
1551 
1552 
1553 void TemplateTable::lrem() {
1554   transition(ltos, ltos);
1555   const Register x_lo = R2_tmp;
1556   const Register x_hi = R3_tmp;
1557   const Register y_lo = R0_tos_lo;
1558   const Register y_hi = R1_tos_hi;
1559 
1560   __ pop_l(x_lo, x_hi);
1561 
1562   // check if y = 0
1563   __ orrs(Rtemp, y_lo, y_hi);
1564   __ call(Interpreter::_throw_ArithmeticException_entry, relocInfo::none, eq);
1565   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), y_lo, y_hi, x_lo, x_hi);
1566 }
1567 
1568 
1569 void TemplateTable::lshl() {
1570   transition(itos, ltos);
1571   const Register shift_cnt = R4_tmp;
1572   const Register val_lo = R2_tmp;
1573   const Register val_hi = R3_tmp;
1574 
1575   __ pop_l(val_lo, val_hi);
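  // The count is masked with 63 because the JVM uses only the low six bits
  // of the shift distance for long shifts; long_shift() then performs the
  // 64-bit shift on the val_lo/val_hi pair.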
1576   __ andr(shift_cnt, R0_tos, 63);
1577   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsl, shift_cnt);
1578 }
1579 
1580 
1581 void TemplateTable::lshr() {
1582   transition(itos, ltos);
1583   const Register shift_cnt = R4_tmp;
1584   const Register val_lo = R2_tmp;
1585   const Register val_hi = R3_tmp;
1586 
1587   __ pop_l(val_lo, val_hi);
1588   __ andr(shift_cnt, R0_tos, 63);
1589   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, asr, shift_cnt);
1590 }
1591 
1592 
1593 void TemplateTable::lushr() {
1594   transition(itos, ltos);
1595   const Register shift_cnt = R4_tmp;
1596   const Register val_lo = R2_tmp;
1597   const Register val_hi = R3_tmp;
1598 
1599   __ pop_l(val_lo, val_hi);
1600   __ andr(shift_cnt, R0_tos, 63);
1601   __ long_shift(R0_tos_lo, R1_tos_hi, val_lo, val_hi, lsr, shift_cnt);
1602 }
1603 
1604 
1605 void TemplateTable::fop2(Operation op) {
1606   transition(ftos, ftos);
1607 #ifdef __SOFTFP__
1608   __ mov(R1, R0_tos);
1609   __ pop_i(R0);
1610   switch (op) {
1611     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc), R0, R1); break;
1612     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc), R0, R1); break;
1613     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fmul), R0, R1); break;
1614     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_fdiv), R0, R1); break;
1615     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1); break;
1616     default : ShouldNotReachHere();
1617   }
1618 #else
1619   const FloatRegister arg1 = S1_tmp;
1620   const FloatRegister arg2 = S0_tos;
1621 
1622   switch (op) {
1623     case add: __ pop_f(arg1); __ add_float(S0_tos, arg1, arg2); break;
1624     case sub: __ pop_f(arg1); __ sub_float(S0_tos, arg1, arg2); break;
1625     case mul: __ pop_f(arg1); __ mul_float(S0_tos, arg1, arg2); break;
1626     case div: __ pop_f(arg1); __ div_float(S0_tos, arg1, arg2); break;
1627     case rem:
1628 #ifndef __ABI_HARD__
1629       __ pop_f(arg1);
1630       __ fmrs(R0, arg1);
1631       __ fmrs(R1, arg2);
1632       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), R0, R1);
1633       __ fmsr(S0_tos, R0);
1634 #else
1635       __ mov_float(S1_reg, arg2);
1636       __ pop_f(S0);
1637       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1638 #endif // !__ABI_HARD__
1639       break;
1640     default : ShouldNotReachHere();
1641   }
1642 #endif // __SOFTFP__
1643 }
1644 
1645 
1646 void TemplateTable::dop2(Operation op) {
1647   transition(dtos, dtos);
1648 #ifdef __SOFTFP__
1649   __ mov(R2, R0_tos_lo);
1650   __ mov(R3, R1_tos_hi);
1651   __ pop_l(R0, R1);
1652   switch (op) {
1653     // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
1654     case add: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc), R0, R1, R2, R3); break;
1655     case sub: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc), R0, R1, R2, R3); break;
1656     case mul: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_dmul), R0, R1, R2, R3); break;
1657     case div: __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_ddiv), R0, R1, R2, R3); break;
1658     case rem: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3); break;
1659     default : ShouldNotReachHere();
1660   }
1661 #else
1662   const FloatRegister arg1 = D1_tmp;
1663   const FloatRegister arg2 = D0_tos;
1664 
1665   switch (op) {
1666     case add: __ pop_d(arg1); __ add_double(D0_tos, arg1, arg2); break;
1667     case sub: __ pop_d(arg1); __ sub_double(D0_tos, arg1, arg2); break;
1668     case mul: __ pop_d(arg1); __ mul_double(D0_tos, arg1, arg2); break;
1669     case div: __ pop_d(arg1); __ div_double(D0_tos, arg1, arg2); break;
1670     case rem:
1671 #ifndef __ABI_HARD__
1672       __ pop_d(arg1);
1673       __ fmrrd(R0, R1, arg1);
1674       __ fmrrd(R2, R3, arg2);
1675       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), R0, R1, R2, R3);
1676       __ fmdrr(D0_tos, R0, R1);
1677 #else
1678       __ mov_double(D1, arg2);
1679       __ pop_d(D0);
1680       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1681 #endif // !__ABI_HARD__
1682       break;
1683     default : ShouldNotReachHere();
1684   }
1685 #endif // __SOFTFP__
1686 }
1687 
1688 
1689 void TemplateTable::ineg() {
1690   transition(itos, itos);
1691   __ neg_32(R0_tos, R0_tos);
1692 }
1693 
1694 
1695 void TemplateTable::lneg() {
1696   transition(ltos, ltos);
1697   __ rsbs(R0_tos_lo, R0_tos_lo, 0);
1698   __ rsc (R1_tos_hi, R1_tos_hi, 0);
1699 }
1700 
1701 
1702 void TemplateTable::fneg() {
1703   transition(ftos, ftos);
1704 #ifdef __SOFTFP__
1705   // Invert sign bit
1706   const int sign_mask = 0x80000000;
1707   __ eor(R0_tos, R0_tos, sign_mask);
1708 #else
1709   __ neg_float(S0_tos, S0_tos);
1710 #endif // __SOFTFP__
1711 }
1712 
1713 
1714 void TemplateTable::dneg() {
1715   transition(dtos, dtos);
1716 #ifdef __SOFTFP__
1717   // Invert sign bit in the high part of the double
1718   const int sign_mask_hi = 0x80000000;
1719   __ eor(R1_tos_hi, R1_tos_hi, sign_mask_hi);
1720 #else
1721   __ neg_double(D0_tos, D0_tos);
1722 #endif // __SOFTFP__
1723 }
1724 
1725 
1726 void TemplateTable::iinc() {
1727   transition(vtos, vtos);
1728   const Register Rconst = R2_tmp;
1729   const Register Rlocal_index = R1_tmp;
1730   const Register Rval = R0_tmp;
1731 
1732   __ ldrsb(Rconst, at_bcp(2));
1733   locals_index(Rlocal_index);
1734   Address local = load_iaddress(Rlocal_index, Rtemp);
1735   __ ldr_s32(Rval, local);
1736   __ add(Rval, Rval, Rconst);
1737   __ str_32(Rval, local);
1738 }
1739 
1740 
1741 void TemplateTable::wide_iinc() {
1742   transition(vtos, vtos);
1743   const Register Rconst = R2_tmp;
1744   const Register Rlocal_index = R1_tmp;
1745   const Register Rval = R0_tmp;
1746 
1747   // get constant in Rconst
1748   __ ldrsb(R2_tmp, at_bcp(4));
1749   __ ldrb(R3_tmp, at_bcp(5));
1750   __ orr(Rconst, R3_tmp, AsmOperand(R2_tmp, lsl, 8));
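  // Roughly equivalent C sketch (descriptive only, not generated code): the
  // increment is the big-endian signed 16-bit operand of wide iinc,
  //   int increment = (int16_t)((bcp[4] << 8) | bcp[5]);  // ldrsb sign-extends the high byte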
1751 
1752   locals_index_wide(Rlocal_index);
1753   Address local = load_iaddress(Rlocal_index, Rtemp);
1754   __ ldr_s32(Rval, local);
1755   __ add(Rval, Rval, Rconst);
1756   __ str_32(Rval, local);
1757 }
1758 
1759 
1760 void TemplateTable::convert() {
1761   // Checking
1762 #ifdef ASSERT
1763   { TosState tos_in  = ilgl;
1764     TosState tos_out = ilgl;
1765     switch (bytecode()) {
1766       case Bytecodes::_i2l: // fall through
1767       case Bytecodes::_i2f: // fall through
1768       case Bytecodes::_i2d: // fall through
1769       case Bytecodes::_i2b: // fall through
1770       case Bytecodes::_i2c: // fall through
1771       case Bytecodes::_i2s: tos_in = itos; break;
1772       case Bytecodes::_l2i: // fall through
1773       case Bytecodes::_l2f: // fall through
1774       case Bytecodes::_l2d: tos_in = ltos; break;
1775       case Bytecodes::_f2i: // fall through
1776       case Bytecodes::_f2l: // fall through
1777       case Bytecodes::_f2d: tos_in = ftos; break;
1778       case Bytecodes::_d2i: // fall through
1779       case Bytecodes::_d2l: // fall through
1780       case Bytecodes::_d2f: tos_in = dtos; break;
1781       default             : ShouldNotReachHere();
1782     }
1783     switch (bytecode()) {
1784       case Bytecodes::_l2i: // fall through
1785       case Bytecodes::_f2i: // fall through
1786       case Bytecodes::_d2i: // fall through
1787       case Bytecodes::_i2b: // fall through
1788       case Bytecodes::_i2c: // fall through
1789       case Bytecodes::_i2s: tos_out = itos; break;
1790       case Bytecodes::_i2l: // fall through
1791       case Bytecodes::_f2l: // fall through
1792       case Bytecodes::_d2l: tos_out = ltos; break;
1793       case Bytecodes::_i2f: // fall through
1794       case Bytecodes::_l2f: // fall through
1795       case Bytecodes::_d2f: tos_out = ftos; break;
1796       case Bytecodes::_i2d: // fall through
1797       case Bytecodes::_l2d: // fall through
1798       case Bytecodes::_f2d: tos_out = dtos; break;
1799       default             : ShouldNotReachHere();
1800     }
1801     transition(tos_in, tos_out);
1802   }
1803 #endif // ASSERT
1804 
1805   // Conversion
1806   switch (bytecode()) {
1807     case Bytecodes::_i2l:
1808       __ mov(R1_tos_hi, AsmOperand(R0_tos, asr, BitsPerWord-1));
1809       break;
1810 
1811     case Bytecodes::_i2f:
1812 #ifdef __SOFTFP__
1813       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2f), R0_tos);
1814 #else
1815       __ fmsr(S0_tmp, R0_tos);
1816       __ fsitos(S0_tos, S0_tmp);
1817 #endif // __SOFTFP__
1818       break;
1819 
1820     case Bytecodes::_i2d:
1821 #ifdef __SOFTFP__
1822       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_i2d), R0_tos);
1823 #else
1824       __ fmsr(S0_tmp, R0_tos);
1825       __ fsitod(D0_tos, S0_tmp);
1826 #endif // __SOFTFP__
1827       break;
1828 
1829     case Bytecodes::_i2b:
1830       __ sign_extend(R0_tos, R0_tos, 8);
1831       break;
1832 
1833     case Bytecodes::_i2c:
1834       __ zero_extend(R0_tos, R0_tos, 16);
1835       break;
1836 
1837     case Bytecodes::_i2s:
1838       __ sign_extend(R0_tos, R0_tos, 16);
1839       break;
1840 
1841     case Bytecodes::_l2i:
1842       /* nothing to do */
1843       break;
1844 
1845     case Bytecodes::_l2f:
1846       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f), R0_tos_lo, R1_tos_hi);
1847 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1848       __ fmsr(S0_tos, R0);
1849 #endif // !__SOFTFP__ && !__ABI_HARD__
1850       break;
1851 
1852     case Bytecodes::_l2d:
1853       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2d), R0_tos_lo, R1_tos_hi);
1854 #if !defined(__SOFTFP__) && !defined(__ABI_HARD__)
1855       __ fmdrr(D0_tos, R0, R1);
1856 #endif // !__SOFTFP__ && !__ABI_HARD__
1857       break;
1858 
1859     case Bytecodes::_f2i:
1860 #ifndef __SOFTFP__
1861       __ ftosizs(S0_tos, S0_tos);
1862       __ fmrs(R0_tos, S0_tos);
1863 #else
1864       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), R0_tos);
1865 #endif // !__SOFTFP__
1866       break;
1867 
1868     case Bytecodes::_f2l:
1869 #ifndef __SOFTFP__
1870       __ fmrs(R0_tos, S0_tos);
1871 #endif // !__SOFTFP__
1872       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), R0_tos);
1873       break;
1874 
1875     case Bytecodes::_f2d:
1876 #ifdef __SOFTFP__
1877       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_f2d), R0_tos);
1878 #else
1879       __ convert_f2d(D0_tos, S0_tos);
1880 #endif // __SOFTFP__
1881       break;
1882 
1883     case Bytecodes::_d2i:
1884 #ifndef __SOFTFP__
1885       __ ftosizd(Stemp, D0);
1886       __ fmrs(R0, Stemp);
1887 #else
1888       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), R0_tos_lo, R1_tos_hi);
1889 #endif // !__SOFTFP__
1890       break;
1891 
1892     case Bytecodes::_d2l:
1893 #ifndef __SOFTFP__
1894       __ fmrrd(R0_tos_lo, R1_tos_hi, D0_tos);
1895 #endif // !__SOFTFP__
1896       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), R0_tos_lo, R1_tos_hi);
1897       break;
1898 
1899     case Bytecodes::_d2f:
1900 #ifdef __SOFTFP__
1901       __ call_VM_leaf(CAST_FROM_FN_PTR(address, __aeabi_d2f), R0_tos_lo, R1_tos_hi);
1902 #else
1903       __ convert_d2f(S0_tos, D0_tos);
1904 #endif // __SOFTFP__
1905       break;
1906 
1907     default:
1908       ShouldNotReachHere();
1909   }
1910 }
1911 
1912 
1913 void TemplateTable::lcmp() {
1914   transition(ltos, itos);
1915   const Register arg1_lo = R2_tmp;
1916   const Register arg1_hi = R3_tmp;
1917   const Register arg2_lo = R0_tos_lo;
1918   const Register arg2_hi = R1_tos_hi;
1919   const Register res = R4_tmp;
1920 
1921   __ pop_l(arg1_lo, arg1_hi);
1922 
1923   // long compare arg1 with arg2
1924   // result is -1/0/+1 if '<'/'='/'>'
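  // A minimal C sketch of the logic below (descriptive only): compare the
  // high words as signed values, and only if they are equal compare the low
  // words as unsigned values:
  //   if (hi1 != hi2) return (hi1 < hi2) ? -1 : 1;          // signed compare
  //   return (lo1 < lo2) ? -1 : ((lo1 == lo2) ? 0 : 1);     // unsigned compare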
1925   Label done;
1926 
1927   __ mov (res, 0);
1928   __ cmp (arg1_hi, arg2_hi);
1929   __ mvn (res, 0, lt);
1930   __ mov (res, 1, gt);
1931   __ b(done, ne);
1932   __ cmp (arg1_lo, arg2_lo);
1933   __ mvn (res, 0, lo);
1934   __ mov (res, 1, hi);
1935   __ bind(done);
1936   __ mov (R0_tos, res);
1937 }
1938 
1939 
1940 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1941   assert((unordered_result == 1) || (unordered_result == -1), "invalid unordered result");
1942 
1943 
1944 #ifdef __SOFTFP__
1945 
1946   if (is_float) {
1947     transition(ftos, itos);
1948     const Register Rx = R0;
1949     const Register Ry = R1;
1950 
1951     __ mov(Ry, R0_tos);
1952     __ pop_i(Rx);
1953 
1954     if (unordered_result == 1) {
1955       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg), Rx, Ry);
1956     } else {
1957       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl), Rx, Ry);
1958     }
1959 
1960   } else {
1961 
1962     transition(dtos, itos);
1963     const Register Rx_lo = R0;
1964     const Register Rx_hi = R1;
1965     const Register Ry_lo = R2;
1966     const Register Ry_hi = R3;
1967 
1968     __ mov(Ry_lo, R0_tos_lo);
1969     __ mov(Ry_hi, R1_tos_hi);
1970     __ pop_l(Rx_lo, Rx_hi);
1971 
1972     if (unordered_result == 1) {
1973       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1974     } else {
1975       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl), Rx_lo, Rx_hi, Ry_lo, Ry_hi);
1976     }
1977   }
1978 
1979 #else
1980 
1981   if (is_float) {
1982     transition(ftos, itos);
1983     __ pop_f(S1_tmp);
1984     __ fcmps(S1_tmp, S0_tos);
1985   } else {
1986     transition(dtos, itos);
1987     __ pop_d(D1_tmp);
1988     __ fcmpd(D1_tmp, D0_tos);
1989   }
1990 
1991   __ fmstat();
1992 
1993   // comparison result | flag N | flag Z | flag C | flag V
1994   // "<"               |   1    |   0    |   0    |   0
1995   // "=="              |   0    |   1    |   1    |   0
1996   // ">"               |   0    |   0    |   1    |   0
1997   // unordered         |   0    |   0    |   1    |   1
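  // Worked example for the unordered case (descriptive only): a NaN operand
  // yields N=0, Z=0, C=1, V=1.  With unordered_result == 1 (fcmpg/dcmpg
  // semantics) the 'mi' condition (N set) is false, so the preloaded +1
  // survives; with unordered_result == -1 (fcmpl/dcmpl) the 'lt' condition
  // (N != V) is true, so the result becomes -1.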
1998 
1999   if (unordered_result < 0) {
2000     __ mov(R0_tos, 1);           // result ==  1 if greater
2001     __ mvn(R0_tos, 0, lt);       // result == -1 if less or unordered (N!=V)
2002   } else {
2003     __ mov(R0_tos, 1);           // result ==  1 if greater or unordered
2004     __ mvn(R0_tos, 0, mi);       // result == -1 if less (N=1)
2005   }
2006   __ mov(R0_tos, 0, eq);         // result ==  0 if equ (Z=1)
2007 #endif // __SOFTFP__
2008 }
2009 
2010 
2011 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2012 
2013   const Register Rdisp = R0_tmp;
2014   const Register Rbumped_taken_count = R5_tmp;
2015 
2016   __ profile_taken_branch(R0_tmp, Rbumped_taken_count); // R0 holds updated MDP, Rbumped_taken_count holds bumped taken count
2017 
2018   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2019                              InvocationCounter::counter_offset();
2020   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2021                               InvocationCounter::counter_offset();
2022   const int method_offset = frame::interpreter_frame_method_offset * wordSize;
2023 
2024   // Load up R0 with the branch displacement
2025   if (is_wide) {
2026     __ ldrsb(R0_tmp, at_bcp(1));
2027     __ ldrb(R1_tmp, at_bcp(2));
2028     __ ldrb(R2_tmp, at_bcp(3));
2029     __ ldrb(R3_tmp, at_bcp(4));
2030     __ orr(R0_tmp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2031     __ orr(R0_tmp, R2_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2032     __ orr(Rdisp, R3_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2033   } else {
2034     __ ldrsb(R0_tmp, at_bcp(1));
2035     __ ldrb(R1_tmp, at_bcp(2));
2036     __ orr(Rdisp, R1_tmp, AsmOperand(R0_tmp, lsl, BitsPerByte));
2037   }
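  // In effect (descriptive sketch only): for the wide form (goto_w/jsr_w) the
  // displacement is the signed 32-bit big-endian value of bytes bcp[1..4];
  // otherwise it is the signed 16-bit value of bcp[1..2].  The first byte is
  // loaded with ldrsb so its sign bit propagates:
  //   disp = (int16_t)((bcp[1] << 8) | bcp[2]);                          // narrow
  //   disp = (bcp[1] << 24) | (bcp[2] << 16) | (bcp[3] << 8) | bcp[4];   // wide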
2038 
2039   // Handle all the JSR stuff here, then exit.
2040   // It's much shorter and cleaner than intermingling with the
2041   // non-JSR normal-branch stuff occurring below.
2042   if (is_jsr) {
2043     // compute return address as bci in R1
2044     const Register Rret_addr = R1_tmp;
2045     assert_different_registers(Rdisp, Rret_addr, Rtemp);
2046 
2047     __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2048     __ sub(Rret_addr, Rbcp, - (is_wide ? 5 : 3) + in_bytes(ConstMethod::codes_offset()));
2049     __ sub(Rret_addr, Rret_addr, Rtemp);
2050 
2051     // Load the next target bytecode into R3_bytecode and advance Rbcp
2052     __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2053 
2054     // Push return address
2055     __ push_i(Rret_addr);
2056     // jsr returns vtos
2057     __ dispatch_only_noverify(vtos);
2058     return;
2059   }
2060 
2061   // Normal (non-jsr) branch handling
2062 
2063   // Adjust the bcp by the displacement in Rdisp and load next bytecode.
2064   __ ldrb(R3_bytecode, Address(Rbcp, Rdisp, lsl, 0, pre_indexed));
2065 
2066   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
2067   Label backedge_counter_overflow;
2068   Label profile_method;
2069   Label dispatch;
2070 
2071   if (UseLoopCounter) {
2072     // increment backedge counter for backward branches
2073     // Rdisp (R0): target offset
2074 
2075     const Register Rcnt = R2_tmp;
2076     const Register Rcounters = R1_tmp;
2077 
2078     // count only if backward branch
2079     __ tst(Rdisp, Rdisp);
2080     __ b(dispatch, pl);
2081 
2082     if (TieredCompilation) {
2083       Label no_mdo;
2084       int increment = InvocationCounter::count_increment;
2085       if (ProfileInterpreter) {
2086         // Are we profiling?
2087         __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
2088         __ cbz(Rtemp, no_mdo);
2089         // Increment the MDO backedge counter
2090         const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
2091                                                   in_bytes(InvocationCounter::counter_offset()));
2092         const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
2093         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2094                                    Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2095         __ b(dispatch);
2096       }
2097       __ bind(no_mdo);
2098       // Increment backedge counter in MethodCounters*
2099       // Note: Rbumped_taken_count is a callee-saved register on ARM32
2100       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2101                              Rdisp, R3_bytecode,
2102                              noreg);
2103       const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
2104       __ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
2105                                  Rcnt, R4_tmp, eq, &backedge_counter_overflow);
2106     } else { // not TieredCompilation
2107       // Increment backedge counter in MethodCounters*
2108       __ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
2109                              Rdisp, R3_bytecode,
2110                              noreg);
2111       __ ldr_u32(Rtemp, Address(Rcounters, be_offset));           // load backedge counter
2112       __ add(Rtemp, Rtemp, InvocationCounter::count_increment);   // increment counter
2113       __ str_32(Rtemp, Address(Rcounters, be_offset));            // store counter
2114 
2115       __ ldr_u32(Rcnt, Address(Rcounters, inv_offset));           // load invocation counter
2116       __ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value);  // and the status bits
2117       __ add(Rcnt, Rcnt, Rtemp);                                 // add both counters
2118 
2119       if (ProfileInterpreter) {
2120         // Test to see if we should create a method data oop
2121         const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
2122         __ ldr_s32(Rtemp, profile_limit);
2123         __ cmp_32(Rcnt, Rtemp);
2124         __ b(dispatch, lt);
2125 
2126         // if no method data exists, go to profile method
2127         __ test_method_data_pointer(R4_tmp, profile_method);
2128 
2129         if (UseOnStackReplacement) {
2130           // check for overflow against Rbumped_taken_count, which is the MDO taken count
2131           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2132           __ ldr_s32(Rtemp, backward_branch_limit);
2133           __ cmp(Rbumped_taken_count, Rtemp);
2134           __ b(dispatch, lo);
2135 
2136           // When ProfileInterpreter is on, the backedge_count comes from the
2137           // MethodData*, which value does not get reset on the call to
2138           // frequency_counter_overflow().  To avoid excessive calls to the overflow
2139           // routine while the method is being compiled, add a second test to make
2140           // sure the overflow function is called only once every overflow_frequency.
2141           const int overflow_frequency = 1024;
2142 
2143           // was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
2144           assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
2145           __ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
2146 
2147           __ b(backedge_counter_overflow, eq);
2148         }
2149       } else {
2150         if (UseOnStackReplacement) {
2151           // check for overflow against Rcnt, which is the sum of the counters
2152           const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
2153           __ ldr_s32(Rtemp, backward_branch_limit);
2154           __ cmp_32(Rcnt, Rtemp);
2155           __ b(backedge_counter_overflow, hs);
2156 
2157         }
2158       }
2159     }
2160     __ bind(dispatch);
2161   }
2162 
2163   if (!UseOnStackReplacement) {
2164     __ bind(backedge_counter_overflow);
2165   }
2166 
2167   // continue with the bytecode @ target
2168   __ dispatch_only(vtos, true);
2169 
2170   if (UseLoopCounter) {
2171     if (ProfileInterpreter && !TieredCompilation) {
2172       // Out-of-line code to allocate method data oop.
2173       __ bind(profile_method);
2174 
2175       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2176       __ set_method_data_pointer_for_bcp();
2177       // reload next bytecode
2178       __ ldrb(R3_bytecode, Address(Rbcp));
2179       __ b(dispatch);
2180     }
2181 
2182     if (UseOnStackReplacement) {
2183       // invocation counter overflow
2184       __ bind(backedge_counter_overflow);
2185 
2186       __ sub(R1, Rbcp, Rdisp);                   // branch bcp
2187       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
2188 
2189       // R0: osr nmethod (osr ok) or NULL (osr not possible)
2190       const Register Rnmethod = R0;
2191 
2192       __ ldrb(R3_bytecode, Address(Rbcp));       // reload next bytecode
2193 
2194       __ cbz(Rnmethod, dispatch);                // test result, no osr if null
2195 
2196       // nmethod may have been invalidated (VM may block upon call_VM return)
2197       __ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
2198       __ cmp(R1_tmp, nmethod::in_use);
2199       __ b(dispatch, ne);
2200 
2201       // We have the address of an on-stack replacement routine in Rnmethod.
2202       // We need to prepare to execute the OSR method. First we must
2203       // migrate the locals and monitors off of the stack.
2204 
2205       __ mov(Rtmp_save0, Rnmethod);                      // save the nmethod
2206 
2207       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2208 
2209       // R0 is OSR buffer
2210 
2211       __ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
2212       __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
2213 
2214       __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
2215       __ bic(SP, Rtemp, StackAlignmentInBytes - 1);     // Remove frame and align stack
2216 
2217       __ jump(R1_tmp);
2218     }
2219   }
2220 }
2221 
2222 
2223 void TemplateTable::if_0cmp(Condition cc) {
2224   transition(itos, vtos);
2225   // assume branch is more often taken than not (loops use backward branches)
2226   Label not_taken;
2227   __ cmp_32(R0_tos, 0);
2228   __ b(not_taken, convNegCond(cc));
2229   branch(false, false);
2230   __ bind(not_taken);
2231   __ profile_not_taken_branch(R0_tmp);
2232 }
2233 
2234 
2235 void TemplateTable::if_icmp(Condition cc) {
2236   transition(itos, vtos);
2237   // assume branch is more often taken than not (loops use backward branches)
2238   Label not_taken;
2239   __ pop_i(R1_tmp);
2240   __ cmp_32(R1_tmp, R0_tos);
2241   __ b(not_taken, convNegCond(cc));
2242   branch(false, false);
2243   __ bind(not_taken);
2244   __ profile_not_taken_branch(R0_tmp);
2245 }
2246 
2247 
2248 void TemplateTable::if_nullcmp(Condition cc) {
2249   transition(atos, vtos);
2250   assert(cc == equal || cc == not_equal, "invalid condition");
2251 
2252   // assume branch is more often taken than not (loops use backward branches)
2253   Label not_taken;
2254   if (cc == equal) {
2255     __ cbnz(R0_tos, not_taken);
2256   } else {
2257     __ cbz(R0_tos, not_taken);
2258   }
2259   branch(false, false);
2260   __ bind(not_taken);
2261   __ profile_not_taken_branch(R0_tmp);
2262 }
2263 
2264 
2265 void TemplateTable::if_acmp(Condition cc) {
2266   transition(atos, vtos);
2267   // assume branch is more often taken than not (loops use backward branches)
2268   Label not_taken;
2269   __ pop_ptr(R1_tmp);
2270   __ cmpoop(R1_tmp, R0_tos);
2271   __ b(not_taken, convNegCond(cc));
2272   branch(false, false);
2273   __ bind(not_taken);
2274   __ profile_not_taken_branch(R0_tmp);
2275 }
2276 
2277 
2278 void TemplateTable::ret() {
2279   transition(vtos, vtos);
2280   const Register Rlocal_index = R1_tmp;
2281   const Register Rret_bci = Rtmp_save0; // R4/R19
2282 
2283   locals_index(Rlocal_index);
2284   Address local = load_iaddress(Rlocal_index, Rtemp);
2285   __ ldr_s32(Rret_bci, local);          // get return bci, compute return bcp
2286   __ profile_ret(Rtmp_save1, Rret_bci);
2287   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2288   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2289   __ add(Rbcp, Rtemp, Rret_bci);
2290   __ dispatch_next(vtos);
2291 }
2292 
2293 
2294 void TemplateTable::wide_ret() {
2295   transition(vtos, vtos);
2296   const Register Rlocal_index = R1_tmp;
2297   const Register Rret_bci = Rtmp_save0; // R4/R19
2298 
2299   locals_index_wide(Rlocal_index);
2300   Address local = load_iaddress(Rlocal_index, Rtemp);
2301   __ ldr_s32(Rret_bci, local);               // get return bci, compute return bcp
2302   __ profile_ret(Rtmp_save1, Rret_bci);
2303   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
2304   __ add(Rtemp, Rtemp, in_bytes(ConstMethod::codes_offset()));
2305   __ add(Rbcp, Rtemp, Rret_bci);
2306   __ dispatch_next(vtos);
2307 }
2308 
2309 
2310 void TemplateTable::tableswitch() {
2311   transition(itos, vtos);
2312 
2313   const Register Rindex  = R0_tos;
2314   const Register Rtemp2  = R1_tmp;
2315   const Register Rabcp   = R2_tmp;  // aligned bcp
2316   const Register Rlow    = R3_tmp;
2317   const Register Rhigh   = R4_tmp;
2318   const Register Roffset = R5_tmp;
2319 
2320   // align bcp
2321   __ add(Rtemp, Rbcp, 1 + (2*BytesPerInt-1));
2322   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2323 
2324   // load lo & hi
2325   __ ldmia(Rabcp, RegisterSet(Rlow) | RegisterSet(Rhigh), writeback);
2326   __ byteswap_u32(Rlow, Rtemp, Rtemp2);
2327   __ byteswap_u32(Rhigh, Rtemp, Rtemp2);
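  // Layout reminder (descriptive only, see JVMS tableswitch): after the opcode
  // and 0-3 padding bytes come big-endian words: default, low, high, then
  // (high - low + 1) jump offsets.  The aligned-bcp computation above skips
  // the default word, and the ldmia writeback leaves Rabcp pointing at the
  // jump offset table, so the default offset sits at Rabcp - 3*BytesPerInt.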
2328 
2329   // compare index with high bound
2330   __ cmp_32(Rhigh, Rindex);
2331 
2332 
2333   // if Rindex <= Rhigh then calculate index in table (Rindex - Rlow)
2334   __ subs(Rindex, Rindex, Rlow, ge);
2335 
2336   // if Rindex <= Rhigh and (Rindex - Rlow) >= 0
2337   // ("ge" status accumulated from cmp and subs instructions) then load
2338   // offset from table, otherwise load offset for default case
2339 
2340   if(ProfileInterpreter) {
2341     Label default_case, continue_execution;
2342 
2343     __ b(default_case, lt);
2344     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt));
2345     __ profile_switch_case(Rabcp, Rindex, Rtemp2, R0_tmp);
2346     __ b(continue_execution);
2347 
2348     __ bind(default_case);
2349     __ profile_switch_default(R0_tmp);
2350     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt));
2351 
2352     __ bind(continue_execution);
2353   } else {
2354     __ ldr(Roffset, Address(Rabcp, -3 * BytesPerInt), lt);
2355     __ ldr(Roffset, Address(Rabcp, Rindex, lsl, LogBytesPerInt), ge);
2356   }
2357 
2358   __ byteswap_u32(Roffset, Rtemp, Rtemp2);
2359 
2360   // load the next bytecode to R3_bytecode and advance Rbcp
2361   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2362   __ dispatch_only(vtos, true);
2363 
2364 }
2365 
2366 
2367 void TemplateTable::lookupswitch() {
2368   transition(itos, itos);
2369   __ stop("lookupswitch bytecode should have been rewritten");
2370 }
2371 
2372 
2373 void TemplateTable::fast_linearswitch() {
2374   transition(itos, vtos);
2375   Label loop, found, default_case, continue_execution;
2376 
2377   const Register Rkey     = R0_tos;
2378   const Register Rabcp    = R2_tmp;  // aligned bcp
2379   const Register Rdefault = R3_tmp;
2380   const Register Rcount   = R4_tmp;
2381   const Register Roffset  = R5_tmp;
2382 
2383   // bswap Rkey, so we can avoid bswapping the table entries
2384   __ byteswap_u32(Rkey, R1_tmp, Rtemp);
2385 
2386   // align bcp
2387   __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2388   __ align_reg(Rabcp, Rtemp, BytesPerInt);
2389 
2390   // load default & counter
2391   __ ldmia(Rabcp, RegisterSet(Rdefault) | RegisterSet(Rcount), writeback);
2392   __ byteswap_u32(Rcount, R1_tmp, Rtemp);
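  // Layout reminder (descriptive only, see JVMS lookupswitch): after the opcode
  // and padding come big-endian words: default, npairs, then npairs
  // (match, offset) pairs.  The ldmia writeback above leaves Rabcp at the first
  // pair; each loop iteration below loads a match word and post-increments
  // Rabcp past the whole pair.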
2393 
2394   __ cmp_32(Rcount, 0);
2395   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2396   __ b(default_case, eq);
2397 
2398   // table search
2399   __ bind(loop);
2400   __ cmp_32(Rtemp, Rkey);
2401   __ b(found, eq);
2402   __ subs(Rcount, Rcount, 1);
2403   __ ldr(Rtemp, Address(Rabcp, 2*BytesPerInt, post_indexed), ne);
2404   __ b(loop, ne);
2405 
2406   // default case
2407   __ bind(default_case);
2408   __ profile_switch_default(R0_tmp);
2409   __ mov(Roffset, Rdefault);
2410   __ b(continue_execution);
2411 
2412   // entry found -> get offset
2413   __ bind(found);
2414   // Rabcp is already incremented and points to the next entry
2415   __ ldr_s32(Roffset, Address(Rabcp, -BytesPerInt));
2416   if (ProfileInterpreter) {
2417     // Calculate index of the selected case.
2418     assert_different_registers(Roffset, Rcount, Rtemp, R0_tmp, R1_tmp, R2_tmp);
2419 
2420     // align bcp
2421     __ add(Rtemp, Rbcp, 1 + (BytesPerInt-1));
2422     __ align_reg(R2_tmp, Rtemp, BytesPerInt);
2423 
2424     // load number of cases
2425     __ ldr_u32(R2_tmp, Address(R2_tmp, BytesPerInt));
2426     __ byteswap_u32(R2_tmp, R1_tmp, Rtemp);
2427 
2428     // Selected index = <number of cases> - <current loop count>
2429     __ sub(R1_tmp, R2_tmp, Rcount);
2430     __ profile_switch_case(R0_tmp, R1_tmp, Rtemp, R1_tmp);
2431   }
2432 
2433   // continue execution
2434   __ bind(continue_execution);
2435   __ byteswap_u32(Roffset, R1_tmp, Rtemp);
2436 
2437   // load the next bytecode to R3_bytecode and advance Rbcp
2438   __ ldrb(R3_bytecode, Address(Rbcp, Roffset, lsl, 0, pre_indexed));
2439   __ dispatch_only(vtos, true);
2440 }
2441 
2442 
2443 void TemplateTable::fast_binaryswitch() {
2444   transition(itos, vtos);
2445   // Implementation using the following core algorithm:
2446   //
2447   // int binary_search(int key, LookupswitchPair* array, int n) {
2448   //   // Binary search according to "Methodik des Programmierens" by
2449   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2450   //   int i = 0;
2451   //   int j = n;
2452   //   while (i+1 < j) {
2453   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2454   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2455   //     // where a stands for the array, assuming that the (nonexistent)
2456   //     // element a[n] is infinitely big.
2457   //     int h = (i + j) >> 1;
2458   //     // i < h < j
2459   //     if (key < array[h].fast_match()) {
2460   //       j = h;
2461   //     } else {
2462   //       i = h;
2463   //     }
2464   //   }
2465   //   // R: a[i] <= key < a[i+1] or Q
2466   //   // (i.e., if key is within array, i is the correct index)
2467   //   return i;
2468   // }
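  // Note on the addressing used below (descriptive only): each LookupswitchPair
  // is two big-endian 32-bit words (match, offset), so pair i lives at
  // array + (i << (1 + LogBytesPerInt)) with its offset one word further on;
  // 'array' itself is set up past the default and npairs words.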
2469 
2470   // register allocation
2471   const Register key    = R0_tos;                // already set (tosca)
2472   const Register array  = R1_tmp;
2473   const Register i      = R2_tmp;
2474   const Register j      = R3_tmp;
2475   const Register h      = R4_tmp;
2476   const Register val    = R5_tmp;
2477   const Register temp1  = Rtemp;
2478   const Register temp2  = LR_tmp;
2479   const Register offset = R3_tmp;
2480 
2481   // set 'array' = aligned bcp + 2 ints
2482   __ add(temp1, Rbcp, 1 + (BytesPerInt-1) + 2*BytesPerInt);
2483   __ align_reg(array, temp1, BytesPerInt);
2484 
2485   // initialize i & j
2486   __ mov(i, 0);                                  // i = 0;
2487   __ ldr_s32(j, Address(array, -BytesPerInt));   // j = length(array);
2488   // Convert j into native byteordering
2489   __ byteswap_u32(j, temp1, temp2);
2490 
2491   // and start
2492   Label entry;
2493   __ b(entry);
2494 
2495   // binary search loop
2496   { Label loop;
2497     __ bind(loop);
2498     // int h = (i + j) >> 1;
2499     __ add(h, i, j);                             // h = i + j;
2500     __ logical_shift_right(h, h, 1);             // h = (i + j) >> 1;
2501     // if (key < array[h].fast_match()) {
2502     //   j = h;
2503     // } else {
2504     //   i = h;
2505     // }
2506     __ ldr_s32(val, Address(array, h, lsl, 1+LogBytesPerInt));
2507     // Convert array[h].match to native byte-ordering before compare
2508     __ byteswap_u32(val, temp1, temp2);
2509     __ cmp_32(key, val);
2510     __ mov(j, h, lt);   // j = h if (key <  array[h].fast_match())
2511     __ mov(i, h, ge);   // i = h if (key >= array[h].fast_match())
2512     // while (i+1 < j)
2513     __ bind(entry);
2514     __ add(temp1, i, 1);                             // i+1
2515     __ cmp(temp1, j);                                // i+1 < j
2516     __ b(loop, lt);
2517   }
2518 
2519   // end of binary search, result index is i (must check again!)
2520   Label default_case;
2521   // Convert array[i].match to native byte-ordering before compare
2522   __ ldr_s32(val, Address(array, i, lsl, 1+LogBytesPerInt));
2523   __ byteswap_u32(val, temp1, temp2);
2524   __ cmp_32(key, val);
2525   __ b(default_case, ne);
2526 
2527   // entry found
2528   __ add(temp1, array, AsmOperand(i, lsl, 1+LogBytesPerInt));
2529   __ ldr_s32(offset, Address(temp1, 1*BytesPerInt));
2530   __ profile_switch_case(R0, i, R1, i);
2531   __ byteswap_u32(offset, temp1, temp2);
2532   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2533   __ dispatch_only(vtos, true);
2534 
2535   // default case
2536   __ bind(default_case);
2537   __ profile_switch_default(R0);
2538   __ ldr_s32(offset, Address(array, -2*BytesPerInt));
2539   __ byteswap_u32(offset, temp1, temp2);
2540   __ ldrb(R3_bytecode, Address(Rbcp, offset, lsl, 0, pre_indexed));
2541   __ dispatch_only(vtos, true);
2542 }
2543 
2544 
2545 void TemplateTable::_return(TosState state) {
2546   transition(state, state);
2547   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2548 
2549   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2550     Label skip_register_finalizer;
2551     assert(state == vtos, "only valid state");
2552     __ ldr(R1, aaddress(0));
2553     __ load_klass(Rtemp, R1);
2554     __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));
2555     __ tbz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), skip_register_finalizer);
2556 
2557     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R1);
2558 
2559     __ bind(skip_register_finalizer);
2560   }
2561 
2562   // Narrow result if state is itos but result type is smaller.
2563   // Need to narrow in the return bytecode rather than in generate_return_entry
2564   // since compiled code callers expect the result to already be narrowed.
2565   if (state == itos) {
2566     __ narrow(R0_tos);
2567   }
2568   __ remove_activation(state, LR);
2569 
2570   __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
2571 
2572   // According to interpreter calling conventions, result is returned in R0/R1,
2573   // so ftos (S0) and dtos (D0) are moved to R0/R1.
2574   // This conversion should be done after remove_activation, as it uses
2575   // push(state) & pop(state) to preserve return value.
2576   __ convert_tos_to_retval(state);
2577 
2578   __ ret();
2579 
2580   __ nop(); // to avoid filling CPU pipeline with invalid instructions
2581   __ nop();
2582 }
2583 
2584 
2585 // ----------------------------------------------------------------------------
2586 // Volatile variables demand their effects be made known to all CPUs in
2587 // order.  Store buffers on most chips allow reads & writes to reorder; the
2588 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2589 // memory barrier (i.e., it's not sufficient that the interpreter does not
2590 // reorder volatile references, the hardware also must not reorder them).
2591 //
2592 // According to the new Java Memory Model (JMM):
2593 // (1) All volatiles are serialized with respect to each other.
2594 // ALSO reads & writes act as acquire & release, so:
2595 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2596 // the read float up to before the read.  It's OK for non-volatile memory refs
2597 // that happen before the volatile read to float down below it.
2598 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2599 // that happen BEFORE the write float down to after the write.  It's OK for
2600 // non-volatile memory refs that happen after the volatile write to float up
2601 // before it.
2602 //
2603 // We only put in barriers around volatile refs (they are expensive), not
2604 // _between_ memory refs (that would require us to track the flavor of the
2605 // previous memory refs).  Requirements (2) and (3) require some barriers
2606 // before volatile stores and after volatile loads.  These nearly cover
2607 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2608 // case is placed after volatile-stores although it could just as well go
2609 // before volatile-loads.
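//
// As a rough sketch of how the field-access templates below use this
// (descriptive only, not additional generated code):
//
//   volatile load :  load;  membar(LoadLoad | LoadStore);
//   volatile store:  membar(StoreStore | LoadStore);  store;  membar(StoreLoad);
//
// i.e. acquire after volatile reads, release before volatile writes, and a
// StoreLoad barrier after volatile writes to cover the store-load case above.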
2610 void TemplateTable::volatile_barrier(MacroAssembler::Membar_mask_bits order_constraint,
2611                                      Register tmp,
2612                                      bool preserve_flags,
2613                                      Register load_tgt) {
2614   __ membar(order_constraint, tmp, preserve_flags, load_tgt);
2615 }
2616 
2617 // Blows all volatile registers: R0-R3, Rtemp, LR.
2618 void TemplateTable::resolve_cache_and_index(int byte_no,
2619                                             Register Rcache,
2620                                             Register Rindex,
2621                                             size_t index_size) {
2622   assert_different_registers(Rcache, Rindex, Rtemp);
2623 
2624   Label resolved;
2625   Bytecodes::Code code = bytecode();
2626   switch (code) {
2627   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2628   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2629   default: break;
2630   }
2631 
2632   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2633   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, Rindex, Rtemp, byte_no, 1, index_size);
2634   __ cmp(Rtemp, code);  // have we resolved this bytecode?
2635   __ b(resolved, eq);
2636 
2637   // resolve first time through
2638   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2639   __ mov(R1, code);
2640   __ call_VM(noreg, entry, R1);
2641   // Update registers with resolved info
2642   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);
2643   __ bind(resolved);
2644 }
2645 
2646 
2647 // The Rcache and Rindex registers must be set before the call
2648 void TemplateTable::load_field_cp_cache_entry(Register Rcache,
2649                                               Register Rindex,
2650                                               Register Roffset,
2651                                               Register Rflags,
2652                                               Register Robj,
2653                                               bool is_static = false) {
2654 
2655   assert_different_registers(Rcache, Rindex, Rtemp);
2656   assert_different_registers(Roffset, Rflags, Robj, Rtemp);
2657 
2658   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2659 
2660   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2661 
2662   // Field offset
2663   __ ldr(Roffset, Address(Rtemp,
2664            cp_base_offset + ConstantPoolCacheEntry::f2_offset()));
2665 
2666   // Flags
2667   __ ldr_u32(Rflags, Address(Rtemp,
2668            cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
2669 
2670   if (is_static) {
2671     __ ldr(Robj, Address(Rtemp,
2672              cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
2673     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2674     __ ldr(Robj, Address(Robj, mirror_offset));
2675     __ resolve_oop_handle(Robj);
2676   }
2677 }
2678 
2679 
2680 // Blows all volatile registers: R0-R3, Rtemp, LR.
2681 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2682                                                Register method,
2683                                                Register itable_index,
2684                                                Register flags,
2685                                                bool is_invokevirtual,
2686                                                bool is_invokevfinal/*unused*/,
2687                                                bool is_invokedynamic) {
2688   // setup registers
2689   const Register cache = R2_tmp;
2690   const Register index = R3_tmp;
2691   const Register temp_reg = Rtemp;
2692   assert_different_registers(cache, index, temp_reg);
2693   assert_different_registers(method, itable_index, temp_reg);
2694 
2695   // determine constant pool cache field offsets
2696   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2697   const int method_offset = in_bytes(
2698     ConstantPoolCache::base_offset() +
2699       ((byte_no == f2_byte)
2700        ? ConstantPoolCacheEntry::f2_offset()
2701        : ConstantPoolCacheEntry::f1_offset()
2702       )
2703     );
2704   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2705                                     ConstantPoolCacheEntry::flags_offset());
2706   // access constant pool cache fields
2707   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2708                                     ConstantPoolCacheEntry::f2_offset());
2709 
2710   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2711   resolve_cache_and_index(byte_no, cache, index, index_size);
2712     __ add(temp_reg, cache, AsmOperand(index, lsl, LogBytesPerWord));
2713     __ ldr(method, Address(temp_reg, method_offset));
2714 
2715   if (itable_index != noreg) {
2716     __ ldr(itable_index, Address(temp_reg, index_offset));
2717   }
2718   __ ldr_u32(flags, Address(temp_reg, flags_offset));
2719 }
2720 
2721 
2722 // The cache and index registers are expected to be set before the call and must not be Rtemp.
2723 // Blows volatile registers R0-R3, Rtemp, LR,
2724 // except cache and index registers which are preserved.
2725 void TemplateTable::jvmti_post_field_access(Register Rcache,
2726                                             Register Rindex,
2727                                             bool is_static,
2728                                             bool has_tos) {
2729   assert_different_registers(Rcache, Rindex, Rtemp);
2730 
2731   if (__ can_post_field_access()) {
2732     // Check to see if a field access watch has been set before we take
2733     // the time to call into the VM.
2734 
2735     Label Lcontinue;
2736 
2737     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_access_count_addr());
2738     __ cbz(Rtemp, Lcontinue);
2739 
2740     // cache entry pointer
2741     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
2742     __ add(R2, R2, in_bytes(ConstantPoolCache::base_offset()));
2743     if (is_static) {
2744       __ mov(R1, 0);        // NULL object reference
2745     } else {
2746       __ pop(atos);         // Get the object
2747       __ mov(R1, R0_tos);
2748       __ verify_oop(R1);
2749       __ push(atos);        // Restore stack state
2750     }
2751     // R1: object pointer or NULL
2752     // R2: cache entry pointer
2753     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2754                R1, R2);
2755     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
2756 
2757     __ bind(Lcontinue);
2758   }
2759 }
2760 
2761 
2762 void TemplateTable::pop_and_check_object(Register r) {
2763   __ pop_ptr(r);
2764   __ null_check(r, Rtemp);  // for field access must check obj.
2765   __ verify_oop(r);
2766 }
2767 
2768 
2769 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2770   transition(vtos, vtos);
2771 
2772   const Register Roffset  = R2_tmp;
2773   const Register Robj     = R3_tmp;
2774   const Register Rcache   = R4_tmp;
2775   const Register Rflagsav = Rtmp_save0;  // R4/R19
2776   const Register Rindex   = R5_tmp;
2777   const Register Rflags   = R5_tmp;
2778 
2779   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
2780   jvmti_post_field_access(Rcache, Rindex, is_static, false);
2781   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
2782 
2783   __ mov(Rflagsav, Rflags);
2784 
2785   if (!is_static) pop_and_check_object(Robj);
2786 
2787   Label Done, Lint, Ltable, shouldNotReachHere;
2788   Label Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
2789 
2790   // compute type
2791   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
2792   // Make sure we don't need to mask flags after the above shift
2793   ConstantPoolCacheEntry::verify_tos_state_shift();
2794 
2795   // There are actually two versions of implementation of getfield/getstatic:
2796   //
2797   // 1) Table switch using add(PC,...) instruction (fast_version)
2798   // 2) Table switch using ldr(PC,...) instruction
2799   //
2800   // The first version requires a fixed-size code block for each case and
2801   // cannot be used when RewriteBytecodes or VerifyOops
2802   // is enabled.
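  // Roughly (descriptive sketch only): in the fast version every case below is
  // padded to max_block_size instructions, so the dispatch is
  //   add(PC, PC, tos_state << (log_max_block_size + LogInstructionSize))
  // while the slow version emits a table of case addresses (Ltable) and does
  //   ldr(PC, Address(PC, tos_state, lsl, LogBytesPerWord))
  // itos (and, for the fast version, atos) is handled out of line at Lint.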
2803 
2804   // Size of fixed size code block for fast_version
2805   const int log_max_block_size = 3;
2806   const int max_block_size = 1 << log_max_block_size;
2807 
2808   // Decide if fast version is enabled
2809   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
2810 
2811   // On 32-bit ARM atos and itos cases can be merged only for fast version, because
2812   // atos requires additional processing in slow version.
2813   bool atos_merged_with_itos = fast_version;
2814 
2815   assert(number_of_states == 10, "number of tos states should be equal to 10");
2816 
2817   __ cmp(Rflags, itos);
2818   if(atos_merged_with_itos) {
2819     __ cmp(Rflags, atos, ne);
2820   }
2821 
2822   // table switch by type
2823   if(fast_version) {
2824     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
2825   } else {
2826     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
2827   }
2828 
2829   // jump to itos/atos case
2830   __ b(Lint);
2831 
2832   // table with addresses for slow version
2833   if (fast_version) {
2834     // nothing to do
2835   } else  {
2836     __ bind(Ltable);
2837     __ emit_address(Lbtos);
2838     __ emit_address(Lztos);
2839     __ emit_address(Lctos);
2840     __ emit_address(Lstos);
2841     __ emit_address(Litos);
2842     __ emit_address(Lltos);
2843     __ emit_address(Lftos);
2844     __ emit_address(Ldtos);
2845     __ emit_address(Latos);
2846   }
2847 
2848 #ifdef ASSERT
2849   int seq = 0;
2850 #endif
2851   // btos
2852   {
2853     assert(btos == seq++, "btos has unexpected value");
2854     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
2855     __ bind(Lbtos);
2856     __ access_load_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2857     __ push(btos);
2858     // Rewrite bytecode to be faster
2859     if (!is_static && rc == may_rewrite) {
2860       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2861     }
2862     __ b(Done);
2863   }
2864 
2865   // ztos (same as btos for getfield)
2866   {
2867     assert(ztos == seq++, "ztos has unexpected value");
2868     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
2869     __ bind(Lztos);
2870     __ access_load_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2871     __ push(ztos);
2872     // Rewrite bytecode to be faster (use btos fast getfield)
2873     if (!is_static && rc == may_rewrite) {
2874       patch_bytecode(Bytecodes::_fast_bgetfield, R0_tmp, Rtemp);
2875     }
2876     __ b(Done);
2877   }
2878 
2879   // ctos
2880   {
2881     assert(ctos == seq++, "ctos has unexpected value");
2882     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
2883     __ bind(Lctos);
2884     __ access_load_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2885     __ push(ctos);
2886     if (!is_static && rc == may_rewrite) {
2887       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
2888     }
2889     __ b(Done);
2890   }
2891 
2892   // stos
2893   {
2894     assert(stos == seq++, "stos has unexpected value");
2895     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
2896     __ bind(Lstos);
2897     __ access_load_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2898     __ push(stos);
2899     if (!is_static && rc == may_rewrite) {
2900       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
2901     }
2902     __ b(Done);
2903   }
2904 
2905   // itos
2906   {
2907     assert(itos == seq++, "itos has unexpected value");
2908     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
2909     __ bind(Litos);
2910     __ b(shouldNotReachHere);
2911   }
2912 
2913   // ltos
2914   {
2915     assert(ltos == seq++, "ltos has unexpected value");
2916     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
2917     __ bind(Lltos);
2918     __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
2919     __ push(ltos);
2920     if (!is_static && rc == may_rewrite) {
2921       patch_bytecode(Bytecodes::_fast_lgetfield, R0_tmp, Rtemp);
2922     }
2923     __ b(Done);
2924   }
2925 
2926   // ftos
2927   {
2928     assert(ftos == seq++, "ftos has unexpected value");
2929     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
2930     __ bind(Lftos);
2931     // floats and ints are placed on the stack in the same way, so
2932     // we can use push(itos) to transfer the value without using VFP
2933     __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2934     __ push(itos);
2935     if (!is_static && rc == may_rewrite) {
2936       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
2937     }
2938     __ b(Done);
2939   }
2940 
2941   // dtos
2942   {
2943     assert(dtos == seq++, "dtos has unexpected value");
2944     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
2945     __ bind(Ldtos);
2946     // doubles and longs are placed on the stack in the same way, so
2947     // we can use push(ltos) to transfer the value without using VFP
2948     __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
2949     __ push(ltos);
2950     if (!is_static && rc == may_rewrite) {
2951       patch_bytecode(Bytecodes::_fast_dgetfield, R0_tmp, Rtemp);
2952     }
2953     __ b(Done);
2954   }
2955 
2956   // atos
2957   {
2958     assert(atos == seq++, "atos has unexpected value");
2959 
2960     // atos case for slow version on 32-bit ARM
2961     if(!atos_merged_with_itos) {
2962       __ bind(Latos);
2963       do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
2964       __ push(atos);
2965       // Rewrite bytecode to be faster
2966       if (!is_static && rc == may_rewrite) {
2967         patch_bytecode(Bytecodes::_fast_agetfield, R0_tmp, Rtemp);
2968       }
2969       __ b(Done);
2970     }
2971   }
2972 
2973   assert(vtos == seq++, "vtos has unexpected value");
2974 
2975   __ bind(shouldNotReachHere);
2976   __ should_not_reach_here();
2977 
2978   // itos and atos cases are frequent so it makes sense to move them out of table switch
2979   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
2980 
2981   __ bind(Lint);
2982   __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
2983   __ push(itos);
2984   // Rewrite bytecode to be faster
2985   if (!is_static && rc == may_rewrite) {
2986     patch_bytecode(Bytecodes::_fast_igetfield, R0_tmp, Rtemp);
2987   }
2988 
2989   __ bind(Done);
2990 
2991   // Check for volatile field
2992   Label notVolatile;
2993   __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
2994 
2995   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
2996 
2997   __ bind(notVolatile);
2998 }
2999 
3000 void TemplateTable::getfield(int byte_no) {
3001   getfield_or_static(byte_no, false);
3002 }
3003 
3004 void TemplateTable::nofast_getfield(int byte_no) {
3005   getfield_or_static(byte_no, false, may_not_rewrite);
3006 }
3007 
3008 void TemplateTable::getstatic(int byte_no) {
3009   getfield_or_static(byte_no, true);
3010 }
3011 
3012 
3013 // The cache and index registers are expected to be set before the call and must not be R1 or Rtemp.
3014 // Blows volatile registers R0-R3, Rtemp, LR,
3015 // except cache and index registers which are preserved.
3016 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rindex, bool is_static) {
3017   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3018   assert_different_registers(Rcache, Rindex, R1, Rtemp);
3019 
3020   if (__ can_post_field_modification()) {
3021     // Check to see if a field modification watch has been set before we take
3022     // the time to call into the VM.
3023     Label Lcontinue;
3024 
3025     __ ldr_global_s32(Rtemp, (address)JvmtiExport::get_field_modification_count_addr());
3026     __ cbz(Rtemp, Lcontinue);
3027 
3028     if (is_static) {
3029       // Life is simple.  Null out the object pointer.
3030       __ mov(R1, 0);
3031     } else {
3032       // Life is harder. The stack holds the value on top, followed by the object.
3033       // We don't know the size of the value, though; it could be one or two words
3034       // depending on its type. As a result, we must find the type to determine where
3035       // the object is.
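      // Concretely (descriptive only): the value sits on top of the expression
      // stack (one slot for int-sized types, two slots for long/double) with
      // the object reference just below it, so the object is read from
      // expr_offset_in_bytes(1) or expr_offset_in_bytes(2) depending on the
      // tos state taken from the cache entry flags.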
3036 
3037       __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3038       __ ldr_u32(Rtemp, Address(Rtemp, cp_base_offset + ConstantPoolCacheEntry::flags_offset()));
3039 
3040       __ logical_shift_right(Rtemp, Rtemp, ConstantPoolCacheEntry::tos_state_shift);
3041       // Make sure we don't need to mask Rtemp after the above shift
3042       ConstantPoolCacheEntry::verify_tos_state_shift();
3043 
3044       __ cmp(Rtemp, ltos);
3045       __ cond_cmp(Rtemp, dtos, ne);
3046       // two word value (ltos/dtos)
3047       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(2)), eq);
3048 
3049       // one word value (not ltos, dtos)
3050       __ ldr(R1, Address(SP, Interpreter::expr_offset_in_bytes(1)), ne);
3051     }
3052 
3053     // cache entry pointer
3054     __ add(R2, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3055     __ add(R2, R2, in_bytes(cp_base_offset));
3056 
3057     // object (tos)
3058     __ mov(R3, Rstack_top);
3059 
3060     // R1: object pointer set up above (NULL if static)
3061     // R2: cache entry pointer
3062     // R3: value object on the stack
3063     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
3064                R1, R2, R3);
3065     __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3066 
3067     __ bind(Lcontinue);
3068   }
3069 }
3070 
3071 
3072 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3073   transition(vtos, vtos);
3074 
3075   const Register Roffset  = R2_tmp;
3076   const Register Robj     = R3_tmp;
3077   const Register Rcache   = R4_tmp;
3078   const Register Rflagsav = Rtmp_save0;  // R4/R19
3079   const Register Rindex   = R5_tmp;
3080   const Register Rflags   = R5_tmp;
3081 
3082   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
3083   jvmti_post_field_mod(Rcache, Rindex, is_static);
3084   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
3085 
3086   // Check for volatile field
3087   Label notVolatile;
3088   __ mov(Rflagsav, Rflags);
3089   __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3090 
3091   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3092 
3093   __ bind(notVolatile);
3094 
3095   Label Done, Lint, shouldNotReachHere;
3096   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
3097 
3098   // compute type
3099   __ logical_shift_right(Rflags, Rflags, ConstantPoolCacheEntry::tos_state_shift);
3100   // Make sure we don't need to mask flags after the above shift
3101   ConstantPoolCacheEntry::verify_tos_state_shift();
3102 
3103   // There are two implementations of putfield/putstatic:
3104   //
3105   // 32-bit ARM:
3106   // 1) Table switch using an add(PC,...) instruction (fast_version)
3107   // 2) Table switch using an ldr(PC,...) instruction
3108   //
3109   // The first version requires a fixed-size code block for each case
3110   // and cannot be used when RewriteBytecodes or VerifyOops
3111   // is enabled.
3112 
3113   // Size of fixed size code block for fast_version (in instructions)
3114   const int log_max_block_size = 3;
3115   const int max_block_size = 1 << log_max_block_size;
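  // Rough shape of the dispatch emitted below (illustrative sketch, not literal output):
  //   fast_version:  add  pc, pc, Rflags, lsl #(log_max_block_size + LogInstructionSize)
  //                  <one fixed-size block of max_block_size instructions per tos state>
  //   otherwise:     ldr  pc, [pc, Rflags, lsl #LogBytesPerWord]
  //                  <table of case addresses (Ltable), one per tos state>
  // Both forms are taken only when the type is not itos; itos falls through to Lint.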
3116 
3117   // Decide if fast version is enabled
3118   bool fast_version = (is_static || !RewriteBytecodes) && !VerifyOops;
3119 
3120   assert(number_of_states == 10, "number of tos states should be equal to 10");
3121 
3122   // the itos case is frequent and is handled outside the table switch
3123   __ cmp(Rflags, itos);
3124 
3125   // table switch by type
3126   if (fast_version) {
3127     __ add(PC, PC, AsmOperand(Rflags, lsl, log_max_block_size + Assembler::LogInstructionSize), ne);
3128   } else  {
3129     __ ldr(PC, Address(PC, Rflags, lsl, LogBytesPerWord), ne);
3130   }
3131 
3132   // jump to itos case
3133   __ b(Lint);
3134 
3135   // table with addresses for slow version
3136   if (fast_version) {
3137     // nothing to do
3138   } else  {
3139     __ bind(Ltable);
3140     __ emit_address(Lbtos);
3141     __ emit_address(Lztos);
3142     __ emit_address(Lctos);
3143     __ emit_address(Lstos);
3144     __ emit_address(Litos);
3145     __ emit_address(Lltos);
3146     __ emit_address(Lftos);
3147     __ emit_address(Ldtos);
3148     __ emit_address(Latos);
3149   }
3150 
3151 #ifdef ASSERT
3152   int seq = 0;
3153 #endif
3154   // btos
3155   {
3156     assert(btos == seq++, "btos has unexpected value");
3157     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
3158     __ bind(Lbtos);
3159     __ pop(btos);
3160     if (!is_static) pop_and_check_object(Robj);
3161     __ access_store_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3162     if (!is_static && rc == may_rewrite) {
3163       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
3164     }
3165     __ b(Done);
3166   }
3167 
3168   // ztos
3169   {
3170     assert(ztos == seq++, "ztos has unexpected value");
3171     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
3172     __ bind(Lztos);
3173     __ pop(ztos);
3174     if (!is_static) pop_and_check_object(Robj);
3175     __ access_store_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3176     if (!is_static && rc == may_rewrite) {
3177       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
3178     }
3179     __ b(Done);
3180   }
3181 
3182   // ctos
3183   {
3184     assert(ctos == seq++, "ctos has unexpected value");
3185     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
3186     __ bind(Lctos);
3187     __ pop(ctos);
3188     if (!is_static) pop_and_check_object(Robj);
3189     __ access_store_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3190     if (!is_static && rc == may_rewrite) {
3191       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
3192     }
3193     __ b(Done);
3194   }
3195 
3196   // stos
3197   {
3198     assert(stos == seq++, "stos has unexpected value");
3199     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
3200     __ bind(Lstos);
3201     __ pop(stos);
3202     if (!is_static) pop_and_check_object(Robj);
3203     __ access_store_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3204     if (!is_static && rc == may_rewrite) {
3205       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
3206     }
3207     __ b(Done);
3208   }
3209 
3210   // itos
3211   {
3212     assert(itos == seq++, "itos has unexpected value");
3213     FixedSizeCodeBlock itos_block(_masm, max_block_size, fast_version);
3214     __ bind(Litos);
3215     __ b(shouldNotReachHere);
3216   }
3217 
3218   // ltos
3219   {
3220     assert(ltos == seq++, "ltos has unexpected value");
3221     FixedSizeCodeBlock ltos_block(_masm, max_block_size, fast_version);
3222     __ bind(Lltos);
3223     __ pop(ltos);
3224     if (!is_static) pop_and_check_object(Robj);
3225     __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3226     if (!is_static && rc == may_rewrite) {
3227       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
3228     }
3229     __ b(Done);
3230   }
3231 
3232   // ftos
3233   {
3234     assert(ftos == seq++, "ftos has unexpected value");
3235     FixedSizeCodeBlock ftos_block(_masm, max_block_size, fast_version);
3236     __ bind(Lftos);
3237     // floats and ints are placed on stack in the same way, so
3238     // we can use pop(itos) to transfer value without using VFP
3239     __ pop(itos);
3240     if (!is_static) pop_and_check_object(Robj);
3241     __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3242     if (!is_static && rc == may_rewrite) {
3243       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
3244     }
3245     __ b(Done);
3246   }
3247 
3248   // dtos
3249   {
3250     assert(dtos == seq++, "dtos has unexpected value");
3251     FixedSizeCodeBlock dtos_block(_masm, max_block_size, fast_version);
3252     __ bind(Ldtos);
3253     // doubles and longs are placed on stack in the same way, so
3254     // we can use pop(ltos) to transfer value without using VFP
3255     __ pop(ltos);
3256     if (!is_static) pop_and_check_object(Robj);
3257     __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
3258     if (!is_static && rc == may_rewrite) {
3259       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
3260     }
3261     __ b(Done);
3262   }
3263 
3264   // atos
3265   {
3266     assert(atos == seq++, "atos has unexpected value");
3267     __ bind(Latos);
3268     __ pop(atos);
3269     if (!is_static) pop_and_check_object(Robj);
3270     // Store into the field
3271     do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R5_tmp, false);
3272     if (!is_static && rc == may_rewrite) {
3273       patch_bytecode(Bytecodes::_fast_aputfield, R0_tmp, Rtemp, true, byte_no);
3274     }
3275     __ b(Done);
3276   }
3277 
3278   __ bind(shouldNotReachHere);
3279   __ should_not_reach_here();
3280 
3281   // the itos case is frequent and is handled outside the table switch
3282   __ bind(Lint);
3283   __ pop(itos);
3284   if (!is_static) pop_and_check_object(Robj);
3285   __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
3286   if (!is_static && rc == may_rewrite) {
3287     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
3288   }
3289 
3290   __ bind(Done);
3291 
3292   Label notVolatile2;
3293   if (is_static) {
3294     // Just check for volatile. Memory barrier for static final field
3295     // is handled by class initialization.
3296     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3297     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3298     __ bind(notVolatile2);
3299   } else {
3300     // Check for volatile field and final field
3301     Label skipMembar;
3302 
3303     __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3304            1 << ConstantPoolCacheEntry::is_final_shift);
3305     __ b(skipMembar, eq);
3306 
3307     __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3308 
3309     // StoreLoad barrier after volatile field write
3310     volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3311     __ b(skipMembar);
3312 
3313     // StoreStore barrier after final field write
3314     __ bind(notVolatile2);
3315     volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3316 
3317     __ bind(skipMembar);
3318   }
3319 }
3320 
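// Illustrative Java source (hypothetical example, not part of this file): stores such as
//   obj.count = 42;     // compiled to putfield
//   Config.LIMIT = 10;  // compiled to putstatic
// are handled by putfield_or_static() above; when rewriting is allowed, the first
// execution patches putfield into one of the _fast_*putfield bytecodes handled further below.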
3321 void TemplateTable::putfield(int byte_no) {
3322   putfield_or_static(byte_no, false);
3323 }
3324 
3325 void TemplateTable::nofast_putfield(int byte_no) {
3326   putfield_or_static(byte_no, false, may_not_rewrite);
3327 }
3328 
3329 void TemplateTable::putstatic(int byte_no) {
3330   putfield_or_static(byte_no, true);
3331 }
3332 
3333 
3334 void TemplateTable::jvmti_post_fast_field_mod() {
3335   // This version of jvmti_post_fast_field_mod() is not used on ARM
3336   Unimplemented();
3337 }
3338 
3339 // Blows volatile registers R0-R3, Rtemp, LR,
3340 // but preserves tosca with the given state.
3341 void TemplateTable::jvmti_post_fast_field_mod(TosState state) {
3342   if (__ can_post_field_modification()) {
3343     // Check to see if a field modification watch has been set before we take
3344     // the time to call into the VM.
3345     Label done;
3346 
3347     __ ldr_global_s32(R2, (address)JvmtiExport::get_field_modification_count_addr());
3348     __ cbz(R2, done);
3349 
3350     __ pop_ptr(R3);               // copy the object pointer from tos
3351     __ verify_oop(R3);
3352     __ push_ptr(R3);              // put the object pointer back on tos
3353 
3354     __ push(state);               // save value on the stack
3355 
3356     // access constant pool cache entry
3357     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3358 
3359     __ mov(R1, R3);
3360     assert(Interpreter::expr_offset_in_bytes(0) == 0, "adjust this code");
3361     __ mov(R3, Rstack_top); // put tos addr into R3
3362 
3363     // R1: object pointer copied above
3364     // R2: cache entry pointer
3365     // R3: jvalue object on the stack
3366     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), R1, R2, R3);
3367 
3368     __ pop(state);                // restore value
3369 
3370     __ bind(done);
3371   }
3372 }
3373 
3374 
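// Descriptive note: fast_storefield() handles the rewritten _fast_*putfield bytecodes.
// The field is already resolved, so the cp cache entry is read directly (no
// resolve_cache_and_index as in putfield_or_static above).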
3375 void TemplateTable::fast_storefield(TosState state) {
3376   transition(state, vtos);
3377 
3378   ByteSize base = ConstantPoolCache::base_offset();
3379 
3380   jvmti_post_fast_field_mod(state);
3381 
3382   const Register Rcache  = R2_tmp;
3383   const Register Rindex  = R3_tmp;
3384   const Register Roffset = R3_tmp;
3385   const Register Rflags  = Rtmp_save0; // R4/R19
3386   const Register Robj    = R5_tmp;
3387 
3388   // access constant pool cache
3389   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3390 
3391   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3392 
3393   // load flags to test volatile
3394   __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
3395 
3396   // replace index with field offset from cache entry
3397   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
3398 
3399   // Check for volatile store
3400   Label notVolatile;
3401   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3402 
3403   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
3404 
3405   __ bind(notVolatile);
3406 
3407   // Get object from stack
3408   pop_and_check_object(Robj);
3409 
3410   Address addr = Address(Robj, Roffset);
3411   // access field
3412   switch (bytecode()) {
3413     case Bytecodes::_fast_zputfield:
3414       __ access_store_at(T_BOOLEAN, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3415       break;
3416     case Bytecodes::_fast_bputfield:
3417       __ access_store_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3418       break;
3419     case Bytecodes::_fast_sputfield:
3420       __ access_store_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3421       break;
3422     case Bytecodes::_fast_cputfield:
3423       __ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3424       break;
3425     case Bytecodes::_fast_iputfield:
3426       __ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
3427       break;
3428     case Bytecodes::_fast_lputfield:
3429       __ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3430       break;
3431     case Bytecodes::_fast_fputfield:
3432       __ access_store_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3433       break;
3434     case Bytecodes::_fast_dputfield:
3435       __ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
3436       break;
3437     case Bytecodes::_fast_aputfield:
3438       do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false);
3439       break;
3440 
3441     default:
3442       ShouldNotReachHere();
3443   }
3444 
3445   Label notVolatile2;
3446   Label skipMembar;
3447   __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
3448          1 << ConstantPoolCacheEntry::is_final_shift);
3449   __ b(skipMembar, eq);
3450 
3451   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
3452 
3453   // StoreLoad barrier after volatile field write
3454   volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
3455   __ b(skipMembar);
3456 
3457   // StoreStore barrier after final field write
3458   __ bind(notVolatile2);
3459   volatile_barrier(MacroAssembler::StoreStore, Rtemp);
3460 
3461   __ bind(skipMembar);
3462 }
3463 
3464 void TemplateTable::fast_accessfield(TosState state) {
3465   transition(atos, state);
3466 
3467   // do the JVMTI work here to avoid disturbing the register state below
3468   if (__ can_post_field_access()) {
3469     // Check to see if a field access watch has been set before we take
3470     // the time to call into the VM.
3471     Label done;
3472     __ ldr_global_s32(R2, (address) JvmtiExport::get_field_access_count_addr());
3473     __ cbz(R2, done);
3474     // access constant pool cache entry
3475     __ get_cache_entry_pointer_at_bcp(R2, R1, 1);
3476     __ push_ptr(R0_tos);  // save object pointer before call_VM() clobbers it
3477     __ verify_oop(R0_tos);
3478     __ mov(R1, R0_tos);
3479     // R1: object pointer copied above
3480     // R2: cache entry pointer
3481     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R1, R2);
3482     __ pop_ptr(R0_tos);   // restore object pointer
3483 
3484     __ bind(done);
3485   }
3486 
3487   const Register Robj    = R0_tos;
3488   const Register Rcache  = R2_tmp;
3489   const Register Rflags  = R2_tmp;
3490   const Register Rindex  = R3_tmp;
3491   const Register Roffset = R3_tmp;
3492 
3493   // access constant pool cache
3494   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
3495   // replace index with field offset from cache entry
3496   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3497   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3498 
3499   // load flags to test volatile
3500   __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3501 
3502   __ verify_oop(Robj);
3503   __ null_check(Robj, Rtemp);
3504 
3505   Address addr = Address(Robj, Roffset);
3506   // access field
3507   switch (bytecode()) {
3508     case Bytecodes::_fast_bgetfield:
3509       __ access_load_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3510       break;
3511     case Bytecodes::_fast_sgetfield:
3512       __ access_load_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3513       break;
3514     case Bytecodes::_fast_cgetfield:
3515       __ access_load_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3516       break;
3517     case Bytecodes::_fast_igetfield:
3518       __ access_load_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
3519       break;
3520     case Bytecodes::_fast_lgetfield:
3521       __ access_load_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3522       break;
3523     case Bytecodes::_fast_fgetfield:
3524       __ access_load_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3525       break;
3526     case Bytecodes::_fast_dgetfield:
3527       __ access_load_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg);
3528       break;
3529     case Bytecodes::_fast_agetfield:
3530       do_oop_load(_masm, R0_tos, addr);
3531       __ verify_oop(R0_tos);
3532       break;
3533     default:
3534       ShouldNotReachHere();
3535   }
3536 
3537   // Check for volatile load
3538   Label notVolatile;
3539   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3540 
3541   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3542 
3543   __ bind(notVolatile);
3544 }
3545 
3546 
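// Descriptive note: fast_xaccess() implements the fused "aload_0 + fast getfield"
// bytecodes (e.g. _fast_iaccess_0): the receiver comes from local 0 and the field
// offset from the cp cache entry at bcp + 2, hence get_cache_and_index_at_bcp(..., 2).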
3547 void TemplateTable::fast_xaccess(TosState state) {
3548   transition(vtos, state);
3549 
3550   const Register Robj = R1_tmp;
3551   const Register Rcache = R2_tmp;
3552   const Register Rindex = R3_tmp;
3553   const Register Roffset = R3_tmp;
3554   const Register Rflags = R4_tmp;
3555   Label done;
3556 
3557   // get receiver
3558   __ ldr(Robj, aaddress(0));
3559 
3560   // access constant pool cache
3561   __ get_cache_and_index_at_bcp(Rcache, Rindex, 2);
3562   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
3563   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
3564 
3565   // load flags to test volatile
3566   __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
3567 
3568   // make sure exception is reported in correct bcp range (getfield is next instruction)
3569   __ add(Rbcp, Rbcp, 1);
3570   __ null_check(Robj, Rtemp);
3571   __ sub(Rbcp, Rbcp, 1);
3572 
3573 
3574   if (state == itos) {
3575     __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
3576   } else if (state == atos) {
3577     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
3578     __ verify_oop(R0_tos);
3579   } else if (state == ftos) {
3580 #ifdef __SOFTFP__
3581     __ ldr(R0_tos, Address(Robj, Roffset));
3582 #else
3583     __ access_load_at(T_FLOAT, IN_HEAP, Address(Robj, Roffset), noreg /* ftos */, noreg, noreg, noreg);
3584 #endif // __SOFTFP__
3585   } else {
3586     ShouldNotReachHere();
3587   }
3588 
3589   // Check for volatile load
3590   Label notVolatile;
3591   __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
3592 
3593   volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
3594 
3595   __ bind(notVolatile);
3596 
3597   __ bind(done);
3598 }
3599 
3600 
3601 
3602 //----------------------------------------------------------------------------------------------------
3603 // Calls
3604 
3605 void TemplateTable::prepare_invoke(int byte_no,
3606                                    Register method,  // linked method (or i-klass)
3607                                    Register index,   // itable index, MethodType, etc.
3608                                    Register recv,    // if caller wants to see it
3609                                    Register flags    // if caller wants to test it
3610                                    ) {
3611   // determine flags
3612   const Bytecodes::Code code = bytecode();
3613   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3614   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3615   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3616   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3617   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3618   const bool load_receiver       = (recv != noreg);
3619   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3620   assert(recv  == noreg || recv  == R2, "");
3621   assert(flags == noreg || flags == R3, "");
3622 
3623   // setup registers & access constant pool cache
3624   if (recv  == noreg)  recv  = R2;
3625   if (flags == noreg)  flags = R3;
3626   const Register temp = Rtemp;
3627   const Register ret_type = R1_tmp;
3628   assert_different_registers(method, index, flags, recv, LR, ret_type, temp);
3629 
3630   // save 'interpreter return address'
3631   __ save_bcp();
3632 
3633   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3634 
3635   // maybe push extra argument
3636   if (is_invokedynamic || is_invokehandle) {
3637     Label L_no_push;
3638     __ tbz(flags, ConstantPoolCacheEntry::has_appendix_shift, L_no_push);
3639     __ mov(temp, index);
3640     __ load_resolved_reference_at_index(index, temp);
3641     __ verify_oop(index);
3642     __ push_ptr(index);  // push appendix (MethodType, CallSite, etc.)
3643     __ bind(L_no_push);
3644   }
3645 
3646   // load receiver if needed (after extra argument is pushed so parameter size is correct)
3647   if (load_receiver) {
3648     __ andr(temp, flags, (uintx)ConstantPoolCacheEntry::parameter_size_mask);  // get parameter size
3649     Address recv_addr = __ receiver_argument_address(Rstack_top, temp, recv);
3650     __ ldr(recv, recv_addr);
3651     __ verify_oop(recv);
3652   }
3653 
3654   // compute return type
3655   __ logical_shift_right(ret_type, flags, ConstantPoolCacheEntry::tos_state_shift);
3656   // Make sure we don't need to mask flags after the above shift
3657   ConstantPoolCacheEntry::verify_tos_state_shift();
3658   // load return address
3659   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
3660     __ mov_slow(temp, table);
3661     __ ldr(LR, Address::indexed_ptr(temp, ret_type));
3662   }
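  // LR now holds the interpreter return entry matching the callee's result type, so the
  // result is pushed with the correct tos state when the callee returns (descriptive note).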
3663 }
3664 
3665 
3666 void TemplateTable::invokevirtual_helper(Register index,
3667                                          Register recv,
3668                                          Register flags) {
3669 
3670   const Register recv_klass = R2_tmp;
3671 
3672   assert_different_registers(index, recv, flags, Rtemp);
3673   assert_different_registers(index, recv_klass, R0_tmp, Rtemp);
3674 
3675   // Test for an invoke of a final method
3676   Label notFinal;
3677   __ tbz(flags, ConstantPoolCacheEntry::is_vfinal_shift, notFinal);
3678 
3679   assert(index == Rmethod, "Method* must be Rmethod, for interpreter calling convention");
3680 
3681   // do the call - the index is actually the method to call
3682 
3683   // It's final, need a null check here!
3684   __ null_check(recv, Rtemp);
3685 
3686   // profile this call
3687   __ profile_final_call(R0_tmp);
3688 
3689   __ jump_from_interpreted(Rmethod);
3690 
3691   __ bind(notFinal);
3692 
3693   // get receiver klass
3694   __ null_check(recv, Rtemp, oopDesc::klass_offset_in_bytes());
3695   __ load_klass(recv_klass, recv);
3696 
3697   // profile this call
3698   __ profile_virtual_call(R0_tmp, recv_klass);
3699 
3700   // get target Method* & entry point
3701   const int base = in_bytes(Klass::vtable_start_offset());
3702   assert(vtableEntry::size() == 1, "adjust the scaling in the code below");
3703   __ add(Rtemp, recv_klass, AsmOperand(index, lsl, LogHeapWordSize));
3704   __ ldr(Rmethod, Address(Rtemp, base + vtableEntry::method_offset_in_bytes()));
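  // Rmethod now holds the vtable entry's Method* for the receiver's klass, i.e. the
  // target of the virtual dispatch (descriptive note).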
3705   __ jump_from_interpreted(Rmethod);
3706 }
3707 
3708 void TemplateTable::invokevirtual(int byte_no) {
3709   transition(vtos, vtos);
3710   assert(byte_no == f2_byte, "use this argument");
3711 
3712   const Register Rrecv  = R2_tmp;
3713   const Register Rflags = R3_tmp;
3714 
3715   prepare_invoke(byte_no, Rmethod, noreg, Rrecv, Rflags);
3716 
3717   // Rmethod: index
3718   // Rrecv:   receiver
3719   // Rflags:  flags
3720   // LR:      return address
3721 
3722   invokevirtual_helper(Rmethod, Rrecv, Rflags);
3723 }
3724 
3725 
3726 void TemplateTable::invokespecial(int byte_no) {
3727   transition(vtos, vtos);
3728   assert(byte_no == f1_byte, "use this argument");
3729   const Register Rrecv  = R2_tmp;
3730   prepare_invoke(byte_no, Rmethod, noreg, Rrecv);
3731   __ verify_oop(Rrecv);
3732   __ null_check(Rrecv, Rtemp);
3733   // do the call
3734   __ profile_call(Rrecv);
3735   __ jump_from_interpreted(Rmethod);
3736 }
3737 
3738 
3739 void TemplateTable::invokestatic(int byte_no) {
3740   transition(vtos, vtos);
3741   assert(byte_no == f1_byte, "use this argument");
3742   prepare_invoke(byte_no, Rmethod);
3743   // do the call
3744   __ profile_call(R2_tmp);
3745   __ jump_from_interpreted(Rmethod);
3746 }
3747 
3748 
3749 void TemplateTable::fast_invokevfinal(int byte_no) {
3750   transition(vtos, vtos);
3751   assert(byte_no == f2_byte, "use this argument");
3752   __ stop("fast_invokevfinal is not used on ARM");
3753 }
3754 
3755 
3756 void TemplateTable::invokeinterface(int byte_no) {
3757   transition(vtos, vtos);
3758   assert(byte_no == f1_byte, "use this argument");
3759 
3760   const Register Ritable = R1_tmp;
3761   const Register Rrecv   = R2_tmp;
3762   const Register Rinterf = R5_tmp;
3763   const Register Rindex  = R4_tmp;
3764   const Register Rflags  = R3_tmp;
3765   const Register Rklass  = R2_tmp; // Note! Same register with Rrecv
3766 
3767   prepare_invoke(byte_no, Rinterf, Rmethod, Rrecv, Rflags);
3768 
3769   // First check for Object case, then private interface method,
3770   // then regular interface method.
3771 
3772   // Special case of invokeinterface called for virtual method of
3773   // java.lang.Object.  See cpCache.cpp for details.
3774   Label notObjectMethod;
3775   __ tbz(Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift, notObjectMethod);
3776   invokevirtual_helper(Rmethod, Rrecv, Rflags);
3777   __ bind(notObjectMethod);
3778 
3779   // Get receiver klass into Rklass - also a null check
3780   __ load_klass(Rklass, Rrecv);
3781 
3782   // Check for private method invocation - indicated by vfinal
3783   Label no_such_interface;
3784 
3785   Label notVFinal;
3786   __ tbz(Rflags, ConstantPoolCacheEntry::is_vfinal_shift, notVFinal);
3787 
3788   Label subtype;
3789   __ check_klass_subtype(Rklass, Rinterf, R1_tmp, R3_tmp, noreg, subtype);
3790   // If we get here the typecheck failed
3791   __ b(no_such_interface);
3792   __ bind(subtype);
3793 
3794   // do the call
3795   __ profile_final_call(R0_tmp);
3796   __ jump_from_interpreted(Rmethod);
3797 
3798   __ bind(notVFinal);
3799 
3800   // Receiver subtype check against REFC.
3801   __ lookup_interface_method(// inputs: rec. class, interface
3802                              Rklass, Rinterf, noreg,
3803                              // outputs:  scan temp. reg1, scan temp. reg2
3804                              noreg, Ritable, Rtemp,
3805                              no_such_interface);
3806 
3807   // profile this call
3808   __ profile_virtual_call(R0_tmp, Rklass);
3809 
3810   // Get declaring interface class from method
3811   __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
3812   __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
3813   __ ldr(Rinterf, Address(Rtemp, ConstantPool::pool_holder_offset_in_bytes()));
3814 
3815   // Get itable index from method
3816   __ ldr_s32(Rtemp, Address(Rmethod, Method::itable_index_offset()));
3817   __ add(Rtemp, Rtemp, (-Method::itable_index_max)); // small negative constant is too large for an immediate on arm32
3818   __ neg(Rindex, Rtemp);
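  // Net effect (descriptive note): Rindex = Method::itable_index_max - <value loaded from
  // Method::itable_index_offset()>, computed with add/neg because, as noted above, the
  // constant cannot be encoded directly as an arm32 immediate.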
3819 
3820   __ lookup_interface_method(// inputs: rec. class, interface
3821                              Rklass, Rinterf, Rindex,
3822                              // outputs:  scan temp. reg1, scan temp. reg2
3823                              Rmethod, Ritable, Rtemp,
3824                              no_such_interface);
3825 
3826   // Rmethod: Method* to call
3827 
3828   // Check for abstract method error
3829   // Note: This should be done more efficiently via a throw_abstract_method_error
3830   //       interpreter entry point and a conditional jump to it in case of a null
3831   //       method.
3832   { Label L;
3833     __ cbnz(Rmethod, L);
3834     // throw exception
3835     // note: must restore interpreter registers to canonical
3836     //       state for exception handling to work correctly!
3837     __ restore_method();
3838     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3839     // the call_VM checks for exception, so we should never return here.
3840     __ should_not_reach_here();
3841     __ bind(L);
3842   }
3843 
3844   // do the call
3845   __ jump_from_interpreted(Rmethod);
3846 
3847   // throw exception
3848   __ bind(no_such_interface);
3849   __ restore_method();
3850   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3851   // the call_VM checks for exception, so we should never return here.
3852   __ should_not_reach_here();
3853 }
3854 
3855 void TemplateTable::invokehandle(int byte_no) {
3856   transition(vtos, vtos);
3857 
3858   const Register Rrecv  = R2_tmp;
3859   const Register Rmtype = R4_tmp;
3860   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
3861 
3862   prepare_invoke(byte_no, R5_method, Rmtype, Rrecv);
3863   __ null_check(Rrecv, Rtemp);
3864 
3865   // Rmtype:  MethodType object (from cpool->resolved_references[f1], if necessary)
3866   // Rmethod: MH.invokeExact_MT method (from f2)
3867 
3868   // Note:  Rmtype is already pushed (if necessary) by prepare_invoke
3869 
3870   // do the call
3871   __ profile_final_call(R3_tmp);  // FIXME: profile the LambdaForm also
3872   __ mov(Rmethod, R5_method);
3873   __ jump_from_interpreted(Rmethod);
3874 }
3875 
3876 void TemplateTable::invokedynamic(int byte_no) {
3877   transition(vtos, vtos);
3878 
3879   const Register Rcallsite = R4_tmp;
3880   const Register R5_method = R5_tmp;  // can't reuse Rmethod!
3881 
3882   prepare_invoke(byte_no, R5_method, Rcallsite);
3883 
3884   // Rcallsite: CallSite object (from cpool->resolved_references[f1])
3885   // Rmethod:   MH.linkToCallSite method (from f2)
3886 
3887   // Note:  Rcallsite is already pushed by prepare_invoke
3888 
3889   if (ProfileInterpreter) {
3890     __ profile_call(R2_tmp);
3891   }
3892 
3893   // do the call
3894   __ mov(Rmethod, R5_method);
3895   __ jump_from_interpreted(Rmethod);
3896 }
3897 
3898 //----------------------------------------------------------------------------------------------------
3899 // Allocation
3900 
3901 void TemplateTable::_new() {
3902   transition(vtos, atos);
3903 
3904   const Register Robj   = R0_tos;
3905   const Register Rcpool = R1_tmp;
3906   const Register Rindex = R2_tmp;
3907   const Register Rtags  = R3_tmp;
3908   const Register Rsize  = R3_tmp;
3909 
3910   Register Rklass = R4_tmp;
3911   assert_different_registers(Rcpool, Rindex, Rtags, Rklass, Rtemp);
3912   assert_different_registers(Rcpool, Rindex, Rklass, Rsize);
3913 
3914   Label slow_case;
3915   Label done;
3916   Label initialize_header;
3917   Label initialize_object;  // including clearing the fields
3918 
3919   const bool allow_shared_alloc =
3920     Universe::heap()->supports_inline_contig_alloc();
3921 
3922   // Literals
3923   InlinedAddress Lheap_top_addr(allow_shared_alloc ? (address)Universe::heap()->top_addr() : NULL);
3924 
3925   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
3926   __ get_cpool_and_tags(Rcpool, Rtags);
3927 
3928   // Make sure the class we're about to instantiate has been resolved.
3929   // This is done before loading InstanceKlass to be consistent with the order
3930   // in which the constant pool is updated (see ConstantPool::klass_at_put)
3931   const int tags_offset = Array<u1>::base_offset_in_bytes();
3932   __ add(Rtemp, Rtags, Rindex);
3933 
3934   __ ldrb(Rtemp, Address(Rtemp, tags_offset));
3935 
3936   // use Rklass as a scratch
3937   volatile_barrier(MacroAssembler::LoadLoad, Rklass);
3938 
3939   // get InstanceKlass
3940   __ cmp(Rtemp, JVM_CONSTANT_Class);
3941   __ b(slow_case, ne);
3942   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rklass);
3943 
3944   // make sure klass is initialized & doesn't have a finalizer:
3945   // first, make sure klass is fully initialized
3946   __ ldrb(Rtemp, Address(Rklass, InstanceKlass::init_state_offset()));
3947   __ cmp(Rtemp, InstanceKlass::fully_initialized);
3948   __ b(slow_case, ne);
3949 
3950   // get instance_size in InstanceKlass (scaled to a count of bytes)
3951   __ ldr_u32(Rsize, Address(Rklass, Klass::layout_helper_offset()));
3952 
3953   // test to see if it has a finalizer or is malformed in some way
3954   // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
3955   __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
3956 
3957   // Allocate the instance:
3958   //  If TLAB is enabled:
3959   //    Try to allocate in the TLAB.
3960   //    If fails, go to the slow path.
3961   //  Else If inline contiguous allocations are enabled:
3962   //    Try to allocate in eden.
3963   //    If fails due to heap end, go to slow path.
3964   //
3965   //  If TLAB is enabled OR inline contiguous is enabled:
3966   //    Initialize the allocation.
3967   //    Exit.
3968   //
3969   //  Go to slow path.
3970   if (UseTLAB) {
3971     const Register Rtlab_top = R1_tmp;
3972     const Register Rtlab_end = R2_tmp;
3973     assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);
3974 
3975     __ tlab_allocate(Robj, Rtlab_top, Rtlab_end, Rsize, slow_case);
3976     if (ZeroTLAB) {
3977       // the fields have been already cleared
3978       __ b(initialize_header);
3979     } else {
3980       // initialize both the header and fields
3981       __ b(initialize_object);
3982     }
3983   } else {
3984     // Allocation in the shared Eden, if allowed.
3985     if (allow_shared_alloc) {
3986       const Register Rheap_top_addr = R2_tmp;
3987       const Register Rheap_top = R5_tmp;
3988       const Register Rheap_end = Rtemp;
3989       assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);
3990 
3991       __ eden_allocate(Robj, Rheap_top, Rheap_top_addr, Rheap_end, Rsize, slow_case);
3992     }
3993   }
3994 
3995   if (UseTLAB || allow_shared_alloc) {
3996     const Register Rzero0 = R1_tmp;
3997     const Register Rzero1 = R2_tmp;
3998     const Register Rzero_end = R5_tmp;
3999     const Register Rzero_cur = Rtemp;
4000     assert_different_registers(Robj, Rsize, Rklass, Rzero0, Rzero1, Rzero_cur, Rzero_end);
4001 
4002     // The object is initialized before the header.  If the object size is
4003     // zero, go directly to the header initialization.
4004     __ bind(initialize_object);
4005     __ subs(Rsize, Rsize, sizeof(oopDesc));
4006     __ add(Rzero_cur, Robj, sizeof(oopDesc));
4007     __ b(initialize_header, eq);
4008 
4009 #ifdef ASSERT
4010     // make sure Rsize is a multiple of 8
4011     Label L;
4012     __ tst(Rsize, 0x07);
4013     __ b(L, eq);
4014     __ stop("object size is not multiple of 8 - adjust this code");
4015     __ bind(L);
4016 #endif
4017 
4018     __ mov(Rzero0, 0);
4019     __ mov(Rzero1, 0);
4020     __ add(Rzero_end, Rzero_cur, Rsize);
4021 
4022     // initialize remaining object fields: Rsize was a multiple of 8
4023     { Label loop;
4024       // loop is unrolled 2 times
4025       __ bind(loop);
4026       // #1
4027       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback);
4028       __ cmp(Rzero_cur, Rzero_end);
4029       // #2
4030       __ stmia(Rzero_cur, RegisterSet(Rzero0) | RegisterSet(Rzero1), writeback, ne);
4031       __ cmp(Rzero_cur, Rzero_end, ne);
4032       __ b(loop, ne);
4033     }
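    // Rough C equivalent of the zeroing loop above (illustrative sketch only):
    //   char* p   = (char*)obj + sizeof(oopDesc);
    //   char* end = (char*)obj + instance_size;          // a multiple of 8 bytes
    //   while (p != end) { ((intptr_t*)p)[0] = 0; ((intptr_t*)p)[1] = 0; p += 2 * wordSize; }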
4034 
4035     // initialize object header only.
4036     __ bind(initialize_header);
4037     if (UseBiasedLocking) {
4038       __ ldr(Rtemp, Address(Rklass, Klass::prototype_header_offset()));
4039     } else {
4040       __ mov_slow(Rtemp, (intptr_t)markWord::prototype().value());
4041     }
4042     // mark
4043     __ str(Rtemp, Address(Robj, oopDesc::mark_offset_in_bytes()));
4044 
4045     // klass
4046     __ store_klass(Rklass, Robj); // blows Rklass:
4047     Rklass = noreg;
4048 
4049     // Note: Disable DTrace runtime check for now to eliminate overhead on each allocation
4050     if (DTraceAllocProbes) {
4051       // Trigger dtrace event for fastpath
4052       Label Lcontinue;
4053 
4054       __ ldrb_global(Rtemp, (address)&DTraceAllocProbes);
4055       __ cbz(Rtemp, Lcontinue);
4056 
4057       __ push(atos);
4058       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), Robj);
4059       __ pop(atos);
4060 
4061       __ bind(Lcontinue);
4062     }
4063 
4064     __ b(done);
4065   } else {
4066     // jump over literals
4067     __ b(slow_case);
4068   }
4069 
4070   if (allow_shared_alloc) {
4071     __ bind_literal(Lheap_top_addr);
4072   }
4073 
4074   // slow case
4075   __ bind(slow_case);
4076   __ get_constant_pool(Rcpool);
4077   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4078   __ call_VM(Robj, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
4079 
4080   // continue
4081   __ bind(done);
4082 
4083   // StoreStore barrier required after complete initialization
4084   // (headers + content zeroing), before the object may escape.
4085   __ membar(MacroAssembler::StoreStore, R1_tmp);
4086 }
4087 
4088 
4089 void TemplateTable::newarray() {
4090   transition(itos, atos);
4091   __ ldrb(R1, at_bcp(1));
4092   __ mov(R2, R0_tos);
4093   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R1, R2);
4094   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4095 }
4096 
4097 
4098 void TemplateTable::anewarray() {
4099   transition(itos, atos);
4100   __ get_unsigned_2_byte_index_at_bcp(R2, 1);
4101   __ get_constant_pool(R1);
4102   __ mov(R3, R0_tos);
4103   call_VM(R0_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R1, R2, R3);
4104   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4105 }
4106 
4107 
4108 void TemplateTable::arraylength() {
4109   transition(atos, itos);
4110   __ null_check(R0_tos, Rtemp, arrayOopDesc::length_offset_in_bytes());
4111   __ ldr_s32(R0_tos, Address(R0_tos, arrayOopDesc::length_offset_in_bytes()));
4112 }
4113 
4114 
4115 void TemplateTable::checkcast() {
4116   transition(atos, atos);
4117   Label done, is_null, quicked, resolved, throw_exception;
4118 
4119   const Register Robj = R0_tos;
4120   const Register Rcpool = R2_tmp;
4121   const Register Rtags = R3_tmp;
4122   const Register Rindex = R4_tmp;
4123   const Register Rsuper = R3_tmp;
4124   const Register Rsub   = R4_tmp;
4125   const Register Rsubtype_check_tmp1 = R1_tmp;
4126   const Register Rsubtype_check_tmp2 = LR_tmp;
4127 
4128   __ cbz(Robj, is_null);
4129 
4130   // Get cpool & tags index
4131   __ get_cpool_and_tags(Rcpool, Rtags);
4132   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4133 
4134   // See if bytecode has already been quicked
4135   __ add(Rtemp, Rtags, Rindex);
4136   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4137 
4138   __ cmp(Rtemp, JVM_CONSTANT_Class);
4139 
4140   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4141 
4142   __ b(quicked, eq);
4143 
4144   __ push(atos);
4145   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4146   // vm_result_2 has metadata result
4147   __ get_vm_result_2(Rsuper, Robj);
4148   __ pop_ptr(Robj);
4149   __ b(resolved);
4150 
4151   __ bind(throw_exception);
4152   // Come here on failure of subtype check
4153   __ profile_typecheck_failed(R1_tmp);
4154   __ mov(R2_ClassCastException_obj, Robj);             // convention with generate_ClassCastException_handler()
4155   __ b(Interpreter::_throw_ClassCastException_entry);
4156 
4157   // Get superklass in Rsuper and subklass in Rsub
4158   __ bind(quicked);
4159   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4160 
4161   __ bind(resolved);
4162   __ load_klass(Rsub, Robj);
4163 
4164   // Generate subtype check. Blows both tmps and Rtemp.
4165   assert_different_registers(Robj, Rsub, Rsuper, Rsubtype_check_tmp1, Rsubtype_check_tmp2, Rtemp);
4166   __ gen_subtype_check(Rsub, Rsuper, throw_exception, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4167 
4168   // Come here on success
4169 
4170   // Collect counts on whether this check-cast sees NULLs a lot or not.
4171   if (ProfileInterpreter) {
4172     __ b(done);
4173     __ bind(is_null);
4174     __ profile_null_seen(R1_tmp);
4175   } else {
4176     __ bind(is_null);   // same as 'done'
4177   }
4178   __ bind(done);
4179 }
4180 
4181 
4182 void TemplateTable::instanceof() {
4183   // result = 0: obj == NULL or  obj is not an instanceof the specified klass
4184   // result = 1: obj != NULL and obj is     an instanceof the specified klass
4185 
4186   transition(atos, itos);
4187   Label done, is_null, not_subtype, quicked, resolved;
4188 
4189   const Register Robj = R0_tos;
4190   const Register Rcpool = R2_tmp;
4191   const Register Rtags = R3_tmp;
4192   const Register Rindex = R4_tmp;
4193   const Register Rsuper = R3_tmp;
4194   const Register Rsub   = R4_tmp;
4195   const Register Rsubtype_check_tmp1 = R0_tmp;
4196   const Register Rsubtype_check_tmp2 = R1_tmp;
4197 
4198   __ cbz(Robj, is_null);
4199 
4200   __ load_klass(Rsub, Robj);
4201 
4202   // Get cpool & tags index
4203   __ get_cpool_and_tags(Rcpool, Rtags);
4204   __ get_unsigned_2_byte_index_at_bcp(Rindex, 1);
4205 
4206   // See if bytecode has already been quicked
4207   __ add(Rtemp, Rtags, Rindex);
4208   __ ldrb(Rtemp, Address(Rtemp, Array<u1>::base_offset_in_bytes()));
4209   __ cmp(Rtemp, JVM_CONSTANT_Class);
4210 
4211   volatile_barrier(MacroAssembler::LoadLoad, Rtemp, true);
4212 
4213   __ b(quicked, eq);
4214 
4215   __ push(atos);
4216   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4217   // vm_result_2 has metadata result
4218   __ get_vm_result_2(Rsuper, Robj);
4219   __ pop_ptr(Robj);
4220   __ b(resolved);
4221 
4222   // Get superklass in Rsuper and subklass in Rsub
4223   __ bind(quicked);
4224   __ load_resolved_klass_at_offset(Rcpool, Rindex, Rsuper);
4225 
4226   __ bind(resolved);
4227   __ load_klass(Rsub, Robj);
4228 
4229   // Generate subtype check. Blows both tmps and Rtemp.
4230   __ gen_subtype_check(Rsub, Rsuper, not_subtype, Rsubtype_check_tmp1, Rsubtype_check_tmp2);
4231 
4232   // Come here on success
4233   __ mov(R0_tos, 1);
4234   __ b(done);
4235 
4236   __ bind(not_subtype);
4237   // Come here on failure
4238   __ profile_typecheck_failed(R1_tmp);
4239   __ mov(R0_tos, 0);
4240 
4241   // Collect counts on whether this test sees NULLs a lot or not.
4242   if (ProfileInterpreter) {
4243     __ b(done);
4244     __ bind(is_null);
4245     __ profile_null_seen(R1_tmp);
4246   } else {
4247     __ bind(is_null);   // same as 'done'
4248   }
4249   __ bind(done);
4250 }
4251 
4252 
4253 //----------------------------------------------------------------------------------------------------
4254 // Breakpoints
4255 void TemplateTable::_breakpoint() {
4256 
4257   // Note: We get here even if we are single stepping.
4258   // jbug insists on setting breakpoints at every bytecode
4259   // even if we are in single step mode.
4260 
4261   transition(vtos, vtos);
4262 
4263   // get the unpatched byte code
4264   __ mov(R1, Rmethod);
4265   __ mov(R2, Rbcp);
4266   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R1, R2);
4267   __ mov(Rtmp_save0, R0);
4268 
4269   // post the breakpoint event
4270   __ mov(R1, Rmethod);
4271   __ mov(R2, Rbcp);
4272   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R1, R2);
4273 
4274   // complete the execution of original bytecode
4275   __ mov(R3_bytecode, Rtmp_save0);
4276   __ dispatch_only_normal(vtos);
4277 }
4278 
4279 
4280 //----------------------------------------------------------------------------------------------------
4281 // Exceptions
4282 
4283 void TemplateTable::athrow() {
4284   transition(atos, vtos);
4285   __ mov(Rexception_obj, R0_tos);
4286   __ null_check(Rexception_obj, Rtemp);
4287   __ b(Interpreter::throw_exception_entry());
4288 }
4289 
4290 
4291 //----------------------------------------------------------------------------------------------------
4292 // Synchronization
4293 //
4294 // Note: monitorenter & exit are symmetric routines, which is reflected
4295 //       in the assembly code structure as well
4296 //
4297 // Stack layout:
4298 //
4299 // [expressions  ] <--- Rstack_top        = expression stack top
4300 // ..
4301 // [expressions  ]
4302 // [monitor entry] <--- monitor block top = expression stack bot
4303 // ..
4304 // [monitor entry]
4305 // [frame data   ] <--- monitor block bot
4306 // ...
4307 // [saved FP     ] <--- FP
4308 
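// Illustrative Java source (hypothetical example, not part of this file):
//   synchronized (lock) { ... }   // javac emits monitorenter on entry and monitorexit
//                                 // on both the normal and exceptional exit paths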
4309 
4310 void TemplateTable::monitorenter() {
4311   transition(atos, vtos);
4312 
4313   const Register Robj = R0_tos;
4314   const Register Rentry = R1_tmp;
4315 
4316   // check for NULL object
4317   __ null_check(Robj, Rtemp);
4318 
4319   __ resolve(IS_NOT_NULL, Robj);
4320 
4321   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4322   assert (entry_size % StackAlignmentInBytes == 0, "keep stack alignment");
4323   Label allocate_monitor, allocated;
4324 
4325   // initialize entry pointer
4326   __ mov(Rentry, 0);                             // points to free slot or NULL
4327 
4328   // find a free slot in the monitor block (result in Rentry)
4329   { Label loop, exit;
4330     const Register Rcur = R2_tmp;
4331     const Register Rcur_obj = Rtemp;
4332     const Register Rbottom = R3_tmp;
4333     assert_different_registers(Robj, Rentry, Rcur, Rbottom, Rcur_obj);
4334 
4335     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4336                                  // points to current entry, starting with top-most entry
4337     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4338                                  // points to word before bottom of monitor block
4339 
4340     __ cmp(Rcur, Rbottom);                       // check if there are no monitors
4341     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4342                                                  // prefetch monitor's object for the first iteration
4343     __ b(allocate_monitor, eq);                  // there are no monitors, skip searching
4344 
4345     __ bind(loop);
4346     __ cmp(Rcur_obj, 0);                         // check if current entry is used
4347     __ mov(Rentry, Rcur, eq);                    // if not used then remember entry
4348 
4349     __ cmp(Rcur_obj, Robj);                      // check if current entry is for same object
4350     __ b(exit, eq);                              // if same object then stop searching
4351 
4352     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4353 
4354     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4355     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4356                                                  // prefetch monitor's object for the next iteration
4357     __ b(loop, ne);                              // if not at bottom then check this entry
4358     __ bind(exit);
4359   }
4360 
4361   __ cbnz(Rentry, allocated);                    // check if a slot has been found; if found, continue with that one
4362 
4363   __ bind(allocate_monitor);
4364 
4365   // allocate one if there's no free slot
4366   { Label loop;
4367     assert_different_registers(Robj, Rentry, R2_tmp, Rtemp);
4368 
4369     // 1. compute new pointers
4370 
4371 
4372     __ ldr(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4373                                                  // old monitor block top / expression stack bottom
4374 
4375     __ sub(Rstack_top, Rstack_top, entry_size);  // move expression stack top
4376     __ check_stack_top_on_expansion();
4377 
4378     __ sub(Rentry, Rentry, entry_size);          // move expression stack bottom
4379 
4380     __ mov(R2_tmp, Rstack_top);                  // set start value for copy loop
4381 
4382     __ str(Rentry, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4383                                                  // set new monitor block top
4384 
4385     // 2. move expression stack contents
4386 
4387     __ cmp(R2_tmp, Rentry);                                 // check if expression stack is empty
4388     __ ldr(Rtemp, Address(R2_tmp, entry_size), ne);         // load expression stack word from old location
4389     __ b(allocated, eq);
4390 
4391     __ bind(loop);
4392     __ str(Rtemp, Address(R2_tmp, wordSize, post_indexed)); // store expression stack word at new location
4393                                                             // and advance to next word
4394     __ cmp(R2_tmp, Rentry);                                 // check if bottom reached
4395     __ ldr(Rtemp, Address(R2, entry_size), ne);             // load expression stack word from old location
4396     __ b(loop, ne);                                         // if not at bottom then copy next word
4397   }
4398 
4399   // call run-time routine
4400 
4401   // Rentry: points to monitor entry
4402   __ bind(allocated);
4403 
4404   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
4405   // The object has already been popped from the stack, so the expression stack looks correct.
4406   __ add(Rbcp, Rbcp, 1);
4407 
4408   __ str(Robj, Address(Rentry, BasicObjectLock::obj_offset_in_bytes()));     // store object
4409   __ lock_object(Rentry);
4410 
4411   // check to make sure this monitor doesn't cause stack overflow after locking
4412   __ save_bcp();  // in case of exception
4413   __ arm_stack_overflow_check(0, Rtemp);
4414 
4415   // The bcp has already been incremented. Just need to dispatch to next instruction.
4416   __ dispatch_next(vtos);
4417 }
4418 
4419 
4420 void TemplateTable::monitorexit() {
4421   transition(atos, vtos);
4422 
4423   const Register Robj = R0_tos;
4424   const Register Rcur = R1_tmp;
4425   const Register Rbottom = R2_tmp;
4426   const Register Rcur_obj = Rtemp;
4427   const Register Rmonitor = R0;      // fixed in unlock_object()
4428 
4429   // check for NULL object
4430   __ null_check(Robj, Rtemp);
4431 
4432   __ resolve(IS_NOT_NULL, Robj);
4433 
4434   const int entry_size = (frame::interpreter_frame_monitor_size() * wordSize);
4435   Label found, throw_exception;
4436 
4437   // find matching slot
4438   { Label loop;
4439     assert_different_registers(Robj, Rcur, Rbottom, Rcur_obj);
4440 
4441     __ ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
4442                                  // points to current entry, starting with top-most entry
4443     __ sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
4444                                  // points to word before bottom of monitor block
4445 
4446     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4447     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4448                                                  // prefetch monitor's object for the first iteration
4449     __ b(throw_exception, eq);                   // throw exception if there are no monitors
4450 
4451     __ bind(loop);
4452     // check if current entry is for same object
4453     __ cmp(Rcur_obj, Robj);
4454     __ b(found, eq);                             // if same object then stop searching
4455     __ add(Rcur, Rcur, entry_size);              // otherwise advance to next entry
4456     __ cmp(Rcur, Rbottom);                       // check if bottom reached
4457     __ ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
4458     __ b (loop, ne);                             // if not at bottom then check this entry
4459   }
4460 
4461   // error handling. Unlocking was not block-structured
4462   __ bind(throw_exception);
4463   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4464   __ should_not_reach_here();
4465 
4466   // call run-time routine
4467   // Rcur: points to monitor entry
4468   __ bind(found);
4469   __ push_ptr(Robj);                             // make sure object is on stack (contract with oopMaps)
4470   __ mov(Rmonitor, Rcur);
4471   __ unlock_object(Rmonitor);
4472   __ pop_ptr(Robj);                              // discard object
4473 }
4474 
4475 
4476 //----------------------------------------------------------------------------------------------------
4477 // Wide instructions
4478 
4479 void TemplateTable::wide() {
4480   transition(vtos, vtos);
4481   __ ldrb(R3_bytecode, at_bcp(1));
4482 
4483   InlinedAddress Ltable((address)Interpreter::_wentry_point);
4484   __ ldr_literal(Rtemp, Ltable);
4485   __ indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
4486 
4487   __ nop(); // to avoid filling CPU pipeline with invalid instructions
4488   __ nop();
4489   __ bind_literal(Ltable);
4490 }
4491 
4492 
4493 //----------------------------------------------------------------------------------------------------
4494 // Multi arrays
4495 
4496 void TemplateTable::multianewarray() {
4497   transition(vtos, atos);
4498   __ ldrb(Rtmp_save0, at_bcp(3));   // get number of dimensions
4499 
4500   // last dim is on top of stack; we want address of first one:
4501   // first_addr = last_addr + ndims * stackElementSize - 1*wordSize
4502   // (the trailing wordSize is subtracted so that R1 points at the first dimension word itself)
4503   __ add(Rtemp, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4504   __ sub(R1, Rtemp, wordSize);
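  // Worked example (illustrative): for a 3-dimensional allocation ndims = 3, so with a
  // 4-byte stack element R1 = Rstack_top + 3*4 - 4, i.e. the slot holding the first
  // (outermost) dimension, while the last dimension sits at Rstack_top.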
4505 
4506   call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R1);
4507   __ add(Rstack_top, Rstack_top, AsmOperand(Rtmp_save0, lsl, Interpreter::logStackElementSize));
4508   // MacroAssembler::StoreStore useless (included in the runtime exit path)
4509 }
4510