/*
 * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interp_masm_arm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"

//--------------------------------------------------------------------
// Implementation of InterpreterMacroAssembler



InterpreterMacroAssembler::InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {
}

void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
#ifdef ASSERT
  // Ensure that last_sp is not filled.
  { Label L;
    ldr(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(Rtemp, L);
    stop("InterpreterMacroAssembler::call_VM_helper: last_sp != NULL");
    bind(L);
  }
#endif // ASSERT

  // Rbcp must be saved/restored since it may change due to GC.
  save_bcp();


  // super call
  MacroAssembler::call_VM_helper(oop_result, entry_point, number_of_arguments, check_exceptions);


  // Restore interpreter specific registers.
  restore_bcp();
  restore_method();
}

void InterpreterMacroAssembler::jump_to_entry(address entry) {
  assert(entry, "Entry must have been generated by now");
  b(entry);
}

void InterpreterMacroAssembler::check_and_handle_popframe() {
  if (can_pop_frame()) {
    Label L;
    const Register popframe_cond = R2_tmp;

    // Initiate popframe handling only if it is not already being processed.  If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.

    ldr_s32(popframe_cond, Address(Rthread, JavaThread::popframe_condition_offset()));
    tbz(popframe_cond, exact_log2(JavaThread::popframe_pending_bit), L);
    tbnz(popframe_cond, exact_log2(JavaThread::popframe_processing_bit), L);

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

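    // The entry address is returned in R0.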
    // Call indirectly to avoid generation ordering problem.
    jump(R0);

    bind(L);
  }
}


// Blows R2, Rtemp. Sets TOS cached value.
void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  const Register thread_state = R2_tmp;

  ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  const Address tos_addr(thread_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thread_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thread_state, JvmtiThreadState::earlyret_value_offset());
  const Address val_addr_hi(thread_state, JvmtiThreadState::earlyret_value_offset()
                             + in_ByteSize(wordSize));

  Register zero = zero_register(Rtemp);

  switch (state) {
    case atos: ldr(R0_tos, oop_addr);
               str(zero, oop_addr);
               interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
               break;

    case ltos: ldr(R1_tos_hi, val_addr_hi);        // fall through
    case btos:                                     // fall through
    case ztos:                                     // fall through
    case ctos:                                     // fall through
    case stos:                                     // fall through
    case itos: ldr_s32(R0_tos, val_addr);          break;
#ifdef __SOFTFP__
    case dtos: ldr(R1_tos_hi, val_addr_hi);        // fall through
    case ftos: ldr(R0_tos, val_addr);              break;
#else
    case ftos: ldr_float (S0_tos, val_addr);       break;
    case dtos: ldr_double(D0_tos, val_addr);       break;
#endif // __SOFTFP__
    case vtos: /* nothing to do */                 break;
    default  : ShouldNotReachHere();
  }
  // Clean up tos value in the thread object
  str(zero, val_addr);
  str(zero, val_addr_hi);

  mov(Rtemp, (int) ilgl);
  str_32(Rtemp, tos_addr);
}


// Blows R2, Rtemp.
void InterpreterMacroAssembler::check_and_handle_earlyret() {
  if (can_force_early_return()) {
    Label L;
    const Register thread_state = R2_tmp;

    ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
    cbz(thread_state, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.

    ldr_s32(Rtemp, Address(thread_state, JvmtiThreadState::earlyret_state_offset()));
    cmp(Rtemp, JvmtiThreadState::earlyret_pending);
    b(L, ne);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code.

    ldr_s32(R0, Address(thread_state, JvmtiThreadState::earlyret_tos_offset()));
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), R0);

    jump(R0);

    bind(L);
  }
}


// Sets reg. Blows Rtemp.
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
  assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
  assert(reg != Rtemp, "should be different registers");

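  // Compose the big-endian 2-byte operand a byte at a time to avoid an
  // unaligned halfword access: reg = (bcp[bcp_offset] << 8) | bcp[bcp_offset + 1].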
  ldrb(Rtemp, Address(Rbcp, bcp_offset));
  ldrb(reg, Address(Rbcp, bcp_offset+1));
  orr(reg, reg, AsmOperand(Rtemp, lsl, BitsPerByte));
}

void InterpreterMacroAssembler::get_index_at_bcp(Register index, int bcp_offset, Register tmp_reg, size_t index_size) {
  assert_different_registers(index, tmp_reg);
  if (index_size == sizeof(u2)) {
    // load bytes of index separately to avoid unaligned access
    ldrb(index, Address(Rbcp, bcp_offset+1));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
  } else if (index_size == sizeof(u4)) {
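    // A 4-byte index (invokedynamic) is stored in native byte order
    // (little-endian on this port) and encoded as ~index; assemble it
    // byte-by-byte for the same alignment reasons as above.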
    ldrb(index, Address(Rbcp, bcp_offset+3));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset+2));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset+1));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
    ldrb(tmp_reg, Address(Rbcp, bcp_offset));
    orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
    // Check if the secondary index definition is still ~x, otherwise
    // we have to change the following assembler code to calculate the
    // plain index.
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    mvn_32(index, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldrb(index, Address(Rbcp, bcp_offset));
  } else {
    ShouldNotReachHere();
  }
}

// Sets cache, index.
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, index);

  get_index_at_bcp(index, bcp_offset, cache, index_size);

  // load constant pool cache pointer
  ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));

  // convert from field index to ConstantPoolCacheEntry index
  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
  logical_shift_left(index, index, 2);
}

// Sets cache, index, bytecode.
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size) {
  get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
  // caution: index and bytecode can be the same
  add(bytecode, cache, AsmOperand(index, lsl, LogBytesPerWord));
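  // The resolved bytecodes live in bytes 1 and 2 of the ConstantPoolCacheEntry
  // _indices word, so byte (1 + byte_no) selects the one for byte_no 1 or 2.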
  ldrb(bytecode, Address(bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())));
  TemplateTable::volatile_barrier(MacroAssembler::LoadLoad, noreg, true);
}

// Sets cache. Blows reg_tmp.
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register reg_tmp, int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, reg_tmp);

  get_index_at_bcp(reg_tmp, bcp_offset, cache, index_size);

  // load constant pool cache pointer
  ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));

  // skip past the header
  add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
  // convert from field index to ConstantPoolCacheEntry index
  // and from word offset to byte offset
  assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
  add(cache, cache, AsmOperand(reg_tmp, lsl, 2 + LogBytesPerWord));
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  get_constant_pool(result);

  Register cache = result;
  // load pointer for resolved_references[] objArray
  ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
  ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
  resolve_oop_handle(cache);
  // Add in the index
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a Java object, it can be compressed
  logical_shift_left(index, index, LogBytesPerHeapOop);
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  load_heap_oop(result, Address(cache, index));
}

void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register Rcpool, Register Rindex, Register Rklass) {
  add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
  ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
  ldr(Rklass, Address(Rcpool,  ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
  add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
  ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
}

// Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed)
// should be explicitly generated by the caller in the not_subtype case.
// Blows Rtemp, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Label &not_subtype,
                                                  Register tmp1,
                                                  Register tmp2) {

  assert_different_registers(Rsub_klass, Rsuper_klass, tmp1, tmp2, Rtemp);
  Label ok_is_subtype, loop, update_cache;

  const Register super_check_offset = tmp1;
  const Register cached_super = tmp2;

  // Profile the not-null value's klass.
  profile_typecheck(tmp1, Rsub_klass);

  // Load the super-klass's check offset into super_check_offset
  ldr_u32(super_check_offset, Address(Rsuper_klass, Klass::super_check_offset_offset()));

  // Check for self
  cmp(Rsub_klass, Rsuper_klass);

  // Load from the sub-klass's super-class display list, or a 1-word cache of
  // the secondary superclass list, or a failing value with a sentinel offset
  // if the super-klass is an interface or exceptionally deep in the Java
  // hierarchy and we have to scan the secondary superclass list the hard way.
  // See if we get an immediate positive hit
  ldr(cached_super, Address(Rsub_klass, super_check_offset));

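  // If not a self-match (ne), compare against the cached super;
  // either match leaves 'eq' set for the branch below.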
  cond_cmp(Rsuper_klass, cached_super, ne);
  b(ok_is_subtype, eq);

  // Check for immediate negative hit
  cmp(super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
  b(not_subtype, ne);

  // Now do a linear scan of the secondary super-klass chain.
  const Register supers_arr = tmp1;
  const Register supers_cnt = tmp2;
  const Register cur_super  = Rtemp;

  // Load objArrayOop of secondary supers.
  ldr(supers_arr, Address(Rsub_klass, Klass::secondary_supers_offset()));

  ldr_u32(supers_cnt, Address(supers_arr, Array<Klass*>::length_offset_in_bytes())); // Load the array length
  cmp(supers_cnt, 0);

  // Skip to the start of array elements and prefetch the first super-klass.
  ldr(cur_super, Address(supers_arr, Array<Klass*>::base_offset_in_bytes(), pre_indexed), ne);
  b(not_subtype, eq);

  bind(loop);


  cmp(cur_super, Rsuper_klass);
  b(update_cache, eq);

  subs(supers_cnt, supers_cnt, 1);

  ldr(cur_super, Address(supers_arr, wordSize, pre_indexed), ne);

  b(loop, ne);

  b(not_subtype);

  bind(update_cache);
  // Must be equal but missed in cache.  Update cache.
  str(Rsuper_klass, Address(Rsub_klass, Klass::secondary_super_cache_offset()));

  bind(ok_is_subtype);
}


//////////////////////////////////////////////////////////////////////////////////


// Java Expression Stack

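// The expression stack grows toward lower addresses; Rstack_top points at the
// current top-of-stack word:
//   push: str(r, Address(Rstack_top, -wordSize, pre_indexed))
//   pop:  ldr(r, Address(Rstack_top,  wordSize, post_indexed))
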
void InterpreterMacroAssembler::pop_ptr(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  ldr(r, Address(Rstack_top, wordSize, post_indexed));
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  ldr_s32(r, Address(Rstack_top, wordSize, post_indexed));
  zap_high_non_significant_bits(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
  assert_different_registers(lo, hi);
  assert(lo < hi, "lo must be < hi");
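  // ldm/stm pair the lowest-numbered register with the lowest address,
  // hence the register-order requirement above.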
  pop(RegisterSet(lo) | RegisterSet(hi));
}

void InterpreterMacroAssembler::pop_f(FloatRegister fd) {
  fpops(fd);
}

void InterpreterMacroAssembler::pop_d(FloatRegister fd) {
  fpopd(fd);
}


// Transition vtos -> state. Blows R0, R1. Sets TOS cached value.
void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr(R0_tos);                              break;
    case btos:                                               // fall through
    case ztos:                                               // fall through
    case ctos:                                               // fall through
    case stos:                                               // fall through
    case itos: pop_i(R0_tos);                                break;
    case ltos: pop_l(R0_tos_lo, R1_tos_hi);                  break;
#ifdef __SOFTFP__
    case ftos: pop_i(R0_tos);                                break;
    case dtos: pop_l(R0_tos_lo, R1_tos_hi);                  break;
#else
    case ftos: pop_f(S0_tos);                                break;
    case dtos: pop_d(D0_tos);                                break;
#endif // __SOFTFP__
    case vtos: /* nothing to do */                           break;
    default  : ShouldNotReachHere();
  }
  interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  str(r, Address(Rstack_top, -wordSize, pre_indexed));
  check_stack_top_on_expansion();
}

void InterpreterMacroAssembler::push_i(Register r) {
  assert(r != Rstack_top, "unpredictable instruction");
  str_32(r, Address(Rstack_top, -wordSize, pre_indexed));
  check_stack_top_on_expansion();
}

void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
  assert_different_registers(lo, hi);
  assert(lo < hi, "lo must be < hi");
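  // stm stores the lowest-numbered register at the lowest address, matching
  // the layout pop_l() expects.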
  push(RegisterSet(lo) | RegisterSet(hi));
}

void InterpreterMacroAssembler::push_f() {
  fpushs(S0_tos);
}

void InterpreterMacroAssembler::push_d() {
  fpushd(D0_tos);
}

// Transition state -> vtos. Blows Rtemp.
void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
  switch (state) {
    case atos: push_ptr(R0_tos);                              break;
    case btos:                                                // fall through
    case ztos:                                                // fall through
    case ctos:                                                // fall through
    case stos:                                                // fall through
    case itos: push_i(R0_tos);                                break;
    case ltos: push_l(R0_tos_lo, R1_tos_hi);                  break;
#ifdef __SOFTFP__
    case ftos: push_i(R0_tos);                                break;
    case dtos: push_l(R0_tos_lo, R1_tos_hi);                  break;
#else
    case ftos: push_f();                                      break;
    case dtos: push_d();                                      break;
#endif // __SOFTFP__
    case vtos: /* nothing to do */                            break;
    default  : ShouldNotReachHere();
  }
}



// Converts return value in R0/R1 (interpreter calling conventions) to TOS cached value.
void InterpreterMacroAssembler::convert_retval_to_tos(TosState state) {
#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
  // According to interpreter calling conventions, result is returned in R0/R1,
  // but templates expect ftos in S0, and dtos in D0.
  if (state == ftos) {
    fmsr(S0_tos, R0);
  } else if (state == dtos) {
    fmdrr(D0_tos, R0, R1);
  }
#endif // !__SOFTFP__ && !__ABI_HARD__
}

// Converts TOS cached value to return value in R0/R1 (according to interpreter calling conventions).
void InterpreterMacroAssembler::convert_tos_to_retval(TosState state) {
#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
  // According to interpreter calling conventions, result is returned in R0/R1,
  // so ftos (S0) and dtos (D0) are moved to R0/R1.
  if (state == ftos) {
    fmrs(R0, S0_tos);
  } else if (state == dtos) {
    fmrrd(R0, R1, D0_tos);
  }
#endif // !__SOFTFP__ && !__ABI_HARD__
}



// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ldr(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
}

void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  str(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
}


void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {

  // set sender sp
  mov(Rsender_sp, SP);

  // record last_sp
  str(Rsender_sp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
}

// Jump to the from_interpreted entry of a call unless single stepping is possible
// in this thread, in which case we must call the i2i entry.
void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
  assert_different_registers(method, Rtemp);

  prepare_to_jump_from_interpreted();

  if (can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cmp(Rtemp, 0);
    ldr(PC, Address(method, Method::interpreter_entry_offset()), ne);
  }

  indirect_jump(Address(method, Method::from_interpreted_offset()), Rtemp);
}


void InterpreterMacroAssembler::restore_dispatch() {
  mov_slow(RdispatchTable, (address)Interpreter::dispatch_table(vtos));
}


// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing ARM-specific to be done here.
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              DispatchTableMode table_mode,
                                              bool verifyoop, bool generate_poll) {
  if (VerifyActivationFrameSize) {
    Label L;
    sub(Rtemp, FP, SP);
    int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
    cmp(Rtemp, min_frame_size);
    b(L, ge);
    stop("broken stack frame");
    bind(L);
  }

  if (verifyoop) {
    interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
  }

  Label safepoint;
  address* const safepoint_table = Interpreter::safept_table(state);
  address* const table           = Interpreter::dispatch_table(state);
  bool needs_thread_local_poll = generate_poll && table != safepoint_table;

  if (needs_thread_local_poll) {
    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
    ldr(Rtemp, Address(Rthread, Thread::polling_page_offset()));
    tbnz(Rtemp, exact_log2(SafepointMechanism::poll_bit()), safepoint);
  }

  if ((state == itos) || (state == btos) || (state == ztos) || (state == ctos) || (state == stos)) {
    zap_high_non_significant_bits(R0_tos);
  }

#ifdef ASSERT
  Label L;
  mov_slow(Rtemp, (address)Interpreter::dispatch_table(vtos));
  cmp(Rtemp, RdispatchTable);
  b(L, eq);
  stop("invalid RdispatchTable");
  bind(L);
#endif

  if (table_mode == DispatchDefault) {
    if (state == vtos) {
      indirect_jump(Address::indexed_ptr(RdispatchTable, R3_bytecode), Rtemp);
    } else {
      // on 32-bit ARM this method is faster than the one above.
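      // Derive this state's table from RdispatchTable by a small constant
      // offset rather than materializing the full table address via mov_slow.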
      sub(Rtemp, RdispatchTable, (Interpreter::distance_from_dispatch_table(vtos) -
                           Interpreter::distance_from_dispatch_table(state)) * wordSize);
      indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
    }
  } else {
    assert(table_mode == DispatchNormal, "invalid dispatch table mode");
    address table = (address) Interpreter::normal_table(state);
    mov_slow(Rtemp, table);
    indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
  }

  if (needs_thread_local_poll) {
    bind(safepoint);
    lea(Rtemp, ExternalAddress((address)safepoint_table));
    indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
  }

  nop(); // to avoid filling CPU pipeline with invalid instructions
  nop();
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
  dispatch_base(state, DispatchDefault, true, generate_poll);
}


void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, DispatchNormal);
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, DispatchNormal, false);
}

void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
  // load next bytecode and advance Rbcp
  ldrb(R3_bytecode, Address(Rbcp, step, pre_indexed));
  dispatch_base(state, DispatchDefault, true, generate_poll);
}

void InterpreterMacroAssembler::narrow(Register result) {
  // mask integer result to narrower return type.
  const Register Rtmp = R2;

  // get method type
  ldr(Rtmp, Address(Rmethod, Method::const_offset()));
  ldrb(Rtmp, Address(Rtmp, ConstMethod::result_type_offset()));

  Label notBool, notByte, notChar, done;
  cmp(Rtmp, T_INT);
  b(done, eq);

  cmp(Rtmp, T_BOOLEAN);
  b(notBool, ne);
  and_32(result, result, 1);
  b(done);

  bind(notBool);
  cmp(Rtmp, T_BYTE);
  b(notByte, ne);
  sign_extend(result, result, 8);
  b(done);

  bind(notByte);
  cmp(Rtmp, T_CHAR);
  b(notChar, ne);
  zero_extend(result, result, 16);
  b(done);

  bind(notChar);
  // cmp(Rtmp, T_SHORT);
  // b(done, ne);
  sign_extend(result, result, 16);

  // Nothing to do
  bind(done);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_addr,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception,
                                                  bool notify_jvmdi) {
  Label unlock, unlocked, no_unlock;

  // Note: Registers R0, R1, S0 and D0 (TOS cached value) may be in use for the result.

  const Address do_not_unlock_if_synchronized(Rthread,
                         JavaThread::do_not_unlock_if_synchronized_offset());

  const Register Rflag = R2;
  const Register Raccess_flags = R3;

  restore_method();

  ldrb(Rflag, do_not_unlock_if_synchronized);

  // get method access flags
  ldr_u32(Raccess_flags, Address(Rmethod, Method::access_flags_offset()));

  strb(zero_register(Rtemp), do_not_unlock_if_synchronized); // reset the flag

  // check if method is synchronized

  tbz(Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag is set.
  cbnz(Rflag, no_unlock);

  // unlock monitor
  push(state);                                   // save result

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  const Register Rmonitor = R1;                  // fixed in unlock_object()
  const Register Robj = R2;

  // address of first monitor
  sub(Rmonitor, FP, - frame::interpreter_frame_monitor_block_bottom_offset * wordSize + (int)sizeof(BasicObjectLock));

  ldr(Robj, Address(Rmonitor, BasicObjectLock::obj_offset_in_bytes()));
  cbnz(Robj, unlock);

  pop(state);

  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    b(unlocked);
  }


  // Exception case for the check that all monitors are unlocked.
  const Register Rcur = R2;
  Label restart_check_monitors_unlocked, exception_monitor_is_still_locked;

  bind(exception_monitor_is_still_locked);
  // Monitor entry is still locked, need to throw exception.
  // Rcur: monitor entry.

  if (throw_monitor_exception) {
    // Throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Stack unrolling. Unlock object and install illegal_monitor_exception.
    // Unlock does not block, so we don't have to worry about the frame.

    push(state);
    mov(R1, Rcur);
    unlock_object(R1);

    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }

    pop(state);
    b(restart_check_monitors_unlocked);
  }

  bind(unlock);
  unlock_object(Rmonitor);
  pop(state);

  // Check for block-structured locking (i.e., that all locked objects have been unlocked)
  bind(unlocked);

  // Check that all monitors are unlocked
  {
    Label loop;

    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Register Rbottom = R3;
    const Register Rcur_obj = Rtemp;

    bind(restart_check_monitors_unlocked);

    ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                 // points to current entry, starting with top-most entry
    sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
                                 // points to word before bottom of monitor block

    cmp(Rcur, Rbottom);          // check if there are no monitors
    ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                 // prefetch monitor's object
    b(no_unlock, eq);

    bind(loop);
    // check if current entry is used
    cbnz(Rcur_obj, exception_monitor_is_still_locked);

    add(Rcur, Rcur, entry_size);      // otherwise advance to next entry
    cmp(Rcur, Rbottom);               // check if bottom reached
    ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
                                      // prefetch monitor's object
    b(loop, ne);                      // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);     // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  mov(Rtemp, FP);
  ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));

  if (ret_addr != LR) {
    mov(ret_addr, LR);
  }
}


// At certain points in the method invocation the monitor of
// synchronized methods hasn't been entered yet.
// To correctly handle exceptions at these points, we set the thread local
// variable _do_not_unlock_if_synchronized to true. The remove_activation will
// check this flag.
void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Register tmp) {
  const Address do_not_unlock_if_synchronized(Rthread,
                         JavaThread::do_not_unlock_if_synchronized_offset());
  if (flag) {
    mov(tmp, 1);
    strb(tmp, do_not_unlock_if_synchronized);
  } else {
    strb(zero_register(tmp), do_not_unlock_if_synchronized);
  }
}

// Lock object
//
// Argument: R1 : Points to BasicObjectLock to be used for locking.
// Must be initialized with object to lock.
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::lock_object(Register Rlock) {
  assert(Rlock == R1, "the second argument");

  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
  } else {
    Label done;

    const Register Robj = R2;
    const Register Rmark = R3;
    assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();

    Label already_locked, slow_case;
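
    // Stack-locking fast path: save the object's current mark word into the
    // lock's displaced-header slot, then try to CAS the object's mark word
    // from that unlocked value to a pointer to the BasicLock on this stack.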

    // Load object pointer
    ldr(Robj, Address(Rlock, obj_offset));

    if (UseBiasedLocking) {
      biased_locking_enter(Robj, Rmark/*scratched*/, R0, false, Rtemp, done, slow_case);
    }


    // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
    // That would be acceptable as either the CAS or the slow case path is taken in that case.
    // Exception to that is if the object is locked by the calling thread, then the recursive test will pass (guaranteed as
    // loads are satisfied from a store queue if performed on the same processor).

    assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
    ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));

    // Test if object is already locked
    tst(Rmark, markWord::unlocked_value);
    b(already_locked, eq);

    // Save old object->mark() into BasicLock's displaced header
    str(Rmark, Address(Rlock, mark_offset));

    cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);

#ifndef PRODUCT
    if (PrintBiasedLockingStatistics) {
      cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
    }
#endif //!PRODUCT

    b(done);

    // If we got here that means the object is locked by either the calling thread or another thread.
    bind(already_locked);
    // Handling of locked objects: recursive locks and slow case.

    // Fast check for recursive lock.
    //
    // Can apply the optimization only if this is a stack lock
    // allocated in this thread. For efficiency, we can focus on
    // recently allocated stack locks (instead of reading the stack
    // base and checking whether 'mark' points inside the current
    // thread stack):
    //  1) (mark & 3) == 0
    //  2) SP <= mark < SP + os::pagesize()
    //
    // Warning: SP + os::pagesize can overflow the stack base. We must
    // neither apply the optimization for an inflated lock allocated
    // just above the thread stack (this is why condition 1 matters)
    // nor apply the optimization if the stack lock is inside the stack
    // of another thread. The latter is avoided even in case of overflow
    // because we have guard pages at the end of all stacks. Hence, if
    // we go over the stack base and hit the stack of another thread,
    // this should not be in a writeable area that could contain a
    // stack lock allocated by that thread. As a consequence, a stack
    // lock less than page size away from SP is guaranteed to be
    // owned by the current thread.
    //
    // Note: assuming SP is aligned, we can check the low bits of
    // (mark-SP) instead of the low bits of mark. In that case,
    // assuming page size is a power of 2, we can merge the two
    // conditions into a single test:
    // => ((mark - SP) & (3 - os::pagesize())) == 0

    // (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
    // Check independently the low bits and the distance to SP.
    // -1- test low 2 bits
    movs(R0, AsmOperand(Rmark, lsl, 30));
    // -2- test (mark - SP) if the low two bits are 0
    sub(R0, Rmark, SP, eq);
    movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
    // If still 'eq' then recursive locking OK: store 0 into lock record
    str(R0, Address(Rlock, mark_offset), eq);


#ifndef PRODUCT
    if (PrintBiasedLockingStatistics) {
      cond_atomic_inc32(eq, BiasedLocking::fast_path_entry_count_addr());
    }
#endif // !PRODUCT

    b(done, eq);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument: R1: Points to BasicObjectLock structure for lock
// Throw an IllegalMonitorException if object is not locked by current thread
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
  assert(Rlock == R1, "the second argument");

  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
  } else {
    Label done, slow_case;

    const Register Robj = R2;
    const Register Rmark = R3;
    const Register Rresult = R0;
    assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();

    const Register Rzero = zero_register(Rtemp);

    // Load oop into Robj
    ldr(Robj, Address(Rlock, obj_offset));

    // Free entry
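    // (marks this BasicObjectLock as unused; restored below if the slow path is taken)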
1005     str(Rzero, Address(Rlock, obj_offset));
1006 
1007     if (UseBiasedLocking) {
1008       biased_locking_exit(Robj, Rmark, done);
1009     }
1010 
1011     // Load the old header from BasicLock structure
1012     ldr(Rmark, Address(Rlock, mark_offset));
1013 
1014     // Test for recursion (zero mark in BasicLock)
1015     cbz(Rmark, done);
1016 
1017     bool allow_fallthrough_on_failure = true;
1018 
1019     cas_for_lock_release(Rlock, Rmark, Robj, Rtemp, slow_case, allow_fallthrough_on_failure);
1020 
1021     b(done, eq);
1022 
1023     bind(slow_case);
1024 
1025     // Call the runtime routine for slow case.
1026     str(Robj, Address(Rlock, obj_offset)); // restore obj
1027     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
1028 
1029     bind(done);
1030   }
1031 }
1032 
1033 
1034 // Test ImethodDataPtr.  If it is null, continue at the specified label
test_method_data_pointer(Register mdp,Label & zero_continue)1035 void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
1036   assert(ProfileInterpreter, "must be profiling interpreter");
1037   ldr(mdp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1038   cbz(mdp, zero_continue);
1039 }
1040 
1041 
1042 // Set the method data pointer for the current bcp.
1043 // Blows volatile registers R0-R3, Rtemp, LR.
set_method_data_pointer_for_bcp()1044 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1045   assert(ProfileInterpreter, "must be profiling interpreter");
1046   Label set_mdp;
1047 
1048   // Test MDO to avoid the call if it is NULL.
1049   ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
1050   cbz(Rtemp, set_mdp);
1051 
1052   mov(R0, Rmethod);
1053   mov(R1, Rbcp);
1054   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R0, R1);
1055   // R0/W0: mdi
1056 
1057   // mdo is guaranteed to be non-zero here, we checked for it before the call.
1058   ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
1059   add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
1060   add_ptr_scaled_int32(Rtemp, Rtemp, R0, 0);
1061 
1062   bind(set_mdp);
1063   str(Rtemp, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1064 }
1065 
1066 
verify_method_data_pointer()1067 void InterpreterMacroAssembler::verify_method_data_pointer() {
1068   assert(ProfileInterpreter, "must be profiling interpreter");
1069 #ifdef ASSERT
1070   Label verify_continue;
1071   save_caller_save_registers();
1072 
1073   const Register Rmdp = R2;
1074   test_method_data_pointer(Rmdp, verify_continue); // If mdp is zero, continue
1075 
1076   // If the mdp is valid, it will point to a DataLayout header which is
1077   // consistent with the bcp.  The converse is highly probable also.
1078 
1079   ldrh(R3, Address(Rmdp, DataLayout::bci_offset()));
1080   ldr(Rtemp, Address(Rmethod, Method::const_offset()));
1081   add(R3, R3, Rtemp);
1082   add(R3, R3, in_bytes(ConstMethod::codes_offset()));
1083   cmp(R3, Rbcp);
1084   b(verify_continue, eq);
1085 
1086   mov(R0, Rmethod);
1087   mov(R1, Rbcp);
1088   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R0, R1, Rmdp);
1089 
1090   bind(verify_continue);
1091   restore_caller_save_registers();
1092 #endif // ASSERT
1093 }
1094 
1095 
set_mdp_data_at(Register mdp_in,int offset,Register value)1096 void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int offset, Register value) {
1097   assert(ProfileInterpreter, "must be profiling interpreter");
1098   assert_different_registers(mdp_in, value);
1099   str(value, Address(mdp_in, offset));
1100 }
1101 
1102 
1103 // Increments mdp data. Sets bumped_count register to adjusted counter.
increment_mdp_data_at(Register mdp_in,int offset,Register bumped_count,bool decrement)1104 void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
1105                                                       int offset,
1106                                                       Register bumped_count,
1107                                                       bool decrement) {
1108   assert(ProfileInterpreter, "must be profiling interpreter");
1109 
1110   // Counter address
1111   Address data(mdp_in, offset);
1112   assert_different_registers(mdp_in, bumped_count);
1113 
1114   increment_mdp_data_at(data, bumped_count, decrement);
1115 }
1116 
set_mdp_flag_at(Register mdp_in,int flag_byte_constant)1117 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, int flag_byte_constant) {
1118   assert_different_registers(mdp_in, Rtemp);
1119   assert(ProfileInterpreter, "must be profiling interpreter");
1120   assert((0 < flag_byte_constant) && (flag_byte_constant < (1 << BitsPerByte)), "flag mask is out of range");
1121 
1122   // Set the flag
1123   ldrb(Rtemp, Address(mdp_in, in_bytes(DataLayout::flags_offset())));
1124   orr(Rtemp, Rtemp, (unsigned)flag_byte_constant);
1125   strb(Rtemp, Address(mdp_in, in_bytes(DataLayout::flags_offset())));
1126 }
1127 
1128 
1129 // Increments mdp data. Sets bumped_count register to adjusted counter.
increment_mdp_data_at(Address data,Register bumped_count,bool decrement)1130 void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
1131                                                       Register bumped_count,
1132                                                       bool decrement) {
1133   assert(ProfileInterpreter, "must be profiling interpreter");
1134 
1135   ldr(bumped_count, data);
1136   if (decrement) {
1137     // Decrement the register. Set condition codes.
1138     subs(bumped_count, bumped_count, DataLayout::counter_increment);
1139     // Avoid overflow.
1140     add(bumped_count, bumped_count, DataLayout::counter_increment, pl);
1141   } else {
1142     // Increment the register. Set condition codes.
1143     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1144     // Avoid overflow.
1145     sub(bumped_count, bumped_count, DataLayout::counter_increment, mi);
1146   }
1147   str(bumped_count, data);
1148 }
1149 
1150 
test_mdp_data_at(Register mdp_in,int offset,Register value,Register test_value_out,Label & not_equal_continue)1151 void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
1152                                                  int offset,
1153                                                  Register value,
1154                                                  Register test_value_out,
1155                                                  Label& not_equal_continue) {
1156   assert(ProfileInterpreter, "must be profiling interpreter");
1157   assert_different_registers(mdp_in, test_value_out, value);
1158 
1159   ldr(test_value_out, Address(mdp_in, offset));
1160   cmp(test_value_out, value);
1161 
1162   b(not_equal_continue, ne);
1163 }
1164 
1165 
update_mdp_by_offset(Register mdp_in,int offset_of_disp,Register reg_temp)1166 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp, Register reg_temp) {
1167   assert(ProfileInterpreter, "must be profiling interpreter");
1168   assert_different_registers(mdp_in, reg_temp);
1169 
1170   ldr(reg_temp, Address(mdp_in, offset_of_disp));
1171   add(mdp_in, mdp_in, reg_temp);
1172   str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1173 }
1174 
1175 
update_mdp_by_offset(Register mdp_in,Register reg_offset,Register reg_tmp)1176 void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg_offset, Register reg_tmp) {
1177   assert(ProfileInterpreter, "must be profiling interpreter");
1178   assert_different_registers(mdp_in, reg_offset, reg_tmp);
1179 
1180   ldr(reg_tmp, Address(mdp_in, reg_offset));
1181   add(mdp_in, mdp_in, reg_tmp);
1182   str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1183 }
1184 
1185 
update_mdp_by_constant(Register mdp_in,int constant)1186 void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
1187   assert(ProfileInterpreter, "must be profiling interpreter");
1188   add(mdp_in, mdp_in, constant);
1189   str(mdp_in, Address(FP, frame::interpreter_frame_mdp_offset * wordSize));
1190 }
1191 
1192 
1193 // Blows volatile registers R0-R3, Rtemp, LR).
update_mdp_for_ret(Register return_bci)1194 void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
1195   assert(ProfileInterpreter, "must be profiling interpreter");
1196   assert_different_registers(return_bci, R0, R1, R2, R3, Rtemp);
1197 
1198   mov(R1, return_bci);
1199   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), R1);
1200 }
1201 
1202 
1203 // Sets mdp, bumped_count registers, blows Rtemp.
profile_taken_branch(Register mdp,Register bumped_count)1204 void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
1205   assert_different_registers(mdp, bumped_count);
1206 
1207   if (ProfileInterpreter) {
1208     Label profile_continue;
1209 
1210     // If no method data exists, go to profile_continue.
1211     // Otherwise, assign to mdp
1212     test_method_data_pointer(mdp, profile_continue);
1213 
1214     // We are taking a branch. Increment the taken count.
1215     increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()), bumped_count);
1216 
1217     // The method data pointer needs to be updated to reflect the new target.
1218     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()), Rtemp);
1219 
1220     bind (profile_continue);
1221   }
1222 }
1223 
1224 
1225 // Sets mdp, blows Rtemp.
profile_not_taken_branch(Register mdp)1226 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1227   assert_different_registers(mdp, Rtemp);
1228 
1229   if (ProfileInterpreter) {
1230     Label profile_continue;
1231 
1232     // If no method data exists, go to profile_continue.
1233     test_method_data_pointer(mdp, profile_continue);
1234 
1235     // We are taking a branch.  Increment the not taken count.
1236     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()), Rtemp);
1237 
1238     // The method data pointer needs to be updated to correspond to the next bytecode
1239     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1240 
1241     bind (profile_continue);
1242   }
1243 }
1244 
1245 
1246 // Sets mdp, blows Rtemp.
profile_call(Register mdp)1247 void InterpreterMacroAssembler::profile_call(Register mdp) {
1248   assert_different_registers(mdp, Rtemp);
1249 
1250   if (ProfileInterpreter) {
1251     Label profile_continue;
1252 
1253     // If no method data exists, go to profile_continue.
1254     test_method_data_pointer(mdp, profile_continue);
1255 
1256     // We are making a call.  Increment the count.
1257     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);
1258 
1259     // The method data pointer needs to be updated to reflect the new target.
1260     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1261 
1262     bind (profile_continue);
1263   }
1264 }
1265 
1266 
1267 // Sets mdp, blows Rtemp.
profile_final_call(Register mdp)1268 void InterpreterMacroAssembler::profile_final_call(Register mdp) {
1269   if (ProfileInterpreter) {
1270     Label profile_continue;
1271 
1272     // If no method data exists, go to profile_continue.
1273     test_method_data_pointer(mdp, profile_continue);
1274 
1275     // We are making a call.  Increment the count.
1276     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);
1277 
1278     // The method data pointer needs to be updated to reflect the new target.
1279     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1280 
1281     bind (profile_continue);
1282   }
1283 }
1284 
1285 
1286 // Sets mdp, blows Rtemp.
profile_virtual_call(Register mdp,Register receiver,bool receiver_can_be_null)1287 void InterpreterMacroAssembler::profile_virtual_call(Register mdp, Register receiver, bool receiver_can_be_null) {
1288   assert_different_registers(mdp, receiver, Rtemp);
1289 
1290   if (ProfileInterpreter) {
1291     Label profile_continue;
1292 
1293     // If no method data exists, go to profile_continue.
1294     test_method_data_pointer(mdp, profile_continue);
1295 
1296     Label skip_receiver_profile;
1297     if (receiver_can_be_null) {
1298       Label not_null;
1299       cbnz(receiver, not_null);
1300       // We are making a call.  Increment the count for null receiver.
1301       increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);
1302       b(skip_receiver_profile);
1303       bind(not_null);
1304     }
1305 
1306     // Record the receiver type.
1307     record_klass_in_profile(receiver, mdp, Rtemp, true);
1308     bind(skip_receiver_profile);
1309 
1310     // The method data pointer needs to be updated to reflect the new target.
1311     update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
1312     bind(profile_continue);
1313   }
1314 }
1315 
1316 
record_klass_in_profile_helper(Register receiver,Register mdp,Register reg_tmp,int start_row,Label & done,bool is_virtual_call)1317 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1318                                         Register receiver, Register mdp,
1319                                         Register reg_tmp,
1320                                         int start_row, Label& done, bool is_virtual_call) {
1321   if (TypeProfileWidth == 0)
1322     return;
1323 
1324   assert_different_registers(receiver, mdp, reg_tmp);
1325 
1326   int last_row = VirtualCallData::row_limit() - 1;
1327   assert(start_row <= last_row, "must be work left to do");
1328   // Test this row for both the receiver and for null.
1329   // Take any of three different outcomes:
1330   //   1. found receiver => increment count and goto done
1331   //   2. found null => keep looking for case 1, maybe allocate this cell
1332   //   3. found something else => keep looking for cases 1 and 2
1333   // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));

    test_mdp_data_at(mdp, recvr_offset, receiver, reg_tmp, next_test);

    // The receiver is receiver[n].  Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset, reg_tmp);
    b(done);

    bind(next_test);
    // reg_tmp now contains the receiver from the CallData.

    if (row == start_row) {
      Label found_null;
      // Failed the equality check on receiver[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          cbz(reg_tmp, found_null);
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), reg_tmp);
          b(done);
          bind(found_null);
        } else {
          cbnz(reg_tmp, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cbz(reg_tmp, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg_tmp, start_row + 1, done, is_virtual_call);

      // Found a null.  Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(reg_tmp, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg_tmp);
  if (start_row > 0) {
    b(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp,
                                                        Register reg_tmp,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  assert_different_registers(receiver, mdp, reg_tmp);

  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg_tmp, 0, done, is_virtual_call);

  bind(done);
}

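// A sketch of the RetData update performed below (assuming the usual RetData
// row layout of (bci, count, displacement) triples):
//
//   mdo->count++;
//   for (row = 0; row < RetData::row_limit(); row++) {
//     if (bci[row] == return_bci) {
//       bci_count[row]++;
//       mdp += bci_displacement[row];    // jump the mdp to the ret target
//       goto done;
//     }
//   }
//   update_mdp_for_ret(return_bci);      // slow path: call into the runtime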
// Sets mdp, blows volatile registers (R0-R3, Rtemp, LR).
void InterpreterMacroAssembler::profile_ret(Register mdp, Register return_bci) {
  assert_different_registers(mdp, return_bci, Rtemp, R0, R1, R2, R3);

  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()), Rtemp);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp, in_bytes(RetData::bci_offset(row)), return_bci,
                       Rtemp, next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)), Rtemp);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)), Rtemp);
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}


// Sets mdp.
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    // (The trailing 'true' argument selects a decrement.)
    increment_mdp_data_at(mdp, count_offset, Rtemp, true);

    bind(profile_continue);
  }
}

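// A sketch of the typecheck profiling below: with TypeProfileCasts the cast
// site also records the observed klasses, otherwise only a BitData cell is
// skipped over:
//
//   if (TypeProfileCasts) {
//     record_klass_in_profile(klass);                    // fill/update a row
//     mdp += VirtualCallData::virtual_call_data_size();
//   } else {
//     mdp += BitData::bit_data_size();
//   }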
// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass)
{
  assert_different_registers(mdp, klass, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, Rtemp, false);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}


// Sets mdp, blows Rtemp.
void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  assert_different_registers(mdp, Rtemp);

  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count.
    increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset()), Rtemp);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp, in_bytes(MultiBranchData::default_displacement_offset()), Rtemp);

    bind(profile_continue);
  }
}

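// A sketch of the switch-case profiling below (one (count, displacement)
// pair per case, MultiBranchData::per_case_size() bytes apart):
//
//   case_cell = case_array + index * per_case_size;
//   case_cell->count++;
//   mdp += case_cell->displacement;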
// Sets mdp. Blows reg_tmp1, reg_tmp2. Index could be the same as reg_tmp2.
void InterpreterMacroAssembler::profile_switch_case(Register mdp, Register index, Register reg_tmp1, Register reg_tmp2) {
  assert_different_registers(mdp, reg_tmp1, reg_tmp2);
  assert_different_registers(mdp, reg_tmp1, index);

  if (ProfileInterpreter) {
    Label profile_continue;

    const int count_offset = in_bytes(MultiBranchData::case_array_offset()) +
                              in_bytes(MultiBranchData::relative_count_offset());

    const int displacement_offset = in_bytes(MultiBranchData::case_array_offset()) +
                              in_bytes(MultiBranchData::relative_displacement_offset());

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base offset (index * in_bytes(MultiBranchData::per_case_size())).
    logical_shift_left(reg_tmp1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));

    // Update the case count.
    add(reg_tmp1, reg_tmp1, count_offset);
    increment_mdp_data_at(Address(mdp, reg_tmp1), reg_tmp2);

    // The method data pointer needs to be updated.
    add(reg_tmp1, reg_tmp1, displacement_offset - count_offset);
    update_mdp_by_offset(mdp, reg_tmp1, reg_tmp2);

    bind(profile_continue);
  }
}

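// One way to see why the fallback path below works: label the input bytes
// r = [A,B,C,D] (A = bits 31..24, D = bits 7..0).
//
//   t1 = r ^ ror(r, 16)          = [A^C, B^D, C^A, D^B]
//   t1 = (t1 >> 8) & 0xffff00ff  = [0,   A^C, 0,   C^A]
//   r  = t1 ^ ror(r, 8)          = [D,   C,   B,   A  ]
//
// i.e. the classic ARM byte-reverse idiom for pre-ARMv6 cores without REV.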
void InterpreterMacroAssembler::byteswap_u32(Register r, Register rtmp1, Register rtmp2) {
  if (VM_Version::supports_rev()) {
    rev(r, r);
  } else {
    eor(rtmp1, r, AsmOperand(r, ror, 16));
    mvn(rtmp2, 0x0000ff00);
    andr(rtmp1, rtmp2, AsmOperand(rtmp1, lsr, 8));
    eor(r, rtmp1, AsmOperand(r, ror, 8));
  }
}

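// The counter's absolute address is split into a 4KB-aligned base and a
// 12-bit offset so the base can be materialized once with mov_slow and the
// load/store can use an immediate offset (ARM ldr/str immediates are limited
// to 12 bits).  With avoid_overflow, adds_32 sets the flags and the 'pl'
// (result >= 0) condition on the store drops the update once the count would
// go negative, pinning the counter at its maximum positive value.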
void InterpreterMacroAssembler::inc_global_counter(address address_of_counter, int offset, Register tmp1, Register tmp2, bool avoid_overflow) {
  const intx addr = (intx) (address_of_counter + offset);

  assert((addr & 0x3) == 0, "address of counter should be aligned");
  const intx offset_mask = right_n_bits(12);

  const address base = (address) (addr & ~offset_mask);
  const int offs = (int) (addr & offset_mask);

  const Register addr_base = tmp1;
  const Register val = tmp2;

  mov_slow(addr_base, base);
  ldr_s32(val, Address(addr_base, offs));

  if (avoid_overflow) {
    adds_32(val, val, 1);
    str(val, Address(addr_base, offs), pl);
  } else {
    add_32(val, val, 1);
    str_32(val, Address(addr_base, offs));
  }
}

void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char *file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop", file, line); }
}

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (Trace(redefine, class, obsolete) logging is enabled) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit events
  // are sent to track stack depth.  If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  if (can_post_interpreter_events()) {
    Label L;

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cbz(Rtemp, L);

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));

    bind(L);
  }

  // Note: the DTrace probe is compiled in only if DTraceMethodProbes is set
  // at code generation time, to avoid overhead on each method entry; the
  // generated code still re-checks the flag at runtime.
  if (DTraceMethodProbes) {
    Label Lcontinue;

    ldrb_global(Rtemp, (address)&DTraceMethodProbes);
    cbz(Rtemp, Lcontinue);

    mov(R0, Rthread);
    mov(R1, Rmethod);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), R0, R1);

    bind(Lcontinue);
  }
  // RedefineClasses() tracing support for obsolete method entry.
  if (log_is_enabled(Trace, redefine, class, obsolete)) {
    mov(R0, Rthread);
    mov(R1, Rmethod);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 R0, R1);
  }
}

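// Inline assembly, roughly, for:
//
// if (thread is in interp_only_mode) {
//   // save the method result (registers, plus the FP result under __ABI_HARD__)
//   InterpreterRuntime::post_method_exit();
//   // restore the method result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }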
void InterpreterMacroAssembler::notify_method_exit(
                 TosState state, NotifyMethodExitMode mode,
                 bool native, Register result_lo, Register result_hi, FloatRegister result_fp) {
  // Whenever JVMTI puts a thread in interp_only_mode, method entry/exit events
  // are sent to track stack depth.  If it is possible to enter
  // interp_only_mode we add the code to check if the event should be sent.
  if (mode == NotifyJVMTI && can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
    cbz(Rtemp, L);

    if (native) {
      // For the C++ and template interpreters, push both result registers on
      // the stack for a native method: we do not know the result state there.
      // See frame::interpreter_frame_result for code that gets the result values from here.
      assert(result_lo != noreg, "result registers should be defined");
      assert(result_hi != noreg, "result registers should be defined");

#ifdef __ABI_HARD__
      assert(result_fp != fnoreg, "FP result register must be defined");
      sub(SP, SP, 2 * wordSize);
      fstd(result_fp, Address(SP));
#endif // __ABI_HARD__

      push(RegisterSet(result_lo) | RegisterSet(result_hi));

      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));

      pop(RegisterSet(result_lo) | RegisterSet(result_hi));
#ifdef __ABI_HARD__
      fldd(result_fp, Address(SP));
      add(SP, SP, 2 * wordSize);
#endif // __ABI_HARD__

    } else {
      // For the template interpreter, the value on tos is the size of the
      // state. (The C++ interpreter calls JVMTI somewhere else.)
      push(state);
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
      pop(state);
    }

    bind(L);
  }

  // Note: the DTrace probe is compiled in only if DTraceMethodProbes is set
  // at code generation time, to avoid overhead on each method exit; the
  // generated code still re-checks the flag at runtime.
  if (DTraceMethodProbes) {
    Label Lcontinue;

    ldrb_global(Rtemp, (address)&DTraceMethodProbes);
    cbz(Rtemp, Lcontinue);

    push(state);

    mov(R0, Rthread);
    mov(R1, Rmethod);

    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), R0, R1);

    pop(state);

    bind(Lcontinue);
  }
}


#ifndef PRODUCT

void InterpreterMacroAssembler::trace_state(const char* msg) {
  int push_size = save_caller_save_registers();

  Label Lcontinue;
  InlinedString Lmsg0("%s: FP=" INTPTR_FORMAT ", SP=" INTPTR_FORMAT "\n");
  InlinedString Lmsg(msg);
  InlinedAddress Lprintf((address)printf);

  ldr_literal(R0, Lmsg0);
  ldr_literal(R1, Lmsg);
  mov(R2, FP);
  add(R3, SP, push_size);  // original SP (without saved registers)
  ldr_literal(Rtemp, Lprintf);
  call(Rtemp);

  b(Lcontinue);

  bind_literal(Lmsg0);
  bind_literal(Lmsg);
  bind_literal(Lprintf);

  bind(Lcontinue);

  restore_caller_save_registers();
}

#endif // !PRODUCT

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
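// (Typical use: interpreter invocation/backedge counters, with cond == eq,
// so the branch is taken exactly when all mask-selected bits of the updated
// counter are zero.)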
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, Address mask_addr,
                                                        Register scratch, Register scratch2,
                                                        AsmCondition cond, Label* where) {
  // Caution: scratch2 and the base register of counter_addr can be the same.
  assert_different_registers(scratch, scratch2);
  ldr_u32(scratch, counter_addr);
  add(scratch, scratch, increment);
  str_32(scratch, counter_addr);

  ldr(scratch2, mask_addr);
  andrs(scratch, scratch, scratch2);
  b(*where, cond);
}

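// A sketch of the lazy MethodCounters lookup below:
//
//   if (method->method_counters() == NULL) {
//     InterpreterRuntime::build_method_counters(method);  // allocates lazily
//     if (method->method_counters() == NULL) goto skip;   // OutOfMemory
//   }
//   Rcounters = method->method_counters();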
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip,
                                                    bool saveRegs,
                                                    Register reg1,
                                                    Register reg2,
                                                    Register reg3) {
  const Address method_counters(method, Method::method_counters_offset());
  Label has_counters;

  ldr(Rcounters, method_counters);
  cbnz(Rcounters, has_counters);

  if (saveRegs) {
    // Save and restore in-use caller-saved registers since they will be trashed by call_VM.
    assert(reg1 != noreg, "must specify reg1");
    assert(reg2 != noreg, "must specify reg2");
    assert(reg3 == noreg, "must not specify reg3");
    push(RegisterSet(reg1) | RegisterSet(reg2));
  }

  mov(R1, method);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), R1);

  if (saveRegs) {
    pop(RegisterSet(reg1) | RegisterSet(reg2));
  }

  ldr(Rcounters, method_counters);
  cbz(Rcounters, skip); // No MethodCounters created, OutOfMemory

  bind(has_counters);
}