/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "interpreter/bytecodeTracer.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include <sys/types.h>

#ifndef PRODUCT
#include "oops/method.hpp"
#endif // !PRODUCT

#define __ _masm->
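// "__ insn(...)" throughout this file emits code through the current
// MacroAssembler (_masm), per the #define above.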

#ifndef CC_INTERP

//-----------------------------------------------------------------------------

extern "C" void entry(CodeBuffer*);

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ ldr(rscratch1, Address(rfp,
                       frame::interpreter_frame_monitor_block_top_offset *
                       wordSize));
    __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal sp for current rfp
                                  // (stack grows downward)
    __ br(Assembler::HS, L); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT
  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // throw exception
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_StackOverflowError));
  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register r1
  __ movw(c_rarg2, r1);
  __ mov(c_rarg1, (address)name);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             c_rarg1, c_rarg2);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  __ pop(c_rarg1);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             c_rarg1);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  if (pass_oop) {
    // object is at TOS
    __ pop(c_rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(c_rarg1, Address((address)name));
  if (pass_oop) {
    __ call_VM(r0, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::
                                    create_klass_exception),
               c_rarg1, c_rarg2);
  } else {
    // ExternalAddress can't take NULL because external_word_Relocation
    // will assert, so load NULL_WORD directly in that case.
    if (message != NULL) {
      __ lea(c_rarg2, Address((address)message));
    } else {
      __ mov(c_rarg2, NULL_WORD);
    }
    __ call_VM(r0,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               c_rarg1, c_rarg2);
  }
  // throw exception
  __ b(address(Interpreter::throw_exception_entry()));
  return entry;
}

address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ dispatch_next(state);
  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  // Restore stack bottom in case i2c adjusted stack
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);

  // Pop N words from the stack
  __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
  __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);

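  // r1 now holds the callee's parameter size in stack slots; scale by
  // wordSize (LSL 3) to pop the caller-pushed arguments off the
  // expression stack.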
  __ add(esp, esp, r1, Assembler::LSL, 3);

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
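  // NB: the extra 2 slots here presumably mirror the
  // (EnableInvokeDynamic ? 2 : 0) reservation used when the frame is
  // laid out (see generate_deopt_entry_for and generate_fixed_frame).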
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);

  __ get_dispatch();
  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
                                                               int step) {
  address entry = __ pc();
  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);
  __ get_dispatch();

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
         + (EnableInvokeDynamic ? 2 : 0));
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // Restore expression stack pointer
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // NULL last_sp until next java call
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  // handle exceptions
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ dispatch_next(state, step);
  return entry;
}


int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
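  // Note: T_OBJECT and T_ARRAY deliberately share index 9; both are
  // oops and use the same result handler.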
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_LONG   : i = 5; break;
    case T_VOID   : i = 6; break;
    case T_FLOAT  : i = 7; break;
    case T_DOUBLE : i = 8; break;
    case T_OBJECT : i = 9; break;
    case T_ARRAY  : i = 9; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
         "index out of bounds");
  return i;
}


address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(r0);          break;
  case T_CHAR   : __ uxth(r0, r0);        break;
  case T_BYTE   : __ sxtb(r0, r0);        break;
  case T_SHORT  : __ sxth(r0, r0);        break;
  case T_INT    : __ uxtw(r0, r0);        break;  // FIXME: We almost certainly don't need this
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(r0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(lr);                                  // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
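  // Full barrier (AnyAny): make work done at the safepoint visible to
  // this thread before bytecode dispatch resumes.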
  __ membar(Assembler::AnyAny);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}

// Helpers for commoning out cases in the various types of method entries.
//


// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rmethod: method
//
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment counters in either the Method* or the MDO, depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
      __ cbz(r0, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
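      // increment_mask_and_jump bumps the counter and, with
      // Assembler::EQ, branches to *overflow whenever the masked count
      // is zero, i.e. at the Tier0 notification frequency.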
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rmethod, rscratch2, done);
    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
    __ bind(done);
  } else {
    const Address backedge_counter(rscratch2,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rscratch2,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rmethod, rscratch2, done);

    if (ProfileInterpreter) { // %%% Merge this into MethodData*
      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
      __ addw(r1, r1, 1);
      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ ldrw(r1, invocation_counter);
    __ ldrw(r0, backedge_counter);

    __ addw(r1, r1, InvocationCounter::count_increment);
    __ andw(r0, r0, InvocationCounter::count_mask_value);

    __ strw(r1, invocation_counter);
    __ addw(r0, r0, r1);                // add both counters

    // profile_method is non-NULL only for interpreted methods, so
    // profile_method != NULL implies !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      unsigned long offset;
      __ adrp(rscratch2, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit),
              offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
      __ cmp(r0, rscratch2);
      __ br(Assembler::LT, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rscratch2, *profile_method);
    }

    {
      unsigned long offset;
      __ adrp(rscratch2,
              ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit),
              offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
      __ cmpw(r0, rscratch2);
      __ br(Assembler::HS, *overflow);
    }
    __ bind(done);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ mov(c_rarg1, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             c_rarg1);

  __ b(*do_continue);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (this was
// not obvious in generate_method_entry), the guard should work for
// them too.
//
// Args:
//      r3: number of additional locals this frame needs (what we must check)
//      rmethod: Method*
//
// Kills:
//      r0
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch64.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  //
  // Note that we use SUBS rather than CMP here because the immediate
  // field of this instruction may overflow.  SUBS can cope with this
  // because it is a macro that will expand to some number of MOV
  // instructions and a register operation.
  __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ br(Assembler::LS, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  const Address stack_base(rthread, Thread::stack_base_offset());
  const Address stack_size(rthread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(r0, overhead_size);
  __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize);  // 2 slots per parameter.

  __ ldr(rscratch1, stack_base);
  __ ldr(rscratch2, stack_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cbnz(rscratch1, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cbnz(rscratch2, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
  __ add(r0, r0, rscratch1);

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ?
                        StackShadowPages : (StackRedPages+StackYellowPages);

  // add in the red and yellow zone sizes
  __ add(r0, r0, max_pages * page_size * 2);

  // check against the current stack bottom
  __ cmp(sp, r0);
  __ br(Assembler::HI, after_frame_check);

  // Remove the incoming args, peeling the machine SP back to where it
  // was in the caller.  This is not strictly necessary, but unless we
  // do so the stack frame may have a garbage FP; this ensures a
  // correct call stack that we can always unwind.  The ANDR should be
  // unnecessary because the sender SP in r13 is always aligned, but
  // it doesn't hurt.
  __ andr(sp, r13, -16);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rmethod: Method*
//      rlocals: locals
//
// Kills:
//      r0
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterGenerator::lock_method(void) {
  // synchronize method
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address monitor_block_top(
        rfp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::NE, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ ldrw(r0, access_flags);
    __ tst(r0, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ br(Assembler::EQ, done);
    __ ldr(r0, Address(rmethod, Method::const_offset()));
    __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
    __ ldr(r0, Address(r0,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(r0, Address(r0, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ cbnz(r0, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ sub(sp, sp, entry_size); // add space for a monitor entry
  __ sub(esp, esp, entry_size);
  __ mov(rscratch1, esp);
  __ str(rscratch1, monitor_block_top);  // set new monitor block top
  // store object
  __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
  __ mov(c_rarg1, esp); // object address
  __ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is an identical setup for
// interpreted and native methods, hence the shared code.
//
// Args:
//      lr: return address
//      rmethod: Method*
//      rlocals: pointer to locals
//      rcpool: cp cache
//      stack_pointer: previous sp
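//
// A sketch of the frame laid out below (word indices relative to the
// post-adjustment sp, as written by the stp instructions that follow):
//      sp[0]: expression stack bottom (esp)  sp[1]: bcp (zr for natives)
//      sp[2]: locals                         sp[3]: constant pool cache
//      sp[4]: mdp (zr if absent)             sp[5]: Method*
//      sp[6]: last_sp (NULL)                 sp[7]: sender sp (r13)
//      sp[8]: saved rfp                      sp[9]: return address (lr)
// rfp is left pointing at sp + 8 words; native frames reserve two extra
// zero-initialized slots at sp[10..11].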
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
  if (native_call) {
    __ sub(esp, sp, 12 *  wordSize);
    __ mov(rbcp, zr);
    __ stp(esp, zr, Address(__ pre(sp, -12 * wordSize)));
    // add 2 zero-initialized slots for native calls
    __ stp(zr, zr, Address(sp, 10 * wordSize));
  } else {
    __ sub(esp, sp, 10 *  wordSize);
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
    __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
    __ stp(esp, rbcp, Address(__ pre(sp, -10 * wordSize)));
  }

  if (ProfileInterpreter) {
    Label method_data_continue;
    __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
    __ cbz(rscratch1, method_data_continue);
    __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
    __ bind(method_data_continue);
    __ stp(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
  } else {
    __ stp(zr, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
  }

  __ ldr(rcpool, Address(rmethod, Method::const_offset()));
  __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
  __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
  __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));

  __ stp(rfp, lr, Address(sp, 8 * wordSize));
  __ lea(rfp, Address(sp, 8 * wordSize));

  // set sender sp
  // leave last_sp as null
  __ stp(zr, r13, Address(sp, 6 * wordSize));

  // Move SP out of the way
  if (! native_call) {
    __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
    __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
    __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
           + (EnableInvokeDynamic ? 2 : 0));
    __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
    __ andr(sp, rscratch1, -16);
  }
}

// End of helpers

// Various method entries
//------------------------------------------------------------------------------------------------------------------------
//
//

// Call an accessor method (assuming it is resolved; otherwise drop
// into the vanilla (slow path) entry)
address InterpreterGenerator::generate_accessor_entry(void) {
  return NULL;
}

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.
  //
  // rmethod: Method*
  // r13: senderSP must be preserved for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    const Register local_0 = c_rarg0;
    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(local_0, Address(esp, 0));
    __ cbz(local_0, slow_path);

    // Load the value of the referent field.
    const Address field_address(local_0, referent_offset);
    __ load_heap_oop(local_0, field_address);

    __ mov(r19, r13);   // Move senderSP to a callee-saved register
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ enter(); // g1_write may call runtime
    __ g1_write_barrier_pre(noreg /* obj */,
                            local_0 /* pre_val */,
                            rthread /* thread */,
                            rscratch2 /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);
    __ leave();
    // areturn
    __ andr(sp, r19, -16);  // done with stack
    __ ret(lr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address InterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path
    // esp: args

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register val = c_rarg1;  // source java byte value
    const Register tbl = c_rarg2;  // scratch

    // Arguments are reversed on java expression stack
    __ ldrw(val, Address(esp, 0));              // byte value
    __ ldrw(crc, Address(esp, wordSize));       // Initial CRC

    __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
    __ add(tbl, tbl, offset);

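    // CRC32 is defined over the bitwise complement of the running crc:
    // invert on entry, update, then invert again before returning (the
    // usual zlib convention).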
    __ ornw(crc, zr, crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ ornw(crc, zr, crc); // ~crc

    // result in c_rarg0

    __ andr(sp, r13, -16);
    __ ret(lr);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rmethod: Method*
    // r13: senderSP must be preserved for slow path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    unsigned long offset;
    __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
    __ ldrw(rscratch1, Address(rscratch1, offset));
    assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
    __ cbnz(rscratch1, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register off = len;      // offset (reuses 'len'; their live ranges never overlap)

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ ldr(buf, Address(esp, 2*wordSize)); // long buf
      __ ldrw(off, Address(esp, wordSize)); // offset
      __ add(buf, buf, off); // + offset
      __ ldrw(crc,   Address(esp, 4*wordSize)); // Initial CRC
    } else {
      __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
      __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ ldrw(off, Address(esp, wordSize)); // offset
      __ add(buf, buf, off); // + offset
      __ ldrw(crc,   Address(esp, 3*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ ldrw(len, Address(esp, 0x0)); // Length

    __ andr(sp, r13, -16); // Restore the caller's SP

    // We are frameless so we can just jump to the stub.
    __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);

    (void) generate_native_entry(false);

    return entry;
  }
  return generate_native_entry(false);
}

void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int start_page = native_call ? StackShadowPages : 1;
    const int page_size = os::vm_page_size();
    for (int pages = start_page; pages <= StackShadowPages ; pages++) {
      __ sub(rscratch2, sp, pages*page_size);
      __ str(zr, Address(rscratch2));
    }
  }
}


// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // r1: Method*
  // rscratch1: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rmethod, Method::const_offset());
  const Address access_flags      (rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r2, ConstMethod::
                                       size_of_parameters_offset());

  // get parameter size (always needed)
  __ ldr(r2, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // Native calls don't need the stack size check, since they have no
  // expression stack, the arguments are already on the stack, and we
  // only add a handful of words to the stack.

  // rmethod: Method*
  // r2: size of parameters
  // rscratch1: sender sp

  // for natives the size of locals is zero

  // compute beginning of parameters (rlocals)
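  // rlocals = esp + size_of_parameters*wordSize - wordSize, i.e. the
  // address of parameter 0: the first-pushed, highest-addressed slot.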
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ add(rlocals, rlocals, -wordSize);

  // Pull SP back to minimum size: this avoids holes in the stack
  __ andr(sp, esp, -16);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldrw(r0, access_flags);
  {
    Label L;
    __ tst(r0, JVM_ACC_NATIVE);
    __ br(Assembler::NE, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ tst(r0, JVM_ACC_ABSTRACT);
    __ br(Assembler::EQ, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of synchronized methods
  // which hasn't been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ mov(rscratch2, true);
  __ strb(rscratch2, do_not_unlock_if_synchronized);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register t = r17;
  const Register result_handler = r19;

  // allocate space for parameters
  __ ldr(t, Address(rmethod, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

  __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
  __ andr(sp, rscratch1, -16);
  __ mov(esp, rscratch1);

  // get signature handler
  {
    Label L;
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ cbnz(t, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
          "adjust this code");

  // The generated handlers do not touch rmethod (the method).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ blr(t);
  __ get_method(rmethod);        // slow path can do a GC, reload rmethod


  // result handler is in r0
  // set result handler
  __ mov(result_handler, r0);
  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_STATIC);
    __ br(Assembler::EQ, L);
    // get mirror
    __ ldr(t, Address(rmethod, Method::const_offset()));
    __ ldr(t, Address(t, ConstMethod::constants_offset()));
    __ ldr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ ldr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
    // pass handle to mirror
    __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ bind(L);
  }

  // get native function entry point in r10
  {
    Label L;
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ mov(rscratch2, unsatisfied);
    __ ldr(rscratch2, rscratch2);
    __ cmp(r10, rscratch2);
    __ br(Assembler::NE, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               rmethod);
    __ get_method(rmethod);
    __ ldr(r10, Address(rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
  __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));

  // Set the last Java PC in the frame anchor to be the return address from
  // the call to the native method: this will allow the debugger to
  // generate an accurate stack trace.
  Label native_return;
  __ set_last_Java_frame(esp, rfp, native_return, rscratch1);

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
    __ cmp(t, _thread_in_Java);
    __ br(Assembler::EQ, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native
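  // (stlrw is a store-release: it publishes all prior writes before the
  // new thread state becomes visible to other threads.)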
  __ mov(rscratch1, _thread_in_native);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // Call the native method.
  __ blr(r10);
  __ bind(native_return);
  __ maybe_isb();
  __ get_method(rmethod);
  // result potentially in r0 or v0

  // make room for the pushes we're about to do
  __ sub(rscratch1, esp, 4 * wordSize);
  __ andr(sp, rscratch1, -16);

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.
  __ push(dtos);
  __ push(ltos);

  // change thread state
  __ mov(rscratch1, _thread_in_native_trans);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ dsb(Assembler::SY);
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(rthread, rscratch2);
    }
  }

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    {
      unsigned long offset;
      __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
      __ ldrw(rscratch2, Address(rscratch2, offset));
    }
    assert(SafepointSynchronize::_not_synchronized == 0,
           "SafepointSynchronize::_not_synchronized");
    Label L;
    __ cbnz(rscratch2, L);
    __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
    __ cbz(rscratch2, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below.  Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
    __ blr(rscratch2);
    __ maybe_isb();
    __ get_method(rmethod);
    __ reinit_heapbase();
    __ bind(Continue);
  }

  // change thread state
  __ mov(rscratch1, _thread_in_Java);
  __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
  __ stlrw(rscratch1, rscratch2);

  // reset_last_Java_frame
  __ reset_last_Java_frame(true);

  // reset handle block
  __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
  __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));

  // If result is an oop unbox and store it in frame where gc will see it
  // and result handler will pick it up

  {
    Label no_oop, not_weak, store_result;
    __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmp(t, result_handler);
    __ br(Assembler::NE, no_oop);
    // Unbox oop result, e.g. JNIHandles::resolve result.
    __ pop(ltos);
    __ cbz(r0, store_result);   // Use NULL as-is.
    STATIC_ASSERT(JNIHandles::weak_tag_mask == 1u);
    __ tbz(r0, 0, not_weak);    // Test for jweak tag.
    // Resolve jweak.
    __ ldr(r0, Address(r0, -JNIHandles::weak_tag_value));
#if INCLUDE_ALL_GCS
    if (UseG1GC) {
      __ enter();                   // Barrier may call runtime.
      __ g1_write_barrier_pre(noreg /* obj */,
                              r0 /* pre_val */,
                              rthread /* thread */,
                              t /* tmp */,
                              true /* tosca_live */,
                              true /* expand_call */);
      __ leave();
    }
#endif // INCLUDE_ALL_GCS
    __ b(store_result);
    __ bind(not_weak);
    // Resolve (untagged) jobject.
    __ ldr(r0, Address(r0, 0));
    __ bind(store_result);
    __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(ltos);
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
    __ ldrb(rscratch1, Address(rscratch1));
    __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
    __ br(Assembler::NE, no_reguard);

    __ pusha(); // XXX only save smashed registers
    __ mov(c_rarg0, rthread);
    __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    __ blr(rscratch2);
    __ popa(); // XXX only restore smashed registers
    __ bind(no_reguard);
  }

  // The method register is junk from after the thread_in_native transition
  // until here.  Also can't call_VM until the bcp has been
  // restored.  Need bcp for throwing exception below so get it now.
  __ get_method(rmethod);

  // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
  // rbcp == code_base()
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
    __ tst(t, JVM_ACC_SYNCHRONIZED);
    __ br(Assembler::EQ, L);
    // the code below should be shared with interpreter macro
    // assembler implementation
    {
      Label unlock;
      // BasicObjectLock will be first in list, since this is a
      // synchronized method. However, need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.

      // the monitor is expected in c_rarg1 for the slow unlock path
      __ lea (c_rarg1, Address(rfp,   // address of first monitor
                               (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                          wordSize - sizeof(BasicObjectLock))));

      __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
      __ cbnz(t, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(c_rarg1);
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in r0/d0, call result handler to
  // convert it and handle the result

  __ pop(ltos);
  __ pop(dtos);

  __ blr(result_handler);

  // remove activation
  __ ldr(esp, Address(rfp,
                    frame::interpreter_frame_sender_sp_offset *
                    wordSize)); // get sender sp
  // remove frame anchor
  __ leave();

  // restore sender sp
  __ mov(sp, esp);

  __ ret(lr);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rscratch1: sender sp
  address entry_point = __ pc();

  const Address constMethod(rmethod, Method::const_offset());
  const Address access_flags(rmethod, Method::access_flags_offset());
  const Address size_of_parameters(r3,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());

  // get parameter size (always needed)
  // need to load the const method first
  __ ldr(r3, constMethod);
  __ load_unsigned_short(r2, size_of_parameters);

  // r2: size of parameters

  __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
  __ sub(r3, r3, r2); // r3 = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // compute beginning of parameters (rlocals)
  __ add(rlocals, esp, r2, ext::uxtx, 3);
  __ sub(rlocals, rlocals, wordSize);

  // Make room for locals
  __ sub(rscratch1, esp, r3, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);

  // r3 - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
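    // ands with the zero register discards the result and just sets the
    // condition flags on r3, so we can skip the loop when r3 <= 0.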
1365     __ ands(zr, r3, r3);
1366     __ br(Assembler::LE, exit); // do nothing if r3 <= 0
1367     __ bind(loop);
1368     __ str(zr, Address(__ post(rscratch1, wordSize)));
1369     __ sub(r3, r3, 1); // until everything initialized
1370     __ cbnz(r3, loop);
1371     __ bind(exit);
1372   }
1373 
1374   // And the base dispatch table
1375   __ get_dispatch();
1376 
1377   // initialize fixed part of activation frame
1378   generate_fixed_frame(false);
1379 
1380   // make sure method is not native & not abstract
1381 #ifdef ASSERT
1382   __ ldrw(r0, access_flags);
1383   {
1384     Label L;
1385     __ tst(r0, JVM_ACC_NATIVE);
1386     __ br(Assembler::EQ, L);
1387     __ stop("tried to execute native method as non-native");
1388     __ bind(L);
1389   }
1390  {
1391     Label L;
1392     __ tst(r0, JVM_ACC_ABSTRACT);
1393     __ br(Assembler::EQ, L);
1394     __ stop("tried to execute abstract method in interpreter");
1395     __ bind(L);
1396   }
1397 #endif
1398 
1399   // Since at this point in the method invocation the exception
1400   // handler would try to exit the monitor of synchronized methods
1401   // which hasn't been entered yet, we set the thread local variable
1402   // _do_not_unlock_if_synchronized to true. The remove_activation
1403   // will check this flag.
1404 
1405    const Address do_not_unlock_if_synchronized(rthread,
1406         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1407   __ mov(rscratch2, true);
1408   __ strb(rscratch2, do_not_unlock_if_synchronized);
1409 
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ strb(zr, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER the invocation counter check and stack overflow
  // check, so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ ldrw(r0, access_flags);
      __ tst(r0, JVM_ACC_SYNCHRONIZED);
      __ br(Assembler::EQ, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ ldr(rscratch1, monitor_block_top);
    __ cmp(esp, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      // don't think we need this
      __ get_method(r1);
      __ b(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native
// call methods.  These both come in synchronized and non-synchronized
// versions but the frame layout they create is very similar. The
// other method entry types are really just special-purpose entries
// that combine entry and interpretation in one step. These are for
// trivial methods like accessors, empty methods, or special math
// methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// rmethod: Method*
//
// Stack layout immediately at entry
//
// [ return address     ] <--- rsp
// [ parameter n        ]
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized entries
// the stack will look like below when we are ready to execute the
// first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects (see
// interpreter_aarch64.hpp).
//
// local variables follow incoming parameters immediately; i.e.
// the return address is moved to the end of the locals.
//
// [ monitor entry      ] <--- esp
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved rbcp         ]
// [ current rlocals    ]
// [ Method*            ]
// [ saved rfp          ] <--- rfp
// [ return address     ]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- rlocals
address AbstractInterpreterGenerator::generate_method_entry(
                                        AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
  case Interpreter::zerolocals             :                                                                             break;
  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
  case Interpreter::native                 : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
  case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true);  break;
  case Interpreter::empty                  : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();       break;
  case Interpreter::accessor               : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();    break;
  case Interpreter::abstract               : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();    break;

  case Interpreter::java_lang_math_sin     : // fall thru
  case Interpreter::java_lang_math_cos     : // fall thru
  case Interpreter::java_lang_math_tan     : // fall thru
  case Interpreter::java_lang_math_abs     : // fall thru
  case Interpreter::java_lang_math_log     : // fall thru
  case Interpreter::java_lang_math_log10   : // fall thru
  case Interpreter::java_lang_math_sqrt    : // fall thru
  case Interpreter::java_lang_math_pow     : // fall thru
  case Interpreter::java_lang_math_exp     : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);    break;
  case Interpreter::java_lang_ref_reference_get
                                           : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
  case Interpreter::java_util_zip_CRC32_update
                                           : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_update_entry();  break;
  case Interpreter::java_util_zip_CRC32_updateBytes
                                           : // fall thru
  case Interpreter::java_util_zip_CRC32_updateByteBuffer
                                           : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_updateBytes_entry(kind); break;
  default                                  : ShouldNotReachHere();                                                       break;
  }

  if (entry_point) {
    return entry_point;
  }

  return ((InterpreterGenerator*) this)->
                                generate_normal_entry(synchronized);
}
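
// This switch runs once per MethodKind while the interpreter is being
// generated.  Kinds with a specialized stub return that stub's entry
// point directly; zerolocals and zerolocals_synchronized fall through
// to generate_normal_entry() with the appropriate synchronization flag.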


// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
  switch (method_kind(m)) {
    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : // fall thru
    case Interpreter::java_lang_math_pow     : // fall thru
    case Interpreter::java_lang_math_exp     :
      return false;
    default:
      return true;
  }
}

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int entry_size = frame::interpreter_frame_monitor_size();

  // total overhead size: entry_size + (saved rfp thru expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

  const int stub_code = frame::entry_frame_after_call_words;
  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  return (overhead_size + method_stack + stub_code);
}
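
// Worked example (a sketch; the frame constants are platform-specific):
// for a method with max_locals == 5 and max_stack == 4, method_stack is
// (5 + 4) * Interpreter::stackElementWords words, to which the fixed
// overhead_size and the entry stub's
// frame::entry_frame_after_call_words are added.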

// asm based interpreter deoptimization helpers
int AbstractInterpreter::size_activation(int max_stack,
                                         int temps,
                                         int extra_args,
                                         int monitors,
                                         int callee_params,
                                         int callee_locals,
                                         bool is_top_frame) {
  // Note: This calculation must exactly parallel the frame setup
  // in AbstractInterpreterGenerator::generate_method_entry.

  // fixed size of an interpreter frame:
  int overhead = frame::sender_sp_offset -
                 frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition).  Since the callee parameters already account
  // for the callee's params we only need to account for the extra
  // locals.
  int size = overhead +
         (callee_locals - callee_params) +
         monitors * frame::interpreter_frame_monitor_size() +
         // On the top frame, at all times SP <= ESP, and SP is
         // 16-aligned.  We ensure this by adjusting SP on method
         // entry and re-entry to allow room for the maximum size of
         // the expression stack.  When we call another method we bump
         // SP so that no stack space is wasted.  So, only on the top
         // frame do we need to allow max_stack words.
         (is_top_frame ? max_stack : temps + extra_args);

  // On AArch64 we always keep the stack pointer 16-aligned, so we
  // must round up here.
  size = round_to(size, 2);
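  // e.g. an odd size of 17 words is rounded up to 18 words (144 bytes
  // with an 8-byte word), keeping the frame a multiple of 16 bytes.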

  return size;
}

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount,
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a
  // skeletal state.

  int max_locals = method->max_locals() * Interpreter::stackElementWords;
  int extra_locals = (method->max_locals() - method->size_of_parameters()) *
    Interpreter::stackElementWords;

#ifdef ASSERT
  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
#endif

  interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference between sender_sp and
  // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
  // the original sp of the caller (the unextended_sp) and
  // sender_sp is fp+8/16 (32bit/64bit) XXX
  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;

#ifdef ASSERT
  if (caller->is_interpreted_frame()) {
    assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
  }
#endif

  interpreter_frame->interpreter_frame_set_locals(locals);
  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
  BasicObjectLock* monbot = montop - moncount;
  interpreter_frame->interpreter_frame_set_monitor_end(monbot);

  // Set last_sp
  intptr_t* esp = (intptr_t*) monbot -
    tempcount * Interpreter::stackElementWords -
    popframe_extra_args;
  interpreter_frame->interpreter_frame_set_last_sp(esp);

  // All frames but the initial (oldest) interpreter frame we fill in have
  // a value for sender_sp that allows walking the stack but isn't
  // truly correct. Correct the value here.
  if (extra_locals != 0 &&
      interpreter_frame->sender_sp() ==
      interpreter_frame->interpreter_frame_sender_sp()) {
    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
                                                       extra_locals);
  }
  *interpreter_frame->interpreter_frame_cache_addr() =
    method->constants()->cache();
}


//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  // r0: exception
  // r3: return address/pc that threw exception
  __ restore_bcp();    // rbcp points to call/send
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ reinit_heapbase();  // restore rheapbase as heapbase.
  __ get_dispatch();

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // If we came here via a NullPointerException on the receiver of a
  // method, rmethod may be corrupt.
  __ get_method(rmethod);
  // expression stack is undefined here
  // r0: exception
  // rbcp: exception bcp
  __ verify_oop(r0);
  __ mov(c_rarg1, r0);

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(r3,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             c_rarg1);

  // Calculate stack limit
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
         + (EnableInvokeDynamic ? 2 : 0) + 2);
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
  __ andr(sp, rscratch1, -16);
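  // In effect (a sketch): SP = align16(initial_sp - (max_stack +
  // monitor and slop words) * wordSize); the ext::uxtx, 3 argument
  // scales the word count by 8 before the subtraction.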

  // r0: exception handler entry point
  // r3: preserved exception oop
  // rbcp: bcp for exception handler
  __ push_ptr(r3); // push exception which is now the only value on the stack
  __ br(r0); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
  __ orr(r3, r3, JavaThread::popframe_processing_bit);
  __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), c_rarg1);
    __ cbnz(r0, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(r0);
    __ ldr(r0, Address(r0, Method::const_offset()));
    __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
                                                    size_of_parameters_offset())));
    __ lsl(r0, r0, Interpreter::logStackElementSize);
    __ restore_locals(); // XXX do we need this?
    __ sub(rlocals, rlocals, r0);
    __ add(rlocals, rlocals, wordSize);
    // Save these arguments
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          rthread, r0, rlocals);

    __ remove_activation(vtos,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
    __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret(lr);

    __ bind(caller_not_deoptimized);
  }

  __ remove_activation(vtos,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Restore the last_sp and null it out
  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));

  __ restore_bcp();
  __ restore_locals();
  __ restore_constant_pool_cache();
  __ get_method(rmethod);
  __ get_dispatch();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
  assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");

#if INCLUDE_JVMTI
  if (EnableInvokeDynamic) {
    Label L_done;

    __ ldrb(rscratch1, Address(rbcp, 0));
    __ cmpw(rscratch1, Bytecodes::_invokestatic);
    __ br(Assembler::NE, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ ldr(c_rarg0, Address(rlocals, 0));
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);

    __ cbz(r0, L_done);

    __ str(r0, Address(esp, 0));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Restore machine SP
  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size()
         + (EnableInvokeDynamic ? 2 : 0));
  __ ldr(rscratch2,
         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
  __ andr(sp, rscratch1, -16);
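  // This mirrors the stack-limit computation in the exception path
  // above: SP is recomputed from the frame's initial SP less the
  // method's maximum expression stack and monitor area, then masked
  // down to a 16-byte boundary.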

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(r0);
  __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, false, true, false);
  // restore exception
  __ get_vm_result(r0, rthread);

  // In between activations - previous activation type unknown yet;
  // compute the continuation point - the continuation point expects the
  // following registers set up:
  //
  // r0: exception
  // lr: return address/pc that threw exception
  // esp: expression stack of caller
  // rfp: fp of caller
  __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        rthread, lr);
  __ mov(r1, r0);                               // save exception handler
  __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
  // We might be returning to a deopt handler that expects r3 to
  // contain the exception pc
  __ mov(r3, lr);
  // Note that an "issuing PC" is actually the next PC after the call
  __ br(r1);                                    // jump to exception
                                                // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
  __ str(zr, cond_addr);

  __ remove_activation(state,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ ret(lr);

  return entry;
} // end of ForceEarlyReturn support



//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

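// Each vtos bytecode template needs an entry point for every possible
// top-of-stack state of the preceding bytecode.  The non-void entries
// generated below push the cached tos value onto the expression stack
// and fall into the common vtos entry point, so the template body
// itself always starts with an empty tosca.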
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();  __ b(L);
  fep = __ pc();  __ push_f();    __ b(L);
  dep = __ pc();  __ push_d();    __ b(L);
  lep = __ pc();  __ push_l();    __ b(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}

//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(lr);
  __ push(state);
  __ push(RegSet::range(r0, r15), sp);
  __ mov(c_rarg2, r0);  // Pass itos
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(RegSet::range(r0, r15), sp);
  __ pop(state);
  __ pop(lr);
  __ ret(lr);                                   // return from result handler

  return entry;
}

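// Count every bytecode executed by atomically incrementing
// BytecodeCounter::_counter_value.  The scratch registers are saved
// and restored around the update since the atomic helper may clobber
// them.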
void TemplateInterpreterGenerator::count_bytecode() {
  Register rscratch3 = r0;
  __ push(rscratch1);
  __ push(rscratch2);
  __ push(rscratch3);
  __ mov(rscratch3, (address) &BytecodeCounter::_counter_value);
  __ atomic_add(noreg, 1, rscratch3);
  __ pop(rscratch3);
  __ pop(rscratch2);
  __ pop(rscratch1);
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  __ bl(Interpreter::trace_code(t->tos_in()));
  __ reinit_heapbase();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ push(rscratch1);
  __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
  __ ldr(rscratch1, Address(rscratch1));
  __ mov(rscratch2, StopInterpreterAt);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::NE, L);
  __ brk(0);
  __ bind(L);
  __ pop(rscratch1);
}

#endif // !PRODUCT
#endif // ! CC_INTERP