/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI.
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;

// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------

// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2, etc.
// Doubles are passed in D0, D2, D4.
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten with the pointer to the JNIEnv
// prior to calling the native function.
// On LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
address TemplateInterpreterGenerator::generate_slow_signature_handler() {

  enum {
    non_float  = 0,
    float_sig  = 1,
    double_sig = 2,
    sig_mask   = 3
  };

  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame corresponding to the
  // outer interpreter frame.
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();


  // load the register arguments (the C code packed them as varargs)
  Address Sig = argv.address_in_frame();   // Argument 0 holds the signature
  __ ld_ptr( Sig, G3_scratch );            // Get register argument signature word into G3_scratch
  __ mov( G3_scratch, G4_scratch);
  __ srl( G4_scratch, 2, G4_scratch);      // Skip Arg 0
  Label done;
  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
    Label NonFloatArg;
    Label LoadFloatArg;
    Label LoadDoubleArg;
    Label NextArg;
    Address a = ldarg.address_in_frame();
    __ andcc(G4_scratch, sig_mask, G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, float_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, double_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
    __ delayed()->nop();

    __ bind(NonFloatArg);
    // There are only 6 integer register arguments!
    if ( ldarg.is_register() )
      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
    else {
      // Optimization: see if there are any more args and get out prior to checking
      // all 16 float registers. My guess is that this is rare.
      // If is_register is false, then we are done with the first six integer args.
      __ br_null_short(G4_scratch, Assembler::pt, done);
    }
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadFloatArg);
    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadDoubleArg);
    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(NextArg);
  }

  __ bind(done);
  __ ret();
  __ delayed()->restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler

  return entry;
}
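
// Illustration only (not generated code): the packed signature word decoded
// above uses two bits per argument, least significant bits first, with the
// tag values from the enum above. A minimal C++ sketch of that decode,
// assuming this encoding:
//
//   static int argument_tag(uintptr_t sig_word, int arg_index) {
//     // arg_index 1 is the first real argument; slot 0 holds the packed
//     // word itself, which is why the code above starts with
//     // srl(G4_scratch, 2, ...) to skip it.
//     return (int)((sig_word >> (2 * arg_index)) & 3 /* sig_mask */);
//   }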

void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
  // and the second is only used when the first is true. We pass zero for both.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ set((int)false, O2);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
  // returns verified_entry_point or NULL
  // we ignore it in any case
  __ ba_short(Lcontinue);
}


// End of helpers

// Various method entries

// Abstract method entry
// Attempt to execute abstract method. Throw exception.
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();
  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), G5_method);
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return entry;
}

void TemplateInterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
  __ stx(O0, l_tmp);
}

void TemplateInterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
  __ ldx(l_tmp, O0);
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // Pass the array to create more detailed exceptions.
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments

  __ check_and_handle_popframe(Gtemp);
  __ check_and_handle_earlyret(Gtemp);

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache);  // load LcpoolCache
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method. This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (EnableJVMCI && state == vtos && step == 0) {
    Label L;
    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
    // Clear flag.
    __ stbool(G0, pending_monitor_enter_addr);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (EnableJVMCI) {
      Label L;
      Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
      __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
      __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
    case T_INT    : __ mov(O0, Itos_i);                           break;
    case T_VOID   : /* nothing to do */                           break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );       break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );       break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                                   // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)              // marker for disassembly
  return entry;
}
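
// Aside on the T_BOOLEAN idiom above (a hedged reading, illustration only):
// subcc(G0, O0, G0) computes 0 - O0 and sets the carry flag exactly when
// O0 != 0, and addc(G0, 0, Itos_i) then materializes that carry bit:
//
//   Itos_i = (O0 != 0) ? 1 : 0;   // normalize any non-zero value to true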

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending on whether we're profiling or not.
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
                               in_bytes(MethodCounters::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
                                             in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }
}
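
// For orientation, a hedged sketch of what increment_mask_and_jump amounts
// to in the tiered path above (the real semantics live in
// InterpreterMacroAssembler; illustration only):
//
//   counter += increment;          // bump the (shifted) invocation counter
//   if ((counter & mask) == 0) {   // the mask selects the notification period
//     goto *overflow;              // hand over to the compilation policy
//   }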

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void TemplateInterpreterGenerator::lock_method() {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    // lock the mirror, not the Klass*
    __ load_mirror(O0, Lmethod, Lscratch);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes()); // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // Get the stack overflow limit, and in debug, verify it is non-zero.
  __ ld_ptr(G2_thread, JavaThread::stack_overflow_limit_offset(), Rscratch);
#ifdef ASSERT
  Label limit_ok;
  __ br_notnull_short(Rscratch, Assembler::pn, limit_ok);
  __ stop("stack overflow limit is zero in generate_stack_overflow_check");
  __ bind(limit_ok);
#endif

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add(Rscratch, Rframe_size, Rscratch);

  // The frame is greater than one page in size, so check against
  // the bottom of the stack.
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // The stack will overflow, throw an exception.

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // If you get to here, then there is enough stack space.
  __ bind(after_frame_check);
}
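
// Condensed, hedged reading of the check generated above (illustration only;
// the unsigned compare works because the stack grows toward lower addresses):
//
//   if (frame_size <= page_size) return;                  // covered by stack banging
//   if (SP > stack_overflow_limit + frame_size) return;   // enough headroom
//   throw StackOverflowError;                             // via the shared runtime stub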


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:      points to the TOS of the callers expression stack
//               after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the callers stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
// The frame has now been setup to do the rest of the entry code

// Try this optimization: Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations. It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs    : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );

    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr(constMethod, Otmp1);
    __ lduh(size_of_locals, Otmp1);
    __ sub(Otmp1, Glocals_size, Glocals_size);
    __ round_to(Glocals_size, WordsPerLong);
    __ sll(Glocals_size, Interpreter::logStackElementSize, Glocals_size);

    // See if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining.
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr(constMethod, Gframe_size);
    __ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add(Gframe_size, Glocals_size, Gframe_size);

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1);

    __ sub(Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub(SP, Glocals_size, SP);
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use.
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  // Get mirror and store it in the frame as GC root for this Method*
  Register mirror = LcpoolCache;
  __ load_mirror(mirror, Lmethod, Lscratch);
  __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
  __ get_constant_pool_cache(LcpoolCache);     // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
  __ add(Lmonitors, STACK_BIAS, Lmonitors);    // Account for 64 bit stack bias
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
    __ set_method_data_pointer();
  }

}

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load;
  //   thus we can use the regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;

  // In the G1 code we don't check if we need to reach a safepoint. We
  // continue and the thread will safepoint at the next bytecode dispatch.

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
  // check if local 0 == NULL and go the slow path
  __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);

  __ load_heap_oop(Otos_i, referent_offset, Otos_i, G3_scratch, ON_WEAK_OOP_REF);

  // _areturn
  __ retl();                     // return from leaf routine
  __ delayed()->mov(O5_savedSP, SP);

  // Generate regular method entry
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ safepoint_poll(L_slow_path, false, G2_thread, O2);
    __ delayed()->nop();

    // Load parameters
    const Register crc   = O0;  // initial crc
    const Register val   = O1;  // byte to update with
    const Register table = O2;  // address of 256-entry lookup table

    __ ldub(Gargs, 3, val);
    __ lduw(Gargs, 8, crc);

    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);

    __ not1(crc);  // ~crc
    __ clruwu(crc);
    __ update_byte_crc32(crc, val, table);
    __ not1(crc);  // ~crc

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
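
// For reference, the single-byte step performed by update_byte_crc32 above
// is the standard table-driven CRC-32 update (a sketch assuming the usual
// 256-entry table layout; illustration only, not generated code):
//
//   crc = (crc >> 8) ^ table[(crc ^ val) & 0xFF];
//
// The surrounding not1(crc) instructions supply the pre- and post-inversion
// that the java.util.zip.CRC32 contract requires.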

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.

    __ safepoint_poll(L_slow_path, false, G2_thread, O2);
    __ delayed()->nop();

    // Load parameters from the stack
    const Register crc    = O0;  // initial crc
    const Register buf    = O1;  // source java byte array address
    const Register len    = O2;  // len
    const Register offset = O3;  // offset

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
    } else {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
    }

    // Call the crc32 kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32(crc, buf, len, O3);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native.
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters from the stack
    const Register crc    = O0;  // initial crc
    const Register buf    = O1;  // source java byte array address
    const Register offset = O2;  // offset
    const Register end    = O3;  // index of last element to process
    const Register len    = O2;  // len argument to the kernel
    const Register table  = O3;  // crc32c lookup table address

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    } else {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    }

    // Call the crc32c kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32c(crc, buf, len, table);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    return entry;
  }
  return NULL;
}

/* Math routines only partially supported.
 *
 * Providing support for fma (float/double) only.
 */
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind)
{
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  address entry = __ pc();

  switch (kind) {
    case Interpreter::java_lang_math_fmaF:
      if (UseFMA) {
        // float .fma(float a, float b, float c)
        const FloatRegister ra = F1;
        const FloatRegister rb = F2;
        const FloatRegister rc = F3;
        const FloatRegister rd = F0;  // Result.

        __ ldf(FloatRegisterImpl::S, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::S, Gargs,  8, rb);
        __ ldf(FloatRegisterImpl::S, Gargs, 16, ra);

        __ fmadd(FloatRegisterImpl::S, ra, rb, rc, rd);
        __ retl();  // Result in F0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    case Interpreter::java_lang_math_fmaD:
      if (UseFMA) {
        // double .fma(double a, double b, double c)
        const FloatRegister ra = F2;  // D1
        const FloatRegister rb = F4;  // D2
        const FloatRegister rc = F6;  // D3
        const FloatRegister rd = F0;  // D0 Result.

        __ ldf(FloatRegisterImpl::D, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::D, Gargs, 16, rb);
        __ ldf(FloatRegisterImpl::D, Gargs, 32, ra);

        __ fmadd(FloatRegisterImpl::D, ra, rb, rc, rd);
        __ retl();  // Result in D0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    default:
      break;
  }
  return NULL;
}
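
// Note on fmadd above: both entries rely on the SPARC fused multiply-add,
// which (matching the Math.fma contract) computes, with a single rounding:
//
//   rd = ra * rb + rc;   // illustration only, not generated code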

// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked. Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}
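
// Roughly, each bang_stack_with_offset above stores zero at a fixed negative
// offset from SP, so the net effect for a non-native entry is (a hedged
// sketch, illustration only):
//
//   for (int page = 1; page <= n_shadow_pages; page++) {
//     touch(SP - page * page_size);  // store; traps if the page is a guard page
//   }
//
// Touching every shadow page, rather than only the deepest one, ensures the
// guard zone is hit even when one frame spans multiple pages.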
1101
1102 //
1103 // Interpreter stub for calling a native method. (asm interpreter)
1104 // This sets up a somewhat different looking stack for calling the native method
1105 // than the typical interpreter frame setup.
1106 //
1107
generate_native_entry(bool synchronized)1108 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
1109 address entry = __ pc();
1110
1111 // the following temporary registers are used during frame creation
1112 const Register Gtmp1 = G3_scratch ;
1113 const Register Gtmp2 = G1_scratch;
1114 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1115
1116 // make sure registers are different!
1117 assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
1118
1119 const Address Laccess_flags(Lmethod, Method::access_flags_offset());
1120
1121 const Register Glocals_size = G3;
1122 assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
1123
1124 // make sure method is native & not abstract
1125 // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
1126 #ifdef ASSERT
1127 __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
1128 { Label L;
1129 __ btst(JVM_ACC_NATIVE, Gtmp1);
1130 __ br(Assembler::notZero, false, Assembler::pt, L);
1131 __ delayed()->nop();
1132 __ stop("tried to execute non-native method as native");
1133 __ bind(L);
1134 }
1135 { Label L;
1136 __ btst(JVM_ACC_ABSTRACT, Gtmp1);
1137 __ br(Assembler::zero, false, Assembler::pt, L);
1138 __ delayed()->nop();
1139 __ stop("tried to execute abstract method as non-abstract");
1140 __ bind(L);
1141 }
1142 #endif // ASSERT
1143
1144 // generate the code to allocate the interpreter stack frame
1145 generate_fixed_frame(true);
1146
1147 //
1148 // No locals to initialize for native method
1149 //
1150
1151 // this slot will be set later, we initialize it to null here just in
1152 // case we get a GC before the actual value is stored later
1153 __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);
1154
1155 const Address do_not_unlock_if_synchronized(G2_thread,
1156 JavaThread::do_not_unlock_if_synchronized_offset());
1157 // Since at this point in the method invocation the exception handler
1158 // would try to exit the monitor of synchronized methods which hasn't
1159 // been entered yet, we set the thread local variable
1160 // _do_not_unlock_if_synchronized to true. If any exception was thrown by
1161 // runtime, exception handling i.e. unlock_if_synchronized_method will
1162 // check this thread local flag.
1163 // This flag has two effects, one is to force an unwind in the topmost
1164 // interpreter frame and not perform an unlock while doing so.
1165
1166 __ movbool(true, G3_scratch);
1167 __ stbool(G3_scratch, do_not_unlock_if_synchronized);
1168
1169 // increment invocation counter and check for overflow
1170 //
1171 // Note: checking for negative value instead of overflow
1172 // so we have a 'sticky' overflow test (may be of
1173 // importance as soon as we have true MT/MP)
1174 Label invocation_counter_overflow;
1175 Label Lcontinue;
1176 if (inc_counter) {
1177 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
1178
1179 }
1180 __ bind(Lcontinue);
1181
1182 bang_stack_shadow_pages(true);
1183
1184 // reset the _do_not_unlock_if_synchronized flag
1185 __ stbool(G0, do_not_unlock_if_synchronized);
1186
1187 // check for synchronized methods
1188 // Must happen AFTER invocation_counter check and stack overflow check,
1189 // so method is not locked if overflows.
1190
1191 if (synchronized) {
1192 lock_method();
1193 } else {
1194 #ifdef ASSERT
1195 { Label ok;
1196 __ ld(Laccess_flags, O0);
1197 __ btst(JVM_ACC_SYNCHRONIZED, O0);
1198 __ br( Assembler::zero, false, Assembler::pt, ok);
1199 __ delayed()->nop();
1200 __ stop("method needs synchronization");
1201 __ bind(ok);
1202 }
1203 #endif // ASSERT
1204 }
1205
1206
1207 // start execution
1208 __ verify_thread();
1209
1210 // JVMTI support
1211 __ notify_method_entry();
1212
1213 // native call
1214
1215 // (note that O0 is never an oop--at most it is a handle)
1216 // It is important not to smash any handles created by this call,
1217 // until any oop handle in O0 is dereferenced.
1218
1219 // (note that the space for outgoing params is preallocated)
1220
1221 // get signature handler
1222 { Label L;
1223 Address signature_handler(Lmethod, Method::signature_handler_offset());
1224 __ ld_ptr(signature_handler, G3_scratch);
1225 __ br_notnull_short(G3_scratch, Assembler::pt, L);
1226 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
1227 __ ld_ptr(signature_handler, G3_scratch);
1228 __ bind(L);
1229 }
1230
1231 // Push a new frame so that the args will really be stored in
1232 // Copy a few locals across so the new frame has the variables
1233 // we need but these values will be dead at the jni call and
1234 // therefore not gc volatile like the values in the current
1235 // frame (Lmethod in particular)
1236
1237 // Flush the method pointer to the register save area
1238 __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
1239 __ mov(Llocals, O1);
1240
1241 // calculate where the mirror handle body is allocated in the interpreter frame:
1242 __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
1243
1244 // Calculate current frame size
1245 __ sub(SP, FP, O3); // Calculate negative of current frame size
1246 __ save(SP, O3, SP); // Allocate an identical sized frame
1247
1248 // Note I7 has leftover trash. Slow signature handler will fill it in
1249 // should we get there. Normal jni call will set reasonable last_Java_pc
1250 // below (and fix I7 so the stack trace doesn't have a meaningless frame
1251 // in it).
1252
1253 // Load interpreter frame's Lmethod into same register here
1254
1255 __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
1256
1257 __ mov(I1, Llocals);
1258 __ mov(I2, Lscratch2); // save the address of the mirror
1259
1260
1261 // ONLY Lmethod and Llocals are valid here!
1262
1263 // call signature handler, It will move the arg properly since Llocals in current frame
1264 // matches that in outer frame
1265
1266 __ callr(G3_scratch, 0);
1267 __ delayed()->nop();
1268
1269 // Result handler is in Lscratch
1270
1271 // Reload interpreter frame's Lmethod since slow signature handler may block
1272 __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);
1273
1274 { Label not_static;
1275
1276 __ ld(Laccess_flags, O0);
1277 __ btst(JVM_ACC_STATIC, O0);
1278 __ br( Assembler::zero, false, Assembler::pt, not_static);
1279 // get native function entry point(O0 is a good temp until the very end)
1280 __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
1281 // for static methods insert the mirror argument
1282 __ load_mirror(O1, Lmethod, G3_scratch);
1283 #ifdef ASSERT
1284 if (!PrintSignatureHandlers) // do not dirty the output with this
1285 { Label L;
1286 __ br_notnull_short(O1, Assembler::pt, L);
1287 __ stop("mirror is missing");
1288 __ bind(L);
1289 }
1290 #endif // ASSERT
1291 __ st_ptr(O1, Lscratch2, 0);
1292 __ mov(Lscratch2, O1);
1293 __ bind(not_static);
1294 }
1295
1296 // At this point, arguments have been copied off of stack into
1297 // their JNI positions, which are O1..O5 and SP[68..].
1298 // Oops are boxed in-place on the stack, with handles copied to arguments.
1299 // The result handler is in Lscratch. O0 will shortly hold the JNIEnv*.
1300
1301 #ifdef ASSERT
1302 { Label L;
1303 __ br_notnull_short(O0, Assembler::pt, L);
1304 __ stop("native entry point is missing");
1305 __ bind(L);
1306 }
1307 #endif // ASSERT
1308
1309 //
1310 // setup the frame anchor
1311 //
1312 // The scavenge function only needs to know that the PC of this frame is
1313 // in the interpreter method entry code, it doesn't need to know the exact
1314 // PC and hence we can use O7 which points to the return address from the
1315 // previous call in the code stream (signature handler function)
1316 //
1317 // The other trick is we set last_Java_sp to FP instead of the usual SP because
1318 // we have pushed the extra frame in order to protect the volatile register(s)
1319 // in that frame when we return from the jni call
1320 //
1321
1322 __ set_last_Java_frame(FP, O7);
1323 __ mov(O7, I7); // make dummy interpreter frame look like one above,
1324 // not meaningless information that'll confuse me.
1325
1326 // flush the windows now. We don't care about the current (protection) frame
1327 // only the outer frames
1328
1329 __ flushw();
1330
1331 // mark windows as flushed
1332 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
1333 __ set(JavaFrameAnchor::flushed, G3_scratch);
1334 __ st(G3_scratch, flags);
1335
1336 // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
1337
1338 Address thread_state(G2_thread, JavaThread::thread_state_offset());
1339 #ifdef ASSERT
1340 { Label L;
1341 __ ld(thread_state, G3_scratch);
1342 __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
1343 __ stop("Wrong thread state in native stub");
1344 __ bind(L);
1345 }
1346 #endif // ASSERT
1347 __ set(_thread_in_native, G3_scratch);
1348 __ st(G3_scratch, thread_state);
1349
1350 // Call the jni method, using the delay slot to set the JNIEnv* argument.
1351 __ save_thread(L7_thread_cache); // save Gthread
1352 __ callr(O0, 0);
1353 __ delayed()->
1354 add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
1355
1356 // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD
1357
1358 __ restore_thread(L7_thread_cache); // restore G2_thread
1359 __ reinit_heapbase();
1360
1361 // must we block?
1362
1363 // Block, if necessary, before resuming in _thread_in_Java state.
1364 // In order for GC to work, don't clear the last_Java_sp until after blocking.
1365 { Label no_block;
1366
1367 // Switch thread to "native transition" state before reading the synchronization state.
1368 // This additional state is necessary because reading and testing the synchronization
1369 // state is not atomic w.r.t. GC, as this scenario demonstrates:
1370 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1371 // VM thread changes sync state to synchronizing and suspends threads for GC.
1372 // Thread A is resumed to finish this native method, but doesn't block here since it
1373 // didn't see any synchronization is progress, and escapes.
1374 __ set(_thread_in_native_trans, G3_scratch);
1375 __ st(G3_scratch, thread_state);
1376 if (os::is_MP()) {
1377 if (UseMembar) {
1378 // Force this write out before the read below
1379 __ membar(Assembler::StoreLoad);
1380 } else {
1381 // Write serialization page so VM thread can do a pseudo remote membar.
1382 // We use the current thread pointer to calculate a thread specific
1383 // offset to write to within the page. This minimizes bus traffic
1384 // due to cache line collision.
1385 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
1386 }
1387 }
1388
1389 Label L;
1390 __ safepoint_poll(L, false, G2_thread, G3_scratch);
1391 __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
1392 __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
1393 __ bind(L);
1394
1395 // Block. Save any potential method result value before the operation and
1396 // use a leaf call to leave the last_Java_frame setup undisturbed.
1397 save_native_result();
1398 __ call_VM_leaf(L7_thread_cache,
1399 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1400 G2_thread);
1401
1402 // Restore any method result value
1403 restore_native_result();
1404 __ bind(no_block);
1405 }
1406
1407 // Clear the frame anchor now
1408
1409 __ reset_last_Java_frame();
1410
1411 // Move the result handler address
1412 __ mov(Lscratch, G3_scratch);
1413 // return possible result to the outer frame
1414 __ restore(O0, G0, O0);
1415
1416 // Move result handler to expected register
1417 __ mov(G3_scratch, Lscratch);
1418
1419 // Back in normal (native) interpreter frame. State is thread_in_native_trans
1420 // switch to thread_in_Java.
1421
1422 __ set(_thread_in_Java, G3_scratch);
1423 __ st(G3_scratch, thread_state);
1424
1425 if (CheckJNICalls) {
1426 // clear_pending_jni_exception_check
1427 __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
1428 }
1429
1430 // reset handle block
1431 __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
1432 __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
1433
1434 // If we have an oop result store it where it will be safe for any further gc
1435 // until we return now that we've released the handle it might be protected by
1436
1437 { Label no_oop;
1438
1439 __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
1440 __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
1441 __ resolve_jobject(O0, G3_scratch);
1442 // Store it where gc will look for it and result handler expects it.
1443 __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
1444
1445 __ bind(no_oop);
1446 }
1447
1448
1449 // handle exceptions (exception handling will handle unlocking!)
1450 { Label L;
1451 Address exception_addr(G2_thread, Thread::pending_exception_offset());
1452 __ ld_ptr(exception_addr, Gtemp);
1453 __ br_null_short(Gtemp, Assembler::pt, L);
1454 // Note: This could be handled more efficiently since we know that the native
1455 // method doesn't have an exception handler. We could directly return
1456 // to the exception handler for the caller.
1457 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
1458 __ should_not_reach_here();
1459 __ bind(L);
1460 }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

  // dispose of return address and remove activation
#ifdef ASSERT
  { Label ok;
    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  __ jmp(Lscratch, 0);
  __ delayed()->nop();

  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}


// Generic method entry to (asm) interpreter
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address constMethod (G5_method, Method::const_offset());
  // Seems like G5_method is live at the point this is used. So we could make this look consistent
  // and use it in the asserts.
  const Address access_flags (Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  { Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla java entries.
  // It could only be false for the specialized entries like accessor or empty which have
  // no extra locals, so the testing was a waste of time and the extra locals were always
  // initialized. We removed this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  const Register RconstMethod = O1;
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2 );
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );
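  // After the subtractions, O1 points at the first non-parameter local and O2
  // one slot below the last local. Worked illustration (assuming 8-byte stack
  // slots, i.e. Interpreter::logStackElementSize == 3): with 2 parameters and
  // 5 locals, O1 == Llocals - 16 and O2 == Llocals - 40; the loop below
  // pre-increments O2 and stores G0 from its branch delay slot, clearing
  // Llocals - 32, -24 and -16, i.e. exactly the three non-parameter locals.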

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // At this point in the method invocation the exception handler would try to
  // exit the monitor of a synchronized method which has not been entered yet,
  // so we set the thread-local variable _do_not_unlock_if_synchronized to
  // true. If an exception is thrown by the runtime, the exception handling
  // path (i.e. unlock_if_synchronized_method) will check this thread-local flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);
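  // dispatch_next(vtos) fetches the bytecode at Lbcp and jumps through the
  // vtos dispatch table; from here on execution proceeds template by template
  // and only re-enters this entry code via the out-of-line paths below.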


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ ba_short(profile_method_continue);
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  return entry;
}

//----------------------------------------------------------------------------------------------------
// Exceptions
void TemplateInterpreterGenerator::generate_throw_exception() {

  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // O0: exception

  // entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  __ verify_thread();
  // expression stack is undefined here
  // O0: exception, i.e. Oexception
  // Lbcp: exception bcp
  __ verify_oop(Oexception);


  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  // call C routine to find handler and jump to it
  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
  __ push_ptr(O1); // push exception for exception handler bytecodes

  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
  __ delayed()->nop();
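  // The VM call above returns the handler entry point in O0, while the pending
  // exception oop comes back in O1 (the oop-result register of call_VM), so the
  // push_ptr leaves the exception on the expression stack exactly where handler
  // bytecodes expect it.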


  // if the exception is not handled in the current frame
  // the frame is removed and the exception is rethrown
  // (i.e. exception continuation is _rethrow_exception)
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).

  // in current activation
  // tos: exception
  // Lbcp: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
  // Set the popframe_processing bit in popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ld(popframe_condition_addr, G3_scratch);
  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
  __ stw(G3_scratch, popframe_condition_addr);

  // Empty the expression stack, as in normal exception handling
  __ empty_expression_stack();
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);

    const Register Gtmp1 = G3_scratch;
    const Register Gtmp2 = G1_scratch;
    const Register RconstMethod = Gtmp1;
    const Address constMethod(Lmethod, Method::const_offset());
    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

    // Compute size of arguments for saving when returning to deoptimized caller
    __ ld_ptr(constMethod, RconstMethod);
    __ lduh(size_of_parameters, Gtmp1);
    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
    __ sub(Llocals, Gtmp1, Gtmp2);
    __ add(Gtmp2, wordSize, Gtmp2);
    // Save these arguments
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
    // Inform deoptimization that it is responsible for restoring these arguments
    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
    __ st(Gtmp1, popframe_condition_addr);

    // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
    __ ret();
    __ delayed()->restore(I5_savedSP, G0, SP);

    __ bind(caller_not_deoptimized);
  }
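  // Here the caller is known to be interpreted, so the current activation can
  // simply be popped via the window restore below and bytecode execution
  // resumed in the caller; no argument round-trip through the deoptimization
  // machinery is required.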

  // Clear the popframe condition flag
  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);

  // Get out of the current method (how this is done depends on the particular compiler calling
  // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ restore(I5_savedSP, G0, SP);
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

#if INCLUDE_JVMTI
  { Label L_done;

    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);

    __ br_null(G1_scratch, false, Assembler::pn, L_done);
    __ delayed()->nop();

    __ st_ptr(G1_scratch, Lesp, wordSize);
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  // Resume bytecode interpretation at the current bcp
  __ dispatch_next(vtos);
  // end of JVMTI PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
  __ pop_ptr(Oexception); // get exception

  // Intel has the following comment:
  //// remove the activation (without doing throws on illegalMonitorExceptions)
  // They remove the activation without checking for bad monitor state.
  // %%% We should make sure this is the right semantics before implementing.

  __ set_vm_result(Oexception);
  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);

  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);

  __ get_vm_result(Oexception);
  __ verify_oop(Oexception);

  const int return_reg_adjustment = frame::pc_return_offset;
  Address issuing_pc_addr(I7, return_reg_adjustment);

  // We are done with this activation frame; find out where to go next.
  // The continuation point will be an exception handler, which expects
  // the following registers set up:
  //
  // Oexception: exception
  // Oissuing_pc: the local call that threw the exception
  // Other On: garbage
  // In/Ln: the contents of the caller's register window
  //
  // We do the required restore at the last possible moment, because we
  // need to preserve some state across a runtime call.
  // (Remember that the caller activation is unknown--it might not be
  // interpreted, so things like Lscratch are useless in the caller.)

  // Although the Intel version uses call_C, we can use the more
  // compact call_VM. (The only real difference on SPARC is a
  // harmlessly ignored [re]set_last_Java_frame, compared with
  // the Intel code which lacks this.)
  __ mov(Oexception, Oexception->after_save());       // get exception in I0 so it will be on O0 after restore
  __ add(issuing_pc_addr, Oissuing_pc->after_save()); // likewise set I1 to a value local to the caller
  __ super_call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ JMP(O0, 0); // return exception handler in caller
  __ delayed()->restore(I5_savedSP, G0, SP);
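  // Note the register-window play above: the exception oop and issuing PC were
  // computed into I0/I1 (the ->after_save() registers). The JMP target (the
  // handler address returned in O0) is latched before the delay-slot restore
  // rotates the window, after which the old I0/I1 appear as O0/O1, exactly the
  // Oexception/Oissuing_pc setup the handler expects.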

  // (same old exception object is already in Oexception; see above)
  // Note that an "issuing PC" is actually the next PC after the call
}


//
// JVMTI ForceEarlyReturn support
//

address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ empty_expression_stack();
  __ load_earlyret_value(state);

  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);

  __ remove_activation(state,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();                       // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);

  return entry;
} // end of JVMTI ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
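  // Each typed entry point below materializes the tos-cached value onto the
  // expression stack and branches to the common label L, which doubles as the
  // vtos entry; the template body is then generated once, at vep, in vtos form.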
  aep = __ pc(); __ push_ptr(); __ ba_short(L);
  fep = __ pc(); __ push_f();   __ ba_short(L);
  dep = __ pc(); __ push_d();   __ ba_short(L);
  lep = __ pc(); __ push_l();   __ ba_short(L);
  iep = __ pc(); __ push_i();
  bep = cep = sep = iep;        // there aren't any
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

// --------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  __ push(state);
  __ mov(O7, Lscratch); // protect return address within interpreter

  // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
  __ mov( Otos_l2, G3_scratch );
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
  __ mov(Lscratch, O7); // restore return address
  __ pop(state);
  __ retl();
  __ delayed()->nop();

  return entry;
}


// helpers for generate_and_dispatch

void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
}


void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  AddressLiteral index   (&BytecodePairHistogram::_index);
  AddressLiteral counters((address) &BytecodePairHistogram::_counters);

  // get index, shift out old bytecode, bring in new bytecode, and store it
  // _index = (_index >> log2_number_of_codes) |
  //          (bytecode << log2_number_of_codes);
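  // Worked example, assuming log2_number_of_codes == 8 (256 codes): after
  // executing iload (0x15) followed by iadd (0x60), _index becomes
  // 0x15 | (0x60 << 8) == 0x6015, so every (previous, current) bytecode pair
  // selects its own counter in the _counters array.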

  __ load_contents(index, G4_scratch);
  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes, G3_scratch );
  __ or3( G3_scratch, G4_scratch, G4_scratch );
  __ store_contents(G4_scratch, index, G3_scratch);

  // bump bucket contents
  // _counters[_index] ++;

  __ set( counters, G3_scratch );                      // loads into G3_scratch
  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );   // index is word address
  __ add( G3_scratch, G4_scratch, G3_scratch );        // add in index
  __ ld ( G3_scratch, 0, G4_scratch );
  __ inc( G4_scratch );
  __ st ( G4_scratch, 0, G3_scratch );
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call(entry, relocInfo::none);
  __ delayed()->nop();
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  AddressLiteral counter(&BytecodeCounter::_counter_value);
  __ load_contents(counter, G3_scratch);
  AddressLiteral stop_at(&StopInterpreterAt);
  __ load_ptr_contents(stop_at, G4_scratch);
  __ cmp(G3_scratch, G4_scratch);
  __ breakpoint_trap(Assembler::equal, Assembler::icc);
}
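// Debug-build aid: when running with -XX:StopInterpreterAt=<n>, the code
// generated above compares the global bytecode counter against
// StopInterpreterAt and raises a breakpoint trap when the n'th executed
// bytecode is reached.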
#endif // not PRODUCT