/*
 * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/cppInterpreter.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
#ifdef SHARK
#include "shark/shark_globals.hpp"
#endif

#ifdef CC_INTERP

// This routine exists to make tracebacks look decent in the debugger
// while we are recursed in the frame manager/C++ interpreter.
// We could use an address in the frame manager but having
// frames look natural in the debugger is a plus.
extern "C" void RecursiveInterpreterActivation(interpreterState istate)
{
  //
  ShouldNotReachHere();
}


#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
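// e.g. STATE(_stack) is the Address operand for state->_stack, i.e.
// Address(state, byte_offset_of(BytecodeInterpreter, _stack)).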

Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to the unsynchronized
                                      // C++ interpreter entry point; this label holds that entry point.

// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
// so r13 is a better choice.

const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13);

// NEEDED for JVMTI?
// address AbstractInterpreter::_remove_activation_preserving_args_entry;

static address unctrap_frame_manager_entry  = NULL;

static address deopt_frame_manager_return_atos  = NULL;
static address deopt_frame_manager_return_btos  = NULL;
static address deopt_frame_manager_return_itos  = NULL;
static address deopt_frame_manager_return_ltos  = NULL;
static address deopt_frame_manager_return_ftos  = NULL;
static address deopt_frame_manager_return_dtos  = NULL;
static address deopt_frame_manager_return_vtos  = NULL;
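
// These entry points are filled in by generate_deopt_handling() below and
// handed out by CppInterpreter::deopt_entry() for each result tos state.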

int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : i = 4; break;
    case T_VOID   : i = 5; break;
    case T_FLOAT  : i = 8; break;
    case T_LONG   : i = 9; break;
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}

// Is this pc anywhere within code owned by the interpreter?
// This only works for pc that might possibly be exposed to frame
// walkers. It clearly misses all of the actual C++ interpreter
// implementation.
bool CppInterpreter::contains(address pc) {
  return (_code->contains(pc) ||
          pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
}


address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(rax);            break;
    case T_CHAR   : __ andl(rax, 0xFFFF);      break;
    case T_BYTE   : __ sign_extend_byte(rax);  break;
    case T_SHORT  : __ sign_extend_short(rax); break;
    case T_VOID   : // fall thru
    case T_LONG   : // fall thru
    case T_INT    : /* nothing to do */        break;

    case T_DOUBLE :
    case T_FLOAT  :
      {
        const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
        __ pop(t);                            // remove return address first
        // Must return a result for interpreter or compiler. In SSE
        // mode, results are returned in xmm0 and the FPU stack must
        // be empty.
        if (type == T_FLOAT && UseSSE >= 1) {
#ifndef _LP64
          // Load ST0
          __ fld_d(Address(rsp, 0));
          // Store as float and empty fpu stack
          __ fstp_s(Address(rsp, 0));
#endif // !_LP64
          // and reload
          __ movflt(xmm0, Address(rsp, 0));
        } else if (type == T_DOUBLE && UseSSE >= 2 ) {
          __ movdbl(xmm0, Address(rsp, 0));
        } else {
          // restore ST0
          __ fld_d(Address(rsp, 0));
        }
        // and pop the temp
        __ addptr(rsp, 2 * wordSize);
        __ push(t);                           // restore return address
      }
      break;
    case T_OBJECT :
      // retrieve result from frame
      __ movptr(rax, STATE(_oop_temp));
      // and verify it
      __ verify_oop(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

// tosca based result to c++ interpreter stack based result.
// Result goes to top of native stack.

#undef EXTEND  // SHOULD NOT BE NEEDED
address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  // A result is in the tosca (abi result) from either a native method call or compiled
  // code. Place this result on the java expression stack so C++ interpreter can use it.
  address entry = __ pc();

  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
  __ pop(t);                            // remove return address first
  switch (type) {
    case T_VOID:
      break;
    case T_BOOLEAN:
#ifdef EXTEND
      __ c2bool(rax);
#endif
      __ push(rax);
      break;
    case T_CHAR   :
#ifdef EXTEND
      __ andl(rax, 0xFFFF);
#endif
      __ push(rax);
      break;
    case T_BYTE   :
#ifdef EXTEND
      __ sign_extend_byte(rax);
#endif
      __ push(rax);
      break;
    case T_SHORT  :
#ifdef EXTEND
      __ sign_extend_short(rax);
#endif
      __ push(rax);
      break;
    case T_LONG   :
      __ push(rdx);                     // pushes useless junk on 64bit
      __ push(rax);
      break;
    case T_INT    :
      __ push(rax);
      break;
    case T_FLOAT  :
      // Result is in ST(0)/xmm0
      __ subptr(rsp, wordSize);
      if ( UseSSE < 1) {
        __ fstp_s(Address(rsp, 0));
      } else {
        __ movflt(Address(rsp, 0), xmm0);
      }
      break;
    case T_DOUBLE :
      __ subptr(rsp, 2*wordSize);
      if ( UseSSE < 2 ) {
        __ fstp_d(Address(rsp, 0));
      } else {
        __ movdbl(Address(rsp, 0), xmm0);
      }
      break;
    case T_OBJECT :
      __ verify_oop(rax);               // verify it
      __ push(rax);
      break;
    default       : ShouldNotReachHere();
  }
  __ jmp(t);                            // return from result handler
  return entry;
}
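
// Note: the converters above and below are installed per result type, indexed
// via AbstractInterpreter::BasicType_as_index(); see e.g. the use of
// CppInterpreter::_tosca_to_stack in generate_deopt_handling() at the end of
// this file.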

address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result on the java expression stack of the caller.
  //
  // The current interpreter activation in rsi/r13 is for the method just returning its
  // result. So we know that the result of this method is on the top of the current
  // execution stack (which is pre-pushed) and will be returned to the top of the caller
  // stack. The top of the caller's stack is the bottom of the locals of the current
  // activation.
  // Because of the way activations are managed by the frame manager the value of rsp is
  // below both the stack top of the current activation and naturally the stack top
  // of the calling activation. This enables this routine to leave the return address
  // to the frame manager on the stack and do a vanilla return.
  //
  // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
  // On Return: rsi/r13 - unchanged
  //            rax - new stack top for caller activation (i.e. activation in _prev_link)
  //
  // Can destroy rdx, rcx.
  //

  address entry = __ pc();
  const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
  switch (type) {
    case T_VOID:
      __ movptr(rax, STATE(_locals));           // pop parameters get new stack value
      __ addptr(rax, wordSize);                 // account for prepush before we return
      break;
    case T_FLOAT  :
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      // 1 word result
      __ movptr(rdx, STATE(_stack));
      __ movptr(rax, STATE(_locals));           // address for result
      __ movl(rdx, Address(rdx, wordSize));     // get result
      __ movptr(Address(rax, 0), rdx);          // and store it
      break;
    case T_LONG   :
    case T_DOUBLE :
      // return top two words on current expression stack to caller's expression stack
      // The caller's expression stack is adjacent to the current frame manager's interpreterState
      // except we allocated one extra word for this interpreterState so we won't overwrite it
      // when we return a two word result.
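      // Concretely: rax is dropped one word below locals[0] and becomes the
      // new caller stack top; the callee's stack words at [rcx + 2*wordSize]
      // and [rcx + wordSize] are stored to [rax + wordSize] and [rax].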

      __ movptr(rax, STATE(_locals));           // address for result
      __ movptr(rcx, STATE(_stack));
      __ subptr(rax, wordSize);                 // need an additional word besides locals[0]
      __ movptr(rdx, Address(rcx, 2*wordSize)); // get result word (junk in 64bit)
      __ movptr(Address(rax, wordSize), rdx);   // and store it
      __ movptr(rdx, Address(rcx, wordSize));   // get result word
      __ movptr(Address(rax, 0), rdx);          // and store it
      break;
    case T_OBJECT :
      __ movptr(rdx, STATE(_stack));
      __ movptr(rax, STATE(_locals));           // address for result
      __ movptr(rdx, Address(rdx, wordSize));   // get result
      __ verify_oop(rdx);                       // verify it
      __ movptr(Address(rax, 0), rdx);          // and store it
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);
  return entry;
}

address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
  // A result is in the java expression stack of the interpreted method that has just
  // returned. Place this result in the native abi that the caller expects.
  //
  // Similar to generate_stack_to_stack_converter above. Called at a similar time from the
  // frame manager except in this situation the caller is native code (c1/c2/call_stub)
  // and so rather than return result onto caller's java expression stack we return the
  // result in the expected location based on the native abi.
  // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
  // On Return: rsi/r13 - unchanged
  // Other registers changed [rax/rdx/ST(0) as needed for the result returned]

  address entry = __ pc();
  switch (type) {
    case T_VOID:
      break;
    case T_BOOLEAN:
    case T_CHAR   :
    case T_BYTE   :
    case T_SHORT  :
    case T_INT    :
      __ movptr(rdx, STATE(_stack));            // get top of stack
      __ movl(rax, Address(rdx, wordSize));     // get result word 1
      break;
    case T_LONG   :
      __ movptr(rdx, STATE(_stack));            // get top of stack
      __ movptr(rax, Address(rdx, wordSize));   // get result low word
      NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));) // get result high word
      break;
    case T_FLOAT  :
      __ movptr(rdx, STATE(_stack));            // get top of stack
      if ( UseSSE >= 1) {
        __ movflt(xmm0, Address(rdx, wordSize));
      } else {
        __ fld_s(Address(rdx, wordSize));       // push float result
      }
      break;
    case T_DOUBLE :
      __ movptr(rdx, STATE(_stack));            // get top of stack
      if ( UseSSE > 1) {
        __ movdbl(xmm0, Address(rdx, wordSize));
      } else {
        __ fld_d(Address(rdx, wordSize));       // push double result
      }
      break;
    case T_OBJECT :
      __ movptr(rdx, STATE(_stack));            // get top of stack
      __ movptr(rax, Address(rdx, wordSize));   // get result word 1
      __ verify_oop(rax);                       // verify it
      break;
    default       : ShouldNotReachHere();
  }
  __ ret(0);
  return entry;
}

address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
  // make it look good in the debugger
  return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
}

address CppInterpreter::deopt_entry(TosState state, int length) {
  address ret = NULL;
  if (length != 0) {
    switch (state) {
      case atos: ret = deopt_frame_manager_return_atos; break;
      case btos: ret = deopt_frame_manager_return_btos; break;
      case ctos:
      case stos:
      case itos: ret = deopt_frame_manager_return_itos; break;
      case ltos: ret = deopt_frame_manager_return_ltos; break;
      case ftos: ret = deopt_frame_manager_return_ftos; break;
      case dtos: ret = deopt_frame_manager_return_dtos; break;
      case vtos: ret = deopt_frame_manager_return_vtos; break;
    }
  } else {
    ret = unctrap_frame_manager_entry;  // re-execute the bytecode (e.g. uncommon trap)
  }
  assert(ret != NULL, "Not initialized");
  return ret;
}

// C++ Interpreter
void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
                                                                 const Register locals,
                                                                 const Register sender_sp,
                                                                 bool native) {

  // On entry the "locals" argument points to locals[0] (or where it would be in case no locals in
  // a static method). "state" contains any previous frame manager state which we must save a link
  // to in the newly generated state object. On return "state" is a pointer to the newly allocated
  // state object. We must allocate and initialize a new interpreterState object and the method
  // expression stack. Because the returned result (if any) of the method will be placed on the caller's
  // expression stack and this will overlap with locals[0] (and locals[1] if double/long) we must
  // be sure to leave space on the caller's stack so that this result will not overwrite values when
  // locals[0] and locals[1] do not exist (and in fact are return address and saved rbp). So when
  // we are non-native we in essence ensure that locals[0-1] exist. We play an extra trick in
  // non-product builds and initialize this last local with the previous interpreterState as
  // this makes things look real nice in the debugger.

  // State on entry
  // Assumes locals == &locals[0]
  // Assumes state == any previous frame manager state (assuming call path from c++ interpreter)
  // Assumes rax = return address
  // rcx == senders_sp
  // rbx == method
  // Modifies rcx, rdx, rax
  // Returns:
  // state == address of new interpreterState
  // rsp == bottom of method's expression stack.
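  //
  // Resulting frame layout (a sketch; lower addresses toward the bottom):
  //
  //   locals[n-1] ... locals[0]    <-- STATE(_locals)
  //   return address
  //   saved rbp                    <-- rbp
  //   BytecodeInterpreter          <-- state (rsi/r13)
  //   monitor entries (if any)     <-- STATE(_monitor_base) is just above entry [0]
  //   expression stack             <-- STATE(_stack_base) down to STATE(_stack_limit)
  //                                <-- rsp (bottom of expression stack)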

  const Address const_offset      (rbx, Method::const_offset());


  // On entry sp is the sender's sp. This includes the space for the arguments
  // that the sender pushed. If the sender pushed no args (a static) and the
  // caller returns a long then we need two words on the sender's stack which
  // are not present (although when we return and restore the full size stack the
  // space will be present). If we didn't allocate two words here then when
  // we "push" the result onto the caller's stack we would overwrite the return
  // address and the saved rbp. Not good. So simply allocate 2 words now
  // just to be safe. This is the "static long no_params() method" issue.
  // See Lo.java for a testcase.
  // We don't need this for native calls because they return result in
  // register and the stack is expanded in the caller before we store
  // the results on the stack.

  if (!native) {
#ifdef PRODUCT
    __ subptr(rsp, 2*wordSize);
#else /* PRODUCT */
    __ push((int32_t)NULL_WORD);
    __ push(state);                         // make it look like a real argument
#endif /* PRODUCT */
  }

  // Now that we are assured of space for stack result, setup typical linkage

  __ push(rax);
  __ enter();

  __ mov(rax, state);                                  // save current state

  __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
  __ mov(state, rsp);

  // rsi/r13 == state/locals rax == prevstate

  // initialize the "shadow" frame that we use since the C++ interpreter is not
  // directly recursive. Simpler to recurse but then we can't trim the expression
  // stack as we call new methods.
  __ movptr(STATE(_locals), locals);                    // state->_locals = locals()
  __ movptr(STATE(_self_link), state);                  // point to self
  __ movptr(STATE(_prev_link), rax);                    // state->_link = state on entry (NULL or previous state)
  __ movptr(STATE(_sender_sp), sender_sp);              // state->_sender_sp = sender_sp
#ifdef _LP64
  __ movptr(STATE(_thread), r15_thread);                // state->_thread = thread
#else
  __ get_thread(rax);                                   // get vm's javathread*
  __ movptr(STATE(_thread), rax);                       // state->_thread = thread
#endif // _LP64
  __ movptr(rdx, Address(rbx, Method::const_offset())); // get ConstMethod*
  __ lea(rdx, Address(rdx, ConstMethod::codes_offset())); // get code base
  if (native) {
    __ movptr(STATE(_bcp), (int32_t)NULL_WORD);         // state->_bcp = NULL
  } else {
    __ movptr(STATE(_bcp), rdx);                        // state->_bcp = codes()
  }
  __ xorptr(rdx, rdx);
  __ movptr(STATE(_oop_temp), rdx);                     // state->_oop_temp = NULL (only really needed for native)
  __ movptr(STATE(_mdx), rdx);                          // state->_mdx = NULL
  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ movptr(STATE(_constants), rdx);                    // state->_constants = constants()

  __ movptr(STATE(_method), rbx);                       // state->_method = method()
  __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry
  __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL


  __ movptr(STATE(_monitor_base), rsp);                 // set monitor block bottom (grows down) this would point to entry [0]
                                                        // entries run from -1..x where &monitor[x] ==

  {
    // Must not attempt to lock method until we enter interpreter as gc won't be able to find the
    // initial frame. However we allocate a free monitor so we don't have to shuffle the expression stack
    // immediately.

    // synchronize method
    const Address access_flags      (rbx, Method::access_flags_offset());
    const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
    Label not_synced;

    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, not_synced);

    // Allocate initial monitor and pre initialize it
    // get synchronization object
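    // For synchronized static methods the lock object is the class mirror
    // (via the constant pool's pool_holder); for instance methods it is the
    // receiver in locals[0].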

    Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(locals, 0));                 // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
    // add space for monitor & lock
    __ subptr(rsp, entry_size);                         // add space for a monitor entry
    __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
    __ bind(not_synced);
  }

  __ movptr(STATE(_stack_base), rsp);                   // set expression stack base ( == &monitors[-count])
  if (native) {
    __ movptr(STATE(_stack), rsp);                      // set current expression stack tos
    __ movptr(STATE(_stack_limit), rsp);
  } else {
    __ subptr(rsp, wordSize);                           // pre-push stack
    __ movptr(STATE(_stack), rsp);                      // set current expression stack tos

    // compute full expression stack limit

    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ load_unsigned_short(rdx, Address(rdx, ConstMethod::max_stack_offset())); // get size of expression stack in words
    __ negptr(rdx);                                     // so we can subtract in next step
    // Allocate expression stack
    __ lea(rsp, Address(rsp, rdx, Address::times_ptr, -Method::extra_stack_words()));
    __ movptr(STATE(_stack_limit), rsp);
  }

#ifdef _LP64
  // Make sure stack is properly aligned and sized for the abi
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64


}

// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// rbx: method
// rcx: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  Label done;
  const Address invocation_counter(rax,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());
  const Address backedge_counter  (rax,
                MethodCounters::backedge_counter_offset() +
                InvocationCounter::counter_offset());

  __ get_method_counters(rbx, rax, done);

  if (ProfileInterpreter) {
    __ incrementl(Address(rax,
            MethodCounters::interpreter_invocation_counter_offset()));
  }
  // Update standard invocation counters
  __ movl(rcx, invocation_counter);
  __ increment(rcx, InvocationCounter::count_increment);
  __ movl(invocation_counter, rcx);                  // save invocation count

  __ movl(rax, backedge_counter);                    // load backedge counter
  __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

  __ addl(rcx, rax);                                 // add both counters

  // profile_method is non-null only for interpreted methods so
  // profile_method != NULL == !native_call
  // BytecodeInterpreter only calls for native so code is elided.

  __ cmp32(rcx,
           ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
  __ jcc(Assembler::aboveEqual, *overflow);
  __ bind(done);
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // C++ interpreter on entry
  // rsi/r13 - new interpreter state pointer
  // rbp - interpreter frame pointer
  // rbx - method

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // rbx - method
  // rcx - rcvr (assuming there is one)
  // top of stack return address of interpreter caller
  // rsp - sender_sp

  // C++ interpreter only
  // rsi/r13 - previous interpreter state pointer

  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ movptr(rax, (int32_t)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);

  // for c++ interpreter can rsi really be munged?
  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));                // restore state
  __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method)));  // restore method
  __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals)));  // get locals pointer

  __ jmp(*do_continue, relocInfo::none);

}

void InterpreterGenerator::generate_stack_overflow_check(void) {
  // see if we've got enough room on the stack for locals plus overhead.
  // the expression stack grows down incrementally, so the normal guard
  // page mechanism will work for that.
  //
  // Registers live on entry:
  //
  // Asm interpreter
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: Method*

  // C++ Interpreter
  // rsi/r13: previous interpreter frame state object
  // rdi: &locals[0]
  // rcx: # of locals
  // rdx: number of additional locals this frame needs (what we must check)
  // rbx: Method*

  // destroyed on exit
  // rax

  // NOTE: the additional locals are also always pushed (this wasn't obvious in
  // generate_method_entry), so the guard should work for them too.
  //

  // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = (int)sizeof(BytecodeInterpreter);

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;

  // save rsi == caller's bytecode ptr (c++ previous interp. state)
  // QQQ problem here?? rsi overload????
  __ push(state);

  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi);

  NOT_LP64(__ get_thread(thread));

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  // Always give one monitor to allow us to start interp if sync method.
  // Any additional monitors need a check when moving the expression stack
  const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
  __ movptr(rax, Address(rbx, Method::const_offset()));
  __ load_unsigned_short(rax, Address(rax, ConstMethod::max_stack_offset())); // get size of expression stack in words
  __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor+Method::extra_stack_words()));
  __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
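  // rax now holds (roughly) the worst-case frame footprint in bytes:
  // expression stack + one monitor + additional locals + interpreter state overhead.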

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)0);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, (int32_t)0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // We should have a magic number here for the size of the c++ interpreter frame.
  // We can't actually tell this ahead of time. The debug version size is around 3k
  // product is 1k and fastdebug is 4k
  const int slop = 6 * K;

  // Use the maximum number of pages we might bang.
  const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
                                                                              (StackRedPages+StackYellowPages);
  // Only need this if we are stack banging which is temporary while
  // we're debugging.
  __ addptr(rax, slop + 2*max_pages * page_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);
  __ jcc(Assembler::above, after_frame_check_pop);

  __ pop(state);  // get c++ prev state.

  // throw exception return address becomes throwing pc
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // all done with frame size check
  __ bind(after_frame_check_pop);
  __ pop(state);

  __ bind(after_frame_check);
}

// Find preallocated monitor and lock method (C++ interpreter)
// rbx - Method*
//
void InterpreterGenerator::lock_method(void) {
  // assumes state == rsi/r13 == pointer to current interpreterState
  // minimally destroys rax, rdx|c_rarg1, rdi
  //
  // synchronize method
  const int entry_size            = frame::interpreter_frame_monitor_size() * wordSize;
  const Address access_flags      (rbx, Method::access_flags_offset());

  const Register monitor  = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

  // find initial monitor i.e. monitors[-1]
  __ movptr(monitor, STATE(_monitor_base));             // get monitor bottom limit
  __ subptr(monitor, entry_size);                       // point to initial monitor

#ifdef ASSERT
  { Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT
  // get synchronization object
  { Label done;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(rax, access_flags);
    __ movptr(rdi, STATE(_locals));                     // prepare to get receiver (assume common case)
    __ testl(rax, JVM_ACC_STATIC);
    __ movptr(rax, Address(rdi, 0));                    // get receiver (assume this is frequent case)
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));
    __ bind(done);
  }
#ifdef ASSERT
  { Label L;
    __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));   // correct object?
    __ jcc(Assembler::equal, L);
    __ stop("wrong synchronization object");
    __ bind(L);
  }
#endif // ASSERT
  // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
  __ lock_object(monitor);
}

// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry

address InterpreterGenerator::generate_accessor_entry(void) {

  // rbx: Method*

  // rsi/r13: senderSP must be preserved for slow path, set SP to it on fast path

  Label xreturn_path;

  // do fastpath for resolved accessor methods
  if (UseFastAccessorMethods) {

    address entry_point = __ pc();

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    __ jcc(Assembler::notEqual, slow_path);
    // ASM/C++ Interpreter
    // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
    // Note: We can only use this code if the getfield has been resolved
    //       and if we don't have a null-pointer exception => check for
    //       these conditions first and use slow path if necessary.
    // rbx: method
    // rcx: receiver
    __ movptr(rax, Address(rsp, wordSize));

    // check if local 0 != NULL and read field
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // read first instruction word and extract bytecode @ 1 and index @ 2
    __ movptr(rdx, Address(rbx, Method::const_offset()));
    __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
    __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
    // Shift codes right to get the index on the right.
    // The bytecode fetched looks like <index><0xb4><0x2a>
    __ shrl(rdx, 2*BitsPerByte);
    __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
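    // rdx now holds the constant pool cache index scaled to entry size in
    // words; the cache loads below apply Address::times_ptr to form the byte
    // offset.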
    __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));

    // rax: local 0
    // rbx: method
    // rcx: receiver - do not destroy since it is needed for slow path!
    // rcx: scratch
    // rdx: constant pool cache index
    // rdi: constant pool cache
    // rsi/r13: sender sp

    // check if getfield has been resolved and read constant pool cache entry
    // check the validity of the cache entry by testing whether _indices field
    // contains Bytecode::_getfield in b1 byte.
    assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
    __ movl(rcx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
    __ shrl(rcx, 2*BitsPerByte);
    __ andl(rcx, 0xFF);
    __ cmpl(rcx, Bytecodes::_getfield);
    __ jcc(Assembler::notEqual, slow_path);

    // Note: constant pool entry is not valid before bytecode is resolved
    __ movptr(rcx,
              Address(rdi,
                      rdx,
                      Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
    __ movl(rdx,
            Address(rdi,
                    rdx,
                    Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));

    Label notByte, notBool, notShort, notChar;
    const Address field_address (rax, rcx, Address::times_1);

    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Use the type from the constant pool cache
    __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
    // Make sure we don't need to mask rdx after the above shift
    ConstantPoolCacheEntry::verify_tos_state_shift();
#ifdef _LP64
    Label notObj;
    __ cmpl(rdx, atos);
    __ jcc(Assembler::notEqual, notObj);
    // atos
    __ movptr(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notObj);
#endif // _LP64
    __ cmpl(rdx, ztos);
    __ jcc(Assembler::notEqual, notBool);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notBool);
    __ cmpl(rdx, btos);
    __ jcc(Assembler::notEqual, notByte);
    __ load_signed_byte(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notByte);
    __ cmpl(rdx, stos);
    __ jcc(Assembler::notEqual, notShort);
    __ load_signed_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notShort);
    __ cmpl(rdx, ctos);
    __ jcc(Assembler::notEqual, notChar);
    __ load_unsigned_short(rax, field_address);
    __ jmp(xreturn_path);

    __ bind(notChar);
#ifdef ASSERT
    Label okay;
#ifndef _LP64
    __ cmpl(rdx, atos);
    __ jcc(Assembler::equal, okay);
#endif // _LP64
    __ cmpl(rdx, itos);
    __ jcc(Assembler::equal, okay);
    __ stop("what type is this?");
    __ bind(okay);
#endif // ASSERT
    // All the rest are a 32 bit wordsize
    __ movl(rax, field_address);

    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ pop(rdi);                               // get return address
    __ mov(rsp, sender_sp_on_entry);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    // We will enter the C++ interpreter looking like it was called by the
    // call_stub; this will cause it to return a tosca result to the invoker,
    // which might have been the C++ interpreter itself.

    __ jmp(fast_accessor_slow_entry_path);
    return entry_point;

  } else {
    return NULL;
  }

}

address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // We need to have a routine that generates code to:
    //   * load the value in the referent field
    //   * pass that value to the pre-barrier.
    //
    // In the case of G1 this will record the value of the
    // referent in an SATB buffer if marking is active.
    // This will cause concurrent marking to mark the referent
    // field as live.
    Unimplemented();
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return generate_accessor_entry();
}

//
// C++ Interpreter stub for calling a native method.
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup but still has the pointer to
// an interpreter state.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // rbx: Method*
  // rcx: receiver (unused)
  // rsi/r13: previous interpreter state (if called from C++ interpreter) must be
  //      preserved in any case. If called via c1/c2/call_stub rsi/r13 is junk
  //      but harmless to save/restore.
  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());

  // rsi/r13 == state/locals rdi == prevstate
  const Register locals = rdi;

  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  __ pop(rax);                                       // get return address
  // for natives the size of locals is zero

  // compute beginning of parameters/locals

  __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
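  // locals now points at the first parameter (Java locals[0]), the
  // highest-addressed of the rcx parameter words the caller pushed.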

  // initialize fixed part of activation frame

  // Assumes rax = return address

  // allocate and initialize new interpreterState and method expression stack
  // IN(locals) -> locals
  // IN(state) -> previous frame manager state (NULL from stub/c1/c2)
  // destroys rax, rcx, rdx
  // OUT (state) -> new interpreterState
  // OUT(rsp) -> bottom of method's expression stack

  // save sender_sp
  __ mov(rcx, sender_sp_on_entry);
  // start with NULL previous state
  __ movptr(state, (int32_t)NULL_WORD);
  generate_compute_interpreter_state(state, locals, rcx, true);

#ifdef ASSERT
  { Label L;
    __ movptr(rax, STATE(_stack_base));
#ifdef _LP64
    // duplicate the alignment rsp got after setting stack_base
    __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax);
  NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which haven't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Address do_not_unlock_if_synchronized(unlock_thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif


  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;

  __ bind(continue_after_compile);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ movl(rax, STATE(_thread));)      // get thread
  __ movbool(do_not_unlock_if_synchronized, false);


  // check for synchronized native methods
  //
  // Note: This must happen *after* invocation counter check, since
  //       when overflow happens, the method should not be locked.
  if (synchronized) {
    // potentially kills rax, rcx, rdx, rdi
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
  const Register t      = InterpreterRuntime::SignatureHandlerGenerator::temp();    // rcx|rscratch1

  // allocate space for parameters
  __ movptr(method, STATE(_method));
  __ verify_method_ptr(method);
  // note: constMethod/size_of_parameters declared at the top of this function
  // name rbx/rcx, so use explicit Addresses based on method/t here rather
  // than redeclaring them in the same scope.
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
  __ shll(t, 2);
#ifdef _LP64
  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#else
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#endif // _LP64
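
  // Outgoing native frame (a sketch): on 32-bit, [rsp + 0] will receive the
  // JNIEnv* and [rsp + wordSize] the mirror handle for static methods, with
  // the remaining parameter words above them; on 64-bit the first arguments
  // travel in c_rarg0/c_rarg1 instead (see below).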

  // get signature handler
  Label pending_exception_present;

  { Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
    __ movptr(method, STATE(_method));
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, pending_exception_present);
    __ verify_method_ptr(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }
#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                                   // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //

  const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from();
  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator will blow RBX
  // sometime, so we must reload it after the call.
  __ movptr(from_ptr, STATE(_locals));  // get the from pointer
  __ call(t);
  __ movptr(method, STATE(_method));
  __ verify_method_ptr(method);

  // result handler is in rax
  // set result handler
  __ movptr(STATE(_result_handler), rax);


  // get native function entry point
  { Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ testptr(rax, rax);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
    __ movptr(method, STATE(_method));
    __ verify_method_ptr(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass mirror handle if static call
  { Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation object
    __ movptr(STATE(_oop_temp), t);
    // pass handle to mirror
#ifdef _LP64
    __ lea(c_rarg1, STATE(_oop_temp));
#else
    __ lea(t, STATE(_oop_temp));
    __ movptr(Address(rsp, wordSize), t);
#endif // _LP64
    __ bind(L);
  }
#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                                   // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //

  // pass JNIEnv
#ifdef _LP64
  __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset()));
#else
  __ movptr(thread, STATE(_thread));          // get thread
  __ lea(t, Address(thread, JavaThread::jni_environment_offset()));

  __ movptr(Address(rsp, 0), t);
#endif // _LP64

#ifdef ASSERT
  {
    Label L;
    __ push(t);
    __ get_thread(t);                                   // get vm's javathread*
    __ cmpptr(t, STATE(_thread));
    __ jcc(Assembler::equal, L);
    __ int3();
    __ bind(L);
    __ pop(t);
  }
#endif //

#ifdef ASSERT
  { Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native (we save the return address in the thread, since it might not
  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
  // points into the right code segment. It does not have to be the correct return pc.

  __ set_last_Java_frame(thread, noreg, rbp, __ pc());

  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);

  __ call(rax);

  // result potentially in rdx:rax or ST0
  __ movptr(method, STATE(_method));
  NOT_LP64(__ movptr(thread, STATE(_thread));)                  // get thread

  // The potential result is in ST(0) & rdx:rax
  // With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then
  // we do the appropriate stuff for returning the result. rdx:rax must always be saved because just about
  // anything we do here will destroy it, st(0) is only saved if we re-enter the vm where it would
  // be destroyed.
  // It is safe to do these pushes because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler
  { Label Lpush, Lskip;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(STATE(_result_handler), float_handler.addr());
    __ jcc(Assembler::equal, Lpush);
    __ cmpptr(STATE(_result_handler), double_handler.addr());
    __ jcc(Assembler::notEqual, Lskip);
    __ bind(Lpush);
    __ subptr(rsp, 2*wordSize);
    if ( UseSSE < 2 ) {
      __ fstp_d(Address(rsp, 0));
    } else {
      __ movdbl(Address(rsp, 0), xmm0);
    }
    __ bind(Lskip);
  }

  // save rax:rdx for potential use by result handler.
  __ push(rax);
#ifndef _LP64
  __ push(rdx);
#endif // _LP64

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
  if (os::is_MP()) {
    // Write serialization page so VM thread can do a pseudo remote membar.
    // We use the current thread pointer to calculate a thread specific
    // offset to write to within the page. This minimizes bus traffic
    // due to cache line collision.
    __ serialize_memory(thread, rcx);
  }

  // check for safepoint operation in progress and/or pending suspend requests
  { Label Continue;

    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    // Threads running native code are expected to self-suspend
    // when leaving the _thread_in_native state. We need to check for
    // pending suspend requests here.
    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception and forward it
    // and never return here preventing us from clearing _last_native_pc down below.
    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
    // preserved and correspond to the bcp/locals pointers.
    //

    ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                                           thread);
    __ increment(rsp, wordSize);

    __ movptr(method, STATE(_method));
    __ verify_method_ptr(method);
    __ movptr(thread, STATE(_thread));                       // get thread

    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);

  // If result was an oop then unbox and save it in the frame
  { Label L;
    Label no_oop, store_result;
    ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT));
    __ cmpptr(STATE(_result_handler), oop_handler.addr());
    __ jcc(Assembler::notEqual, no_oop);
#ifndef _LP64
    __ pop(rdx);
#endif // _LP64
    __ pop(rax);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
    // unbox
    __ movptr(rax, Address(rax, 0));
    __ bind(store_result);
    __ movptr(STATE(_oop_temp), rax);
    // keep stack depth as expected by pushing oop which will eventually be discarded
    __ push(rax);
#ifndef _LP64
    __ push(rdx);
#endif // _LP64
    __ bind(no_oop);
  }

  {
    Label no_reguard;
    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
    __ jcc(Assembler::notEqual, no_reguard);

    __ pusha();
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
    __ popa();

    __ bind(no_reguard);
  }


  // QQQ Seems like for native methods we simply return and the caller will see the pending
  // exception and do the right thing. Certainly the interpreter will, don't know about
  // compiled methods.
  // Seems that the answer to above is no this is wrong. The old code would see the exception
  // and forward it before doing the unlocking and notifying jvmdi that method has exited.
  // This seems wrong need to investigate the spec.

  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ bind(pending_exception_present);

    // There are potential results on the stack (rax/rdx, ST(0)); we ignore these and simply
    // return and let caller deal with exception. This skips the unlocking here which
    // seems wrong but seems to be what asm interpreter did. Can't find this in the spec.
    // Note: must preserve method in rbx
    //

    // remove activation

    __ movptr(t, STATE(_sender_sp));
    __ leave();                                  // remove frame anchor
    __ pop(rdi);                                 // get return address
    __ movptr(state, STATE(_prev_link));         // get previous state for return
    __ mov(rsp, t);                              // set sp to sender sp
    __ push(rdi);                                // push throwing pc
    // This skips unlocking!! This seems to be what the asm interpreter does but seems
    // very wrong. Not clear if this violates the spec.
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    __ bind(L);
  }

  // do unlocking if necessary
  { Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with interpreter macro assembler implementation
    { Label unlock;
      const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
      // BasicObjectLock will be first in list, since this is a synchronized method. However, need
      // to check that the object has not been unlocked by an explicit monitorexit bytecode.
      __ movptr(monitor, STATE(_monitor_base));
      __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize);  // address of initial monitor

      __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw exception
      __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(monitor);
      // unlock can blow rbx so restore it for path that needs it below
      __ movptr(method, STATE(_method));
    }
    __ bind(L);
  }

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
#ifndef _LP64
  __ pop(rdx);
#endif // _LP64
  __ pop(rax);
  __ movptr(t, STATE(_result_handler));       // get result handler
  __ call(t);                                 // call result handler to convert to tosca form

  // remove activation

  __ movptr(t, STATE(_sender_sp));

  __ leave();                                  // remove frame anchor
  __ pop(rdi);                                 // get return address
  __ movptr(state, STATE(_prev_link));         // get previous state for return (if c++ interpreter was caller)
  __ mov(rsp, t);                              // set sp to sender sp
  __ jmp(rdi);

  // invocation counter overflow
  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

// Generate entries that will put a result type index into rcx
void CppInterpreterGenerator::generate_deopt_handling() {

  Label return_from_deopt_common;

  // Generate entries that will put a result type index into rcx
  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_atos = __ pc();

  // rax is live here
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_OBJECT)); // Result stub address array index
  __ jmp(return_from_deopt_common);


  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_btos = __ pc();

  // rax is live here
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_BOOLEAN)); // Result stub address array index
  __ jmp(return_from_deopt_common);

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_itos = __ pc();

  // rax is live here
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_INT)); // Result stub address array index
  __ jmp(return_from_deopt_common);

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ltos = __ pc();
  // rax,rdx are live here
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_LONG)); // Result stub address array index
  __ jmp(return_from_deopt_common);

  // deopt needs to jump to here to enter the interpreter (return a result)

  deopt_frame_manager_return_ftos = __ pc();
  // st(0) is live here
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
  __ jmp(return_from_deopt_common);

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_dtos = __ pc();

  // st(0) is live here
  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
  __ jmp(return_from_deopt_common);

  // deopt needs to jump to here to enter the interpreter (return a result)
  deopt_frame_manager_return_vtos = __ pc();

  __ movl(rcx, AbstractInterpreter::BasicType_as_index(T_VOID));

  // Deopt return common
  // An index is present in rcx that lets us move any possible result being
  // returned to the interpreter's stack.
  //
  // Because we have a full sized interpreter frame on the youngest
  // activation the stack is pushed too deep to share the tosca to
  // stack converters directly. We shrink the stack to the desired
  // amount, then push the result, and then re-extend the stack.
  // We could have the code in size_activation layout a short
  // frame for the top activation but that would look different
  // than say sparc (which needs a full size activation because
  // the windows are in the way). Really it could be short? QQQ
  //
  __ bind(return_from_deopt_common);

  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));

  // setup rsp so we can push the "result" as needed.
  __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed)
  __ addptr(rsp, wordSize);      // undo prepush
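  // Note on the "prepushed" convention (also described at the call_method
  // handling further down): STATE(_stack) points at the next free expression
  // stack slot, one word below the top element, so adding one word here
  // positions rsp where the tosca-to-stack converter expects the stack top.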

  ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
  // Address index(noreg, rcx, Address::times_ptr);
  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
  // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
  __ call(rcx); // call result converter
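  // Conceptually, the indexed load and call above amount to
  // (a sketch, not literal code):
  //   CppInterpreter::_tosca_to_stack[result_type_index]();
  // i.e. one converter stub per BasicType, each of which moves the
  // native-abi result onto the java expression stack.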

  __ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume);
  __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
  __ movptr(STATE(_stack), rsp);        // inform interpreter of new stack depth (parameters removed,
                                        // result if any on stack already)
  __ movptr(rsp, STATE(_stack_limit));  // restore expression stack to full depth
}

// Generate the code to handle a more_monitors message from the c++ interpreter
void CppInterpreterGenerator::generate_more_monitors() {


  Label entry, loop;
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  // 1. compute new pointers                  // rsp: old expression stack top
  __ movptr(rdx, STATE(_stack_base));         // rdx: old expression stack bottom
  __ subptr(rsp, entry_size);                 // move expression stack top limit
  __ subptr(STATE(_stack), entry_size);       // update interpreter stack top
  __ subptr(STATE(_stack_limit), entry_size); // inform interpreter
  __ subptr(rdx, entry_size);                 // move expression stack bottom
  __ movptr(STATE(_stack_base), rdx);         // inform interpreter
  __ movptr(rcx, STATE(_stack));              // set start value for copy loop
  __ jmp(entry);
  // 2. move expression stack contents
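  // The loop below is in effect (a sketch, not literal code):
  //   memmove(old_stack - entry_size, old_stack, old_stack_base - old_stack);
  // i.e. slide every live expression stack word down by one monitor's size.
  // Copying word-by-word from the lowest address upward is safe here because
  // the destination is entry_size bytes below the source.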
  __ bind(loop);
  __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
  __ movptr(Address(rcx, 0), rbx);          // and store it at new location
  __ addptr(rcx, wordSize);                 // advance to next word
  __ bind(entry);
  __ cmpptr(rcx, rdx);                      // check if bottom reached
  __ jcc(Assembler::notEqual, loop);        // if not at bottom then copy next word
  // now zero the slot so we can find it.
  __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
  __ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors);
}


// Initial entry to C++ interpreter from the call_stub.
// This entry point is called the frame manager since it handles the generation
// of interpreter activation frames via requests directly from the vm (via call_stub)
// and via requests from the interpreter. The requests from the call_stub happen
// directly thru the entry point. Requests from the interpreter happen via returning
// from the interpreter and examining the message the interpreter has returned to
// the frame manager. The frame manager can take the following requests:

// NO_REQUEST - error, should never happen.
// MORE_MONITORS - need a new monitor. Shuffle the expression stack on down and
//                 allocate a new monitor.
// CALL_METHOD - setup a new activation to call a new method. Very similar to what
//               happens during entry via the call stub.
// RETURN_FROM_METHOD - remove an activation. Return to interpreter or call stub.
//
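// In outline the frame manager is a loop around BytecodeInterpreter::run
// (a sketch, not literal code):
//
//   for (;;) {
//     BytecodeInterpreter::run(state);     // or runWithChecks for JVMTI
//     switch (state->_msg) {
//       case more_monitors:      /* grow monitor area, re-enter */        break;
//       case call_method:        /* build callee activation, re-enter */  break;
//       case return_from_method: /* unwind, convert result */             break;
//       case throwing_exception: /* unwind or dispatch */                 break;
//       case do_osr:             /* enter OSR nmethod */                  break;
//       default:                 ShouldNotReachHere();
//     }
//   }
//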
// Arguments:
//
// rbx: Method*
// rcx: receiver - unused (retrieved from stack as needed)
// rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2)
//
//
// Stack layout at entry
//
// [ return address   ] <--- rsp
// [ parameter n      ]
//   ...
// [ parameter 1      ]
// [ expression stack ]
//
//
// We are free to blow any registers we like because the call_stub which brought us here
// initially has preserved the callee save registers already.
//
//

static address interpreter_frame_manager = NULL;

address InterpreterGenerator::generate_normal_entry(bool synchronized) {

  // rbx: Method*
  // rsi/r13: sender sp

  // Because we redispatch "recursive" interpreter entries thru this same entry point
  // the "input" register usage is a little strange and not what you expect coming
  // from the call_stub. From the call stub rsi/rdi (current/previous) interpreter
  // state are NULL but on "recursive" dispatches they are what you'd expect.
  // rsi: current interpreter state (C++ interpreter) must preserve (null from call_stub/c1/c2)


  // A single frame manager is plenty as we don't specialize for synchronized. We could and
  // the code is pretty much ready. Would need to change the test below and for good measure
  // modify generate_interpreter_state to only do the (pre) sync stuff for synchronized
  // routines. Not clear this is worth it yet.

  if (interpreter_frame_manager) return interpreter_frame_manager;

  address entry_point = __ pc();

  // Fast accessor methods share this entry point.
  // This works because frame manager is in the same codelet
  if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);

  Label dispatch_entry_2;
  __ movptr(rcx, sender_sp_on_entry);
  __ movptr(state, (int32_t)NULL_WORD); // no current activation

  __ jmp(dispatch_entry_2);

  const Register locals = rdi;

  Label re_dispatch;

  __ bind(re_dispatch);

  // save sender sp (doesn't include return address)
  __ lea(rcx, Address(rsp, wordSize));

  __ bind(dispatch_entry_2);

  // save sender sp
  __ push(rcx);

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());

  // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));

  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words

  __ subptr(rdx, rcx); // rdx = no. of additional locals

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check(); // C++

  // c++ interpreter does not use stack banging or any implicit exceptions
  // leave for now to verify that check is proper.
  bang_stack_shadow_pages(false);


  // compute beginning of parameters (rdi)
  __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize));

  // save sender's sp
  // __ movl(rcx, rsp);

  // get sender's sp
  __ pop(rcx);

  // get return address
  __ pop(rax);

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);                 // (32bit ok)
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int32_t)NULL_WORD);        // initialize local variables
    __ decrement(rdx);                  // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }


  // Assumes rax = return address

  // allocate and initialize new interpreterState and method expression stack
  // IN(locals) -> locals
  // IN(state) -> any current interpreter activation
  // destroys rax, rcx, rdx, rdi
  // OUT (state) -> new interpreterState
  // OUT(rsp) -> bottom of method's expression stack

  generate_compute_interpreter_state(state, locals, rcx, false);

  // Call interpreter

  Label call_interpreter;
  __ bind(call_interpreter);

  // c++ interpreter does not use stack banging or any implicit exceptions
  // leave for now to verify that check is proper.
  bang_stack_shadow_pages(false);


  // Call interpreter enter here if message is
  // set and we know stack size is valid

  Label call_interpreter_2;

  __ bind(call_interpreter_2);

  {
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);

#ifdef _LP64
    __ mov(c_rarg0, state);
#else
    __ push(state); // push arg to interpreter
    __ movptr(thread, STATE(_thread));
#endif // _LP64

    // We can setup the frame anchor with everything we want at this point
    // as we are thread_in_Java and no safepoints can occur until we go to
    // vm mode. We do have to clear flags on return from vm but that is it.
    //
    __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
    __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp);

    // Call the interpreter

    RuntimeAddress normal(CAST_FROM_FN_PTR(address, BytecodeInterpreter::run));
    RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks));

    __ call(JvmtiExport::can_post_interpreter_events() ? checking : normal);
    NOT_LP64(__ pop(rax);) // discard parameter to run
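    // BytecodeInterpreter::run is an ordinary C++ function taking the
    // interpreterState as its single argument; runWithChecks is the variant
    // selected above when a JVMTI agent may post interpreter events.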
    //
    // state is preserved since it is callee saved
    //

    // reset_last_Java_frame

    NOT_LP64(__ movl(thread, STATE(_thread));)
    __ reset_last_Java_frame(thread, true, true);
  }

  // examine msg from interpreter to determine next action

  __ movl(rdx, STATE(_msg)); // Get new message

  Label call_method;
  Label return_from_interpreted_method;
  Label throw_exception;
  Label bad_msg;
  Label do_OSR;

  __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method);
  __ jcc(Assembler::equal, call_method);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method);
  __ jcc(Assembler::equal, return_from_interpreted_method);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr);
  __ jcc(Assembler::equal, do_OSR);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception);
  __ jcc(Assembler::equal, throw_exception);
  __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors);
  __ jcc(Assembler::notEqual, bad_msg);

  // Allocate more monitor space, shuffle expression stack....

  generate_more_monitors();

  __ jmp(call_interpreter);

  // uncommon trap needs to jump to here to enter the interpreter (re-execute current bytecode)
  unctrap_frame_manager_entry = __ pc();
  //
  // Load the registers we need.
  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
  __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
  __ jmp(call_interpreter_2);



  //=============================================================================
  // Returning from a compiled method into a deopted method. The bytecode at the
  // bcp has completed. The result of the bytecode is in the native abi (the tosca
  // for the template based interpreter). Any stack space that was used by the
  // bytecode that has completed has been removed (e.g. parameters for an invoke)
  // so all that we have to do is place any pending result on the expression stack
  // and resume execution on the next bytecode.


  generate_deopt_handling();
  __ jmp(call_interpreter);


  // Current frame has caught an exception we need to dispatch to the
  // handler. We can get here because a native interpreter frame caught
  // an exception in which case there is no handler and we must rethrow.
  // If it is a vanilla interpreted frame then we simply drop into the
  // interpreter and let it do the lookup.

  Interpreter::_rethrow_exception_entry = __ pc();
  // rax: exception
  // rdx: return address/pc that threw exception

  Label return_with_exception;
  Label unwind_and_forward;

  // restore state pointer.
  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));

  __ movptr(rbx, STATE(_method)); // get method
#ifdef _LP64
  __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
#else
  __ movl(rcx, STATE(_thread));   // get thread

  // Store exception where the interpreter will expect it
  __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
#endif // _LP64

  // is current frame vanilla or native?

  __ movl(rdx, access_flags);
  __ testl(rdx, JVM_ACC_NATIVE);
  __ jcc(Assembler::zero, return_with_exception); // vanilla interpreted frame, handle directly

  // We drop thru to unwind a native interpreted frame with a pending exception.
  // We jump here for the initial interpreter frame with exception pending.
  // We unwind the current activation and forward it to our caller.

  __ bind(unwind_and_forward);

  // unwind rbp, restore stack to unextended value and re-push return address

  __ movptr(rcx, STATE(_sender_sp));
  __ leave();
  __ pop(rdx);
  __ mov(rsp, rcx);
  __ push(rdx);
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // Return point from a call which returns a result in the native abi
  // (c1/c2/jni-native). This result must be processed onto the java
  // expression stack.
  //
  // A pending exception may be present, in which case no result is present.

  Label resume_interpreter;
  Label do_float;
  Label do_double;
  Label done_conv;

  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if (UseSSE < 2) {
    __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
    __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed
    __ movl(rcx, Address(rbx, Method::result_index_offset()));
    __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT));  // Result stub address array index
    __ jcc(Assembler::equal, do_float);
    __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE)); // Result stub address array index
    __ jcc(Assembler::equal, do_double);
#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
    __ empty_FPU_stack();
#endif // !_LP64 || COMPILER1 || !COMPILER2
    __ jmp(done_conv);

    __ bind(do_float);
#ifdef COMPILER2
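    // Free every FPU register except ST(0), which holds the float result.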
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
#endif // COMPILER2
    __ jmp(done_conv);
    __ bind(do_double);
#ifdef COMPILER2
    for (int i = 1; i < 8; i++) {
      __ ffree(i);
    }
#endif // COMPILER2
    __ jmp(done_conv);
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
    __ jmp(done_conv);
  }

  // Return point to interpreter from compiled/native method
  InternalAddress return_from_native_method(__ pc());

  __ bind(done_conv);


  // Result if any is in tosca. The java expression stack is in the state that the
  // calling convention left it (i.e. params may or may not be present).
  // Copy the result from tosca and place it on the java expression stack.

  // Restore rsi/r13 as compiled code may not preserve it

  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));

  // restore stack to what we had when we left (in case i2c extended it)

  __ movptr(rsp, STATE(_stack));
  __ lea(rsp, Address(rsp, wordSize));

  // If there is a pending exception then we don't really have a result to process

#ifdef _LP64
  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
#else
  __ movptr(rcx, STATE(_thread)); // get thread
  __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
#endif // _LP64
  __ jcc(Assembler::notZero, return_with_exception);

  // get method just executed
  __ movptr(rbx, STATE(_result._to_call._callee));

  // callee left args on top of expression stack, remove them
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));

  __ lea(rsp, Address(rsp, rcx, Address::times_ptr));

  __ movl(rcx, Address(rbx, Method::result_index_offset()));
  ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
  // Address index(noreg, rax, Address::times_ptr);
  __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
  // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
  __ call(rcx); // call result converter
  __ jmp(resume_interpreter);

  // An exception is being caught on return to a vanilla interpreter frame.
  // Empty the stack and resume interpreter

  __ bind(return_with_exception);

  // Exception present, empty stack
  __ movptr(rsp, STATE(_stack_base));
  __ jmp(resume_interpreter);

  // Return from interpreted method: we return a result appropriate to the caller (i.e. "recursive"
  // interpreter call, or native) and unwind this interpreter activation.
  // All monitors should be unlocked.

  __ bind(return_from_interpreted_method);

  Label return_to_initial_caller;

  __ movptr(rbx, STATE(_method));                            // get method just executed
  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);          // returning from "recursive" interpreter call?
  __ movl(rax, Address(rbx, Method::result_index_offset())); // get result type index
  __ jcc(Assembler::equal, return_to_initial_caller);        // back to native code (call_stub/c1/c2)

  // Copy result to caller's java stack
  ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack);
  // Address index(noreg, rax, Address::times_ptr);

  __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr)));
  // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack)));
  __ call(rax); // call result converter

  Label unwind_recursive_activation;
  __ bind(unwind_recursive_activation);

  // Returning to an interpreter method from a "recursive" interpreter call:
  // the result converter left rax pointing to the top of the java stack for
  // the method we are returning to. Now all we must do is unwind the state
  // from the completed call.

  __ movptr(state, STATE(_prev_link)); // unwind state
  __ leave();                          // pop the frame
  __ mov(rsp, rax);                    // unwind stack to remove args

  // Resume the interpreter. The current frame contains the current interpreter
  // state object.
  //

  __ bind(resume_interpreter);

  // state == interpreterState object for method we are resuming

  __ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume);
  __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
  __ movptr(STATE(_stack), rsp);        // inform interpreter of new stack depth (parameters removed,
                                        // result if any on stack already)
  __ movptr(rsp, STATE(_stack_limit));  // restore expression stack to full depth
  __ jmp(call_interpreter_2);           // No need to bang

  // interpreter returning to native code (call_stub/c1/c2)
  // convert result and unwind initial activation
  // rax - result index

  __ bind(return_to_initial_caller);
  ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi);
  // Address index(noreg, rax, Address::times_ptr);

  __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr)));
  __ call(rax); // call result converter

  Label unwind_initial_activation;
  __ bind(unwind_initial_activation);

  // RETURN TO CALL_STUB/C1/C2 code (result if any in rax/rdx ST(0))

  /* Current stack picture

        [ incoming parameters                ]
        [ extra locals                       ]
        [ return address to CALL_STUB/C1/C2  ]
  fp -> [ CALL_STUB/C1/C2 fp                 ]
        BytecodeInterpreter object
        expression stack
  sp ->

  */

  // return restoring the stack to the original sender_sp value

  __ movptr(rcx, STATE(_sender_sp));
  __ leave();
  __ pop(rdi); // get return address
  // set stack to sender's sp
  __ mov(rsp, rcx);
  __ jmp(rdi); // return to call_stub

  // OSR request, adjust return address to make current frame into adapter frame
  // and enter OSR nmethod

  __ bind(do_OSR);

  Label remove_initial_frame;

  // We are going to pop this frame. Is there another interpreter frame underneath
  // it or is it callstub/compiled?

  // Move buffer to the expected parameter location
  __ movptr(rcx, STATE(_result._osr._osr_buf));

  __ movptr(rax, STATE(_result._osr._osr_entry));

  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
  __ jcc(Assembler::equal, remove_initial_frame);   // back to native code (call_stub/c1/c2)

  __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register
  __ leave();                                       // pop the frame
  __ mov(rsp, sender_sp_on_entry);                  // trim any stack expansion


  // We know we are calling compiled so push a specialized return.
  // The method uses a specialized entry; push a return address so we look
  // like call stub setup. This path will handle the fact that the result is
  // returned in registers and not on the java stack.

  __ pushptr(return_from_native_method.addr());

  __ jmp(rax);

  __ bind(remove_initial_frame);

  __ movptr(rdx, STATE(_sender_sp));
  __ leave();
  // get real return
  __ pop(rsi);
  // set stack to sender's sp
  __ mov(rsp, rdx);
  // repush real return
  __ push(rsi);
  // Enter OSR nmethod
  __ jmp(rax);



  // Call a new method. All we do is (temporarily) trim the expression stack,
  // push a return address to bring us back to here and leap to the new entry.

  __ bind(call_method);

  // Stack points to the next free location, not the top element on the
  // expression stack. The method expects sp to be pointing to the topmost
  // element.

  __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top
  __ lea(rsp, Address(rsp, wordSize));

  __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute

  // don't need a return address if reinvoking interpreter

  // Make it look like call_stub calling conventions

  // Get (potential) receiver
  // get size of parameters in words
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));

  ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
  __ pushptr(recursive.addr()); // make it look good in the debugger

  InternalAddress entry(entry_point);
  __ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter?
  __ jcc(Assembler::equal, re_dispatch);                                // yes

  __ pop(rax); // pop dummy address


  // get specialized entry
  __ movptr(rax, STATE(_result._to_call._callee_entry_point));
  // set sender SP
  __ mov(sender_sp_on_entry, rsp);

  // The method uses a specialized entry; push a return address so we look
  // like call stub setup. This path will handle the fact that the result is
  // returned in registers and not on the java stack.

  __ pushptr(return_from_native_method.addr());

  __ jmp(rax);

  __ bind(bad_msg);
  __ stop("Bad message from interpreter");

  // Interpreted method "returned" with an exception; pass it on...
  // Pass result, unwind activation and continue/return to interpreter/call_stub.
  // We handle the result (if any) differently based on return to interpreter or call_stub.

  Label unwind_initial_with_pending_exception;

  __ bind(throw_exception);
  __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD);                // returning from recursive interpreter call?
  __ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2)
  __ movptr(rax, STATE(_locals)); // pop parameters, get new stack value
  __ addptr(rax, wordSize);       // account for prepush before we return
  __ jmp(unwind_recursive_activation);

  __ bind(unwind_initial_with_pending_exception);

  // We will unwind the current (initial) interpreter frame and forward
  // the exception to the caller. We must put the exception in the
  // expected register and clear pending exception and then forward.

  __ jmp(unwind_and_forward);

  interpreter_frame_manager = entry_point;
  return entry_point;
}

address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
  // determine code generation flags
  bool synchronized = false;
  address entry_point = NULL;

  switch (kind) {
    case Interpreter::zerolocals             :                                                                               break;
    case Interpreter::zerolocals_synchronized: synchronized = true;                                                          break;
    case Interpreter::native                 : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false);    break;
    case Interpreter::native_synchronized    : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true);     break;
    case Interpreter::empty                  : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry();          break;
    case Interpreter::accessor               : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();       break;
    case Interpreter::abstract               : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();       break;
    case Interpreter::method_handle          : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry();  break;

    case Interpreter::java_lang_math_sin     : // fall thru
    case Interpreter::java_lang_math_cos     : // fall thru
    case Interpreter::java_lang_math_tan     : // fall thru
    case Interpreter::java_lang_math_abs     : // fall thru
    case Interpreter::java_lang_math_log     : // fall thru
    case Interpreter::java_lang_math_log10   : // fall thru
    case Interpreter::java_lang_math_sqrt    : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind);       break;
    case Interpreter::java_lang_ref_reference_get
                                             : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();  break;
    default                                  : ShouldNotReachHere();                                                         break;
  }

  if (entry_point) return entry_point;

  return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);

}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

// Deoptimization helpers for C++ interpreter

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {

  const int stub_code = 4; // see generate_call_stub
  // Save space for one monitor to get into the interpreted method in case
  // the method is synchronized
  int monitor_size = method->is_synchronized() ?
                     1 * frame::interpreter_frame_monitor_size() : 0;

  // total static overhead size. Account for interpreter state object, return
  // address, saved rbp and 2 words for a "static long no_params() method" issue.

  const int overhead_size = sizeof(BytecodeInterpreter)/wordSize +
                            (frame::sender_sp_offset - frame::link_offset) + 2;
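  // (frame::sender_sp_offset - frame::link_offset) works out to the two
  // words occupied by the saved rbp and the return address on x86.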

  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;
  // Include monitor_size so the monitor slot reserved above is actually
  // accounted for.
  return overhead_size + monitor_size + method_stack + stub_code;
}

// returns the activation size.
static int size_activation_helper(int extra_locals_size, int monitor_size) {
  return (extra_locals_size +           // the additional space for locals
          2*BytesPerWord +              // return address and saved rbp
          2*BytesPerWord +              // "static long no_params() method" issue
          sizeof(BytecodeInterpreter) + // interpreterState
          monitor_size);                // monitors
}

void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
                                                  frame* caller,
                                                  frame* current,
                                                  Method* method,
                                                  intptr_t* locals,
                                                  intptr_t* stack,
                                                  intptr_t* stack_base,
                                                  intptr_t* monitor_base,
                                                  intptr_t* frame_bottom,
                                                  bool is_top_frame
                                                  )
{
  // What about any vtable?
  //
  to_fill->_thread = JavaThread::current();
  // This gets filled in later but make it something recognizable for now
  to_fill->_bcp = method->code_base();
  to_fill->_locals = locals;
  to_fill->_constants = method->constants()->cache();
  to_fill->_method = method;
  to_fill->_mdx = NULL;
  to_fill->_stack = stack;
  if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution()) {
    to_fill->_msg = deopt_resume2;
  } else {
    to_fill->_msg = method_resume;
  }
  to_fill->_result._to_call._bcp_advance = 0;
  to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
  to_fill->_result._to_call._callee = NULL;             // doesn't matter to anyone
  to_fill->_prev_link = NULL;

  to_fill->_sender_sp = caller->unextended_sp();

  if (caller->is_interpreted_frame()) {
    interpreterState prev = caller->get_interpreterState();
    to_fill->_prev_link = prev;
    // *current->register_addr(GR_Iprev_state) = (intptr_t) prev;
    // Make the prev callee look proper
    prev->_result._to_call._callee = method;
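    // invokeinterface is a 5-byte bytecode (opcode, 2-byte cp index, count,
    // zero pad); the other invoke bytecodes that can get here are 3 bytes.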
    if (*prev->_bcp == Bytecodes::_invokeinterface) {
      prev->_result._to_call._bcp_advance = 5;
    } else {
      prev->_result._to_call._bcp_advance = 3;
    }
  }
  to_fill->_oop_temp = NULL;
  to_fill->_stack_base = stack_base;
  // Need +1 here because stack_base points to the word just above the first expr stack entry
  // and stack_limit is supposed to point to the word just below the last expr stack entry.
  // See generate_compute_interpreter_state.
  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
  to_fill->_monitor_base = (BasicObjectLock*) monitor_base;

  to_fill->_self_link = to_fill;
  assert(stack >= to_fill->_stack_limit && stack < to_fill->_stack_base,
         "Stack top out of range");
}


static int frame_size_helper(int max_stack,
                             int tempcount,
                             int moncount,
                             int callee_param_count,
                             int callee_locals,
                             bool is_top_frame,
                             int& monitor_size,
                             int& full_frame_size) {
  int extra_locals_size = (callee_locals - callee_param_count) * BytesPerWord;
  monitor_size = sizeof(BasicObjectLock) * moncount;

  // First calculate the frame size without any java expression stack
  int short_frame_size = size_activation_helper(extra_locals_size,
                                                monitor_size);

  // Now with full size expression stack
  full_frame_size = short_frame_size + max_stack * BytesPerWord;

  // and now with only live portion of the expression stack
  short_frame_size = short_frame_size + tempcount * BytesPerWord;

  // the size the activation is right now. Only top frame is full size
  int frame_size = (is_top_frame ? full_frame_size : short_frame_size);
  return frame_size;
}

int AbstractInterpreter::size_activation(int max_stack,
                                         int tempcount,
                                         int extra_args,
                                         int moncount,
                                         int callee_param_count,
                                         int callee_locals,
                                         bool is_top_frame) {
  assert(extra_args == 0, "FIX ME");
  // NOTE: return size is in words not bytes

  // Calculate the amount our frame will be adjusted by the callee. For top frame
  // this is zero.

  // NOTE: ia64 seems to do this wrong (or at least backwards) in that it
  // calculates the extra locals based on itself. Not what the callee does
  // to it. So it ignores last_frame_adjust value. Seems suspicious as far
  // as getting sender_sp correct.

  int unused_monitor_size = 0;
  int unused_full_frame_size = 0;
  return frame_size_helper(max_stack, tempcount, moncount, callee_param_count, callee_locals,
                           is_top_frame, unused_monitor_size, unused_full_frame_size) / BytesPerWord;
}

void AbstractInterpreter::layout_activation(Method* method,
                                            int tempcount, // live expression stack slots
                                            int popframe_extra_args,
                                            int moncount,
                                            int caller_actual_parameters,
                                            int callee_param_count,
                                            int callee_locals,
                                            frame* caller,
                                            frame* interpreter_frame,
                                            bool is_top_frame,
                                            bool is_bottom_frame) {

  assert(popframe_extra_args == 0, "FIX ME");
  // NOTE this code must exactly mimic what InterpreterGenerator::generate_compute_interpreter_state()
  // does as far as allocating an interpreter frame.
  // Set up the method, locals, and monitors.
  // The frame interpreter_frame is guaranteed to be the right size,
  // as determined by a previous call to the size_activation() method.
  // It is also guaranteed to be walkable even though it is in a skeletal state.
  // NOTE: tempcount is the current size of the java expression stack. For top most
  // frames we will allocate a full sized expression stack and not the cut-back
  // version that non-top frames have.

  int monitor_size = 0;
  int full_frame_size = 0;
  int frame_size = frame_size_helper(method->max_stack(), tempcount, moncount, callee_param_count, callee_locals,
                                     is_top_frame, monitor_size, full_frame_size);

#ifdef ASSERT
  assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
#endif

  // MUCHO HACK

  intptr_t* frame_bottom = (intptr_t*) ((intptr_t)interpreter_frame->sp() - (full_frame_size - frame_size));
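  // Note: (full_frame_size - frame_size) is zero for the top frame, so
  // frame_bottom == sp() there; for non-top frames it records where the
  // bottom of a full-size expression stack would fall. layout_interpreterState
  // above does not actually consume the frame_bottom it is passed on x86.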

  /* Now fill in the interpreterState object */

  // The state object is the first thing on the frame and easily located

  interpreterState cur_state = (interpreterState) ((intptr_t)interpreter_frame->fp() - sizeof(BytecodeInterpreter));


  // Find the locals pointer. This is rather simple on x86 because there is no
  // confusing rounding at the callee to account for. We can trivially locate
  // our locals based on the current fp().
  // Note: the + 2 is for handling the "static long no_params() method" issue.
  // (too bad I don't really remember that issue well...)

  intptr_t* locals;
  // If the caller is interpreted we need to make sure that locals points to the first
  // argument that the caller passed and not in an area where the stack might have been extended,
  // because the stack-to-stack converter needs a proper locals value in order to remove the
  // arguments from the caller and place the result in the proper location. Hmm maybe it'd be
  // simpler if we simply stored the result in the BytecodeInterpreter object and let the c++ code
  // adjust the stack?? HMMM QQQ
  //
  if (caller->is_interpreted_frame()) {
    // locals must agree with the caller because it will be used to set the
    // caller's tos when we return.
    interpreterState prev = caller->get_interpreterState();
    // stack() is prepushed.
    locals = prev->stack() + method->size_of_parameters();
    // locals = caller->unextended_sp() + (method->size_of_parameters() - 1);
    if (locals != interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2) {
      // os::breakpoint();
    }
  } else {
    // this is where a c2i would have placed locals (except for the +2)
    locals = interpreter_frame->fp() + frame::sender_sp_offset + (method->max_locals() - 1) + 2;
  }

  intptr_t* monitor_base = (intptr_t*) cur_state;
  intptr_t* stack_base = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
  /* +1 because stack is always prepushed */
  intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (tempcount + 1) * BytesPerWord);
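  // Resulting layout below the interpreterState (addresses decrease downward;
  // a sketch):
  //   cur_state                                 <-- monitor_base
  //   [ moncount BasicObjectLocks ]
  //                                             <-- stack_base (word above
  //   [ tempcount live expression stack words ]     first stack entry)
  //   [ one prepush slot ]                      <-- stack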


  BytecodeInterpreter::layout_interpreterState(cur_state,
                                               caller,
                                               interpreter_frame,
                                               method,
                                               locals,
                                               stack,
                                               stack_base,
                                               monitor_base,
                                               frame_bottom,
                                               is_top_frame);

  // BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address, interpreter_frame->fp());
}

#endif // CC_INTERP (all)