1 /*
2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "compiler/disassembler.hpp"
29 #include "interpreter/bytecodeHistogram.hpp"
30 #include "interpreter/bytecodeInterpreter.hpp"
31 #include "interpreter/bytecodeStream.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "interpreter/interp_masm.hpp"
35 #include "interpreter/templateTable.hpp"
36 #include "memory/allocation.inline.hpp"
37 #include "memory/metaspaceShared.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/arrayOop.hpp"
40 #include "oops/constantPool.hpp"
41 #include "oops/cpCache.inline.hpp"
42 #include "oops/methodData.hpp"
43 #include "oops/method.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "prims/forte.hpp"
46 #include "prims/jvmtiExport.hpp"
47 #include "prims/methodHandles.hpp"
48 #include "runtime/handles.inline.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "runtime/stubRoutines.hpp"
51 #include "runtime/timer.hpp"
52
53 # define __ _masm->
54
55 //------------------------------------------------------------------------------------------------------------------------
56 // Implementation of platform independent aspects of Interpreter
57
initialize()58 void AbstractInterpreter::initialize() {
59 // make sure 'imported' classes are initialized
60 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) BytecodeCounter::reset();
61 if (PrintBytecodeHistogram) BytecodeHistogram::reset();
62 if (PrintBytecodePairHistogram) BytecodePairHistogram::reset();
63 }
64
print()65 void AbstractInterpreter::print() {
66 tty->cr();
67 tty->print_cr("----------------------------------------------------------------------");
68 tty->print_cr("Interpreter");
69 tty->cr();
70 tty->print_cr("code size = %6dK bytes", (int)_code->used_space()/1024);
71 tty->print_cr("total space = %6dK bytes", (int)_code->total_space()/1024);
72 tty->print_cr("wasted space = %6dK bytes", (int)_code->available_space()/1024);
73 tty->cr();
74 tty->print_cr("# of codelets = %6d" , _code->number_of_stubs());
75 if (_code->number_of_stubs() != 0) {
76 tty->print_cr("avg codelet size = %6d bytes", _code->used_space() / _code->number_of_stubs());
77 tty->cr();
78 }
79 _code->print();
80 tty->print_cr("----------------------------------------------------------------------");
81 tty->cr();
82 }
83
84
85 //------------------------------------------------------------------------------------------------------------------------
86 // Implementation of interpreter
87
// Static state shared by all interpreter implementations
// (declared in the AbstractInterpreter class header).
StubQueue* AbstractInterpreter::_code = NULL;            // stub queue holding the generated interpreter codelets
bool AbstractInterpreter::_notice_safepoints = false;    // whether the interpreter currently checks for safepoints
address AbstractInterpreter::_rethrow_exception_entry = NULL;

address AbstractInterpreter::_native_entry_begin = NULL; // [begin, end) of the native-method entry code
address AbstractInterpreter::_native_entry_end = NULL;
address AbstractInterpreter::_slow_signature_handler;    // NOTE: not NULL-initialized here, unlike the entries above
address AbstractInterpreter::_entry_table [AbstractInterpreter::number_of_method_entries];     // per-MethodKind entry points
address AbstractInterpreter::_cds_entry_table [AbstractInterpreter::number_of_method_entries]; // per-MethodKind CDS trampolines
address AbstractInterpreter::_native_abi_to_tosca [AbstractInterpreter::number_of_result_handlers]; // native result handlers
98
99 //------------------------------------------------------------------------------------------------------------------------
100 // Generation of complete interpreter
101
// Base-class constructor: only clears the assembler pointer.
// NOTE(review): the StubQueue parameter is unused here and its name `_code`
// shadows the AbstractInterpreter::_code static; presumably concrete
// generator subclasses create the assembler over the queue — confirm.
AbstractInterpreterGenerator::AbstractInterpreterGenerator(StubQueue* _code) {
  _masm = NULL;
}
105
106
107 //------------------------------------------------------------------------------------------------------------------------
108 // Entry points
109
// Classify a method into the interpreter entry-point kind used to invoke it.
// The ordering of the tests below is significant; see the inline comments.
AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(const methodHandle& m) {
  // Abstract method?
  if (m->is_abstract()) return abstract;

  // Method handle primitive?
  if (m->is_method_handle_intrinsic()) {
    vmIntrinsics::ID id = m->intrinsic_id();
    assert(MethodHandles::is_signature_polymorphic(id), "must match an intrinsic");
    // The MH invoke kinds parallel the signature-polymorphic intrinsic ids,
    // so the kind is computed by offsetting into the enum range.
    MethodKind kind = (MethodKind)( method_handle_invoke_FIRST +
                                    ((int)id - vmIntrinsics::FIRST_MH_SIG_POLY) );
    assert(kind <= method_handle_invoke_LAST, "parallel enum ranges");
    return kind;
  }

#ifndef CC_INTERP
  switch (m->intrinsic_id()) {
    // Use optimized stub code for CRC32 native methods.
    case vmIntrinsics::_updateCRC32 : return java_util_zip_CRC32_update;
    case vmIntrinsics::_updateBytesCRC32 : return java_util_zip_CRC32_updateBytes;
    case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer;
    // Use optimized stub code for CRC32C methods.
    case vmIntrinsics::_updateBytesCRC32C : return java_util_zip_CRC32C_updateBytes;
    case vmIntrinsics::_updateDirectByteBufferCRC32C : return java_util_zip_CRC32C_updateDirectByteBuffer;
    case vmIntrinsics::_intBitsToFloat: return java_lang_Float_intBitsToFloat;
    case vmIntrinsics::_floatToRawIntBits: return java_lang_Float_floatToRawIntBits;
    case vmIntrinsics::_longBitsToDouble: return java_lang_Double_longBitsToDouble;
    case vmIntrinsics::_doubleToRawLongBits: return java_lang_Double_doubleToRawLongBits;
    default: break;
  }
#endif // CC_INTERP

  // Native method?
  // Note: This test must come _before_ the test for intrinsic
  // methods. See also comments below.
  if (m->is_native()) {
    assert(!m->is_method_handle_intrinsic(), "overlapping bits here, watch out");
    return m->is_synchronized() ? native_synchronized : native;
  }

  // Synchronized?
  if (m->is_synchronized()) {
    return zerolocals_synchronized;
  }

  // Object.<init> with a one-bytecode body (just the return) still needs a
  // normal frame when finalizers are registered at initialization time.
  if (RegisterFinalizersAtInit && m->code_size() == 1 &&
      m->intrinsic_id() == vmIntrinsics::_Object_init) {
    // We need to execute the special return bytecode to check for
    // finalizer registration so create a normal frame.
    return zerolocals;
  }

  // Empty method?
  if (m->is_empty_method()) {
    return empty;
  }

  // Special intrinsic method?
  // Note: This test must come _after_ the test for native methods,
  // otherwise we will run into problems with JDK 1.2, see also
  // TemplateInterpreterGenerator::generate_method_entry() for details.
  switch (m->intrinsic_id()) {
    case vmIntrinsics::_dsin : return java_lang_math_sin ;
    case vmIntrinsics::_dcos : return java_lang_math_cos ;
    case vmIntrinsics::_dtan : return java_lang_math_tan ;
    case vmIntrinsics::_dabs : return java_lang_math_abs ;
    case vmIntrinsics::_dsqrt : return java_lang_math_sqrt ;
    case vmIntrinsics::_dlog : return java_lang_math_log ;
    case vmIntrinsics::_dlog10: return java_lang_math_log10;
    case vmIntrinsics::_dpow : return java_lang_math_pow ;
    case vmIntrinsics::_dexp : return java_lang_math_exp ;
    case vmIntrinsics::_fmaD : return java_lang_math_fmaD ;
    case vmIntrinsics::_fmaF : return java_lang_math_fmaF ;

    case vmIntrinsics::_Reference_get
                              : return java_lang_ref_reference_get;
    default                   : break;
  }

  // Accessor method?
  if (m->is_getter()) {
    // TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters.
    // See CppInterpreter::accessor_entry in cppInterpreter_zero.cpp. This should be fixed in Zero,
    // then the call above updated to ::is_accessor
    assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
    return accessor;
  }

  // Note: for now: zero locals for all non-empty methods
  return zerolocals;
}
201
202 #if INCLUDE_CDS
203
get_trampoline_code_buffer(AbstractInterpreter::MethodKind kind)204 address AbstractInterpreter::get_trampoline_code_buffer(AbstractInterpreter::MethodKind kind) {
205 const size_t trampoline_size = SharedRuntime::trampoline_size();
206 address addr = MetaspaceShared::i2i_entry_code_buffers((size_t)(AbstractInterpreter::number_of_method_entries) * trampoline_size);
207 addr += (size_t)(kind) * trampoline_size;
208
209 return addr;
210 }
211
// (Re)generate the CDS trampoline for the given method kind and record it in
// _cds_entry_table. The trampoline is emitted into its reserved slot in the
// shared i2i entry code buffer and targets the current _entry_table entry,
// so it must be regenerated whenever that entry changes.
void AbstractInterpreter::update_cds_entry_table(AbstractInterpreter::MethodKind kind) {
  if (DumpSharedSpaces || UseSharedSpaces) {
    address trampoline = get_trampoline_code_buffer(kind);
    _cds_entry_table[kind] = trampoline;

    // Assemble the trampoline directly into its fixed-size slot.
    CodeBuffer buffer(trampoline, (int)(SharedRuntime::trampoline_size()));
    MacroAssembler _masm(&buffer);
    SharedRuntime::generate_trampoline(&_masm, _entry_table[kind]);
    _masm.flush();

    if (PrintInterpreter) {
      Disassembler::decode(buffer.insts_begin(), buffer.insts_end());
    }
  }
}
227
228 #endif
229
set_entry_for_kind(AbstractInterpreter::MethodKind kind,address entry)230 void AbstractInterpreter::set_entry_for_kind(AbstractInterpreter::MethodKind kind, address entry) {
231 assert(kind >= method_handle_invoke_FIRST &&
232 kind <= method_handle_invoke_LAST, "late initialization only for MH entry points");
233 assert(_entry_table[kind] == _entry_table[abstract], "previous value must be AME entry");
234 _entry_table[kind] = entry;
235
236 update_cds_entry_table(kind);
237 }
238
239 // Return true if the interpreter can prove that the given bytecode has
240 // not yet been executed (in Java semantics, not in actual operation).
// Return true if the interpreter can prove that the bytecode at bci has not
// yet been executed (in Java semantics, not in actual operation). Proof works
// by checking that the bytecode's lazy side effects (call-site resolution,
// bytecode rewriting) have not happened yet.
bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) {
  BytecodeStream s(method, bci);
  Bytecodes::Code code = s.next();

  if (Bytecodes::is_invoke(code)) {
    assert(!Bytecodes::must_rewrite(code), "invokes aren't rewritten");
    ConstantPool* cpool = method()->constants();

    Bytecode invoke_bc(s.bytecode());

    switch (code) {
      case Bytecodes::_invokedynamic: {
        // indy uses a 4-byte cp-cache index; unresolved iff f1 is still null.
        assert(invoke_bc.has_index_u4(code), "sanity");
        int method_index = invoke_bc.get_index_u4(code);
        return cpool->invokedynamic_cp_cache_entry_at(method_index)->is_f1_null();
      }
      case Bytecodes::_invokevirtual: // fall-through
      case Bytecodes::_invokeinterface: // fall-through
      case Bytecodes::_invokespecial: // fall-through
      case Bytecodes::_invokestatic: {
        // Preresolution makes resolution state unreliable as a reachedness proof.
        if (cpool->has_preresolution()) {
          return false; // might have been reached
        }
        assert(!invoke_bc.has_index_u4(code), "sanity");
        // Regular invokes use a 2-byte cp-cache index; unresolved iff the
        // target method has not been recorded in the constant pool cache.
        int method_index = invoke_bc.get_index_u2_cpcache(code);
        constantPoolHandle cp(Thread::current(), cpool);
        Method* resolved_method = ConstantPool::method_at_if_loaded(cp, method_index);
        return (resolved_method == NULL);
      }
      default: ShouldNotReachHere();
    }
  } else if (!Bytecodes::must_rewrite(code)) {
    // might have been reached
    return false;
  }

  // the bytecode might not be rewritten if the method is an accessor, etc.
  address ientry = method->interpreter_entry();
  if (ientry != entry_for_kind(AbstractInterpreter::zerolocals) &&
      ientry != entry_for_kind(AbstractInterpreter::zerolocals_synchronized))
    return false; // interpreter does not run this method!

  // otherwise, we can be sure this bytecode has never been executed
  return true;
}
286
287
288 #ifndef PRODUCT
print_method_kind(MethodKind kind)289 void AbstractInterpreter::print_method_kind(MethodKind kind) {
290 switch (kind) {
291 case zerolocals : tty->print("zerolocals" ); break;
292 case zerolocals_synchronized: tty->print("zerolocals_synchronized"); break;
293 case native : tty->print("native" ); break;
294 case native_synchronized : tty->print("native_synchronized" ); break;
295 case empty : tty->print("empty" ); break;
296 case accessor : tty->print("accessor" ); break;
297 case abstract : tty->print("abstract" ); break;
298 case java_lang_math_sin : tty->print("java_lang_math_sin" ); break;
299 case java_lang_math_cos : tty->print("java_lang_math_cos" ); break;
300 case java_lang_math_tan : tty->print("java_lang_math_tan" ); break;
301 case java_lang_math_abs : tty->print("java_lang_math_abs" ); break;
302 case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break;
303 case java_lang_math_log : tty->print("java_lang_math_log" ); break;
304 case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break;
305 case java_lang_math_fmaD : tty->print("java_lang_math_fmaD" ); break;
306 case java_lang_math_fmaF : tty->print("java_lang_math_fmaF" ); break;
307 case java_util_zip_CRC32_update : tty->print("java_util_zip_CRC32_update"); break;
308 case java_util_zip_CRC32_updateBytes : tty->print("java_util_zip_CRC32_updateBytes"); break;
309 case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break;
310 case java_util_zip_CRC32C_updateBytes : tty->print("java_util_zip_CRC32C_updateBytes"); break;
311 case java_util_zip_CRC32C_updateDirectByteBuffer: tty->print("java_util_zip_CRC32C_updateDirectByteByffer"); break;
312 default:
313 if (kind >= method_handle_invoke_FIRST &&
314 kind <= method_handle_invoke_LAST) {
315 const char* kind_name = vmIntrinsics::name_at(method_handle_intrinsic(kind));
316 if (kind_name[0] == '_') kind_name = &kind_name[1]; // '_invokeExact' => 'invokeExact'
317 tty->print("method_handle_%s", kind_name);
318 break;
319 }
320 ShouldNotReachHere();
321 break;
322 }
323 }
324 #endif // PRODUCT
325
326
327 //------------------------------------------------------------------------------------------------------------------------
328 // Deoptimization support
329
/**
 * If a deoptimization happens, this function returns the interpreter entry
 * point at which execution continues with the bytecode FOLLOWING the one at
 * bcp (i.e. the deoptimized bytecode is considered already executed).
 *
 * For invokes and ldc the result type must be read from the resolved target,
 * and for non-top frames the callee's parameter size is stored into the
 * (possibly not yet initialized) constant pool cache entry.
 */
address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) {
  assert(method->contains(bcp), "just checkin'");

  // Get the original and rewritten bytecode.
  Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
  assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute");

  const int bci = method->bci_from(bcp);

  // compute continuation length
  const int length = Bytecodes::length_at(method, bcp);

  // compute result type
  BasicType type = T_ILLEGAL;

  switch (code) {
    case Bytecodes::_invokevirtual :
    case Bytecodes::_invokespecial :
    case Bytecodes::_invokestatic :
    case Bytecodes::_invokeinterface: {
      Thread *thread = Thread::current();
      ResourceMark rm(thread);
      methodHandle mh(thread, method);
      type = Bytecode_invoke(mh, bci).result_type();
      // since the cache entry might not be initialized:
      // (NOT needed for the old calling convention)
      if (!is_top_frame) {
        // 2-byte native-order cp-cache index follows the invoke opcode
        int index = Bytes::get_native_u2(bcp+1);
        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
      }
      break;
    }

    case Bytecodes::_invokedynamic: {
      Thread *thread = Thread::current();
      ResourceMark rm(thread);
      methodHandle mh(thread, method);
      type = Bytecode_invoke(mh, bci).result_type();
      // since the cache entry might not be initialized:
      // (NOT needed for the old calling convention)
      if (!is_top_frame) {
        // indy uses a 4-byte native-order index
        int index = Bytes::get_native_u4(bcp+1);
        method->constants()->invokedynamic_cp_cache_entry_at(index)->set_parameter_size(callee_parameters);
      }
      break;
    }

    case Bytecodes::_ldc :
    case Bytecodes::_ldc_w : // fall through
    case Bytecodes::_ldc2_w:
      {
        Thread *thread = Thread::current();
        ResourceMark rm(thread);
        methodHandle mh(thread, method);
        type = Bytecode_loadconstant(mh, bci).result_type();
        break;
      }

    default:
      // all other bytecodes have a statically known result type
      type = Bytecodes::result_type(code);
      break;
  }

  // return entry point for computed continuation state & bytecode length
  return
    is_top_frame
    ? Interpreter::deopt_entry (as_TosState(type), length)
    : Interpreter::return_entry(as_TosState(type), length, code);
}
402
403 // If deoptimization happens, this function returns the point where the interpreter reexecutes
404 // the bytecode.
405 // Note: Bytecodes::_athrow is a special case in that it does not return
406 // Interpreter::deopt_entry(vtos, 0) like others
deopt_reexecute_entry(Method * method,address bcp)407 address AbstractInterpreter::deopt_reexecute_entry(Method* method, address bcp) {
408 assert(method->contains(bcp), "just checkin'");
409 Bytecodes::Code code = Bytecodes::java_code_at(method, bcp);
410 #if defined(COMPILER1) || INCLUDE_JVMCI
411 if(code == Bytecodes::_athrow ) {
412 return Interpreter::rethrow_exception_entry();
413 }
414 #endif /* COMPILER1 || INCLUDE_JVMCI */
415 return Interpreter::deopt_entry(vtos, 0);
416 }
417
// If deoptimization happens, the interpreter should reexecute these bytecodes.
// This function mainly helps the compilers to set up the reexecute bit.
bool AbstractInterpreter::bytecode_should_reexecute(Bytecodes::Code code) {
  switch (code) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
    case Bytecodes::_fast_binaryswitch:
    case Bytecodes::_fast_linearswitch:
    // recompute conditional expression folded into _if<cond>
    case Bytecodes::_lcmp      :
    case Bytecodes::_fcmpl     :
    case Bytecodes::_fcmpg     :
    case Bytecodes::_dcmpl     :
    case Bytecodes::_dcmpg     :
    case Bytecodes::_ifnull    :
    case Bytecodes::_ifnonnull :
    case Bytecodes::_goto      :
    case Bytecodes::_goto_w    :
    case Bytecodes::_ifeq      :
    case Bytecodes::_ifne      :
    case Bytecodes::_iflt      :
    case Bytecodes::_ifge      :
    case Bytecodes::_ifgt      :
    case Bytecodes::_ifle      :
    case Bytecodes::_if_icmpeq :
    case Bytecodes::_if_icmpne :
    case Bytecodes::_if_icmplt :
    case Bytecodes::_if_icmpge :
    case Bytecodes::_if_icmpgt :
    case Bytecodes::_if_icmple :
    case Bytecodes::_if_acmpeq :
    case Bytecodes::_if_acmpne :
    // special cases
    case Bytecodes::_getfield  :
    case Bytecodes::_putfield  :
    case Bytecodes::_getstatic :
    case Bytecodes::_putstatic :
    case Bytecodes::_aastore   :
#ifdef COMPILER1
    // special case of reexecution
    case Bytecodes::_athrow    :
#endif
      return true;

    default:
      return false;
  }
}
466
initialize_method_handle_entries()467 void AbstractInterpreter::initialize_method_handle_entries() {
468 // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
469 for (int i = method_handle_invoke_FIRST; i <= method_handle_invoke_LAST; i++) {
470 MethodKind kind = (MethodKind) i;
471 _entry_table[kind] = _entry_table[Interpreter::abstract];
472 Interpreter::update_cds_entry_table(kind);
473 }
474 }
475