/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Whenever a compiled IC is changed or its type is accessed, either the
// CompiledIC_lock must be held or we must be at a safepoint.

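// A minimal sketch of the locking discipline from a caller's perspective
// (hypothetical code, for illustration only):
//
//   {
//     MutexLocker ml(CompiledIC_lock);        // or run inside a safepoint
//     CompiledIC* ic = CompiledIC_at(&iter);  // iter at an ic_call relocation
//     if (!ic->is_clean()) {
//       ic->set_to_clean();
//     }
//   }
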
//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached metadata");

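  // Note: a clean or freshly patched IC stores the Universe::non_oop_word()
  // sentinel rather than a raw NULL, so a reader racing with a patcher never
  // observes a half-initialized NULL; the sentinel is mapped back to NULL below.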
  if (!is_in_transition_state()) {
    void* data = get_data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}


void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert(cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_call->destination())) {
    // When patching for the ICStub case, the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
  }

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

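  // Patch the call destination first, under the Patching_lock (or while at a
  // safepoint); the cached value, if any, is only updated afterwards, below.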
  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address());
    assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
    _call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


address CompiledIC::ic_destination() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}

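// An IC is "in transition" while its destination points into the
// InlineCacheBuffer: the requested new state is parked in an ICStub and will
// be installed in the call site at the next safepoint.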
bool CompiledIC::is_in_transition_state() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _call->destination();
}

// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
  if (is_in_transition_state()) {
    ICStub* stub = ICStub_from_destination_address(stub_address());
    stub->clear();
  }
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.

void CompiledIC::initialize_from_iter(RelocIterator* iter) {
  assert(iter->addr() == _call->instruction_address(), "must find ic_call");

  if (iter->type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter->virtual_call_reloc();
    _is_optimized = false;
    _value = _call->get_load_instruction(r);
  } else {
    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}

CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
  : _method(cm)
{
  _call = _method->call_wrapper_at((address) call);
  address ic_call = _call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(cm != NULL, "must pass compiled method");
  assert(cm->contains(ic_call), "must be in compiled method");

  // Search for the ic_call at the given address.
  RelocIterator iter(cm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");

  initialize_from_iter(&iter);
}

CompiledIC::CompiledIC(RelocIterator* iter)
  : _method(iter->code())
{
  _call = _method->call_wrapper_at(iter->addr());
  address ic_call = _call->instruction_address();

  CompiledMethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass compiled method");
  assert(nm->contains(ic_call), "must be in compiled method");

  initialize_from_iter(iter);
}

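// Transitions this inline cache to the megamorphic state by routing the call
// through a shared vtable or itable dispatch stub. Returns false if the
// required dispatch stub cannot be found or allocated; the caller is then
// expected to retry later.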
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass(), false);
    holder->claim();
    InlineCacheBuffer::create_transition_stub(this, holder, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different from selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::entry_point(ic_destination()) != NULL;
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use the unsafe lookup, since an inline cache might point to a zombie method. However, the
  // zombie method is guaranteed to still exist, since we only remove methods after all inline
  // caches have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_compiled());
  // Check that the cached_value is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites that could be
  // optimized virtuals (because there are no currently loaded subclasses of a type) are left as
  // virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // This is a call to the interpreter if the destination is either a static-call
  // stub (optimized case) or a c2i adapter blob (non-optimized case).
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use the unsafe lookup, because the destination can be a zombie (and
    // we're cleaning), and print_compiled_ic wants to know whether the site
    // (in the non-zombie method) calls into the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    address dest = ic_destination();
#ifdef ASSERT
    {
      _call->verify_resolve_call(dest);
    }
#endif /* ASSERT */
    is_call_to_interpreted = _call->is_call_to_interpreted(dest);
  }
  return is_call_to_interpreted;
}

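// Resets the call site to the resolve stub, so that the next invocation
// re-resolves the callee. If the site may be executing concurrently and cannot
// be patched in place, the transition is parked in an ICStub and completed at
// the next safepoint.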
void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry = _call->get_resolve_call_stub(is_optimized());

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}

bool CompiledIC::is_clean() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == _call->get_resolve_call_stub(is_optimized());
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we simply clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, the ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target, and those
  // transitions are MT-safe.

  Thread *thread = Thread::current();
  if (info.to_interpreter() || info.to_aot()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // The call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or because the static target is final).
      // At code generation time, this call has been emitted as a static call;
      // call via stub.
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      methodHandle method (thread, (Method*)info.cached_metadata());
      _call->set_to_interpreted(method, info);

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
                       p2i(instruction_address()),
                       (info.to_aot() ? "aot" : "interpreter"),
                       method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb != NULL && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */

    // This is MT-safe if we come from a clean cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass = %s) %s",
                     p2i(instruction_address()),
                     (info.cached_metadata() != NULL) ? ((Klass*)info.cached_metadata())->print_value_string() : "NULL",
                     (safe) ? "" : " via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: The call can be static bound. If it isn't also optimized, the property
// wasn't provable at time of compilation. An optimized call will have any necessary
// null check, while a static_bound won't. A static_bound (but not optimized) must
// therefore use the unverified entry point.
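// For example: a call whose target was known to be final at compile time is
// emitted as optimized (and is trivially static_bound as well), whereas a site
// whose target only turned out to be static bound at resolution time (e.g. the
// target was not loaded during compilation) is static_bound but not optimized,
// and must therefore enter through the unverified entry point.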
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
                                           Klass* receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           bool caller_is_nmethod,
                                           CompiledICInfo& info,
                                           TRAPS) {
  CompiledMethod* method_code = method->code();

  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    assert(method_code->is_compiled(), "must be compiled");
    // Call to compiled code
    //
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final (is_optimized), we will emit
    //     an optimized virtual call (no inline cache), and need a Method* to make
    //     a call to the interpreter
    //   - if we don't know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //   - if we notice here that the call is static bound, we convert the call
    //     into what looks to be an optimized virtual call,
    //     but we must use the unverified entry point (since there will be no
    //     null check on a call when the target isn't loaded).
    //     This causes problems when verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.
    if (is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
  if (entry != NULL && !far_c2a) {
    // Call to near compiled code (nmethod or aot).
    info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
  } else {
    if (is_optimized) {
      if (far_c2a) {
        // Call to aot code from nmethod.
        info.set_aot_entry(entry, method());
      } else {
        // Use stub entry
        info.set_interpreter_entry(method()->get_c2i_entry(), method());
      }
    } else {
      // Use icholder entry
      assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


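// An entry is an "icholder entry" if the code it targets consumes a
// CompiledICHolder* (rather than a Method* or Klass*) as the cached value;
// this is the case for c2i adapters and for itable stubs.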
bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (cb != NULL && cb->is_vtable_blob()) {
    VtableStub* s = VtableStubs::entry_point(entry);
    return (s != NULL) && s->is_itable_stub();
  }

  return false;
}

bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
  // This call site might have become stale so inspect it carefully.
  address dest = cm->call_wrapper_at(call_site->addr())->destination();
  return is_icholder_entry(dest);
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean(bool in_use) {
  // in_use is unused but needed to match template function in CompiledMethod
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(instruction_address());
  assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif

  set_destination_mt_safe(resolve_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

bool CompiledStaticCall::is_clean() const {
  return destination() == resolve_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
  return cm->stub_contains(destination());
}

bool CompiledDirectStaticCall::is_call_to_far() const {
  // It is a call to an aot method if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  CodeBlob* desc = CodeCache::find_blob(instruction_address());
  return desc->as_compiled_method()->stub_contains(destination());
}

void CompiledStaticCall::set_to_compiled(address entry) {
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                  name(),
                  p2i(instruction_address()),
                  p2i(entry));
  }
  // Call to compiled code
  assert(CodeCache::contains(entry), "wrong entry point");
  set_destination_mt_safe(entry);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we simply clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
  } else if (info._to_aot) {
    // Call to far code
    set_to_far(info.callee(), info.entry());
#endif
  } else {
    set_to_compiled(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
  CompiledMethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    if (caller_is_nmethod && m_code->is_far_code()) {
      // Call to far aot code from nmethod.
      info._to_aot = true;
    } else {
      info._to_aot = false;
    }
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

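// Locates the static-call stub belonging to a direct call site by walking the
// relocation information that covers it; returns NULL if no stub is found.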
address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction);
  while (iter.next()) {
    if (iter.addr() == instruction) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub(is_aot);
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub(is_aot);
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

address CompiledDirectStaticCall::find_stub(bool is_aot) {
  return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
}

address CompiledDirectStaticCall::resolve_call_stub() const {
  return SharedRuntime::get_resolve_static_call_stub();
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  _call->verify();
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledDirectStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_far()) {
    tty->print("far");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT