/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

void trace_type_profile(Compile* C, ciMethod* method, int depth, int bci, ciMethod* prof_method, ciKlass* prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || C->print_inlining()) {
    outputStream* out = tty;
    if (!C->print_inlining()) {
      if (!PrintOpto && !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining_tty(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print("%s", ss.as_string());
    out->cr();
  }
}
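
// Example of the line printed above, with illustrative values (the format
// follows the print() call in trace_type_profile):
//     \-> TypeProfile (120/150 counts) = java/lang/String
// i.e. the major profiled receiver accounted for 120 of the 150 recorded
// invocations at this call site.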

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, ciKlass* speculative_receiver_type,
                                       bool allow_intrinsics) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)): -1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    if (callee->is_method_handle_intrinsic()) {
      log->print(" method_handle_intrinsic='1'");
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
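  // Illustrative example (not from the original source): a Java call such as
  //   int m = Math.min(a, b);
  // can be matched by find_intrinsic() below and expanded into dedicated IR,
  // so no Java call needs to be generated for it at all.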
  CallGenerator* cg_intrinsic = NULL;
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
      if (cg->is_predicated()) {
        // Code without intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
          vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
        if (inline_cg != NULL) {
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
        }
      }

      // If intrinsic does the virtual dispatch, we try to use the type profile
      // first, and hopefully inline it as the regular virtual call below.
      // We will retry the intrinsic afterwards if nothing has claimed it.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = NULL;
      } else if (should_delay_vector_inlining(callee, jvms)) {
        return CallGenerator::for_late_inline(callee, cg);
      } else {
        return cg;
      }
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
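  // Illustrative example (assumed, not from the original source):
  //   MethodHandle mh = ...;
  //   Object r = mh.invokeExact(x);   // signature-polymorphic, no bytecodes
  // Such calls must be expanded here; the bytecode-based inliner below
  // cannot handle them.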
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, allow_inline);
    return cg;
  }

  // If explicit rounding is required, do not inline strict into non-strict code (or the reverse).
  if (Matcher::strict_fp_requires_explicit_rounding &&
      caller->is_strict() != callee->is_strict()) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      WarmCallInfo scratch_ci;
      bool should_delay = false;
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);

        if (require_inline && cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay_string_inlining(callee, jvms)) {
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if (should_delay_vector_reboxing_inlining(callee, jvms)) {
            return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
          } else if ((should_delay || AlwaysIncrementalInline)) {
            return CallGenerator::for_late_inline(callee, cg);
          }
        }
        if (cg == NULL || should_delay) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && UseTypeProfile) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = profile.has_receiver(0) && (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;

      int morphism = profile.morphism();
      if (speculative_receiver_type != NULL) {
        if (!too_many_traps_or_recompiles(caller, bci, Deoptimization::Reason_speculate_class_check)) {
          // We have a speculative type, we should be able to resolve
          // the call. We do that before looking at the profiling at
          // this invoke because it may lead to bimorphic inlining which
          // a speculative type should help us avoid.
          receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                   speculative_receiver_type);
          if (receiver_method == NULL) {
            speculative_receiver_type = NULL;
          } else {
            morphism = 1;
          }
        } else {
          // speculation failed before. Use profiling at the call
          // (could allow bimorphic inlining for instance).
          speculative_receiver_type = NULL;
        }
      }
      if (receiver_method == NULL &&
          (have_major_receiver || morphism == 1 ||
           (morphism == 2 && UseBimorphicInlining))) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (morphism == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_does_dispatch, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline second receiver's method
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (morphism == 2
                                                ? Deoptimization::Reason_bimorphic
                                                : Deoptimization::reason_class_check(speculative_receiver_type != NULL));
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
              !too_many_traps_or_recompiles(caller, bci, reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = (IncrementalInlineVirtual ? CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor)
                                                : CallGenerator::for_virtual_call(callee, vtable_index));
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, k, site_count, receiver_count);
              float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }

    // If there is only one implementor of this interface then we
    // may be able to bind this invoke directly to the implementing
    // klass but we need both a dependence on the single interface
    // and on the method we bind to. Additionally since all we know
    // about the receiver type is that it's supposed to implement the
    // interface we have to insert a check that it's the class we
    // expect. Interface types are not checked by the verifier so
    // they are roughly equivalent to Object.
    // The number of implementors for declared_interface is less than
    // or equal to the number of implementors for target->holder(), so
    // if the number of implementors of target->holder() == 1 then
    // the number of implementors for decl_interface is 0 or 1. If
    // it's 0 then no class implements decl_interface and there's
    // no point in inlining.
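    // Illustrative sketch (assumed, not from the original source): given
    //   interface I { void m(); }
    //   class C implements I { public void m() { ... } }   // sole implementor
    // an invokeinterface of I.m() can be bound to C.m(), guarded by a
    // receiver type check against C plus the CHA dependency recorded below.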
    if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
      ciInstanceKlass* declared_interface =
          caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
      ciInstanceKlass* singleton = declared_interface->unique_implementor();

      if (singleton != NULL &&
          (!callee->is_default_method() || callee->is_overpass()) /* CHA doesn't support default methods yet */) {
        assert(singleton != declared_interface, "not a unique implementor");

        ciMethod* cha_monomorphic_target =
            callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);

        if (cha_monomorphic_target != NULL &&
            cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
          ciKlass* holder = cha_monomorphic_target->holder();

          // Try to inline the method found by CHA. Inlined method is guarded by the type check.
          CallGenerator* hit_cg = call_generator(cha_monomorphic_target,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);

          // Deoptimize on type check fail. The interpreter will throw ICCE for us.
          CallGenerator* miss_cg = CallGenerator::for_uncommon_trap(callee,
              Deoptimization::Reason_class_check, Deoptimization::Action_none);

          CallGenerator* cg = CallGenerator::for_guarded_call(holder, miss_cg, hit_cg);
          if (hit_cg != NULL && cg != NULL) {
            dependencies()->assert_unique_concrete_method(declared_interface, cha_monomorphic_target);
            return cg;
          }
        }
      }
    } // call_does_dispatch && bytecode == Bytecodes::_invokeinterface

    // Nothing claimed the intrinsic; go with straightforward inlining
    // of the already discovered intrinsic.
    if (allow_intrinsics && cg_intrinsic != NULL) {
      assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
      return cg_intrinsic;
    }
  } // allow_inline

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    const char* msg = "virtual call";
    if (C->print_inlining()) {
      print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
    }
    C->log_inline_failure(msg);
    if (IncrementalInlineVirtual && allow_inline) {
      return CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor); // attempt to inline through virtual call later
    } else {
      return CallGenerator::for_virtual_call(callee, vtable_index);
    }
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
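// For example (illustrative), a chain such as
//   String s = new StringBuilder().append("x").append(i).toString();
// is best optimized as a whole by the string-concatenation optimization,
// so the individual StringBuilder calls are held back as late inlines here.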
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return aggressive_unboxing();
  }
  return false;
}
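
// For example (illustrative): Integer.valueOf(int) is a boxing method;
// delaying its inlining keeps the call visible to the later
// box-elimination phase, which may remove the allocation entirely.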

bool Compile::should_delay_vector_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && call_method->is_vector_method();
}

bool Compile::should_delay_vector_reboxing_inlining(ciMethod* call_method, JVMState* jvms) {
  return EnableVectorSupport && (call_method->intrinsic_id() == vmIntrinsics::_VectorRebox);
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized. Uncommon-trap for not-initialized static or
  // v-calls. Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}

#ifdef ASSERT
static bool check_call_consistency(JVMState* jvms, CallGenerator* cg) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  ciMethod* resolved_method = cg->method();
  if (!ciMethod::is_consistent_info(symbolic_info, resolved_method)) {
    tty->print_cr("JVMS:");
    jvms->dump();
    tty->print_cr("Bytecode info:");
    jvms->method()->get_method_at_bci(jvms->bci())->print(); tty->cr();
    tty->print_cr("Resolved method:");
    cg->method()->print(); tty->cr();
    return false;
  }
  return true;
}
#endif // ASSERT

//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a method that eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  C->print_inlining_assert_ready();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // Bump max node limit for JSR292 users
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
    C->set_max_node_limit(3*MaxNodeLimit);
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg, /* require_const= */ true);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.
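  //
  // Sketch of the profiled-receiver shape referred to above (illustrative,
  // not actual generated code):
  //   if (receiver->klass == profiled_klass) {
  //     ... inlined body of profiled_klass::method ...
  //   } else {
  //     uncommon_trap or virtual call    // the "miss" path
  //   }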

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;

  // Speculative type of the receiver if any
  ciKlass* speculative_receiver_type = NULL;
  if (is_virtual_or_interface) {
    Node* receiver_node             = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
    // For arrays, klass below is Object. When vtable calls are used,
    // resolving the call with Object would allow an illegal call to
    // finalize() on an array. We use holder instead: illegal calls to
    // finalize() won't be compiled as vtable calls (IC call
    // resolution will catch the illegal call) and the few legal calls
    // on array types won't be either.
    callee = C->optimize_virtual_call(method(), klass, holder, orig_callee,
                                      receiver_type, is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
    speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
  }

  // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface.
  ciKlass* receiver_constraint = NULL;
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) {
    ciInstanceKlass* calling_klass = method()->holder();
    ciInstanceKlass* sender_klass =
        calling_klass->is_unsafe_anonymous() ? calling_klass->unsafe_anonymous_host() :
                                               calling_klass;
    if (sender_klass->is_interface()) {
      receiver_constraint = sender_klass;
    }
  } else if (iter().cur_bc_raw() == Bytecodes::_invokeinterface && orig_callee->is_private()) {
    assert(holder->is_interface(), "How did we get a non-interface method here!");
    receiver_constraint = holder;
  }

  if (receiver_constraint != NULL) {
    Node* receiver_node = stack(sp() - nargs);
    Node* cls_node = makecon(TypeKlassPtr::make(receiver_constraint));
    Node* bad_type_ctrl = NULL;
    Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
    if (bad_type_ctrl != NULL) {
      PreserveJVMState pjvms(this);
      set_control(bad_type_ctrl);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_none);
    }
    if (stopped()) {
      return; // MUST uncommon-trap?
    }
    set_stack(sp() - nargs, casted_receiver);
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);

  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

  // Feed profiling data for arguments to the type system so it can
  // propagate it as speculative types
  record_profiled_arguments_for_speculation(cg->method(), bc());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // The extra CheckCastPPs for speculative types mess with PhaseStringOpts
  if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
    // Feed profiling data for a single receiver to the type system so
    // it can propagate it as a speculative type
    receiver = record_profiled_receiver_for_speculation(receiver);
  }

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. Should always be possible to
    // get a normal java call that may inline in that case
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    new_jvms = cg->generate(jvms);
    if (new_jvms == NULL) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  assert(check_call_consistency(jvms, cg), "inconsistent info");

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing.  These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), "must match: rt=%s, ct=%s", type2name(rt), type2name(ct));
        } else if (is_reference_type(rt)) {
          assert(is_reference_type(ct), "rt=%s, ct=%s", type2name(rt), type2name(ct));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, "unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             "mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name());
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
    BasicType ct = ctype->basic_type();
    if (is_reference_type(ct)) {
      record_profiled_return_for_speculation();
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put a Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  bool default_handler = false;
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note: It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
    if (h_bci == -1) {
      default_handler = true;
    }
  }

  if (!default_handler) {
    bcis->append(-1);
    extypes->append(TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr());
  }

  int len = bcis->length();
  CatchNode* cn = new CatchNode(control(), i_o, len+1);
  Node* catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform(new CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node* ex_oop = _gvn.transform(new CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                   // Else jump to corresponding handler
      push_ex_oop(ex_oop);     // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform(new CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or are not loaded at
// compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
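// Illustrative shape of the case-2 check (assumed, not actual generated code):
//   if (ex->klass is a subtype of Handler1Klass) goto handler1;
//   else if (ex->klass is a subtype of Handler2Klass) goto handler2;
//   else rethrow to the caller;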
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr(ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_klass_node = new PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
      for (uint i = 1; i < ex_node->req(); i++) {
        Node* ex_in = ex_node->in(i);
        if (ex_in == top() || ex_in == NULL) {
          // This path was not taken.
          ex_klass_node->init_req(i, top());
          continue;
        }
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
        ex_klass_node->init_req(i, k);
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
      merge_exception(handler_bci); // jump to handler
      return;                   // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {  // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr* tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if (CountCompiledCalls) {
    if (at_method_entry) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klass,
                                         ciKlass* holder, ciMethod* callee,
                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index,
                                         bool check_access) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, klass, callee,
                                                         receiver_type, check_access);

  // Has the call been sufficiently improved such that it is no longer virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass,
                                     ciMethod* callee, const TypeOopPtr* receiver_type,
                                     bool check_access) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
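  // For example (illustrative), a private method, or a method of a final class
  //   final class A { void f() { ... } }
  // can never be overridden, so such a callee is returned directly below
  // without consulting CHA.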
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    // finalize() call on array is not allowed.
    if (receiver_type->isa_aryptr() &&
        callee->holder() == env()->Object_klass() &&
        callee->name() != ciSymbol::finalize_method_name()) {
      return callee;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass* ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is a same or better type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass*   calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (log() != NULL) {
        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                    log()->identify(klass),
                    log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }

  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    assert(!callee->can_be_statically_bound(), "should have been handled earlier");
    assert(!cha_monomorphic_target->is_abstract(), "");
    if (!cha_monomorphic_target->can_be_statically_bound(actual_receiver)) {
      // If we inlined because CHA revealed only a single target method,
      // then we are dependent on that target method not getting overridden
      // by dynamic class loading.  Be sure to test the "static" receiver
      // dest_method here, as opposed to the actual receiver, which may
      // falsely lead us to believe that the receiver is final or private.
      dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    }
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
      return exact_method;
    }
  }

  return NULL;
}