/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

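// A call site counts as an inlined method handle intrinsic when its symbolic
// info (the method named at the bytecode) is a MethodHandle linker/invoker
// intrinsic while the actual target m is a concrete method, i.e. the
// MH.linkTo*/invokeBasic adapter has been looked through by the compiler.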
bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

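// do_late_inline() replaces the placeholder CallStaticJavaNode emitted by
// generate() with the inlined body, once incremental inlining decides the
// call is worth it. The steps below: validate that the call node is still
// live and well-formed, bail out on unreachable loops through the call's
// own projections, rebuild a fresh SafePointNode/JVMState describing the
// state at the call, then drive _inline_cg->generate() on it and splice
// the result in via GraphKit::replace_call().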
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call node has gone dead or its control is top.
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  if (_is_pure_call && result_not_used) {
    // The call is marked as pure (no important side effects), but its result
    // isn't used.  It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // This check is done here because for_method_handle_inline() method
    // needs jvms for inlined state.
    if (!do_late_inline_check(jvms)) {
      map->disconnect_inputs(NULL, C);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = _inline_cg->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
    C->env()->notice_inlined_method(_inline_cg->method());
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

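// Retry logic for method handle call sites: each round of incremental
// inlining calls do_late_inline_check(); if the MethodHandle/MemberName
// input has become constant in the meantime, a real inline CallGenerator
// is produced and installed as _inline_cg.  Otherwise the call node keeps
// a reference to this generator so a later round can try again.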
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

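// generate() emits the cold (out-of-line) call first, then walks the
// control chain upward (CatchProj -> Catch -> Proj -> CallJava) to find
// the call node it just emitted; if found, the site is queued as a warm
// call so it can later be upgraded to the hot (inline) version.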
JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual() const  { return true; }
  virtual bool      is_inline() const   { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

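// for_method_handle_inline() tries to look through a MethodHandle linker
// intrinsic at a call site: for _invokeBasic it needs a constant MethodHandle
// receiver, for the _linkTo* linkers a constant trailing MemberName argument.
// On success it returns a CallGenerator for the resolved target; when the
// relevant input is not a constant it leaves input_not_const set and
// returns NULL so the caller can defer the decision.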
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual() const   { return true; }
  virtual bool      is_inlined() const   { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_trap;
  //    if (predicate(0))
  //        do_intrinsic(0);
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1);
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from actual method being called (i.e. _linkTo* sites).
  // Use callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)
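// For example, with these constants a 100-bytecode method is estimated at
// 30.0 + 9.5 * 100 = 980 graph nodes; see WarmCallInfo::init() below.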

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:  Combine count, profit, and size into a single ranking value
// used to order the warm-call queue.
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

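// The warm-call list is kept sorted warmest-first: insert_into() walks past
// every entry warmer than this one and links this in front of the remainder;
// remove_from() is the matching unlink.  Both return the (possibly new) head.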
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT