/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "utilities/debug.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}
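
// Example (illustrative sketch, based on the uses further down in this file):
// at a MethodHandle linker call site the symbolic target recorded at the bci
// is an intrinsic such as MethodHandle::linkToVirtual, while the method that
// is actually being emitted is the resolved concrete callee. In that case the
// predicate above returns true, and DirectCallGenerator/VirtualCallGenerator
// set override_symbolic_info on the call so that
// SharedRuntime::resolve_*_call_C resolves against the attached method rather
// than the symbolic MemberName information.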

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

protected:
  void set_call_node(CallStaticJavaNode* call) { _call_node = call; }

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  virtual CallGenerator* with_call_node(CallNode* call) {
    DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
    dcg->set_call_node(call->as_CallStaticJava());
    return dcg;
  }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to MH.linkTo*/invokeBasic adapter,
    // additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
  bool _separate_io_proj;
  CallDynamicJavaNode* _call_node;

protected:
  void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
    : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);

  virtual CallNode* call_node() const { return _call_node; }
  int vtable_index() const { return _vtable_index; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later

  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
}
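
// Note (illustrative, inferred from the call sites referenced in this file):
// these factories are the basic building blocks the compiler combines when
// deciding how to emit a call. Compile::call_generator() (used below) hands
// out a ParseGenerator when it wants to inline, a DirectCallGenerator for
// statically bound out-of-line calls, and a VirtualCallGenerator when the
// call really has to dispatch; the Predicted* and LateInline* generators
// defined later in this file wrap these basic ones.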

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  jlong _unique_id;   // unique id for log compilation
  bool _is_pure_call; // a hint that the call doesn't have important side effects to care about

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
    DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}
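
// Illustrative summary of the flow implemented above (no additional behavior):
// during parsing LateInlineCallGenerator::generate() only emits a regular
// CallStaticJava (with separate memory and I/O projections) and registers the
// generator through Compile::add_late_inline(). When the compiler later
// revisits queued late inlines, do_late_inline() runs
// CallGenerator::do_late_inline_helper() (below), which rebuilds a
// JVMState/map from the call's inputs, re-parses the callee through the
// wrapped _inline_cg, and finally replaces the call node with the inlined
// subgraph via GraphKit::replace_call().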

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  bool _input_not_const;

  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, _input_not_const);
  assert(!_input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    C->dec_number_of_mh_late_inlines();
    return true;
  } else {
    // Method handle call which has a constant appendix argument should be either inlined or replaced with a direct call
    // unless there's a signature mismatch between caller and callee. If the failure occurs, there's not much to be improved later,
    // so don't reinstall the generator to avoid pushing the generator between IGVN and incremental inlining indefinitely.
    return false;
  }
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  assert(IncrementalInlineMH, "required");
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}
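
// Note (restating the bookkeeping above): for_mh_late_inline() bumps the
// Compile-level count of pending method-handle late inlines, and the matching
// dec_number_of_mh_late_inlines() happens in
// LateInlineMHCallGenerator::do_late_inline_check() once a concrete generator
// has been installed, presumably so the rest of the compiler can tell whether
// any MH late inlines are still outstanding.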

// Allow inlining decisions to be delayed
class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 private:
  jlong          _unique_id;   // unique id for log compilation
  CallGenerator* _inline_cg;
  ciMethod*      _callee;
  bool           _is_pure_call;
  float          _prof_factor;

 protected:
  virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
  virtual CallGenerator* inline_cg() const { return _inline_cg; }
  virtual bool is_pure_call() const { return _is_pure_call; }

 public:
  LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
  : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
    _unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) {}

  virtual bool is_late_inline() const { return true; }

  virtual bool is_virtual_late_inline() const { return true; }

  // Convert the CallDynamicJava into an inline
  virtual void do_late_inline();

  virtual void set_callee_method(ciMethod* m) {
    assert(_callee == NULL, "repeated inlining attempt");
    _callee = m;
  }

  virtual JVMState* generate(JVMState* jvms) {
    // Emit the CallDynamicJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
    if (call_node() != NULL) {
      call_node()->set_generator(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
    cg->set_call_node(call->as_CallDynamicJava());
    return cg;
  }
};

bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
  // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
  // Unless inlining is performed, _override_symbolic_info bit will be set in DirectCallGenerator::generate().

  // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
  bool allow_inline = C->inlining_incrementally();
  CallGenerator* cg = C->call_generator(_callee,
                                        vtable_index(),
                                        false /*call_does_dispatch*/,
                                        jvms,
                                        allow_inline,
                                        _prof_factor,
                                        NULL /*speculative_receiver_type*/,
                                        true /*allow_intrinsics*/);

  if (cg != NULL) {
    assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining");
    _inline_cg = cg;
    return true;
  } else {
    // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
    assert(false, "no progress");
    return false;
  }
}

CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
  assert(IncrementalInlineVirtual, "required");
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
}

void LateInlineCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineMHCallGenerator::do_late_inline() {
  CallGenerator::do_late_inline_helper();
}

void LateInlineVirtualCallGenerator::do_late_inline() {
  assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal
  CallGenerator::do_late_inline_helper();
}

void CallGenerator::do_late_inline_helper() {
  assert(is_late_inline(), "only late inline allowed");

  // Can't inline it
  CallNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }
  if (call->in(TypeFunc::Memory)->is_MergeMem()) {
    MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
    if (merge_mem->base_memory() == merge_mem->empty_memory()) {
      return; // dead path
    }
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if ((callprojs.fallthrough_catchproj == call->in(0)) ||
      (callprojs.catchall_catchproj    == call->in(0)) ||
      (callprojs.fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
      (callprojs.catchall_memproj      == call->in(TypeFunc::Memory)) ||
      (callprojs.fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
      (callprojs.catchall_ioproj       == call->in(TypeFunc::I_O)) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj   != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0);
  if (is_pure_call() && result_not_used) {
    // The call is marked as pure (no important side effects), but result isn't used.
    // It's safe to remove the call.
    GraphKit kit(call->jvms());
    kit.replace_call(call, C->top(), true);
  } else {
    // Make a clone of the JVMState that is appropriate to use for driving a parse
    JVMState* old_jvms = call->jvms();
    JVMState* jvms = old_jvms->clone_shallow(C);
    uint size = call->req();
    SafePointNode* map = new SafePointNode(size, jvms);
    for (uint i1 = 0; i1 < size; i1++) {
      map->init_req(i1, call->in(i1));
    }

    // Make sure the state is a MergeMem for parsing.
    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
      Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
      C->initial_gvn()->set_type_bottom(mem);
      map->set_req(TypeFunc::Memory, mem);
    }

    uint nargs = method()->arg_size();
    // blow away old call arguments
    Node* top = C->top();
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_req(TypeFunc::Parms + i1, top);
    }
    jvms->set_map(map);

    // Make enough space in the expression stack to transfer
    // the incoming arguments and return value.
    map->ensure_stack(jvms, jvms->method()->max_stack());
    for (uint i1 = 0; i1 < nargs; i1++) {
      map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
    }

    C->print_inlining_assert_ready();

    C->print_inlining_move_to(this);

    C->log_late_inline(this);

    // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
    if (!do_late_inline_check(C, jvms)) {
      map->disconnect_inputs(C);
      C->print_inlining_update_delayed(this);
      return;
    }

    // Setup default node notes to be picked up by the inlining
    Node_Notes* old_nn = C->node_notes_at(call->_idx);
    if (old_nn != NULL) {
      Node_Notes* entry_nn = old_nn->clone(C);
      entry_nn->set_jvms(jvms);
      C->set_default_node_notes(entry_nn);
    }

    // Virtual call involves a receiver null check which can be made implicit.
    if (is_virtual_late_inline()) {
      GraphKit kit(jvms);
      kit.null_check_receiver();
      jvms = kit.transfer_exceptions_into_jvms();
    }

    // Now perform the inlining using the synthesized JVMState
    JVMState* new_jvms = inline_cg()->generate(jvms);
    if (new_jvms == NULL)  return;  // no change
    if (C->failing())      return;

    // Capture any exceptional control flow
    GraphKit kit(new_jvms);

    // Find the result object
    Node* result = C->top();
    int   result_size = method()->return_type()->size();
    if (result_size != 0 && !kit.stopped()) {
      result = (result_size == 1) ? kit.pop() : kit.pop_pair();
    }

    if (inline_cg()->is_inline()) {
      C->set_has_loops(C->has_loops() || inline_cg()->method()->has_loops());
      C->env()->notice_inlined_method(inline_cg()->method());
    }
    C->set_inlining_progress(true);
    C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
    kit.replace_call(call, result, true);
  }
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_vector_reboxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual CallGenerator* with_call_node(CallNode* call) {
    LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
    cg->set_call_node(call->as_CallStaticJava());
    return cg;
  }
};

//   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
}
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;
  bool           _exact_check;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, bool exact_check,
                         float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may report a hit_prob as extreme as 0 or 1.
    // Clamp such extreme values back into the valid probability range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
    _exact_check        = exact_check;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
                                    /*exact_check=*/true, hit_prob);
}

CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
                                               CallGenerator* if_missed,
                                               CallGenerator* if_hit) {
  return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
                                    /*exact_check=*/false, PROB_ALWAYS);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
              jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* casted_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = NULL;
  if (_exact_check) {
    slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
                                       &casted_receiver);
  } else {
    slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
                                          &casted_receiver);
  }

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The receiver instance does not match the predicted type; only the slow path remains.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, casted_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit ) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
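
// Shape of the graph produced above (illustrative sketch only):
//
//    receiver = null_check(receiver)
//    if (receiver->klass() == _predicted_receiver)   // or a subtype check
//        ... _if_hit path, usually inlined, sees casted_receiver ...
//    else
//        ... _if_missed path, usually the real virtual call ...
//    Region/Phi merge of control, i/o, memory and live stack slots
//
// so a profile-predicted monomorphic (or guarded) receiver turns the call
// into a cheap type check plus an inlinable direct path, with the virtual
// call kept only on the slow branch.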


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInlineMH && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}
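
// Decision summary for the factory above (restated, no new behavior): if the
// MethodHandle/MemberName argument is already a compile-time constant,
// for_method_handle_inline() can produce a generator for the real target right
// away (wrapped in an ordinary late inline when AlwaysIncrementalInline is
// set). Otherwise the call site is either queued as an MH late inline so it
// can be reconsidered during incremental inlining, or simply emitted as an
// out-of-line direct call to the linker intrinsic.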

class NativeCallGenerator : public CallGenerator {
private:
  ciNativeEntryPoint* _nep;
public:
  NativeCallGenerator(ciMethod* m, ciNativeEntryPoint* nep)
   : CallGenerator(m), _nep(nep) {}

  virtual JVMState* generate(JVMState* jvms);
};

JVMState* NativeCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  Node* call = kit.make_native_call(tf(), method()->arg_size(), _nep); // -fallback, - nep
  if (call == NULL) return NULL;

  kit.C->print_inlining_update(this);
  address addr = _nep->entry_point();
  if (kit.C->log() != NULL) {
    kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(addr));
  }

  return kit.transfer_exceptions_into_jvms();
}
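
// Note on the linkToNative path (summary of the code above and of the
// _linkToNative case below): when the trailing NativeEntryPoint argument is a
// constant, the Java-level linker call is replaced by a native call built via
// GraphKit::make_native_call() for that constant entry point; if that helper
// cannot build the call it returns NULL, which callers of generate() treat as
// "no graph was produced" (cf. the NULL checks on generate() results elsewhere
// in this file).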

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  if (StressMethodHandleLinkerInlining) {
    allow_inline = false;
  }
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              allow_inline,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            const Type* recv_type = arg_type->join_speculative(sig_type); // keep speculative part
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, recv_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              const Type* narrowed_arg_type = arg_type->join_speculative(sig_type); // keep speculative part
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              allow_inline,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

    case vmIntrinsics::_linkToNative:
    {
      Node* nep = kit.argument(callee->arg_size() - 1);
      if (nep->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = nep->bottom_type()->is_oopptr();
        ciNativeEntryPoint* nep = oop_ptr->const_oop()->as_native_entry_point();
        return new NativeCallGenerator(callee, nep);
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "NativeEntryPoint not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1262   // The code we want to generate here is:
1263   //    if (receiver == NULL)
1264   //        uncommon_Trap
1265   //    if (predicate(0))
1266   //        do_intrinsic(0)
1267   //    else
1268   //    if (predicate(1))
1269   //        do_intrinsic(1)
1270   //    ...
1271   //    else
1272   //        do_java_comp
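  //
  // (Illustrative note: in practice the predicated intrinsics are the crypto
  // intrinsics such as the cipherBlockChaining/counterMode AES stubs, where the
  // predicate checks that the receiver's embedded cipher really is the expected
  // AES implementation before dispatching to the intrinsic stub.)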
1273 
1274   GraphKit kit(jvms);
1275   PhaseGVN& gvn = kit.gvn();
1276 
1277   CompileLog* log = kit.C->log();
1278   if (log != NULL) {
1279     log->elem("predicated_intrinsic bci='%d' method='%d'",
1280               jvms->bci(), log->identify(method()));
1281   }
1282 
1283   if (!method()->is_static()) {
1284     // We need an explicit receiver null_check before checking its type in the predicate.
1285     // We share a map with the caller, so the caller's JVMS gets adjusted.
1286     Node* receiver = kit.null_check_receiver_before_call(method());
1287     if (kit.stopped()) {
1288       return kit.transfer_exceptions_into_jvms();
1289     }
1290   }
1291 
1292   int n_predicates = _intrinsic->predicates_count();
1293   assert(n_predicates > 0, "sanity");
1294 
1295   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1296 
1297   // Region for normal compilation code if intrinsic failed.
1298   Node* slow_region = new RegionNode(1);
1299 
1300   int results = 0;
1301   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1302 #ifdef ASSERT
1303     JVMState* old_jvms = kit.jvms();
1304     SafePointNode* old_map = kit.map();
1305     Node* old_io  = old_map->i_o();
1306     Node* old_mem = old_map->memory();
1307     Node* old_exc = old_map->next_exception();
1308 #endif
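    // Contract assumed here: generate_predicate() leaves the path on which this
    // predicate holds as the current control (the intrinsic is emitted on it
    // below) and returns the control on which the predicate fails, or NULL if
    // there is no failing path.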
1309     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1310 #ifdef ASSERT
1311     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1312     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1313     SafePointNode* new_map = kit.map();
1314     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1315     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1316     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1317 #endif
1318     if (!kit.stopped()) {
1319       PreserveJVMState pjvms(&kit);
1320       // Generate intrinsic code:
1321       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1322       if (new_jvms == NULL) {
1323         // Intrinsic failed, use normal compilation path for this predicate.
1324         slow_region->add_req(kit.control());
1325       } else {
1326         kit.add_exception_states_from(new_jvms);
1327         kit.set_jvms(new_jvms);
1328         if (!kit.stopped()) {
1329           result_jvms[results++] = kit.jvms();
1330         }
1331       }
1332     }
1333     if (else_ctrl == NULL) {
1334       else_ctrl = kit.C->top();
1335     }
1336     kit.set_control(else_ctrl);
1337   }
1338   if (!kit.stopped()) {
1339     // Final 'else' after predicates.
1340     slow_region->add_req(kit.control());
1341   }
1342   if (slow_region->req() > 1) {
1343     PreserveJVMState pjvms(&kit);
1344     // Generate normal compilation code:
1345     kit.set_control(gvn.transform(slow_region));
1346     JVMState* new_jvms = _cg->generate(kit.sync_jvms());
1347     if (kit.failing())
1348       return NULL;  // might happen because of NodeCountInliningCutoff
1349     assert(new_jvms != NULL, "must be");
1350     kit.add_exception_states_from(new_jvms);
1351     kit.set_jvms(new_jvms);
1352     if (!kit.stopped()) {
1353       result_jvms[results++] = kit.jvms();
1354     }
1355   }
1356 
1357   if (results == 0) {
1358     // All paths ended in uncommon traps.
1359     (void) kit.stop();
1360     return kit.transfer_exceptions_into_jvms();
1361   }
1362 
1363   if (results == 1) { // Only one path
1364     kit.set_jvms(result_jvms[0]);
1365     return kit.transfer_exceptions_into_jvms();
1366   }
1367 
1368   // Merge all paths.
1369   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1370   RegionNode* region = new RegionNode(results + 1);
1371   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1372   for (int i = 0; i < results; i++) {
1373     JVMState* jvms = result_jvms[i];
1374     int path = i + 1;
1375     SafePointNode* map = jvms->map();
1376     region->init_req(path, map->control());
1377     iophi->set_req(path, map->i_o());
1378     if (i == 0) {
1379       kit.set_jvms(jvms);
1380     } else {
1381       kit.merge_memory(map->merged_memory(), region, path);
1382     }
1383   }
1384   kit.set_control(gvn.transform(region));
1385   kit.set_i_o(gvn.transform(iophi));
1386   // Transform new memory Phis.
1387   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1388     Node* phi = mms.memory();
1389     if (phi->is_Phi() && phi->in(0) == region) {
1390       mms.set_memory(gvn.transform(phi));
1391     }
1392   }
1393 
1394   // Merge debug info.
1395   Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1396   uint tos = kit.jvms()->stkoff() + kit.sp();
1397   Node* map = kit.map();
1398   uint limit = map->req();
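  // For each live local/stack slot in the current map (dead stack slots between
  // the stack top and the monitor area are skipped), check whether any other
  // result path computed a different value; if so, replace the slot with a Phi
  // merging the per-path values so the debug info remains correct at the merge.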
1399   for (uint i = TypeFunc::Parms; i < limit; i++) {
1400     // Skip unused stack slots; fast forward to monoff();
1401     if (i == tos) {
1402       i = kit.jvms()->monoff();
1403       if (i >= limit)  break;
1404     }
1405     Node* n = map->in(i);
1406     ins[0] = n;
1407     const Type* t = gvn.type(n);
1408     bool needs_phi = false;
1409     for (int j = 1; j < results; j++) {
1410       JVMState* jvms = result_jvms[j];
1411       Node* jmap = jvms->map();
1412       Node* m = NULL;
1413       if (jmap->req() > i) {
1414         m = jmap->in(i);
1415         if (m != n) {
1416           needs_phi = true;
1417           t = t->meet_speculative(gvn.type(m));
1418         }
1419       }
1420       ins[j] = m;
1421     }
1422     if (needs_phi) {
1423       Node* phi = PhiNode::make(region, n, t);
1424       for (int j = 1; j < results; j++) {
1425         phi->set_req(j + 1, ins[j]);
1426       }
1427       map->set_req(i, gvn.transform(phi));
1428     }
1429   }
1430 
1431   return kit.transfer_exceptions_into_jvms();
1432 }
1433 
1434 //-------------------------UncommonTrapCallGenerator-----------------------------
1435 // Internal class which replaces the call with an uncommon trap (deoptimization request).
1436 class UncommonTrapCallGenerator : public CallGenerator {
1437   Deoptimization::DeoptReason _reason;
1438   Deoptimization::DeoptAction _action;
1439 
1440 public:
1441   UncommonTrapCallGenerator(ciMethod* m,
1442                             Deoptimization::DeoptReason reason,
1443                             Deoptimization::DeoptAction action)
1444     : CallGenerator(m)
1445   {
1446     _reason = reason;
1447     _action = action;
1448   }
1449 
1450   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1451   virtual bool      is_trap() const             { return true; }
1452 
1453   virtual JVMState* generate(JVMState* jvms);
1454 };
1455 
1456 
1457 CallGenerator*
1458 CallGenerator::for_uncommon_trap(ciMethod* m,
1459                                  Deoptimization::DeoptReason reason,
1460                                  Deoptimization::DeoptAction action) {
1461   return new UncommonTrapCallGenerator(m, reason, action);
1462 }
1463 
1464 
1465 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1466   GraphKit kit(jvms);
1467   kit.C->print_inlining_update(this);
1468   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1469   // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
1470   // Use callsite signature always.
1471   ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1472   int nargs = declared_method->arg_size();
1473   kit.inc_sp(nargs);
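  // The arguments stay on the expression stack so that, after the uncommon trap
  // deoptimizes, the interpreter can re-execute the call bytecode with its
  // operands intact.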
1474   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1475   if (_reason == Deoptimization::Reason_class_check &&
1476       _action == Deoptimization::Action_maybe_recompile) {
1477     // Temp fix for 6529811
1478     // Don't allow uncommon_trap to override our decision to recompile in the event
1479     // of a class cast failure for a monomorphic call as it will never let us convert
1480     // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
1481     bool keep_exact_action = true;
1482     kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
1483   } else {
1484     kit.uncommon_trap(_reason, _action);
1485   }
1486   return kit.transfer_exceptions_into_jvms();
1487 }
1488 
1489 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1490 
1491 // (Note:  Merged hook_up_exits into ParseGenerator::generate.)
1492 
1493 #define NODES_OVERHEAD_PER_METHOD (30.0)
1494 #define NODES_PER_BYTECODE (9.5)
1495 
1496 void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
1497   int call_count = profile.count();
1498   int code_size = call_method->code_size();
1499 
1500   // Expected execution count is based on the historical count:
1501   _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);
1502 
1503   // Expected profit from inlining, in units of simple call-overheads.
1504   _profit = 1.0;
1505 
1506   // Expected work performed by the call in units of call-overheads.
1507   // %%% need an empirical curve fit for "work" (time in call)
1508   float bytecodes_per_call = 3;
1509   _work = 1.0 + code_size / bytecodes_per_call;
1510 
1511   // Expected size of compilation graph:
1512   // -XX:+PrintParseStatistics once reported:
1513   //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
1514   //  Histogram of 144298 parsed bytecodes:
1515   // %%% Need a better predictor for graph size.
1516   _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
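  // Worked example (illustrative only): a callee with code_size == 60 bytecodes
  // gives _work = 1.0 + 60/3 = 21.0 call-overheads and
  // _size = 30.0 + 9.5*60 = 600.0 expected nodes.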
1517 }
1518 
1519 // is_cold:  Return true if the node should never be inlined.
1520 // This is true if any of the key metrics are extreme.
1521 bool WarmCallInfo::is_cold() const {
1522   if (count()  <  WarmCallMinCount)        return true;
1523   if (profit() <  WarmCallMinProfit)       return true;
1524   if (work()   >  WarmCallMaxWork)         return true;
1525   if (size()   >  WarmCallMaxSize)         return true;
1526   return false;
1527 }
1528 
1529 // is_hot:  Return true if the node should be inlined immediately.
1530 // This is true if any of the key metrics are extreme.
1531 bool WarmCallInfo::is_hot() const {
1532   assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
1533   if (count()  >= HotCallCountThreshold)   return true;
1534   if (profit() >= HotCallProfitThreshold)  return true;
1535   if (work()   <= HotCallTrivialWork)      return true;
1536   if (size()   <= HotCallTrivialSize)      return true;
1537   return false;
1538 }
1539 
1540 // compute_heat:
1541 float WarmCallInfo::compute_heat() const {
1542   assert(!is_cold(), "compute heat only on warm nodes");
1543   assert(!is_hot(),  "compute heat only on warm nodes");
1544   int min_size = MAX2(0,   (int)HotCallTrivialSize);
1545   int max_size = MIN2(500, (int)WarmCallMaxSize);
1546   float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
1547   float size_factor;
1548   if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
1549   else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
1550   else if (method_size < 0.5)   size_factor = 1;   // better than avg.
1551   else                          size_factor = 0.5; // worse than avg.
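  // Worked example (illustrative, assuming min_size == 0 and max_size == 500):
  // size() == 50 gives method_size == 0.1, so size_factor == 2 and the
  // resulting heat is count() * profit() * 2.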
1552   return (count() * profit() * size_factor);
1553 }
1554 
1555 bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
1556   assert(this != that, "compare only different WCIs");
1557   assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
1558   if (this->heat() > that->heat())   return true;
1559   if (this->heat() < that->heat())   return false;
1560   assert(this->heat() == that->heat(), "no NaN heat allowed");
1561   // Equal heat.  Break the tie some other way.
1562   if (!this->call() || !that->call())  return (address)this > (address)that;
1563   return this->call()->_idx > that->call()->_idx;
1564 }
1565 
1566 //#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
1567 #define UNINIT_NEXT ((WarmCallInfo*)NULL)
1568 
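// insert_into keeps the work list sorted by decreasing heat: walk past every
// entry warmer than this one, then splice this entry in (possibly as the new
// head) and return the resulting head of the list.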
1569 WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
1570   assert(next() == UNINIT_NEXT, "not yet on any list");
1571   WarmCallInfo* prev_p = NULL;
1572   WarmCallInfo* next_p = head;
1573   while (next_p != NULL && next_p->warmer_than(this)) {
1574     prev_p = next_p;
1575     next_p = prev_p->next();
1576   }
1577   // Install this between prev_p and next_p.
1578   this->set_next(next_p);
1579   if (prev_p == NULL)
1580     head = this;
1581   else
1582     prev_p->set_next(this);
1583   return head;
1584 }
1585 
1586 WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
1587   WarmCallInfo* prev_p = NULL;
1588   WarmCallInfo* next_p = head;
1589   while (next_p != this) {
1590     assert(next_p != NULL, "this must be in the list somewhere");
1591     prev_p = next_p;
1592     next_p = prev_p->next();
1593   }
1594   next_p = this->next();
1595   debug_only(this->set_next(UNINIT_NEXT));
1596   // Remove this from between prev_p and next_p.
1597   if (prev_p == NULL)
1598     head = next_p;
1599   else
1600     prev_p->set_next(next_p);
1601   return head;
1602 }
1603 
1604 WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
1605                                        WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
1606 WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
1607                                         WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());
1608 
1609 WarmCallInfo* WarmCallInfo::always_hot() {
1610   assert(_always_hot.is_hot(), "must always be hot");
1611   return &_always_hot;
1612 }
1613 
1614 WarmCallInfo* WarmCallInfo::always_cold() {
1615   assert(_always_cold.is_cold(), "must always be cold");
1616   return &_always_cold;
1617 }
1618 
1619 
1620 #ifndef PRODUCT
1621 
1622 void WarmCallInfo::print() const {
1623   tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
1624              is_cold() ? "cold" : is_hot() ? "hot " : "warm",
1625              count(), profit(), work(), size(), compute_heat(), next());
1626   tty->cr();
1627   if (call() != NULL)  call()->dump();
1628 }
1629 
1630 void print_wci(WarmCallInfo* ci) {
1631   ci->print();
1632 }
1633 
1634 void WarmCallInfo::print_all() const {
1635   for (const WarmCallInfo* p = this; p != NULL; p = p->next())
1636     p->print();
1637 }
1638 
1639 int WarmCallInfo::count_all() const {
1640   int cnt = 0;
1641   for (const WarmCallInfo* p = this; p != NULL; p = p->next())
1642     cnt++;
1643   return cnt;
1644 }
1645 
1646 #endif //PRODUCT
1647