/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  return is_inlined_method_handle_intrinsic(symbolic_info, m);
}

bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
}
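
// For example, at a MethodHandle.invokeBasic/linkTo* call site the symbolic
// reference is the method handle intrinsic itself, while the inlined target
// may be a concrete method or a compiled lambda form; the predicate above is
// true exactly when the intrinsic adapter has been bypassed by inlining.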

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call and skip a call to the MH.linkTo*/invokeBasic
    // adapter, additional information about the method being invoked should be attached
    // to the call site to make resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However, currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
       ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
       >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_method_handle_intrinsic(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to the MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 private:
  // unique id for log compilation
  jlong _unique_id;

 protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  // check for unreachable loop
  CallProjections callprojs;
  call->extract_projections(&callprojs, true);
  if (callprojs.fallthrough_catchproj == call->in(0) ||
      callprojs.catchall_catchproj == call->in(0) ||
      callprojs.fallthrough_memproj == call->in(TypeFunc::Memory) ||
      callprojs.catchall_memproj == call->in(TypeFunc::Memory) ||
      callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O) ||
      callprojs.catchall_ioproj == call->in(TypeFunc::I_O) ||
      (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) ||
      (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) {
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because for_method_handle_inline() needs
  // the JVMState for the inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Set up default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}
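
// Note: the inc_number_of_mh_late_inlines() above is balanced by the
// dec_number_of_mh_late_inlines() call in do_late_inline_check() once the
// method handle call site is successfully converted to an inline.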

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles out-of-line calls guarded by a predicted
// receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance never exactly matches the desired type;
    // only the slow path remains.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        if (!ciMethod::is_consistent_info(callee, target)) {
          print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                                 "signatures mismatch");
          return NULL;
        }

        // In lambda forms we erase signature types to avoid resolution issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call, we must cast the receiver and arguments to their
        // actual types.
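        //
        // For example (signatures hypothetical): an erased lambda form type
        // (Object,Object)Object may stand in for an actual target type
        // (String,String)String; the CheckCastPP nodes emitted below pin the
        // receiver and reference arguments to their precise types.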
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index, // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              !StressMethodHandleLinkerInlining /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
                               "member_name not constant");
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //        uncommon_trap
  //    if (predicate(0))
  //        do_intrinsic(0)
  //    else
  //    if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in the predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles call sites that are compiled as uncommon traps.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver.)
  // The call site signature can be different from the actual method being called
  // (i.e. at _linkTo* sites).  Always use the call site signature.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call, as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops.
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
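
// A minimal worked example of the estimates above (input values hypothetical):
// for a 40-bytecode callee,
//   _work = 1.0 + 40 / 3     ~= 14.3  call-overhead units
//   _size = 30.0 + 9.5 * 40   = 410.0 estimated graph nodes
// while _count is the profiled call count rescaled by the caller's own
// invocation data via scale_count().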

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}
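
// For example (numbers hypothetical): a warm call with count() == 500,
// profit() == 1.0 and a normalized method_size in the "better than avg."
// band gets heat 500 * 1.0 * 1 = 500; an oversized method (size_factor 0.5)
// would rank at half that heat in the warm-call queue.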

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}
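
// The list is thus kept sorted warmest-first: insertion walks past every
// entry warmer than the new one, so the warmest remaining candidate is
// always at the head.  Insertion is O(n) in the number of queued calls.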

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT