/*
 * Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/tieredThresholdPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "code/scopeDesc.hpp"
#include "oops/method.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

#ifdef TIERED

#include "c1/c1_Compiler.hpp"
#include "opto/c2compiler.hpp"

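// Call predicate: decide whether the method should move up from cur_level based on
// its invocation count i and backedge count b. A worked example, assuming the
// default flag values Tier3InvocationThreshold=200, Tier3MinInvocationThreshold=100
// and Tier3CompileThreshold=2000 with scale=1: an interpreted method with i=150 and
// b=1900 fails the first clause (150 < 200) but passes the second
// (150 >= 100 && 150 + 1900 >= 2000), so it becomes eligible for a level 3 compile.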
bool TieredThresholdPolicy::call_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
  double threshold_scaling;
  if (CompilerOracle::has_option_value(method, CompileCommand::CompileThresholdScaling, threshold_scaling)) {
    scale *= threshold_scaling;
  }
  switch(cur_level) {
  case CompLevel_aot:
    if (CompilationModeFlag::disable_intermediate()) {
      return (i >= Tier0AOTInvocationThreshold * scale) ||
             (i >= Tier0AOTMinInvocationThreshold * scale && i + b >= Tier0AOTCompileThreshold * scale);
    } else {
      return (i >= Tier3AOTInvocationThreshold * scale) ||
             (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale);
    }
  case CompLevel_none:
    if (CompilationModeFlag::disable_intermediate()) {
      return (i >= Tier40InvocationThreshold * scale) ||
             (i >= Tier40MinInvocationThreshold * scale && i + b >= Tier40CompileThreshold * scale);
    }
    // Fall through
  case CompLevel_limited_profile:
    return (i >= Tier3InvocationThreshold * scale) ||
           (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
  case CompLevel_full_profile:
    return (i >= Tier4InvocationThreshold * scale) ||
           (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
  default:
    return true;
  }
}

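// Loop (OSR) predicate: the backedge count alone decides. For example, assuming
// the default Tier3BackEdgeThreshold=60000 and scale=1, an interpreted method
// whose loop has taken 60000 backedges becomes eligible for a level 3 OSR compile.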
bool TieredThresholdPolicy::loop_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
  double threshold_scaling;
  if (CompilerOracle::has_option_value(method, CompileCommand::CompileThresholdScaling, threshold_scaling)) {
    scale *= threshold_scaling;
  }
  switch(cur_level) {
  case CompLevel_aot:
    if (CompilationModeFlag::disable_intermediate()) {
      return b >= Tier0AOTBackEdgeThreshold * scale;
    } else {
      return b >= Tier3AOTBackEdgeThreshold * scale;
    }
  case CompLevel_none:
    if (CompilationModeFlag::disable_intermediate()) {
      return b >= Tier40BackEdgeThreshold * scale;
    }
    // Fall through
  case CompLevel_limited_profile:
    return b >= Tier3BackEdgeThreshold * scale;
  case CompLevel_full_profile:
    return b >= Tier4BackEdgeThreshold * scale;
  default:
    return true;
  }
}

// For simple methods, C1-compiled code is as good as C2-compiled code.
// Determine if a given method is such a case.
bool TieredThresholdPolicy::is_trivial(Method* method) {
  if (method->is_accessor() ||
      method->is_constant_getter()) {
    return true;
  }
  return false;
}

bool TieredThresholdPolicy::force_comp_at_level_simple(const methodHandle& method) {
  if (CompilationModeFlag::quick_internal()) {
#if INCLUDE_JVMCI
    if (UseJVMCICompiler) {
      AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
      if (comp != NULL && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
        return true;
      }
    }
#endif
  }
  return false;
}

CompLevel TieredThresholdPolicy::comp_level(Method* method) {
  CompiledMethod *nm = method->code();
  if (nm != NULL && nm->is_in_use()) {
    return (CompLevel)nm->comp_level();
  }
  return CompLevel_none;
}

void TieredThresholdPolicy::print_counters(const char* prefix, Method* m) {
  int invocation_count = m->invocation_count();
  int backedge_count = m->backedge_count();
  MethodData* mdh = m->method_data();
  int mdo_invocations = 0, mdo_backedges = 0;
  int mdo_invocations_start = 0, mdo_backedges_start = 0;
  if (mdh != NULL) {
    mdo_invocations = mdh->invocation_count();
    mdo_backedges = mdh->backedge_count();
    mdo_invocations_start = mdh->invocation_count_start();
    mdo_backedges_start = mdh->backedge_count_start();
  }
  tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
      invocation_count, backedge_count, prefix,
      mdo_invocations, mdo_invocations_start,
      mdo_backedges, mdo_backedges_start);
  tty->print(" %smax levels=%d,%d", prefix,
      m->highest_comp_level(), m->highest_osr_comp_level());
}

// Print an event.
void TieredThresholdPolicy::print_event(EventType type, Method* m, Method* im,
                                        int bci, CompLevel level) {
  bool inlinee_event = m != im;

  ttyLocker tty_lock;
  tty->print("%lf: [", os::elapsedTime());

  switch(type) {
  case CALL:
    tty->print("call");
    break;
  case LOOP:
    tty->print("loop");
    break;
  case COMPILE:
    tty->print("compile");
    break;
  case REMOVE_FROM_QUEUE:
    tty->print("remove-from-queue");
    break;
  case UPDATE_IN_QUEUE:
    tty->print("update-in-queue");
    break;
  case REPROFILE:
    tty->print("reprofile");
    break;
  case MAKE_NOT_ENTRANT:
    tty->print("make-not-entrant");
    break;
  default:
    tty->print("unknown");
  }

  tty->print(" level=%d ", level);

  ResourceMark rm;
  char *method_name = m->name_and_sig_as_C_string();
  tty->print("[%s", method_name);
  if (inlinee_event) {
    char *inlinee_name = im->name_and_sig_as_C_string();
    tty->print(" [%s]] ", inlinee_name);
  } else {
    tty->print("] ");
  }
  tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
                                      CompileBroker::queue_size(CompLevel_full_optimization));

  tty->print(" rate=");
  if (m->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", m->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

  if (type != COMPILE) {
    print_counters("", m);
    if (inlinee_event) {
      print_counters("inlinee ", im);
    }
    tty->print(" compilable=");
    bool need_comma = false;
    if (!m->is_not_compilable(CompLevel_full_profile)) {
      tty->print("c1");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
      if (need_comma) tty->print(",");
      tty->print("c1-osr");
      need_comma = true;
    }
    if (!m->is_not_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
      if (need_comma) tty->print(",");
      tty->print("c2-osr");
    }
    tty->print(" status=");
    if (m->queued_for_compilation()) {
      tty->print("in-queue");
    } else {
      tty->print("idle");
    }
  }
  tty->print_cr("]");
}


void TieredThresholdPolicy::initialize() {
  int count = CICompilerCount;
  bool c1_only = TieredStopAtLevel < CompLevel_full_optimization || CompilationModeFlag::quick_only();
  bool c2_only = CompilationModeFlag::high_only();
#ifdef _LP64
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_int(os::active_processor_count());
    int loglog_cpu = log2_int(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
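    // For example, with 16 active processors: log_cpu = log2(16) = 4,
    // loglog_cpu = log2(4) = 2, so count = MAX2(4 * 2 * 3 / 2, 2) = 12.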
    // Make sure there is enough space in the code cache to hold all the compiler buffers
    size_t c1_size = Compiler::code_buffer_size();
    size_t c2_size = C2Compiler::initial_code_buffer_size();
    size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
    int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
    if (count > max_count) {
      // Lower the compiler count such that all buffers fit into the code cache
      count = MAX2(max_count, c1_only ? 1 : 2);
    }
    FLAG_SET_ERGO(CICompilerCount, count);
  }
#else
  // On 32-bit systems, the number of compiler threads is limited to 3.
  // On these systems, the virtual address space available to the JVM
  // is usually limited to 2-4 GB (the exact value depends on the platform).
  // As the compilers (especially C2) can consume a large amount of
  // memory, scaling the number of compiler threads with the number of
  // available cores can result in the exhaustion of the address space
  // available to the VM and thus cause the VM to crash.
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    count = 3;
    FLAG_SET_ERGO(CICompilerCount, count);
  }
#endif

  if (c1_only) {
    // No C2 compiler thread required
    set_c1_count(count);
  } else if (c2_only) {
    set_c2_count(count);
  } else {
    set_c1_count(MAX2(count / 3, 1));
    set_c2_count(MAX2(count - c1_count(), 1));
  }
  assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");

  // Some inlining tuning
#if defined(X86) || defined(AARCH64)
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(nanos_to_millis(os::javaTimeNanos()));
}


#ifdef ASSERT
bool TieredThresholdPolicy::verify_level(CompLevel level) {
  // AOT and interpreter levels are always valid.
  if (level == CompLevel_aot || level == CompLevel_none) {
    return true;
  }
  if (CompilationModeFlag::normal()) {
    return true;
  } else if (CompilationModeFlag::quick_only()) {
    return level == CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    return level == CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    return level == CompLevel_full_optimization || level == CompLevel_simple;
  }
  return false;
}
#endif


CompLevel TieredThresholdPolicy::limit_level(CompLevel level) {
  if (CompilationModeFlag::quick_only()) {
    level = MIN2(level, CompLevel_simple);
  }
  assert(verify_level(level), "Invalid compilation level %d", level);
  if (level <= TieredStopAtLevel) {
    return level;
  }
  // Some compilation levels are not valid depending on the compilation mode:
  // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
  // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
  // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
  // The invalid levels are actually sequential, so a single comparison is sufficient.
  // Down here we already have (level > TieredStopAtLevel), which also implies that
  // (TieredStopAtLevel < Highest Possible Level), so we need to return a level that is:
  // a) the maximum level that is strictly less than the highest for the given compilation mode, and
  // b) less than or equal to TieredStopAtLevel.
  if (CompilationModeFlag::normal() || CompilationModeFlag::quick_only()) {
    return (CompLevel)TieredStopAtLevel;
  }

  if (CompilationModeFlag::high_only() || CompilationModeFlag::high_only_quick_internal()) {
    return MIN2(CompLevel_none, (CompLevel)TieredStopAtLevel);
  }

  ShouldNotReachHere();
  return CompLevel_any;
}

CompLevel TieredThresholdPolicy::initial_compile_level_helper(const methodHandle& method) {
  if (CompilationModeFlag::normal()) {
    return CompLevel_full_profile;
  } else if (CompilationModeFlag::quick_only()) {
    return CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    return CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    if (force_comp_at_level_simple(method)) {
      return CompLevel_simple;
    } else {
      return CompLevel_full_optimization;
    }
  }
  ShouldNotReachHere();
  return CompLevel_any;
}

CompLevel TieredThresholdPolicy::initial_compile_level(const methodHandle& method) {
  return limit_level(initial_compile_level_helper(method));
}

// Set carry flags on the counters if necessary
void TieredThresholdPolicy::handle_counter_overflow(Method* method) {
  MethodCounters *mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->set_carry_on_overflow();
    mcs->backedge_counter()->set_carry_on_overflow();
  }
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    mdo->invocation_counter()->set_carry_on_overflow();
    mdo->backedge_counter()->set_carry_on_overflow();
  }
}

// Called with the queue locked and with at least one element
CompileTask* TieredThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_blocking_task = NULL;
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = nanos_to_millis(os::javaTimeNanos());
  // Iterate through the queue and find the method with the maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from the whitebox API don't become stale.
    if (task->is_unloaded() || (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method))) {
      if (!task->is_unloaded()) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
        }
        method->clear_queued_for_compilation();
      }
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    update_rate(t, method);
    if (max_task == NULL || compare_methods(method, max_method)) {
      // Select the method with the highest rate
      max_task = task;
      max_method = method;
    }

    if (task->is_blocking()) {
      if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != NULL) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  methodHandle max_method_h(Thread::current(), max_method);

  if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
      TieredStopAtLevel > CompLevel_full_profile &&
      max_method != NULL && is_method_profiled(max_method_h)) {
    max_task->set_comp_level(CompLevel_limited_profile);

    if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
      }
      compile_queue->remove_and_mark_stale(max_task);
      max_method->clear_queued_for_compilation();
      return NULL;
    }

    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

void TieredThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != NULL) {
      mdo->reset_start_counters();
    }
    if (sd->is_top()) break;
  }
}

nmethod* TieredThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee,
                                      int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS) {
  if (comp_level == CompLevel_none &&
      JvmtiExport::can_post_interpreter_events() &&
      THREAD->as_Java_thread()->is_interp_only_mode()) {
    return NULL;
  }
  if (ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    return NULL;
  }

  handle_counter_overflow(method());
  if (method() != inlinee()) {
    handle_counter_overflow(inlinee());
  }

  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
  }

  if (bci == InvocationEntryBci) {
    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
  } else {
    // method == inlinee if the event originated in the main method
    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
    // Check if the event led to a higher level OSR compilation
    CompLevel expected_comp_level = MIN2(CompLevel_full_optimization, static_cast<CompLevel>(comp_level + 1));
    if (!CompilationModeFlag::disable_intermediate() && inlinee->is_not_osr_compilable(expected_comp_level)) {
      // It's not possible to reach the expected level, so fall back to simple.
      expected_comp_level = CompLevel_simple;
    }
    CompLevel max_osr_level = static_cast<CompLevel>(inlinee->highest_osr_comp_level());
    if (max_osr_level >= expected_comp_level) { // fast check to avoid locking in a typical scenario
      nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
      assert(osr_nm == NULL || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
      if (osr_nm != NULL && osr_nm->comp_level() != comp_level) {
        // Perform OSR with the new nmethod
        return osr_nm;
      }
    }
  }
  return NULL;
}

// Check if the method can be compiled, change the level if necessary.
void TieredThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
  assert(verify_level(level) && level <= TieredStopAtLevel, "Invalid compilation level %d", level);

  if (level == CompLevel_none) {
    if (mh->has_compiled_code()) {
      // Happens when we switch from AOT to interpreter to profile.
      MutexLocker ml(Compile_lock);
      NoSafepointVerifier nsv;
      if (mh->has_compiled_code()) {
        mh->code()->make_not_used();
      }
      // Deoptimize immediately (we don't have to wait for a compile).
      JavaThread* jt = THREAD->as_Java_thread();
      RegisterMap map(jt, false);
      frame fr = jt->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(jt, fr.id());
    }
    return;
  }
  if (level == CompLevel_aot) {
    if (mh->has_aot_code()) {
      if (PrintTieredEvents) {
        print_event(COMPILE, mh(), mh(), bci, level);
      }
      MutexLocker ml(Compile_lock);
      NoSafepointVerifier nsv;
      if (mh->has_aot_code() && mh->code() != mh->aot_code()) {
        mh->aot_code()->make_entrant();
        if (mh->has_compiled_code()) {
          mh->code()->make_not_entrant();
        }
        MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
        Method::set_code(mh, mh->aot_code());
      }
    }
    return;
  }

  if (!CompilationModeFlag::disable_intermediate()) {
    // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
    // in the interpreter and then compile with C2 (the transition function will request that,
    // see common()). If the method cannot be compiled with C2 but still can with C1, compile it
    // with pure C1.
    if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
      if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
        compile(mh, bci, CompLevel_simple, THREAD);
      }
      return;
    }
    if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
      if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
        nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
        if (osr_nm != NULL && osr_nm->comp_level() > CompLevel_simple) {
          // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
          osr_nm->make_not_entrant();
        }
        compile(mh, bci, CompLevel_simple, THREAD);
      }
      return;
    }
  }
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh(), mh(), bci, level);
    }
    int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
    update_rate(nanos_to_millis(os::javaTimeNanos()), mh());
    CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, THREAD);
  }
}

// update_rate() is called from select_task() while holding a compile queue lock.
void TieredThresholdPolicy::update_rate(jlong t, Method* m) {
  // Skip the update if counters are absent.
  // Can't allocate them since we are holding the compile queue lock.
  if (m->method_counters() == NULL)  return;

  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
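      // For example, delta_e = 5000 new invocation and backedge events over
      // delta_t = 100 ms yield a rate of 50 events/ms.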
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        m->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool TieredThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool TieredThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

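// The weight combines the recent event rate with the absolute counter values.
// For example, a method with rate = 50, 10000 invocations and no backedges
// weighs (50 + 1) * (10000 + 1) * (0 + 1) = 510051.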
double TieredThresholdPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) *
    (method->invocation_count() + 1) * (method->backedge_count() + 1);
}

// Apply heuristics and return true if x should be compiled before y
bool TieredThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
bool TieredThresholdPolicy::is_method_profiled(const methodHandle& method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper(method, CompilationModeFlag::disable_intermediate() ? CompLevel_none : CompLevel_full_profile, i, b, 1);
  }
  return false;
}

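// Scales the compile thresholds with the load on the compiler queue. A sketch of
// the arithmetic, assuming the default Tier3LoadFeedback=5: with 30 methods queued
// for 2 C1 compiler threads, k = 30 / (5 * 2) + 1 = 4, so the level 3 thresholds
// are effectively quadrupled until the queue drains.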
double TieredThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  int comp_count = compiler_count(level);
  if (comp_count > 0) {
    double queue_size = CompileBroker::queue_size(level);
    double k = queue_size / (feedback_k * comp_count) + 1;

    // Increase C1 compile threshold when the code cache is filled more
    // than specified by IncreaseFirstTierCompileThresholdAt percentage.
    // The main intention is to keep enough free space for C2 compiled code
    // to achieve peak performance if the code cache is under stress.
    if (!CompilationModeFlag::disable_intermediate() && TieredStopAtLevel == CompLevel_full_optimization && level != CompLevel_full_optimization) {
      double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
      if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
        k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
      }
    }
    return k;
  }
  return 1;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, const methodHandle& method) {
  double k = 1;
  switch(cur_level) {
  case CompLevel_aot: {
    k = CompilationModeFlag::disable_intermediate() ? 1 : threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    break;
  }
  case CompLevel_none: {
    if (CompilationModeFlag::disable_intermediate()) {
      k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
  }
  // Fall through
  case CompLevel_limited_profile: {
    k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    break;
  }
  case CompLevel_full_profile: {
    k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    break;
  }
  default:
    return true;
  }
  return loop_predicate_helper(method, cur_level, i, b, k);
}

bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, const methodHandle& method) {
  double k = 1;
  switch(cur_level) {
  case CompLevel_aot: {
    k = CompilationModeFlag::disable_intermediate() ? 1 : threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    break;
  }
  case CompLevel_none: {
    if (CompilationModeFlag::disable_intermediate()) {
      k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
  }
  // Fall through
  case CompLevel_limited_profile: {
    k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    break;
  }
  case CompLevel_full_profile: {
    k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    break;
  }
  default:
    return true;
  }
  return call_predicate_helper(method, cur_level, i, b, k);
}

// Determine if a method is mature.
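// With the default ProfileMaturityPercentage of 20, a profile is considered
// mature once the MDO counters reach 20% of the corresponding compile
// thresholds (k = 0.2 below).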
bool TieredThresholdPolicy::is_mature(Method* method) {
  methodHandle mh(Thread::current(), method);
  if (is_trivial(method) || force_comp_at_level_simple(mh)) return true;
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();
    double k = ProfileMaturityPercentage / 100.0;
    CompLevel main_profile_level = CompilationModeFlag::disable_intermediate() ? CompLevel_none : CompLevel_full_profile;
    return call_predicate_helper(mh, main_profile_level, i, b, k) || loop_predicate_helper(mh, main_profile_level, i, b, k);
  }
  return false;
}

// If a method is old enough and is still in the interpreter, we want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool TieredThresholdPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
  if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || !ProfileInterpreter) {
    return false;
  }
  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = Tier0ProfilingStartPercentage / 100.0;
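  // With the default Tier0ProfilingStartPercentage of 200, k = 2.0, i.e. profiling
  // starts once the counters reach roughly twice the level 3 compile thresholds.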

  // If the top level compiler is not keeping up, delay profiling.
  if (CompileBroker::queue_size(CompLevel_full_optimization) <= (CompilationModeFlag::disable_intermediate() ? Tier0Delay : Tier3DelayOn) * compiler_count(CompLevel_full_optimization)) {
    return call_predicate_helper(method, CompLevel_none, i, b, k) || loop_predicate_helper(method, CompLevel_none, i, b, k);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool TieredThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void TieredThresholdPolicy::create_mdo(const methodHandle& mh, Thread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
  if (ProfileInterpreter) {
    MethodData* mdo = mh->method_data();
    if (mdo != NULL) {
      JavaThread* jt = THREAD->as_Java_thread();
      frame last_frame = jt->last_frame();
      if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
        int bci = last_frame.interpreter_frame_bci();
        address dp = mdo->bci_to_dp(bci);
        last_frame.interpreter_frame_set_mdp(dp);
      }
    }
  }
}


/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 or Graal (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2 while the request is still in-queue, because that allows it to run much faster
 *    without full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should transition to another level.
CompLevel TieredThresholdPolicy::common(Predicate p, const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (force_comp_at_level_simple(method)) {
    next_level = CompLevel_simple;
  } else {
    if (!CompilationModeFlag::disable_intermediate() && is_trivial(method())) {
      next_level = CompLevel_simple;
    } else {
      switch(cur_level) {
      default: break;
      case CompLevel_aot:
        if (CompilationModeFlag::disable_intermediate()) {
          if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                   Tier0Delay * compiler_count(CompLevel_full_optimization) &&
                                  (this->*p)(i, b, cur_level, method))) {
            next_level = CompLevel_none;
          }
        } else {
          // If we were at full profile level, would we switch to full opt?
          if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
            next_level = CompLevel_full_optimization;
          } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                          Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                         (this->*p)(i, b, cur_level, method))) {
            next_level = CompLevel_full_profile;
          }
        }
        break;
      case CompLevel_none:
        if (CompilationModeFlag::disable_intermediate()) {
          MethodData* mdo = method->method_data();
          if (mdo != NULL) {
            // If an MDO exists, that means we are in a normal profiling mode.
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          }
        } else {
          // If we were at full profile level, would we switch to full opt?
          if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
            next_level = CompLevel_full_optimization;
          } else if ((this->*p)(i, b, cur_level, method)) {
  #if INCLUDE_JVMCI
            if (EnableJVMCI && UseJVMCICompiler) {
              // Since JVMCI takes a while to warm up, its queue inevitably backs up during
              // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root
              // compilation method and all potential inlinees have mature profiles (which
              // includes type profiling). If it sees immature profiles, JVMCI's inliner
              // can perform pathologically badly (e.g., causing OutOfMemoryErrors due to
              // exploring/inlining too many graphs). Since a rewrite of the inliner is
              // in progress, we simply disable the dialing back heuristic for now and will
              // revisit this decision once the new inliner is completed.
              next_level = CompLevel_full_profile;
            } else
  #endif
            {
              // C1-generated fully profiled code is about 30% slower than the limited profile
              // code that has only invocation and backedge counters. The observation is that
              // if the C2 queue is large enough we can spend too much time in the fully profiled code
              // while waiting for C2 to pick the method from the queue. To alleviate this problem
              // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
              // we choose to compile a limited profiled version and then recompile with full profiling
              // when the load on C2 goes down.
              if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
                  Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
                next_level = CompLevel_limited_profile;
              } else {
                next_level = CompLevel_full_profile;
              }
            }
          }
        }
        break;
      case CompLevel_limited_profile:
        if (is_method_profiled(method)) {
          // Special case: we got here because this method was fully profiled in the interpreter.
          next_level = CompLevel_full_optimization;
        } else {
          MethodData* mdo = method->method_data();
          if (mdo != NULL) {
            if (mdo->would_profile()) {
              if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                       Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                       (this->*p)(i, b, cur_level, method))) {
                next_level = CompLevel_full_profile;
              }
            } else {
              next_level = CompLevel_full_optimization;
            }
          } else {
            // If there is no MDO we need to profile
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          }
        }
        break;
      case CompLevel_full_profile:
        {
          MethodData* mdo = method->method_data();
          if (mdo != NULL) {
            if (mdo->would_profile()) {
              int mdo_i = mdo->invocation_count_delta();
              int mdo_b = mdo->backedge_count_delta();
              if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
                next_level = CompLevel_full_optimization;
              }
            } else {
              next_level = CompLevel_full_optimization;
            }
          }
        }
        break;
      }
    }
  }
  return limit_level(next_level);
}


// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel TieredThresholdPolicy::call_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&TieredThresholdPolicy::call_predicate, method, cur_level);

  // If the OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel TieredThresholdPolicy::loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
  CompLevel next_level = common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, that means we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

bool TieredThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, Thread* thread) {
  if (UseAOT) {
    if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
      // If the current level is full profile or interpreter and we're switching to any other level,
      // switch the AOT code back in first so that we won't waste time overprofiling.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // Fall through for JIT compilation.
    }
    if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
      // If the next level is limited profile, use the AOT code (if there is any),
      // since it's essentially the same thing.
      compile(mh, InvocationEntryBci, CompLevel_aot, thread);
      // No need to JIT, we're done.
      return true;
    }
  }
  return false;
}


// Handle the invocation event.
void TieredThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                    CompLevel level, CompiledMethod* nm, TRAPS) {
  if (should_create_mdo(mh, level)) {
    create_mdo(mh, THREAD);
  }
  CompLevel next_level = call_event(mh, level, THREAD);
  if (next_level != level) {
    if (maybe_switch_to_aot(mh, level, next_level, THREAD)) {
      // No JITting necessary
      return;
    }
    if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
      compile(mh, InvocationEntryBci, next_level, THREAD);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                     int bci, CompLevel level, CompiledMethod* nm, TRAPS) {
  if (should_create_mdo(mh, level)) {
    create_mdo(mh, THREAD);
  }
  // Check if an MDO should be created for the inlined method
  if (should_create_mdo(imh, level)) {
    create_mdo(imh, THREAD);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh, level, THREAD);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
      compile(imh, bci, next_osr_level, CHECK);
    }

    // Use the loop event as an opportunity to also check if there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      if (level == CompLevel_aot) {
        // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
        if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
          CompLevel enclosing_level = limit_level(CompLevel_full_profile);
          compile(mh, InvocationEntryBci, enclosing_level, THREAD);
        }
      } else {
        // The current loop event level is not AOT
        guarantee(nm != NULL, "Should have nmethod here");
        cur_level = comp_level(mh());
        next_level = call_event(mh, cur_level, THREAD);

        if (max_osr_level == CompLevel_full_optimization) {
          // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
          bool make_not_entrant = false;
          if (nm->is_osr_method()) {
            // This is an OSR method, just make it not entrant and recompile later if needed
            make_not_entrant = true;
          } else {
            if (next_level != CompLevel_full_optimization) {
              // next_level is not full opt, so we need to recompile the
              // enclosing method without the inlinee
              cur_level = CompLevel_none;
              make_not_entrant = true;
            }
          }
          if (make_not_entrant) {
            if (PrintTieredEvents) {
              int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
              print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
            }
            nm->make_not_entrant();
          }
        }
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          if (!maybe_switch_to_aot(mh, cur_level, next_level, THREAD) && !CompileBroker::compilation_is_in_queue(mh)) {
            compile(mh, InvocationEntryBci, next_level, THREAD);
          }
        }
      }
    } else {
      cur_level = comp_level(mh());
      next_level = call_event(mh, cur_level, THREAD);
      if (next_level != cur_level) {
        if (!maybe_switch_to_aot(mh, cur_level, next_level, THREAD) && !CompileBroker::compilation_is_in_queue(mh)) {
          compile(mh, InvocationEntryBci, next_level, THREAD);
        }
      }
    }
  }
}

#endif