/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/assembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
  {                                                                       \
    Method* m = (method);                                                 \
    if (m != NULL) {                                                      \
      Symbol* klass_name = m->klass_name();                               \
      Symbol* name = m->name();                                           \
      Symbol* signature = m->signature();                                 \
      HOTSPOT_COMPILED_METHOD_UNLOAD(                                     \
        (char *) klass_name->bytes(), klass_name->utf8_length(),          \
        (char *) name->bytes(), name->utf8_length(),                      \
        (char *) signature->bytes(), signature->utf8_length());           \
    }                                                                     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_METHOD_UNLOAD_PROBE(method)

#endif
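
// Illustrative note (not part of the VM): with DTrace available, the probe
// fired by the macro above surfaces as compiled-method-unload in the hotspot
// provider (probe name assumed from the HOTSPOT_COMPILED_METHOD_UNLOAD stub).
// A minimal sketch of watching it:
//
//   dtrace -n 'hotspot*:::compiled-method-unload { trace(arg1); }'
//
// A real script must copy in arg0/arg2/arg4 together with their length
// arguments, since the Symbol bytes passed above are not NUL-terminated.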

//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
//   PrintC1Statistics, PrintOptoStatistics, LogVMOutput, and LogCompilation.
// (In the latter two cases, they, like other stats, are printed only to the log.)

#ifndef PRODUCT
// These variables are put into one block to reduce relocations
// and make it simpler to print from the debugger.
struct java_nmethod_stats_struct {
  int nmethod_count;
  int total_size;
  int relocation_size;
  int consts_size;
  int insts_size;
  int stub_size;
  int scopes_data_size;
  int scopes_pcs_size;
  int dependencies_size;
  int handler_table_size;
  int nul_chk_table_size;
#if INCLUDE_JVMCI
  int speculations_size;
  int jvmci_data_size;
#endif
  int oops_size;
  int metadata_size;

  void note_nmethod(nmethod* nm) {
    nmethod_count += 1;
    total_size          += nm->size();
    relocation_size     += nm->relocation_size();
    consts_size         += nm->consts_size();
    insts_size          += nm->insts_size();
    stub_size           += nm->stub_size();
    oops_size           += nm->oops_size();
    metadata_size       += nm->metadata_size();
    scopes_data_size    += nm->scopes_data_size();
    scopes_pcs_size     += nm->scopes_pcs_size();
    dependencies_size   += nm->dependencies_size();
    handler_table_size  += nm->handler_table_size();
    nul_chk_table_size  += nm->nul_chk_table_size();
#if INCLUDE_JVMCI
    speculations_size   += nm->speculations_size();
    jvmci_data_size     += nm->jvmci_data_size();
#endif
  }
  void print_nmethod_stats(const char* name) {
    if (nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name);
    if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
    if (nmethod_count != 0)       tty->print_cr(" header         = " SIZE_FORMAT, nmethod_count * sizeof(nmethod));
    if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
    if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
    if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
    if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
    if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
    if (metadata_size != 0)       tty->print_cr(" metadata       = %d", metadata_size);
    if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
    if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
    if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
    if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
    if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
#if INCLUDE_JVMCI
    if (speculations_size != 0)   tty->print_cr(" speculations   = %d", speculations_size);
    if (jvmci_data_size != 0)     tty->print_cr(" JVMCI data     = %d", jvmci_data_size);
#endif
  }
};

struct native_nmethod_stats_struct {
  int native_nmethod_count;
  int native_total_size;
  int native_relocation_size;
  int native_insts_size;
  int native_oops_size;
  int native_metadata_size;
  void note_native_nmethod(nmethod* nm) {
    native_nmethod_count += 1;
    native_total_size       += nm->size();
    native_relocation_size  += nm->relocation_size();
    native_insts_size       += nm->insts_size();
    native_oops_size        += nm->oops_size();
    native_metadata_size    += nm->metadata_size();
  }
  void print_native_nmethod_stats() {
    if (native_nmethod_count == 0)  return;
    tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
    if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
    if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
    if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
    if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
    if (native_metadata_size != 0)    tty->print_cr(" N. metadata    = %d", native_metadata_size);
  }
};

struct pc_nmethod_stats_struct {
  int pc_desc_resets;   // number of resets (= number of caches)
  int pc_desc_queries;  // queries to nmethod::find_pc_desc
  int pc_desc_approx;   // number of queries with approximate == true
  int pc_desc_repeats;  // number of _pc_descs[0] hits
  int pc_desc_hits;     // number of LRU cache hits
  int pc_desc_tests;    // total number of PcDesc examinations
  int pc_desc_searches; // total number of quasi-binary search steps
  int pc_desc_adds;     // number of LRU cache insertions

  void print_pc_stats() {
    tty->print_cr("PcDesc Statistics:  %d queries, %.2f comparisons per query",
                  pc_desc_queries,
                  (double)(pc_desc_tests + pc_desc_searches)
                  / pc_desc_queries);
    tty->print_cr("  caches=%d queries=%d/%d, hits=%d+%d, tests=%d+%d, adds=%d",
                  pc_desc_resets,
                  pc_desc_queries, pc_desc_approx,
                  pc_desc_repeats, pc_desc_hits,
                  pc_desc_tests, pc_desc_searches, pc_desc_adds);
  }
};

#ifdef COMPILER1
static java_nmethod_stats_struct c1_java_nmethod_stats;
#endif
#ifdef COMPILER2
static java_nmethod_stats_struct c2_java_nmethod_stats;
#endif
#if INCLUDE_JVMCI
static java_nmethod_stats_struct jvmci_java_nmethod_stats;
#endif
static java_nmethod_stats_struct unknown_java_nmethod_stats;

static native_nmethod_stats_struct native_nmethod_stats;
static pc_nmethod_stats_struct pc_nmethod_stats;

static void note_java_nmethod(nmethod* nm) {
#ifdef COMPILER1
  if (nm->is_compiled_by_c1()) {
    c1_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
#ifdef COMPILER2
  if (nm->is_compiled_by_c2()) {
    c2_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
#if INCLUDE_JVMCI
  if (nm->is_compiled_by_jvmci()) {
    jvmci_java_nmethod_stats.note_nmethod(nm);
  } else
#endif
  {
    unknown_java_nmethod_stats.note_nmethod(nm);
  }
}
#endif // !PRODUCT

//---------------------------------------------------------------------------------


ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
  assert(pc != NULL, "Must be non null");
  assert(exception.not_null(), "Must be non null");
  assert(handler != NULL, "Must be non null");

  _count = 0;
  _exception_type = exception->klass();
  _next = NULL;
  _purge_list_next = NULL;

  add_address_and_handler(pc,handler);
}


address ExceptionCache::match(Handle exception, address pc) {
  assert(pc != NULL,"Must be non null");
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type()) {
    return (test_address(pc));
  }

  return NULL;
}


bool ExceptionCache::match_exception_with_space(Handle exception) {
  assert(exception.not_null(),"Must be non null");
  if (exception->klass() == exception_type() && count() < cache_size) {
    return true;
  }
  return false;
}


address ExceptionCache::test_address(address addr) {
  int limit = count();
  for (int i = 0; i < limit; i++) {
    if (pc_at(i) == addr) {
      return handler_at(i);
    }
  }
  return NULL;
}


bool ExceptionCache::add_address_and_handler(address addr, address handler) {
  if (test_address(addr) == handler) return true;

  int index = count();
  if (index < cache_size) {
    set_pc_at(index, addr);
    set_handler_at(index, handler);
    increment_count();
    return true;
  }
  return false;
}
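
// Sketch of the intended use (illustrative, not a literal caller): the
// exception-dispatch code first probes the cache and only on a miss falls
// back to the slow handler lookup, e.g.
//
//   address handler = ec->match(exception, pc);        // fast path
//   if (handler == NULL) {
//     handler = /* slow lookup via the handler table */ NULL;
//     if (handler != NULL && ec->match_exception_with_space(exception)) {
//       ec->add_address_and_handler(pc, handler);      // may fail when full
//     }
//   }
//
// Note that add_address_and_handler() only ever appends: the pc/handler pair
// is stored before the count is bumped, so a reader in test_address() that
// observes a given count sees fully initialized entries up to that count
// (publication ordering is handled by the field accessors).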

ExceptionCache* ExceptionCache::next() {
  return Atomic::load(&_next);
}

void ExceptionCache::set_next(ExceptionCache *ec) {
  Atomic::store(&_next, ec);
}

//-----------------------------------------------------------------------------


// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
  if (!approximate)
    return pc->pc_offset() == pc_offset;
  else
    return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
}
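
// In the approximate case a PcDesc "covers" the half-open interval
// ((pc-1)->pc_offset(), pc->pc_offset()]. For example, with two adjacent
// descs at pc_offsets 8 and 24, a query for pc_offset 16 matches the desc
// at 24, since 8 < 16 && 16 <= 24.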

void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
  if (initial_pc_desc == NULL) {
    _pc_descs[0] = NULL; // native method; no PcDescs at all
    return;
  }
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets);
  // reset the cache by filling it with benign (non-null) values
  assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
  for (int i = 0; i < cache_size; i++)
    _pc_descs[i] = initial_pc_desc;
}

PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries);
  NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx);

  // Note: one might think that caching the most recently
  // read value separately would be a win, but one would be
  // wrong.  When many threads are updating it, the cache
  // line it's in would bounce between caches, negating
  // any benefit.

  // In order to prevent race conditions do not load cache elements
  // repeatedly, but use a local copy:
  PcDesc* res;

  // Step one:  Check the most recently added value.
  res = _pc_descs[0];
  if (res == NULL) return NULL;  // native method; no PcDescs at all
  if (match_desc(res, pc_offset, approximate)) {
    NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
    return res;
  }

  // Step two:  Check the rest of the LRU cache.
  for (int i = 1; i < cache_size; ++i) {
    res = _pc_descs[i];
    if (res->pc_offset() < 0) break;  // optimization: skip empty cache
    if (match_desc(res, pc_offset, approximate)) {
      NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
      return res;
    }
  }

  // Report failure.
  return NULL;
}

void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
  NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
  // Update the LRU cache by shifting pc_desc forward.
  for (int i = 0; i < cache_size; i++)  {
    PcDesc* next = _pc_descs[i];
    _pc_descs[i] = pc_desc;
    pc_desc = next;
  }
}
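
// For example, with cache_size == 4 and contents [A, B, C, D], adding E
// shifts every entry down one slot, yielding [E, A, B, C]; D, the oldest
// entry, falls off the end.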

// adjust pcs_size so that it is a multiple of both oopSize and
// sizeof(PcDesc) (assumes that if sizeof(PcDesc) is not a multiple
// of oopSize, then 2*sizeof(PcDesc) is)
static int adjust_pcs_size(int pcs_size) {
  int nsize = align_up(pcs_size,   oopSize);
  if ((nsize % sizeof(PcDesc)) != 0) {
    nsize = pcs_size + sizeof(PcDesc);
  }
  assert((nsize % oopSize) == 0, "correct alignment");
  return nsize;
}
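
// Worked example (hypothetical sizes): with oopSize == 8 and
// sizeof(PcDesc) == 20, a pcs_size of 60 aligns up to 64, which is not a
// multiple of 20, so we instead pad by one whole PcDesc: 60 + 20 == 80,
// a multiple of both 8 and 20 (relying on 2*20 being a multiple of 8).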


int nmethod::total_size() const {
  return
    consts_size()        +
    insts_size()         +
    stub_size()          +
    scopes_data_size()   +
    scopes_pcs_size()    +
    handler_table_size() +
    nul_chk_table_size();
}

address* nmethod::orig_pc_addr(const frame* fr) {
  return (address*) ((address)fr->unextended_sp() + _orig_pc_offset);
}

const char* nmethod::compile_kind() const {
  if (is_osr_method())     return "osr";
  if (method() != NULL && is_native_method())  return "c2n";
  return NULL;
}
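
// ("osr" tags on-stack-replacement compilations, which are entered at a
// bytecode other than the normal method entry; "c2n" tags the generated
// wrapper around a native method. Ordinary compilations return NULL and
// get no kind attribute in the compilation log.)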

// Fill in default values for various flag fields
void nmethod::init_defaults() {
  _state                      = not_installed;
  _has_flushed_dependencies   = 0;
  _lock_count                 = 0;
  _stack_traversal_mark       = 0;
  _load_reported              = false; // jvmti state
  _unload_reported            = false;

#ifdef ASSERT
  _oops_are_stale             = false;
#endif

  _oops_do_mark_link       = NULL;
  _osr_link                = NULL;
#if INCLUDE_RTM_OPT
  _rtm_state               = NoRTM;
#endif
}

nmethod* nmethod::new_native_nmethod(const methodHandle& method,
  int compile_id,
  CodeBuffer *code_buffer,
  int vep_offset,
  int frame_complete,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps) {
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int native_nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));

    CodeOffsets offsets;
    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
    nm = new (native_nmethod_size, CompLevel_none)
    nmethod(method(), compiler_none, native_nmethod_size,
            compile_id, &offsets,
            code_buffer, frame_size,
            basic_lock_owner_sp_offset,
            basic_lock_sp_offset,
            oop_maps);
    NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
  }

  if (nm != NULL) {
    // verify nmethod
    debug_only(nm->verify();) // might block

    nm->log_new_nmethod();
  }
  return nm;
}

nmethod* nmethod::new_nmethod(const methodHandle& method,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer* code_buffer, int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level,
  const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
  , char* speculations,
  int speculations_len,
  int nmethod_mirror_index,
  const char* nmethod_mirror_name,
  FailedSpeculation** failed_speculations
#endif
)
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  code_buffer->finalize_oop_references(method);
  // create nmethod
  nmethod* nm = NULL;
  { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
#if INCLUDE_JVMCI
    int jvmci_data_size = !compiler->is_jvmci() ? 0 : JVMCINMethodData::compute_size(nmethod_mirror_name);
#endif
    int nmethod_size =
      CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
      + adjust_pcs_size(debug_info->pcs_size())
      + align_up((int)dependencies->size_in_bytes(), oopSize)
      + align_up(checked_cast<int>(native_invokers.data_size_in_bytes()), oopSize)
      + align_up(handler_table->size_in_bytes()    , oopSize)
      + align_up(nul_chk_table->size_in_bytes()    , oopSize)
#if INCLUDE_JVMCI
      + align_up(speculations_len                  , oopSize)
      + align_up(jvmci_data_size                   , oopSize)
#endif
      + align_up(debug_info->data_size()           , oopSize);

    nm = new (nmethod_size, comp_level)
    nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
            oop_maps,
            handler_table,
            nul_chk_table,
            compiler,
            comp_level,
            native_invokers
#if INCLUDE_JVMCI
            , speculations,
            speculations_len,
            jvmci_data_size
#endif
            );

    if (nm != NULL) {
#if INCLUDE_JVMCI
      if (compiler->is_jvmci()) {
        // Initialize the JVMCINMethodData object inlined into nm
        nm->jvmci_nmethod_data()->initialize(nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
      }
#endif
      // To make dependency checking during class loading fast, record
      // the nmethod dependencies in the classes it is dependent on.
      // This allows the dependency checking code to simply walk the
      // class hierarchy above the loaded class, checking only nmethods
      // which are dependent on those classes.  The slow way is to
      // check every nmethod for dependencies which makes it linear in
      // the number of methods compiled.  For applications with a lot
      // of classes the slow way is too slow.
      for (Dependencies::DepStream deps(nm); deps.next(); ) {
        if (deps.type() == Dependencies::call_site_target_value) {
          // CallSite dependencies are managed on per-CallSite instance basis.
          oop call_site = deps.argument_oop(0);
          MethodHandles::add_dependent_nmethod(call_site, nm);
        } else {
          Klass* klass = deps.context_type();
          if (klass == NULL) {
            continue;  // ignore things like evol_method
          }
          // record this nmethod as dependent on this klass
          InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
        }
      }
      NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
    }
  }
  // Do verification and logging outside CodeCache_lock.
  if (nm != NULL) {
    // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet.
    DEBUG_ONLY(nm->verify();)
    nm->log_new_nmethod();
  }
  return nm;
}

// For native wrappers
nmethod::nmethod(
  Method* method,
  CompilerType type,
  int nmethod_size,
  int compile_id,
  CodeOffsets* offsets,
  CodeBuffer* code_buffer,
  int frame_size,
  ByteSize basic_lock_owner_sp_offset,
  ByteSize basic_lock_sp_offset,
  OopMapSet* oop_maps )
  : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
  _is_unloading_state(0),
  _native_receiver_sp_offset(basic_lock_owner_sp_offset),
  _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
  {
    int scopes_data_offset   = 0;
    int deoptimize_offset    = 0;
    int deoptimize_mh_offset = 0;

    debug_only(NoSafepointVerifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    init_defaults();
    _entry_bci               = InvocationEntryBci;
    // We have no exception handler or deopt handler; make the
    // values something that will never match a pc, like the nmethod vtable entry
    _exception_offset        = 0;
    _orig_pc_offset          = 0;

    _consts_offset           = data_offset();
    _stub_offset             = data_offset();
    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset         + align_up(code_buffer->total_oop_size(), oopSize);
    scopes_data_offset       = _metadata_offset     + align_up(code_buffer->total_metadata_size(), wordSize);
    _scopes_pcs_offset       = scopes_data_offset;
    _dependencies_offset     = _scopes_pcs_offset;
    _native_invokers_offset  = _dependencies_offset;
    _handler_table_offset    = _native_invokers_offset;
    _nul_chk_table_offset    = _handler_table_offset;
#if INCLUDE_JVMCI
    _speculations_offset     = _nul_chk_table_offset;
    _jvmci_data_offset       = _speculations_offset;
    _nmethod_end_offset      = _jvmci_data_offset;
#else
    _nmethod_end_offset      = _nul_chk_table_offset;
#endif
    _compile_id              = compile_id;
    _comp_level              = CompLevel_none;
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = NULL;
    _exception_cache         = NULL;
    _pc_desc_container.reset_to(NULL);
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    _scopes_data_begin = (address) this + scopes_data_offset;
    _deopt_handler_begin = (address) this + deoptimize_offset;
    _deopt_mh_handler_begin = (address) this + deoptimize_mh_offset;

    code_buffer->copy_code_and_locs_to(this);
    code_buffer->copy_values_to(this);

    clear_unloading_state();

    Universe::heap()->register_nmethod(this);
    debug_only(Universe::heap()->verify_nmethod(this));

    CodeCache::commit(this);
  }

  if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->begin_head("print_native_nmethod");
      xtty->method(_method);
      xtty->stamp();
      xtty->end_head(" address='" INTPTR_FORMAT "'", (intptr_t) this);
    }
    // Print the header part, then print the requested information.
    // Both are handled in decode2(), called via print_code() -> decode().
    if (PrintNativeNMethods) {
      tty->print_cr("-------------------------- Assembly (native nmethod) ---------------------------");
      print_code();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
#if defined(SUPPORT_DATA_STRUCTS)
      if (AbstractDisassembler::show_structs()) {
        if (oop_maps != NULL) {
          tty->print("oop maps:"); // oop_maps->print_on(tty) outputs a cr() at the beginning
          oop_maps->print_on(tty);
          tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
        }
      }
#endif
    } else {
      print(); // print the header part only.
    }
#if defined(SUPPORT_DATA_STRUCTS)
    if (AbstractDisassembler::show_structs()) {
      if (PrintRelocations) {
        print_relocations();
        tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      }
    }
#endif
    if (xtty != NULL) {
      xtty->tail("print_native_nmethod");
    }
  }
}

void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
}

nmethod::nmethod(
  Method* method,
  CompilerType type,
  int nmethod_size,
  int compile_id,
  int entry_bci,
  CodeOffsets* offsets,
  int orig_pc_offset,
  DebugInformationRecorder* debug_info,
  Dependencies* dependencies,
  CodeBuffer *code_buffer,
  int frame_size,
  OopMapSet* oop_maps,
  ExceptionHandlerTable* handler_table,
  ImplicitExceptionTable* nul_chk_table,
  AbstractCompiler* compiler,
  int comp_level,
  const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
  , char* speculations,
  int speculations_len,
  int jvmci_data_size
#endif
  )
  : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
  _is_unloading_state(0),
  _native_receiver_sp_offset(in_ByteSize(-1)),
  _native_basic_lock_sp_offset(in_ByteSize(-1))
{
  assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
  {
    debug_only(NoSafepointVerifier nsv;)
    assert_locked_or_safepoint(CodeCache_lock);

    _deopt_handler_begin = (address) this;
    _deopt_mh_handler_begin = (address) this;

    init_defaults();
    _entry_bci               = entry_bci;
    _compile_id              = compile_id;
    _comp_level              = comp_level;
    _orig_pc_offset          = orig_pc_offset;
    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();

    // Section offsets
    _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    _stub_offset             = content_offset()      + code_buffer->total_offset_of(code_buffer->stubs());
    set_ctable_begin(header_begin() + _consts_offset);

#if INCLUDE_JVMCI
    if (compiler->is_jvmci()) {
      // JVMCI might not produce any stub sections
      if (offsets->value(CodeOffsets::Exceptions) != -1) {
        _exception_offset        = code_offset()          + offsets->value(CodeOffsets::Exceptions);
      } else {
        _exception_offset = -1;
      }
      if (offsets->value(CodeOffsets::Deopt) != -1) {
        _deopt_handler_begin       = (address) this + code_offset()          + offsets->value(CodeOffsets::Deopt);
      } else {
        _deopt_handler_begin = NULL;
      }
      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
        _deopt_mh_handler_begin  = (address) this + code_offset()          + offsets->value(CodeOffsets::DeoptMH);
      } else {
        _deopt_mh_handler_begin = NULL;
      }
    } else
#endif
    {
      // Exception handler and deopt handler are in the stub section
      assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
      assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");

      _exception_offset       = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
      _deopt_handler_begin    = (address) this + _stub_offset          + offsets->value(CodeOffsets::Deopt);
      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
        _deopt_mh_handler_begin  = (address) this + _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
      } else {
        _deopt_mh_handler_begin  = NULL;
      }
    }
    if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
      _unwind_handler_offset = code_offset()         + offsets->value(CodeOffsets::UnwindHandler);
    } else {
      _unwind_handler_offset = -1;
    }

    _oops_offset             = data_offset();
    _metadata_offset         = _oops_offset          + align_up(code_buffer->total_oop_size(), oopSize);
    int scopes_data_offset   = _metadata_offset      + align_up(code_buffer->total_metadata_size(), wordSize);

    _scopes_pcs_offset       = scopes_data_offset    + align_up(debug_info->data_size       (), oopSize);
    _dependencies_offset     = _scopes_pcs_offset    + adjust_pcs_size(debug_info->pcs_size());
    _native_invokers_offset  = _dependencies_offset  + align_up((int)dependencies->size_in_bytes(), oopSize);
    _handler_table_offset    = _native_invokers_offset + align_up(checked_cast<int>(native_invokers.data_size_in_bytes()), oopSize);
    _nul_chk_table_offset    = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
#if INCLUDE_JVMCI
    _speculations_offset     = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
    _jvmci_data_offset       = _speculations_offset  + align_up(speculations_len, oopSize);
    _nmethod_end_offset      = _jvmci_data_offset    + align_up(jvmci_data_size, oopSize);
#else
    _nmethod_end_offset      = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
#endif
    _entry_point             = code_begin()          + offsets->value(CodeOffsets::Entry);
    _verified_entry_point    = code_begin()          + offsets->value(CodeOffsets::Verified_Entry);
    _osr_entry_point         = code_begin()          + offsets->value(CodeOffsets::OSR_Entry);
    _exception_cache         = NULL;
    _scopes_data_begin       = (address) this + scopes_data_offset;

    _pc_desc_container.reset_to(scopes_pcs_begin());

    code_buffer->copy_code_and_locs_to(this);
    // Copy contents of ScopeDescRecorder to nmethod
    code_buffer->copy_values_to(this);
    debug_info->copy_to(this);
    dependencies->copy_to(this);
    if (native_invokers.is_nonempty()) { // cannot get the address of a zero-length array
      // Copy native stubs
      memcpy(native_invokers_begin(), native_invokers.adr_at(0), native_invokers.data_size_in_bytes());
    }
    clear_unloading_state();

    Universe::heap()->register_nmethod(this);
    debug_only(Universe::heap()->verify_nmethod(this));

    CodeCache::commit(this);

    // Copy contents of ExceptionHandlerTable to nmethod
    handler_table->copy_to(this);
    nul_chk_table->copy_to(this);

#if INCLUDE_JVMCI
    // Copy speculations to nmethod
    if (speculations_size() != 0) {
      memcpy(speculations_begin(), speculations, speculations_len);
    }
#endif

    // We use the entry point information to determine whether a method
    // is static or non-static.
    assert(compiler->is_c2() || compiler->is_jvmci() ||
           _method->is_static() == (entry_point() == _verified_entry_point),
           " entry points must be same for static methods and vice versa");
  }
}

// Print a short set of xml attributes to identify this nmethod.  The
// output should be embedded in some other element.
void nmethod::log_identity(xmlStream* log) const {
  log->print(" compile_id='%d'", compile_id());
  const char* nm_kind = compile_kind();
  if (nm_kind != NULL)  log->print(" compile_kind='%s'", nm_kind);
  log->print(" compiler='%s'", compiler_name());
  if (TieredCompilation) {
    log->print(" level='%d'", comp_level());
  }
#if INCLUDE_JVMCI
  if (jvmci_nmethod_data() != NULL) {
    const char* jvmci_name = jvmci_nmethod_data()->name();
    if (jvmci_name != NULL) {
      log->print(" jvmci_mirror_name='");
      log->text("%s", jvmci_name);
      log->print("'");
    }
  }
#endif
}


#define LOG_OFFSET(log, name)                    \
  if (p2i(name##_end()) - p2i(name##_begin())) \
    log->print(" " XSTR(name) "_offset='" INTX_FORMAT "'"    , \
               p2i(name##_begin()) - p2i(this))
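
// For example, LOG_OFFSET(xtty, consts) expands (roughly) to
//
//   if (p2i(consts_end()) - p2i(consts_begin()))
//     xtty->print(" consts_offset='" INTX_FORMAT "'",
//                 p2i(consts_begin()) - p2i(this));
//
// i.e. the attribute is emitted only when the section is non-empty.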


void nmethod::log_new_nmethod() const {
  if (LogCompilation && xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod");
    log_identity(xtty);
    xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size());
    xtty->print(" address='" INTPTR_FORMAT "'", p2i(this));

    LOG_OFFSET(xtty, relocation);
    LOG_OFFSET(xtty, consts);
    LOG_OFFSET(xtty, insts);
    LOG_OFFSET(xtty, stub);
    LOG_OFFSET(xtty, scopes_data);
    LOG_OFFSET(xtty, scopes_pcs);
    LOG_OFFSET(xtty, dependencies);
    LOG_OFFSET(xtty, handler_table);
    LOG_OFFSET(xtty, nul_chk_table);
    LOG_OFFSET(xtty, oops);
    LOG_OFFSET(xtty, metadata);

    xtty->method(method());
    xtty->stamp();
    xtty->end_elem();
  }
}

#undef LOG_OFFSET


// Print out more verbose output usually for a newly created nmethod.
void nmethod::print_on(outputStream* st, const char* msg) const {
  if (st != NULL) {
    ttyLocker ttyl;
    if (WizardMode) {
      CompileTask::print(st, this, msg, /*short_form:*/ true);
      st->print_cr(" (" INTPTR_FORMAT ")", p2i(this));
    } else {
      CompileTask::print(st, this, msg, /*short_form:*/ false);
    }
  }
}

void nmethod::maybe_print_nmethod(DirectiveSet* directive) {
  bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
    print_nmethod(printnmethods);
  }
}

void nmethod::print_nmethod(bool printmethod) {
  run_nmethod_entry_barrier(); // ensure all embedded OOPs are valid before printing

  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != NULL) {
    xtty->begin_head("print_nmethod");
    log_identity(xtty);
    xtty->stamp();
    xtty->end_head();
  }
  // Print the header part, then print the requested information.
  // Both are handled in decode2().
  if (printmethod) {
    ResourceMark m;
    if (is_compiled_by_c1()) {
      tty->cr();
      tty->print_cr("============================= C1-compiled nmethod ==============================");
    }
    if (is_compiled_by_jvmci()) {
      tty->cr();
      tty->print_cr("=========================== JVMCI-compiled nmethod =============================");
    }
    tty->print_cr("----------------------------------- Assembly -----------------------------------");
    decode2(tty);
#if defined(SUPPORT_DATA_STRUCTS)
    if (AbstractDisassembler::show_structs()) {
      // Print the oops from the underlying CodeBlob as well.
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_oops(tty);
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_metadata(tty);
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_pcs();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      if (oop_maps() != NULL) {
        tty->print("oop maps:"); // oop_maps()->print_on(tty) outputs a cr() at the beginning
        oop_maps()->print_on(tty);
        tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      }
    }
#endif
  } else {
    print(); // print the header part only.
  }

#if defined(SUPPORT_DATA_STRUCTS)
  if (AbstractDisassembler::show_structs()) {
    methodHandle mh(Thread::current(), _method);
    if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommand::PrintDebugInfo)) {
      print_scopes();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommand::PrintRelocations)) {
      print_relocations();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommand::PrintDependencies)) {
      print_dependencies();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod && native_invokers_begin() < native_invokers_end()) {
      print_native_invokers();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
    if (printmethod || PrintExceptionHandlers) {
      print_handler_table();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_nul_chk_table();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }

    if (printmethod) {
      print_recorded_oops();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
      print_recorded_metadata();
      tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
    }
  }
#endif

  if (xtty != NULL) {
    xtty->tail("print_nmethod");
  }
}


// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
  if (handle == NULL ||
      // As a special case, IC oops are initialized to 1 or -1.
      handle == (jobject) Universe::non_oop_word()) {
    *(void**)dest = handle;
  } else {
    *dest = JNIHandles::resolve_non_null(handle);
  }
}


// Have to have the same name because it's called by a template
void nmethod::copy_values(GrowableArray<jobject>* array) {
  int length = array->length();
  assert((address)(oops_begin() + length) <= (address)oops_end(), "oops big enough");
  oop* dest = oops_begin();
  for (int index = 0 ; index < length; index++) {
    initialize_immediate_oop(&dest[index], array->at(index));
  }

  // Now we can fix up all the oops in the code.  We need to do this
  // in the code because the assembler uses jobjects as placeholders.
  // The code and relocations have already been initialized by the
  // CodeBlob constructor, so it is valid even at this early point to
  // iterate over relocations and patch the code.
  fix_oop_relocations(NULL, NULL, /*initialize_immediates=*/ true);
}

void nmethod::copy_values(GrowableArray<Metadata*>* array) {
  int length = array->length();
  assert((address)(metadata_begin() + length) <= (address)metadata_end(), "big enough");
  Metadata** dest = metadata_begin();
  for (int index = 0 ; index < length; index++) {
    dest[index] = array->at(index);
  }
}

void nmethod::free_native_invokers() {
  for (RuntimeStub** it = native_invokers_begin(); it < native_invokers_end(); it++) {
    CodeCache::free(*it);
  }
}

void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
  // re-patch all oop-bearing instructions, just in case some oops moved
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest));
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    } else if (iter.type() == relocInfo::metadata_type) {
      metadata_Relocation* reloc = iter.metadata_reloc();
      reloc->fix_metadata_relocation();
    }
  }
}


void nmethod::verify_clean_inline_caches() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");

  ResourceMark rm;
  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // OK to look up references to zombies here
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        assert(cb != NULL, "destination not in CodeBlob?");
        nmethod* nm = cb->as_nmethod_or_null();
        if (nm != NULL) {
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(ic->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        assert(cb != NULL, "destination not in CodeBlob?");
        nmethod* nm = cb->as_nmethod_or_null();
        if (nm != NULL) {
          // Verify that inline caches pointing to both zombie and not_entrant methods are clean
          if (!nm->is_in_use() || (nm->method()->code() != nm)) {
            assert(csc->is_clean(), "IC should be clean");
          }
        }
        break;
      }
      default:
        break;
    }
  }
}

// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
  assert(is_alive(), "Must be an alive method");
  // Set the traversal mark to ensure that the sweeper does 2
  // cleaning passes before moving to zombie.
  set_stack_traversal_mark(NMethodSweeper::traversal_count());
}

// Tell if a non-entrant method can be converted to a zombie (i.e.,
// there are no activations on the stack, not in use by the VM,
// and not in use by the ServiceThread)
bool nmethod::can_convert_to_zombie() {
  // Note that this is called when the sweeper has observed the nmethod to be
  // not_entrant. However, with concurrent code cache unloading, the state
  // might have moved on to unloaded if it is_unloading(), due to racing
  // concurrent GC threads.
  assert(is_not_entrant() || is_unloading() ||
         !Thread::current()->is_Code_cache_sweeper_thread(),
         "must be a non-entrant method if called from sweeper");

  // Since the nmethod sweeper only does partial sweep the sweeper's traversal
  // count can be greater than the stack traversal count before it hits the
  // nmethod for the second time.
  // If an is_unloading() nmethod is still not_entrant, then it is not safe to
  // convert it to zombie due to GC unloading interactions. However, if it
  // has become unloaded, then it is okay to convert such nmethods to zombie.
  return stack_traversal_mark() + 1 < NMethodSweeper::traversal_count() &&
         !is_locked_by_vm() && (!is_unloading() || is_unloaded());
}
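
// Example of the arithmetic above: if the sweeper stamped this nmethod at
// traversal 10 (stack_traversal_mark() == 10), then 10 + 1 < traversal_count()
// first holds at traversal 12, i.e. only after two further sweeps have had a
// chance to observe whether the nmethod is still on any stack.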

void nmethod::inc_decompile_count() {
  if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
  // Could be gated by ProfileTraps, but do not bother...
  Method* m = method();
  if (m == NULL)  return;
  MethodData* mdo = m->method_data();
  if (mdo == NULL)  return;
  // There is a benign race here.  See comments in methodData.hpp.
  mdo->inc_decompile_count();
}

bool nmethod::try_transition(int new_state_int) {
  signed char new_state = new_state_int;
#ifdef ASSERT
  if (new_state != unloaded) {
    assert_lock_strong(CompiledMethod_lock);
  }
#endif
  for (;;) {
    signed char old_state = Atomic::load(&_state);
    if (old_state >= new_state) {
      // Ensure monotonicity of transitions.
      return false;
    }
    if (Atomic::cmpxchg(&_state, old_state, new_state) == old_state) {
      return true;
    }
  }
}
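
// The states form a one-way ladder (e.g. in_use -> not_entrant -> zombie ->
// unloaded, in increasing numeric order), so the old_state >= new_state
// check plus the CAS make every transition atomic and monotonic: a losing
// racer simply observes the newer state and returns false.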
1188 
make_unloaded()1189 void nmethod::make_unloaded() {
1190   post_compiled_method_unload();
1191 
1192   // This nmethod is being unloaded, make sure that dependencies
1193   // recorded in instanceKlasses get flushed.
1194   // Since this work is being done during a GC, defer deleting dependencies from the
1195   // InstanceKlass.
1196   assert(Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread(),
1197          "should only be called during gc");
1198   flush_dependencies(/*delete_immediately*/false);
1199 
1200   // Break cycle between nmethod & method
1201   LogTarget(Trace, class, unload, nmethod) lt;
1202   if (lt.is_enabled()) {
1203     LogStream ls(lt);
1204     ls.print("making nmethod " INTPTR_FORMAT
1205              " unloadable, Method*(" INTPTR_FORMAT
1206              ") ",
1207              p2i(this), p2i(_method));
1208      ls.cr();
1209   }
1210   // Unlink the osr method, so we do not look this up again
1211   if (is_osr_method()) {
1212     // Invalidate the osr nmethod only once. Note that with concurrent
1213     // code cache unloading, OSR nmethods are invalidated before they
1214     // are made unloaded. Therefore, this becomes a no-op then.
1215     if (is_in_use()) {
1216       invalidate_osr_method();
1217     }
1218 #ifdef ASSERT
1219     if (method() != NULL) {
1220       // Make sure osr nmethod is invalidated, i.e. not on the list
1221       bool found = method()->method_holder()->remove_osr_nmethod(this);
1222       assert(!found, "osr nmethod should have been invalidated");
1223     }
1224 #endif
1225   }
1226 
1227   // If _method is already NULL the Method* is about to be unloaded,
1228   // so we don't have to break the cycle. Note that it is possible to
1229   // have the Method* live here, in case we unload the nmethod because
1230   // it is pointing to some oop (other than the Method*) being unloaded.
1231   if (_method != NULL) {
1232     _method->unlink_code(this);
1233   }
1234 
1235   // Make the class unloaded - i.e., change state and notify sweeper
1236   assert(SafepointSynchronize::is_at_safepoint() || Thread::current()->is_ConcurrentGC_thread(),
1237          "must be at safepoint");
1238 
1239   {
1240     // Clear ICStubs and release any CompiledICHolders.
1241     CompiledICLocker ml(this);
1242     clear_ic_callsites();
1243   }
1244 
1245   // Unregister must be done before the state change
1246   {
1247     MutexLocker ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock,
1248                      Mutex::_no_safepoint_check_flag);
1249     Universe::heap()->unregister_nmethod(this);
1250   }
1251 
1252   // Clear the method of this dead nmethod
1253   set_method(NULL);
1254 
1255   // Log the unloading.
1256   log_state_change();
1257 
1258   // The Method* is gone at this point
1259   assert(_method == NULL, "Tautology");
1260 
1261   set_osr_link(NULL);
1262   NMethodSweeper::report_state_change(this);
1263 
1264   bool transition_success = try_transition(unloaded);
1265 
1266   // It is an important invariant that there exists no race between
1267   // the sweeper and GC thread competing for making the same nmethod
1268   // zombie and unloaded respectively. This is ensured by
1269   // can_convert_to_zombie() returning false for any is_unloading()
1270   // nmethod, informing the sweeper not to step on any GC toes.
1271   assert(transition_success, "Invalid nmethod transition to unloaded");
1272 
1273 #if INCLUDE_JVMCI
1274   // Clear the link between this nmethod and a HotSpotNmethod mirror
1275   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1276   if (nmethod_data != NULL) {
1277     nmethod_data->invalidate_nmethod_mirror(this);
1278   }
1279 #endif
1280 }
1281 
invalidate_osr_method()1282 void nmethod::invalidate_osr_method() {
1283   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
1284   // Remove from list of active nmethods
1285   if (method() != NULL) {
1286     method()->method_holder()->remove_osr_nmethod(this);
1287   }
1288 }
1289 
log_state_change() const1290 void nmethod::log_state_change() const {
1291   if (LogCompilation) {
1292     if (xtty != NULL) {
1293       ttyLocker ttyl;  // keep the following output all in one block
1294       if (_state == unloaded) {
1295         xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
1296                          os::current_thread_id());
1297       } else {
1298         xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
1299                          os::current_thread_id(),
1300                          (_state == zombie ? " zombie='1'" : ""));
1301       }
1302       log_identity(xtty);
1303       xtty->stamp();
1304       xtty->end_elem();
1305     }
1306   }
1307 
1308   const char *state_msg = _state == zombie ? "made zombie" : "made not entrant";
1309   CompileTask::print_ul(this, state_msg);
1310   if (PrintCompilation && _state != unloaded) {
1311     print_on(tty, state_msg);
1312   }
1313 }
1314 
unlink_from_method()1315 void nmethod::unlink_from_method() {
1316   if (method() != NULL) {
1317     method()->unlink_code(this);
1318   }
1319 }
1320 
1321 /**
1322  * Common functionality for both make_not_entrant and make_zombie
1323  */
1324 bool nmethod::make_not_entrant_or_zombie(int state) {
1325   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
1326 
1327   if (Atomic::load(&_state) >= state) {
1328     // Avoid taking the lock if already in required state.
1329     // This is safe from races because the state is an end-state,
1330     // which the nmethod cannot back out of once entered.
1331     // No need for fencing either.
1332     return false;
1333   }
1334 
1335   // Make sure the nmethod is not flushed.
1336   nmethodLocker nml(this);
1337   // This can be called while the system is already at a safepoint which is ok
1338   NoSafepointVerifier nsv;
1339 
1340   // During patching, depending on the nmethod state, we must notify the GC that
1341   // code has been unloaded, unregistering it. We cannot do that while
1342   // holding the CompiledMethod_lock, because we need to use the CodeCache_lock, which
1343   // would be prone to deadlocks.
1344   // This flag is used to remember whether we need to later lock and unregister.
1345   bool nmethod_needs_unregister = false;
1346 
1347   {
1348     // Enter critical section.  Does not block for safepoint.
1349     MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
1350 
1351     // This logic is equivalent to the logic below for patching the
1352     // verified entry point of regular methods. We check that the
1353     // nmethod is in use to ensure that it is invalidated only once.
1354     if (is_osr_method() && is_in_use()) {
1355       // this effectively makes the osr nmethod not entrant
1356       invalidate_osr_method();
1357     }
1358 
1359     if (Atomic::load(&_state) >= state) {
1360       // another thread already performed this transition so nothing
1361       // to do, but return false to indicate this.
1362       return false;
1363     }
1364 
1365     // The caller can be calling the method statically or through an inline
1366     // cache call.
1367     if (!is_osr_method() && !is_not_entrant()) {
1368       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
1369                   SharedRuntime::get_handle_wrong_method_stub());
1370     }
1371 
1372     if (is_in_use() && update_recompile_counts()) {
1373       // It's a true state change, so mark the method as decompiled.
1374       // Do it only for transition from alive.
1375       inc_decompile_count();
1376     }
1377 
1378     // If the state is becoming a zombie, signal to unregister the nmethod with
1379     // the heap.
1380     // This nmethod may have already been unloaded during a full GC.
1381     if ((state == zombie) && !is_unloaded()) {
1382       nmethod_needs_unregister = true;
1383     }
1384 
1385     // Must happen before state change. Otherwise we have a race condition in
1386     // nmethod::can_convert_to_zombie(). I.e., a method can immediately
1387     // transition its state from 'not_entrant' to 'zombie' without having to wait
1388     // for stack scanning.
1389     if (state == not_entrant) {
1390       mark_as_seen_on_stack();
1391       OrderAccess::storestore(); // _stack_traversal_mark and _state
1392     }
1393 
1394     // Change state
1395     if (!try_transition(state)) {
1396       // If the transition fails, it is due to another thread making the nmethod more
1397       // dead. In particular, one thread might be making the nmethod unloaded concurrently.
1398       // If so, having patched in the jump in the verified entry unnecessarily is fine.
1399       // The nmethod is no longer possible to call by Java threads.
1400       // Incrementing the decompile count is also fine as the caller of make_not_entrant()
1401       // had a valid reason to deoptimize the nmethod.
1402       // Marking the nmethod as seen on stack also has no effect, as the nmethod is now
1403       // !is_alive(), and the seen on stack value is only used to convert not_entrant
1404       // nmethods to zombie in can_convert_to_zombie().
1405       return false;
1406     }
1407 
1408     // Log the transition once
1409     log_state_change();
1410 
1411     // Remove nmethod from method.
1412     unlink_from_method();
1413 
1414   } // leave critical region under CompiledMethod_lock
1415 
1416 #if INCLUDE_JVMCI
1417   // Invalidate can't occur while holding the Patching lock
1418   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
1419   if (nmethod_data != NULL) {
1420     nmethod_data->invalidate_nmethod_mirror(this);
1421   }
1422 #endif
1423 
1424 #ifdef ASSERT
1425   if (is_osr_method() && method() != NULL) {
1426     // Make sure osr nmethod is invalidated, i.e. not on the list
1427     bool found = method()->method_holder()->remove_osr_nmethod(this);
1428     assert(!found, "osr nmethod should have been invalidated");
1429   }
1430 #endif
1431 
1432   // When the nmethod becomes zombie it is no longer alive so the
1433   // dependencies must be flushed.  nmethods in the not_entrant
1434   // state will be flushed later when the transition to zombie
1435   // happens or they get unloaded.
1436   if (state == zombie) {
1437     {
1438       // Flushing dependencies must be done before any possible
1439       // safepoint can sneak in, otherwise the oops used by the
1440       // dependency logic could have become stale.
1441       MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1442       if (nmethod_needs_unregister) {
1443         Universe::heap()->unregister_nmethod(this);
1444       }
1445       flush_dependencies(/*delete_immediately*/true);
1446     }
1447 
1448 #if INCLUDE_JVMCI
1449     // Now that the nmethod has been unregistered, it's
1450     // safe to clear the HotSpotNmethod mirror oop.
1451     if (nmethod_data != NULL) {
1452       nmethod_data->clear_nmethod_mirror(this);
1453     }
1454 #endif
1455 
1456     // Clear ICStubs to prevent back patching stubs of zombie or flushed
1457     // nmethods during the next safepoint (see ICStub::finalize), as well
1458     // as to free up CompiledICHolder resources.
1459     {
1460       CompiledICLocker ml(this);
1461       clear_ic_callsites();
1462     }
1463 
1464     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
1465     // event and it hasn't already been reported for this nmethod then
1466     // report it now. (The event may have been reported earlier if the GC
1467     // marked it for unloading.) JvmtiDeferredEventQueue support means
1468     // we no longer go to a safepoint here.
1469     post_compiled_method_unload();
1470 
1471 #ifdef ASSERT
1472     // It's no longer safe to access the oops section since zombie
1473     // nmethods aren't scanned for GC.
1474     _oops_are_stale = true;
1475 #endif
1476      // the Method may be reclaimed by class unloading now that the
1477      // nmethod is in zombie state
1478     set_method(NULL);
1479   } else {
1480     assert(state == not_entrant, "other cases may need to be handled differently");
1481   }
1482 
1483   if (TraceCreateZombies && state == zombie) {
1484     ResourceMark m;
1485     tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", (state == not_entrant) ? "not entrant" : "zombie");
1486   }
1487 
1488   NMethodSweeper::report_state_change(this);
1489   return true;
1490 }
1491 
1492 void nmethod::flush() {
1493   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1494   // Note that there are no valid oops in the nmethod anymore.
1495   assert(!is_osr_method() || is_unloaded() || is_zombie(),
1496          "osr nmethod must be unloaded or zombie before flushing");
1497   assert(is_zombie() || is_osr_method(), "must be a zombie method");
1498   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
1499   assert_locked_or_safepoint(CodeCache_lock);
1500 
1501   // completely deallocate this method
1502   Events::log(JavaThread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
1503   if (PrintMethodFlushing) {
1504     tty->print_cr("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
1505                   "/Free CodeCache:" SIZE_FORMAT "Kb",
1506                   is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
1507                   CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
1508   }
1509 
1510   // We need to deallocate any ExceptionCache data.
1511   // Note that we do not need to grab the nmethod lock for this, it
1512   // better be thread safe if we're disposing of it!
1513   ExceptionCache* ec = exception_cache();
1514   set_exception_cache(NULL);
1515   while(ec != NULL) {
1516     ExceptionCache* next = ec->next();
1517     delete ec;
1518     ec = next;
1519   }
1520 
1521   Universe::heap()->flush_nmethod(this);
1522   CodeCache::unregister_old_nmethod(this);
1523 
1524   CodeBlob::flush();
1525   CodeCache::free(this);
1526 }
1527 
1528 oop nmethod::oop_at(int index) const {
1529   if (index == 0) {
1530     return NULL;
1531   }
1532   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(oop_addr_at(index));
1533 }
1534 
1535 oop nmethod::oop_at_phantom(int index) const {
1536   if (index == 0) {
1537     return NULL;
1538   }
1539   return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(oop_addr_at(index));
1540 }
1541 
1542 //
1543 // Notify all classes this nmethod is dependent on that it is no
1544 // longer dependent. This should only be called in two situations.
1545 // First, when a nmethod transitions to a zombie all dependents need
1546 // to be clear.  Since zombification happens at a safepoint there's no
1547 // synchronization issues.  The second place is a little more tricky.
1548 // During phase 1 of mark sweep class unloading may happen and as a
1549 // result some nmethods may get unloaded.  In this case the flushing
1550 // of dependencies must happen during phase 1 since after GC any
1551 // dependencies in the unloaded nmethod won't be updated, so
1552 // traversing the dependency information is unsafe.  In that case this
1553 // function is called with a boolean argument and this function only
1554 // notifies instanceKlasses that are reachable.
1555 
1556 void nmethod::flush_dependencies(bool delete_immediately) {
1557   DEBUG_ONLY(bool called_by_gc = Universe::heap()->is_gc_active() || Thread::current()->is_ConcurrentGC_thread();)
1558   assert(called_by_gc != delete_immediately,
1559          "delete_immediately is false if and only if we are called during GC");
1560   if (!has_flushed_dependencies()) {
1561     set_has_flushed_dependencies();
1562     for (Dependencies::DepStream deps(this); deps.next(); ) {
1563       if (deps.type() == Dependencies::call_site_target_value) {
1564         // CallSite dependencies are managed on per-CallSite instance basis.
1565         oop call_site = deps.argument_oop(0);
1566         if (delete_immediately) {
1567           assert_locked_or_safepoint(CodeCache_lock);
1568           MethodHandles::remove_dependent_nmethod(call_site, this);
1569         } else {
1570           MethodHandles::clean_dependency_context(call_site);
1571         }
1572       } else {
1573         Klass* klass = deps.context_type();
1574         if (klass == NULL) {
1575           continue;  // ignore things like evol_method
1576         }
1577         // During GC delete_immediately is false, and liveness
1578         // of dependee determines class that needs to be updated.
1579         if (delete_immediately) {
1580           assert_locked_or_safepoint(CodeCache_lock);
1581           InstanceKlass::cast(klass)->remove_dependent_nmethod(this);
1582         } else if (klass->is_loader_alive()) {
1583           // The GC may clean dependency contexts concurrently and in parallel.
1584           InstanceKlass::cast(klass)->clean_dependency_context();
1585         }
1586       }
1587     }
1588   }
1589 }
1590 
1591 // ------------------------------------------------------------------
1592 // post_compiled_method_load_event
1593 // new method for install_code() path
1594 // Transfer information from compilation to jvmti
1595 void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
1596 
1597   // Don't post this nmethod load event if it is already dying
1598   // because the sweeper might already be deleting this nmethod.
1599   {
1600     MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
1601     // When the nmethod is acquired from the CodeCache iterator, it can racingly become zombie
1602     // before this code is called. Filter them out here under the CompiledMethod_lock.
1603     if (!is_alive()) {
1604       return;
1605     }
1606     // As for is_alive() nmethods, we also don't want them to racingly become zombie once we
1607     // release this lock, so we check that this is not going to be the case.
1608     if (is_not_entrant() && can_convert_to_zombie()) {
1609       return;
1610     }
1611   }
1612 
1613   // This is a bad time for a safepoint.  We don't want
1614   // this nmethod to get unloaded while we're queueing the event.
1615   NoSafepointVerifier nsv;
1616 
1617   Method* m = method();
1618   HOTSPOT_COMPILED_METHOD_LOAD(
1619       (char *) m->klass_name()->bytes(),
1620       m->klass_name()->utf8_length(),
1621       (char *) m->name()->bytes(),
1622       m->name()->utf8_length(),
1623       (char *) m->signature()->bytes(),
1624       m->signature()->utf8_length(),
1625       insts_begin(), insts_size());
1626 
1627 
1628   if (JvmtiExport::should_post_compiled_method_load()) {
1629     // Only post unload events if load events are found.
1630     set_load_reported();
1631     // If a JavaThread hasn't been passed in, let the Service thread
1632     // (which is a real Java thread) post the event
1633     JvmtiDeferredEvent event = JvmtiDeferredEvent::compiled_method_load_event(this);
1634     if (state == NULL) {
1635       // Execute any barrier code for this nmethod as if it's called, since
1636       // keeping it alive looks like stack walking.
1637       run_nmethod_entry_barrier();
1638       ServiceThread::enqueue_deferred_event(&event);
1639     } else {
1640       // This enters the nmethod barrier outside in the caller.
1641       state->enqueue_event(&event);
1642     }
1643   }
1644 }
1645 
1646 void nmethod::post_compiled_method_unload() {
1647   if (unload_reported()) {
1648     // During unloading we transition to unloaded and then to zombie
1649     // and the unloading is reported during the first transition.
1650     return;
1651   }
1652 
1653   assert(_method != NULL && !is_unloaded(), "just checking");
1654   DTRACE_METHOD_UNLOAD_PROBE(method());
1655 
1656   // If a JVMTI agent has enabled the CompiledMethodUnload event then
1657   // post the event. Sometime later this nmethod will be made a zombie
1658   // by the sweeper but the Method* will not be valid at that point.
1659   // The jmethodID is a weak reference to the Method* so if
1660   // it's being unloaded there's no way to look it up since the weak
1661   // ref will have been cleared.
1662 
1663   // Don't bother posting the unload if the load event wasn't posted.
1664   if (load_reported() && JvmtiExport::should_post_compiled_method_unload()) {
1665     assert(!unload_reported(), "already unloaded");
1666     JvmtiDeferredEvent event =
1667       JvmtiDeferredEvent::compiled_method_unload_event(
1668           method()->jmethod_id(), insts_begin());
1669     ServiceThread::enqueue_deferred_event(&event);
1670   }
1671 
1672   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
1673   // any time. As the nmethod is being unloaded now we mark it as
1674   // having the unload event reported - this will ensure that we don't
1675   // attempt to report the event in the unlikely scenario where the
1676   // event is enabled at the time the nmethod is made a zombie.
1677   set_unload_reported();
1678 }
1679 
1680 // Iterate over metadata calling this function.   Used by RedefineClasses
1681 void nmethod::metadata_do(MetadataClosure* f) {
1682   {
1683     // Visit all immediate references that are embedded in the instruction stream.
1684     RelocIterator iter(this, oops_reloc_begin());
1685     while (iter.next()) {
1686       if (iter.type() == relocInfo::metadata_type) {
1687         metadata_Relocation* r = iter.metadata_reloc();
1688         // In this loop, we must only follow those metadatas directly embedded in
1689         // the code.  Other metadatas (oop_index>0) are seen as part of
1690         // the metadata section below.
1691         assert(1 == (r->metadata_is_immediate()) +
1692                (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
1693                "metadata must be found in exactly one place");
1694         if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
1695           Metadata* md = r->metadata_value();
1696           if (md != _method) f->do_metadata(md);
1697         }
1698       } else if (iter.type() == relocInfo::virtual_call_type) {
1699         // Check compiledIC holders associated with this nmethod
1700         ResourceMark rm;
1701         CompiledIC *ic = CompiledIC_at(&iter);
1702         if (ic->is_icholder_call()) {
1703           CompiledICHolder* cichk = ic->cached_icholder();
1704           f->do_metadata(cichk->holder_metadata());
1705           f->do_metadata(cichk->holder_klass());
1706         } else {
1707           Metadata* ic_oop = ic->cached_metadata();
1708           if (ic_oop != NULL) {
1709             f->do_metadata(ic_oop);
1710           }
1711         }
1712       }
1713     }
1714   }
1715 
1716   // Visit the metadata section
1717   for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
1718     if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
1719     Metadata* md = *p;
1720     f->do_metadata(md);
1721   }
1722 
1723   // Visit metadata not embedded in the other places.
1724   if (_method != NULL) f->do_metadata(_method);
1725 }
1726 
1727 // The _is_unloading_state encodes a tuple comprising the unloading cycle
1728 // and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
1729 // This is the bit layout of the _is_unloading_state byte: 00000CCU
1730 // CC refers to the cycle, which has 2 bits, and U refers to the result of
1731 // IsUnloadingBehaviour::is_unloading() for that unloading cycle.
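// For illustration, with the masks defined below: a state byte of 0x05
// (binary 00000101) decodes to is_unloading == true (bit U) in unloading
// cycle 2 (bits CC), while 0x04 (binary 00000100) decodes to
// is_unloading == false for the same cycle.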
1732 
1733 class IsUnloadingState: public AllStatic {
1734   static const uint8_t _is_unloading_mask = 1;
1735   static const uint8_t _is_unloading_shift = 0;
1736   static const uint8_t _unloading_cycle_mask = 6;
1737   static const uint8_t _unloading_cycle_shift = 1;
1738 
1739   static uint8_t set_is_unloading(uint8_t state, bool value) {
1740     state &= ~_is_unloading_mask;
1741     if (value) {
1742       state |= 1 << _is_unloading_shift;
1743     }
1744     assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
1745     return state;
1746   }
1747 
1748   static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
1749     state &= ~_unloading_cycle_mask;
1750     state |= value << _unloading_cycle_shift;
1751     assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
1752     return state;
1753   }
1754 
1755 public:
1756   static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
1757   static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }
1758 
1759   static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
1760     uint8_t state = 0;
1761     state = set_is_unloading(state, is_unloading);
1762     state = set_unloading_cycle(state, unloading_cycle);
1763     return state;
1764   }
1765 };
1766 
1767 bool nmethod::is_unloading() {
1768   uint8_t state = RawAccess<MO_RELAXED>::load(&_is_unloading_state);
1769   bool state_is_unloading = IsUnloadingState::is_unloading(state);
1770   if (state_is_unloading) {
1771     return true;
1772   }
1773   uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
1774   uint8_t current_cycle = CodeCache::unloading_cycle();
1775   if (state_unloading_cycle == current_cycle) {
1776     return false;
1777   }
1778 
1779   // The IsUnloadingBehaviour is responsible for checking if there are any dead
1780   // oops in the CompiledMethod, by calling oops_do on it.
1781   state_unloading_cycle = current_cycle;
1782 
1783   if (is_zombie()) {
1784     // Zombies without calculated unloading epoch are never unloading due to GC.
1785 
1786     // There are no races where a previously observed is_unloading() nmethod
1787     // suddenly becomes not is_unloading() due to here being observed as zombie.
1788 
1789     // With STW unloading, all is_alive() && is_unloading() nmethods are unlinked
1790     // and unloaded in the safepoint. That makes races where an nmethod is first
1791     // observed as is_alive() && is_unloading() and subsequently observed as
1792     // is_zombie() impossible.
1793 
1794     // With concurrent unloading, all references to is_unloading() nmethods are
1795     // first unlinked (e.g. IC caches and dependency contexts). Then a global
1796     // handshake operation is performed with all JavaThreads before finally
1797     // unloading the nmethods. The sweeper never converts is_alive() && is_unloading()
1798     // nmethods to zombies; it waits for them to become is_unloaded(). So before
1799     // the global handshake, it is impossible for is_unloading() nmethods to
1800     // racingly become is_zombie(). And is_unloading() is calculated for all is_alive()
1801     // nmethods before taking that global handshake, meaning that it will never
1802     // be recalculated after the handshake.
1803 
1804     // After that global handshake, is_unloading() nmethods are only observable
1805     // to the iterators, and they will never trigger recomputation of the cached
1806     // is_unloading_state, and hence may not suffer from such races.
1807 
1808     state_is_unloading = false;
1809   } else {
1810     state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this);
1811   }
1812 
1813   state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);
1814 
1815   RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
1816 
1817   return state_is_unloading;
1818 }
1819 
1820 void nmethod::clear_unloading_state() {
1821   uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
1822   RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
1823 }
1824 
1825 
1826 // This is called at the end of the strong tracing/marking phase of a
1827 // GC to unload an nmethod if it contains otherwise unreachable
1828 // oops.
1829 
1830 void nmethod::do_unloading(bool unloading_occurred) {
1831   // Make sure the oops are ready to receive visitors
1832   assert(!is_zombie() && !is_unloaded(),
1833          "should not call follow on zombie or unloaded nmethod");
1834 
1835   if (is_unloading()) {
1836     make_unloaded();
1837   } else {
1838     guarantee(unload_nmethod_caches(unloading_occurred),
1839               "Should not need transition stubs");
1840   }
1841 }
1842 
1843 void nmethod::oops_do(OopClosure* f, bool allow_dead) {
1844   // make sure the oops are ready to receive visitors
1845   assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
1846 
1847   // Prevent extra code cache walk for platforms that don't have immediate oops.
1848   if (relocInfo::mustIterateImmediateOopsInCode()) {
1849     RelocIterator iter(this, oops_reloc_begin());
1850 
1851     while (iter.next()) {
1852       if (iter.type() == relocInfo::oop_type ) {
1853         oop_Relocation* r = iter.oop_reloc();
1854         // In this loop, we must only follow those oops directly embedded in
1855         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
1856         assert(1 == (r->oop_is_immediate()) +
1857                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
1858                "oop must be found in exactly one place");
1859         if (r->oop_is_immediate() && r->oop_value() != NULL) {
1860           f->do_oop(r->oop_addr());
1861         }
1862       }
1863     }
1864   }
1865 
1866   // Scopes
1867   // This includes oop constants not inlined in the code stream.
1868   for (oop* p = oops_begin(); p < oops_end(); p++) {
1869     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
1870     f->do_oop(p);
1871   }
1872 }
1873 
1874 nmethod* volatile nmethod::_oops_do_mark_nmethods;
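// A rough sketch of the claiming protocol implemented by the functions below
// (the mark_link/tag encoding itself is defined in the nmethod declaration):
//  - oops_do_try_claim_weak_request:             unclaimed    -> weak request
//  - oops_do_try_add_to_list_as_weak_done:       weak request -> weak done, linked onto the list
//  - oops_do_try_add_strong_request:             weak request -> strong request
//  - oops_do_try_claim_weak_done_as_strong_done: weak done    -> strong done
//  - oops_do_try_claim_strong_done:              unclaimed    -> strong done
//  - oops_do_add_to_list_as_strong_done:         links a strong done nmethod onto the list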
1875 
1876 void nmethod::oops_do_log_change(const char* state) {
1877   LogTarget(Trace, gc, nmethod) lt;
1878   if (lt.is_enabled()) {
1879     LogStream ls(lt);
1880     CompileTask::print(&ls, this, state, true /* short_form */);
1881   }
1882 }
1883 
1884 bool nmethod::oops_do_try_claim() {
1885   if (oops_do_try_claim_weak_request()) {
1886     nmethod* result = oops_do_try_add_to_list_as_weak_done();
1887     assert(result == NULL, "adding to global list as weak done must always succeed.");
1888     return true;
1889   }
1890   return false;
1891 }
1892 
1893 bool nmethod::oops_do_try_claim_weak_request() {
1894   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
1895 
1896   if ((_oops_do_mark_link == NULL) &&
1897       (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
1898     oops_do_log_change("oops_do, mark weak request");
1899     return true;
1900   }
1901   return false;
1902 }
1903 
1904 void nmethod::oops_do_set_strong_done(nmethod* old_head) {
1905   _oops_do_mark_link = mark_link(old_head, claim_strong_done_tag);
1906 }
1907 
1908 nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
1909   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
1910 
1911   oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
1912   if (old_next == NULL) {
1913     oops_do_log_change("oops_do, mark strong done");
1914   }
1915   return old_next;
1916 }
1917 
1918 nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oops_do_mark_link* next) {
1919   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
1920   assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
1921 
1922   oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
1923   if (old_next == next) {
1924     oops_do_log_change("oops_do, mark strong request");
1925   }
1926   return old_next;
1927 }
1928 
1929 bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_link* next) {
1930   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
1931   assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
1932 
1933   oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
1934   if (old_next == next) {
1935     oops_do_log_change("oops_do, mark weak done -> mark strong done");
1936     return true;
1937   }
1938   return false;
1939 }
1940 
1941 nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() {
1942   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
1943 
1944   assert(extract_state(_oops_do_mark_link) == claim_weak_request_tag ||
1945          extract_state(_oops_do_mark_link) == claim_strong_request_tag,
1946          "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
1947 
1948   nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
1949   // Self-loop if needed.
1950   if (old_head == NULL) {
1951     old_head = this;
1952   }
1953   // Try to install end of list and weak done tag.
1954   if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
1955     oops_do_log_change("oops_do, mark weak done");
1956     return NULL;
1957   } else {
1958     return old_head;
1959   }
1960 }
1961 
1962 void nmethod::oops_do_add_to_list_as_strong_done() {
1963   assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
1964 
1965   nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
1966   // Self-loop if needed.
1967   if (old_head == NULL) {
1968     old_head = this;
1969   }
1970   assert(_oops_do_mark_link == mark_link(this, claim_strong_done_tag), "must be but is nmethod " PTR_FORMAT " state %u",
1971          p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
1972 
1973   oops_do_set_strong_done(old_head);
1974 }
1975 
1976 void nmethod::oops_do_process_weak(OopsDoProcessor* p) {
1977   if (!oops_do_try_claim_weak_request()) {
1978     // Failed to claim for weak processing.
1979     oops_do_log_change("oops_do, mark weak request fail");
1980     return;
1981   }
1982 
1983   p->do_regular_processing(this);
1984 
1985   nmethod* old_head = oops_do_try_add_to_list_as_weak_done();
1986   if (old_head == NULL) {
1987     return;
1988   }
1989   oops_do_log_change("oops_do, mark weak done fail");
1990   // Adding to global list failed, another thread added a strong request.
1991   assert(extract_state(_oops_do_mark_link) == claim_strong_request_tag,
1992          "must be but is %u", extract_state(_oops_do_mark_link));
1993 
1994   oops_do_log_change("oops_do, mark weak request -> mark strong done");
1995 
1996   oops_do_set_strong_done(old_head);
1997   // Do missing strong processing.
1998   p->do_remaining_strong_processing(this);
1999 }
2000 
2001 void nmethod::oops_do_process_strong(OopsDoProcessor* p) {
2002   oops_do_mark_link* next_raw = oops_do_try_claim_strong_done();
2003   if (next_raw == NULL) {
2004     p->do_regular_processing(this);
2005     oops_do_add_to_list_as_strong_done();
2006     return;
2007   }
2008   // Claim failed. Figure out why and handle it.
2009   if (oops_do_has_weak_request(next_raw)) {
2010     oops_do_mark_link* old = next_raw;
2011     // Claim failed because being weak processed (state == "weak request").
2012     // Try to request deferred strong processing.
2013     next_raw = oops_do_try_add_strong_request(old);
2014     if (next_raw == old) {
2015       // Successfully requested deferred strong processing.
2016       return;
2017     }
2018     // Failed because of a concurrent transition. No longer in "weak request" state.
2019   }
2020   if (oops_do_has_any_strong_state(next_raw)) {
2021     // Already claimed for strong processing or requested for such.
2022     return;
2023   }
2024   if (oops_do_try_claim_weak_done_as_strong_done(next_raw)) {
2025     // Successfully claimed "weak done" as "strong done". Do the missing marking.
2026     p->do_remaining_strong_processing(this);
2027     return;
2028   }
2029   // Claim failed, some other thread got it.
2030 }
2031 
2032 void nmethod::oops_do_marking_prologue() {
2033   assert_at_safepoint();
2034 
2035   log_trace(gc, nmethod)("oops_do_marking_prologue");
2036   assert(_oops_do_mark_nmethods == NULL, "must be empty");
2037 }
2038 
2039 void nmethod::oops_do_marking_epilogue() {
2040   assert_at_safepoint();
2041 
2042   nmethod* next = _oops_do_mark_nmethods;
2043   _oops_do_mark_nmethods = NULL;
2044   if (next != NULL) {
2045     nmethod* cur;
2046     do {
2047       cur = next;
2048       next = extract_nmethod(cur->_oops_do_mark_link);
2049       cur->_oops_do_mark_link = NULL;
2050       DEBUG_ONLY(cur->verify_oop_relocations());
2051 
2052       LogTarget(Trace, gc, nmethod) lt;
2053       if (lt.is_enabled()) {
2054         LogStream ls(lt);
2055         CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
2056       }
2057       // End if self-loop has been detected.
2058     } while (cur != next);
2059   }
2060   log_trace(gc, nmethod)("oops_do_marking_epilogue");
2061 }
2062 
2063 inline bool includes(void* p, void* from, void* to) {
2064   return from <= p && p < to;
2065 }
2066 
2067 
2068 void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
2069   assert(count >= 2, "must be sentinel values, at least");
2070 
2071 #ifdef ASSERT
2072   // must be sorted and unique; we do a binary search in find_pc_desc()
2073   int prev_offset = pcs[0].pc_offset();
2074   assert(prev_offset == PcDesc::lower_offset_limit,
2075          "must start with a sentinel");
2076   for (int i = 1; i < count; i++) {
2077     int this_offset = pcs[i].pc_offset();
2078     assert(this_offset > prev_offset, "offsets must be sorted");
2079     prev_offset = this_offset;
2080   }
2081   assert(prev_offset == PcDesc::upper_offset_limit,
2082          "must end with a sentinel");
2083 #endif //ASSERT
2084 
2085   // Search for MethodHandle invokes and tag the nmethod.
2086   for (int i = 0; i < count; i++) {
2087     if (pcs[i].is_method_handle_invoke()) {
2088       set_has_method_handle_invokes(true);
2089       break;
2090     }
2091   }
2092   assert(has_method_handle_invokes() == (_deopt_mh_handler_begin != NULL), "must have deopt mh handler");
2093 
2094   int size = count * sizeof(PcDesc);
2095   assert(scopes_pcs_size() >= size, "oob");
2096   memcpy(scopes_pcs_begin(), pcs, size);
2097 
2098   // Adjust the final sentinel downward.
2099   PcDesc* last_pc = &scopes_pcs_begin()[count-1];
2100   assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity");
2101   last_pc->set_pc_offset(content_size() + 1);
2102   for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) {
2103     // Fill any rounding gaps with copies of the last record.
2104     last_pc[1] = last_pc[0];
2105   }
2106   // The following assert could fail if sizeof(PcDesc) is not
2107   // an integral multiple of oopSize (the rounding term).
2108   // If it fails, change the logic to always allocate a multiple
2109   // of sizeof(PcDesc), and fill unused words with copies of *last_pc.
2110   assert(last_pc + 1 == scopes_pcs_end(), "must match exactly");
2111 }
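// For illustration: if the scopes_pcs area was rounded up to hold one more
// PcDesc than 'count', the loop above duplicates the (adjusted) final record
// into that extra slot, presumably so the search never reads an
// uninitialized entry.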
2112 
2113 void nmethod::copy_scopes_data(u_char* buffer, int size) {
2114   assert(scopes_data_size() >= size, "oob");
2115   memcpy(scopes_data_begin(), buffer, size);
2116 }
2117 
2118 #ifdef ASSERT
2119 static PcDesc* linear_search(const PcDescSearch& search, int pc_offset, bool approximate) {
2120   PcDesc* lower = search.scopes_pcs_begin();
2121   PcDesc* upper = search.scopes_pcs_end();
2122   lower += 1; // exclude initial sentinel
2123   PcDesc* res = NULL;
2124   for (PcDesc* p = lower; p < upper; p++) {
2125     NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
2126     if (match_desc(p, pc_offset, approximate)) {
2127       if (res == NULL)
2128         res = p;
2129       else
2130         res = (PcDesc*) badAddress;
2131     }
2132   }
2133   return res;
2134 }
2135 #endif
2136 
2137 
2138 // Finds a PcDesc with real-pc equal to "pc"
2139 PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search) {
2140   address base_address = search.code_begin();
2141   if ((pc < base_address) ||
2142       (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
2143     return NULL;  // PC is wildly out of range
2144   }
2145   int pc_offset = (int) (pc - base_address);
2146 
2147   // Check the PcDesc cache if it contains the desired PcDesc
2148   // (This has an almost 100% hit rate.)
2149   PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
2150   if (res != NULL) {
2151     assert(res == linear_search(search, pc_offset, approximate), "cache ok");
2152     return res;
2153   }
2154 
2155   // Fallback algorithm: quasi-linear search for the PcDesc
2156   // Find the last pc_offset less than the given offset.
2157   // The successor must be the required match, if there is a match at all.
2158   // (Use a fixed radix to avoid expensive affine pointer arithmetic.)
2159   PcDesc* lower = search.scopes_pcs_begin();
2160   PcDesc* upper = search.scopes_pcs_end();
2161   upper -= 1; // exclude final sentinel
2162   if (lower >= upper)  return NULL;  // native method; no PcDescs at all
2163 
2164 #define assert_LU_OK \
2165   /* invariant on lower..upper during the following search: */ \
2166   assert(lower->pc_offset() <  pc_offset, "sanity"); \
2167   assert(upper->pc_offset() >= pc_offset, "sanity")
2168   assert_LU_OK;
2169 
2170   // Use the last successful return as a split point.
2171   PcDesc* mid = _pc_desc_cache.last_pc_desc();
2172   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2173   if (mid->pc_offset() < pc_offset) {
2174     lower = mid;
2175   } else {
2176     upper = mid;
2177   }
2178 
2179   // Take giant steps at first (4096, then 256, then 16, then 1)
2180   const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
2181   const int RADIX = (1 << LOG2_RADIX);
2182   for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
2183     while ((mid = lower + step) < upper) {
2184       assert_LU_OK;
2185       NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2186       if (mid->pc_offset() < pc_offset) {
2187         lower = mid;
2188       } else {
2189         upper = mid;
2190         break;
2191       }
2192     }
2193     assert_LU_OK;
2194   }
2195 
2196   // Sneak up on the value with a linear search of length ~16.
2197   while (true) {
2198     assert_LU_OK;
2199     mid = lower + 1;
2200     NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
2201     if (mid->pc_offset() < pc_offset) {
2202       lower = mid;
2203     } else {
2204       upper = mid;
2205       break;
2206     }
2207   }
2208 #undef assert_LU_OK
2209 
2210   if (match_desc(upper, pc_offset, approximate)) {
2211     assert(upper == linear_search(search, pc_offset, approximate), "search ok");
2212     _pc_desc_cache.add_pc_desc(upper);
2213     return upper;
2214   } else {
2215     assert(NULL == linear_search(search, pc_offset, approximate), "search ok");
2216     return NULL;
2217   }
2218 }
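// A worked example of the search above, for illustration: in product builds
// LOG2_RADIX is 4, so the outer loop probes with strides of 4096, 256 and 16
// PcDesc entries to shrink the [lower, upper] interval, and the final loop
// then advances one entry at a time until upper->pc_offset() >= pc_offset,
// matching the "4096, then 256, then 16, then 1" comment above.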
2219 
2220 
2221 void nmethod::check_all_dependencies(DepChange& changes) {
2222   // Checked dependencies are allocated into this ResourceMark
2223   ResourceMark rm;
2224 
2225   // Turn off dependency tracing while actually testing dependencies.
2226   NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
2227 
2228   typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
2229                             &DependencySignature::equals, 11027> DepTable;
2230 
2231   DepTable* table = new DepTable();
2232 
2233   // Iterate over live nmethods and check dependencies of all nmethods that are not
2234   // marked for deoptimization. A particular dependency is only checked once.
2235   NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
2236   while(iter.next()) {
2237     nmethod* nm = iter.method();
2238     // Only notify for live nmethods
2239     if (!nm->is_marked_for_deoptimization()) {
2240       for (Dependencies::DepStream deps(nm); deps.next(); ) {
2241         // Construct abstraction of a dependency.
2242         DependencySignature* current_sig = new DependencySignature(deps);
2243 
2244         // Determine if dependency is already checked. table->put(...) returns
2245         // 'true' if the dependency is added (i.e., was not in the hashtable).
2246         if (table->put(*current_sig, 1)) {
2247           if (deps.check_dependency() != NULL) {
2248             // Dependency checking failed. Print out information about the failed
2249             // dependency and finally fail with an assert. We can fail here, since
2250             // dependency checking is never done in a product build.
2251             tty->print_cr("Failed dependency:");
2252             changes.print();
2253             nm->print();
2254             nm->print_dependencies();
2255             assert(false, "Should have been marked for deoptimization");
2256           }
2257         }
2258       }
2259     }
2260   }
2261 }
2262 
2263 bool nmethod::check_dependency_on(DepChange& changes) {
2264   // What has happened:
2265   // 1) a new class dependee has been added
2266   // 2) dependee and all its super classes have been marked
2267   bool found_check = false;  // set true if we are upset
2268   for (Dependencies::DepStream deps(this); deps.next(); ) {
2269     // Evaluate only relevant dependencies.
2270     if (deps.spot_check_dependency_at(changes) != NULL) {
2271       found_check = true;
2272       NOT_DEBUG(break);
2273     }
2274   }
2275   return found_check;
2276 }
2277 
2278 // Called from mark_for_deoptimization, when dependee is invalidated.
2279 bool nmethod::is_dependent_on_method(Method* dependee) {
2280   for (Dependencies::DepStream deps(this); deps.next(); ) {
2281     if (deps.type() != Dependencies::evol_method)
2282       continue;
2283     Method* method = deps.method_argument(0);
2284     if (method == dependee) return true;
2285   }
2286   return false;
2287 }
2288 
2289 
2290 bool nmethod::is_patchable_at(address instr_addr) {
2291   assert(insts_contains(instr_addr), "wrong nmethod used");
2292   if (is_zombie()) {
2293     // a zombie may never be patched
2294     return false;
2295   }
2296   return true;
2297 }
2298 
2299 
2300 void nmethod_init() {
2301   // make sure you didn't forget to adjust the filler fields
2302   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
2303 }
2304 
2305 
2306 //-------------------------------------------------------------------------------------------
2307 
2308 
2309 // QQQ might we make this work from a frame??
2310 nmethodLocker::nmethodLocker(address pc) {
2311   CodeBlob* cb = CodeCache::find_blob(pc);
2312   guarantee(cb != NULL && cb->is_compiled(), "bad pc for a nmethod found");
2313   _nm = cb->as_compiled_method();
2314   lock_nmethod(_nm);
2315 }
2316 
2317 // Only JvmtiDeferredEvent::compiled_method_unload_event()
2318 // should pass zombie_ok == true.
2319 void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
2320   if (cm == NULL)  return;
2321   nmethod* nm = cm->as_nmethod();
2322   Atomic::inc(&nm->_lock_count);
2323   assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p", nm);
2324 }
2325 
2326 void nmethodLocker::unlock_nmethod(CompiledMethod* cm) {
2327   if (cm == NULL)  return;
2328   nmethod* nm = cm->as_nmethod();
2329   Atomic::dec(&nm->_lock_count);
2330   assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
2331 }
2332 
2333 
2334 // -----------------------------------------------------------------------------
2335 // Verification
2336 
2337 class VerifyOopsClosure: public OopClosure {
2338   nmethod* _nm;
2339   bool     _ok;
2340 public:
2341   VerifyOopsClosure(nmethod* nm) : _nm(nm), _ok(true) { }
2342   bool ok() { return _ok; }
2343   virtual void do_oop(oop* p) {
2344     if (oopDesc::is_oop_or_null(*p)) return;
2345     // Print diagnostic information before calling print_nmethod().
2346     // Assertions therein might prevent call from returning.
2347     tty->print_cr("*** non-oop " PTR_FORMAT " found at " PTR_FORMAT " (offset %d)",
2348                   p2i(*p), p2i(p), (int)((intptr_t)p - (intptr_t)_nm));
2349     if (_ok) {
2350       _nm->print_nmethod(true);
2351       _ok = false;
2352     }
2353   }
2354   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2355 };
2356 
2357 class VerifyMetadataClosure: public MetadataClosure {
2358  public:
2359   void do_metadata(Metadata* md) {
2360     if (md->is_method()) {
2361       Method* method = (Method*)md;
2362       assert(!method->is_old(), "Should not be installing old methods");
2363     }
2364   }
2365 };
2366 
2367 
2368 void nmethod::verify() {
2369 
2370   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
2371   // seems odd.
2372 
2373   if (is_zombie() || is_not_entrant() || is_unloaded())
2374     return;
2375 
2376   // Make sure all the entry points are correctly aligned for patching.
2377   NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point());
2378 
2379   // assert(oopDesc::is_oop(method()), "must be valid");
2380 
2381   ResourceMark rm;
2382 
2383   if (!CodeCache::contains(this)) {
2384     fatal("nmethod at " INTPTR_FORMAT " not in zone", p2i(this));
2385   }
2386 
2387   if(is_native_method() )
2388     return;
2389 
2390   nmethod* nm = CodeCache::find_nmethod(verified_entry_point());
2391   if (nm != this) {
2392     fatal("findNMethod did not find this nmethod (" INTPTR_FORMAT ")", p2i(this));
2393   }
2394 
2395   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2396     if (! p->verify(this)) {
2397       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
2398     }
2399   }
2400 
2401 #ifdef ASSERT
2402 #if INCLUDE_JVMCI
2403   {
2404     // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
2405     ImmutableOopMapSet* oms = oop_maps();
2406     ImplicitExceptionTable implicit_table(this);
2407     for (uint i = 0; i < implicit_table.len(); i++) {
2408       int exec_offset = (int) implicit_table.get_exec_offset(i);
2409       if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
2410         assert(pc_desc_at(code_begin() + exec_offset) != NULL, "missing PcDesc");
2411         bool found = false;
2412         for (int i = 0, imax = oms->count(); i < imax; i++) {
2413           if (oms->pair_at(i)->pc_offset() == exec_offset) {
2414             found = true;
2415             break;
2416           }
2417         }
2418         assert(found, "missing oopmap");
2419       }
2420     }
2421   }
2422 #endif
2423 #endif
2424 
2425   VerifyOopsClosure voc(this);
2426   oops_do(&voc);
2427   assert(voc.ok(), "embedded oops must be OK");
2428   Universe::heap()->verify_nmethod(this);
2429 
2430   assert(_oops_do_mark_link == NULL, "_oops_do_mark_link for %s should be NULL but is " PTR_FORMAT,
2431          nm->method()->external_name(), p2i(_oops_do_mark_link));
2432   verify_scopes();
2433 
2434   CompiledICLocker nm_verify(this);
2435   VerifyMetadataClosure vmc;
2436   metadata_do(&vmc);
2437 }
2438 
2439 
2440 void nmethod::verify_interrupt_point(address call_site) {
2441 
2442   // Verify IC only when nmethod installation is finished.
2443   if (!is_not_installed()) {
2444     if (CompiledICLocker::is_safe(this)) {
2445       CompiledIC_at(this, call_site);
2446     } else {
2447       CompiledICLocker ml_verify(this);
2448       CompiledIC_at(this, call_site);
2449     }
2450   }
2451 
2452   HandleMark hm(Thread::current());
2453 
2454   PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
2455   assert(pd != NULL, "PcDesc must exist");
2456   for (ScopeDesc* sd = new ScopeDesc(this, pd);
2457        !sd->is_top(); sd = sd->sender()) {
2458     sd->verify();
2459   }
2460 }
2461 
2462 void nmethod::verify_scopes() {
2463   if( !method() ) return;       // Runtime stubs have no scope
2464   if (method()->is_native()) return; // Ignore stub methods.
2465   // iterate through all interrupt points
2466   // and verify the debug information is valid.
2467   RelocIterator iter((nmethod*)this);
2468   while (iter.next()) {
2469     address stub = NULL;
2470     switch (iter.type()) {
2471       case relocInfo::virtual_call_type:
2472         verify_interrupt_point(iter.addr());
2473         break;
2474       case relocInfo::opt_virtual_call_type:
2475         stub = iter.opt_virtual_call_reloc()->static_stub();
2476         verify_interrupt_point(iter.addr());
2477         break;
2478       case relocInfo::static_call_type:
2479         stub = iter.static_call_reloc()->static_stub();
2480         //verify_interrupt_point(iter.addr());
2481         break;
2482       case relocInfo::runtime_call_type:
2483       case relocInfo::runtime_call_w_cp_type: {
2484         address destination = iter.reloc()->value();
2485         // Right now there is no way to find out which entries support
2486         // an interrupt point.  It would be nice if we had this
2487         // information in a table.
2488         break;
2489       }
2490       default:
2491         break;
2492     }
2493     assert(stub == NULL || stub_contains(stub), "static call stub outside stub section");
2494   }
2495 }
2496 
2497 
2498 // -----------------------------------------------------------------------------
2499 // Printing operations
2500 
2501 void nmethod::print() const {
2502   ttyLocker ttyl;   // keep the following output all in one block
2503   print(tty);
2504 }
2505 
2506 void nmethod::print(outputStream* st) const {
2507   ResourceMark rm;
2508 
2509   st->print("Compiled method ");
2510 
2511   if (is_compiled_by_c1()) {
2512     st->print("(c1) ");
2513   } else if (is_compiled_by_c2()) {
2514     st->print("(c2) ");
2515   } else if (is_compiled_by_jvmci()) {
2516     st->print("(JVMCI) ");
2517   } else {
2518     st->print("(n/a) ");
2519   }
2520 
2521   print_on(st, NULL);
2522 
2523   if (WizardMode) {
2524     st->print("((nmethod*) " INTPTR_FORMAT ") ", p2i(this));
2525     st->print(" for method " INTPTR_FORMAT , p2i(method()));
2526     st->print(" { ");
2527     st->print_cr("%s ", state());
2528     st->print_cr("}:");
2529   }
2530   if (size              () > 0) st->print_cr(" total in heap  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2531                                              p2i(this),
2532                                              p2i(this) + size(),
2533                                              size());
2534   if (relocation_size   () > 0) st->print_cr(" relocation     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2535                                              p2i(relocation_begin()),
2536                                              p2i(relocation_end()),
2537                                              relocation_size());
2538   if (consts_size       () > 0) st->print_cr(" constants      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2539                                              p2i(consts_begin()),
2540                                              p2i(consts_end()),
2541                                              consts_size());
2542   if (insts_size        () > 0) st->print_cr(" main code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2543                                              p2i(insts_begin()),
2544                                              p2i(insts_end()),
2545                                              insts_size());
2546   if (stub_size         () > 0) st->print_cr(" stub code      [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2547                                              p2i(stub_begin()),
2548                                              p2i(stub_end()),
2549                                              stub_size());
2550   if (oops_size         () > 0) st->print_cr(" oops           [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2551                                              p2i(oops_begin()),
2552                                              p2i(oops_end()),
2553                                              oops_size());
2554   if (metadata_size     () > 0) st->print_cr(" metadata       [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2555                                              p2i(metadata_begin()),
2556                                              p2i(metadata_end()),
2557                                              metadata_size());
2558   if (scopes_data_size  () > 0) st->print_cr(" scopes data    [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2559                                              p2i(scopes_data_begin()),
2560                                              p2i(scopes_data_end()),
2561                                              scopes_data_size());
2562   if (scopes_pcs_size   () > 0) st->print_cr(" scopes pcs     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2563                                              p2i(scopes_pcs_begin()),
2564                                              p2i(scopes_pcs_end()),
2565                                              scopes_pcs_size());
2566   if (dependencies_size () > 0) st->print_cr(" dependencies   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2567                                              p2i(dependencies_begin()),
2568                                              p2i(dependencies_end()),
2569                                              dependencies_size());
2570   if (handler_table_size() > 0) st->print_cr(" handler table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2571                                              p2i(handler_table_begin()),
2572                                              p2i(handler_table_end()),
2573                                              handler_table_size());
2574   if (nul_chk_table_size() > 0) st->print_cr(" nul chk table  [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2575                                              p2i(nul_chk_table_begin()),
2576                                              p2i(nul_chk_table_end()),
2577                                              nul_chk_table_size());
2578 #if INCLUDE_JVMCI
2579   if (speculations_size () > 0) st->print_cr(" speculations   [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2580                                              p2i(speculations_begin()),
2581                                              p2i(speculations_end()),
2582                                              speculations_size());
2583   if (jvmci_data_size   () > 0) st->print_cr(" JVMCI data     [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
2584                                              p2i(jvmci_data_begin()),
2585                                              p2i(jvmci_data_end()),
2586                                              jvmci_data_size());
2587 #endif
2588 }
2589 
2590 void nmethod::print_code() {
2591   ResourceMark m;
2592   ttyLocker ttyl;
2593   // Call the specialized decode method of this class.
2594   decode(tty);
2595 }
2596 
2597 #ifndef PRODUCT  // called InstanceKlass methods are available only then. Declared as PRODUCT_RETURN
2598 
2599 void nmethod::print_dependencies() {
2600   ResourceMark rm;
2601   ttyLocker ttyl;   // keep the following output all in one block
2602   tty->print_cr("Dependencies:");
2603   for (Dependencies::DepStream deps(this); deps.next(); ) {
2604     deps.print_dependency();
2605     Klass* ctxk = deps.context_type();
2606     if (ctxk != NULL) {
2607       if (ctxk->is_instance_klass() && InstanceKlass::cast(ctxk)->is_dependent_nmethod(this)) {
2608         tty->print_cr("   [nmethod<=klass]%s", ctxk->external_name());
2609       }
2610     }
2611     deps.log_dependency();  // put it into the xml log also
2612   }
2613 }
2614 #endif
2615 
2616 #if defined(SUPPORT_DATA_STRUCTS)
2617 
2618 // Print the oops from the underlying CodeBlob.
2619 void nmethod::print_oops(outputStream* st) {
2620   ResourceMark m;
2621   st->print("Oops:");
2622   if (oops_begin() < oops_end()) {
2623     st->cr();
2624     for (oop* p = oops_begin(); p < oops_end(); p++) {
2625       Disassembler::print_location((unsigned char*)p, (unsigned char*)oops_begin(), (unsigned char*)oops_end(), st, true, false);
2626       st->print(PTR_FORMAT " ", *((uintptr_t*)p));
2627       if (Universe::contains_non_oop_word(p)) {
2628         st->print_cr("NON_OOP");
2629         continue;  // skip non-oops
2630       }
2631       if (*p == NULL) {
2632         st->print_cr("NULL-oop");
2633         continue;  // skip non-oops
2634       }
2635       (*p)->print_value_on(st);
2636       st->cr();
2637     }
2638   } else {
2639     st->print_cr(" <list empty>");
2640   }
2641 }
2642 
2643 // Print metadata pool.
2644 void nmethod::print_metadata(outputStream* st) {
2645   ResourceMark m;
2646   st->print("Metadata:");
2647   if (metadata_begin() < metadata_end()) {
2648     st->cr();
2649     for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
2650       Disassembler::print_location((unsigned char*)p, (unsigned char*)metadata_begin(), (unsigned char*)metadata_end(), st, true, false);
2651       st->print(PTR_FORMAT " ", *((uintptr_t*)p));
2652       if (*p && *p != Universe::non_oop_word()) {
2653         (*p)->print_value_on(st);
2654       }
2655       st->cr();
2656     }
2657   } else {
2658     st->print_cr(" <list empty>");
2659   }
2660 }
2661 
2662 #ifndef PRODUCT  // ScopeDesc::print_on() is available only then. Declared as PRODUCT_RETURN
2663 void nmethod::print_scopes_on(outputStream* st) {
2664   // Find the first pc desc for all scopes in the code and print it.
2665   ResourceMark rm;
2666   st->print("scopes:");
2667   if (scopes_pcs_begin() < scopes_pcs_end()) {
2668     st->cr();
2669     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2670       if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
2671         continue;
2672 
2673       ScopeDesc* sd = scope_desc_at(p->real_pc(this));
2674       while (sd != NULL) {
2675         sd->print_on(st, p);  // print output ends with a newline
2676         sd = sd->sender();
2677       }
2678     }
2679   } else {
2680     st->print_cr(" <list empty>");
2681   }
2682 }
2683 #endif
2684 
2685 #ifndef PRODUCT  // RelocIterator supports printing only then.
2686 void nmethod::print_relocations() {
2687   ResourceMark m;       // in case methods get printed via the debugger
2688   tty->print_cr("relocations:");
2689   RelocIterator iter(this);
2690   iter.print();
2691 }
2692 #endif
2693 
2694 void nmethod::print_pcs_on(outputStream* st) {
2695   ResourceMark m;       // in case methods get printed via debugger
2696   st->print("pc-bytecode offsets:");
2697   if (scopes_pcs_begin() < scopes_pcs_end()) {
2698     st->cr();
2699     for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
2700       p->print_on(st, this);  // print output ends with a newline
2701     }
2702   } else {
2703     st->print_cr(" <list empty>");
2704   }
2705 }
2706 
2707 void nmethod::print_native_invokers() {
2708   ResourceMark m;       // in case methods get printed via debugger
2709   tty->print_cr("Native invokers:");
2710   for (RuntimeStub** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
2711     (*itt)->print_on(tty);
2712   }
2713 }
2714 
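// The exception handler and implicit null check tables below are reconstructed
// from this nmethod and printed using code_begin() as the base address.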
2715 void nmethod::print_handler_table() {
2716   ExceptionHandlerTable(this).print(code_begin());
2717 }
2718 
2719 void nmethod::print_nul_chk_table() {
2720   ImplicitExceptionTable(this).print(code_begin());
2721 }
2722 
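// Print one entry of the recorded-oops table. Index 0 is not read from the table;
// it is always reported as NULL. Non-oop filler words are printed as such instead
// of being materialized as oops (which would trip oop verification).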
2723 void nmethod::print_recorded_oop(int log_n, int i) {
2724   void* value;
2725 
2726   if (i == 0) {
2727     value = NULL;
2728   } else {
2729     // Be careful around non-oop words. Don't create an oop
2730     // with that value, or it will assert in verification code.
2731     if (Universe::contains_non_oop_word(oop_addr_at(i))) {
2732       value = Universe::non_oop_word();
2733     } else {
2734       value = oop_at(i);
2735     }
2736   }
2737 
2738   tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(value));
2739 
2740   if (value == Universe::non_oop_word()) {
2741     tty->print("non-oop word");
2742   } else {
2743     if (value == 0) {
2744       tty->print("NULL-oop");
2745     } else {
2746       oop_at(i)->print_value_on(tty);
2747     }
2748   }
2749 
2750   tty->cr();
2751 }
2752 
2753 void nmethod::print_recorded_oops() {
2754   const int n = oops_count();
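  // Width of the index column: one digit per power of ten up to 9999 entries,
  // then a fixed width of 6 for larger counts.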
2755   const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
2756   tty->print("Recorded oops:");
2757   if (n > 0) {
2758     tty->cr();
2759     for (int i = 0; i < n; i++) {
2760       print_recorded_oop(log_n, i);
2761     }
2762   } else {
2763     tty->print_cr(" <list empty>");
2764   }
2765 }
2766 
2767 void nmethod::print_recorded_metadata() {
2768   const int n = metadata_count();
2769   const int log_n = (n<10) ? 1 : (n<100) ? 2 : (n<1000) ? 3 : (n<10000) ? 4 : 6;
2770   tty->print("Recorded metadata:");
2771   if (n > 0) {
2772     tty->cr();
2773     for (int i = 0; i < n; i++) {
2774       Metadata* m = metadata_at(i);
2775       tty->print("#%*d: " INTPTR_FORMAT " ", log_n, i, p2i(m));
2776       if (m == (Metadata*)Universe::non_oop_word()) {
2777         tty->print("non-metadata word");
2778       } else if (m == NULL) {
2779         tty->print("NULL-oop");
2780       } else {
2781         Metadata::print_value_on_maybe_null(tty, m);
2782       }
2783       tty->cr();
2784     }
2785   } else {
2786     tty->print_cr(" <list empty>");
2787   }
2788 }
2789 #endif
2790 
2791 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
2792 
2793 void nmethod::print_constant_pool(outputStream* st) {
2794   //-----------------------------------
2795   //---<  Print the constant pool  >---
2796   //-----------------------------------
2797   int consts_size = this->consts_size();
2798   if ( consts_size > 0 ) {
2799     unsigned char* cstart = this->consts_begin();
2800     unsigned char* cp     = cstart;
2801     unsigned char* cend   = cp + consts_size;
2802     unsigned int   bytes_per_line = 4;
2803     unsigned int   CP_alignment   = 8;
2804     unsigned int   n;
2805 
2806     st->cr();
2807 
2808     //---<  print CP header to make clear what's printed  >---
2809     if( ((uintptr_t)cp&(CP_alignment-1)) == 0 ) {
2810       n = bytes_per_line;
2811       st->print_cr("[Constant Pool]");
2812       Disassembler::print_location(cp, cstart, cend, st, true, true);
2813       Disassembler::print_hexdata(cp, n, st, true);
2814       st->cr();
2815     } else {
2816       n = (uintptr_t)cp&(bytes_per_line-1);
2817       st->print_cr("[Constant Pool (unaligned)]");
2818     }
2819 
2820     //---<  print CP contents, bytes_per_line at a time  >---
2821     while (cp < cend) {
2822       Disassembler::print_location(cp, cstart, cend, st, true, false);
2823       Disassembler::print_hexdata(cp, n, st, false);
2824       cp += n;
2825       n   = bytes_per_line;
2826       st->cr();
2827     }
2828 
2829     //---<  Show potential alignment gap between constant pool and code  >---
2830     cend = code_begin();
2831     if( cp < cend ) {
2832       n = 4;
2833       st->print_cr("[Code entry alignment]");
2834       while (cp < cend) {
2835         Disassembler::print_location(cp, cstart, cend, st, false, false);
2836         cp += n;
2837         st->cr();
2838       }
2839     }
2840   } else {
2841     st->print_cr("[Constant Pool (empty)]");
2842   }
2843   st->cr();
2844 }
2845 
2846 #endif
2847 
2848 // Disassemble this nmethod.
2849 // Print additional debug information, if requested. This could be code
2850 // comments, block comments, profiling counters, etc.
2851 // The undisassembled format is useful if no disassembler library is available.
2852 // The resulting hex dump (with markers) can be disassembled later, or on
2853 // another system, when/where a disassembler library is available.
2854 void nmethod::decode2(outputStream* ost) const {
2855 
2856   // Called from frame::back_trace_with_decode without ResourceMark.
2857   ResourceMark rm;
2858 
2859   // Make sure we have a valid stream to print on.
2860   outputStream* st = ost ? ost : tty;
2861 
2862 #if defined(SUPPORT_ABSTRACT_ASSEMBLY) && ! defined(SUPPORT_ASSEMBLY)
2863   const bool use_compressed_format    = true;
2864   const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
2865                                                                   AbstractDisassembler::show_block_comment());
2866 #else
2867   const bool use_compressed_format    = Disassembler::is_abstract();
2868   const bool compressed_with_comments = use_compressed_format && (AbstractDisassembler::show_comment() ||
2869                                                                   AbstractDisassembler::show_block_comment());
2870 #endif
2871 
2872   st->cr();
2873   this->print(st);
2874   st->cr();
2875 
2876 #if defined(SUPPORT_ASSEMBLY)
2877   //----------------------------------
2878   //---<  Print real disassembly  >---
2879   //----------------------------------
2880   if (! use_compressed_format) {
2881     Disassembler::decode(const_cast<nmethod*>(this), st);
2882     return;
2883   }
2884 #endif
2885 
2886 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
2887 
2888   // Compressed undisassembled disassembly format.
2889   // The following states are defined/supported:
2890   //   = 0 - currently at bol() position, nothing printed yet on current line.
2891   //   = 1 - currently at position after print_location().
2892   //   > 1 - in the midst of printing instruction stream bytes.
2893   int        compressed_format_idx    = 0;
2894   int        code_comment_column      = 0;
2895   const int  instr_maxlen             = Assembler::instr_maxlen();
2896   const uint tabspacing               = 8;
2897   unsigned char* start = this->code_begin();
2898   unsigned char* p     = this->code_begin();
2899   unsigned char* end   = this->code_end();
2900   unsigned char* pss   = p; // start of a code section (used for offsets)
2901 
2902   if ((start == NULL) || (end == NULL)) {
2903     st->print_cr("PrintAssembly not possible due to uninitialized section pointers");
2904     return;
2905   }
2906 #endif
2907 
2908 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
2909   //---<  plain abstract disassembly, no comments or anything, just section headers  >---
2910   if (use_compressed_format && ! compressed_with_comments) {
2911     const_cast<nmethod*>(this)->print_constant_pool(st);
2912 
2913     //---<  Open the output (Marker for post-mortem disassembler)  >---
2914     st->print_cr("[MachCode]");
2915     const char* header = NULL;
2916     address p0 = p;
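    // Scan instruction by instruction; whenever a section label is reached, first
    // dump the raw bytes accumulated since the previous label, then print the label.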
2917     while (p < end) {
2918       address pp = p;
2919       while ((p < end) && (header == NULL)) {
2920         header = nmethod_section_label(p);
2921         pp  = p;
2922         p  += Assembler::instr_len(p);
2923       }
2924       if (pp > p0) {
2925         AbstractDisassembler::decode_range_abstract(p0, pp, start, end, st, Assembler::instr_maxlen());
2926         p0 = pp;
2927         p  = pp;
2928         header = NULL;
2929       } else if (header != NULL) {
2930         st->bol();
2931         st->print_cr("%s", header);
2932         header = NULL;
2933       }
2934     }
2935     //---<  Close the output (Marker for post-mortem disassembler)  >---
2936     st->bol();
2937     st->print_cr("[/MachCode]");
2938     return;
2939   }
2940 #endif
2941 
2942 #if defined(SUPPORT_ABSTRACT_ASSEMBLY)
2943   //---<  abstract disassembly with comments and section headers merged in  >---
2944   if (compressed_with_comments) {
2945     const_cast<nmethod*>(this)->print_constant_pool(st);
2946 
2947     //---<  Open the output (Marker for post-mortem disassembler)  >---
2948     st->print_cr("[MachCode]");
2949     while ((p < end) && (p != NULL)) {
2950       const int instruction_size_in_bytes = Assembler::instr_len(p);
2951 
2952       //---<  Block comments for nmethod. Interrupts instruction stream, if any.  >---
2953       // Outputs a bol() before and a cr() after, but only if a comment is printed.
2954       // Prints nmethod_section_label as well.
2955       if (AbstractDisassembler::show_block_comment()) {
2956         print_block_comment(st, p);
2957         if (st->position() == 0) {
2958           compressed_format_idx = 0;
2959         }
2960       }
2961 
2962       //---<  New location information after line break  >---
2963       if (compressed_format_idx == 0) {
2964         code_comment_column   = Disassembler::print_location(p, pss, end, st, false, false);
2965         compressed_format_idx = 1;
2966       }
2967 
2968       //---<  Code comment for current instruction. Address range [p..(p+len))  >---
2969       unsigned char* p_end = p + (ssize_t)instruction_size_in_bytes;
2970       S390_ONLY(if (p_end > end) p_end = end;) // avoid getting past the end
2971 
2972       if (AbstractDisassembler::show_comment() && const_cast<nmethod*>(this)->has_code_comment(p, p_end)) {
2973         //---<  interrupt instruction byte stream for code comment  >---
2974         if (compressed_format_idx > 1) {
2975           st->cr();  // interrupt byte stream
2976           st->cr();  // add an empty line
2977           code_comment_column = Disassembler::print_location(p, pss, end, st, false, false);
2978         }
2979         const_cast<nmethod*>(this)->print_code_comment_on(st, code_comment_column, p, p_end );
2980         st->bol();
2981         compressed_format_idx = 0;
2982       }
2983 
2984       //---<  New location information after line break  >---
2985       if (compressed_format_idx == 0) {
2986         code_comment_column   = Disassembler::print_location(p, pss, end, st, false, false);
2987         compressed_format_idx = 1;
2988       }
2989 
2990       //---<  Nicely align instructions for readability  >---
2991       if (compressed_format_idx > 1) {
2992         Disassembler::print_delimiter(st);
2993       }
2994 
2995       //---<  Now, finally, print the actual instruction bytes  >---
2996       unsigned char* p0 = p;
2997       p = Disassembler::decode_instruction_abstract(p, st, instruction_size_in_bytes, instr_maxlen);
2998       compressed_format_idx += p - p0;
2999 
3000       if (Disassembler::start_newline(compressed_format_idx-1)) {
3001         st->cr();
3002         compressed_format_idx = 0;
3003       }
3004     }
3005     //---<  Close the output (Marker for post-mortem disassembler)  >---
3006     st->bol();
3007     st->print_cr("[/MachCode]");
3008     return;
3009   }
3010 #endif
3011 }
3012 
3013 #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
3014 
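// Describe the relocations in [begin,end): returns a resource-allocated string for
// the first relocation of a recognized type, "other" if only unrecognized types are
// present, and NULL if the range contains no relocations at all.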
3015 const char* nmethod::reloc_string_for(u_char* begin, u_char* end) {
3016   RelocIterator iter(this, begin, end);
3017   bool have_one = false;
3018   while (iter.next()) {
3019     have_one = true;
3020     switch (iter.type()) {
3021         case relocInfo::none:                  return "no_reloc";
3022         case relocInfo::oop_type: {
3023           // Get a non-resizable resource-allocated stringStream.
3024           // Our callees make use of (nested) ResourceMarks.
3025           stringStream st(NEW_RESOURCE_ARRAY(char, 1024), 1024);
3026           oop_Relocation* r = iter.oop_reloc();
3027           oop obj = r->oop_value();
3028           st.print("oop(");
3029           if (obj == NULL) st.print("NULL");
3030           else obj->print_value_on(&st);
3031           st.print(")");
3032           return st.as_string();
3033         }
3034         case relocInfo::metadata_type: {
3035           stringStream st;
3036           metadata_Relocation* r = iter.metadata_reloc();
3037           Metadata* obj = r->metadata_value();
3038           st.print("metadata(");
3039           if (obj == NULL) st.print("NULL");
3040           else obj->print_value_on(&st);
3041           st.print(")");
3042           return st.as_string();
3043         }
3044         case relocInfo::runtime_call_type:
3045         case relocInfo::runtime_call_w_cp_type: {
3046           stringStream st;
3047           st.print("runtime_call");
3048           CallRelocation* r = (CallRelocation*)iter.reloc();
3049           address dest = r->destination();
3050           CodeBlob* cb = CodeCache::find_blob(dest);
3051           if (cb != NULL) {
3052             st.print(" %s", cb->name());
3053           } else {
3054             ResourceMark rm;
3055             const int buflen = 1024;
3056             char* buf = NEW_RESOURCE_ARRAY(char, buflen);
3057             int offset;
3058             if (os::dll_address_to_function_name(dest, buf, buflen, &offset)) {
3059               st.print(" %s", buf);
3060               if (offset != 0) {
3061                 st.print("+%d", offset);
3062               }
3063             }
3064           }
3065           return st.as_string();
3066         }
3067         case relocInfo::virtual_call_type: {
3068           stringStream st;
3069           st.print_raw("virtual_call");
3070           virtual_call_Relocation* r = iter.virtual_call_reloc();
3071           Method* m = r->method_value();
3072           if (m != NULL) {
3073             assert(m->is_method(), "");
3074             m->print_short_name(&st);
3075           }
3076           return st.as_string();
3077         }
3078         case relocInfo::opt_virtual_call_type: {
3079           stringStream st;
3080           st.print_raw("optimized virtual_call");
3081           opt_virtual_call_Relocation* r = iter.opt_virtual_call_reloc();
3082           Method* m = r->method_value();
3083           if (m != NULL) {
3084             assert(m->is_method(), "");
3085             m->print_short_name(&st);
3086           }
3087           return st.as_string();
3088         }
3089         case relocInfo::static_call_type: {
3090           stringStream st;
3091           st.print_raw("static_call");
3092           static_call_Relocation* r = iter.static_call_reloc();
3093           Method* m = r->method_value();
3094           if (m != NULL) {
3095             assert(m->is_method(), "");
3096             m->print_short_name(&st);
3097           }
3098           return st.as_string();
3099         }
3100         case relocInfo::static_stub_type:      return "static_stub";
3101         case relocInfo::external_word_type:    return "external_word";
3102         case relocInfo::internal_word_type:    return "internal_word";
3103         case relocInfo::section_word_type:     return "section_word";
3104         case relocInfo::poll_type:             return "poll";
3105         case relocInfo::poll_return_type:      return "poll_return";
3106         case relocInfo::trampoline_stub_type:  return "trampoline_stub";
3107         case relocInfo::type_mask:             return "type_bit_mask";
3108 
3109         default:
3110           break;
3111     }
3112   }
3113   return have_one ? "other" : NULL;
3114 }
3115 
3116 // Return the last scope in (begin..end]
3117 ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
3118   PcDesc* p = pc_desc_near(begin+1);
3119   if (p != NULL && p->real_pc(this) <= end) {
3120     return new ScopeDesc(this, p);
3121   }
3122   return NULL;
3123 }
3124 
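// Map a code position to a section label. Later checks override earlier ones, so
// when several sections start at the same position the last matching label is
// returned. Returns NULL if pos does not start any known section.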
3125 const char* nmethod::nmethod_section_label(address pos) const {
3126   const char* label = NULL;
3127   if (pos == code_begin())                                              label = "[Instructions begin]";
3128   if (pos == entry_point())                                             label = "[Entry Point]";
3129   if (pos == verified_entry_point())                                    label = "[Verified Entry Point]";
3130   if (has_method_handle_invokes() && (pos == deopt_mh_handler_begin())) label = "[Deopt MH Handler Code]";
3131   if (pos == consts_begin() && pos != insts_begin())                    label = "[Constants]";
3132   // Check stub_code before checking exception_handler or deopt_handler.
3133   if (pos == this->stub_begin())                                        label = "[Stub Code]";
3134   if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin())           label = "[Exception Handler]";
3135   if (JVMCI_ONLY(_deopt_handler_begin != NULL &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
3136   return label;
3137 }
3138 
3139 void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels) const {
3140   if (print_section_labels) {
3141     const char* label = nmethod_section_label(block_begin);
3142     if (label != NULL) {
3143       stream->bol();
3144       stream->print_cr("%s", label);
3145     }
3146   }
3147 
3148   if (block_begin == entry_point()) {
3149     Method* m = method();
3150     if (m != NULL) {
3151       stream->print("  # ");
3152       m->print_value_on(stream);
3153       stream->cr();
3154     }
3155     if (m != NULL && !is_osr_method()) {
3156       ResourceMark rm;
3157       int sizeargs = m->size_of_parameters();
3158       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
3159       VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
3160       {
3161         int sig_index = 0;
3162         if (!m->is_static())
3163           sig_bt[sig_index++] = T_OBJECT; // 'this'
3164         for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
3165           BasicType t = ss.type();
3166           sig_bt[sig_index++] = t;
3167           if (type2size[t] == 2) {
3168             sig_bt[sig_index++] = T_VOID;
3169           } else {
3170             assert(type2size[t] == 1, "size is 1 or 2");
3171           }
3172         }
3173         assert(sig_index == sizeargs, "");
3174       }
3175       const char* spname = "sp"; // make arch-specific?
3176       intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
3177       int stack_slot_offset = this->frame_size() * wordSize;
3178       int tab1 = 14, tab2 = 24;
3179       int sig_index = 0;
3180       int arg_index = (m->is_static() ? 0 : -1);
3181       bool did_old_sp = false;
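      // Walk the signature and print, for each incoming argument (including the
      // implicit 'this' of instance methods), the register or caller stack slot
      // assigned to it by the Java calling convention.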
3182       for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
3183         bool at_this = (arg_index == -1);
3184         bool at_old_sp = false;
3185         BasicType t = (at_this ? T_OBJECT : ss.type());
3186         assert(t == sig_bt[sig_index], "sigs in sync");
3187         if (at_this)
3188           stream->print("  # this: ");
3189         else
3190           stream->print("  # parm%d: ", arg_index);
3191         stream->move_to(tab1);
3192         VMReg fst = regs[sig_index].first();
3193         VMReg snd = regs[sig_index].second();
3194         if (fst->is_reg()) {
3195           stream->print("%s", fst->name());
3196           if (snd->is_valid())  {
3197             stream->print(":%s", snd->name());
3198           }
3199         } else if (fst->is_stack()) {
3200           int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
3201           if (offset == stack_slot_offset)  at_old_sp = true;
3202           stream->print("[%s+0x%x]", spname, offset);
3203         } else {
3204           stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
3205         }
3206         stream->print(" ");
3207         stream->move_to(tab2);
3208         stream->print("= ");
3209         if (at_this) {
3210           m->method_holder()->print_value_on(stream);
3211         } else {
3212           bool did_name = false;
3213           if (!at_this && ss.is_reference()) {
3214             Symbol* name = ss.as_symbol();
3215             name->print_value_on(stream);
3216             did_name = true;
3217           }
3218           if (!did_name)
3219             stream->print("%s", type2name(t));
3220         }
3221         if (at_old_sp) {
3222           stream->print("  (%s of caller)", spname);
3223           did_old_sp = true;
3224         }
3225         stream->cr();
3226         sig_index += type2size[t];
3227         arg_index += 1;
3228         if (!at_this)  ss.next();
3229       }
3230       if (!did_old_sp) {
3231         stream->print("  # ");
3232         stream->move_to(tab1);
3233         stream->print("[%s+0x%x]", spname, stack_slot_offset);
3234         stream->print("  (%s of caller)", spname);
3235         stream->cr();
3236       }
3237     }
3238   }
3239 }
3240 
3241 // Returns whether this nmethod has code comments for the given code range.
3242 bool nmethod::has_code_comment(address begin, address end) {
3243   // scopes?
3244   ScopeDesc* sd  = scope_desc_in(begin, end);
3245   if (sd != NULL) return true;
3246 
3247   // relocations?
3248   const char* str = reloc_string_for(begin, end);
3249   if (str != NULL) return true;
3250 
3251   // implicit exceptions?
3252   int cont_offset = ImplicitExceptionTable(this).continuation_offset(begin - code_begin());
3253   if (cont_offset != 0) return true;
3254 
3255   return false;
3256 }
3257 
3258 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
3259   ImplicitExceptionTable implicit_table(this);
3260   int pc_offset = begin - code_begin();
3261   int cont_offset = implicit_table.continuation_offset(pc_offset);
3262   bool oop_map_required = false;
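  // Set when the implicit exception at this pc deoptimizes; in that case an oopmap
  // must be found by the loop below, and the trailing assert verifies it was printed.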
3263   if (cont_offset != 0) {
3264     st->move_to(column, 6, 0);
3265     if (pc_offset == cont_offset) {
3266       st->print("; implicit exception: deoptimizes");
3267       oop_map_required = true;
3268     } else {
3269       st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
3270     }
3271   }
3272 
3273   // Find an oopmap in (begin, end].  We use the odd half-closed
3274   // interval so that oop maps and scope descs which are tied to the
3275   // byte after a call are printed with the call itself.  OopMaps
3276   // associated with implicit exceptions are printed with the implicit
3277   // instruction.
3278   address base = code_begin();
3279   ImmutableOopMapSet* oms = oop_maps();
3280   if (oms != NULL) {
3281     for (int i = 0, imax = oms->count(); i < imax; i++) {
3282       const ImmutableOopMapPair* pair = oms->pair_at(i);
3283       const ImmutableOopMap* om = pair->get_from(oms);
3284       address pc = base + pair->pc_offset();
3285       if (pc >= begin) {
3286 #if INCLUDE_JVMCI
3287         bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
3288 #else
3289         bool is_implicit_deopt = false;
3290 #endif
3291         if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
3292           st->move_to(column, 6, 0);
3293           st->print("; ");
3294           om->print_on(st);
3295           oop_map_required = false;
3296         }
3297       }
3298       if (pc > end) {
3299         break;
3300       }
3301     }
3302   }
3303   assert(!oop_map_required, "missed oopmap");
3304 
3305   Thread* thread = Thread::current();
3306 
3307   // Print any debug info present at this pc.
3308   ScopeDesc* sd  = scope_desc_in(begin, end);
3309   if (sd != NULL) {
3310     st->move_to(column, 6, 0);
3311     if (sd->bci() == SynchronizationEntryBCI) {
3312       st->print(";*synchronization entry");
3313     } else if (sd->bci() == AfterBci) {
3314       st->print(";* method exit (unlocked if synchronized)");
3315     } else if (sd->bci() == UnwindBci) {
3316       st->print(";* unwind (locked if synchronized)");
3317     } else if (sd->bci() == AfterExceptionBci) {
3318       st->print(";* unwind (unlocked if synchronized)");
3319     } else if (sd->bci() == UnknownBci) {
3320       st->print(";* unknown");
3321     } else if (sd->bci() == InvalidFrameStateBci) {
3322       st->print(";* invalid frame state");
3323     } else {
3324       if (sd->method() == NULL) {
3325         st->print("method is NULL");
3326       } else if (sd->method()->is_native()) {
3327         st->print("method is native");
3328       } else {
3329         Bytecodes::Code bc = sd->method()->java_code_at(sd->bci());
3330         st->print(";*%s", Bytecodes::name(bc));
3331         switch (bc) {
3332         case Bytecodes::_invokevirtual:
3333         case Bytecodes::_invokespecial:
3334         case Bytecodes::_invokestatic:
3335         case Bytecodes::_invokeinterface:
3336           {
3337             Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
3338             st->print(" ");
3339             if (invoke.name() != NULL)
3340               invoke.name()->print_symbol_on(st);
3341             else
3342               st->print("<UNKNOWN>");
3343             break;
3344           }
3345         case Bytecodes::_getfield:
3346         case Bytecodes::_putfield:
3347         case Bytecodes::_getstatic:
3348         case Bytecodes::_putstatic:
3349           {
3350             Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
3351             st->print(" ");
3352             if (field.name() != NULL)
3353               field.name()->print_symbol_on(st);
3354             else
3355               st->print("<UNKNOWN>");
3356           }
3357         default:
3358           break;
3359         }
3360       }
3361       st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
3362     }
3363 
3364     // Print all scopes
3365     for (;sd != NULL; sd = sd->sender()) {
3366       st->move_to(column, 6, 0);
3367       st->print("; -");
3368       if (sd->should_reexecute()) {
3369         st->print(" (reexecute)");
3370       }
3371       if (sd->method() == NULL) {
3372         st->print("method is NULL");
3373       } else {
3374         sd->method()->print_short_name(st);
3375       }
3376       int lineno = sd->method()->line_number_from_bci(sd->bci());
3377       if (lineno != -1) {
3378         st->print("@%d (line %d)", sd->bci(), lineno);
3379       } else {
3380         st->print("@%d", sd->bci());
3381       }
3382       st->cr();
3383     }
3384   }
3385 
3386   // Print relocation information
3387   // Prevent memory leak: allocating without ResourceMark.
3388   ResourceMark rm;
3389   const char* str = reloc_string_for(begin, end);
3390   if (str != NULL) {
3391     if (sd != NULL) st->cr();
3392     st->move_to(column, 6, 0);
3393     st->print(";   {%s}", str);
3394   }
3395 }
3396 
3397 #endif
3398 
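// Adapter that exposes a plain direct NativeCall through the generic
// NativeCallWrapper interface used by the compiled-call patching code
// (see call_wrapper_at / call_wrapper_before below).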
3399 class DirectNativeCallWrapper: public NativeCallWrapper {
3400 private:
3401   NativeCall* _call;
3402 
3403 public:
3404   DirectNativeCallWrapper(NativeCall* call) : _call(call) {}
3405 
3406   virtual address destination() const { return _call->destination(); }
3407   virtual address instruction_address() const { return _call->instruction_address(); }
3408   virtual address next_instruction_address() const { return _call->next_instruction_address(); }
3409   virtual address return_address() const { return _call->return_address(); }
3410 
3411   virtual address get_resolve_call_stub(bool is_optimized) const {
3412     if (is_optimized) {
3413       return SharedRuntime::get_resolve_opt_virtual_call_stub();
3414     }
3415     return SharedRuntime::get_resolve_virtual_call_stub();
3416   }
3417 
3418   virtual void set_destination_mt_safe(address dest) {
3419     _call->set_destination_mt_safe(dest);
3420   }
3421 
3422   virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) {
3423     CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address());
3424     {
3425       csc->set_to_interpreted(method, info.entry());
3426     }
3427   }
3428 
3429   virtual void verify() const {
3430     // make sure code pattern is actually a call imm32 instruction
3431     _call->verify();
3432     _call->verify_alignment();
3433   }
3434 
3435   virtual void verify_resolve_call(address dest) const {
3436     CodeBlob* db = CodeCache::find_blob_unsafe(dest);
3437     assert(db != NULL && !db->is_adapter_blob(), "must use stub!");
3438   }
3439 
3440   virtual bool is_call_to_interpreted(address dest) const {
3441     CodeBlob* cb = CodeCache::find_blob(_call->instruction_address());
3442     return cb->contains(dest);
3443   }
3444 
3445   virtual bool is_safe_for_patching() const { return false; }
3446 
3447   virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const {
3448     return nativeMovConstReg_at(r->cached_value());
3449   }
3450 
3451   virtual void *get_data(NativeInstruction* instruction) const {
3452     return (void*)((NativeMovConstReg*) instruction)->data();
3453   }
3454 
3455   virtual void set_data(NativeInstruction* instruction, intptr_t data) {
3456     ((NativeMovConstReg*) instruction)->set_data(data);
3457   }
3458 };
3459 
3460 NativeCallWrapper* nmethod::call_wrapper_at(address call) const {
3461   return new DirectNativeCallWrapper((NativeCall*) call);
3462 }
3463 
3464 NativeCallWrapper* nmethod::call_wrapper_before(address return_pc) const {
3465   return new DirectNativeCallWrapper(nativeCall_before(return_pc));
3466 }
3467 
3468 address nmethod::call_instruction_address(address pc) const {
3469   if (NativeCall::is_call_before(pc)) {
3470     NativeCall *ncall = nativeCall_before(pc);
3471     return ncall->instruction_address();
3472   }
3473   return NULL;
3474 }
3475 
3476 CompiledStaticCall* nmethod::compiledStaticCall_at(Relocation* call_site) const {
3477   return CompiledDirectStaticCall::at(call_site);
3478 }
3479 
3480 CompiledStaticCall* nmethod::compiledStaticCall_at(address call_site) const {
3481   return CompiledDirectStaticCall::at(call_site);
3482 }
3483 
3484 CompiledStaticCall* nmethod::compiledStaticCall_before(address return_addr) const {
3485   return CompiledDirectStaticCall::before(return_addr);
3486 }
3487 
3488 #if defined(SUPPORT_DATA_STRUCTS)
3489 void nmethod::print_value_on(outputStream* st) const {
3490   st->print("nmethod");
3491   print_on(st, NULL);
3492 }
3493 #endif
3494 
3495 #ifndef PRODUCT
3496 
3497 void nmethod::print_calls(outputStream* st) {
3498   RelocIterator iter(this);
3499   while (iter.next()) {
3500     switch (iter.type()) {
3501     case relocInfo::virtual_call_type:
3502     case relocInfo::opt_virtual_call_type: {
3503       CompiledICLocker ml_verify(this);
3504       CompiledIC_at(&iter)->print();
3505       break;
3506     }
3507     case relocInfo::static_call_type:
3508       st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr()));
3509       CompiledDirectStaticCall::at(iter.reloc())->print();
3510       break;
3511     default:
3512       break;
3513     }
3514   }
3515 }
3516 
3517 void nmethod::print_statistics() {
3518   ttyLocker ttyl;
3519   if (xtty != NULL)  xtty->head("statistics type='nmethod'");
3520   native_nmethod_stats.print_native_nmethod_stats();
3521 #ifdef COMPILER1
3522   c1_java_nmethod_stats.print_nmethod_stats("C1");
3523 #endif
3524 #ifdef COMPILER2
3525   c2_java_nmethod_stats.print_nmethod_stats("C2");
3526 #endif
3527 #if INCLUDE_JVMCI
3528   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
3529 #endif
3530   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
3531   DebugInformationRecorder::print_statistics();
3532 #ifndef PRODUCT
3533   pc_nmethod_stats.print_pc_stats();
3534 #endif
3535   Dependencies::print_statistics();
3536   if (xtty != NULL)  xtty->tail("statistics");
3537 }
3538 
3539 #endif // !PRODUCT
3540 
3541 #if INCLUDE_JVMCI
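// Transfer a pending failed speculation recorded on the given thread into this
// nmethod's JVMCI failed-speculation list, then clear it on the thread.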
3542 void nmethod::update_speculation(JavaThread* thread) {
3543   jlong speculation = thread->pending_failed_speculation();
3544   if (speculation != 0) {
3545     guarantee(jvmci_nmethod_data() != NULL, "failed speculation in nmethod without failed speculation list");
3546     jvmci_nmethod_data()->add_failed_speculation(this, speculation);
3547     thread->set_pending_failed_speculation(0);
3548   }
3549 }
3550 
3551 const char* nmethod::jvmci_name() {
3552   if (jvmci_nmethod_data() != NULL) {
3553     return jvmci_nmethod_data()->name();
3554   }
3555   return NULL;
3556 }
3557 #endif
3558