/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP

#include "code/compiledMethod.hpp"

class DepChange;
class DirectiveSet;
class DebugInformationRecorder;
class JvmtiThreadState;

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
//  [Speculations]
//  - encoded speculations array
//  [JVMCINMethodData]
//  - meta data for JVMCI compiled nmethod

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;

 private:

  // Shared fields for all nmethods
  int       _entry_bci;        // != InvocationEntryBci if this nmethod is an on-stack replacement method

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head

  // STW two-phase nmethod root processing helpers.
  //
  // When determining liveness of a given nmethod to do code cache unloading,
  // some collectors need to do different things depending on whether the nmethods
  // need to absolutely be kept alive during root processing; "strong"ly reachable
  // nmethods are known to be kept alive at root processing, but the liveness of
  // "weak"ly reachable ones is to be determined later.
  //
  // We want to allow strong and weak processing of nmethods by different threads
  // at the same time without heavy synchronization. Additional constraints are
  // to make sure that every nmethod is processed a minimal number of times, and
  // nmethods themselves are always iterated at most once at a particular time.
  //
  // Note that strong processing work must be a superset of weak processing work
  // for this code to work.
  //
  // We store state and claim information in the _oops_do_mark_link member, using
  // the two LSBs for the state and the remaining upper bits for linking together
  // nmethods that were already visited.
  // The last element is self-looped, i.e. points to itself to avoid some special
  // "end-of-list" sentinel value.
  //
  // _oops_do_mark_link special values:
  //
  //   _oops_do_mark_link == NULL: the nmethod has not been visited at all yet, i.e.
  //      is Unclaimed.
  //
  // For other values, its lowest two bits indicate the following states of the nmethod:
  //
  //   weak_request (WR): the nmethod has been claimed by a thread for weak processing.
  //   weak_done (WD): weak processing has been completed for this nmethod.
  //   strong_request (SR): the nmethod has been found to need strong processing while
  //      being weak processed.
  //   strong_done (SD): strong processing has been completed for this nmethod.
  //
  // The following shows the _only_ possible progressions of the _oops_do_mark_link
  // pointer.
  //
  // Given
  //   N as the nmethod
  //   X the current next value of _oops_do_mark_link
  //
  // Unclaimed (C)-> N|WR (C)-> X|WD: the nmethod has been processed weakly by
  //   a single thread.
  // Unclaimed (C)-> N|WR (C)-> X|WD (O)-> X|SD: after weak processing has been
  //   completed (as above) another thread found that the nmethod needs strong
  //   processing after all.
  // Unclaimed (C)-> N|WR (O)-> N|SR (C)-> X|SD: during weak processing another
  //   thread finds that the nmethod needs strong processing, marks it as such and
  //   terminates. The original thread completes strong processing.
  // Unclaimed (C)-> N|SD (C)-> X|SD: the nmethod has been processed strongly from
  //   the beginning by a single thread.
  //
  // "|" describes the concatenation of bits in _oops_do_mark_link.
  //
  // The diagram also describes the threads responsible for changing the nmethod to
  // the next state by marking the _transition_ with (C) and (O), which mean "current"
  // and "other" thread respectively.
  //
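  // A small, purely illustrative example of the encoding (addresses are made up):
  // with this nmethod N at 0x7f00c2a0 and the current list head X at 0x7f00d180,
  // the transition Unclaimed (C)-> N|WR stores 0x7f00c2a0 (tag 0 = WR), and the
  // later (C)-> X|WD stores 0x7f00d181 (X with tag 1 = WD). extract_nmethod()
  // masks the two tag bits off again, and extract_state() returns just the tag
  // (see the helpers below).
  //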
  struct oops_do_mark_link; // Opaque data type.

  // States used for claiming nmethods during root processing.
  static const uint claim_weak_request_tag = 0;
  static const uint claim_weak_done_tag = 1;
  static const uint claim_strong_request_tag = 2;
  static const uint claim_strong_done_tag = 3;

  static oops_do_mark_link* mark_link(nmethod* nm, uint tag) {
    assert(tag <= claim_strong_done_tag, "invalid tag %u", tag);
    assert(is_aligned(nm, 4), "nmethod pointer must have zero lower two LSB");
    return (oops_do_mark_link*)(((uintptr_t)nm & ~0x3) | tag);
  }

  static uint extract_state(oops_do_mark_link* link) {
    return (uint)((uintptr_t)link & 0x3);
  }

  static nmethod* extract_nmethod(oops_do_mark_link* link) {
    return (nmethod*)((uintptr_t)link & ~0x3);
  }

  void oops_do_log_change(const char* state);

  static bool oops_do_has_weak_request(oops_do_mark_link* next) {
    return extract_state(next) == claim_weak_request_tag;
  }

  static bool oops_do_has_any_strong_state(oops_do_mark_link* next) {
    return extract_state(next) >= claim_strong_request_tag;
  }

  // Attempt Unclaimed -> N|WR transition. Returns true if successful.
  bool oops_do_try_claim_weak_request();

  // Attempt Unclaimed -> N|SD transition. Returns the current link.
  oops_do_mark_link* oops_do_try_claim_strong_done();
  // Attempt N|WR -> X|WD transition. Returns NULL if successful, X otherwise.
  nmethod* oops_do_try_add_to_list_as_weak_done();

  // Attempt X|WD -> N|SR transition. Returns the current link.
  oops_do_mark_link* oops_do_try_add_strong_request(oops_do_mark_link* next);
  // Attempt X|WD -> X|SD transition. Returns true if successful.
  bool oops_do_try_claim_weak_done_as_strong_done(oops_do_mark_link* next);

  // Do the N|SD -> X|SD transition.
  void oops_do_add_to_list_as_strong_done();

  // Sets this nmethod as strongly claimed (as part of N|SD -> X|SD and N|SR -> X|SD
  // transitions).
  void oops_do_set_strong_done(nmethod* old_head);

  static nmethod* volatile _oops_do_mark_nmethods;
  oops_do_mark_link* volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;                      // entry point with class check
  address _verified_entry_point;             // entry point without class check
  address _osr_entry_point;                  // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                          // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                      // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _native_invokers_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
#if INCLUDE_JVMCI
  int _speculations_offset;
  int _jvmci_data_offset;
#endif
  int _nmethod_end_offset;

  int code_offset() const { return (address) code_begin() - header_begin(); }
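  // Taken together, these offsets carve the nmethod into contiguous sections.
  // A rough sketch of the order, as implied by the *_begin()/*_end() accessors
  // further down (the last two sections exist only when INCLUDE_JVMCI is set):
  //
  //   header | relocation | constants | code | stubs | oops | metadata |
  //   scopes data | scopes pcs | dependencies | native invokers |
  //   handler table | nul-chk table | [speculations | JVMCI data]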
  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                           // which compilation made this nmethod
  int _comp_level;                           // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an event has been posted for this nmethod.
  bool _unload_reported;
  bool _load_reported;

  // Protected by CompiledMethod_lock
  volatile signed char _state;               // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  volatile long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (do_stack_scanning()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;
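  // For example (illustrative numbers only): with ReservedCodeCacheSize set to
  // 240M the counter is reset to 240 * 2 = 480 each time the method is seen
  // during stack scanning, and then ticks down by one per sweeper pass until
  // the next reset.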
  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level,
          const GrowableArrayView<BufferBlob*>& native_invokers
#if INCLUDE_JVMCI
          , char* speculations,
          int speculations_len,
          int jvmci_data_size
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  bool try_transition(int new_state);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const   { return content_begin() - header_begin(); }
  int data_offset() const      { return _data_offset; }

  address header_end() const   { return (address) header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level,
                              const GrowableArrayView<BufferBlob*>& native_invokers = GrowableArrayView<BufferBlob*>::EMPTY
#if INCLUDE_JVMCI
                              , char* speculations = NULL,
                              int speculations_len = 0,
                              int nmethod_mirror_index = -1,
                              const char* nmethod_mirror_name = NULL,
                              FailedSpeculation** failed_speculations = NULL
#endif
  );

  // Only used for unit tests.
  nmethod()
    : CompiledMethod(),
      _is_unloading_state(0),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1)) {}


  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

  // type info
  bool is_nmethod() const    { return true; }
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin          () const { return header_begin() + _consts_offset; }
  address consts_end            () const { return code_begin(); }
  address stub_begin            () const { return header_begin() + _stub_offset; }
  address stub_end              () const { return header_begin() + _oops_offset; }
  address exception_begin       () const { return header_begin() + _exception_offset; }
  address unwind_handler_begin  () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin            () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end              () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin     () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end       () const { return (Metadata**) _scopes_data_begin; }

  address scopes_data_end       () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin      () const { return (PcDesc*) (header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end        () const { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin    () const { return header_begin() + _dependencies_offset; }
  address dependencies_end      () const { return header_begin() + _native_invokers_offset; }
  BufferBlob** native_invokers_begin() const { return (BufferBlob**) (header_begin() + _native_invokers_offset); }
  BufferBlob** native_invokers_end  () const { return (BufferBlob**) (header_begin() + _handler_table_offset); }
  address handler_table_begin   () const { return header_begin() + _handler_table_offset; }
  address handler_table_end     () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin   () const { return header_begin() + _nul_chk_table_offset; }
#if INCLUDE_JVMCI
  address nul_chk_table_end     () const { return header_begin() + _speculations_offset; }
  address speculations_begin    () const { return header_begin() + _speculations_offset; }
  address speculations_end      () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_begin      () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_end        () const { return header_begin() + _nmethod_end_offset; }
#else
  address nul_chk_table_end     () const { return header_begin() + _nmethod_end_offset; }
#endif

  // Sizes
  int oops_size         () const { return (address) oops_end() - (address) oops_begin(); }
  int metadata_size     () const { return (address) metadata_end() - (address) metadata_begin(); }
  int dependencies_size () const { return dependencies_end() - dependencies_begin(); }
#if INCLUDE_JVMCI
  int speculations_size () const { return speculations_end() - speculations_begin(); }
  int jvmci_data_size   () const { return jvmci_data_end() - jvmci_data_begin(); }
#endif

  int oops_count() const     { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size() const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool oops_contains        (oop* addr)        const { return oops_begin() <= addr && addr < oops_end(); }
  bool metadata_contains    (Metadata** addr)  const { return metadata_begin() <= addr && addr < metadata_end(); }
  bool scopes_data_contains (address addr)     const { return scopes_data_begin() <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains  (PcDesc* addr)     const { return scopes_pcs_begin() <= addr && addr < scopes_pcs_end(); }

  // entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use()        const { return _state <= in_use; }
  bool is_alive()         const { return _state < unloaded; }
  bool is_not_entrant()   const { return _state == not_entrant; }
  bool is_zombie()        const { return _state == zombie; }
  bool is_unloaded()      const { return _state == unloaded; }

  void clear_unloading_state();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  bool make_in_use() {
    return try_transition(in_use);
  }
  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_not_used() { return make_not_entrant(); }
  bool make_zombie()   { return make_not_entrant_or_zombie(zombie); }

  int get_state() const {
    return _state;
  }

  void make_unloaded();

  bool has_dependencies()         { return dependencies_size() != 0; }
  void print_dependencies()       PRODUCT_RETURN;
  void flush_dependencies(bool delete_immediately);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int comp_level() const { return _comp_level; }

  void unlink_from_method();

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const;
  oop  oop_at_phantom(int index) const; // phantom reference
  oop* oop_addr_at(int index) const {   // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const { // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  void free_native_invokers();

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // On-stack replacement support
  int     osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const      { return _osr_link; }
  void set_osr_link(nmethod* n)  { _osr_link = n; }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-NULL value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
  }
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_dead);

  // All-in-one claiming of nmethods: returns true if the caller successfully claimed that
  // nmethod.
  bool oops_do_try_claim();

  // Class containing callbacks for the oops_do_process_weak/strong() methods
  // below.
  class OopsDoProcessor {
  public:
    // Process the oops of the given nmethod based on whether it has been called
    // in a weak or strong processing context, i.e. apply either weak or strong
    // work on it.
    virtual void do_regular_processing(nmethod* nm) = 0;
    // Assuming that the oops of the given nmethod have already had weak
    // processing applied, apply the remaining strong processing part.
    virtual void do_remaining_strong_processing(nmethod* nm) = 0;
  };

  // The following two methods do the work corresponding to weak/strong nmethod
  // processing.
  void oops_do_process_weak(OopsDoProcessor* p);
  void oops_do_process_strong(OopsDoProcessor* p);

  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

  // used by jvmti to track if the load and unload events have been reported
  bool unload_reported() const { return _unload_reported; }
  void set_unload_reported()   { _unload_reported = true; }
  bool load_reported() const   { return _load_reported; }
  void set_load_reported()     { _load_reported = true; }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr)             { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event(JvmtiThreadState* state = NULL);

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // Disassemble this nmethod with additional debug information, e.g. information about blocks.
  void decode2(outputStream* st) const;
  void print_constant_pool(outputStream* st);

  // Avoid hiding of parent's 'decode(outputStream*)' method.
  void decode(outputStream* st) const { decode2(st); } // just delegate here.

  // printing support
  void print()                 const;
  void print(outputStream* st) const;
  void print_code();

#if defined(SUPPORT_DATA_STRUCTS)
  // print output in opt build for disassembler library
  void print_relocations()               PRODUCT_RETURN;
  void print_pcs()                       { print_pcs_on(tty); }
  void print_pcs_on(outputStream* st);
  void print_scopes()                    { print_scopes_on(tty); }
  void print_scopes_on(outputStream* st) PRODUCT_RETURN;
  void print_value_on(outputStream* st) const;
  void print_native_invokers();
  void print_handler_table();
  void print_nul_chk_table();
  void print_recorded_oops();
  void print_recorded_metadata();

  void print_oops(outputStream* st);     // oops from the underlying CodeBlob.
  void print_metadata(outputStream* st); // metadata in metadata pool.
#else
  // void print_pcs() PRODUCT_RETURN;
  void print_pcs() { return; }
#endif

  void print_calls(outputStream* st) PRODUCT_RETURN;
  static void print_statistics()     PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
#endif
  }

  void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels = true) const;
  const char* nmethod_section_label(address pos) const;

  // returns whether this nmethod has code comments.
  bool has_code_comment(address begin, address end);
  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                { return offset_of(nmethod, _state); }

  virtual void metadata_do(MetadataClosure* f);

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
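//
// A minimal usage sketch (hypothetical caller, not part of this header): a
// stack-allocated locker pins the compiled code for the enclosing scope, e.g.
//
//   void inspect_code(nmethod* nm) {
//     nmethodLocker nml(nm);     // lock_nmethod() bumps the nmethod's _lock_count
//     // ... nm cannot be flushed or made into a zombie here ...
//   }                            // destructor calls unlock() -> unlock_nmethod()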
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod* nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method, bool zombie_ok = false) {
    if (method == NULL) return;
    lock_nmethod(method, zombie_ok);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm, bool zombie_ok = false) {
    unlock(_nm);   // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm, zombie_ok);
  }
};

#endif // SHARE_CODE_NMETHOD_HPP