/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  { // avoid uninitialized fields, even for short time periods
    _is_far_code                = false;
    _scopes_data_begin          = NULL;
    _deopt_handler_begin        = NULL;
    _deopt_mh_handler_begin     = NULL;
    _exception_cache            = NULL;
  }
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

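// Returns true if return_pc is the return address of a call site that the
// PcDescs record as a MethodHandle invoke; false if this method has no
// method handle invokes or no PcDesc covers return_pc.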
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

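// Walks the exception cache and unlinks (and deletes) entries whose exception
// Klass belongs to a class loader that is no longer alive.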
void CompiledMethod::clean_exception_cache() {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}

//-------------end of code for ExceptionCache--------------

// This method is private; it is used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

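// The next two queries scan the relocation info covering exactly the given pc:
// is_at_poll_return() matches only return polls, while is_at_poll_or_poll_return()
// also matches other safepoint polls.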
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


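// Build a ScopeDesc from the PcDesc recorded exactly at pc (scope_desc_at),
// or from a nearby PcDesc (scope_desc_near); both guarantee that debug
// information is present.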
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

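// Returns the number of virtual-call sites in this method whose inline cache
// currently points at a CompiledICHolder (and optionally traces them).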
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // The bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

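// Looks at the call relocation covering call_instr and returns the Method*
// the compiler attached to that call site, or NULL if none is recorded.
// attached_method_before_pc() does the same for the call immediately
// preceding pc.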
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

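// Resets every inline cache in this method to its clean state. Only allowed
// at a safepoint; zombie methods are skipped.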
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
   Klass* klass = NULL;
   if (md->is_klass()) {
     klass = ((Klass*)md);
   } else if (md->is_method()) {
     klass = ((Method*)md)->method_holder();
   } else if (md->is_methodData()) {
     klass = ((MethodData*)md)->method()->method_holder();
   } else {
     md->print();
     ShouldNotReachHere();
   }
   assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is CompiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metdata = ic->cached_icholder();

    if (cichk_metdata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metdata = ic->cached_metadata();
    if (ic_metdata != NULL) {
      if (ic_metdata->is_klass()) {
        if (((Klass*)ic_metdata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metdata->is_method()) {
        Method* method = (Method*)ic_metdata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

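// The global unloading clock is advanced by increase_unloading_clock(). An
// nmethod whose _unloading_clock does not match the global value has not yet
// been processed in the current parallel unloading pass (see
// clean_if_nmethod_is_unloaded below).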
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

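// Per-nmethod clock accessors. The release store pairs with the acquire load
// so that a reader observing the new clock value also observes the writes
// made before it was published.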
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}


// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return;
  }
#endif

  // Cleaning up the exception cache and inline caches happens
  // after all the unloaded methods are found.
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool parallel, bool clean_all) {
  // It is OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
}

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return false;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return false;
  }
#endif

  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded.  Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));

  return postponed;
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
  assert_locked_or_safepoint(CompiledIC_lock);
  bool postponed = false;
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops already
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  return postponed;
}

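// Second pass of parallel unloading: clean the call sites whose cleaning was
// postponed because the target nmethod had not yet been visited when the
// first pass ran.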
void CompiledMethod::do_unloading_parallel_postponed() {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
      break;

    default:
      break;
    }
  }
}