/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm_io.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_metadata_size  += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
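
// Illustrative use of the iteration macros (a sketch, not part of the
// original file): counting live blobs while holding the CodeCache_lock.
//
//   int live = 0;
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       if (cb->is_alive()) live++;
//     }
//   }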

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
          non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
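
// Worked example for the check above (made-up flag values): with
// -XX:ReservedCodeCacheSize=240M and all three heap sizes set explicitly to
// 8M, 120M and 120M, total_size = 8M + 120M + 120M = 248M > 240M, so the VM
// exits with "Invalid code heap sizes ... is greater than ReservedCodeCacheSize".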

void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::c1_count();
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::c2_count();
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
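
// Sketch of the resulting split under default settings (illustrative numbers
// only): if none of the three sizes is set on the command line, the
// non-nmethod heap keeps its default plus code_buffers_size, and the rest of
// ReservedCodeCacheSize is divided evenly between the profiled and
// non-profiled heaps before the alignment adjustments above.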

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (CompilerConfig::is_c1_profiling()) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
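
// Summary of the cases above: with -XX:-SegmentedCodeCache only the single
// CodeBlobType::All heap exists; in interpreter-only mode just the
// non-nmethod heap is used; with C1 profiling all code heaps are available;
// otherwise only the non-nmethod and non-profiled heaps are needed.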

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}
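
// Sizing note (an illustration, assuming -XX:InitialCodeCacheSize=2496K):
// each heap initially commits MIN2(2496K, rs.size()) rounded up to the OS
// page size; the rest of the reserved range is committed on demand via
// expand_by() during allocation.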

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool handle_alloc_failure, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::report_allocation(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
        }
      }
      if (handle_alloc_failure) {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        CompileBroker::handle_full_code_cache(orig_code_blob_type);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}
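
// Typical call sequence (a sketch; the real flow lives in the CodeBlob and
// nmethod constructors): a caller allocates raw space, constructs the blob
// in place, and then commits it, all under the CodeCache_lock.
//
//   CodeBlob* cb = CodeCache::allocate(size, CodeBlobType::MethodNonProfiled);
//   if (cb != NULL) {
//     // ... run the CodeBlob subclass constructor on cb ...
//     CodeCache::commit(cb);
//   }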

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    nmethod* ptr = (nmethod *)cb;
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (ptr->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
    ptr->free_native_invokers();
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  UnloadingScope scope(is_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
  // 0 is reserved for new methods.
  _unloading_cycle = (_unloading_cycle + 1) % 4;
  if (_unloading_cycle == 0) {
    _unloading_cycle = 1;
  }
}
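
// The cycle value therefore steps 1 -> 2 -> 3 -> 1 -> ..., never revisiting
// 0, which stays reserved as the marker for newly created methods.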

CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure; instead,
  // round the expansion size up to the page size. In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen.
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large.  If it does, it can become a hashtable.
static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;

static void add_to_old_table(CompiledMethod* c) {
  if (old_compiled_method_table == NULL) {
    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
  }
  old_compiled_method_table->push(c);
}

static void reset_old_method_table() {
  if (old_compiled_method_table != NULL) {
    delete old_compiled_method_table;
    old_compiled_method_table = NULL;
  }
}

// Remove this method when zombied or unloaded.
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_compiled_method_table != NULL) {
    int index = old_compiled_method_table->find(c);
    if (index != -1) {
      old_compiled_method_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_compiled_method_table != NULL) {
    length = old_compiled_method_table->length();
    for (int i = 0; i < length; i++) {
      CompiledMethod* cm = old_compiled_method_table->at(i);
      // Only walk alive nmethods; the dead ones will get removed by the sweeper or GC.
      if (cm->is_alive() && !cm->is_unloading()) {
        old_compiled_method_table->at(i)->metadata_do(f);
      }
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}

// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
}


// Walk compiled methods and mark dependent methods for deoptimization.
int CodeCache::mark_dependents_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  // Each redefinition creates a new set of nmethods that have references to "old" Methods,
  // so delete the old method table and create a new one.
  reset_old_method_table();

  int number_of_marked_CodeBlobs = 0;
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    // Walk all alive nmethods to check for old Methods.
    // This includes methods whose inline caches point to old methods, so
    // inline cache clearing is unnecessary.
    if (nm->has_evol_metadata()) {
      nm->mark_for_deoptimization();
      add_to_old_table(nm);
      number_of_marked_CodeBlobs++;
    }
  }

  // Return the total count of nmethods marked for deoptimization; if it is
  // zero, the caller can skip deoptimization.
  return number_of_marked_CodeBlobs;
}

void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
      if (nm->has_evol_metadata()) {
        add_to_old_table(nm);
      }
    }
  }
}

// Flushes compiled methods dependent on redefined classes that have already
// been marked for deoptimization.
void CodeCache::flush_evol_dependents() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // At least one nmethod has been marked for deoptimization

  Deoptimization::deoptimize_all_marked();
}
#endif // INCLUDE_JVMTI

// Mark methods for deopt (if safe or possible).
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->is_native_method()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}
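
// How the pieces above fit together (a sketch based on the calls in this
// file): callers first mark candidates with one of the
// mark_*_for_deoptimization() routines, then trigger
// Deoptimization::deoptimize_all_marked(); marked methods are eventually
// made not entrant via make_marked_nmethods_not_entrant().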

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  int marked = 0;
  if (dependee->is_linked()) {
    // Class initialization state change.
    KlassInitDepChange changes(dependee);
    marked = mark_for_deoptimization(changes);
  } else {
    // New class is loaded.
    NewKlassDepChange changes(dependee);
    marked = mark_for_deoptimization(changes);
  }

  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    Deoptimization::deoptimize_all_marked();
  }
}

// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    Deoptimization::deoptimize_all_marked();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                 get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
      CodeHeap* curr_heap = *heap;
      for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
        HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
        wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
      }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
}
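
// Worked example for the "unused bytes" figure above (illustrative numbers,
// assuming CodeCacheSegmentSize == 64): a blob with cb->size() == 1000 bytes
// stored in 17 segments occupies 17 * 64 = 1088 bytes, so it contributes
// 88 wasted bytes.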
1313 
1314 //------------------------------------------------------------------------------------------------
1315 // Non-product version
1316 
1317 #ifndef PRODUCT
1318 
print_trace(const char * event,CodeBlob * cb,int size)1319 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
1320   if (PrintCodeCache2) {  // Need to add a new flag
1321     ResourceMark rm;
1322     if (size == 0)  size = cb->size();
1323     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1324   }
1325 }
1326 
void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

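  // Build a histogram of Java nmethod sizes in fixed 512-byte buckets,
  // sized so that the largest nmethod found above still fits.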
  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

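// Print a one-line summary per heap. In non-product builds with -XX:+Verbose,
// also break down live vs. dead blob sizes and, in WizardMode, oop map usage.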
void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

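// Print a usage summary (size/used/max_used/free) for each heap. With
// 'detailed', also print the heap bounds, aggregate blob counts, and the
// current state of compilation.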
void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                       " adapters=" UINT32_FORMAT,
                       blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
    st->print_cr("              stopped_count=%d, restarted_count=%d",
                 CompileBroker::get_total_compiler_stopped_count(),
                 CompileBroker::get_total_compiler_restarted_count());
    st->print_cr(" full_count=%d", full_count);
  }
}

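// List every alive, not-unloading compiled method: compile id, compilation
// level, state, name, and the [header, code begin - code end] address range.
// Reachable e.g. through the 'jcmd <pid> Compiler.codelist' diagnostic command.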
void CodeCache::print_codelist(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

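// Detailed per-heap summary, taken under the CodeCache_lock.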
void CodeCache::print_layout(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

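// Emit the blob/nmethod/adapter counts and the remaining free space as
// key='value' attributes, suitable for the XML compilation log.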
void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

#ifdef LINUX
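// Write a /tmp/perf-<pid>.map file so the Linux 'perf' tool can symbolize
// JIT-compiled code: one line per blob with start address, size, and method
// (or blob) name. Typically triggered via 'jcmd <pid> Compiler.perfmap'.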
void CodeCache::write_perf_map() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Perf expects to find the map file at /tmp/perf-<pid>.map.
  char fname[32];
  jio_snprintf(fname, sizeof(fname), "/tmp/perf-%d.map", os::current_process_id());

  fileStream fs(fname, "w");
  if (!fs.is_open()) {
    log_warning(codecache)("Failed to create %s for perf map", fname);
    return;
  }

  AllCodeBlobsIterator iter(AllCodeBlobsIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CodeBlob *cb = iter.method();
    ResourceMark rm;
    const char* method_name =
      cb->is_compiled() ? cb->as_compiled_method()->method()->external_name()
                        : cb->name();
    fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s",
                (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
                method_name);
  }
}
#endif // LINUX

//---<  BEGIN  >--- CodeHeap State Analytics.

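// The following wrappers simply fan each request out to every allocable
// heap; the actual bookkeeping and printing is done by CodeHeapState
// (see the 'jcmd <pid> Compiler.CodeHeap_Analytics' diagnostic command).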
void CodeCache::aggregate(outputStream *out, size_t granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---<  END  >--- CodeHeap State Analytics.