1 /*
2 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "aot/aotLoader.hpp"
27 #include "classfile/classLoaderDataGraph.hpp"
28 #include "gc/shared/collectedHeap.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/filemap.hpp"
32 #include "memory/metaspace.hpp"
33 #include "memory/metaspace/chunkManager.hpp"
34 #include "memory/metaspace/metachunk.hpp"
35 #include "memory/metaspace/metaspaceCommon.hpp"
36 #include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
37 #include "memory/metaspace/spaceManager.hpp"
38 #include "memory/metaspace/virtualSpaceList.hpp"
39 #include "memory/metaspaceShared.hpp"
40 #include "memory/metaspaceTracer.hpp"
41 #include "memory/universe.hpp"
42 #include "oops/compressedOops.hpp"
43 #include "runtime/init.hpp"
44 #include "runtime/orderAccess.hpp"
45 #include "services/memTracker.hpp"
46 #include "utilities/copy.hpp"
47 #include "utilities/debug.hpp"
48 #include "utilities/formatBuffer.hpp"
49 #include "utilities/globalDefinitions.hpp"
50 #include "utilities/vmError.hpp"
51
52
53 using namespace metaspace;
54
55 MetaWord* last_allocated = 0;
56
57 size_t Metaspace::_compressed_class_space_size;
58 const MetaspaceTracer* Metaspace::_tracer = NULL;
59
60 DEBUG_ONLY(bool Metaspace::_frozen = false;)
61
62 static const char* space_type_name(Metaspace::MetaspaceType t) {
63 const char* s = NULL;
64 switch (t) {
65 case Metaspace::StandardMetaspaceType: s = "Standard"; break;
66 case Metaspace::BootMetaspaceType: s = "Boot"; break;
67 case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break;
68 case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
69 default: ShouldNotReachHere();
70 }
71 return s;
72 }
73
74 volatile size_t MetaspaceGC::_capacity_until_GC = 0;
75 uint MetaspaceGC::_shrink_factor = 0;
76 bool MetaspaceGC::_should_concurrent_collect = false;
77
78 // BlockFreelist methods
79
80 // VirtualSpaceNode methods
81
82 // MetaspaceGC methods
83
84 // VM_CollectForMetadataAllocation is the vm operation used to GC.
85 // Within the VM operation after the GC the attempt to allocate the metadata
86 // should succeed. If the GC did not free enough space for the metaspace
87 // allocation, the HWM is increased so that another virtualspace will be
88 // allocated for the metadata. With perm gen the increase in the perm
89 // gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion. The
90 // metaspace policy uses those as the small and large steps for the HWM.
91 //
92 // After the GC the compute_new_size() for MetaspaceGC is called to
93 // resize the capacity of the metaspaces. The current implementation
94 // is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
95 // to resize the Java heap by some GC's. New flags can be implemented
96 // if really needed. MinMetaspaceFreeRatio is used to calculate how much
97 // free space is desirable in the metaspace capacity to decide how much
98 // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much
99 // free space is desirable in the metaspace capacity before decreasing
100 // the HWM.
101
102 // Calculate the amount to increase the high water mark (HWM).
103 // Increase by a minimum amount (MinMetaspaceExpansion) so that
104 // another expansion is not requested too soon. If that is not
105 // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
106 // If that is still not enough, expand by the size of the allocation
107 // plus some.
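//
// Illustrative example (the flag values below are assumptions, not the actual
// platform defaults): with MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M
// and a commit alignment of 64K, a 10K request aligns up to 64K and is bumped to
// the 256K minimum step; a 1M request takes the full 4M step so the next
// allocation does not immediately hit the HWM again; an 8M request (larger than
// MaxMetaspaceExpansion) expands by 8M + 256K.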
108 size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
109 size_t min_delta = MinMetaspaceExpansion;
110 size_t max_delta = MaxMetaspaceExpansion;
111 size_t delta = align_up(bytes, Metaspace::commit_alignment());
112
113 if (delta <= min_delta) {
114 delta = min_delta;
115 } else if (delta <= max_delta) {
116 // Don't want to hit the high water mark on the next
117 // allocation so make the delta greater than just enough
118 // for this allocation.
119 delta = max_delta;
120 } else {
121 // This allocation is large but the next ones are probably not
122 // so increase by the minimum.
123 delta = delta + min_delta;
124 }
125
126 assert_is_aligned(delta, Metaspace::commit_alignment());
127
128 return delta;
129 }
130
131 size_t MetaspaceGC::capacity_until_GC() {
132 size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
133 assert(value >= MetaspaceSize, "Not initialized properly?");
134 return value;
135 }
136
137 // Try to increase the _capacity_until_GC limit counter by v bytes.
138 // Returns true if it succeeded. It may fail if either another thread
139 // concurrently increased the limit or the new limit would be larger
140 // than MaxMetaspaceSize.
141 // On success, optionally returns new and old metaspace capacity in
142 // new_cap_until_GC and old_cap_until_GC respectively.
143 // On error, optionally sets can_retry to indicate whether there is
144 // actually enough space remaining to satisfy the request.
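// A typical caller might follow the sketch below (simplified and hypothetical,
// not the actual call site): compute the step with delta_capacity_until_GC(),
// then try to bump the limit, retrying only while the failure was a race with
// another thread (can_retry stays true):
//
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(bytes);
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL, &can_retry)) {
//     if (!can_retry) break;  // the new limit would exceed MaxMetaspaceSize
//   }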
145 bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
146 assert_is_aligned(v, Metaspace::commit_alignment());
147
148 size_t old_capacity_until_GC = _capacity_until_GC;
149 size_t new_value = old_capacity_until_GC + v;
150
151 if (new_value < old_capacity_until_GC) {
152 // The addition wrapped around, set new_value to aligned max value.
153 new_value = align_down(max_uintx, Metaspace::commit_alignment());
154 }
155
156 if (new_value > MaxMetaspaceSize) {
157 if (can_retry != NULL) {
158 *can_retry = false;
159 }
160 return false;
161 }
162
163 if (can_retry != NULL) {
164 *can_retry = true;
165 }
166 size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
167
168 if (old_capacity_until_GC != prev_value) {
169 return false;
170 }
171
172 if (new_cap_until_GC != NULL) {
173 *new_cap_until_GC = new_value;
174 }
175 if (old_cap_until_GC != NULL) {
176 *old_cap_until_GC = old_capacity_until_GC;
177 }
178 return true;
179 }
180
181 size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
182 assert_is_aligned(v, Metaspace::commit_alignment());
183
184 return Atomic::sub(v, &_capacity_until_GC);
185 }
186
187 void MetaspaceGC::initialize() {
188 // Set the high-water mark to MaxMetaspaceSize during VM initialization since
189 // we can't do a GC during initialization.
190 _capacity_until_GC = MaxMetaspaceSize;
191 }
192
193 void MetaspaceGC::post_initialize() {
194 // Reset the high-water mark once the VM initialization is done.
195 _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
196 }
197
198 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
199 // Check if the compressed class space is full.
200 if (is_class && Metaspace::using_class_space()) {
201 size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
202 if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
203 log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
204 (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
205 return false;
206 }
207 }
208
209 // Check if the user has imposed a limit on the metaspace memory.
210 size_t committed_bytes = MetaspaceUtils::committed_bytes();
211 if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
212 log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
213 (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
214 return false;
215 }
216
217 return true;
218 }
219
220 size_t MetaspaceGC::allowed_expansion() {
221 size_t committed_bytes = MetaspaceUtils::committed_bytes();
222 size_t capacity_until_gc = capacity_until_GC();
223
224 assert(capacity_until_gc >= committed_bytes,
225 "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
226 capacity_until_gc, committed_bytes);
227
228 size_t left_until_max = MaxMetaspaceSize - committed_bytes;
229 size_t left_until_GC = capacity_until_gc - committed_bytes;
230 size_t left_to_commit = MIN2(left_until_GC, left_until_max);
231 log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
232 " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
233 left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
234
235 return left_to_commit / BytesPerWord;
236 }
237
238 void MetaspaceGC::compute_new_size() {
239 assert(_shrink_factor <= 100, "invalid shrink factor");
240 uint current_shrink_factor = _shrink_factor;
241 _shrink_factor = 0;
242
243 // Using committed_bytes() for used_after_gc is an overestimation, since the
244 // chunk free lists are included in committed_bytes() and the memory in an
245 // un-fragmented chunk free list is available for future allocations.
246 // However, if the chunk free lists become fragmented, then the memory may
247 // not be available for future allocations and the memory is therefore "in use".
248 // Including the chunk free lists in the definition of "in use" is therefore
249 // necessary. Not including the chunk free lists can cause capacity_until_GC to
250 // shrink below committed_bytes() and this has caused serious bugs in the past.
251 const size_t used_after_gc = MetaspaceUtils::committed_bytes();
252 const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
253
254 const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
255 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
256
257 const double min_tmp = used_after_gc / maximum_used_percentage;
258 size_t minimum_desired_capacity =
259 (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
260 // Don't shrink below the initial metaspace size (MetaspaceSize)
261 minimum_desired_capacity = MAX2(minimum_desired_capacity,
262 MetaspaceSize);
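// Example (illustrative values): with MinMetaspaceFreeRatio = 40 and
// used_after_gc = 60M, minimum_desired_capacity = 60M / 0.60 = 100M, i.e. just
// enough capacity that 40% of it would be free.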
263
264 log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
265 log_trace(gc, metaspace)(" minimum_free_percentage: %6.2f maximum_used_percentage: %6.2f",
266 minimum_free_percentage, maximum_used_percentage);
267 log_trace(gc, metaspace)(" used_after_gc : %6.1fKB", used_after_gc / (double) K);
268
269
270 size_t shrink_bytes = 0;
271 if (capacity_until_GC < minimum_desired_capacity) {
272 // If we have less capacity below the metaspace HWM, then
273 // increment the HWM.
274 size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
275 expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
276 // Don't expand unless it's significant
277 if (expand_bytes >= MinMetaspaceExpansion) {
278 size_t new_capacity_until_GC = 0;
279 bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
280 assert(succeeded, "Should always successfully increment HWM when at safepoint");
281
282 Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
283 new_capacity_until_GC,
284 MetaspaceGCThresholdUpdater::ComputeNewSize);
285 log_trace(gc, metaspace)(" expanding: minimum_desired_capacity: %6.1fKB expand_bytes: %6.1fKB MinMetaspaceExpansion: %6.1fKB new metaspace HWM: %6.1fKB",
286 minimum_desired_capacity / (double) K,
287 expand_bytes / (double) K,
288 MinMetaspaceExpansion / (double) K,
289 new_capacity_until_GC / (double) K);
290 }
291 return;
292 }
293
294 // No expansion, now see if we want to shrink
295 // We would never want to shrink more than this
296 assert(capacity_until_GC >= minimum_desired_capacity,
297 SIZE_FORMAT " >= " SIZE_FORMAT,
298 capacity_until_GC, minimum_desired_capacity);
299 size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
300
301 // Should shrinking be considered?
302 if (MaxMetaspaceFreeRatio < 100) {
303 const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
304 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
305 const double max_tmp = used_after_gc / minimum_used_percentage;
306 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
307 maximum_desired_capacity = MAX2(maximum_desired_capacity,
308 MetaspaceSize);
309 log_trace(gc, metaspace)(" maximum_free_percentage: %6.2f minimum_used_percentage: %6.2f",
310 maximum_free_percentage, minimum_used_percentage);
311 log_trace(gc, metaspace)(" minimum_desired_capacity: %6.1fKB maximum_desired_capacity: %6.1fKB",
312 minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);
313
314 assert(minimum_desired_capacity <= maximum_desired_capacity,
315 "sanity check");
316
317 if (capacity_until_GC > maximum_desired_capacity) {
318 // Capacity too large, compute shrinking size
319 shrink_bytes = capacity_until_GC - maximum_desired_capacity;
320 // We don't want shrink all the way back to initSize if people call
321 // System.gc(), because some programs do that between "phases" and then
322 // we'd just have to grow the heap up again for the next phase. So we
323 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
324 // on the third call, and 100% by the fourth call. But if we recompute
325 // size without shrinking, it goes back to 0%.
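// Example (illustrative, ignoring the commit-alignment rounding below): with a
// constant 1000K excess over maximum_desired_capacity, four consecutive
// recomputations shrink by 0K, 100K, 400K and finally 1000K as _shrink_factor
// steps through 0 -> 10 -> 40 -> 100.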
326 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
327
328 shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());
329
330 assert(shrink_bytes <= max_shrink_bytes,
331 "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
332 shrink_bytes, max_shrink_bytes);
333 if (current_shrink_factor == 0) {
334 _shrink_factor = 10;
335 } else {
336 _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
337 }
338 log_trace(gc, metaspace)(" shrinking: initThreshold: %.1fK maximum_desired_capacity: %.1fK",
339 MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
340 log_trace(gc, metaspace)(" shrink_bytes: %.1fK current_shrink_factor: %d new shrink factor: %d MinMetaspaceExpansion: %.1fK",
341 shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
342 }
343 }
344
345 // Don't shrink unless it's significant
346 if (shrink_bytes >= MinMetaspaceExpansion &&
347 ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
348 size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
349 Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
350 new_capacity_until_GC,
351 MetaspaceGCThresholdUpdater::ComputeNewSize);
352 }
353 }
354
355 // MetaspaceUtils
356 size_t MetaspaceUtils::_capacity_words [Metaspace:: MetadataTypeCount] = {0, 0};
357 size_t MetaspaceUtils::_overhead_words [Metaspace:: MetadataTypeCount] = {0, 0};
358 volatile size_t MetaspaceUtils::_used_words [Metaspace:: MetadataTypeCount] = {0, 0};
359
360 // Collect used metaspace statistics. This involves walking the CLDG. The resulting
361 // output will be the accumulated values for all live metaspaces.
362 // Note: method does not do any locking.
363 void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
364 out->reset();
365 ClassLoaderDataGraphMetaspaceIterator iter;
366 while (iter.repeat()) {
367 ClassLoaderMetaspace* msp = iter.get_next();
368 if (msp != NULL) {
369 msp->add_to_statistics(out);
370 }
371 }
372 }
373
374 size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
375 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
376 return list == NULL ? 0 : list->free_bytes();
377 }
378
379 size_t MetaspaceUtils::free_in_vs_bytes() {
380 return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
381 }
382
383 static void inc_stat_nonatomically(size_t* pstat, size_t words) {
384 assert_lock_strong(MetaspaceExpand_lock);
385 (*pstat) += words;
386 }
387
388 static void dec_stat_nonatomically(size_t* pstat, size_t words) {
389 assert_lock_strong(MetaspaceExpand_lock);
390 const size_t size_now = *pstat;
391 assert(size_now >= words, "About to decrement counter below zero "
392 "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
393 size_now, words);
394 *pstat = size_now - words;
395 }
396
397 static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
398 Atomic::add(words, pstat);
399 }
400
401 static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
402 const size_t size_now = *pstat;
403 assert(size_now >= words, "About to decrement counter below zero "
404 "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
405 size_now, words);
406 Atomic::sub(words, pstat);
407 }
408
409 void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
410 dec_stat_nonatomically(&_capacity_words[mdtype], words);
411 }
412 void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
413 inc_stat_nonatomically(&_capacity_words[mdtype], words);
414 }
415 void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
416 dec_stat_atomically(&_used_words[mdtype], words);
417 }
418 void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
419 inc_stat_atomically(&_used_words[mdtype], words);
420 }
421 void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
422 dec_stat_nonatomically(&_overhead_words[mdtype], words);
423 }
424 void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
425 inc_stat_nonatomically(&_overhead_words[mdtype], words);
426 }
427
428 size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
429 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
430 return list == NULL ? 0 : list->reserved_bytes();
431 }
432
433 size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
434 VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
435 return list == NULL ? 0 : list->committed_bytes();
436 }
437
438 size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
439
440 size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
441 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
442 if (chunk_manager == NULL) {
443 return 0;
444 }
445 return chunk_manager->free_chunks_total_words();
446 }
447
448 size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
449 return free_chunks_total_words(mdtype) * BytesPerWord;
450 }
451
452 size_t MetaspaceUtils::free_chunks_total_words() {
453 return free_chunks_total_words(Metaspace::ClassType) +
454 free_chunks_total_words(Metaspace::NonClassType);
455 }
456
457 size_t MetaspaceUtils::free_chunks_total_bytes() {
458 return free_chunks_total_words() * BytesPerWord;
459 }
460
461 bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
462 return Metaspace::get_chunk_manager(mdtype) != NULL;
463 }
464
465 MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
466 if (!has_chunk_free_list(mdtype)) {
467 return MetaspaceChunkFreeListSummary();
468 }
469
470 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
471 return cm->chunk_free_list_summary();
472 }
473
474 void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
475 log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
476 prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
477 }
478
479 void MetaspaceUtils::print_on(outputStream* out) {
480 Metaspace::MetadataType nct = Metaspace::NonClassType;
481
482 out->print_cr(" Metaspace "
483 "used " SIZE_FORMAT "K, "
484 "capacity " SIZE_FORMAT "K, "
485 "committed " SIZE_FORMAT "K, "
486 "reserved " SIZE_FORMAT "K",
487 used_bytes()/K,
488 capacity_bytes()/K,
489 committed_bytes()/K,
490 reserved_bytes()/K);
491
492 if (Metaspace::using_class_space()) {
493 Metaspace::MetadataType ct = Metaspace::ClassType;
494 out->print_cr(" class space "
495 "used " SIZE_FORMAT "K, "
496 "capacity " SIZE_FORMAT "K, "
497 "committed " SIZE_FORMAT "K, "
498 "reserved " SIZE_FORMAT "K",
499 used_bytes(ct)/K,
500 capacity_bytes(ct)/K,
501 committed_bytes(ct)/K,
502 reserved_bytes(ct)/K);
503 }
504 }
505
506
507 void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
508 const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
509 const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
510 {
511 if (Metaspace::using_class_space()) {
512 out->print(" Non-class space: ");
513 }
514 print_scaled_words(out, reserved_nonclass_words, scale, 7);
515 out->print(" reserved, ");
516 print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
517 out->print_cr(" committed ");
518
519 if (Metaspace::using_class_space()) {
520 const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
521 const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
522 out->print(" Class space: ");
523 print_scaled_words(out, reserved_class_words, scale, 7);
524 out->print(" reserved, ");
525 print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
526 out->print_cr(" committed ");
527
528 const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
529 const size_t committed_words = committed_nonclass_words + committed_class_words;
530 out->print(" Both: ");
531 print_scaled_words(out, reserved_words, scale, 7);
532 out->print(" reserved, ");
533 print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
534 out->print_cr(" committed ");
535 }
536 }
537 }
538
539 static void print_basic_switches(outputStream* out, size_t scale) {
540 out->print("MaxMetaspaceSize: ");
541 if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) {
542 // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real
543 // value is smaller.
544 out->print("unlimited");
545 } else {
546 print_human_readable_size(out, MaxMetaspaceSize, scale);
547 }
548 out->cr();
549 if (Metaspace::using_class_space()) {
550 out->print("CompressedClassSpaceSize: ");
551 print_human_readable_size(out, CompressedClassSpaceSize, scale);
552 }
553 out->cr();
554 }
555
556 // This will print out a basic metaspace usage report but,
557 // unlike print_report(), is guaranteed not to lock or to walk the CLDG.
558 void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {
559
560 if (!Metaspace::initialized()) {
561 out->print_cr("Metaspace not yet initialized.");
562 return;
563 }
564
565 out->cr();
566 out->print_cr("Usage:");
567
568 if (Metaspace::using_class_space()) {
569 out->print(" Non-class: ");
570 }
571
572 // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
573 // MetaspaceUtils.
574 const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
575 const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
576 const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
577 const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;
578
579 print_scaled_words(out, cap_nc, scale, 5);
580 out->print(" capacity, ");
581 print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
582 out->print(" used, ");
583 print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
584 out->print(" free+waste, ");
585 print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
586 out->print(" overhead. ");
587 out->cr();
588
589 if (Metaspace::using_class_space()) {
590 const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
591 const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
592 const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
593 const size_t free_and_waste_c = cap_c - overhead_c - used_c;
594 out->print(" Class: ");
595 print_scaled_words(out, cap_c, scale, 5);
596 out->print(" capacity, ");
597 print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
598 out->print(" used, ");
599 print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
600 out->print(" free+waste, ");
601 print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
602 out->print(" overhead. ");
603 out->cr();
604
605 out->print(" Both: ");
606 const size_t cap = cap_nc + cap_c;
607
608 print_scaled_words(out, cap, scale, 5);
609 out->print(" capacity, ");
610 print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
611 out->print(" used, ");
612 print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
613 out->print(" free+waste, ");
614 print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
615 out->print(" overhead. ");
616 out->cr();
617 }
618
619 out->cr();
620 out->print_cr("Virtual space:");
621
622 print_vs(out, scale);
623
624 out->cr();
625 out->print_cr("Chunk freelists:");
626
627 if (Metaspace::using_class_space()) {
628 out->print(" Non-Class: ");
629 }
630 print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
631 out->cr();
632 if (Metaspace::using_class_space()) {
633 out->print(" Class: ");
634 print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes(), scale);
635 out->cr();
636 out->print(" Both: ");
637 print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes() +
638 Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
639 out->cr();
640 }
641
642 out->cr();
643
644 // Print basic settings
645 print_basic_switches(out, scale);
646
647 out->cr();
648
649 }
650
651 void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {
652
653 if (!Metaspace::initialized()) {
654 out->print_cr("Metaspace not yet initialized.");
655 return;
656 }
657
658 const bool print_loaders = (flags & rf_show_loaders) > 0;
659 const bool print_classes = (flags & rf_show_classes) > 0;
660 const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
661 const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;
662
663 // Some report options require walking the class loader data graph.
664 PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
665 if (print_loaders) {
666 out->cr();
667 out->print_cr("Usage per loader:");
668 out->cr();
669 }
670
671 ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print
672
673 // Print totals, broken up by space type.
674 if (print_by_spacetype) {
675 out->cr();
676 out->print_cr("Usage per space type:");
677 out->cr();
678 for (int space_type = (int)Metaspace::ZeroMetaspaceType;
679 space_type < (int)Metaspace::MetaspaceTypeCount; space_type ++)
680 {
681 uintx num_loaders = cl._num_loaders_by_spacetype[space_type];
682 uintx num_classes = cl._num_classes_by_spacetype[space_type];
683 out->print("%s - " UINTX_FORMAT " %s",
684 space_type_name((Metaspace::MetaspaceType)space_type),
685 num_loaders, loaders_plural(num_loaders));
686 if (num_classes > 0) {
687 out->print(", ");
688 print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]);
689 out->print(":");
690 cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
691 } else {
692 out->print(".");
693 out->cr();
694 }
695 out->cr();
696 }
697 }
698
699 // Print totals for in-use data:
700 out->cr();
701 {
702 uintx num_loaders = cl._num_loaders;
703 out->print("Total Usage - " UINTX_FORMAT " %s, ",
704 num_loaders, loaders_plural(num_loaders));
705 print_number_of_classes(out, cl._num_classes, cl._num_classes_shared);
706 out->print(":");
707 cl._stats_total.print_on(out, scale, print_by_chunktype);
708 out->cr();
709 }
710
711 // -- Print Virtual space.
712 out->cr();
713 out->print_cr("Virtual space:");
714
715 print_vs(out, scale);
716
717 // -- Print VirtualSpaceList details.
718 if ((flags & rf_show_vslist) > 0) {
719 out->cr();
720 out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");
721
722 if (Metaspace::using_class_space()) {
723 out->print_cr(" Non-Class:");
724 }
725 Metaspace::space_list()->print_on(out, scale);
726 if (Metaspace::using_class_space()) {
727 out->print_cr(" Class:");
728 Metaspace::class_space_list()->print_on(out, scale);
729 }
730 }
731 out->cr();
732
733 // -- Print VirtualSpaceList map.
734 if ((flags & rf_show_vsmap) > 0) {
735 out->cr();
736 out->print_cr("Virtual space map:");
737
738 if (Metaspace::using_class_space()) {
739 out->print_cr(" Non-Class:");
740 }
741 Metaspace::space_list()->print_map(out);
742 if (Metaspace::using_class_space()) {
743 out->print_cr(" Class:");
744 Metaspace::class_space_list()->print_map(out);
745 }
746 }
747 out->cr();
748
749 // -- Print Freelists (ChunkManager) details
750 out->cr();
751 out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");
752
753 ChunkManagerStatistics non_class_cm_stat;
754 Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);
755
756 if (Metaspace::using_class_space()) {
757 out->print_cr(" Non-Class:");
758 }
759 non_class_cm_stat.print_on(out, scale);
760
761 if (Metaspace::using_class_space()) {
762 ChunkManagerStatistics class_cm_stat;
763 Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
764 out->print_cr(" Class:");
765 class_cm_stat.print_on(out, scale);
766 }
767
768 // As a convenience, print a summary of common waste.
769 out->cr();
770 out->print("Waste ");
771 // For all waste categories, print percentages of the total. As the total, use the size of memory committed for metaspace.
772 const size_t committed_words = committed_bytes() / BytesPerWord;
773
774 out->print("(percentages refer to total committed size ");
775 print_scaled_words(out, committed_words, scale);
776 out->print_cr("):");
777
778 // Print space committed but not yet used by any class loader
779 const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
780 out->print(" Committed unused: ");
781 print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
782 out->cr();
783
784 // Print waste for in-use chunks.
785 UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
786 UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
787 UsedChunksStatistics ucs_all;
788 ucs_all.add(ucs_nonclass);
789 ucs_all.add(ucs_class);
790
791 out->print(" Waste in chunks in use: ");
792 print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
793 out->cr();
794 out->print(" Free in chunks in use: ");
795 print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
796 out->cr();
797 out->print(" Overhead in chunks in use: ");
798 print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
799 out->cr();
800
801 // Print waste in free chunks.
802 const size_t total_capacity_in_free_chunks =
803 Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
804 (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
805 out->print(" In free chunks: ");
806 print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
807 out->cr();
808
809 // Print waste in deallocated blocks.
810 const uintx free_blocks_num =
811 cl._stats_total.nonclass_sm_stats().free_blocks_num() +
812 cl._stats_total.class_sm_stats().free_blocks_num();
813 const size_t free_blocks_cap_words =
814 cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
815 cl._stats_total.class_sm_stats().free_blocks_cap_words();
816 out->print("Deallocated from chunks in use: ");
817 print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
818 out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
819 out->cr();
820
821 // Print total waste.
822 const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
823 + free_blocks_cap_words + unused_words_in_vs;
824 out->print(" -total-: ");
825 print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
826 out->cr();
827
828 // Print internal statistics
829 #ifdef ASSERT
830 out->cr();
831 out->cr();
832 out->print_cr("Internal statistics:");
833 out->cr();
834 out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
835 out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
836 out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
837 out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
838 out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
839 out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
840 out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
841 out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
842 out->print_cr("Number of chunks added to freelist: " UINTX_FORMAT ".",
843 g_internal_statistics.num_chunks_added_to_freelist);
844 out->print_cr("Number of chunks removed from freelist: " UINTX_FORMAT ".",
845 g_internal_statistics.num_chunks_removed_from_freelist);
846 out->print_cr("Number of chunk merges: " UINTX_FORMAT ", split-ups: " UINTX_FORMAT ".",
847 g_internal_statistics.num_chunk_merges, g_internal_statistics.num_chunk_splits);
848
849 out->cr();
850 #endif
851
852 // Print some interesting settings
853 out->cr();
854 out->cr();
855 print_basic_switches(out, scale);
856
857 out->cr();
858 out->print("InitialBootClassLoaderMetaspaceSize: ");
859 print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);
860
861 out->cr();
862 out->cr();
863
864 } // MetaspaceUtils::print_report()
865
866 // Prints an ASCII representation of the given space.
867 void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
868 MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
869 const bool for_class = (mdtype == Metaspace::ClassType);
870 VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
871 if (vsl != NULL) {
872 if (for_class) {
873 if (!Metaspace::using_class_space()) {
874 out->print_cr("No Class Space.");
875 return;
876 }
877 out->print_raw("---- Metaspace Map (Class Space) ----");
878 } else {
879 out->print_raw("---- Metaspace Map (Non-Class Space) ----");
880 }
881 // Print legend:
882 out->cr();
883 out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
884 out->cr();
885 VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
886 vsl->print_map(out);
887 out->cr();
888 }
889 }
890
891 void MetaspaceUtils::verify_free_chunks() {
892 #ifdef ASSERT
893 Metaspace::chunk_manager_metadata()->verify(false);
894 if (Metaspace::using_class_space()) {
895 Metaspace::chunk_manager_class()->verify(false);
896 }
897 #endif
898 }
899
900 void MetaspaceUtils::verify_metrics() {
901 #ifdef ASSERT
902 // Please note: there are time windows where the internal counters are out of sync with
903 // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
904 // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
905 // not be counted when iterating the CLDG. So be careful when you call this method.
906 ClassLoaderMetaspaceStatistics total_stat;
907 collect_statistics(&total_stat);
908 UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
909 UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();
910
911 bool mismatch = false;
912 for (int i = 0; i < Metaspace::MetadataTypeCount; i ++) {
913 Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
914 UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
915 if (capacity_words(mdtype) != chunk_stat.cap() ||
916 used_words(mdtype) != chunk_stat.used() ||
917 overhead_words(mdtype) != chunk_stat.overhead()) {
918 mismatch = true;
919 tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
920 tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
921 capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
922 tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
923 chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
924 tty->flush();
925 }
926 }
927 assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
928 #endif
929 }
930
931 // Metaspace methods
932
933 size_t Metaspace::_first_chunk_word_size = 0;
934 size_t Metaspace::_first_class_chunk_word_size = 0;
935
936 size_t Metaspace::_commit_alignment = 0;
937 size_t Metaspace::_reserve_alignment = 0;
938
939 VirtualSpaceList* Metaspace::_space_list = NULL;
940 VirtualSpaceList* Metaspace::_class_space_list = NULL;
941
942 ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
943 ChunkManager* Metaspace::_chunk_manager_class = NULL;
944
945 bool Metaspace::_initialized = false;
946
947 #define VIRTUALSPACEMULTIPLIER 2
948
949 #ifdef _LP64
950 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
951
952 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
953 assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
954 // Figure out the narrow_klass_base and the narrow_klass_shift. The
955 // narrow_klass_base is the lower of the metaspace base and the cds base
956 // (if cds is enabled). The narrow_klass_shift depends on the distance
957 // between the lower base and higher address.
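// For reference, a compressed klass pointer nk is decoded roughly as
//   Klass* k = (Klass*)(narrow_klass_base + ((uintptr_t)nk << narrow_klass_shift));
// (an illustrative sketch; the real decode lives in CompressedKlassPointers), so
// a zero base, and additionally a zero shift when everything fits below 4G,
// gives the cheapest decode sequence.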
958 address lower_base;
959 address higher_address;
960 #if INCLUDE_CDS
961 if (UseSharedSpaces) {
962 higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
963 (address)(metaspace_base + compressed_class_space_size()));
964 lower_base = MIN2(metaspace_base, cds_base);
965 } else
966 #endif
967 {
968 higher_address = metaspace_base + compressed_class_space_size();
969 lower_base = metaspace_base;
970
971 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
972 // If compressed class space fits in lower 32G, we don't need a base.
973 if (higher_address <= (address)klass_encoding_max) {
974 lower_base = 0; // Effectively lower base is zero.
975 }
976 }
977
978 CompressedKlassPointers::set_base(lower_base);
979
980 // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
981 // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
982 // how dump time narrow_klass_shift is set. Although, CDS can work
983 // with zero-shift mode also, to be consistent with AOT it uses
984 // LogKlassAlignmentInBytes for klass shift so archived java heap objects
985 // can be used at same time as AOT code.
986 if (!UseSharedSpaces
987 && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
988 CompressedKlassPointers::set_shift(0);
989 } else {
990 CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
991 }
992 AOTLoader::set_narrow_klass_shift();
993 }
994
995 #if INCLUDE_CDS
996 // Return TRUE if the specified metaspace_base and cds_base are close enough
997 // to work with compressed klass pointers.
998 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
999 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
1000 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
1001 address lower_base = MIN2((address)metaspace_base, cds_base);
1002 address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
1003 (address)(metaspace_base + compressed_class_space_size()));
1004 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
1005 }
1006 #endif
1007
1008 // Try to allocate the metaspace at the requested addr.
1009 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
1010 assert(!DumpSharedSpaces, "compressed klass space is allocated by MetaspaceShared class.");
1011 assert(using_class_space(), "called improperly");
1012 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
1013 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
1014 "Metaspace size is too big");
1015 assert_is_aligned(requested_addr, _reserve_alignment);
1016 assert_is_aligned(cds_base, _reserve_alignment);
1017 assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
1018
1019 // Don't use large pages for the class space.
1020 bool large_pages = false;
1021
1022 #if !(defined(AARCH64) || defined(AIX))
1023 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
1024 _reserve_alignment,
1025 large_pages,
1026 requested_addr);
1027 #else // AARCH64
1028 ReservedSpace metaspace_rs;
1029
1030 // Our compressed klass pointers may fit nicely into the lower 32
1031 // bits.
1032 if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
1033 metaspace_rs = ReservedSpace(compressed_class_space_size(),
1034 _reserve_alignment,
1035 large_pages,
1036 requested_addr);
1037 }
1038
1039 if (! metaspace_rs.is_reserved()) {
1040 // Aarch64: Try to align metaspace so that we can decode a compressed
1041 // klass with a single MOVK instruction. We can do this iff the
1042 // compressed class base is a multiple of 4G.
1043 // Aix: Search for a place where we can find memory. If we need to load
1044 // the base, 4G alignment is helpful, too.
1045 size_t increment = AARCH64_ONLY(4*)G;
1046 for (char *a = align_up(requested_addr, increment);
1047 a < (char*)(1024*G);
1048 a += increment) {
1049 if (a == (char *)(32*G)) {
1050 // Go faster from here on. Zero-based is no longer possible.
1051 increment = 4*G;
1052 }
1053
1054 #if INCLUDE_CDS
1055 if (UseSharedSpaces
1056 && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
1057 // We failed to find an aligned base that will reach. Fall
1058 // back to using our requested addr.
1059 metaspace_rs = ReservedSpace(compressed_class_space_size(),
1060 _reserve_alignment,
1061 large_pages,
1062 requested_addr);
1063 break;
1064 }
1065 #endif
1066
1067 metaspace_rs = ReservedSpace(compressed_class_space_size(),
1068 _reserve_alignment,
1069 large_pages,
1070 a);
1071 if (metaspace_rs.is_reserved())
1072 break;
1073 }
1074 }
1075
1076 #endif // AARCH64
1077
1078 if (!metaspace_rs.is_reserved()) {
1079 #if INCLUDE_CDS
1080 if (UseSharedSpaces) {
1081 size_t increment = align_up(1*G, _reserve_alignment);
1082
1083 // Keep trying to allocate the metaspace, increasing the requested_addr
1084 // by 1GB each time, until we reach an address that will no longer allow
1085 // use of CDS with compressed klass pointers.
1086 char *addr = requested_addr;
1087 while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
1088 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
1089 addr = addr + increment;
1090 metaspace_rs = ReservedSpace(compressed_class_space_size(),
1091 _reserve_alignment, large_pages, addr);
1092 }
1093 }
1094 #endif
1095 // If no successful allocation then try to allocate the space anywhere. If
1096 // that fails then OOM doom. At this point we cannot try allocating the
1097 // metaspace as if UseCompressedClassPointers is off because too much
1098 // initialization has happened that depends on UseCompressedClassPointers.
1099 // So, UseCompressedClassPointers cannot be turned off at this point.
1100 if (!metaspace_rs.is_reserved()) {
1101 metaspace_rs = ReservedSpace(compressed_class_space_size(),
1102 _reserve_alignment, large_pages);
1103 if (!metaspace_rs.is_reserved()) {
1104 vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
1105 compressed_class_space_size()));
1106 }
1107 }
1108 }
1109
1110 // If we got here then the metaspace got allocated.
1111 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
1112
1113 #if INCLUDE_CDS
1114 // Verify that we can use shared spaces. Otherwise, turn off CDS.
1115 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
1116 FileMapInfo::stop_sharing_and_unmap(
1117 "Could not allocate metaspace at a compatible address");
1118 }
1119 #endif
1120 set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
1121 UseSharedSpaces ? (address)cds_base : 0);
1122
1123 initialize_class_space(metaspace_rs);
1124
1125 LogTarget(Trace, gc, metaspace) lt;
1126 if (lt.is_enabled()) {
1127 ResourceMark rm;
1128 LogStream ls(lt);
1129 print_compressed_class_space(&ls, requested_addr);
1130 }
1131 }
1132
1133 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
1134 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
1135 p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
1136 if (_class_space_list != NULL) {
1137 address base = (address)_class_space_list->current_virtual_space()->bottom();
1138 st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
1139 compressed_class_space_size(), p2i(base));
1140 if (requested_addr != 0) {
1141 st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
1142 }
1143 st->cr();
1144 }
1145 }
1146
1147 // For UseCompressedClassPointers the class space is reserved above the top of
1148 // the Java heap. The argument passed in is at the base of the compressed space.
1149 void Metaspace::initialize_class_space(ReservedSpace rs) {
1150 // The reserved space size may be bigger because of alignment, esp with UseLargePages
1151 assert(rs.size() >= CompressedClassSpaceSize,
1152 SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
1153 assert(using_class_space(), "Must be using class space");
1154 _class_space_list = new VirtualSpaceList(rs);
1155 _chunk_manager_class = new ChunkManager(true/*is_class*/);
1156
1157 if (!_class_space_list->initialization_succeeded()) {
1158 vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
1159 }
1160 }
1161
1162 #endif
1163
1164 void Metaspace::ergo_initialize() {
1165 if (DumpSharedSpaces) {
1166 // Using large pages when dumping the shared archive is currently not implemented.
1167 FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
1168 }
1169
1170 size_t page_size = os::vm_page_size();
1171 if (UseLargePages && UseLargePagesInMetaspace) {
1172 page_size = os::large_page_size();
1173 }
1174
1175 _commit_alignment = page_size;
1176 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
1177
1178 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this would
1179 // lose track of whether MaxMetaspaceSize was set on the command line or not.
1180 // This information is needed later to conform to the specification of the
1181 // java.lang.management.MemoryUsage API.
1182 //
1183 // Ideally, we would be able to set the default value of MaxMetaspaceSize in
1184 // globals.hpp to the aligned value, but this is not possible, since the
1185 // alignment depends on other flags being parsed.
1186 MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);
1187
1188 if (MetaspaceSize > MaxMetaspaceSize) {
1189 MetaspaceSize = MaxMetaspaceSize;
1190 }
1191
1192 MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);
1193
1194 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
1195
1196 MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
1197 MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
1198
1199 CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
1200
1201 // Initial virtual space size will be calculated at global_initialize()
1202 size_t min_metaspace_sz =
1203 VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
1204 if (UseCompressedClassPointers) {
1205 if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
1206 if (min_metaspace_sz >= MaxMetaspaceSize) {
1207 vm_exit_during_initialization("MaxMetaspaceSize is too small.");
1208 } else {
1209 FLAG_SET_ERGO(CompressedClassSpaceSize,
1210 MaxMetaspaceSize - min_metaspace_sz);
1211 }
1212 }
1213 } else if (min_metaspace_sz >= MaxMetaspaceSize) {
1214 FLAG_SET_ERGO(InitialBootClassLoaderMetaspaceSize,
1215 min_metaspace_sz);
1216 }
1217
1218 set_compressed_class_space_size(CompressedClassSpaceSize);
1219 }
1220
1221 void Metaspace::global_initialize() {
1222 MetaspaceGC::initialize();
1223
1224 #if INCLUDE_CDS
1225 if (DumpSharedSpaces) {
1226 MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
1227 } else if (UseSharedSpaces) {
1228 // If any of the archived space fails to map, UseSharedSpaces
1229 // is reset to false. Fall through to the
1230 // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
1231 // metaspace.
1232 MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
1233 }
1234
1235 if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
1236 vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
1237 }
1238
1239 if (!DumpSharedSpaces && !UseSharedSpaces)
1240 #endif // INCLUDE_CDS
1241 {
1242 #ifdef _LP64
1243 if (using_class_space()) {
1244 char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
1245 allocate_metaspace_compressed_klass_ptrs(base, 0);
1246 }
1247 #endif // _LP64
1248 }
1249
1250 // Initialize these before initializing the VirtualSpaceList
1251 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
1252 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
1253 // Make the first class chunk bigger than a medium chunk so it's not put
1254 // on the medium chunk list. The next chunk will be small and progress
1255 // from there. This size was determined by measuring a -version run.
1256 _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
1257 (CompressedClassSpaceSize/BytesPerWord)*2);
1258 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
1259 // Arbitrarily set the initial virtual space to a multiple
1260 // of the boot class loader size.
1261 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
1262 word_size = align_up(word_size, Metaspace::reserve_alignment_words());
1263
1264 // Initialize the list of virtual spaces.
1265 _space_list = new VirtualSpaceList(word_size);
1266 _chunk_manager_metadata = new ChunkManager(false/*metaspace*/);
1267
1268 if (!_space_list->initialization_succeeded()) {
1269 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
1270 }
1271
1272 _tracer = new MetaspaceTracer();
1273
1274 _initialized = true;
1275
1276 }
1277
1278 void Metaspace::post_initialize() {
1279 MetaspaceGC::post_initialize();
1280 }
1281
1282 void Metaspace::verify_global_initialization() {
1283 assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
1284 assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
1285
1286 if (using_class_space()) {
1287 assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
1288 assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
1289 }
1290 }
1291
1292 size_t Metaspace::align_word_size_up(size_t word_size) {
1293 size_t byte_size = word_size * wordSize;
1294 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
1295 }
1296
1297 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
1298 MetaspaceObj::Type type, TRAPS) {
1299 assert(!_frozen, "sanity");
1300 assert(!(DumpSharedSpaces && THREAD->is_VM_thread()), "sanity");
1301
1302 if (HAS_PENDING_EXCEPTION) {
1303 assert(false, "Should not allocate with exception pending");
1304 return NULL; // caller does a CHECK_NULL too
1305 }
1306
1307 assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
1308 "ClassLoaderData::the_null_class_loader_data() should have been used.");
1309
1310 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
1311
1312 // Try to allocate metadata.
1313 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
1314
1315 if (result == NULL) {
1316 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
1317
1318 // Allocation failed.
1319 if (is_init_completed()) {
1320 // Only start a GC if the bootstrapping has completed.
1321 // Try to clean out some heap memory and retry. This can prevent premature
1322 // expansion of the metaspace.
1323 result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
1324 }
1325 }
1326
1327 if (result == NULL) {
1328 if (DumpSharedSpaces) {
1329 // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
1330 // We should abort to avoid generating a potentially bad archive.
1331 vm_exit_during_cds_dumping(err_msg("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
1332 MetaspaceObj::type_name(type), word_size * BytesPerWord),
1333 err_msg("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize));
1334 }
1335 report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
1336 assert(HAS_PENDING_EXCEPTION, "sanity");
1337 return NULL;
1338 }
1339
1340 // Zero initialize.
1341 Copy::fill_to_words((HeapWord*)result, word_size, 0);
1342
1343 return result;
1344 }
1345
report_metadata_oome(ClassLoaderData * loader_data,size_t word_size,MetaspaceObj::Type type,MetadataType mdtype,TRAPS)1346 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
1347 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
1348
1349 // If result is still null, we are out of memory.
1350 Log(gc, metaspace, freelist, oom) log;
1351 if (log.is_info()) {
1352 log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
1353 is_class_space_allocation(mdtype) ? "class" : "data", word_size);
1354 ResourceMark rm;
1355 if (log.is_debug()) {
1356 if (loader_data->metaspace_or_null() != NULL) {
1357 LogStream ls(log.debug());
1358 loader_data->print_value_on(&ls);
1359 }
1360 }
1361 LogStream ls(log.info());
1362 // In case of an OOM, log out a short but still useful report.
1363 MetaspaceUtils::print_basic_report(&ls, 0);
1364 }
1365
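  // Heuristic: treat the failure as compressed-class-space exhaustion if this
  // was a class-space allocation and committing a chunk large enough for it
  // would push committed class space beyond CompressedClassSpaceSize.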
  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}

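// Purging expunges virtual space nodes that no longer contain any live chunks,
// returning their memory to the OS. The parameterless overload below takes
// MetaspaceExpand_lock and purges both metadata types.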
void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLocker cl(MetaspaceExpand_lock,
                 Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

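// A pointer is considered to be in metaspace if it lies either in the shared
// (CDS) metaspace or in one of the virtual space lists below.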
bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

// ClassLoaderMetaspace
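//
// A ClassLoaderMetaspace is the per-ClassLoaderData view of metaspace: it owns
// one SpaceManager for non-class metadata and, when compressed class pointers
// are in use, a second SpaceManager for class metadata.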

ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
  : _space_type(type)
  , _lock(lock)
  , _vsm(NULL)
  , _class_vsm(NULL)
{
  initialize(lock, type);
}

ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  Metaspace::assert_not_frozen();
  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
  delete _vsm;
  if (Metaspace::using_class_space()) {
    delete _class_vsm;
  }
}

void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

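// Fetches the initial chunk for a newly created SpaceManager: first from the
// global chunk freelist, and only if that fails by carving a new chunk out of
// the corresponding virtual space list.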
Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist
  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  if (chunk == NULL) {
    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                             get_space_manager(mdtype)->medium_chunk_bunch());
  }

  return chunk;
}

void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

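  // MetaspaceExpand_lock guards the global chunk managers and virtual space
  // lists while the initial chunks are handed out.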
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}

MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));

  // Don't use class_vsm() unless UseCompressedClassPointers is true.
  if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

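// Slow path used after a plain allocation has failed: attempt to raise the GC
// threshold (capacity_until_GC) and retry the allocation under the new limit.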
MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after  = 0;
  bool can_retry = true;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL && can_retry);

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}

size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
          (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
}

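// Returns a block to the owning SpaceManager's free lists so it can be reused
// by later allocations from this loader; the memory is not unmapped.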
void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  Metaspace::assert_not_frozen();
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));

  MutexLocker ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && Metaspace::using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}

size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
  assert(Metaspace::using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void ClassLoaderMetaspace::print_on(outputStream* out) const {
  // Print both class virtual space counts and metaspace.
  if (Verbose) {
    vsm()->print_on(out);
    if (Metaspace::using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

void ClassLoaderMetaspace::verify() {
  vsm()->verify();
  if (Metaspace::using_class_space()) {
    class_vsm()->verify();
  }
}

void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
  assert_lock_strong(lock());
  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
  if (Metaspace::using_class_space()) {
    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
  }
}

void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

/////////////// Unit tests ///////////////

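// These hooks let native tests (e.g. the gtest suite) inspect chunk manager
// state and chunk geometry without exposing metaspace-internal types in a
// shared header.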
struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};

extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManagerStatistics stat;
  chunk_manager->collect_statistics(&stat);
  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
}

struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};

extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
  if (mdType == Metaspace::NonClassType) {
    out->specialized_chunk_word_size = SpecializedChunk;
    out->small_chunk_word_size = SmallChunk;
    out->medium_chunk_word_size = MediumChunk;
  } else {
    out->specialized_chunk_word_size = ClassSpecializedChunk;
    out->small_chunk_word_size = ClassSmallChunk;
    out->medium_chunk_word_size = ClassMediumChunk;
  }
}