/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class ClassLoaderData;

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

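  // The summary's three addresses describe the region start, the committed
  // end (start + current capacity), and the reserved end.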
  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceUtils::committed_bytes(),
      MetaspaceUtils::used_bytes(),
      MetaspaceUtils::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
      MetaspaceUtils::used_bytes(Metaspace::NonClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceUtils::committed_bytes(Metaspace::ClassType),
      MetaspaceUtils::used_bytes(Metaspace::ClassType),
      MetaspaceUtils::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet::barrier_set()->print_on(st);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!check_obj_alignment(object)) {
    return false;
  }

  if (!is_in_reserved(object)) {
    return false;
  }

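  // A valid oop's Klass* lives in metaspace, never inside the Java heap
  // itself; a klass field pointing back into the heap indicates a corrupt
  // or uninitialized object header.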
  if (is_in_reserved(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
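  // The largest filler object is an int[] of the maximum array length:
  // convert that element count into heap words (elements_per_word jints
  // fit in one heap word) and add the array header.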
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

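    // The counts captured above let the VM operation's prologue detect that
    // another GC completed in the meantime, in which case the collection is
    // skipped and we simply retry the allocation.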
    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
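    // Every word in the block should still hold the badHeapWordVal pattern
    // written when the area was last zapped; anything else means the memory
    // was dirtied before allocation.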
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
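  // For example, on a 64-bit VM (HeapWordSize == 8, sizeof(jint) == 4) this
  // yields header_size + 4 * (max_jint / 8), i.e. roughly 2^30 heap words.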
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t)HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

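  // Size the int[] so that header plus payload exactly covers 'words':
  // convert the payload word count into a jint element count.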
  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(SystemDictionary::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
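    // If filling a max-sized array would leave a tail smaller than the
    // minimum fill size, fill slightly less (max - min) so the remainder
    // stays large enough to fill.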
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

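// Allocation entry points. The actual work (TLAB fast path, slow path and
// out-of-memory handling) is delegated to the MemAllocator hierarchy in
// gc/shared/memAllocator.hpp.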
oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  ObjAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}

oop CollectedHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
  return allocator.allocate();
}

oop CollectedHeap::class_allocate(Klass* klass, int size, TRAPS) {
  ClassAllocator allocator(klass, size, THREAD);
  return allocator.allocate();
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be certain that mutators aren't going
  // to interfere -- for instance, this is permissible if we are
  // still single-threaded and have either not yet started
  // allocating (nothing much to verify) or we have started
  // allocating but are now a full-fledged JavaThread (and have
  // thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up"
         " otherwise concurrent mutator activity may make heap "
         " unparsable again");
  const bool use_tlab = UseTLAB;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  JavaThreadIteratorWithHandle jtiwh;
  assert(!use_tlab || jtiwh.length() > 0,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  BarrierSet *bs = BarrierSet::barrier_set();
  for (; JavaThread *thread = jtiwh.next(); ) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
    bs->make_parsable(thread);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}

void CollectedHeap::post_initialize() {
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

void CollectedHeap::deduplicate_string(oop str) {
  // Do nothing, unless overridden in subclass.
}