1 /*
2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderDataGraph.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/cms/cmsGCStats.hpp"
30 #include "gc/cms/cmsHeap.hpp"
31 #include "gc/cms/cmsOopClosures.inline.hpp"
32 #include "gc/cms/cmsVMOperations.hpp"
33 #include "gc/cms/compactibleFreeListSpace.hpp"
34 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
35 #include "gc/cms/concurrentMarkSweepThread.hpp"
36 #include "gc/cms/parNewGeneration.hpp"
37 #include "gc/cms/promotionInfo.inline.hpp"
38 #include "gc/serial/genMarkSweep.hpp"
39 #include "gc/serial/tenuredGeneration.hpp"
40 #include "gc/shared/adaptiveSizePolicy.hpp"
41 #include "gc/shared/cardGeneration.inline.hpp"
42 #include "gc/shared/cardTableRS.hpp"
43 #include "gc/shared/collectedHeap.inline.hpp"
44 #include "gc/shared/collectorCounters.hpp"
45 #include "gc/shared/gcLocker.hpp"
46 #include "gc/shared/gcPolicyCounters.hpp"
47 #include "gc/shared/gcTimer.hpp"
48 #include "gc/shared/gcTrace.hpp"
49 #include "gc/shared/gcTraceTime.inline.hpp"
50 #include "gc/shared/genCollectedHeap.hpp"
51 #include "gc/shared/genOopClosures.inline.hpp"
52 #include "gc/shared/isGCActiveMark.hpp"
53 #include "gc/shared/owstTaskTerminator.hpp"
54 #include "gc/shared/referencePolicy.hpp"
55 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
56 #include "gc/shared/space.inline.hpp"
57 #include "gc/shared/strongRootsScope.hpp"
58 #include "gc/shared/taskqueue.inline.hpp"
59 #include "gc/shared/weakProcessor.hpp"
60 #include "gc/shared/workerPolicy.hpp"
61 #include "logging/log.hpp"
62 #include "logging/logStream.hpp"
63 #include "memory/allocation.hpp"
64 #include "memory/binaryTreeDictionary.inline.hpp"
65 #include "memory/iterator.inline.hpp"
66 #include "memory/padded.hpp"
67 #include "memory/resourceArea.hpp"
68 #include "memory/universe.hpp"
69 #include "oops/access.inline.hpp"
70 #include "oops/oop.inline.hpp"
71 #include "prims/jvmtiExport.hpp"
72 #include "runtime/atomic.hpp"
73 #include "runtime/flags/flagSetting.hpp"
74 #include "runtime/globals_extension.hpp"
75 #include "runtime/handles.inline.hpp"
76 #include "runtime/java.hpp"
77 #include "runtime/orderAccess.hpp"
78 #include "runtime/timer.hpp"
79 #include "runtime/vmThread.hpp"
80 #include "services/memoryService.hpp"
81 #include "services/runtimeService.hpp"
82 #include "utilities/align.hpp"
83 #include "utilities/stack.inline.hpp"
84 #if INCLUDE_JVMCI
85 #include "jvmci/jvmci.hpp"
86 #endif
87
88 // statics
89 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
90 bool CMSCollector::_full_gc_requested = false;
91 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
92
93 //////////////////////////////////////////////////////////////////
94 // In support of CMS/VM thread synchronization
95 //////////////////////////////////////////////////////////////////
96 // We split use of the CGC_lock into 2 "levels".
97 // The low-level locking is of the usual CGC_lock monitor. We introduce
98 // a higher level "token" (hereafter "CMS token") built on top of the
99 // low level monitor (hereafter "CGC lock").
100 // The token-passing protocol gives priority to the VM thread. The
101 // CMS-lock doesn't provide any fairness guarantees, but clients
102 // should ensure that it is only held for very short, bounded
103 // durations.
104 //
105 // When either of the CMS thread or the VM thread is involved in
106 // collection operations during which it does not want the other
107 // thread to interfere, it obtains the CMS token.
108 //
109 // If either thread tries to get the token while the other has
110 // it, that thread waits. However, if the VM thread and CMS thread
111 // both want the token, then the VM thread gets priority while the
112 // CMS thread waits. This ensures, for instance, that the "concurrent"
113 // phases of the CMS thread's work do not block out the VM thread
114 // for long periods of time as the CMS thread continues to hog
115 // the token. (See bug 4616232).
116 //
117 // The baton-passing functions are, however, controlled by the
118 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
119 // and here the low-level CMS lock, not the high level token,
120 // ensures mutual exclusion.
121 //
122 // Two important conditions that we have to satisfy:
123 // 1. if a thread does a low-level wait on the CMS lock, then it
124 // relinquishes the CMS token if it were holding that token
125 // when it acquired the low-level CMS lock.
126 // 2. any low-level notifications on the low-level lock
127 // should only be sent when a thread has relinquished the token.
128 //
129 // In the absence of either property, we'd have potential deadlock.
130 //
131 // We protect each of the CMS (concurrent and sequential) phases
132 // with the CMS _token_, not the CMS _lock_.
133 //
134 // The only code protected by CMS lock is the token acquisition code
135 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
136 // baton-passing code.
137 //
138 // Unfortunately, I couldn't come up with a good abstraction to factor and
139 // hide the naked CGC_lock manipulation in the baton-passing code
140 // further below. That's something we should try to do. Also, the proof
141 // of correctness of this 2-level locking scheme is far from obvious,
142 // and potentially quite slippery. We have an uneasy suspicion, for instance,
143 // that there may be a theoretical possibility of delay/starvation in the
144 // low-level lock/wait/notify scheme used for the baton-passing because of
145 // potential interference with the priority scheme embodied in the
146 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
147 // invocation further below and marked with "XXX 20011219YSR".
148 // Indeed, as we note elsewhere, this may become yet more slippery
149 // in the presence of multiple CMS and/or multiple VM threads. XXX
150
151 class CMSTokenSync: public StackObj {
152 private:
153 bool _is_cms_thread;
154 public:
155 CMSTokenSync(bool is_cms_thread):
156 _is_cms_thread(is_cms_thread) {
157 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
158 "Incorrect argument to constructor");
159 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
160 }
161
162 ~CMSTokenSync() {
163 assert(_is_cms_thread ?
164 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
165 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
166 "Incorrect state");
167 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
168 }
169 };
170
171 // Convenience class that does a CMSTokenSync, and then acquires
172 // up to three locks.
173 class CMSTokenSyncWithLocks: public CMSTokenSync {
174 private:
175 // Note: locks are acquired in textual declaration order
176 // and released in the opposite order
177 MutexLocker _locker1, _locker2, _locker3;
178 public:
179 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
180 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
181 CMSTokenSync(is_cms_thread),
182 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
183 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
184 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
185 { }
186 };
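// Illustrative sketch only (not part of the collector): a CMS phase that
// needs both the CMS token and, say, the old gen's free list lock would
// typically scope itself like
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
//                              freelistLock());
//     // ... work requiring the token and the free list lock ...
//   } // locks released in reverse order, then the token is relinquished
//
// The particular lock shown is just an example; each phase supplies
// whichever of the (up to three) locks it actually needs.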
187
188
189 //////////////////////////////////////////////////////////////////
190 // Concurrent Mark-Sweep Generation /////////////////////////////
191 //////////////////////////////////////////////////////////////////
192
193 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
194
195 // This struct contains per-thread things necessary to support parallel
196 // young-gen collection.
197 class CMSParGCThreadState: public CHeapObj<mtGC> {
198 public:
199 CompactibleFreeListSpaceLAB lab;
200 PromotionInfo promo;
201
202 // Constructor.
203 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
204 promo.setSpace(cfls);
205 }
206 };
207
208 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
209 ReservedSpace rs,
210 size_t initial_byte_size,
211 size_t min_byte_size,
212 size_t max_byte_size,
213 CardTableRS* ct) :
214 CardGeneration(rs, initial_byte_size, ct),
215 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
216 _did_compact(false)
217 {
218 HeapWord* bottom = (HeapWord*) _virtual_space.low();
219 HeapWord* end = (HeapWord*) _virtual_space.high();
220
221 _direct_allocated_words = 0;
222 NOT_PRODUCT(
223 _numObjectsPromoted = 0;
224 _numWordsPromoted = 0;
225 _numObjectsAllocated = 0;
226 _numWordsAllocated = 0;
227 )
228
229 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
230 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
231 _cmsSpace->_old_gen = this;
232
233 _gc_stats = new CMSGCStats();
234
235 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
236 // offsets match. The ability to tell free chunks from objects
237 // depends on this property.
238 debug_only(
239 FreeChunk* junk = NULL;
240 assert(UseCompressedClassPointers ||
241 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
242 "Offset of FreeChunk::_prev within FreeChunk must match"
243 " that of OopDesc::_klass within OopDesc");
244 )
245
246 _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
247 for (uint i = 0; i < ParallelGCThreads; i++) {
248 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
249 }
250
251 _incremental_collection_failed = false;
252 // The "dilatation_factor" is the expansion that can occur on
253 // account of the fact that the minimum object size in the CMS
254 // generation may be larger than that in, say, a contiguous young
255 // generation.
256 // Ideally, in the calculation below, we'd compute the dilatation
257 // factor as: MinChunkSize/(promoting_gen's min object size)
258 // Since we do not have such a general query interface for the
259 // promoting generation, we'll instead just use the minimum
260 // object size (which today is a header's worth of space);
261 // note that all arithmetic is in units of HeapWords.
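// For example (illustrative numbers only): if MinChunkSize were 7 heap
// words and CollectedHeap::min_fill_size() were 3 heap words, the
// _dilatation_factor computed above would be 7.0/3.0 ~= 2.33, i.e. the
// smallest young-gen objects could expand by up to ~2.33x when promoted.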
262 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
263 assert(_dilatation_factor >= 1.0, "from previous assert");
264
265 initialize_performance_counters(min_byte_size, max_byte_size);
266 }
267
268
269 // The field "_initiating_occupancy" represents the occupancy percentage
270 // at which we trigger a new collection cycle. Unless explicitly specified
271 // via CMSInitiatingOccupancyFraction (argument "io" below), it
272 // is calculated by:
273 //
274 // Let "f" be MinHeapFreeRatio in
275 //
276 // _initiating_occupancy = 100-f +
277 // f * (CMSTriggerRatio/100)
278 // where CMSTriggerRatio is the argument "tr" below.
279 //
280 // That is, if we assume the heap is at its desired maximum occupancy at the
281 // end of a collection, we let CMSTriggerRatio of the (purported) free
282 // space be allocated before initiating a new collection cycle.
283 //
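// A worked example with illustrative values: with "io" left unset (negative),
// MinHeapFreeRatio f = 40 and CMSTriggerRatio tr = 80, the code below yields
//
//   _initiating_occupancy = ((100 - 40) + 40 * 80 / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
//
// i.e. a new cycle is initiated once the generation is about 92% occupied.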
284 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
285 assert(io <= 100 && tr <= 100, "Check the arguments");
286 if (io >= 0) {
287 _initiating_occupancy = (double)io / 100.0;
288 } else {
289 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
290 (double)(tr * MinHeapFreeRatio) / 100.0)
291 / 100.0;
292 }
293 }
294
295 void ConcurrentMarkSweepGeneration::ref_processor_init() {
296 assert(collector() != NULL, "no collector");
297 collector()->ref_processor_init();
298 }
299
300 void CMSCollector::ref_processor_init() {
301 if (_ref_processor == NULL) {
302 // Allocate and initialize a reference processor
303 _ref_processor =
304 new ReferenceProcessor(&_span_based_discoverer,
305 (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
306 ParallelGCThreads, // mt processing degree
307 _cmsGen->refs_discovery_is_mt(), // mt discovery
308 MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
309 _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
310 &_is_alive_closure, // closure for liveness info
311 false); // disable adjusting number of processing threads
312 // Initialize the _ref_processor field of CMSGen
313 _cmsGen->set_ref_processor(_ref_processor);
314
315 }
316 }
317
318 AdaptiveSizePolicy* CMSCollector::size_policy() {
319 return CMSHeap::heap()->size_policy();
320 }
321
322 void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
323 size_t max_old_size) {
324
325 const char* gen_name = "old";
326 // Generation Counters - generation 1, 1 subspace
327 _gen_counters = new GenerationCounters(gen_name, 1, 1,
328 min_old_size, max_old_size, &_virtual_space);
329
330 _space_counters = new GSpaceCounters(gen_name, 0,
331 _virtual_space.reserved_size(),
332 this, _gen_counters);
333 }
334
335 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
336 _cms_gen(cms_gen)
337 {
338 assert(alpha <= 100, "bad value");
339 _saved_alpha = alpha;
340
341 // Initialize the alphas to the bootstrap value of 100.
342 _gc0_alpha = _cms_alpha = 100;
343
344 _cms_begin_time.update();
345 _cms_end_time.update();
346
347 _gc0_duration = 0.0;
348 _gc0_period = 0.0;
349 _gc0_promoted = 0;
350
351 _cms_duration = 0.0;
352 _cms_period = 0.0;
353 _cms_allocated = 0;
354
355 _cms_used_at_gc0_begin = 0;
356 _cms_used_at_gc0_end = 0;
357 _allow_duty_cycle_reduction = false;
358 _valid_bits = 0;
359 }
360
361 double CMSStats::cms_free_adjustment_factor(size_t free) const {
362 // TBD: CR 6909490
363 return 1.0;
364 }
365
366 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
367 }
368
369 // If promotion failure handling is on, use
370 // the padded average size of the promotion for each
371 // young generation collection.
372 double CMSStats::time_until_cms_gen_full() const {
373 size_t cms_free = _cms_gen->cmsSpace()->free();
374 CMSHeap* heap = CMSHeap::heap();
375 size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
376 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
377 if (cms_free > expected_promotion) {
378 // Start a cms collection if there isn't enough space to promote
379 // for the next young collection. Use the padded average as
380 // a safety factor.
381 cms_free -= expected_promotion;
382
383 // Adjust by the safety factor.
384 double cms_free_dbl = (double)cms_free;
385 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
386 // Apply a further correction factor which tries to adjust
387 // for recent occurrences of concurrent mode failures.
388 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
389 cms_free_dbl = cms_free_dbl * cms_adjustment;
390
391 log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
392 cms_free, expected_promotion);
393 log_trace(gc)(" cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
394 // Add 1 in case the consumption rate goes to zero.
395 return cms_free_dbl / (cms_consumption_rate() + 1.0);
396 }
397 return 0.0;
398 }
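// Rough illustration of the estimate above (all numbers made up): with
// 200M of cms_free, an expected padded promotion of 40M and
// CMSIncrementalSafetyFactor = 10, the adjusted free space is
// (200M - 40M) * 0.9 = 144M; at a measured cms_consumption_rate() of
// roughly 8M/sec the function returns about 144M / (8M + 1) ~= 18 seconds.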
399
400 // Compare the duration of the cms collection to the
401 // time remaining before the cms generation is empty.
402 // Note that the time from the start of the cms collection
403 // to the start of the cms sweep (less than the total
404 // duration of the cms collection) can be used. This
405 // has been tried and some applications experienced
406 // promotion failures early in execution. This was
407 // possibly because the averages were not accurate
408 // enough at the beginning.
409 double CMSStats::time_until_cms_start() const {
410 // We add "gc0_period" to the "work" calculation
411 // below because this query is done (mostly) at the
412 // end of a scavenge, so we need to conservatively
413 // account for that much possible delay
414 // in the query so as to avoid concurrent mode failures
415 // due to starting the collection just a wee bit too
416 // late.
417 double work = cms_duration() + gc0_period();
418 double deadline = time_until_cms_gen_full();
419 // If a concurrent mode failure occurred recently, we want to be
420 // more conservative and halve our expected time_until_cms_gen_full()
421 if (work > deadline) {
422 log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
423 cms_duration(), gc0_period(), time_until_cms_gen_full());
424 return 0.0;
425 }
426 return work - deadline;
427 }
428
429 #ifndef PRODUCT
430 void CMSStats::print_on(outputStream *st) const {
431 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
432 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
433 gc0_duration(), gc0_period(), gc0_promoted());
434 st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
435 cms_duration(), cms_period(), cms_allocated());
436 st->print(",cms_since_beg=%g,cms_since_end=%g",
437 cms_time_since_begin(), cms_time_since_end());
438 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
439 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
440
441 if (valid()) {
442 st->print(",promo_rate=%g,cms_alloc_rate=%g",
443 promotion_rate(), cms_allocation_rate());
444 st->print(",cms_consumption_rate=%g,time_until_full=%g",
445 cms_consumption_rate(), time_until_cms_gen_full());
446 }
447 st->cr();
448 }
449 #endif // #ifndef PRODUCT
450
451 CMSCollector::CollectorState CMSCollector::_collectorState =
452 CMSCollector::Idling;
453 bool CMSCollector::_foregroundGCIsActive = false;
454 bool CMSCollector::_foregroundGCShouldWait = false;
455
456 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
457 CardTableRS* ct):
458 _overflow_list(NULL),
459 _conc_workers(NULL), // may be set later
460 _completed_initialization(false),
461 _collection_count_start(0),
462 _should_unload_classes(CMSClassUnloadingEnabled),
463 _concurrent_cycles_since_last_unload(0),
464 _roots_scanning_options(GenCollectedHeap::SO_None),
465 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
466 _verifying(false),
467 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
468 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
469 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
470 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
471 _cms_start_registered(false),
472 _cmsGen(cmsGen),
473 // Adjust span to cover old (cms) gen
474 _span(cmsGen->reserved()),
475 _ct(ct),
476 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
477 _modUnionTable((CardTable::card_shift - LogHeapWordSize),
478 -1 /* lock-free */, "No_lock" /* dummy */),
479 _restart_addr(NULL),
480 _ser_pmc_preclean_ovflw(0),
481 _ser_pmc_remark_ovflw(0),
482 _par_pmc_remark_ovflw(0),
483 _ser_kac_preclean_ovflw(0),
484 _ser_kac_ovflw(0),
485 _par_kac_ovflw(0),
486 #ifndef PRODUCT
487 _num_par_pushes(0),
488 #endif
489 _span_based_discoverer(_span),
490 _ref_processor(NULL), // will be set later
491 // Construct the is_alive_closure with _span & markBitMap
492 _is_alive_closure(_span, &_markBitMap),
493 _modUnionClosurePar(&_modUnionTable),
494 _between_prologue_and_epilogue(false),
495 _abort_preclean(false),
496 _start_sampling(false),
497 _stats(cmsGen),
498 _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
499 //verify that this lock should be acquired with safepoint check.
500 Monitor::_safepoint_check_never)),
501 _eden_chunk_array(NULL), // may be set in ctor body
502 _eden_chunk_index(0), // -- ditto --
503 _eden_chunk_capacity(0), // -- ditto --
504 _survivor_chunk_array(NULL), // -- ditto --
505 _survivor_chunk_index(0), // -- ditto --
506 _survivor_chunk_capacity(0), // -- ditto --
507 _survivor_plab_array(NULL) // -- ditto --
508 {
509 // Now expand the span and allocate the collection support structures
510 // (MUT, marking bit map etc.) to cover both generations subject to
511 // collection.
512
513 // For use by dirty card to oop closures.
514 _cmsGen->cmsSpace()->set_collector(this);
515
516 // Allocate MUT and marking bit map
517 {
518 MutexLocker x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
519 if (!_markBitMap.allocate(_span)) {
520 log_warning(gc)("Failed to allocate CMS Bit Map");
521 return;
522 }
523 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
524 }
525 {
526 _modUnionTable.allocate(_span);
527 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
528 }
529
530 if (!_markStack.allocate(MarkStackSize)) {
531 log_warning(gc)("Failed to allocate CMS Marking Stack");
532 return;
533 }
534
535 // Support for multi-threaded concurrent phases
536 if (CMSConcurrentMTEnabled) {
537 if (FLAG_IS_DEFAULT(ConcGCThreads)) {
538 // just for now
539 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
540 }
541 if (ConcGCThreads > 1) {
542 _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
543 ConcGCThreads, true);
544 if (_conc_workers == NULL) {
545 log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
546 CMSConcurrentMTEnabled = false;
547 } else {
548 _conc_workers->initialize_workers();
549 }
550 } else {
551 CMSConcurrentMTEnabled = false;
552 }
553 }
554 if (!CMSConcurrentMTEnabled) {
555 ConcGCThreads = 0;
556 } else {
557 // Turn off CMSCleanOnEnter optimization temporarily for
558 // the MT case where it's not fixed yet; see 6178663.
559 CMSCleanOnEnter = false;
560 }
561 assert((_conc_workers != NULL) == (ConcGCThreads > 1),
562 "Inconsistency");
563 log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
564 log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
565
566 // Parallel task queues; these are shared for the
567 // concurrent and stop-world phases of CMS, but
568 // are not shared with parallel scavenge (ParNew).
569 {
570 uint i;
571 uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
572
573 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
574 || ParallelRefProcEnabled)
575 && num_queues > 0) {
576 _task_queues = new OopTaskQueueSet(num_queues);
577 if (_task_queues == NULL) {
578 log_warning(gc)("task_queues allocation failure.");
579 return;
580 }
581 typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
582 for (i = 0; i < num_queues; i++) {
583 PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
584 if (q == NULL) {
585 log_warning(gc)("work_queue allocation failure.");
586 return;
587 }
588 _task_queues->register_queue(i, q);
589 }
590 for (i = 0; i < num_queues; i++) {
591 _task_queues->queue(i)->initialize();
592 }
593 }
594 }
595
596 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
597
598 // Clip CMSBootstrapOccupancy between 0 and 100.
599 _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
600
601 // Now tell CMS generations the identity of their collector
602 ConcurrentMarkSweepGeneration::set_collector(this);
603
604 // Create & start a CMS thread for this CMS collector
605 _cmsThread = ConcurrentMarkSweepThread::start(this);
606 assert(cmsThread() != NULL, "CMS Thread should have been created");
607 assert(cmsThread()->collector() == this,
608 "CMS Thread should refer to this gen");
609 assert(CGC_lock != NULL, "Where's the CGC_lock?");
610
611 // Support for parallelizing young gen rescan
612 CMSHeap* heap = CMSHeap::heap();
613 _young_gen = heap->young_gen();
614 if (heap->supports_inline_contig_alloc()) {
615 _top_addr = heap->top_addr();
616 _end_addr = heap->end_addr();
617 assert(_young_gen != NULL, "no _young_gen");
618 _eden_chunk_index = 0;
619 _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
620 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
621 }
622
623 // Support for parallelizing survivor space rescan
624 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
625 const size_t max_plab_samples =
626 _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
627
628 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
629 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
630 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
631 _survivor_chunk_capacity = max_plab_samples;
632 for (uint i = 0; i < ParallelGCThreads; i++) {
633 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
634 ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
635 assert(cur->end() == 0, "Should be 0");
636 assert(cur->array() == vec, "Should be vec");
637 assert(cur->capacity() == max_plab_samples, "Error");
638 }
639 }
640
641 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
642 _gc_counters = new CollectorCounters("CMS full collection pauses", 1);
643 _cgc_counters = new CollectorCounters("CMS concurrent cycle pauses", 2);
644 _completed_initialization = true;
645 _inter_sweep_timer.start(); // start of time
646 }
647
648 const char* ConcurrentMarkSweepGeneration::name() const {
649 return "concurrent mark-sweep generation";
650 }
651 void ConcurrentMarkSweepGeneration::update_counters() {
652 if (UsePerfData) {
653 _space_counters->update_all();
654 _gen_counters->update_all();
655 }
656 }
657
658 // this is an optimized version of update_counters(). it takes the
659 // used value as a parameter rather than computing it.
660 //
661 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
662 if (UsePerfData) {
663 _space_counters->update_used(used);
664 _space_counters->update_capacity();
665 _gen_counters->update_all();
666 }
667 }
668
669 void ConcurrentMarkSweepGeneration::print() const {
670 Generation::print();
671 cmsSpace()->print();
672 }
673
674 #ifndef PRODUCT
675 void ConcurrentMarkSweepGeneration::print_statistics() {
676 cmsSpace()->printFLCensus(0);
677 }
678 #endif
679
680 size_t
681 ConcurrentMarkSweepGeneration::contiguous_available() const {
682 // dld proposes an improvement in precision here. If the committed
683 // part of the space ends in a free block we should add that to
684 // uncommitted size in the calculation below. Will make this
685 // change later, staying with the approximation below for the
686 // time being. -- ysr.
687 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
688 }
689
690 size_t
691 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
692 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
693 }
694
695 size_t ConcurrentMarkSweepGeneration::used_stable() const {
696 return cmsSpace()->used_stable();
697 }
698
699 size_t ConcurrentMarkSweepGeneration::max_available() const {
700 return free() + _virtual_space.uncommitted_size();
701 }
702
703 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
704 size_t available = max_available();
705 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
706 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
707 log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
708 res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
709 return res;
710 }
711
712 // At a promotion failure dump information on block layout in heap
713 // (cms old generation).
714 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
715 Log(gc, promotion) log;
716 if (log.is_trace()) {
717 LogStream ls(log.trace());
718 cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
719 }
720 }
721
722 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
723 // Clear the promotion information. These pointers can be adjusted
724 // along with all the other pointers into the heap but
725 // compaction is expected to be a rare event with
726 // a heap using cms so don't do it without seeing the need.
727 for (uint i = 0; i < ParallelGCThreads; i++) {
728 _par_gc_thread_states[i]->promo.reset();
729 }
730 }
731
732 void ConcurrentMarkSweepGeneration::compute_new_size() {
733 assert_locked_or_safepoint(Heap_lock);
734
735 // If incremental collection failed, we just want to expand
736 // to the limit.
737 if (incremental_collection_failed()) {
738 clear_incremental_collection_failed();
739 grow_to_reserved();
740 return;
741 }
742
743 // The heap has been compacted but not reset yet.
744 // Any metric such as free() or used() will be incorrect.
745
746 CardGeneration::compute_new_size();
747
748 // Reset again after a possible resizing
749 if (did_compact()) {
750 cmsSpace()->reset_after_compaction();
751 }
752 }
753
754 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
755 assert_locked_or_safepoint(Heap_lock);
756
757 // If incremental collection failed, we just want to expand
758 // to the limit.
759 if (incremental_collection_failed()) {
760 clear_incremental_collection_failed();
761 grow_to_reserved();
762 return;
763 }
764
765 double free_percentage = ((double) free()) / capacity();
766 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
767 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
768
769 // compute expansion delta needed for reaching desired free percentage
770 if (free_percentage < desired_free_percentage) {
771 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
772 assert(desired_capacity >= capacity(), "invalid expansion size");
773 size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
774 Log(gc) log;
775 if (log.is_trace()) {
776 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
777 log.trace("From compute_new_size: ");
778 log.trace(" Free fraction %f", free_percentage);
779 log.trace(" Desired free fraction %f", desired_free_percentage);
780 log.trace(" Maximum free fraction %f", maximum_free_percentage);
781 log.trace(" Capacity " SIZE_FORMAT, capacity() / 1000);
782 log.trace(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
783 CMSHeap* heap = CMSHeap::heap();
784 size_t young_size = heap->young_gen()->capacity();
785 log.trace(" Young gen size " SIZE_FORMAT, young_size / 1000);
786 log.trace(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
787 log.trace(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
788 log.trace(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
789 }
790 // safe if expansion fails
791 expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
792 log.trace(" Expanded free fraction %f", ((double) free()) / capacity());
793 } else {
794 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
795 assert(desired_capacity <= capacity(), "invalid expansion size");
796 size_t shrink_bytes = capacity() - desired_capacity;
797 // Don't shrink unless the delta is greater than the minimum shrink we want
798 if (shrink_bytes >= MinHeapDeltaBytes) {
799 shrink_free_list_by(shrink_bytes);
800 }
801 }
802 }
803
804 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
805 return cmsSpace()->freelistLock();
806 }
807
808 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
809 CMSSynchronousYieldRequest yr;
810 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
811 return have_lock_and_allocate(size, tlab);
812 }
813
814 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
815 bool tlab /* ignored */) {
816 assert_lock_strong(freelistLock());
817 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
818 HeapWord* res = cmsSpace()->allocate(adjustedSize);
819 // Allocate the object live (grey) if the background collector has
820 // started marking. This is necessary because the marker may
821 // have passed this address and consequently this object will
822 // not otherwise be greyed and would be incorrectly swept up.
823 // Note that if this object contains references, the writing
824 // of those references will dirty the card containing this object
825 // allowing the object to be blackened (and its references scanned)
826 // either during a preclean phase or at the final checkpoint.
827 if (res != NULL) {
828 // We may block here with an uninitialized object with
829 // its mark-bit or P-bits not yet set. Such objects need
830 // to be safely navigable by block_start().
831 assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
832 assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
833 collector()->direct_allocated(res, adjustedSize);
834 _direct_allocated_words += adjustedSize;
835 // allocation counters
836 NOT_PRODUCT(
837 _numObjectsAllocated++;
838 _numWordsAllocated += (int)adjustedSize;
839 )
840 }
841 return res;
842 }
843
844 // In the case of direct allocation by mutators in a generation that
845 // is being concurrently collected, the object must be allocated
846 // live (grey) if the background collector has started marking.
847 // This is necessary because the marker may
848 // have passed this address and consequently this object will
849 // not otherwise be greyed and would be incorrectly swept up.
850 // Note that if this object contains references, the writing
851 // of those references will dirty the card containing this object
852 // allowing the object to be blackened (and its references scanned)
853 // either during a preclean phase or at the final checkpoint.
854 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
855 assert(_markBitMap.covers(start, size), "Out of bounds");
856 if (_collectorState >= Marking) {
857 MutexLocker y(_markBitMap.lock(),
858 Mutex::_no_safepoint_check_flag);
859 // [see comments preceding SweepClosure::do_blk() below for details]
860 //
861 // Can the P-bits be deleted now? JJJ
862 //
863 // 1. need to mark the object as live so it isn't collected
864 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
865 // 3. need to mark the end of the object so marking, precleaning or sweeping
866 // can skip over uninitialized or unparsable objects. An allocated
867 // object is considered uninitialized for our purposes as long as
868 // its klass word is NULL. All old gen objects are parsable
869 // as soon as they are initialized.
870 _markBitMap.mark(start); // object is live
871 _markBitMap.mark(start + 1); // object is potentially uninitialized?
872 _markBitMap.mark(start + size - 1);
873 // mark end of object
874 }
875 // check that oop looks uninitialized
876 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
877 }
878
879 void CMSCollector::promoted(bool par, HeapWord* start,
880 bool is_obj_array, size_t obj_size) {
881 assert(_markBitMap.covers(start), "Out of bounds");
882 // See comment in direct_allocated() about when objects should
883 // be allocated live.
884 if (_collectorState >= Marking) {
885 // we already hold the marking bit map lock, taken in
886 // the prologue
887 if (par) {
888 _markBitMap.par_mark(start);
889 } else {
890 _markBitMap.mark(start);
891 }
892 // We don't need to mark the object as uninitialized (as
893 // in direct_allocated above) because this is being done with the
894 // world stopped and the object will be initialized by the
895 // time the marking, precleaning or sweeping get to look at it.
896 // But see the code for copying objects into the CMS generation,
897 // where we need to ensure that concurrent readers of the
898 // block offset table are able to safely navigate a block that
899 // is in flux from being free to being allocated (and in
900 // transition while being copied into) and subsequently
901 // becoming a bona-fide object when the copy/promotion is complete.
902 assert(SafepointSynchronize::is_at_safepoint(),
903 "expect promotion only at safepoints");
904
905 if (_collectorState < Sweeping) {
906 // Mark the appropriate cards in the modUnionTable, so that
907 // this object gets scanned before the sweep. If this is
908 // not done, CMS generation references in the object might
909 // not get marked.
910 // For the case of arrays, which are otherwise precisely
911 // marked, we need to dirty the entire array, not just its head.
912 if (is_obj_array) {
913 // The [par_]mark_range() method expects mr.end() below to
914 // be aligned to the granularity of a bit's representation
915 // in the heap. In the case of the MUT below, that's a
916 // card size.
917 MemRegion mr(start,
918 align_up(start + obj_size,
919 CardTable::card_size /* bytes */));
920 if (par) {
921 _modUnionTable.par_mark_range(mr);
922 } else {
923 _modUnionTable.mark_range(mr);
924 }
925 } else { // not an obj array; we can just mark the head
926 if (par) {
927 _modUnionTable.par_mark(start);
928 } else {
929 _modUnionTable.mark(start);
930 }
931 }
932 }
933 }
934 }
935
936 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
937 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
938 // allocate, copy and if necessary update promoinfo --
939 // delegate to underlying space.
940 assert_lock_strong(freelistLock());
941
942 #ifndef PRODUCT
943 if (CMSHeap::heap()->promotion_should_fail()) {
944 return NULL;
945 }
946 #endif // #ifndef PRODUCT
947
948 oop res = _cmsSpace->promote(obj, obj_size);
949 if (res == NULL) {
950 // expand and retry
951 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
952 expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
953 // Since this is the old generation, we don't try to promote
954 // into a more senior generation.
955 res = _cmsSpace->promote(obj, obj_size);
956 }
957 if (res != NULL) {
958 // See comment in allocate() about when objects should
959 // be allocated live.
960 assert(oopDesc::is_oop(obj), "Will dereference klass pointer below");
961 collector()->promoted(false, // Not parallel
962 (HeapWord*)res, obj->is_objArray(), obj_size);
963 // promotion counters
964 NOT_PRODUCT(
965 _numObjectsPromoted++;
966 _numWordsPromoted +=
967 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
968 )
969 }
970 return res;
971 }
972
973
974 // IMPORTANT: Notes on object size recognition in CMS.
975 // ---------------------------------------------------
976 // A block of storage in the CMS generation is always in
977 // one of three states. A free block (FREE), an allocated
978 // object (OBJECT) whose size() method reports the correct size,
979 // and an intermediate state (TRANSIENT) in which its size cannot
980 // be accurately determined.
981 // STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
982 // -----------------------------------------------------
983 // FREE: klass_word & 1 == 1; mark_word holds block size
984 //
985 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
986 // obj->size() computes correct size
987 //
988 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
989 //
990 // STATE IDENTIFICATION: (64 bit+COOPS)
991 // ------------------------------------
992 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
993 //
994 // OBJECT: klass_word installed; klass_word != 0;
995 // obj->size() computes correct size
996 //
997 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
998 //
999 //
1000 // STATE TRANSITION DIAGRAM
1001 //
1002 //       mut / parnew                      mut / parnew
1003 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1004 //  ^                                                                   |
1005 //  |------------------------ DEAD <------------------------------------|
1006 //          sweep                             mut
1007 //
1008 // While a block is in TRANSIENT state its size cannot be determined
1009 // so readers will either need to come back later or stall until
1010 // the size can be determined. Note that for the case of direct
1011 // allocation, P-bits, when available, may be used to determine the
1012 // size of an object that may not yet have been initialized.
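//
// Purely illustrative sketch of how a reader might apply the rules above in
// the 32-bit / 64-bit-without-COOPS case (the real, race-tolerant logic lives
// in CompactibleFreeListSpace and the closures below; size_from_mark_word is
// a stand-in, not a real helper):
//
//   if (klass_word & 1) {           // FREE: block size is in the mark word
//     size = size_from_mark_word(mark_word);
//   } else if (klass_word == 0) {   // TRANSIENT: size indeterminate, retry
//     ...
//   } else {                        // OBJECT: the object reports its own size
//     size = oop(addr)->size();
//   }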
1013
1014 // Things to support parallel young-gen collection.
1015 oop
1016 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1017 oop old, markOop m,
1018 size_t word_sz) {
1019 #ifndef PRODUCT
1020 if (CMSHeap::heap()->promotion_should_fail()) {
1021 return NULL;
1022 }
1023 #endif // #ifndef PRODUCT
1024
1025 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1026 PromotionInfo* promoInfo = &ps->promo;
1027 // if we are tracking promotions, then first ensure space for
1028 // promotion (including spooling space for saving header if necessary).
1029 // then allocate and copy, then track promoted info if needed.
1030 // When tracking (see PromotionInfo::track()), the mark word may
1031 // be displaced and in this case restoration of the mark word
1032 // occurs in the (oop_since_save_marks_)iterate phase.
1033 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1034 // Out of space for allocating spooling buffers;
1035 // try expanding and allocating spooling buffers.
1036 if (!expand_and_ensure_spooling_space(promoInfo)) {
1037 return NULL;
1038 }
1039 }
1040 assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
1041 const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1042 HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1043 if (obj_ptr == NULL) {
1044 obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1045 if (obj_ptr == NULL) {
1046 return NULL;
1047 }
1048 }
1049 oop obj = oop(obj_ptr);
1050 OrderAccess::storestore();
1051 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1052 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1053 // IMPORTANT: See note on object initialization for CMS above.
1054 // Otherwise, copy the object. Here we must be careful to insert the
1055 // klass pointer last, since this marks the block as an allocated object.
1056 // Except with compressed oops it's the mark word.
1057 HeapWord* old_ptr = (HeapWord*)old;
1058 // Restore the mark word copied above.
1059 obj->set_mark_raw(m);
1060 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1061 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1062 OrderAccess::storestore();
1063
1064 if (UseCompressedClassPointers) {
1065 // Copy gap missed by (aligned) header size calculation below
1066 obj->set_klass_gap(old->klass_gap());
1067 }
1068 if (word_sz > (size_t)oopDesc::header_size()) {
1069 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1070 obj_ptr + oopDesc::header_size(),
1071 word_sz - oopDesc::header_size());
1072 }
1073
1074 // Now we can track the promoted object, if necessary. We take care
1075 // to delay the transition from uninitialized to full object
1076 // (i.e., insertion of klass pointer) until after, so that it
1077 // atomically becomes a promoted object.
1078 if (promoInfo->tracking()) {
1079 promoInfo->track((PromotedObject*)obj, old->klass());
1080 }
1081 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1082 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1083 assert(oopDesc::is_oop(old), "Will use and dereference old klass ptr below");
1084
1085 // Finally, install the klass pointer (this should be volatile).
1086 OrderAccess::storestore();
1087 obj->set_klass(old->klass());
1088 // We should now be able to calculate the right size for this object
1089 assert(oopDesc::is_oop(obj) && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1090
1091 collector()->promoted(true, // parallel
1092 obj_ptr, old->is_objArray(), word_sz);
1093
1094 NOT_PRODUCT(
1095 Atomic::inc(&_numObjectsPromoted);
1096 Atomic::add(alloc_sz, &_numWordsPromoted);
1097 )
1098
1099 return obj;
1100 }
1101
1102 void
1103 ConcurrentMarkSweepGeneration::
1104 par_promote_alloc_done(int thread_num) {
1105 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1106 ps->lab.retire(thread_num);
1107 }
1108
1109 void
1110 ConcurrentMarkSweepGeneration::
1111 par_oop_since_save_marks_iterate_done(int thread_num) {
1112 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1113 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1114 ps->promo.promoted_oops_iterate(dummy_cl);
1115
1116 // Because card-scanning has been completed, subsequent phases
1117 // (e.g., reference processing) will not need to recognize which
1118 // objects have been promoted during this GC. So, we can now disable
1119 // promotion tracking.
1120 ps->promo.stopTrackingPromotions();
1121 }
1122
1123 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1124 size_t size,
1125 bool tlab)
1126 {
1127 // We allow a STW collection only if a full
1128 // collection was requested.
1129 return full || should_allocate(size, tlab); // FIX ME !!!
1130 // This and promotion failure handling are connected at the
1131 // hip and should be fixed by untying them.
1132 }
1133
1134 bool CMSCollector::shouldConcurrentCollect() {
1135 LogTarget(Trace, gc) log;
1136
1137 if (_full_gc_requested) {
1138 log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
1139 return true;
1140 }
1141
1142 FreelistLocker x(this);
1143 // ------------------------------------------------------------------
1144 // Print out lots of information which affects the initiation of
1145 // a collection.
1146 if (log.is_enabled() && stats().valid()) {
1147 log.print("CMSCollector shouldConcurrentCollect: ");
1148
1149 LogStream out(log);
1150 stats().print_on(&out);
1151
1152 log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
1153 log.print("free=" SIZE_FORMAT, _cmsGen->free());
1154 log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
1155 log.print("promotion_rate=%g", stats().promotion_rate());
1156 log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
1157 log.print("occupancy=%3.7f", _cmsGen->occupancy());
1158 log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1159 log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1160 log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1161 log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
1162 }
1163 // ------------------------------------------------------------------
1164
1165 // If the estimated time to complete a cms collection (cms_duration())
1166 // is less than the estimated time remaining until the cms generation
1167 // is full, start a collection.
1168 if (!UseCMSInitiatingOccupancyOnly) {
1169 if (stats().valid()) {
1170 if (stats().time_until_cms_start() == 0.0) {
1171 return true;
1172 }
1173 } else {
1174 // We want to conservatively collect somewhat early in order
1175 // to try and "bootstrap" our CMS/promotion statistics;
1176 // this branch will not fire after the first successful CMS
1177 // collection because the stats should then be valid.
1178 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1179 log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
1180 _cmsGen->occupancy(), _bootstrap_occupancy);
1181 return true;
1182 }
1183 }
1184 }
1185
1186 // Otherwise, we start a collection cycle if the
1187 // old gen wants a collection cycle started. It may use
1188 // an appropriate criterion for making this decision.
1189 // XXX We need to make sure that the gen expansion
1190 // criterion dovetails well with this. XXX NEED TO FIX THIS
1191 if (_cmsGen->should_concurrent_collect()) {
1192 log.print("CMS old gen initiated");
1193 return true;
1194 }
1195
1196 // We start a collection if we believe an incremental collection may fail;
1197 // this is not likely to be productive in practice because it's probably too
1198 // late anyway.
1199 CMSHeap* heap = CMSHeap::heap();
1200 if (heap->incremental_collection_will_fail(true /* consult_young */)) {
1201 log.print("CMSCollector: collect because incremental collection will fail ");
1202 return true;
1203 }
1204
1205 if (MetaspaceGC::should_concurrent_collect()) {
1206 log.print("CMSCollector: collect for metadata allocation ");
1207 return true;
1208 }
1209
1210 // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1211 if (CMSTriggerInterval >= 0) {
1212 if (CMSTriggerInterval == 0) {
1213 // Trigger always
1214 return true;
1215 }
1216
1217 // Check the CMS time since begin (we do not check the stats validity
1218 // as we want to be able to trigger the first CMS cycle as well)
1219 if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1220 if (stats().valid()) {
1221 log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1222 stats().cms_time_since_begin());
1223 } else {
1224 log.print("CMSCollector: collect because of trigger interval (first collection)");
1225 }
1226 return true;
1227 }
1228 }
1229
1230 return false;
1231 }
1232
1233 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1234
1235 // Clear _expansion_cause fields of constituent generations
1236 void CMSCollector::clear_expansion_cause() {
1237 _cmsGen->clear_expansion_cause();
1238 }
1239
1240 // We should be conservative in starting a collection cycle. To
1241 // start too eagerly runs the risk of collecting too often in the
1242 // extreme. To collect too rarely falls back on full collections,
1243 // which works, even if not optimum in terms of concurrent work.
1244 // As a workaround for collecting too eagerly, use the flag
1245 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1246 // giving the user an easily understandable way of controlling the
1247 // collections.
1248 // We want to start a new collection cycle if any of the following
1249 // conditions hold:
1250 // . our current occupancy exceeds the configured initiating occupancy
1251 // for this generation, or
1252 // . we recently needed to expand this space and have not, since that
1253 // expansion, done a collection of this generation, or
1254 // . the underlying space believes that it may be a good idea to initiate
1255 // a concurrent collection (this may be based on criteria such as the
1256 // following: the space uses linear allocation and linear allocation is
1257 // going to fail, or there is believed to be excessive fragmentation in
1258 // the generation, etc... or ...
1259 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1260 // the case of the old generation; see CR 6543076):
1261 // we may be approaching a point at which allocation requests may fail because
1262 // we will be out of sufficient free space given allocation rate estimates.]
1263 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1264
1265 assert_lock_strong(freelistLock());
1266 if (occupancy() > initiating_occupancy()) {
1267 log_trace(gc)(" %s: collect because of occupancy %f / %f ",
1268 short_name(), occupancy(), initiating_occupancy());
1269 return true;
1270 }
1271 if (UseCMSInitiatingOccupancyOnly) {
1272 return false;
1273 }
1274 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1275 log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
1276 return true;
1277 }
1278 return false;
1279 }
1280
1281 void ConcurrentMarkSweepGeneration::collect(bool full,
1282 bool clear_all_soft_refs,
1283 size_t size,
1284 bool tlab)
1285 {
1286 collector()->collect(full, clear_all_soft_refs, size, tlab);
1287 }
1288
1289 void CMSCollector::collect(bool full,
1290 bool clear_all_soft_refs,
1291 size_t size,
1292 bool tlab)
1293 {
1294 // The following "if" branch is present for defensive reasons.
1295 // In the current uses of this interface, it can be replaced with:
1296 // assert(!GCLocker::is_active(), "Can't be called otherwise");
1297 // But I am not placing that assert here to allow future
1298 // generality in invoking this interface.
1299 if (GCLocker::is_active()) {
1300 // A consistency test for GCLocker
1301 assert(GCLocker::needs_gc(), "Should have been set already");
1302 // Skip this foreground collection, instead
1303 // expanding the heap if necessary.
1304 // Need the free list locks for the call to free() in compute_new_size()
1305 compute_new_size();
1306 return;
1307 }
1308 acquire_control_and_collect(full, clear_all_soft_refs);
1309 }
1310
1311 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1312 CMSHeap* heap = CMSHeap::heap();
1313 unsigned int gc_count = heap->total_full_collections();
1314 if (gc_count == full_gc_count) {
1315 MutexLocker y(CGC_lock, Mutex::_no_safepoint_check_flag);
1316 _full_gc_requested = true;
1317 _full_gc_cause = cause;
1318 CGC_lock->notify(); // nudge CMS thread
1319 } else {
1320 assert(gc_count > full_gc_count, "Error: causal loop");
1321 }
1322 }
1323
1324 bool CMSCollector::is_external_interruption() {
1325 GCCause::Cause cause = CMSHeap::heap()->gc_cause();
1326 return GCCause::is_user_requested_gc(cause) ||
1327 GCCause::is_serviceability_requested_gc(cause);
1328 }
1329
1330 void CMSCollector::report_concurrent_mode_interruption() {
1331 if (is_external_interruption()) {
1332 log_debug(gc)("Concurrent mode interrupted");
1333 } else {
1334 log_debug(gc)("Concurrent mode failure");
1335 _gc_tracer_cm->report_concurrent_mode_failure();
1336 }
1337 }
1338
1339
1340 // The foreground and background collectors need to coordinate in order
1341 // to make sure that they do not mutually interfere with CMS collections.
1342 // When a background collection is active,
1343 // the foreground collector may need to take over (preempt) and
1344 // synchronously complete an ongoing collection. Depending on the
1345 // frequency of the background collections and the heap usage
1346 // of the application, this preemption can be seldom or frequent.
1347 // There are only certain
1348 // points in the background collection at which the "collection-baton"
1349 // can be passed to the foreground collector.
1350 //
1351 // The foreground collector will wait for the baton before
1352 // starting any part of the collection. The foreground collector
1353 // will only wait at one location.
1354 //
1355 // The background collector will yield the baton before starting a new
1356 // phase of the collection (e.g., before initial marking, marking from roots,
1357 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1358 // of the loop which switches the phases. The background collector does some
1359 // of the phases (initial mark, final re-mark) with the world stopped.
1360 // Because of locking involved in stopping the world,
1361 // the foreground collector should not block waiting for the background
1362 // collector when it is doing a stop-the-world phase. The background
1363 // collector will yield the baton at an additional point just before
1364 // it enters a stop-the-world phase. Once the world is stopped, the
1365 // background collector checks the phase of the collection. If the
1366 // phase has not changed, it proceeds with the collection. If the
1367 // phase has changed, it skips that phase of the collection. See
1368 // the comments on the use of the Heap_lock in collect_in_background().
1369 //
1370 // Variables used in baton passing.
1371 // _foregroundGCIsActive - Set to true by the foreground collector when
1372 // it wants the baton. The foreground clears it when it has finished
1373 // the collection.
1374 // _foregroundGCShouldWait - Set to true by the background collector
1375 // when it is running. The foreground collector waits while
1376 // _foregroundGCShouldWait is true.
1377 // CGC_lock - monitor used to protect access to the above variables
1378 // and to notify the foreground and background collectors.
1379 // _collectorState - current state of the CMS collection.
1380 //
1381 // The foreground collector
1382 // acquires the CGC_lock
1383 // sets _foregroundGCIsActive
1384 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1385 // various locks acquired in preparation for the collection
1386 // are released so as not to block the background collector
1387 // that is in the midst of a collection
1388 // proceeds with the collection
1389 // clears _foregroundGCIsActive
1390 // returns
1391 //
1392 // The background collector in a loop iterating on the phases of the
1393 // collection
1394 // acquires the CGC_lock
1395 // sets _foregroundGCShouldWait
1396 // if _foregroundGCIsActive is set
1397 //       clears _foregroundGCShouldWait, notifies CGC_lock
1398 //       waits on CGC_lock for _foregroundGCIsActive to become false
1399 // and exits the loop.
1400 // otherwise
1401 // proceed with that phase of the collection
1402 // if the phase is a stop-the-world phase,
1403 // yield the baton once more just before enqueueing
1404 // the stop-world CMS operation (executed by the VM thread).
1405 // returns after all phases of the collection are done
1406 //
1407
1408 void CMSCollector::acquire_control_and_collect(bool full,
1409 bool clear_all_soft_refs) {
1410 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1411 assert(!Thread::current()->is_ConcurrentGC_thread(),
1412 "shouldn't try to acquire control from self!");
1413
1414 // Start the protocol for acquiring control of the
1415 // collection from the background collector (aka CMS thread).
1416 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1417 "VM thread should have CMS token");
1418 // Remember the possibly interrupted state of an ongoing
1419 // concurrent collection
1420 CollectorState first_state = _collectorState;
1421
1422 // Signal to a possibly ongoing concurrent collection that
1423 // we want to do a foreground collection.
1424 _foregroundGCIsActive = true;
1425
1426 // release locks and wait for a notify from the background collector
1427   // releasing the locks is only necessary for phases which
1428 // do yields to improve the granularity of the collection.
1429 assert_lock_strong(bitMapLock());
1430 // We need to lock the Free list lock for the space that we are
1431 // currently collecting.
1432 assert(haveFreelistLocks(), "Must be holding free list locks");
1433 bitMapLock()->unlock();
1434 releaseFreelistLocks();
1435 {
1436 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
1437 if (_foregroundGCShouldWait) {
1438       // We are going to be waiting for action from the CMS thread;
1439 // it had better not be gone (for instance at shutdown)!
1440 assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
1441 "CMS thread must be running");
1442 // Wait here until the background collector gives us the go-ahead
1443 ConcurrentMarkSweepThread::clear_CMS_flag(
1444 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1445 // Get a possibly blocked CMS thread going:
1446 // Note that we set _foregroundGCIsActive true above,
1447 // without protection of the CGC_lock.
1448 CGC_lock->notify();
1449 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1450 "Possible deadlock");
1451 while (_foregroundGCShouldWait) {
1452 // wait for notification
1453 CGC_lock->wait_without_safepoint_check();
1454 // Possibility of delay/starvation here, since CMS token does
1455         // not know to give priority to VM thread? Actually, I think
1456 // there wouldn't be any delay/starvation, but the proof of
1457 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1458 }
1459 ConcurrentMarkSweepThread::set_CMS_flag(
1460 ConcurrentMarkSweepThread::CMS_vm_has_token);
1461 }
1462 }
1463 // The CMS_token is already held. Get back the other locks.
1464 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1465 "VM thread should have CMS token");
1466 getFreelistLocks();
1467 bitMapLock()->lock_without_safepoint_check();
1468 log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1469 p2i(Thread::current()), first_state);
1470 log_debug(gc, state)(" gets control with state %d", _collectorState);
1471
1472 // Inform cms gen if this was due to partial collection failing.
1473 // The CMS gen may use this fact to determine its expansion policy.
1474 CMSHeap* heap = CMSHeap::heap();
1475 if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
1476 assert(!_cmsGen->incremental_collection_failed(),
1477 "Should have been noticed, reacted to and cleared");
1478 _cmsGen->set_incremental_collection_failed();
1479 }
1480
1481 if (first_state > Idling) {
1482 report_concurrent_mode_interruption();
1483 }
1484
1485 set_did_compact(true);
1486
1487 // If the collection is being acquired from the background
1488 // collector, there may be references on the discovered
1489 // references lists. Abandon those references, since some
1490 // of them may have become unreachable after concurrent
1491 // discovery; the STW compacting collector will redo discovery
1492 // more precisely, without being subject to floating garbage.
1493 // Leaving otherwise unreachable references in the discovered
1494 // lists would require special handling.
1495 ref_processor()->disable_discovery();
1496 ref_processor()->abandon_partial_discovery();
1497 ref_processor()->verify_no_references_recorded();
1498
1499 if (first_state > Idling) {
1500 save_heap_summary();
1501 }
1502
1503 do_compaction_work(clear_all_soft_refs);
1504
1505 // Has the GC time limit been exceeded?
1506 size_t max_eden_size = _young_gen->max_eden_size();
1507 GCCause::Cause gc_cause = heap->gc_cause();
1508 size_policy()->check_gc_overhead_limit(_young_gen->eden()->used(),
1509 _cmsGen->max_capacity(),
1510 max_eden_size,
1511 full,
1512 gc_cause,
1513 heap->soft_ref_policy());
1514
1515 // Reset the expansion cause, now that we just completed
1516 // a collection cycle.
1517 clear_expansion_cause();
1518 _foregroundGCIsActive = false;
1519 return;
1520 }
1521
1522 // Resize the tenured generation
1523 // after obtaining the free list locks for the
1524 // two generations.
1525 void CMSCollector::compute_new_size() {
1526 assert_locked_or_safepoint(Heap_lock);
1527 FreelistLocker z(this);
1528 MetaspaceGC::compute_new_size();
1529 _cmsGen->compute_new_size_free_list();
1530 // recalculate CMS used space after CMS collection
1531 _cmsGen->cmsSpace()->recalculate_used_stable();
1532 }
1533
1534 // A work method used by the foreground collector to do
1535 // a mark-sweep-compact.
1536 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1537 CMSHeap* heap = CMSHeap::heap();
1538
1539 STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1540 gc_timer->register_gc_start();
1541
1542 SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1543 gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
1544
1545 heap->pre_full_gc_dump(gc_timer);
1546
1547 GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1548
1549 // Temporarily widen the span of the weak reference processing to
1550 // the entire heap.
1551 MemRegion new_span(CMSHeap::heap()->reserved_region());
1552 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1553 // Temporarily, clear the "is_alive_non_header" field of the
1554 // reference processor.
1555 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1556 // Temporarily make reference _processing_ single threaded (non-MT).
1557 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1558 // Temporarily make refs discovery atomic
1559 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1560 // Temporarily make reference _discovery_ single threaded (non-MT)
1561 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1562
1563 ref_processor()->set_enqueuing_is_done(false);
1564 ref_processor()->enable_discovery();
1565 ref_processor()->setup_policy(clear_all_soft_refs);
1566 // If an asynchronous collection finishes, the _modUnionTable is
1567   // all clear. If we are taking over from an asynchronous (background)
1568   // collection, clear the _modUnionTable.
1569 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1570 "_modUnionTable should be clear if the baton was not passed");
1571 _modUnionTable.clear_all();
1572 assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
1573          "mod union for klasses should be clear if the baton was not passed");
1574 _ct->cld_rem_set()->clear_mod_union();
1575
1576
1577 // We must adjust the allocation statistics being maintained
1578 // in the free list space. We do so by reading and clearing
1579 // the sweep timer and updating the block flux rate estimates below.
1580 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1581 if (_inter_sweep_timer.is_active()) {
1582 _inter_sweep_timer.stop();
1583 // Note that we do not use this sample to update the _inter_sweep_estimate.
1584 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1585 _inter_sweep_estimate.padded_average(),
1586 _intra_sweep_estimate.padded_average());
1587 }
1588
1589 GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1590 #ifdef ASSERT
1591 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1592 size_t free_size = cms_space->free();
1593 assert(free_size ==
1594 pointer_delta(cms_space->end(), cms_space->compaction_top())
1595 * HeapWordSize,
1596 "All the free space should be compacted into one chunk at top");
1597 assert(cms_space->dictionary()->total_chunk_size(
1598 debug_only(cms_space->freelistLock())) == 0 ||
1599 cms_space->totalSizeInIndexedFreeLists() == 0,
1600 "All the free space should be in a single chunk");
1601 size_t num = cms_space->totalCount();
1602 assert((free_size == 0 && num == 0) ||
1603 (free_size > 0 && (num == 1 || num == 2)),
1604 "There should be at most 2 free chunks after compaction");
1605 #endif // ASSERT
1606 _collectorState = Resetting;
1607 assert(_restart_addr == NULL,
1608 "Should have been NULL'd before baton was passed");
1609 reset_stw();
1610 _cmsGen->reset_after_compaction();
1611 _concurrent_cycles_since_last_unload = 0;
1612
1613 // Clear any data recorded in the PLAB chunk arrays.
1614 if (_survivor_plab_array != NULL) {
1615 reset_survivor_plab_arrays();
1616 }
1617
1618 // Adjust the per-size allocation stats for the next epoch.
1619 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1620 // Restart the "inter sweep timer" for the next epoch.
1621 _inter_sweep_timer.reset();
1622 _inter_sweep_timer.start();
1623
1624 // No longer a need to do a concurrent collection for Metaspace.
1625 MetaspaceGC::set_should_concurrent_collect(false);
1626
1627 heap->post_full_gc_dump(gc_timer);
1628
1629 gc_timer->register_gc_end();
1630
1631 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1632
1633 // For a mark-sweep-compact, compute_new_size() will be called
1634 // in the heap's do_collection() method.
1635 }
1636
1637 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1638 Log(gc, heap) log;
1639 if (!log.is_trace()) {
1640 return;
1641 }
1642
1643 ContiguousSpace* eden_space = _young_gen->eden();
1644 ContiguousSpace* from_space = _young_gen->from();
1645 ContiguousSpace* to_space = _young_gen->to();
1646 // Eden
1647 if (_eden_chunk_array != NULL) {
1648 log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1649 p2i(eden_space->bottom()), p2i(eden_space->top()),
1650 p2i(eden_space->end()), eden_space->capacity());
1651 log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1652 _eden_chunk_index, _eden_chunk_capacity);
1653 for (size_t i = 0; i < _eden_chunk_index; i++) {
1654 log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1655 }
1656 }
1657 // Survivor
1658 if (_survivor_chunk_array != NULL) {
1659 log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1660 p2i(from_space->bottom()), p2i(from_space->top()),
1661 p2i(from_space->end()), from_space->capacity());
1662 log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1663 _survivor_chunk_index, _survivor_chunk_capacity);
1664 for (size_t i = 0; i < _survivor_chunk_index; i++) {
1665 log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1666 }
1667 }
1668 }
1669
1670 void CMSCollector::getFreelistLocks() const {
1671 // Get locks for all free lists in all generations that this
1672 // collector is responsible for
1673 _cmsGen->freelistLock()->lock_without_safepoint_check();
1674 }
1675
1676 void CMSCollector::releaseFreelistLocks() const {
1677 // Release locks for all free lists in all generations that this
1678 // collector is responsible for
1679 _cmsGen->freelistLock()->unlock();
1680 }
1681
1682 bool CMSCollector::haveFreelistLocks() const {
1683 // Check locks for all free lists in all generations that this
1684 // collector is responsible for
1685 assert_lock_strong(_cmsGen->freelistLock());
1686 PRODUCT_ONLY(ShouldNotReachHere());
1687 return true;
1688 }
1689
1690 // A utility class that is used by the CMS collector to
1691 // temporarily "release" the foreground collector from its
1692 // usual obligation to wait for the background collector to
1693 // complete an ongoing phase before proceeding.
1694 class ReleaseForegroundGC: public StackObj {
1695 private:
1696 CMSCollector* _c;
1697 public:
1698   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1699 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1700 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
1701 // allow a potentially blocked foreground collector to proceed
1702 _c->_foregroundGCShouldWait = false;
1703 if (_c->_foregroundGCIsActive) {
1704 CGC_lock->notify();
1705 }
1706 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1707 "Possible deadlock");
1708 }
1709
1710   ~ReleaseForegroundGC() {
1711 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1712 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
1713 _c->_foregroundGCShouldWait = true;
1714 }
1715 };
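// ReleaseForegroundGC is used below in collect_in_background() as a scoped
// guard around the stop-the-world VM operations (initial mark, final remark)
// and the resizing step, so that a pending foreground collection is not kept
// waiting while those operations are queued and executed.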
1716
1717 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1718 assert(Thread::current()->is_ConcurrentGC_thread(),
1719 "A CMS asynchronous collection is only allowed on a CMS thread.");
1720
1721 CMSHeap* heap = CMSHeap::heap();
1722 {
1723 MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
1724 FreelistLocker fll(this);
1725 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
1726 if (_foregroundGCIsActive) {
1727       // The foreground collector is active. Skip this
1728 // background collection.
1729 assert(!_foregroundGCShouldWait, "Should be clear");
1730 return;
1731 } else {
1732 assert(_collectorState == Idling, "Should be idling before start.");
1733 _collectorState = InitialMarking;
1734 register_gc_start(cause);
1735 // Reset the expansion cause, now that we are about to begin
1736 // a new cycle.
1737 clear_expansion_cause();
1738
1739 // Clear the MetaspaceGC flag since a concurrent collection
1740       // is starting; it is cleared again after the collection completes.
1741 MetaspaceGC::set_should_concurrent_collect(false);
1742 }
1743 // Decide if we want to enable class unloading as part of the
1744 // ensuing concurrent GC cycle.
1745 update_should_unload_classes();
1746 _full_gc_requested = false; // acks all outstanding full gc requests
1747 _full_gc_cause = GCCause::_no_gc;
1748 // Signal that we are about to start a collection
1749 heap->increment_total_full_collections(); // ... starting a collection cycle
1750 _collection_count_start = heap->total_full_collections();
1751 }
1752
1753 size_t prev_used = _cmsGen->used();
1754
1755 // The change of the collection state is normally done at this level;
1756 // the exceptions are phases that are executed while the world is
1757 // stopped. For those phases the change of state is done while the
1758 // world is stopped. For baton passing purposes this allows the
1759 // background collector to finish the phase and change state atomically.
1760 // The foreground collector cannot wait on a phase that is done
1761 // while the world is stopped because the foreground collector already
1762 // has the world stopped and would deadlock.
1763 while (_collectorState != Idling) {
1764 log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1765 p2i(Thread::current()), _collectorState);
1766 // The foreground collector
1767 // holds the Heap_lock throughout its collection.
1768 // holds the CMS token (but not the lock)
1769 // except while it is waiting for the background collector to yield.
1770 //
1771 // The foreground collector should be blocked (not for long)
1772 // if the background collector is about to start a phase
1773 // executed with world stopped. If the background
1774 // collector has already started such a phase, the
1775 // foreground collector is blocked waiting for the
1776 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
1777 // are executed in the VM thread.
1778 //
1779 // The locking order is
1780 // PendingListLock (PLL) -- if applicable (FinalMarking)
1781 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
1782 // CMS token (claimed in
1783 // stop_world_and_do() -->
1784 // safepoint_synchronize() -->
1785 // CMSThread::synchronize())
1786
1787 {
1788 // Check if the FG collector wants us to yield.
1789 CMSTokenSync x(true); // is cms thread
1790 if (waitForForegroundGC()) {
1791 // We yielded to a foreground GC, nothing more to be
1792 // done this round.
1793 assert(_foregroundGCShouldWait == false, "We set it to false in "
1794 "waitForForegroundGC()");
1795 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1796 p2i(Thread::current()), _collectorState);
1797 return;
1798 } else {
1799 // The background collector can run but check to see if the
1800 // foreground collector has done a collection while the
1801 // background collector was waiting to get the CGC_lock
1802 // above. If yes, break so that _foregroundGCShouldWait
1803 // is cleared before returning.
1804 if (_collectorState == Idling) {
1805 break;
1806 }
1807 }
1808 }
1809
1810 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1811 "should be waiting");
1812
1813 switch (_collectorState) {
1814 case InitialMarking:
1815 {
1816 ReleaseForegroundGC x(this);
1817 stats().record_cms_begin();
1818 VM_CMS_Initial_Mark initial_mark_op(this);
1819 VMThread::execute(&initial_mark_op);
1820 }
1821 // The collector state may be any legal state at this point
1822 // since the background collector may have yielded to the
1823 // foreground collector.
1824 break;
1825 case Marking:
1826 // initial marking in checkpointRootsInitialWork has been completed
1827 if (markFromRoots()) { // we were successful
1828 assert(_collectorState == Precleaning, "Collector state should "
1829 "have changed");
1830 } else {
1831 assert(_foregroundGCIsActive, "Internal state inconsistency");
1832 }
1833 break;
1834 case Precleaning:
1835 // marking from roots in markFromRoots has been completed
1836 preclean();
1837 assert(_collectorState == AbortablePreclean ||
1838 _collectorState == FinalMarking,
1839 "Collector state should have changed");
1840 break;
1841 case AbortablePreclean:
1842 abortable_preclean();
1843 assert(_collectorState == FinalMarking, "Collector state should "
1844 "have changed");
1845 break;
1846 case FinalMarking:
1847 {
1848 ReleaseForegroundGC x(this);
1849
1850 VM_CMS_Final_Remark final_remark_op(this);
1851 VMThread::execute(&final_remark_op);
1852 }
1853 assert(_foregroundGCShouldWait, "block post-condition");
1854 break;
1855 case Sweeping:
1856 // final marking in checkpointRootsFinal has been completed
1857 sweep();
1858 assert(_collectorState == Resizing, "Collector state change "
1859 "to Resizing must be done under the free_list_lock");
1860
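      // Note the deliberate fall-through: sweep() leaves the collector in the
      // Resizing state, which is handled by the next case.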
1861 case Resizing: {
1862 // Sweeping has been completed...
1863 // At this point the background collection has completed.
1864 // Don't move the call to compute_new_size() down
1865 // into code that might be executed if the background
1866 // collection was preempted.
1867 {
1868 ReleaseForegroundGC x(this); // unblock FG collection
1869 MutexLocker y(Heap_lock, Mutex::_no_safepoint_check_flag);
1870 CMSTokenSync z(true); // not strictly needed.
1871 if (_collectorState == Resizing) {
1872 compute_new_size();
1873 save_heap_summary();
1874 _collectorState = Resetting;
1875 } else {
1876 assert(_collectorState == Idling, "The state should only change"
1877 " because the foreground collector has finished the collection");
1878 }
1879 }
1880 break;
1881 }
1882 case Resetting:
1883 // CMS heap resizing has been completed
1884 reset_concurrent();
1885 assert(_collectorState == Idling, "Collector state should "
1886 "have changed");
1887
1888 MetaspaceGC::set_should_concurrent_collect(false);
1889
1890 stats().record_cms_end();
1891 // Don't move the concurrent_phases_end() and compute_new_size()
1892 // calls to here because a preempted background collection
1893         // has its state set to "Resetting".
1894 break;
1895 case Idling:
1896 default:
1897 ShouldNotReachHere();
1898 break;
1899 }
1900 log_debug(gc, state)(" Thread " INTPTR_FORMAT " done - next CMS state %d",
1901 p2i(Thread::current()), _collectorState);
1902 assert(_foregroundGCShouldWait, "block post-condition");
1903 }
1904
1905 // Should this be in gc_epilogue?
1906 heap->counters()->update_counters();
1907
1908 {
1909 // Clear _foregroundGCShouldWait and, in the event that the
1910 // foreground collector is waiting, notify it, before
1911 // returning.
1912 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
1913 _foregroundGCShouldWait = false;
1914 if (_foregroundGCIsActive) {
1915 CGC_lock->notify();
1916 }
1917 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1918 "Possible deadlock");
1919 }
1920 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1921 p2i(Thread::current()), _collectorState);
1922 log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1923 prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
1924 }
1925
1926 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1927 _cms_start_registered = true;
1928 _gc_timer_cm->register_gc_start();
1929 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1930 }
1931
1932 void CMSCollector::register_gc_end() {
1933 if (_cms_start_registered) {
1934 report_heap_summary(GCWhen::AfterGC);
1935
1936 _gc_timer_cm->register_gc_end();
1937 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1938 _cms_start_registered = false;
1939 }
1940 }
1941
1942 void CMSCollector::save_heap_summary() {
1943 CMSHeap* heap = CMSHeap::heap();
1944 _last_heap_summary = heap->create_heap_summary();
1945 _last_metaspace_summary = heap->create_metaspace_summary();
1946 }
1947
1948 void CMSCollector::report_heap_summary(GCWhen::Type when) {
1949 _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1950 _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1951 }
1952
1953 bool CMSCollector::waitForForegroundGC() {
1954 bool res = false;
1955 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1956 "CMS thread should have CMS token");
1957 // Block the foreground collector until the
1958   // background collector decides whether to
1959 // yield.
1960 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
1961 _foregroundGCShouldWait = true;
1962 if (_foregroundGCIsActive) {
1963 // The background collector yields to the
1964 // foreground collector and returns a value
1965 // indicating that it has yielded. The foreground
1966 // collector can proceed.
1967 res = true;
1968 _foregroundGCShouldWait = false;
1969 ConcurrentMarkSweepThread::clear_CMS_flag(
1970 ConcurrentMarkSweepThread::CMS_cms_has_token);
1971 ConcurrentMarkSweepThread::set_CMS_flag(
1972 ConcurrentMarkSweepThread::CMS_cms_wants_token);
1973 // Get a possibly blocked foreground thread going
1974 CGC_lock->notify();
1975 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1976 p2i(Thread::current()), _collectorState);
1977 while (_foregroundGCIsActive) {
1978 CGC_lock->wait_without_safepoint_check();
1979 }
1980 ConcurrentMarkSweepThread::set_CMS_flag(
1981 ConcurrentMarkSweepThread::CMS_cms_has_token);
1982 ConcurrentMarkSweepThread::clear_CMS_flag(
1983 ConcurrentMarkSweepThread::CMS_cms_wants_token);
1984 }
1985 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1986 p2i(Thread::current()), _collectorState);
1987 return res;
1988 }
1989
1990 // Because of the need to lock the free lists and other structures in
1991 // the collector, common to all the generations that the collector is
1992 // collecting, we need the gc_prologues of individual CMS generations
1993 // to delegate to their collector. It may have been simpler had the
1994 // current infrastructure allowed one to call a prologue on a
1995 // collector. In the absence of that we have the generation's
1996 // prologue delegate to the collector, which delegates back
1997 // some "local" work to a worker method in the individual generations
1998 // that it's responsible for collecting, while itself doing any
1999 // work common to all generations it's responsible for. A similar
2000 // comment applies to the gc_epilogue()'s.
2001 // The role of the variable _between_prologue_and_epilogue is to
2002 // enforce the invocation protocol.
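// Concretely, the old generation's call chain is:
//   GenCollectedHeap -> ConcurrentMarkSweepGeneration::gc_prologue()
//     -> CMSCollector::gc_prologue() -> ConcurrentMarkSweepGeneration::gc_prologue_work()
// and symmetrically for gc_epilogue()/gc_epilogue_work().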
2003 void CMSCollector::gc_prologue(bool full) {
2004 // Call gc_prologue_work() for the CMSGen
2005 // we are responsible for.
2006
2007 // The following locking discipline assumes that we are only called
2008 // when the world is stopped.
2009 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2010
2011 // The CMSCollector prologue must call the gc_prologues for the
2012 // "generations" that it's responsible
2013 // for.
2014
2015 assert( Thread::current()->is_VM_thread()
2016 || ( CMSScavengeBeforeRemark
2017 && Thread::current()->is_ConcurrentGC_thread()),
2018 "Incorrect thread type for prologue execution");
2019
2020 if (_between_prologue_and_epilogue) {
2021 // We have already been invoked; this is a gc_prologue delegation
2022 // from yet another CMS generation that we are responsible for, just
2023 // ignore it since all relevant work has already been done.
2024 return;
2025 }
2026
2027 // set a bit saying prologue has been called; cleared in epilogue
2028 _between_prologue_and_epilogue = true;
2029 // Claim locks for common data structures, then call gc_prologue_work()
2030 // for each CMSGen.
2031
2032 getFreelistLocks(); // gets free list locks on constituent spaces
2033 bitMapLock()->lock_without_safepoint_check();
2034
2035 // Should call gc_prologue_work() for all cms gens we are responsible for
2036 bool duringMarking = _collectorState >= Marking
2037 && _collectorState < Sweeping;
2038
2039 // The young collections clear the modified oops state, which tells if
2040 // there are any modified oops in the class. The remark phase also needs
2041 // that information. Tell the young collection to save the union of all
2042 // modified klasses.
2043 if (duringMarking) {
2044 _ct->cld_rem_set()->set_accumulate_modified_oops(true);
2045 }
2046
2047 bool registerClosure = duringMarking;
2048
2049 _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2050
2051 if (!full) {
2052 stats().record_gc0_begin();
2053 }
2054 }
2055
2056 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2057
2058 _capacity_at_prologue = capacity();
2059 _used_at_prologue = used();
2060 _cmsSpace->recalculate_used_stable();
2061
2062 // We enable promotion tracking so that card-scanning can recognize
2063 // which objects have been promoted during this GC and skip them.
2064 for (uint i = 0; i < ParallelGCThreads; i++) {
2065 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2066 }
2067
2068 // Delegate to CMScollector which knows how to coordinate between
2069 // this and any other CMS generations that it is responsible for
2070 // collecting.
2071 collector()->gc_prologue(full);
2072 }
2073
2074 // This is a "private" interface for use by this generation's CMSCollector.
2075 // Not to be called directly by any other entity (for instance,
2076 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2077 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2078 bool registerClosure, ModUnionClosure* modUnionClosure) {
2079 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2080 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2081 "Should be NULL");
2082 if (registerClosure) {
2083 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2084 }
2085 cmsSpace()->gc_prologue();
2086 // Clear stat counters
2087 NOT_PRODUCT(
2088 assert(_numObjectsPromoted == 0, "check");
2089 assert(_numWordsPromoted == 0, "check");
2090 log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2091 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2092 _numObjectsAllocated = 0;
2093 _numWordsAllocated = 0;
2094 )
2095 }
2096
2097 void CMSCollector::gc_epilogue(bool full) {
2098 // The following locking discipline assumes that we are only called
2099 // when the world is stopped.
2100 assert(SafepointSynchronize::is_at_safepoint(),
2101 "world is stopped assumption");
2102
2103 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2104 // if linear allocation blocks need to be appropriately marked to allow the
2105   // blocks to be parsable. We also check here whether we need to nudge the
2106 // CMS collector thread to start a new cycle (if it's not already active).
2107 assert( Thread::current()->is_VM_thread()
2108 || ( CMSScavengeBeforeRemark
2109 && Thread::current()->is_ConcurrentGC_thread()),
2110 "Incorrect thread type for epilogue execution");
2111
2112 if (!_between_prologue_and_epilogue) {
2113 // We have already been invoked; this is a gc_epilogue delegation
2114 // from yet another CMS generation that we are responsible for, just
2115 // ignore it since all relevant work has already been done.
2116 return;
2117 }
2118 assert(haveFreelistLocks(), "must have freelist locks");
2119 assert_lock_strong(bitMapLock());
2120
2121 _ct->cld_rem_set()->set_accumulate_modified_oops(false);
2122
2123 _cmsGen->gc_epilogue_work(full);
2124
2125 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2126 // in case sampling was not already enabled, enable it
2127 _start_sampling = true;
2128 }
2129 // reset _eden_chunk_array so sampling starts afresh
2130 _eden_chunk_index = 0;
2131
2132 size_t cms_used = _cmsGen->cmsSpace()->used();
2133 _cmsGen->cmsSpace()->recalculate_used_stable();
2134
2135 // update performance counters - this uses a special version of
2136 // update_counters() that allows the utilization to be passed as a
2137 // parameter, avoiding multiple calls to used().
2138 //
2139 _cmsGen->update_counters(cms_used);
2140
2141 bitMapLock()->unlock();
2142 releaseFreelistLocks();
2143
2144 if (!CleanChunkPoolAsync) {
2145 Chunk::clean_chunk_pool();
2146 }
2147
2148 set_did_compact(false);
2149 _between_prologue_and_epilogue = false; // ready for next cycle
2150 }
2151
2152 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2153 collector()->gc_epilogue(full);
2154
2155 // When using ParNew, promotion tracking should have already been
2156 // disabled. However, the prologue (which enables promotion
2157 // tracking) and epilogue are called irrespective of the type of
2158 // GC. So they will also be called before and after Full GCs, during
2159 // which promotion tracking will not be explicitly disabled. So,
2160   // it's safer to also disable it here (to be symmetric with
2161 // enabling it in the prologue).
2162 for (uint i = 0; i < ParallelGCThreads; i++) {
2163 _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2164 }
2165 }
2166
2167 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2168 assert(!incremental_collection_failed(), "Should have been cleared");
2169 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2170 cmsSpace()->gc_epilogue();
2171 // Print stat counters
2172 NOT_PRODUCT(
2173 assert(_numObjectsAllocated == 0, "check");
2174 assert(_numWordsAllocated == 0, "check");
2175 log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2176 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2177 _numObjectsPromoted = 0;
2178 _numWordsPromoted = 0;
2179 )
2180
2181   // The call down the chain in contiguous_available() needs the freelistLock,
2182   // so print this out before releasing the freelistLock.
2183 log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2184 }
2185
2186 #ifndef PRODUCT
2187 bool CMSCollector::have_cms_token() {
2188 Thread* thr = Thread::current();
2189 if (thr->is_VM_thread()) {
2190 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2191 } else if (thr->is_ConcurrentGC_thread()) {
2192 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2193 } else if (thr->is_GC_task_thread()) {
2194 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2195 ParGCRareEvent_lock->owned_by_self();
2196 }
2197 return false;
2198 }
2199
2200 // Check reachability of the given heap address in CMS generation,
2201 // treating all other generations as roots.
2202 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2203 // We could "guarantee" below, rather than assert, but I'll
2204 // leave these as "asserts" so that an adventurous debugger
2205 // could try this in the product build provided some subset of
2206 // the conditions were met, provided they were interested in the
2207 // results and knew that the computation below wouldn't interfere
2208 // with other concurrent computations mutating the structures
2209 // being read or written.
2210 assert(SafepointSynchronize::is_at_safepoint(),
2211 "Else mutations in object graph will make answer suspect");
2212 assert(have_cms_token(), "Should hold cms token");
2213 assert(haveFreelistLocks(), "must hold free list locks");
2214 assert_lock_strong(bitMapLock());
2215
2216 // Clear the marking bit map array before starting, but, just
2217 // for kicks, first report if the given address is already marked
2218 tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2219 _markBitMap.isMarked(addr) ? "" : " not");
2220
2221 if (verify_after_remark()) {
2222 MutexLocker x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2223 bool result = verification_mark_bm()->isMarked(addr);
2224 tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2225 result ? "IS" : "is NOT");
2226 return result;
2227 } else {
2228 tty->print_cr("Could not compute result");
2229 return false;
2230 }
2231 }
2232 #endif
2233
2234 void
2235 CMSCollector::print_on_error(outputStream* st) {
2236 CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2237 if (collector != NULL) {
2238 CMSBitMap* bitmap = &collector->_markBitMap;
2239 st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2240 bitmap->print_on_error(st, " Bits: ");
2241
2242 st->cr();
2243
2244 CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2245 st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2246 mut_bitmap->print_on_error(st, " Bits: ");
2247 }
2248 }
2249
2250 ////////////////////////////////////////////////////////
2251 // CMS Verification Support
2252 ////////////////////////////////////////////////////////
2253 // Following the remark phase, the following invariant
2254 // should hold -- each object in the CMS heap which is
2255 // marked in markBitMap() should be marked in the verification_mark_bm().
2256
2257 class VerifyMarkedClosure: public BitMapClosure {
2258 CMSBitMap* _marks;
2259 bool _failed;
2260
2261 public:
2262   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2263
2264   bool do_bit(size_t offset) {
2265 HeapWord* addr = _marks->offsetToHeapWord(offset);
2266 if (!_marks->isMarked(addr)) {
2267 Log(gc, verify) log;
2268 ResourceMark rm;
2269 LogStream ls(log.error());
2270 oop(addr)->print_on(&ls);
2271 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2272 _failed = true;
2273 }
2274 return true;
2275 }
2276
2277   bool failed() { return _failed; }
2278 };
2279
2280 bool CMSCollector::verify_after_remark() {
2281 GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2282 MutexLocker ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2283 static bool init = false;
2284
2285 assert(SafepointSynchronize::is_at_safepoint(),
2286 "Else mutations in object graph will make answer suspect");
2287 assert(have_cms_token(),
2288 "Else there may be mutual interference in use of "
2289          "verification data structures");
2290 assert(_collectorState > Marking && _collectorState <= Sweeping,
2291 "Else marking info checked here may be obsolete");
2292 assert(haveFreelistLocks(), "must hold free list locks");
2293 assert_lock_strong(bitMapLock());
2294
2295
2296 // Allocate marking bit map if not already allocated
2297 if (!init) { // first time
2298 if (!verification_mark_bm()->allocate(_span)) {
2299 return false;
2300 }
2301 init = true;
2302 }
2303
2304 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2305
2306 // Turn off refs discovery -- so we will be tracing through refs.
2307 // This is as intended, because by this time
2308 // GC must already have cleared any refs that need to be cleared,
2309 // and traced those that need to be marked; moreover,
2310 // the marking done here is not going to interfere in any
2311 // way with the marking information used by GC.
2312 NoRefDiscovery no_discovery(ref_processor());
2313
2314 #if COMPILER2_OR_JVMCI
2315 DerivedPointerTableDeactivate dpt_deact;
2316 #endif
2317
2318 // Clear any marks from a previous round
2319 verification_mark_bm()->clear_all();
2320 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2321 verify_work_stacks_empty();
2322
2323 CMSHeap* heap = CMSHeap::heap();
2324 heap->ensure_parsability(false); // fill TLABs, but no need to retire them
2325 // Update the saved marks which may affect the root scans.
2326 heap->save_marks();
2327
2328 if (CMSRemarkVerifyVariant == 1) {
2329 // In this first variant of verification, we complete
2330 // all marking, then check if the new marks-vector is
2331 // a subset of the CMS marks-vector.
2332 verify_after_remark_work_1();
2333 } else {
2334 guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
2335 // In this second variant of verification, we flag an error
2336 // (i.e. an object reachable in the new marks-vector not reachable
2337 // in the CMS marks-vector) immediately, also indicating the
2338     // identity of an object (A) that references the unmarked object (B) --
2339 // presumably, a mutation to A failed to be picked up by preclean/remark?
2340 verify_after_remark_work_2();
2341 }
2342
2343 return true;
2344 }
2345
2346 void CMSCollector::verify_after_remark_work_1() {
2347 ResourceMark rm;
2348 HandleMark hm;
2349 CMSHeap* heap = CMSHeap::heap();
2350
2351 // Get a clear set of claim bits for the roots processing to work with.
2352 ClassLoaderDataGraph::clear_claimed_marks();
2353
2354 // Mark from roots one level into CMS
2355 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2356 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2357
2358 {
2359 StrongRootsScope srs(1);
2360
2361 heap->cms_process_roots(&srs,
2362 true, // young gen as roots
2363 GenCollectedHeap::ScanningOption(roots_scanning_options()),
2364 should_unload_classes(),
2365 ¬Older,
2366 NULL);
2367 }
2368
2369 // Now mark from the roots
2370 MarkFromRootsClosure markFromRootsClosure(this, _span,
2371 verification_mark_bm(), verification_mark_stack(),
2372 false /* don't yield */, true /* verifying */);
2373 assert(_restart_addr == NULL, "Expected pre-condition");
2374 verification_mark_bm()->iterate(&markFromRootsClosure);
2375 while (_restart_addr != NULL) {
2376 // Deal with stack overflow: by restarting at the indicated
2377 // address.
2378 HeapWord* ra = _restart_addr;
2379 markFromRootsClosure.reset(ra);
2380 _restart_addr = NULL;
2381 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2382 }
2383 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2384 verify_work_stacks_empty();
2385
2386 // Marking completed -- now verify that each bit marked in
2387 // verification_mark_bm() is also marked in markBitMap(); flag all
2388 // errors by printing corresponding objects.
2389 VerifyMarkedClosure vcl(markBitMap());
2390 verification_mark_bm()->iterate(&vcl);
2391 if (vcl.failed()) {
2392 Log(gc, verify) log;
2393 log.error("Failed marking verification after remark");
2394 ResourceMark rm;
2395 LogStream ls(log.error());
2396 heap->print_on(&ls);
2397 fatal("CMS: failed marking verification after remark");
2398 }
2399 }
2400
2401 class VerifyCLDOopsCLDClosure : public CLDClosure {
2402 class VerifyCLDOopsClosure : public OopClosure {
2403 CMSBitMap* _bitmap;
2404 public:
2405     VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2406     void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2407     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2408 } _oop_closure;
2409 public:
2410   VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2411   void do_cld(ClassLoaderData* cld) {
2412 cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, false);
2413 }
2414 };
2415
2416 void CMSCollector::verify_after_remark_work_2() {
2417 ResourceMark rm;
2418 HandleMark hm;
2419 CMSHeap* heap = CMSHeap::heap();
2420
2421 // Get a clear set of claim bits for the roots processing to work with.
2422 ClassLoaderDataGraph::clear_claimed_marks();
2423
2424 // Mark from roots one level into CMS
2425 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2426 markBitMap());
2427 CLDToOopClosure cld_closure(¬Older, ClassLoaderData::_claim_strong);
2428
2429 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2430
2431 {
2432 StrongRootsScope srs(1);
2433
2434 heap->cms_process_roots(&srs,
2435 true, // young gen as roots
2436 GenCollectedHeap::ScanningOption(roots_scanning_options()),
2437 should_unload_classes(),
2438 ¬Older,
2439 &cld_closure);
2440 }
2441
2442 // Now mark from the roots
2443 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2444 verification_mark_bm(), markBitMap(), verification_mark_stack());
2445 assert(_restart_addr == NULL, "Expected pre-condition");
2446 verification_mark_bm()->iterate(&markFromRootsClosure);
2447 while (_restart_addr != NULL) {
2448 // Deal with stack overflow: by restarting at the indicated
2449 // address.
2450 HeapWord* ra = _restart_addr;
2451 markFromRootsClosure.reset(ra);
2452 _restart_addr = NULL;
2453 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2454 }
2455 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2456 verify_work_stacks_empty();
2457
2458 VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
2459 ClassLoaderDataGraph::cld_do(&verify_cld_oops);
2460
2461 // Marking completed -- now verify that each bit marked in
2462 // verification_mark_bm() is also marked in markBitMap(); flag all
2463 // errors by printing corresponding objects.
2464 VerifyMarkedClosure vcl(markBitMap());
2465 verification_mark_bm()->iterate(&vcl);
2466 assert(!vcl.failed(), "Else verification above should not have succeeded");
2467 }
2468
2469 void ConcurrentMarkSweepGeneration::save_marks() {
2470 // delegate to CMS space
2471 cmsSpace()->save_marks();
2472 }
2473
2474 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2475 return cmsSpace()->no_allocs_since_save_marks();
2476 }
2477
2478 void
2479 ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {
2480 if (freelistLock()->owned_by_self()) {
2481 Generation::oop_iterate(cl);
2482 } else {
2483 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
2484 Generation::oop_iterate(cl);
2485 }
2486 }
2487
2488 void
2489 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2490 if (freelistLock()->owned_by_self()) {
2491 Generation::object_iterate(cl);
2492 } else {
2493 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
2494 Generation::object_iterate(cl);
2495 }
2496 }
2497
2498 void
2499 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2500 if (freelistLock()->owned_by_self()) {
2501 Generation::safe_object_iterate(cl);
2502 } else {
2503 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
2504 Generation::safe_object_iterate(cl);
2505 }
2506 }
2507
2508 void
2509 ConcurrentMarkSweepGeneration::post_compact() {
2510 }
2511
2512 void
2513 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2514 // Fix the linear allocation blocks to look like free blocks.
2515
2516 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2517 // are not called when the heap is verified during universe initialization and
2518 // at vm shutdown.
2519 if (freelistLock()->owned_by_self()) {
2520 cmsSpace()->prepare_for_verify();
2521 } else {
2522 MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2523 cmsSpace()->prepare_for_verify();
2524 }
2525 }
2526
2527 void
2528 ConcurrentMarkSweepGeneration::verify() {
2529 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2530 // are not called when the heap is verified during universe initialization and
2531 // at vm shutdown.
2532 if (freelistLock()->owned_by_self()) {
2533 cmsSpace()->verify();
2534 } else {
2535 MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2536 cmsSpace()->verify();
2537 }
2538 }
2539
2540 void CMSCollector::verify() {
2541 _cmsGen->verify();
2542 }
2543
2544 #ifndef PRODUCT
2545 bool CMSCollector::overflow_list_is_empty() const {
2546 assert(_num_par_pushes >= 0, "Inconsistency");
2547 if (_overflow_list == NULL) {
2548 assert(_num_par_pushes == 0, "Inconsistency");
2549 }
2550 return _overflow_list == NULL;
2551 }
2552
2553 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2554 // merely consolidate assertion checks that appear to occur together frequently.
2555 void CMSCollector::verify_work_stacks_empty() const {
2556 assert(_markStack.isEmpty(), "Marking stack should be empty");
2557 assert(overflow_list_is_empty(), "Overflow list should be empty");
2558 }
2559
2560 void CMSCollector::verify_overflow_empty() const {
2561 assert(overflow_list_is_empty(), "Overflow list should be empty");
2562 assert(no_preserved_marks(), "No preserved marks");
2563 }
2564 #endif // PRODUCT
2565
2566 // Decide if we want to enable class unloading as part of the
2567 // ensuing concurrent GC cycle. We will collect and
2568 // unload classes if it's the case that:
2569 // (a) class unloading is enabled at the command line, and
2570 // (b) old gen is getting really full
2571 // NOTE: Provided there is no change in the state of the heap between
2572 // calls to this method, it should have idempotent results. Moreover,
2573 // its results should be monotonically increasing (i.e. going from 0 to 1,
2574 // but not 1 to 0) between successive calls between which the heap was
2575 // not collected. For the implementation below, it must thus rely on
2576 // the property that concurrent_cycles_since_last_unload()
2577 // will not decrease unless a collection cycle happened and that
2578 // _cmsGen->is_too_full() is
2579 // itself also monotonic in that sense. See check_monotonicity()
2580 // below.
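// (Illustrative: a CMSClassUnloadingMaxInterval of 0 makes the interval clause
// below trivially true, so classes are unloaded on every cycle once
// CMSClassUnloadingEnabled is set.)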
2581 void CMSCollector::update_should_unload_classes() {
2582 _should_unload_classes = false;
2583 if (CMSClassUnloadingEnabled) {
2584 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2585 CMSClassUnloadingMaxInterval)
2586 || _cmsGen->is_too_full();
2587 }
2588 }
2589
2590 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2591 bool res = should_concurrent_collect();
2592 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2593 return res;
2594 }
2595
2596 void CMSCollector::setup_cms_unloading_and_verification_state() {
2597 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2598 || VerifyBeforeExit;
2599 const int rso = GenCollectedHeap::SO_AllCodeCache;
2600
2601 // We set the proper root for this CMS cycle here.
2602 if (should_unload_classes()) { // Should unload classes this cycle
2603 remove_root_scanning_option(rso); // Shrink the root set appropriately
2604 set_verifying(should_verify); // Set verification state for this cycle
2605 return; // Nothing else needs to be done at this time
2606 }
2607
2608 // Not unloading classes this cycle
2609 assert(!should_unload_classes(), "Inconsistency!");
2610
2611 // If we are not unloading classes then add SO_AllCodeCache to root
2612 // scanning options.
2613 add_root_scanning_option(rso);
2614
2615 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2616 set_verifying(true);
2617 } else if (verifying() && !should_verify) {
2618 // We were verifying, but some verification flags got disabled.
2619 set_verifying(false);
2620 // Exclude symbols, strings and code cache elements from root scanning to
2621 // reduce IM and RM pauses.
2622 remove_root_scanning_option(rso);
2623 }
2624 }
2625
2626
2627 #ifndef PRODUCT
2628 HeapWord* CMSCollector::block_start(const void* p) const {
2629 const HeapWord* addr = (HeapWord*)p;
2630 if (_span.contains(p)) {
2631 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2632 return _cmsGen->cmsSpace()->block_start(p);
2633 }
2634 }
2635 return NULL;
2636 }
2637 #endif
2638
2639 HeapWord*
2640 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2641 bool tlab,
2642 bool parallel) {
2643 CMSSynchronousYieldRequest yr;
2644 assert(!tlab, "Can't deal with TLAB allocation");
2645 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
2646 expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2647 if (GCExpandToAllocateDelayMillis > 0) {
2648 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2649 }
2650 return have_lock_and_allocate(word_size, tlab);
2651 }
2652
2653 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2654 size_t bytes,
2655 size_t expand_bytes,
2656 CMSExpansionCause::Cause cause)
2657 {
2658
2659 bool success = expand(bytes, expand_bytes);
2660
2661 // remember why we expanded; this information is used
2662 // by shouldConcurrentCollect() when making decisions on whether to start
2663 // a new CMS cycle.
2664 if (success) {
2665 set_expansion_cause(cause);
2666 log_trace(gc)("Expanded CMS gen for %s", CMSExpansionCause::to_string(cause));
2667 }
2668 }
2669
2670 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2671 HeapWord* res = NULL;
2672 MutexLocker x(ParGCRareEvent_lock);
2673 while (true) {
2674 // Expansion by some other thread might make alloc OK now:
2675 res = ps->lab.alloc(word_sz);
2676 if (res != NULL) return res;
2677 // If there's not enough expansion space available, give up.
2678 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2679 return NULL;
2680 }
2681 // Otherwise, we try expansion.
2682 expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2683 // Now go around the loop and try alloc again;
2684 // A competing par_promote might beat us to the expansion space,
2685 // so we may go around the loop again if promotion fails again.
2686 if (GCExpandToAllocateDelayMillis > 0) {
2687 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2688 }
2689 }
2690 }
2691
2692
2693 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2694 PromotionInfo* promo) {
2695 MutexLocker x(ParGCRareEvent_lock);
2696 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2697 while (true) {
2698 // Expansion by some other thread might make alloc OK now:
2699 if (promo->ensure_spooling_space()) {
2700 assert(promo->has_spooling_space(),
2701 "Post-condition of successful ensure_spooling_space()");
2702 return true;
2703 }
2704 // If there's not enough expansion space available, give up.
2705 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2706 return false;
2707 }
2708 // Otherwise, we try expansion.
2709 expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2710 // Now go around the loop and try alloc again;
2711 // A competing allocation might beat us to the expansion space,
2712 // so we may go around the loop again if allocation fails again.
2713 if (GCExpandToAllocateDelayMillis > 0) {
2714 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2715 }
2716 }
2717 }
2718
2719 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2720 // Only shrink if a compaction was done so that all the free space
2721 // in the generation is in a contiguous block at the end.
2722 if (did_compact()) {
2723 CardGeneration::shrink(bytes);
2724 }
2725 }
2726
2727 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2728 assert_locked_or_safepoint(Heap_lock);
2729 }
2730
2731 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2732 assert_locked_or_safepoint(Heap_lock);
2733 assert_lock_strong(freelistLock());
2734 log_trace(gc)("Shrinking of CMS not yet implemented");
2735 return;
2736 }
2737
2738
2739 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2740 // phases.
2741 class CMSPhaseAccounting: public StackObj {
2742 public:
2743 CMSPhaseAccounting(CMSCollector *collector,
2744 const char *title);
2745 ~CMSPhaseAccounting();
2746
2747 private:
2748 CMSCollector *_collector;
2749 const char *_title;
2750 GCTraceConcTime(Info, gc) _trace_time;
2751
2752 public:
2753 // Not MT-safe; so do not pass around these StackObj's
2754 // where they may be accessed by other threads.
2755 double wallclock_millis() {
2756 return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2757 }
2758 };
2759
2760 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2761 const char *title) :
2762 _collector(collector), _title(title), _trace_time(title) {
2763
2764 _collector->resetYields();
2765 _collector->resetTimer();
2766 _collector->startTimer();
2767 _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2768 }
2769
2770 CMSPhaseAccounting::~CMSPhaseAccounting() {
2771 _collector->gc_timer_cm()->register_gc_concurrent_end();
2772 _collector->stopTimer();
2773 log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2774 log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2775 }
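// Typical usage: each concurrent phase brackets its work with a
// stack-allocated CMSPhaseAccounting, so the yield count and phase timer are
// reset on entry and reported on exit. A minimal sketch, mirroring the uses
// in markFromRoots() and preclean() below:
//
//   {
//     GCTraceCPUTime tcpu;
//     CMSPhaseAccounting pa(this, "Concurrent Mark");
//     ... perform the concurrent work, yielding as required ...
//   }  // destructor logs the active time and the number of yields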
2776
2777 // CMS work
2778
2779 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2780 class CMSParMarkTask : public AbstractGangTask {
2781 protected:
2782 CMSCollector* _collector;
2783 uint _n_workers;
2784 CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2785 AbstractGangTask(name),
2786 _collector(collector),
2787 _n_workers(n_workers) {}
2788 // Work method in support of parallel rescan ... of young gen spaces
2789 void do_young_space_rescan(OopsInGenClosure* cl,
2790 ContiguousSpace* space,
2791 HeapWord** chunk_array, size_t chunk_top);
2792 void work_on_young_gen_roots(OopsInGenClosure* cl);
2793 };
2794
2795 // Parallel initial mark task
2796 class CMSParInitialMarkTask: public CMSParMarkTask {
2797 StrongRootsScope* _strong_roots_scope;
2798 public:
2799 CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2800 CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2801 _strong_roots_scope(strong_roots_scope) {}
2802 void work(uint worker_id);
2803 };
2804
2805 // Checkpoint the roots into this generation from outside
2806 // this generation. [Note this initial checkpoint need only
2807 // be approximate -- we'll do a catch up phase subsequently.]
2808 void CMSCollector::checkpointRootsInitial() {
2809 assert(_collectorState == InitialMarking, "Wrong collector state");
2810 check_correct_thread_executing();
2811 TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
2812
2813 save_heap_summary();
2814 report_heap_summary(GCWhen::BeforeGC);
2815
2816 ReferenceProcessor* rp = ref_processor();
2817 assert(_restart_addr == NULL, "Control point invariant");
2818 {
2819 // acquire locks for subsequent manipulations
2820 MutexLocker x(bitMapLock(),
2821 Mutex::_no_safepoint_check_flag);
2822 checkpointRootsInitialWork();
2823 // enable ("weak") refs discovery
2824 rp->enable_discovery();
2825 _collectorState = Marking;
2826 }
2827
2828 _cmsGen->cmsSpace()->recalculate_used_stable();
2829 }
2830
2831 void CMSCollector::checkpointRootsInitialWork() {
2832 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2833 assert(_collectorState == InitialMarking, "just checking");
2834
2835 // Already have locks.
2836 assert_lock_strong(bitMapLock());
2837 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2838
2839 // Setup the verification and class unloading state for this
2840 // CMS collection cycle.
2841 setup_cms_unloading_and_verification_state();
2842
2843 GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2844
2845 // Reset all the PLAB chunk arrays if necessary.
2846 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2847 reset_survivor_plab_arrays();
2848 }
2849
2850 ResourceMark rm;
2851 HandleMark hm;
2852
2853 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2854 CMSHeap* heap = CMSHeap::heap();
2855
2856 verify_work_stacks_empty();
2857 verify_overflow_empty();
2858
2859 heap->ensure_parsability(false); // fill TLABs, but no need to retire them
2860 // Update the saved marks which may affect the root scans.
2861 heap->save_marks();
2862
2863 // weak reference processing has not started yet.
2864 ref_processor()->set_enqueuing_is_done(false);
2865
2866 // Need to remember all newly created CLDs,
2867 // so that we can guarantee that the remark finds them.
2868 ClassLoaderDataGraph::remember_new_clds(true);
2869
2870 // Whenever a CLD is found, it will be claimed before proceeding to mark
2871 // the klasses. The claimed marks need to be cleared before marking starts.
2872 ClassLoaderDataGraph::clear_claimed_marks();
2873
2874 print_eden_and_survivor_chunk_arrays();
2875
2876 {
2877 #if COMPILER2_OR_JVMCI
2878 DerivedPointerTableDeactivate dpt_deact;
2879 #endif
2880 if (CMSParallelInitialMarkEnabled) {
2881 // The parallel version.
2882 WorkGang* workers = heap->workers();
2883 assert(workers != NULL, "Need parallel worker threads.");
2884 uint n_workers = workers->active_workers();
2885
2886 StrongRootsScope srs(n_workers);
2887
2888 CMSParInitialMarkTask tsk(this, &srs, n_workers);
2889 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2890 // If the total number of workers is greater than 1, then multiple workers
2891 // may be used at some time and the initialization has been set
2892 // such that the single threaded path cannot be used.
2893 if (workers->total_workers() > 1) {
2894 workers->run_task(&tsk);
2895 } else {
2896 tsk.work(0);
2897 }
2898 } else {
2899 // The serial version.
2900 CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong);
2901 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2902
2903 StrongRootsScope srs(1);
2904
2905 heap->cms_process_roots(&srs,
2906 true, // young gen as roots
2907 GenCollectedHeap::ScanningOption(roots_scanning_options()),
2908 should_unload_classes(),
2909 &notOlder,
2910 &cld_closure);
2911 }
2912 }
2913
2914 // Clear mod-union table; it will be dirtied in the prologue of
2915 // CMS generation per each young generation collection.
2916
2917 assert(_modUnionTable.isAllClear(),
2918 "Was cleared in most recent final checkpoint phase"
2919 " or no bits are set in the gc_prologue before the start of the next "
2920 "subsequent marking phase.");
2921
2922 assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
2923
2924 // Save the end of the used_region of the constituent generations
2925 // to be used to limit the extent of sweep in each generation.
2926 save_sweep_limits();
2927 verify_overflow_empty();
2928 }
2929
2930 bool CMSCollector::markFromRoots() {
2931 // we might be tempted to assert that:
2932 // assert(!SafepointSynchronize::is_at_safepoint(),
2933 // "inconsistent argument?");
2934 // However that wouldn't be right, because it's possible that
2935 // a safepoint is indeed in progress as a young generation
2936 // stop-the-world GC happens even as we mark in this generation.
2937 assert(_collectorState == Marking, "inconsistent state?");
2938 check_correct_thread_executing();
2939 verify_overflow_empty();
2940
2941 // Weak ref discovery note: We may be discovering weak
2942 // refs in this generation concurrent (but interleaved) with
2943 // weak ref discovery by the young generation collector.
2944
2945 CMSTokenSyncWithLocks ts(true, bitMapLock());
2946 GCTraceCPUTime tcpu;
2947 CMSPhaseAccounting pa(this, "Concurrent Mark");
2948 bool res = markFromRootsWork();
2949 if (res) {
2950 _collectorState = Precleaning;
2951 } else { // We failed and a foreground collection wants to take over
2952 assert(_foregroundGCIsActive, "internal state inconsistency");
2953 assert(_restart_addr == NULL, "foreground will restart from scratch");
2954 log_debug(gc)("bailing out to foreground collection");
2955 }
2956 verify_overflow_empty();
2957 return res;
2958 }
2959
2960 bool CMSCollector::markFromRootsWork() {
2961 // iterate over marked bits in bit map, doing a full scan and mark
2962 // from these roots using the following algorithm:
2963 // . if oop is to the right of the current scan pointer,
2964 // mark corresponding bit (we'll process it later)
2965 // . else (oop is to left of current scan pointer)
2966 // push oop on marking stack
2967 // . drain the marking stack
2968
2969 // Note that when we do a marking step we need to hold the
2970 // bit map lock -- recall that direct allocation (by mutators)
2971 // and promotion (by the young generation collector) is also
2972 // marking the bit map. [the so-called allocate live policy.]
2973 // Because the implementation of bit map marking is not
2974 // robust wrt simultaneous marking of bits in the same word,
2975 // we need to make sure that there is no such interference
2976 // between concurrent such updates.
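// A rough code-shaped restatement of the marking step described above
// (sketch only; the real work is done by MarkFromRootsClosure and friends,
// and 'finger' stands for the current scan position in the bitmap):
//
//   void mark_step(oop obj, HeapWord* finger) {
//     HeapWord* addr = (HeapWord*)obj;
//     if (!_markBitMap.isMarked(addr)) {
//       _markBitMap.mark(addr);        // needs bitMapLock, per the note above
//       if (addr < finger) {
//         _markStack.push(obj);        // left of the scan pointer: trace now
//       }                              // right of it: the bitmap scan will
//     }                                // reach it later
//   }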
2977
2978 // already have locks
2979 assert_lock_strong(bitMapLock());
2980
2981 verify_work_stacks_empty();
2982 verify_overflow_empty();
2983 bool result = false;
2984 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2985 result = do_marking_mt();
2986 } else {
2987 result = do_marking_st();
2988 }
2989 return result;
2990 }
2991
2992 // Forward decl
2993 class CMSConcMarkingTask;
2994
2995 class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator {
2996 CMSCollector* _collector;
2997 CMSConcMarkingTask* _task;
2998 public:
2999 virtual void yield();
3000
3001 // "n_threads" is the number of threads to be terminated.
3002 // "queue_set" is a set of work queues of other threads.
3003 // "collector" is the CMS collector associated with this task terminator.
3004 // "yield" indicates whether we need the gang as a whole to yield.
3005 CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3006 ParallelTaskTerminator(n_threads, queue_set),
3007 _collector(collector) { }
3008
3009 void set_task(CMSConcMarkingTask* task) {
3010 _task = task;
3011 }
3012 };
3013
3014 class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator {
3015 CMSCollector* _collector;
3016 CMSConcMarkingTask* _task;
3017 public:
3018 virtual void yield();
3019
3020 // "n_threads" is the number of threads to be terminated.
3021 // "queue_set" is a set of work queues of other threads.
3022 // "collector" is the CMS collector associated with this task terminator.
3023 // "yield" indicates whether we need the gang as a whole to yield.
3024 CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3025 OWSTTaskTerminator(n_threads, queue_set),
3026 _collector(collector) { }
3027
3028 void set_task(CMSConcMarkingTask* task) {
3029 _task = task;
3030 }
3031 };
3032
3033 class CMSConcMarkingTaskTerminator {
3034 private:
3035 ParallelTaskTerminator* _term;
3036 public:
3037 CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) {
3038 if (UseOWSTTaskTerminator) {
3039 _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector);
3040 } else {
3041 _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector);
3042 }
3043 }
3044 ~CMSConcMarkingTaskTerminator() {
3045 assert(_term != NULL, "Must not be NULL");
3046 delete _term;
3047 }
3048
3049 void set_task(CMSConcMarkingTask* task);
3050 ParallelTaskTerminator* terminator() const { return _term; }
3051 };
3052
3053 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3054 CMSConcMarkingTask* _task;
3055 public:
3056 bool should_exit_termination();
3057 void set_task(CMSConcMarkingTask* task) {
3058 _task = task;
3059 }
3060 };
3061
3062 // MT Concurrent Marking Task
3063 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3064 CMSCollector* _collector;
3065 uint _n_workers; // requested/desired # workers
3066 bool _result;
3067 CompactibleFreeListSpace* _cms_space;
3068 char _pad_front[64]; // padding to ...
3069 HeapWord* volatile _global_finger; // ... avoid sharing cache line
3070 char _pad_back[64];
3071 HeapWord* _restart_addr;
3072
3073 // Exposed here for yielding support
3074 Mutex* const _bit_map_lock;
3075
3076 // The per thread work queues, available here for stealing
3077 OopTaskQueueSet* _task_queues;
3078
3079 // Termination (and yielding) support
3080 CMSConcMarkingTaskTerminator _term;
3081 CMSConcMarkingTerminatorTerminator _term_term;
3082
3083 public:
3084 CMSConcMarkingTask(CMSCollector* collector,
3085 CompactibleFreeListSpace* cms_space,
3086 YieldingFlexibleWorkGang* workers,
3087 OopTaskQueueSet* task_queues):
3088 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3089 _collector(collector),
3090 _n_workers(0),
3091 _result(true),
3092 _cms_space(cms_space),
3093 _bit_map_lock(collector->bitMapLock()),
3094 _task_queues(task_queues),
3095 _term(_n_workers, task_queues, _collector)
3096 {
3097 _requested_size = _n_workers;
3098 _term.set_task(this);
3099 _term_term.set_task(this);
3100 _restart_addr = _global_finger = _cms_space->bottom();
3101 }
3102
3103
3104 OopTaskQueueSet* task_queues() { return _task_queues; }
3105
3106 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3107
3108 HeapWord* volatile* global_finger_addr() { return &_global_finger; }
3109
3110 ParallelTaskTerminator* terminator() { return _term.terminator(); }
3111
3112 virtual void set_for_termination(uint active_workers) {
3113 terminator()->reset_for_reuse(active_workers);
3114 }
3115
3116 void work(uint worker_id);
3117 bool should_yield() {
3118 return ConcurrentMarkSweepThread::should_yield()
3119 && !_collector->foregroundGCIsActive();
3120 }
3121
3122 virtual void coordinator_yield(); // stuff done by coordinator
3123 bool result() { return _result; }
3124
3125 void reset(HeapWord* ra) {
3126 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3127 _restart_addr = _global_finger = ra;
3128 _term.terminator()->reset_for_reuse();
3129 }
3130
3131 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3132 OopTaskQueue* work_q);
3133
3134 private:
3135 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3136 void do_work_steal(int i);
3137 void bump_global_finger(HeapWord* f);
3138 };
3139
3140 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3141 assert(_task != NULL, "Error");
3142 return _task->yielding();
3143 // Note that we do not need the disjunct || _task->should_yield() above
3144 // because we want terminating threads to yield only if the task
3145 // is already in the midst of yielding, which happens only after at least one
3146 // thread has yielded.
3147 }
3148
3149 void CMSConcMarkingParallelTerminator::yield() {
3150 if (_task->should_yield()) {
3151 _task->yield();
3152 } else {
3153 ParallelTaskTerminator::yield();
3154 }
3155 }
3156
3157 void CMSConcMarkingOWSTTerminator::yield() {
3158 if (_task->should_yield()) {
3159 _task->yield();
3160 } else {
3161 OWSTTaskTerminator::yield();
3162 }
3163 }
3164
3165 void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) {
3166 if (UseOWSTTaskTerminator) {
3167 ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task);
3168 } else {
3169 ((CMSConcMarkingParallelTerminator*)_term)->set_task(task);
3170 }
3171 }
3172
3173 ////////////////////////////////////////////////////////////////
3174 // Concurrent Marking Algorithm Sketch
3175 ////////////////////////////////////////////////////////////////
3176 // Until all tasks exhausted (both spaces):
3177 // -- claim next available chunk
3178 // -- bump global finger via CAS
3179 // -- find first object that starts in this chunk
3180 // and start scanning bitmap from that position
3181 // -- scan marked objects for oops
3182 // -- CAS-mark target, and if successful:
3183 // . if target oop is above global finger (volatile read)
3184 // nothing to do
3185 // . if target oop is in chunk and above local finger
3186 // then nothing to do
3187 // . else push on work-queue
3188 // -- Deal with possible overflow issues:
3189 // . local work-queue overflow causes stuff to be pushed on
3190 // global (common) overflow queue
3191 // . always first empty local work queue
3192 // . then get a batch of oops from global work queue if any
3193 // . then do work stealing
3194 // -- When all tasks claimed (both spaces)
3195 // and local work queue empty,
3196 // then in a loop do:
3197 // . check global overflow stack; steal a batch of oops and trace
3198 // . try to steal from other threads if GOS is empty
3199 // . if neither is available, offer termination
3200 // -- Terminate and return result
3201 //
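// The finger test sketched above, in rough code form (illustrative only;
// the actual decision lives in ParMarkFromRootsClosure -- 'global_finger'
// is read as a volatile, 'local_finger' is this worker's position within
// its claimed chunk):
//
//   if (addr >= global_finger) {
//     // beyond the global finger: a later bitmap scan will cover it
//   } else if (chunk.contains(addr) && addr >= local_finger) {
//     // in our own chunk, ahead of our local scan: also nothing to do
//   } else if (!work_queue->push(obj)) {
//     overflow_stack->par_push(obj);   // local queue full: spill to the
//   }                                  // shared overflow stack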
3202 void CMSConcMarkingTask::work(uint worker_id) {
3203 elapsedTimer _timer;
3204 ResourceMark rm;
3205 HandleMark hm;
3206
3207 DEBUG_ONLY(_collector->verify_overflow_empty();)
3208
3209 // Before we begin work, our work queue should be empty
3210 assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3211 // Scan the bitmap covering _cms_space, tracing through grey objects.
3212 _timer.start();
3213 do_scan_and_mark(worker_id, _cms_space);
3214 _timer.stop();
3215 log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3216
3217 // ... do work stealing
3218 _timer.reset();
3219 _timer.start();
3220 do_work_steal(worker_id);
3221 _timer.stop();
3222 log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3223 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3224 assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3225 // Note that under the current task protocol, the
3226 // following assertion is true even if the spaces
3227 // expanded since the completion of the concurrent
3228 // marking. XXX This will likely change under a strict
3229 // ABORT semantics.
3230 // After perm removal the comparison was changed to
3231 // greater than or equal to from strictly greater than.
3232 // Before perm removal the highest address sweep would
3233 // have been at the end of perm gen but now is at the
3234 // end of the tenured gen.
3235 assert(_global_finger >= _cms_space->end(),
3236 "All tasks have been completed");
3237 DEBUG_ONLY(_collector->verify_overflow_empty();)
3238 }
3239
3240 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3241 HeapWord* read = _global_finger;
3242 HeapWord* cur = read;
3243 while (f > read) {
3244 cur = read;
3245 read = Atomic::cmpxchg(f, &_global_finger, cur);
3246 if (cur == read) {
3247 // our cas succeeded
3248 assert(_global_finger >= f, "protocol consistency");
3249 break;
3250 }
3251 }
3252 }
3253
3254 // This is really inefficient, and should be redone by
3255 // using (not yet available) block-read and -write interfaces to the
3256 // stack and the work_queue. XXX FIX ME !!!
3257 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3258 OopTaskQueue* work_q) {
3259 // Fast lock-free check
3260 if (ovflw_stk->length() == 0) {
3261 return false;
3262 }
3263 assert(work_q->size() == 0, "Shouldn't steal");
3264 MutexLocker ml(ovflw_stk->par_lock(),
3265 Mutex::_no_safepoint_check_flag);
3266 // Grab up to 1/4 the size of the work queue
3267 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3268 (size_t)ParGCDesiredObjsFromOverflowList);
3269 num = MIN2(num, ovflw_stk->length());
3270 for (int i = (int) num; i > 0; i--) {
3271 oop cur = ovflw_stk->pop();
3272 assert(cur != NULL, "Counted wrong?");
3273 work_q->push(cur);
3274 }
3275 return num > 0;
3276 }
3277
3278 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3279 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3280 int n_tasks = pst->n_tasks();
3281 // We allow that there may be no tasks to do here because
3282 // we are restarting after a stack overflow.
3283 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3284 uint nth_task = 0;
3285
3286 HeapWord* aligned_start = sp->bottom();
3287 if (sp->used_region().contains(_restart_addr)) {
3288 // Align down to a card boundary for the start of 0th task
3289 // for this space.
3290 aligned_start = align_down(_restart_addr, CardTable::card_size);
3291 }
3292
3293 size_t chunk_size = sp->marking_task_size();
3294 while (pst->try_claim_task(/* reference */ nth_task)) {
3295 // Having claimed the nth task in this space,
3296 // compute the chunk that it corresponds to:
3297 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3298 aligned_start + (nth_task+1)*chunk_size);
3299 // Try and bump the global finger via a CAS;
3300 // note that we need to do the global finger bump
3301 // _before_ taking the intersection below, because
3302 // the task corresponding to that region will be
3303 // deemed done even if the used_region() expands
3304 // because of allocation -- as it almost certainly will
3305 // during start-up while the threads yield in the
3306 // closure below.
3307 HeapWord* finger = span.end();
3308 bump_global_finger(finger); // atomically
3309 // There are null tasks here corresponding to chunks
3310 // beyond the "top" address of the space.
3311 span = span.intersection(sp->used_region());
3312 if (!span.is_empty()) { // Non-null task
3313 HeapWord* prev_obj;
3314 assert(!span.contains(_restart_addr) || nth_task == 0,
3315 "Inconsistency");
3316 if (nth_task == 0) {
3317 // For the 0th task, we'll not need to compute a block_start.
3318 if (span.contains(_restart_addr)) {
3319 // In the case of a restart because of stack overflow,
3320 // we might additionally skip a chunk prefix.
3321 prev_obj = _restart_addr;
3322 } else {
3323 prev_obj = span.start();
3324 }
3325 } else {
3326 // We want to skip the first object because
3327 // the protocol is to scan any object in its entirety
3328 // that _starts_ in this span; a fortiori, any
3329 // object starting in an earlier span is scanned
3330 // as part of an earlier claimed task.
3331 // Below we use the "careful" version of block_start
3332 // so we do not try to navigate uninitialized objects.
3333 prev_obj = sp->block_start_careful(span.start());
3334 // Below we use a variant of block_size that uses the
3335 // Printezis bits to avoid waiting for allocated
3336 // objects to become initialized/parsable.
3337 while (prev_obj < span.start()) {
3338 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3339 if (sz > 0) {
3340 prev_obj += sz;
3341 } else {
3342 // In this case we may end up doing a bit of redundant
3343 // scanning, but that appears unavoidable, short of
3344 // locking the free list locks; see bug 6324141.
3345 break;
3346 }
3347 }
3348 }
3349 if (prev_obj < span.end()) {
3350 MemRegion my_span = MemRegion(prev_obj, span.end());
3351 // Do the marking work within a non-empty span --
3352 // the last argument to the constructor indicates whether the
3353 // iteration should be incremental with periodic yields.
3354 ParMarkFromRootsClosure cl(this, _collector, my_span,
3355 &_collector->_markBitMap,
3356 work_queue(i),
3357 &_collector->_markStack);
3358 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3359 } // else nothing to do for this task
3360 } // else nothing to do for this task
3361 }
3362 // We'd be tempted to assert here that since there are no
3363 // more tasks left to claim in this space, the global_finger
3364 // must exceed space->top() and a fortiori space->end(). However,
3365 // that would not quite be correct because the bumping of
3366 // global_finger occurs strictly after the claiming of a task,
3367 // so by the time we reach here the global finger may not yet
3368 // have been bumped up by the thread that claimed the last
3369 // task.
3370 pst->all_tasks_completed();
3371 }
3372
3373 class ParConcMarkingClosure: public MetadataVisitingOopIterateClosure {
3374 private:
3375 CMSCollector* _collector;
3376 CMSConcMarkingTask* _task;
3377 MemRegion _span;
3378 CMSBitMap* _bit_map;
3379 CMSMarkStack* _overflow_stack;
3380 OopTaskQueue* _work_queue;
3381 protected:
3382 DO_OOP_WORK_DEFN
3383 public:
3384 ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3385 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3386 MetadataVisitingOopIterateClosure(collector->ref_processor()),
3387 _collector(collector),
3388 _task(task),
3389 _span(collector->_span),
3390 _bit_map(bit_map),
3391 _overflow_stack(overflow_stack),
3392 _work_queue(work_queue)
3393 { }
3394 virtual void do_oop(oop* p);
3395 virtual void do_oop(narrowOop* p);
3396
3397 void trim_queue(size_t max);
3398 void handle_stack_overflow(HeapWord* lost);
3399 void do_yield_check() {
3400 if (_task->should_yield()) {
3401 _task->yield();
3402 }
3403 }
3404 };
3405
3406 DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3407
3408 // Grey object scanning during work stealing phase --
3409 // the salient assumption here is that any references
3410 // that are in these stolen objects being scanned must
3411 // already have been initialized (else they would not have
3412 // been published), so we do not need to check for
3413 // uninitialized objects before pushing here.
3414 void ParConcMarkingClosure::do_oop(oop obj) {
3415 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3416 HeapWord* addr = (HeapWord*)obj;
3417 // Check if oop points into the CMS generation
3418 // and is not marked
3419 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3420 // a white object ...
3421 // If we manage to "claim" the object, by being the
3422 // first thread to mark it, then we push it on our
3423 // marking stack
3424 if (_bit_map->par_mark(addr)) { // ... now grey
3425 // push on work queue (grey set)
3426 bool simulate_overflow = false;
3427 NOT_PRODUCT(
3428 if (CMSMarkStackOverflowALot &&
3429 _collector->simulate_overflow()) {
3430 // simulate a stack overflow
3431 simulate_overflow = true;
3432 }
3433 )
3434 if (simulate_overflow ||
3435 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3436 // stack overflow
3437 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3438 // We cannot assert that the overflow stack is full because
3439 // it may have been emptied since.
3440 assert(simulate_overflow ||
3441 _work_queue->size() == _work_queue->max_elems(),
3442 "Else push should have succeeded");
3443 handle_stack_overflow(addr);
3444 }
3445 } // Else, some other thread got there first
3446 do_yield_check();
3447 }
3448 }
3449
3450 void ParConcMarkingClosure::trim_queue(size_t max) {
3451 while (_work_queue->size() > max) {
3452 oop new_oop;
3453 if (_work_queue->pop_local(new_oop)) {
3454 assert(oopDesc::is_oop(new_oop), "Should be an oop");
3455 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3456 assert(_span.contains((HeapWord*)new_oop), "Not in span");
3457 new_oop->oop_iterate(this); // do_oop() above
3458 do_yield_check();
3459 }
3460 }
3461 }
3462
3463 // Upon stack overflow, we discard (part of) the stack,
3464 // remembering the least address amongst those discarded
3465 // in CMSCollector's _restart_address.
3466 void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3467 // We need to do this under a mutex to prevent other
3468 // workers from interfering with the work done below.
3469 MutexLocker ml(_overflow_stack->par_lock(),
3470 Mutex::_no_safepoint_check_flag);
3471 // Remember the least grey address discarded
3472 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3473 _collector->lower_restart_addr(ra);
3474 _overflow_stack->reset(); // discard stack contents
3475 _overflow_stack->expand(); // expand the stack if possible
3476 }
3477
3478
3479 void CMSConcMarkingTask::do_work_steal(int i) {
3480 OopTaskQueue* work_q = work_queue(i);
3481 oop obj_to_scan;
3482 CMSBitMap* bm = &(_collector->_markBitMap);
3483 CMSMarkStack* ovflw = &(_collector->_markStack);
3484 ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3485 while (true) {
3486 cl.trim_queue(0);
3487 assert(work_q->size() == 0, "Should have been emptied above");
3488 if (get_work_from_overflow_stack(ovflw, work_q)) {
3489 // Can't assert below because the work obtained from the
3490 // overflow stack may already have been stolen from us.
3491 // assert(work_q->size() > 0, "Work from overflow stack");
3492 continue;
3493 } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
3494 assert(oopDesc::is_oop(obj_to_scan), "Should be an oop");
3495 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3496 obj_to_scan->oop_iterate(&cl);
3497 } else if (terminator()->offer_termination(&_term_term)) {
3498 assert(work_q->size() == 0, "Impossible!");
3499 break;
3500 } else if (yielding() || should_yield()) {
3501 yield();
3502 }
3503 }
3504 }
3505
3506 // This is run by the CMS (coordinator) thread.
3507 void CMSConcMarkingTask::coordinator_yield() {
3508 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3509 "CMS thread should hold CMS token");
3510 // First give up the locks, then yield, then re-lock
3511 // We should probably use a constructor/destructor idiom to
3512 // do this unlock/lock or modify the MutexUnlocker class to
3513 // serve our purpose. XXX
3514 assert_lock_strong(_bit_map_lock);
3515 _bit_map_lock->unlock();
3516 ConcurrentMarkSweepThread::desynchronize(true);
3517 _collector->stopTimer();
3518 _collector->incrementYields();
3519
3520 // It is possible for whichever thread initiated the yield request
3521 // not to get a chance to wake up and take the bitmap lock between
3522 // this thread releasing it and reacquiring it. So, while the
3523 // should_yield() flag is on, let's sleep for a bit to give the
3524 // other thread a chance to wake up. The limit imposed on the number
3525 // of iterations is defensive, to avoid any unforeseen circumstances
3526 // putting us into an infinite loop. Since it's always been this
3527 // (coordinator_yield()) method that was observed to cause the
3528 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3529 // which is by default non-zero. For the other seven methods that
3530 // also perform the yield operation, we are using a different
3531 // parameter (CMSYieldSleepCount) which is by default zero. This way we
3532 // can enable the sleeping for those methods too, if necessary.
3533 // See 6442774.
3534 //
3535 // We really need to reconsider the synchronization between the GC
3536 // thread and the yield-requesting threads in the future and we
3537 // should really use wait/notify, which is the recommended
3538 // way of doing this type of interaction. Additionally, we should
3539 // consolidate the eight methods that do the yield operation and they
3540 // are almost identical into one for better maintainability and
3541 // readability. See 6445193.
3542 //
3543 // Tony 2006.06.29
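// The wait/notify scheme alluded to above would, very roughly, replace the
// bounded sleep loop below with something like (pseudo-code only, using a
// hypothetical 'yield_monitor'; not actual HotSpot API usage):
//
//   MonitorLocker ml(yield_monitor);
//   while (ConcurrentMarkSweepThread::should_yield() &&
//          !CMSCollector::foregroundGCIsActive()) {
//     ml.wait();                 // park until the requesting thread notifies
//   }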
3544 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3545 ConcurrentMarkSweepThread::should_yield() &&
3546 !CMSCollector::foregroundGCIsActive(); ++i) {
3547 os::sleep(Thread::current(), 1, false);
3548 }
3549
3550 ConcurrentMarkSweepThread::synchronize(true);
3551 _bit_map_lock->lock_without_safepoint_check();
3552 _collector->startTimer();
3553 }
3554
3555 bool CMSCollector::do_marking_mt() {
3556 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3557 uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3558 conc_workers()->active_workers(),
3559 Threads::number_of_non_daemon_threads());
3560 num_workers = conc_workers()->update_active_workers(num_workers);
3561 log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
3562
3563 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
3564
3565 CMSConcMarkingTask tsk(this,
3566 cms_space,
3567 conc_workers(),
3568 task_queues());
3569
3570 // Since the actual number of workers we get may be different
3571 // from the number we requested above, do we need to do anything different
3572 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3573 // class?? XXX
3574 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
3575
3576 // Refs discovery is already non-atomic.
3577 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3578 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3579 conc_workers()->start_task(&tsk);
3580 while (tsk.yielded()) {
3581 tsk.coordinator_yield();
3582 conc_workers()->continue_task(&tsk);
3583 }
3584 // If the task was aborted, _restart_addr will be non-NULL
3585 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3586 while (_restart_addr != NULL) {
3587 // XXX For now we do not make use of ABORTED state and have not
3588 // yet implemented the right abort semantics (even in the original
3589 // single-threaded CMS case). That needs some more investigation
3590 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3591 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3592 // If _restart_addr is non-NULL, a marking stack overflow
3593 // occurred; we need to do a fresh marking iteration from the
3594 // indicated restart address.
3595 if (_foregroundGCIsActive) {
3596 // We may be running into repeated stack overflows, having
3597 // reached the limit of the stack size, while making very
3598 // slow forward progress. It may be best to bail out and
3599 // let the foreground collector do its job.
3600 // Clear _restart_addr, so that foreground GC
3601 // works from scratch. This avoids the headache of
3602 // a "rescan" which would otherwise be needed because
3603 // of the dirty mod union table & card table.
3604 _restart_addr = NULL;
3605 return false;
3606 }
3607 // Adjust the task to restart from _restart_addr
3608 tsk.reset(_restart_addr);
3609 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
3610 _restart_addr);
3611 _restart_addr = NULL;
3612 // Get the workers going again
3613 conc_workers()->start_task(&tsk);
3614 while (tsk.yielded()) {
3615 tsk.coordinator_yield();
3616 conc_workers()->continue_task(&tsk);
3617 }
3618 }
3619 assert(tsk.completed(), "Inconsistency");
3620 assert(tsk.result() == true, "Inconsistency");
3621 return true;
3622 }
3623
3624 bool CMSCollector::do_marking_st() {
3625 ResourceMark rm;
3626 HandleMark hm;
3627
3628 // Temporarily make refs discovery single threaded (non-MT)
3629 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3630 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3631 &_markStack, CMSYield);
3632 // the last argument to iterate indicates whether the iteration
3633 // should be incremental with periodic yields.
3634 _markBitMap.iterate(&markFromRootsClosure);
3635 // If _restart_addr is non-NULL, a marking stack overflow
3636 // occurred; we need to do a fresh iteration from the
3637 // indicated restart address.
3638 while (_restart_addr != NULL) {
3639 if (_foregroundGCIsActive) {
3640 // We may be running into repeated stack overflows, having
3641 // reached the limit of the stack size, while making very
3642 // slow forward progress. It may be best to bail out and
3643 // let the foreground collector do its job.
3644 // Clear _restart_addr, so that foreground GC
3645 // works from scratch. This avoids the headache of
3646 // a "rescan" which would otherwise be needed because
3647 // of the dirty mod union table & card table.
3648 _restart_addr = NULL;
3649 return false; // indicating failure to complete marking
3650 }
3651 // Deal with stack overflow:
3652 // we restart marking from _restart_addr
3653 HeapWord* ra = _restart_addr;
3654 markFromRootsClosure.reset(ra);
3655 _restart_addr = NULL;
3656 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3657 }
3658 return true;
3659 }
3660
3661 void CMSCollector::preclean() {
3662 check_correct_thread_executing();
3663 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3664 verify_work_stacks_empty();
3665 verify_overflow_empty();
3666 _abort_preclean = false;
3667 if (CMSPrecleaningEnabled) {
3668 if (!CMSEdenChunksRecordAlways) {
3669 _eden_chunk_index = 0;
3670 }
3671 size_t used = get_eden_used();
3672 size_t capacity = get_eden_capacity();
3673 // Don't start sampling unless we will get sufficiently
3674 // many samples.
3675 if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
3676 * CMSScheduleRemarkEdenPenetration)) {
3677 _start_sampling = true;
3678 } else {
3679 _start_sampling = false;
3680 }
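// Worked example for the check above (illustrative values, not necessarily
// the defaults): with CMSScheduleRemarkSamplingRatio = 5 and
// CMSScheduleRemarkEdenPenetration = 50, a 1000M-capacity eden starts
// sampling only while used < ((1000M / 5) / 100) * 50 = 100M, i.e. below
// 10% occupancy; above that, eden is already too close to the remark
// scheduling target for sampling to collect a useful number of samples.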
3681 GCTraceCPUTime tcpu;
3682 CMSPhaseAccounting pa(this, "Concurrent Preclean");
3683 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3684 }
3685 CMSTokenSync x(true); // is cms thread
3686 if (CMSPrecleaningEnabled) {
3687 sample_eden();
3688 _collectorState = AbortablePreclean;
3689 } else {
3690 _collectorState = FinalMarking;
3691 }
3692 verify_work_stacks_empty();
3693 verify_overflow_empty();
3694 }
3695
3696 // Try and schedule the remark such that young gen
3697 // occupancy is CMSScheduleRemarkEdenPenetration %.
3698 void CMSCollector::abortable_preclean() {
3699 check_correct_thread_executing();
3700 assert(CMSPrecleaningEnabled, "Inconsistent control state");
3701 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3702
3703 // If Eden's current occupancy is below this threshold,
3704 // immediately schedule the remark; else preclean
3705 // past the next scavenge in an effort to
3706 // schedule the pause as described above. By choosing
3707 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3708 // we will never do an actual abortable preclean cycle.
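// For example (illustrative value): with CMSScheduleRemarkEdenSizeThreshold
// at 2M, the abortable preclean loop below is entered as soon as eden holds
// more than 2M; with the threshold set to at least the maximum eden size the
// test below is never true and the remark is scheduled immediately, as noted
// above.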
3709 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3710 GCTraceCPUTime tcpu;
3711 CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3712 // We need more smarts in the abortable preclean
3713 // loop below to deal with cases where allocation
3714 // in young gen is very very slow, and our precleaning
3715 // is running a losing race against a horde of
3716 // mutators intent on flooding us with CMS updates
3717 // (dirty cards).
3718 // One, admittedly dumb, strategy is to give up
3719 // after a certain number of abortable precleaning loops
3720 // or after a certain maximum time. We want to make
3721 // this smarter in the next iteration.
3722 // XXX FIX ME!!! YSR
3723 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3724 while (!(should_abort_preclean() ||
3725 ConcurrentMarkSweepThread::cmst()->should_terminate())) {
3726 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3727 cumworkdone += workdone;
3728 loops++;
3729 // Voluntarily terminate abortable preclean phase if we have
3730 // been at it for too long.
3731 if ((CMSMaxAbortablePrecleanLoops != 0) &&
3732 loops >= CMSMaxAbortablePrecleanLoops) {
3733 log_debug(gc)(" CMS: abort preclean due to loops ");
3734 break;
3735 }
3736 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3737 log_debug(gc)(" CMS: abort preclean due to time ");
3738 break;
3739 }
3740 // If we are doing little work each iteration, we should
3741 // take a short break.
3742 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3743 // Sleep for some time, waiting for work to accumulate
3744 stopTimer();
3745 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3746 startTimer();
3747 waited++;
3748 }
3749 }
3750 log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",
3751 loops, waited, cumworkdone);
3752 }
3753 CMSTokenSync x(true); // is cms thread
3754 if (_collectorState != Idling) {
3755 assert(_collectorState == AbortablePreclean,
3756 "Spontaneous state transition?");
3757 _collectorState = FinalMarking;
3758 } // Else, a foreground collection completed this CMS cycle.
3759 return;
3760 }
3761
3762 // Respond to an Eden sampling opportunity
3763 void CMSCollector::sample_eden() {
3764 // Make sure a young gc cannot sneak in between our
3765 // reading and recording of a sample.
3766 assert(Thread::current()->is_ConcurrentGC_thread(),
3767 "Only the cms thread may collect Eden samples");
3768 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3769 "Should collect samples while holding CMS token");
3770 if (!_start_sampling) {
3771 return;
3772 }
3773 // When CMSEdenChunksRecordAlways is true, the eden chunk array
3774 // is populated by the young generation.
3775 if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3776 if (_eden_chunk_index < _eden_chunk_capacity) {
3777 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
3778 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3779 "Unexpected state of Eden");
3780 // We'd like to check that what we just sampled is an oop-start address;
3781 // however, we cannot do that here since the object may not yet have been
3782 // initialized. So we'll instead do the check when we _use_ this sample
3783 // later.
3784 if (_eden_chunk_index == 0 ||
3785 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3786 _eden_chunk_array[_eden_chunk_index-1])
3787 >= CMSSamplingGrain)) {
3788 _eden_chunk_index++; // commit sample
3789 }
3790 }
3791 }
3792 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3793 size_t used = get_eden_used();
3794 size_t capacity = get_eden_capacity();
3795 assert(used <= capacity, "Unexpected state of Eden");
3796 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3797 _abort_preclean = true;
3798 }
3799 }
3800 }
3801
3802 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3803 assert(_collectorState == Precleaning ||
3804 _collectorState == AbortablePreclean, "incorrect state");
3805 ResourceMark rm;
3806 HandleMark hm;
3807
3808 // Precleaning is currently not MT but the reference processor
3809 // may be set for MT. Disable it temporarily here.
3810 ReferenceProcessor* rp = ref_processor();
3811 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3812
3813 // Do one pass of scrubbing the discovered reference lists
3814 // to remove any reference objects with strongly-reachable
3815 // referents.
3816 if (clean_refs) {
3817 CMSPrecleanRefsYieldClosure yield_cl(this);
3818 assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
3819 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3820 &_markStack, true /* preclean */);
3821 CMSDrainMarkingStackClosure complete_trace(this,
3822 _span, &_markBitMap, &_markStack,
3823 &keep_alive, true /* preclean */);
3824
3825 // We don't want this step to interfere with a young
3826 // collection because we don't want to take CPU
3827 // or memory bandwidth away from the young GC threads
3828 // (which may be as many as there are CPUs).
3829 // Note that we don't need to protect ourselves from
3830 // interference with mutators because they can't
3831 // manipulate the discovered reference lists nor affect
3832 // the computed reachability of the referents, the
3833 // only properties manipulated by the precleaning
3834 // of these reference lists.
3835 stopTimer();
3836 CMSTokenSyncWithLocks x(true /* is cms thread */,
3837 bitMapLock());
3838 startTimer();
3839 sample_eden();
3840
3841 // The following will yield to allow foreground
3842 // collection to proceed promptly. XXX YSR:
3843 // The code in this method may need further
3844 // tweaking for better performance and some restructuring
3845 // for cleaner interfaces.
3846 GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3847 rp->preclean_discovered_references(
3848 rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3849 gc_timer);
3850 }
3851
3852 if (clean_survivor) { // preclean the active survivor space(s)
3853 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3854 &_markBitMap, &_modUnionTable,
3855 &_markStack, true /* precleaning phase */);
3856 stopTimer();
3857 CMSTokenSyncWithLocks ts(true /* is cms thread */,
3858 bitMapLock());
3859 startTimer();
3860 unsigned int before_count =
3861 CMSHeap::heap()->total_collections();
3862 SurvivorSpacePrecleanClosure
3863 sss_cl(this, _span, &_markBitMap, &_markStack,
3864 &pam_cl, before_count, CMSYield);
3865 _young_gen->from()->object_iterate_careful(&sss_cl);
3866 _young_gen->to()->object_iterate_careful(&sss_cl);
3867 }
3868 MarkRefsIntoAndScanClosure
3869 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3870 &_markStack, this, CMSYield,
3871 true /* precleaning phase */);
3872 // CAUTION: The following closure has persistent state that may need to
3873 // be reset upon a decrease in the sequence of addresses it
3874 // processes.
3875 ScanMarkedObjectsAgainCarefullyClosure
3876 smoac_cl(this, _span,
3877 &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3878
3879 // Preclean dirty cards in ModUnionTable and CardTable using
3880 // appropriate convergence criterion;
3881 // repeat CMSPrecleanIter times unless we find that
3882 // we are losing.
3883 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3884 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3885 "Bad convergence multiplier");
3886 assert(CMSPrecleanThreshold >= 100,
3887 "Unreasonably low CMSPrecleanThreshold");
3888
3889 size_t numIter, cumNumCards, lastNumCards, curNumCards;
3890 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3891 numIter < CMSPrecleanIter;
3892 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3893 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
3894 log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3895 // Either there are very few dirty cards, so re-mark
3896 // pause will be small anyway, or our pre-cleaning isn't
3897 // that much faster than the rate at which cards are being
3898 // dirtied, so we might as well stop and re-mark since
3899 // precleaning won't improve our re-mark time by much.
3900 if (curNumCards <= CMSPrecleanThreshold ||
3901 (numIter > 0 &&
3902 (curNumCards * CMSPrecleanDenominator >
3903 lastNumCards * CMSPrecleanNumerator))) {
3904 numIter++;
3905 cumNumCards += curNumCards;
3906 break;
3907 }
3908 }
3909
3910 preclean_cld(&mrias_cl, _cmsGen->freelistLock());
3911
3912 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3913 cumNumCards += curNumCards;
3914 log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3915 curNumCards, cumNumCards, numIter);
3916 return cumNumCards; // as a measure of useful work done
3917 }
3918
3919 // PRECLEANING NOTES:
3920 // Precleaning involves:
3921 // . reading the bits of the modUnionTable and clearing the set bits.
3922 // . For the cards corresponding to the set bits, we scan the
3923 // objects on those cards. This means we need the free_list_lock
3924 // so that we can safely iterate over the CMS space when scanning
3925 // for oops.
3926 // . When we scan the objects, we'll be both reading and setting
3927 // marks in the marking bit map, so we'll need the marking bit map.
3928 // . For protecting _collector_state transitions, we take the CGC_lock.
3929 // Note that any races in the reading of card table entries by the
3930 // CMS thread on the one hand and the clearing of those entries by the
3931 // VM thread or the setting of those entries by the mutator threads on the
3932 // other are quite benign. However, for efficiency it makes sense to keep
3933 // the VM thread from racing with the CMS thread while the latter is
3934 // copying dirty card info to the modUnionTable. We therefore also use the
3935 // CGC_lock to protect the reading of the card table and the mod union
3936 // table by the CM thread.
3937 // . We run concurrently with mutator updates, so scanning
3938 // needs to be done carefully -- we should not try to scan
3939 // potentially uninitialized objects.
3940 //
3941 // Locking strategy: While holding the CGC_lock, we scan over and
3942 // reset a maximal dirty range of the mod union / card tables, then lock
3943 // the free_list_lock and bitmap lock to do a full marking, then
3944 // release these locks; and repeat the cycle. This allows for a
3945 // certain amount of fairness in the sharing of these locks between
3946 // the CMS collector on the one hand, and the VM thread and the
3947 // mutators on the other.
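// In outline, one iteration of each precleaning loop below therefore looks
// like this (sketch only; get_and_reset_dirty_range() and scan_objects_on()
// stand in for the mod-union-table and card-table specific calls used below):
//
//   { CMSTokenSync ts(true);                        // CMS token / CGC_lock
//     dirtyRegion = get_and_reset_dirty_range(nextAddr, endAddr); }
//   if (!dirtyRegion.is_empty()) {
//     CMSTokenSyncWithLocks tsl(true, freelistLock(), bitMapLock());
//     scan_objects_on(dirtyRegion);                 // full marking of the range
//   }
//   // all locks dropped here; repeat with the next maximal dirty range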
3948
3949 // NOTE: preclean_mod_union_table() and preclean_card_table()
3950 // further below are largely identical; if you need to modify
3951 // one of these methods, please check the other method too.
3952
3953 size_t CMSCollector::preclean_mod_union_table(
3954 ConcurrentMarkSweepGeneration* old_gen,
3955 ScanMarkedObjectsAgainCarefullyClosure* cl) {
3956 verify_work_stacks_empty();
3957 verify_overflow_empty();
3958
3959 // strategy: starting with the first card, accumulate contiguous
3960 // ranges of dirty cards; clear these cards, then scan the region
3961 // covered by these cards.
3962
3963 // Since all of the MUT is committed ahead, we can just use
3964 // that, in case the generations expand while we are precleaning.
3965 // It might also be fine to just use the committed part of the
3966 // generation, but we might potentially miss cards when the
3967 // generation is rapidly expanding while we are in the midst
3968 // of precleaning.
3969 HeapWord* startAddr = old_gen->reserved().start();
3970 HeapWord* endAddr = old_gen->reserved().end();
3971
3972 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
3973
3974 size_t numDirtyCards, cumNumDirtyCards;
3975 HeapWord *nextAddr, *lastAddr;
3976 for (cumNumDirtyCards = numDirtyCards = 0,
3977 nextAddr = lastAddr = startAddr;
3978 nextAddr < endAddr;
3979 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3980
3981 ResourceMark rm;
3982 HandleMark hm;
3983
3984 MemRegion dirtyRegion;
3985 {
3986 stopTimer();
3987 // Potential yield point
3988 CMSTokenSync ts(true);
3989 startTimer();
3990 sample_eden();
3991 // Get dirty region starting at nextOffset (inclusive),
3992 // simultaneously clearing it.
3993 dirtyRegion =
3994 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3995 assert(dirtyRegion.start() >= nextAddr,
3996 "returned region inconsistent?");
3997 }
3998 // Remember where the next search should begin.
3999 // The returned region (if non-empty) is a right open interval,
4000 // so lastOffset is obtained from the right end of that
4001 // interval.
4002 lastAddr = dirtyRegion.end();
4003 // Should do something more transparent and less hacky XXX
4004 numDirtyCards =
4005 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4006
4007 // We'll scan the cards in the dirty region (with periodic
4008 // yields for foreground GC as needed).
4009 if (!dirtyRegion.is_empty()) {
4010 assert(numDirtyCards > 0, "consistency check");
4011 HeapWord* stop_point = NULL;
4012 stopTimer();
4013 // Potential yield point
4014 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
4015 bitMapLock());
4016 startTimer();
4017 {
4018 verify_work_stacks_empty();
4019 verify_overflow_empty();
4020 sample_eden();
4021 stop_point =
4022 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4023 }
4024 if (stop_point != NULL) {
4025 // The careful iteration stopped early either because it found an
4026 // uninitialized object, or because we were in the midst of an
4027 // "abortable preclean", which should now be aborted. Redirty
4028 // the bits corresponding to the partially-scanned or unscanned
4029 // cards. We'll either restart at the next block boundary or
4030 // abort the preclean.
4031 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4032 "Should only be AbortablePreclean.");
4033 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4034 if (should_abort_preclean()) {
4035 break; // out of preclean loop
4036 } else {
4037 // Compute the next address at which preclean should pick up;
4038 // might need bitMapLock in order to read P-bits.
4039 lastAddr = next_card_start_after_block(stop_point);
4040 }
4041 }
4042 } else {
4043 assert(lastAddr == endAddr, "consistency check");
4044 assert(numDirtyCards == 0, "consistency check");
4045 break;
4046 }
4047 }
4048 verify_work_stacks_empty();
4049 verify_overflow_empty();
4050 return cumNumDirtyCards;
4051 }
4052
4053 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4054 // below are largely identical; if you need to modify
4055 // one of these methods, please check the other method too.
4056
4057 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
4058 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4059 // strategy: it's similar to preclean_mod_union_table above, in that
4060 // we accumulate contiguous ranges of dirty cards, mark these cards
4061 // precleaned, then scan the region covered by these cards.
4062 HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high());
4063 HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4064
4065 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
4066
4067 size_t numDirtyCards, cumNumDirtyCards;
4068 HeapWord *lastAddr, *nextAddr;
4069
4070 for (cumNumDirtyCards = numDirtyCards = 0,
4071 nextAddr = lastAddr = startAddr;
4072 nextAddr < endAddr;
4073 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4074
4075 ResourceMark rm;
4076 HandleMark hm;
4077
4078 MemRegion dirtyRegion;
4079 {
4080 // See comments in "Precleaning notes" above on why we
4081 // do this locking. XXX Could the locking overheads be
4082 // too high when dirty cards are sparse? [I don't think so.]
4083 stopTimer();
4084 CMSTokenSync x(true); // is cms thread
4085 startTimer();
4086 sample_eden();
4087 // Get and clear dirty region from card table
4088 dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
4089 true,
4090 CardTable::precleaned_card_val());
4091
4092 assert(dirtyRegion.start() >= nextAddr,
4093 "returned region inconsistent?");
4094 }
4095 lastAddr = dirtyRegion.end();
4096 numDirtyCards =
4097 dirtyRegion.word_size()/CardTable::card_size_in_words;
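// Illustrative arithmetic (sketch only, not compiled; assumes a typical
// 64-bit build where a card spans 512 bytes, i.e. 64 HeapWords, so
// CardTable::card_size_in_words is 64):
//   size_t cards = dirtyRegion.word_size() / CardTable::card_size_in_words;
//   // e.g. a 6400-word dirty region -> 6400 / 64 = 100 dirty cards
// The exact constants are platform-dependent; only the division mirrors the
// statement above.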
4098
4099 if (!dirtyRegion.is_empty()) {
4100 stopTimer();
4101 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4102 startTimer();
4103 sample_eden();
4104 verify_work_stacks_empty();
4105 verify_overflow_empty();
4106 HeapWord* stop_point =
4107 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4108 if (stop_point != NULL) {
4109 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4110 "Should only be AbortablePreclean.");
4111 _ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4112 if (should_abort_preclean()) {
4113 break; // out of preclean loop
4114 } else {
4115 // Compute the next address at which preclean should pick up.
4116 lastAddr = next_card_start_after_block(stop_point);
4117 }
4118 }
4119 } else {
4120 break;
4121 }
4122 }
4123 verify_work_stacks_empty();
4124 verify_overflow_empty();
4125 return cumNumDirtyCards;
4126 }
4127
4128 class PrecleanCLDClosure : public CLDClosure {
4129 MetadataVisitingOopsInGenClosure* _cm_closure;
4130 public:
4131 PrecleanCLDClosure(MetadataVisitingOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
4132 void do_cld(ClassLoaderData* cld) {
4133 if (cld->has_accumulated_modified_oops()) {
4134 cld->clear_accumulated_modified_oops();
4135
4136 _cm_closure->do_cld(cld);
4137 }
4138 }
4139 };
4140
4141 // The freelist lock is needed to prevent asserts; is it really needed?
4142 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4143 // Needed to walk CLDG
4144 MutexLocker ml(ClassLoaderDataGraph_lock);
4145
4146 cl->set_freelistLock(freelistLock);
4147
4148 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4149
4150 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4151 // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4152 PrecleanCLDClosure preclean_closure(cl);
4153 ClassLoaderDataGraph::cld_do(&preclean_closure);
4154
4155 verify_work_stacks_empty();
4156 verify_overflow_empty();
4157 }
4158
4159 void CMSCollector::checkpointRootsFinal() {
4160 assert(_collectorState == FinalMarking, "incorrect state transition?");
4161 check_correct_thread_executing();
4162 // world is stopped at this checkpoint
4163 assert(SafepointSynchronize::is_at_safepoint(),
4164 "world should be stopped");
4165 TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
4166
4167 verify_work_stacks_empty();
4168 verify_overflow_empty();
4169
4170 log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4171 _young_gen->used() / K, _young_gen->capacity() / K);
4172 {
4173 if (CMSScavengeBeforeRemark) {
4174 CMSHeap* heap = CMSHeap::heap();
4175 // Temporarily set flag to false, GCH->do_collection will
4176 // expect it to be false and set to true
4177 FlagSetting fl(heap->_is_gc_active, false);
4178
4179 heap->do_collection(true, // full (i.e. force, see below)
4180 false, // !clear_all_soft_refs
4181 0, // size
4182 false, // is_tlab
4183 GenCollectedHeap::YoungGen // type
4184 );
4185 }
4186 FreelistLocker x(this);
4187 MutexLocker y(bitMapLock(),
4188 Mutex::_no_safepoint_check_flag);
4189 checkpointRootsFinalWork();
4190 _cmsGen->cmsSpace()->recalculate_used_stable();
4191 }
4192 verify_work_stacks_empty();
4193 verify_overflow_empty();
4194 }
4195
4196 void CMSCollector::checkpointRootsFinalWork() {
4197 GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4198
4199 assert(haveFreelistLocks(), "must have free list locks");
4200 assert_lock_strong(bitMapLock());
4201
4202 ResourceMark rm;
4203 HandleMark hm;
4204
4205 CMSHeap* heap = CMSHeap::heap();
4206
4207 assert(haveFreelistLocks(), "must have free list locks");
4208 assert_lock_strong(bitMapLock());
4209
4210 // We might assume that we need not fill TLAB's when
4211 // CMSScavengeBeforeRemark is set, because we may have just done
4212 // a scavenge which would have filled all TLAB's -- and besides
4213 // Eden would be empty. This however may not always be the case --
4214 // for instance although we asked for a scavenge, it may not have
4215 // happened because of a JNI critical section. We probably need
4216 // a policy for deciding whether we can in that case wait until
4217 // the critical section releases and then do the remark following
4218 // the scavenge, and skip it here. In the absence of that policy,
4219 // or of an indication of whether the scavenge did indeed occur,
4220 // we cannot rely on TLAB's having been filled and must do
4221 // so here just in case a scavenge did not happen.
4222 heap->ensure_parsability(false); // fill TLAB's, but no need to retire them
4223 // Update the saved marks which may affect the root scans.
4224 heap->save_marks();
4225
4226 print_eden_and_survivor_chunk_arrays();
4227
4228 {
4229 #if COMPILER2_OR_JVMCI
4230 DerivedPointerTableDeactivate dpt_deact;
4231 #endif
4232
4233 // Note on the role of the mod union table:
4234 // Since the marker in "markFromRoots" marks concurrently with
4235 // mutators, it is possible for some reachable objects not to have been
4236 // scanned. For instance, an only reference to an object A was
4237 // placed in object B after the marker scanned B. Unless B is rescanned,
4238 // A would be collected. Such updates to references in marked objects
4239 // are detected via the mod union table which is the set of all cards
4240 // dirtied since the first checkpoint in this GC cycle and prior to
4241 // the most recent young generation GC, minus those cleaned up by the
4242 // concurrent precleaning.
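    // Illustrative timeline of the race described above (sketch only):
    //   marker:  scans B                 // A not yet referenced from B
    //   mutator: B.f = A;                // only reference to A; B's card dirtied
    //   remark:  rescans dirty cards     // revisits B, marks A
    // Without the mod union table (cards dirtied since the initial checkpoint,
    // minus those already precleaned), A would be missed and reclaimed while
    // still reachable.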
4243 if (CMSParallelRemarkEnabled) {
4244 GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
4245 do_remark_parallel();
4246 } else {
4247 GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
4248 do_remark_non_parallel();
4249 }
4250 }
4251 verify_work_stacks_empty();
4252 verify_overflow_empty();
4253
4254 {
4255 GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
4256 refProcessingWork();
4257 }
4258 verify_work_stacks_empty();
4259 verify_overflow_empty();
4260
4261 if (should_unload_classes()) {
4262 heap->prune_scavengable_nmethods();
4263 }
4264 JvmtiExport::gc_epilogue();
4265
4266 // If we encountered any (marking stack / work queue) overflow
4267 // events during the current CMS cycle, take appropriate
4268 // remedial measures, where possible, so as to try and avoid
4269 // recurrence of that condition.
4270 assert(_markStack.isEmpty(), "No grey objects");
4271 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4272 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
4273 if (ser_ovflw > 0) {
4274 log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4275 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4276 _markStack.expand();
4277 _ser_pmc_remark_ovflw = 0;
4278 _ser_pmc_preclean_ovflw = 0;
4279 _ser_kac_preclean_ovflw = 0;
4280 _ser_kac_ovflw = 0;
4281 }
4282 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4283 log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4284 _par_pmc_remark_ovflw, _par_kac_ovflw);
4285 _par_pmc_remark_ovflw = 0;
4286 _par_kac_ovflw = 0;
4287 }
4288 if (_markStack._hit_limit > 0) {
4289 log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4290 _markStack._hit_limit);
4291 }
4292 if (_markStack._failed_double > 0) {
4293 log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4294 _markStack._failed_double, _markStack.capacity());
4295 }
4296 _markStack._hit_limit = 0;
4297 _markStack._failed_double = 0;
4298
4299 if ((VerifyAfterGC || VerifyDuringGC) &&
4300 CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
4301 verify_after_remark();
4302 }
4303
4304 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4305
4306 // Change under the freelistLocks.
4307 _collectorState = Sweeping;
4308 // Call isAllClear() under bitMapLock
4309 assert(_modUnionTable.isAllClear(),
4310 "Should be clear by end of the final marking");
4311 assert(_ct->cld_rem_set()->mod_union_is_clear(),
4312 "Should be clear by end of the final marking");
4313 }
4314
4315 void CMSParInitialMarkTask::work(uint worker_id) {
4316 elapsedTimer _timer;
4317 ResourceMark rm;
4318 HandleMark hm;
4319
4320 // ---------- scan from roots --------------
4321 _timer.start();
4322 CMSHeap* heap = CMSHeap::heap();
4323 ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4324
4325 // ---------- young gen roots --------------
4326 {
4327 work_on_young_gen_roots(&par_mri_cl);
4328 _timer.stop();
4329 log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4330 }
4331
4332 // ---------- remaining roots --------------
4333 _timer.reset();
4334 _timer.start();
4335
4336 CLDToOopClosure cld_closure(&par_mri_cl, ClassLoaderData::_claim_strong);
4337
4338 heap->cms_process_roots(_strong_roots_scope,
4339 false, // yg was scanned above
4340 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4341 _collector->should_unload_classes(),
4342 &par_mri_cl,
4343 &cld_closure);
4344
4345 assert(_collector->should_unload_classes()
4346 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4347 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4348 _timer.stop();
4349 log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4350 }
4351
4352 // Parallel remark task
4353 class CMSParRemarkTask: public CMSParMarkTask {
4354 CompactibleFreeListSpace* _cms_space;
4355
4356 // The per-thread work queues, available here for stealing.
4357 OopTaskQueueSet* _task_queues;
4358 TaskTerminator _term;
4359 StrongRootsScope* _strong_roots_scope;
4360
4361 public:
4362 // A value of 0 passed to n_workers will cause the number of
4363 // workers to be taken from the active workers in the work gang.
4364 CMSParRemarkTask(CMSCollector* collector,
4365 CompactibleFreeListSpace* cms_space,
4366 uint n_workers, WorkGang* workers,
4367 OopTaskQueueSet* task_queues,
4368 StrongRootsScope* strong_roots_scope):
4369 CMSParMarkTask("Rescan roots and grey objects in parallel",
4370 collector, n_workers),
4371 _cms_space(cms_space),
4372 _task_queues(task_queues),
4373 _term(n_workers, task_queues),
4374 _strong_roots_scope(strong_roots_scope) { }
4375
4376 OopTaskQueueSet* task_queues() { return _task_queues; }
4377
4378 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4379
4380 ParallelTaskTerminator* terminator() { return _term.terminator(); }
4381 uint n_workers() { return _n_workers; }
4382
4383 void work(uint worker_id);
4384
4385 private:
4386 // ... of dirty cards in old space
4387 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4388 ParMarkRefsIntoAndScanClosure* cl);
4389
4390 // ... work stealing for the above
4391 void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl);
4392 };
4393
4394 class RemarkCLDClosure : public CLDClosure {
4395 CLDToOopClosure _cm_closure;
4396 public:
4397 RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure, ClassLoaderData::_claim_strong) {}
4398 void do_cld(ClassLoaderData* cld) {
4399 // Check if we have modified any oops in the CLD during the concurrent marking.
4400 if (cld->has_accumulated_modified_oops()) {
4401 cld->clear_accumulated_modified_oops();
4402
4403 // We could have transferred the current modified marks to the accumulated marks,
4404 // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4405 } else if (cld->has_modified_oops()) {
4406 // Don't clear anything, this info is needed by the next young collection.
4407 } else {
4408 // No modified oops in the ClassLoaderData.
4409 return;
4410 }
4411
4412 // The klass has modified fields, need to scan the klass.
4413 _cm_closure.do_cld(cld);
4414 }
4415 };
4416
4417 void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4418 ParNewGeneration* young_gen = _collector->_young_gen;
4419 ContiguousSpace* eden_space = young_gen->eden();
4420 ContiguousSpace* from_space = young_gen->from();
4421 ContiguousSpace* to_space = young_gen->to();
4422
4423 HeapWord** eca = _collector->_eden_chunk_array;
4424 size_t ect = _collector->_eden_chunk_index;
4425 HeapWord** sca = _collector->_survivor_chunk_array;
4426 size_t sct = _collector->_survivor_chunk_index;
4427
4428 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4429 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4430
4431 do_young_space_rescan(cl, to_space, NULL, 0);
4432 do_young_space_rescan(cl, from_space, sca, sct);
4433 do_young_space_rescan(cl, eden_space, eca, ect);
4434 }
4435
4436 // work_queue(i) is passed to the closure
4437 // ParMarkRefsIntoAndScanClosure. The "i" parameter
4438 // also is passed to do_dirty_card_rescan_tasks() and to
4439 // do_work_steal() to select the i-th task_queue.
4440
4441 void CMSParRemarkTask::work(uint worker_id) {
4442 elapsedTimer _timer;
4443 ResourceMark rm;
4444 HandleMark hm;
4445
4446 // ---------- rescan from roots --------------
4447 _timer.start();
4448 CMSHeap* heap = CMSHeap::heap();
4449 ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4450 _collector->_span, _collector->ref_processor(),
4451 &(_collector->_markBitMap),
4452 work_queue(worker_id));
4453
4454 // Rescan young gen roots first since these are likely
4455 // coarsely partitioned and may, on that account, constitute
4456 // the critical path; thus, it's best to start off that
4457 // work first.
4458 // ---------- young gen roots --------------
4459 {
4460 work_on_young_gen_roots(&par_mrias_cl);
4461 _timer.stop();
4462 log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4463 }
4464
4465 // ---------- remaining roots --------------
4466 _timer.reset();
4467 _timer.start();
4468 heap->cms_process_roots(_strong_roots_scope,
4469 false, // yg was scanned above
4470 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4471 _collector->should_unload_classes(),
4472 &par_mrias_cl,
4473 NULL); // The dirty klasses will be handled below
4474
4475 assert(_collector->should_unload_classes()
4476 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4477 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4478 _timer.stop();
4479 log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4480
4481 // ---------- unhandled CLD scanning ----------
4482 if (worker_id == 0) { // Single threaded at the moment.
4483 _timer.reset();
4484 _timer.start();
4485
4486 // Scan all new class loader data objects and new dependencies that were
4487 // introduced during concurrent marking.
4488 ResourceMark rm;
4489 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4490 for (int i = 0; i < array->length(); i++) {
4491 Devirtualizer::do_cld(&par_mrias_cl, array->at(i));
4492 }
4493
4494 // We don't need to keep track of new CLDs anymore.
4495 ClassLoaderDataGraph::remember_new_clds(false);
4496
4497 _timer.stop();
4498 log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4499 }
4500
4501 // We might have added oops to ClassLoaderData::_handles during the
4502 // concurrent marking phase. These oops do not always point to newly allocated objects
4503 // that are guaranteed to be kept alive. Hence,
4504 // we do have to revisit the _handles block during the remark phase.
4505
4506 // ---------- dirty CLD scanning ----------
4507 if (worker_id == 0) { // Single threaded at the moment.
4508 _timer.reset();
4509 _timer.start();
4510
4511 // Scan all classes that were dirtied during the concurrent marking phase.
4512 RemarkCLDClosure remark_closure(&par_mrias_cl);
4513 ClassLoaderDataGraph::cld_do(&remark_closure);
4514
4515 _timer.stop();
4516 log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4517 }
4518
4519
4520 // ---------- rescan dirty cards ------------
4521 _timer.reset();
4522 _timer.start();
4523
4524 // Do the rescan tasks for each of the two spaces
4525 // (cms_space) in turn.
4526 // "worker_id" is passed to select the task_queue for "worker_id"
4527 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4528 _timer.stop();
4529 log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4530
4531 // ---------- steal work from other threads ...
4532 // ---------- ... and drain overflow list.
4533 _timer.reset();
4534 _timer.start();
4535 do_work_steal(worker_id, &par_mrias_cl);
4536 _timer.stop();
4537 log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4538 }
4539
4540 void
4541 CMSParMarkTask::do_young_space_rescan(
4542 OopsInGenClosure* cl, ContiguousSpace* space,
4543 HeapWord** chunk_array, size_t chunk_top) {
4544 // Until all tasks completed:
4545 // . claim an unclaimed task
4546 // . compute region boundaries corresponding to task claimed
4547 // using chunk_array
4548 // . par_oop_iterate(cl) over that region
4549
4550 ResourceMark rm;
4551 HandleMark hm;
4552
4553 SequentialSubTasksDone* pst = space->par_seq_tasks();
4554
4555 uint nth_task = 0;
4556 uint n_tasks = pst->n_tasks();
4557
4558 if (n_tasks > 0) {
4559 assert(pst->valid(), "Uninitialized use?");
4560 HeapWord *start, *end;
4561 while (pst->try_claim_task(/* reference */ nth_task)) {
4562 // We claimed task # nth_task; compute its boundaries.
4563 if (chunk_top == 0) { // no samples were taken
4564 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4565 start = space->bottom();
4566 end = space->top();
4567 } else if (nth_task == 0) {
4568 start = space->bottom();
4569 end = chunk_array[nth_task];
4570 } else if (nth_task < (uint)chunk_top) {
4571 assert(nth_task >= 1, "Control point invariant");
4572 start = chunk_array[nth_task - 1];
4573 end = chunk_array[nth_task];
4574 } else {
4575 assert(nth_task == (uint)chunk_top, "Control point invariant");
4576 start = chunk_array[chunk_top - 1];
4577 end = space->top();
4578 }
4579 MemRegion mr(start, end);
4580 // Verify that mr is in space
4581 assert(mr.is_empty() || space->used_region().contains(mr),
4582 "Should be in space");
4583 // Verify that "start" is an object boundary
4584 assert(mr.is_empty() || oopDesc::is_oop(oop(mr.start())),
4585 "Should be an oop");
4586 space->par_oop_iterate(mr, cl);
4587 }
4588 pst->all_tasks_completed();
4589 }
4590 }
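// Illustrative partitioning (sketch only, hypothetical sample addresses):
// with chunk_top == 2 and chunk_array == { c0, c1 } the claimed tasks are
//   task 0: [space->bottom(), c0)
//   task 1: [c0, c1)
//   task 2: [c1, space->top())
// i.e. chunk_top + 1 tasks bounded by the object-start samples recorded in
// _eden_chunk_array / _survivor_chunk_array.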
4591
4592 void
4593 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4594 CompactibleFreeListSpace* sp, int i,
4595 ParMarkRefsIntoAndScanClosure* cl) {
4596 // Until all tasks completed:
4597 // . claim an unclaimed task
4598 // . compute region boundaries corresponding to task claimed
4599 // . transfer dirty bits ct->mut for that region
4600 // . apply rescanclosure to dirty mut bits for that region
4601
4602 ResourceMark rm;
4603 HandleMark hm;
4604
4605 OopTaskQueue* work_q = work_queue(i);
4606 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4607 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4608 // CAUTION: This closure has state that persists across calls to
4609 // the work method dirty_range_iterate_clear() in that it has
4610 // embedded in it a (subtype of) UpwardsObjectClosure. The
4611 // use of that state in the embedded UpwardsObjectClosure instance
4612 // assumes that the cards are always iterated (even if in parallel
4613 // by several threads) in monotonically increasing order per each
4614 // thread. This is true of the implementation below which picks
4615 // card ranges (chunks) in monotonically increasing order globally
4616 // and, a-fortiori, in monotonically increasing order per thread
4617 // (the latter order being a subsequence of the former).
4618 // If the work code below is ever reorganized into a more chaotic
4619 // work-partitioning form than the current "sequential tasks"
4620 // paradigm, the use of that persistent state will have to be
4621 // revisited and modified appropriately. See also related
4622 // bug 4756801 work on which should examine this code to make
4623 // sure that the changes there do not run counter to the
4624 // assumptions made here and necessary for correctness and
4625 // efficiency. Note also that this code might yield inefficient
4626 // behavior in the case of very large objects that span one or
4627 // more work chunks. Such objects would potentially be scanned
4628 // several times redundantly. Work on 4756801 should try and
4629 // address that performance anomaly if at all possible. XXX
4630 MemRegion full_span = _collector->_span;
4631 CMSBitMap* bm = &(_collector->_markBitMap); // shared
4632 MarkFromDirtyCardsClosure
4633 greyRescanClosure(_collector, full_span, // entire span of interest
4634 sp, bm, work_q, cl);
4635
4636 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4637 assert(pst->valid(), "Uninitialized use?");
4638 uint nth_task = 0;
4639 const int alignment = CardTable::card_size * BitsPerWord;
4640 MemRegion span = sp->used_region();
4641 HeapWord* start_addr = span.start();
4642 HeapWord* end_addr = align_up(span.end(), alignment);
4643 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4644 assert(is_aligned(start_addr, alignment), "Check alignment");
4645 assert(is_aligned(chunk_size, alignment), "Check alignment");
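  // Illustrative arithmetic (sketch only): one mod union table word covers
  // BitsPerWord cards, so aligning chunk boundaries to
  // CardTable::card_size * BitsPerWord (512 * 64 = 32768 on a typical 64-bit
  // build) guarantees that two workers never set or clear bits in the same
  // MUT word, which is why no extra synchronization is needed below.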
4646
4647 while (pst->try_claim_task(/* reference */ nth_task)) {
4648 // Having claimed the nth_task, compute corresponding mem-region,
4649 // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4650 // The alignment restriction ensures that we do not need any
4651 // synchronization with other gang-workers while setting or
4652 // clearing bits in this chunk of the MUT.
4653 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4654 start_addr + (nth_task+1)*chunk_size);
4655 // The last chunk's end might be way beyond the end of the
4656 // used region. In that case pull back appropriately.
4657 if (this_span.end() > end_addr) {
4658 this_span.set_end(end_addr);
4659 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4660 }
4661 // Iterate over the dirty cards covering this chunk, marking them
4662 // precleaned, and setting the corresponding bits in the mod union
4663 // table. Since we have been careful to partition at Card and MUT-word
4664 // boundaries no synchronization is needed between parallel threads.
4665 _collector->_ct->dirty_card_iterate(this_span,
4666 &modUnionClosure);
4667
4668 // Having transferred these marks into the modUnionTable,
4669 // rescan the marked objects on the dirty cards in the modUnionTable.
4670 // Even if this is at a synchronous collection, the initial marking
4671 // may have been done during an asynchronous collection so there
4672 // may be dirty bits in the mod-union table.
4673 _collector->_modUnionTable.dirty_range_iterate_clear(
4674 this_span, &greyRescanClosure);
4675 _collector->_modUnionTable.verifyNoOneBitsInRange(
4676 this_span.start(),
4677 this_span.end());
4678 }
4679 pst->all_tasks_completed(); // declare that i am done
4680 }
4681
4682 // . see if we can share work_queues with ParNew? XXX
4683 void
4684 CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) {
4685 OopTaskQueue* work_q = work_queue(i);
4686 NOT_PRODUCT(int num_steals = 0;)
4687 oop obj_to_scan;
4688 CMSBitMap* bm = &(_collector->_markBitMap);
4689
4690 while (true) {
4691 // Completely finish any left over work from (an) earlier round(s)
4692 cl->trim_queue(0);
4693 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4694 (size_t)ParGCDesiredObjsFromOverflowList);
4695 // Now check if there's any work in the overflow list
4696 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4697 // only affects the number of attempts made to get work from the
4698 // overflow list and does not affect the number of workers. Just
4699 // pass ParallelGCThreads so this behavior is unchanged.
4700 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4701 work_q,
4702 ParallelGCThreads)) {
4703 // found something in global overflow list;
4704 // not yet ready to go stealing work from others.
4705 // We'd like to assert(work_q->size() != 0, ...)
4706 // because we just took work from the overflow list,
4707 // but of course we can't since all of that could have
4708 // been already stolen from us.
4709 // "He giveth and He taketh away."
4710 continue;
4711 }
4712 // Verify that we have no work before we resort to stealing
4713 assert(work_q->size() == 0, "Have work, shouldn't steal");
4714 // Try to steal from other queues that have work
4715 if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
4716 NOT_PRODUCT(num_steals++;)
4717 assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
4718 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4719 // Do scanning work
4720 obj_to_scan->oop_iterate(cl);
4721 // Loop around, finish this work, and try to steal some more
4722 } else if (terminator()->offer_termination()) {
4723 break; // nirvana from the infinite cycle
4724 }
4725 }
4726 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4727 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4728 "Else our work is not yet done");
4729 }
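// Protocol summary (descriptive sketch only): each worker alternates between
// (1) draining its own queue, (2) refilling from the shared overflow list --
// taking at most a quarter of its queue's free space, capped by
// ParGCDesiredObjsFromOverflowList -- and (3) stealing from other queues;
// it exits only when the terminator confirms that all queues and the
// overflow list are empty.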
4730
4731 // Record object boundaries in _eden_chunk_array by sampling the eden
4732 // top in the slow-path eden object allocation code path and record
4733 // the boundaries, if CMSEdenChunksRecordAlways is true. If
4734 // CMSEdenChunksRecordAlways is false, we use the other asynchronous
4735 // sampling in sample_eden() that activates during part of the
4736 // preclean phase.
4737 void CMSCollector::sample_eden_chunk() {
4738 if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4739 if (_eden_chunk_lock->try_lock()) {
4740 // Record a sample. This is the critical section. The contents
4741 // of the _eden_chunk_array have to be non-decreasing in the
4742 // address order.
4743 _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4744 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4745 "Unexpected state of Eden");
4746 if (_eden_chunk_index == 0 ||
4747 ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4748 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4749 _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4750 _eden_chunk_index++; // commit sample
4751 }
4752 _eden_chunk_lock->unlock();
4753 }
4754 }
4755 }
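// Illustrative trace (sketch only, offsets hypothetical): with a sampling
// grain of 1024 words, successive eden tops at word offsets 0, 100, 2000,
// 2100 commit only 0 and 2000 -- a sample is kept only when it is strictly
// above the previous committed entry and at least CMSSamplingGrain away, so
// the array stays strictly increasing for the parallel rescan code.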
4756
4757 // Return a thread-local PLAB recording array, as appropriate.
4758 void* CMSCollector::get_data_recorder(int thr_num) {
4759 if (_survivor_plab_array != NULL &&
4760 (CMSPLABRecordAlways ||
4761 (_collectorState > Marking && _collectorState < FinalMarking))) {
4762 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4763 ChunkArray* ca = &_survivor_plab_array[thr_num];
4764 ca->reset(); // clear it so that fresh data is recorded
4765 return (void*) ca;
4766 } else {
4767 return NULL;
4768 }
4769 }
4770
4771 // Reset all the thread-local PLAB recording arrays
4772 void CMSCollector::reset_survivor_plab_arrays() {
4773 for (uint i = 0; i < ParallelGCThreads; i++) {
4774 _survivor_plab_array[i].reset();
4775 }
4776 }
4777
4778 // Merge the per-thread plab arrays into the global survivor chunk
4779 // array which will provide the partitioning of the survivor space
4780 // for CMS initial scan and rescan.
4781 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4782 int no_of_gc_threads) {
4783 assert(_survivor_plab_array != NULL, "Error");
4784 assert(_survivor_chunk_array != NULL, "Error");
4785 assert(_collectorState == FinalMarking ||
4786 (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4787 for (int j = 0; j < no_of_gc_threads; j++) {
4788 _cursor[j] = 0;
4789 }
4790 HeapWord* top = surv->top();
4791 size_t i;
4792 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
4793 HeapWord* min_val = top; // Higher than any PLAB address
4794 uint min_tid = 0; // position of min_val this round
4795 for (int j = 0; j < no_of_gc_threads; j++) {
4796 ChunkArray* cur_sca = &_survivor_plab_array[j];
4797 if (_cursor[j] == cur_sca->end()) {
4798 continue;
4799 }
4800 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4801 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4802 assert(surv->used_region().contains(cur_val), "Out of bounds value");
4803 if (cur_val < min_val) {
4804 min_tid = j;
4805 min_val = cur_val;
4806 } else {
4807 assert(cur_val < top, "All recorded addresses should be less");
4808 }
4809 }
4810 // At this point min_val and min_tid are respectively
4811 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4812 // and the thread (j) that witnesses that address.
4813 // We record this address in the _survivor_chunk_array[i]
4814 // and increment _cursor[min_tid] prior to the next round i.
4815 if (min_val == top) {
4816 break;
4817 }
4818 _survivor_chunk_array[i] = min_val;
4819 _cursor[min_tid]++;
4820 }
4821 // We are all done; record the size of the _survivor_chunk_array
4822 _survivor_chunk_index = i; // exclusive: [0, i)
4823 log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4824 // Verify that we used up all the recorded entries
4825 #ifdef ASSERT
4826 size_t total = 0;
4827 for (int j = 0; j < no_of_gc_threads; j++) {
4828 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4829 total += _cursor[j];
4830 }
4831 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4832 // Check that the merged array is in sorted order
4833 if (total > 0) {
4834 for (size_t i = 0; i < total - 1; i++) {
4835 log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4836 i, p2i(_survivor_chunk_array[i]));
4837 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4838 "Not sorted");
4839 }
4840 }
4841 #endif // ASSERT
4842 }
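// Worked example (sketch only, hypothetical addresses): with two recording
// threads whose per-thread PLAB arrays are {a, c} and {b} (each already
// sorted), the rounds above repeatedly take the global minimum and yield
// _survivor_chunk_array == {a, b, c} with _survivor_chunk_index == 3 --
// effectively an n-way merge of the sorted per-thread samples.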
4843
4844 // Set up the space's par_seq_tasks structure for work claiming
4845 // for parallel initial scan and rescan of young gen.
4846 // See ParRescanTask where this is currently used.
4847 void
4848 CMSCollector::
4849 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4850 assert(n_threads > 0, "Unexpected n_threads argument");
4851
4852 // Eden space
4853 if (!_young_gen->eden()->is_empty()) {
4854 SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4855 assert(!pst->valid(), "Clobbering existing data?");
4856 // Each valid entry in [0, _eden_chunk_index) represents a task.
4857 size_t n_tasks = _eden_chunk_index + 1;
4858 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4859 // Sets the condition for completion of the subtask (how many threads
4860 // need to finish in order to be done).
4861 pst->set_n_threads(n_threads);
4862 pst->set_n_tasks((int)n_tasks);
4863 }
4864
4865 // Merge the survivor plab arrays into _survivor_chunk_array
4866 if (_survivor_plab_array != NULL) {
4867 merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4868 } else {
4869 assert(_survivor_chunk_index == 0, "Error");
4870 }
4871
4872 // To space
4873 {
4874 SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4875 assert(!pst->valid(), "Clobbering existing data?");
4876 // Sets the condition for completion of the subtask (how many threads
4877 // need to finish in order to be done).
4878 pst->set_n_threads(n_threads);
4879 pst->set_n_tasks(1);
4880 assert(pst->valid(), "Error");
4881 }
4882
4883 // From space
4884 {
4885 SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4886 assert(!pst->valid(), "Clobbering existing data?");
4887 size_t n_tasks = _survivor_chunk_index + 1;
4888 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4889 // Sets the condition for completion of the subtask (how many threads
4890 // need to finish in order to be done).
4891 pst->set_n_threads(n_threads);
4892 pst->set_n_tasks((int)n_tasks);
4893 assert(pst->valid(), "Error");
4894 }
4895 }
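// Illustrative task counts (sketch only): with _eden_chunk_index == 3 and
// _survivor_chunk_index == 2 the setup above yields 4 eden tasks, 3
// from-space tasks and a single to-space task, which the n_threads workers
// then claim in CMSParMarkTask::do_young_space_rescan().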
4896
4897 // Parallel version of remark
4898 void CMSCollector::do_remark_parallel() {
4899 CMSHeap* heap = CMSHeap::heap();
4900 WorkGang* workers = heap->workers();
4901 assert(workers != NULL, "Need parallel worker threads.");
4902 // Choose to use the number of GC workers most recently set
4903 // into "active_workers".
4904 uint n_workers = workers->active_workers();
4905
4906 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4907
4908 StrongRootsScope srs(n_workers);
4909
4910 CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4911
4912 // We won't be iterating over the cards in the card table updating
4913 // the younger_gen cards, so we shouldn't call the following else
4914 // the verification code as well as subsequent younger_refs_iterate
4915 // code would get confused. XXX
4916 // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4917
4918 // The young gen rescan work will not be done as part of
4919 // process_roots (which currently doesn't know how to
4920 // parallelize such a scan), but rather will be broken up into
4921 // a set of parallel tasks (via the sampling that the [abortable]
4922 // preclean phase did of eden, plus the [two] tasks of
4923 // scanning the [two] survivor spaces). Further fine-grain
4924 // parallelization of the scanning of the survivor spaces
4925 // themselves, and of precleaning of the young gen itself
4926 // is deferred to the future.
4927 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4928
4929 // The dirty card rescan work is broken up into a "sequence"
4930 // of parallel tasks (per constituent space) that are dynamically
4931 // claimed by the parallel threads.
4932 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4933
4934 // It turns out that even when we're using 1 thread, doing the work in a
4935 // separate thread causes wide variance in run times. We can't help this
4936 // in the multi-threaded case, but we special-case n=1 here to get
4937 // repeatable measurements of the 1-thread overhead of the parallel code.
4938 if (n_workers > 1) {
4939 // Make refs discovery MT-safe, if it isn't already: it may not
4940 // necessarily be so, since it's possible that we are doing
4941 // ST marking.
4942 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4943 workers->run_task(&tsk);
4944 } else {
4945 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4946 tsk.work(0);
4947 }
4948
4949 // restore, single-threaded for now, any preserved marks
4950 // as a result of work_q overflow
4951 restore_preserved_marks_if_any();
4952 }
4953
4954 // Non-parallel version of remark
4955 void CMSCollector::do_remark_non_parallel() {
4956 ResourceMark rm;
4957 HandleMark hm;
4958 CMSHeap* heap = CMSHeap::heap();
4959 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4960
4961 MarkRefsIntoAndScanClosure
4962 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4963 &_markStack, this,
4964 false /* should_yield */, false /* not precleaning */);
4965 MarkFromDirtyCardsClosure
4966 markFromDirtyCardsClosure(this, _span,
4967 NULL, // space is set further below
4968 &_markBitMap, &_markStack, &mrias_cl);
4969 {
4970 GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
4971 // Iterate over the dirty cards, setting the corresponding bits in the
4972 // mod union table.
4973 {
4974 ModUnionClosure modUnionClosure(&_modUnionTable);
4975 _ct->dirty_card_iterate(_cmsGen->used_region(),
4976 &modUnionClosure);
4977 }
4978 // Having transferred these marks into the modUnionTable, we just need
4979 // to rescan the marked objects on the dirty cards in the modUnionTable.
4980 // The initial marking may have been done during an asynchronous
4981 // collection so there may be dirty bits in the mod-union table.
4982 const int alignment = CardTable::card_size * BitsPerWord;
4983 {
4984 // ... First handle dirty cards in CMS gen
4985 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4986 MemRegion ur = _cmsGen->used_region();
4987 HeapWord* lb = ur.start();
4988 HeapWord* ub = align_up(ur.end(), alignment);
4989 MemRegion cms_span(lb, ub);
4990 _modUnionTable.dirty_range_iterate_clear(cms_span,
4991 &markFromDirtyCardsClosure);
4992 verify_work_stacks_empty();
4993 log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4994 }
4995 }
4996 if (VerifyDuringGC &&
4997 CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
4998 HandleMark hm; // Discard invalid handles created during verification
4999 Universe::verify();
5000 }
5001 {
5002 GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
5003
5004 verify_work_stacks_empty();
5005
5006 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5007 StrongRootsScope srs(1);
5008
5009 heap->cms_process_roots(&srs,
5010 true, // young gen as roots
5011 GenCollectedHeap::ScanningOption(roots_scanning_options()),
5012 should_unload_classes(),
5013 &mrias_cl,
5014 NULL); // The dirty klasses will be handled below
5015
5016 assert(should_unload_classes()
5017 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5018 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5019 }
5020
5021 {
5022 GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
5023
5024 verify_work_stacks_empty();
5025
5026 // Scan all class loader data objects that might have been introduced
5027 // during concurrent marking.
5028 ResourceMark rm;
5029 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5030 for (int i = 0; i < array->length(); i++) {
5031 Devirtualizer::do_cld(&mrias_cl, array->at(i));
5032 }
5033
5034 // We don't need to keep track of new CLDs anymore.
5035 ClassLoaderDataGraph::remember_new_clds(false);
5036
5037 verify_work_stacks_empty();
5038 }
5039
5040 // We might have added oops to ClassLoaderData::_handles during the
5041 // concurrent marking phase. These oops do not point to newly allocated objects
5042 // that are guaranteed to be kept alive. Hence,
5043 // we do have to revisit the _handles block during the remark phase.
5044 {
5045 GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
5046
5047 verify_work_stacks_empty();
5048
5049 RemarkCLDClosure remark_closure(&mrias_cl);
5050 ClassLoaderDataGraph::cld_do(&remark_closure);
5051
5052 verify_work_stacks_empty();
5053 }
5054
5055 verify_work_stacks_empty();
5056 // Restore evacuated mark words, if any, used for overflow list links
5057 restore_preserved_marks_if_any();
5058
5059 verify_overflow_empty();
5060 }
5061
5062 ////////////////////////////////////////////////////////
5063 // Parallel Reference Processing Task Proxy Class
5064 ////////////////////////////////////////////////////////
5065 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5066 OopTaskQueueSet* _queues;
5067 TaskTerminator _terminator;
5068 public:
5069 AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5070 AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5071 ParallelTaskTerminator* terminator() { return _terminator.terminator(); }
5072 OopTaskQueueSet* queues() { return _queues; }
5073 };
5074
5075 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5076 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5077 CMSCollector* _collector;
5078 CMSBitMap* _mark_bit_map;
5079 const MemRegion _span;
5080 ProcessTask& _task;
5081
5082 public:
5083 CMSRefProcTaskProxy(ProcessTask& task,
5084 CMSCollector* collector,
5085 const MemRegion& span,
5086 CMSBitMap* mark_bit_map,
5087 AbstractWorkGang* workers,
5088 OopTaskQueueSet* task_queues):
5089 AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5090 task_queues,
5091 workers->active_workers()),
5092 _collector(collector),
5093 _mark_bit_map(mark_bit_map),
5094 _span(span),
5095 _task(task)
5096 {
5097 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5098 "Inconsistency in _span");
5099 }
5100
5101 OopTaskQueueSet* task_queues() { return queues(); }
5102
5103 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5104
5105 void do_work_steal(int i,
5106 CMSParDrainMarkingStackClosure* drain,
5107 CMSParKeepAliveClosure* keep_alive);
5108
5109 virtual void work(uint worker_id);
5110 };
5111
5112 void CMSRefProcTaskProxy::work(uint worker_id) {
5113 ResourceMark rm;
5114 HandleMark hm;
5115 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5116 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5117 _mark_bit_map,
5118 work_queue(worker_id));
5119 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5120 _mark_bit_map,
5121 work_queue(worker_id));
5122 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5123 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5124 if (_task.marks_oops_alive()) {
5125 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive);
5126 }
5127 assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5128 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5129 }
5130
5131 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5132 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5133 _span(span),
5134 _work_queue(work_queue),
5135 _bit_map(bit_map),
5136 _mark_and_push(collector, span, bit_map, work_queue),
5137 _low_water_mark(MIN2((work_queue->max_elems()/4),
5138 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5139 { }
5140
5141 // . see if we can share work_queues with ParNew? XXX
5142 void CMSRefProcTaskProxy::do_work_steal(int i,
5143 CMSParDrainMarkingStackClosure* drain,
5144 CMSParKeepAliveClosure* keep_alive) {
5145 OopTaskQueue* work_q = work_queue(i);
5146 NOT_PRODUCT(int num_steals = 0;)
5147 oop obj_to_scan;
5148
5149 while (true) {
5150 // Completely finish any left over work from (an) earlier round(s)
5151 drain->trim_queue(0);
5152 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5153 (size_t)ParGCDesiredObjsFromOverflowList);
5154 // Now check if there's any work in the overflow list
5155 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5156 // only affects the number of attempts made to get work from the
5157 // overflow list and does not affect the number of workers. Just
5158 // pass ParallelGCThreads so this behavior is unchanged.
5159 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5160 work_q,
5161 ParallelGCThreads)) {
5162 // Found something in global overflow list;
5163 // not yet ready to go stealing work from others.
5164 // We'd like to assert(work_q->size() != 0, ...)
5165 // because we just took work from the overflow list,
5166 // but of course we can't, since all of that might have
5167 // been already stolen from us.
5168 continue;
5169 }
5170 // Verify that we have no work before we resort to stealing
5171 assert(work_q->size() == 0, "Have work, shouldn't steal");
5172 // Try to steal from other queues that have work
5173 if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
5174 NOT_PRODUCT(num_steals++;)
5175 assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
5176 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5177 // Do scanning work
5178 obj_to_scan->oop_iterate(keep_alive);
5179 // Loop around, finish this work, and try to steal some more
5180 } else if (terminator()->offer_termination()) {
5181 break; // nirvana from the infinite cycle
5182 }
5183 }
5184 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5185 }
5186
5187 void CMSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) {
5188 CMSHeap* heap = CMSHeap::heap();
5189 WorkGang* workers = heap->workers();
5190 assert(workers != NULL, "Need parallel worker threads.");
5191 assert(workers->active_workers() == ergo_workers,
5192 "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
5193 ergo_workers, workers->active_workers());
5194 CMSRefProcTaskProxy rp_task(task, &_collector,
5195 _collector.ref_processor_span(),
5196 _collector.markBitMap(),
5197 workers, _collector.task_queues());
5198 workers->run_task(&rp_task, workers->active_workers());
5199 }
5200
5201 void CMSCollector::refProcessingWork() {
5202 ResourceMark rm;
5203 HandleMark hm;
5204
5205 ReferenceProcessor* rp = ref_processor();
5206 assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
5207 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5208 // Process weak references.
5209 rp->setup_policy(false);
5210 verify_work_stacks_empty();
5211
5212 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
5213 {
5214 GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5215
5216 // Setup keep_alive and complete closures.
5217 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5218 &_markStack, false /* !preclean */);
5219 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5220 _span, &_markBitMap, &_markStack,
5221 &cmsKeepAliveClosure, false /* !preclean */);
5222
5223 ReferenceProcessorStats stats;
5224 if (rp->processing_is_mt()) {
5225 // Set the degree of MT here. If the discovery is done MT, there
5226 // may have been a different number of threads doing the discovery
5227 // and a different number of discovered lists may have Ref objects.
5228 // That is OK as long as the Reference lists are balanced (see
5229 // balance_all_queues() and balance_queues()).
5230 CMSHeap* heap = CMSHeap::heap();
5231 uint active_workers = ParallelGCThreads;
5232 WorkGang* workers = heap->workers();
5233 if (workers != NULL) {
5234 active_workers = workers->active_workers();
5235 // The expectation is that active_workers will have already
5236 // been set to a reasonable value. If it has not been set,
5237 // investigate.
5238 assert(active_workers > 0, "Should have been set during scavenge");
5239 }
5240 rp->set_active_mt_degree(active_workers);
5241 CMSRefProcTaskExecutor task_executor(*this);
5242 stats = rp->process_discovered_references(&_is_alive_closure,
5243 &cmsKeepAliveClosure,
5244 &cmsDrainMarkingStackClosure,
5245 &task_executor,
5246 &pt);
5247 } else {
5248 stats = rp->process_discovered_references(&_is_alive_closure,
5249 &cmsKeepAliveClosure,
5250 &cmsDrainMarkingStackClosure,
5251 NULL,
5252 &pt);
5253 }
5254 _gc_tracer_cm->report_gc_reference_stats(stats);
5255 pt.print_all_references();
5256 }
5257
5258 // This is the point where the entire marking should have completed.
5259 verify_work_stacks_empty();
5260
5261 {
5262 GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
5263 WeakProcessor::weak_oops_do(&_is_alive_closure, &do_nothing_cl);
5264 }
5265
5266 if (should_unload_classes()) {
5267 {
5268 GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
5269
5270 // Unload classes and purge the SystemDictionary.
5271 bool purged_class = SystemDictionary::do_unloading(_gc_timer_cm);
5272
5273 // Unload nmethods.
5274 CodeCache::do_unloading(&_is_alive_closure, purged_class);
5275
5276 // Prune dead klasses from subklass/sibling/implementor lists.
5277 Klass::clean_weak_klass_links(purged_class);
5278
5279 // Clean JVMCI metadata handles.
5280 JVMCI_ONLY(JVMCI::do_unloading(purged_class));
5281 }
5282 }
5283
5284 // Restore any preserved marks as a result of mark stack or
5285 // work queue overflow
5286 restore_preserved_marks_if_any(); // done single-threaded for now
5287
5288 rp->set_enqueuing_is_done(true);
5289 rp->verify_no_references_recorded();
5290 }
5291
5292 #ifndef PRODUCT
5293 void CMSCollector::check_correct_thread_executing() {
5294 Thread* t = Thread::current();
5295 // Only the VM thread or the CMS thread should be here.
5296 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5297 "Unexpected thread type");
5298 // If this is the vm thread, the foreground process
5299 // should not be waiting. Note that _foregroundGCIsActive is
5300 // true while the foreground collector is waiting.
5301 if (_foregroundGCShouldWait) {
5302 // We cannot be the VM thread
5303 assert(t->is_ConcurrentGC_thread(),
5304 "Should be CMS thread");
5305 } else {
5306 // We can be the CMS thread only if we are in a stop-world
5307 // phase of CMS collection.
5308 if (t->is_ConcurrentGC_thread()) {
5309 assert(_collectorState == InitialMarking ||
5310 _collectorState == FinalMarking,
5311 "Should be a stop-world phase");
5312 // The CMS thread should be holding the CMS_token.
5313 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5314 "Potential interference with concurrently "
5315 "executing VM thread");
5316 }
5317 }
5318 }
5319 #endif
5320
5321 void CMSCollector::sweep() {
5322 assert(_collectorState == Sweeping, "just checking");
5323 check_correct_thread_executing();
5324 verify_work_stacks_empty();
5325 verify_overflow_empty();
5326 increment_sweep_count();
5327 TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
5328
5329 _inter_sweep_timer.stop();
5330 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5331
5332 assert(!_intra_sweep_timer.is_active(), "Should not be active");
5333 _intra_sweep_timer.reset();
5334 _intra_sweep_timer.start();
5335 {
5336 GCTraceCPUTime tcpu;
5337 CMSPhaseAccounting pa(this, "Concurrent Sweep");
5338 // First sweep the old gen
5339 {
5340 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5341 bitMapLock());
5342 sweepWork(_cmsGen);
5343 }
5344
5345 // Update Universe::_heap_*_at_gc figures.
5346 // We need all the free list locks to make the abstract state
5347 // transition from Sweeping to Resetting. See detailed note
5348 // further below.
5349 {
5350 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5351
5352 // Update heap occupancy information which is used as
5353 // input to soft ref clearing policy at the next gc.
5354 Universe::update_heap_info_at_gc();
5355
5356 // recalculate CMS used space after CMS collection
5357 _cmsGen->cmsSpace()->recalculate_used_stable();
5358
5359 _collectorState = Resizing;
5360 }
5361 }
5362 verify_work_stacks_empty();
5363 verify_overflow_empty();
5364
5365 if (should_unload_classes()) {
5366 // Delay purge to the beginning of the next safepoint. Metaspace::contains
5367 // requires that the virtual spaces are stable and not deleted.
5368 ClassLoaderDataGraph::set_should_purge(true);
5369 }
5370
5371 _intra_sweep_timer.stop();
5372 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5373
5374 _inter_sweep_timer.reset();
5375 _inter_sweep_timer.start();
5376
5377 // We need to use a monotonically non-decreasing time in ms
5378 // or we will see time-warp warnings and os::javaTimeMillis()
5379 // does not guarantee monotonicity.
5380 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5381 update_time_of_last_gc(now);
5382
5383 // NOTE on abstract state transitions:
5384 // Mutators allocate-live and/or mark the mod-union table dirty
5385 // based on the state of the collection. The former is done in
5386 // the interval [Marking, Sweeping] and the latter in the interval
5387 // [Marking, Sweeping). Thus the transitions into the Marking state
5388 // and out of the Sweeping state must be synchronously visible
5389 // globally to the mutators.
5390 // The transition into the Marking state happens with the world
5391 // stopped so the mutators will globally see it. Sweeping is
5392 // done asynchronously by the background collector so the transition
5393 // from the Sweeping state to the Resizing state must be done
5394 // under the freelistLock (as is the check for whether to
5395 // allocate-live and whether to dirty the mod-union table).
5396 assert(_collectorState == Resizing, "Change of collector state to"
5397 " Resizing must be done under the freelistLocks (plural)");
5398
5399 // Now that sweeping has been completed, we clear
5400 // the incremental_collection_failed flag,
5401 // thus inviting a younger gen collection to promote into
5402 // this generation. If such a promotion may still fail,
5403 // the flag will be set again when a young collection is
5404 // attempted.
5405 CMSHeap* heap = CMSHeap::heap();
5406 heap->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
5407 heap->update_full_collections_completed(_collection_count_start);
5408 }
5409
5410 // FIX ME!!! Looks like this belongs in CFLSpace, with
5411 // CMSGen merely delegating to it.
5412 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5413 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5414 HeapWord* minAddr = _cmsSpace->bottom();
5415 HeapWord* largestAddr =
5416 (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5417 if (largestAddr == NULL) {
5418 // The dictionary appears to be empty. In this case
5419 // try to coalesce at the end of the heap.
5420 largestAddr = _cmsSpace->end();
5421 }
5422 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5423 size_t nearLargestOffset =
5424 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5425 log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5426 p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5427 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5428 }
5429
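// Answers whether addr lies at or beyond the coalescing threshold
// computed by setNearLargestChunk() above.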
5430 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5431 return addr >= _cmsSpace->nearLargestChunk();
5432 }
5433
5434 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5435 return _cmsSpace->find_chunk_at_end();
5436 }
5437
5438 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5439 bool full) {
5440 // If the young generation has been collected, gather any statistics
5441 // that are of interest at this point.
5442 bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
5443 if (!full && current_is_young) {
5444 // Gather statistics on the young generation collection.
5445 collector()->stats().record_gc0_end(used());
5446 }
5447 _cmsSpace->recalculate_used_stable();
5448 }
5449
5450 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5451 // We iterate over the space(s) underlying this generation,
5452 // checking the mark bit map to see if the bits corresponding
5453 // to specific blocks are marked or not. Blocks that are
5454 // marked are live and are not swept up. All remaining blocks
5455 // are swept up, with coalescing on-the-fly as we sweep up
5456 // contiguous free and/or garbage blocks:
5457 // We need to ensure that the sweeper synchronizes with allocators
5458 // and stop-the-world collectors. In particular, the following
5459 // locks are used:
5460 // . CMS token: if this is held, a stop the world collection cannot occur
5461 // . freelistLock: if this is held no allocation can occur from this
5462 // generation by another thread
5463 // . bitMapLock: if this is held, no other thread can access or update
5464 // the marking bit map
5465
5466 // Note that we need to hold the freelistLock if we use
5467 // block iterate below; else the iterator might go awry if
5468 // a mutator (or promotion) causes block contents to change
5469 // (for instance if the allocator divvies up a block).
5470 // If we hold the free list lock, for all practical purposes
5471 // young generation GC's can't occur (they'll usually need to
5472 // promote), so we might as well prevent all young generation
5473 // GC's while we do a sweeping step. For the same reason, we might
5474 // as well take the bit map lock for the entire duration.
5475
5476 // check that we hold the requisite locks
5477 assert(have_cms_token(), "Should hold cms token");
5478 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5479 assert_lock_strong(old_gen->freelistLock());
5480 assert_lock_strong(bitMapLock());
5481
5482 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5483 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
5484 old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5485 _inter_sweep_estimate.padded_average(),
5486 _intra_sweep_estimate.padded_average());
5487 old_gen->setNearLargestChunk();
5488
5489 {
5490 SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5491 old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5492 // We need to free-up/coalesce garbage/blocks from a
5493 // co-terminal free run. This is done in the SweepClosure
5494 // destructor; so, do not remove this scope, else the
5495 // end-of-sweep-census below will be off by a little bit.
5496 }
5497 old_gen->cmsSpace()->sweep_completed();
5498 old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5499 if (should_unload_classes()) { // unloaded classes this cycle,
5500 _concurrent_cycles_since_last_unload = 0; // ... reset count
5501 } else { // did not unload classes,
5502 _concurrent_cycles_since_last_unload++; // ... increment count
5503 }
5504 }
5505
5506 // Reset CMS data structures (for now just the marking bit map)
5507 // preparatory for the next cycle.
5508 void CMSCollector::reset_concurrent() {
5509 CMSTokenSyncWithLocks ts(true, bitMapLock());
5510
5511 // If the state is not "Resetting", the foreground thread
5512 // has done a collection and the resetting.
5513 if (_collectorState != Resetting) {
5514 assert(_collectorState == Idling, "The state should only change"
5515 " because the foreground collector has finished the collection");
5516 return;
5517 }
5518
5519 {
5520 // Clear the mark bitmap (no grey objects to start with)
5521 // for the next cycle.
5522 GCTraceCPUTime tcpu;
5523 CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5524
5525 HeapWord* curAddr = _markBitMap.startWord();
5526 while (curAddr < _markBitMap.endWord()) {
5527 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
5528 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5529 _markBitMap.clear_large_range(chunk);
5530 if (ConcurrentMarkSweepThread::should_yield() &&
5531 !foregroundGCIsActive() &&
5532 CMSYield) {
5533 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5534 "CMS thread should hold CMS token");
5535 assert_lock_strong(bitMapLock());
5536 bitMapLock()->unlock();
5537 ConcurrentMarkSweepThread::desynchronize(true);
5538 stopTimer();
5539 incrementYields();
5540
5541 // See the comment in coordinator_yield()
5542 for (unsigned i = 0; i < CMSYieldSleepCount &&
5543 ConcurrentMarkSweepThread::should_yield() &&
5544 !CMSCollector::foregroundGCIsActive(); ++i) {
5545 os::sleep(Thread::current(), 1, false);
5546 }
5547
5548 ConcurrentMarkSweepThread::synchronize(true);
5549 bitMapLock()->lock_without_safepoint_check();
5550 startTimer();
5551 }
5552 curAddr = chunk.end();
5553 }
5554 // A successful mostly concurrent collection has been done.
5555 // Because only the full (i.e., concurrent mode failure) collections
5556 // are being measured for gc overhead limits, clean the "near" flag
5557 // and count.
5558 size_policy()->reset_gc_overhead_limit_count();
5559 _collectorState = Idling;
5560 }
5561
5562 register_gc_end();
5563 }
5564
5565 // Same as above but for STW paths
5566 void CMSCollector::reset_stw() {
5567 // already have the lock
5568 assert(_collectorState == Resetting, "just checking");
5569 assert_lock_strong(bitMapLock());
5570 GCIdMark gc_id_mark(_cmsThread->gc_id());
5571 _markBitMap.clear_all();
5572 _collectorState = Idling;
5573 register_gc_end();
5574 }
5575
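// Entry point for the stop-the-world CMS pauses: dispatches on the
// requested operation (initial mark or final remark) and reports the
// pause through the usual GC tracing and collector counters.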
5576 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5577 GCTraceCPUTime tcpu;
5578 TraceCollectorStats tcs_cgc(cgc_counters());
5579
5580 switch (op) {
5581 case CMS_op_checkpointRootsInitial: {
5582 GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5583 SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5584 checkpointRootsInitial();
5585 break;
5586 }
5587 case CMS_op_checkpointRootsFinal: {
5588 GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5589 SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5590 checkpointRootsFinal();
5591 break;
5592 }
5593 default:
5594 fatal("No such CMS_op");
5595 }
5596 }
5597
5598 #ifndef PRODUCT
5599 size_t const CMSCollector::skip_header_HeapWords() {
5600 return FreeChunk::header_size();
5601 }
5602
5603 // Try and collect here conditions that should hold when
5604 // CMS thread is exiting. The idea is that the foreground GC
5605 // thread should not be blocked if it wants to terminate
5606 // the CMS thread and yet continue to run the VM for a while
5607 // after that.
5608 void CMSCollector::verify_ok_to_terminate() const {
5609 assert(Thread::current()->is_ConcurrentGC_thread(),
5610 "should be called by CMS thread");
5611 assert(!_foregroundGCShouldWait, "should be false");
5612 // We could check here that all the various low-level locks
5613 // are not held by the CMS thread, but that is overkill; see
5614 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5615 // is checked.
5616 }
5617 #endif
5618
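// Printezis marks: for an object whose klass pointer has not yet been
// published, CMS records the object's extent by setting two extra bits
// in the mark bit map, one at addr + 1 and one at the last word of the
// block. Given that both bits are set, this routine recovers the block
// size as the distance from addr to one word past the second bit.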
5619 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5620 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5621 "missing Printezis mark?");
5622 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5623 size_t size = pointer_delta(nextOneAddr + 1, addr);
5624 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5625 "alignment problem");
5626 assert(size >= 3, "Necessary for Printezis marks to work");
5627 return size;
5628 }
5629
5630 // A variant of the above (block_size_using_printezis_bits()) except
5631 // that we return 0 if the P-bits are not yet set.
5632 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5633 if (_markBitMap.isMarked(addr + 1)) {
5634 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5635 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5636 size_t size = pointer_delta(nextOneAddr + 1, addr);
5637 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5638 "alignment problem");
5639 assert(size >= 3, "Necessary for Printezis marks to work");
5640 return size;
5641 }
5642 return 0;
5643 }
5644
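// Returns the first card boundary at or beyond the end of the block
// starting at addr, using the object's size when its klass is already
// installed and the Printezis bits otherwise.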
5645 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5646 size_t sz = 0;
5647 oop p = (oop)addr;
5648 if (p->klass_or_null_acquire() != NULL) {
5649 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5650 } else {
5651 sz = block_size_using_printezis_bits(addr);
5652 }
5653 assert(sz > 0, "size must be nonzero");
5654 HeapWord* next_block = addr + sz;
5655 HeapWord* next_card = align_up(next_block, CardTable::card_size);
5656 assert(align_down((uintptr_t)addr, CardTable::card_size) <
5657 align_down((uintptr_t)next_card, CardTable::card_size),
5658 "must be different cards");
5659 return next_card;
5660 }
5661
5662
5663 // CMS Bit Map Wrapper /////////////////////////////////////////
5664
5665 // Construct a CMS bit map infrastructure, but don't create the
5666 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
5667 // further below.
5668 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5669 _shifter(shifter),
5670 _bm(),
5671 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5672 Monitor::_safepoint_check_never) : NULL)
5673 {
5674 _bmStartWord = 0;
5675 _bmWordSize = 0;
5676 }
5677
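// Reserve and commit the backing store for the bit map covering mr,
// one bit per (1 << _shifter) heap words; returns false if either the
// reservation or the commit fails.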
5678 bool CMSBitMap::allocate(MemRegion mr) {
5679 _bmStartWord = mr.start();
5680 _bmWordSize = mr.word_size();
5681 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5682 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5683 if (!brs.is_reserved()) {
5684 log_warning(gc)("CMS bit map allocation failure");
5685 return false;
5686 }
5687 // For now we'll just commit all of the bit map up front.
5688 // Later on we'll try to be more parsimonious with swap.
5689 if (!_virtual_space.initialize(brs, brs.size())) {
5690 log_warning(gc)("CMS bit map backing store failure");
5691 return false;
5692 }
5693 assert(_virtual_space.committed_size() == brs.size(),
5694 "didn't reserve backing store for all of CMS bit map?");
5695 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5696 _bmWordSize, "inconsistency in bit map sizing");
5697 _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5698
5699 // bm.clear(); // can we rely on getting zero'd memory? verify below
5700 assert(isAllClear(),
5701 "Expected zero'd memory from ReservedSpace constructor");
5702 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5703 "consistency check");
5704 return true;
5705 }
5706
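// Apply cl to each maximal run of set ("dirty") bits within mr,
// clearing each run from the bit map as it is visited.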
5707 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5708 HeapWord *next_addr, *end_addr, *last_addr;
5709 assert_locked();
5710 assert(covers(mr), "out-of-range error");
5711 // XXX assert that start and end are appropriately aligned
5712 for (next_addr = mr.start(), end_addr = mr.end();
5713 next_addr < end_addr; next_addr = last_addr) {
5714 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5715 last_addr = dirty_region.end();
5716 if (!dirty_region.is_empty()) {
5717 cl->do_MemRegion(dirty_region);
5718 } else {
5719 assert(last_addr == end_addr, "program logic");
5720 return;
5721 }
5722 }
5723 }
5724
5725 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5726 _bm.print_on_error(st, prefix);
5727 }
5728
5729 #ifndef PRODUCT
5730 void CMSBitMap::assert_locked() const {
5731 CMSLockVerifier::assert_locked(lock());
5732 }
5733
5734 bool CMSBitMap::covers(MemRegion mr) const {
5735 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5736 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5737 "size inconsistency");
5738 return (mr.start() >= _bmStartWord) &&
5739 (mr.end() <= endWord());
5740 }
5741
5742 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5743 return (start >= _bmStartWord && (start + size) <= endWord());
5744 }
5745
5746 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5747 // verify that there are no 1 bits in the interval [left, right)
5748 FalseBitMapClosure falseBitMapClosure;
5749 iterate(&falseBitMapClosure, left, right);
5750 }
5751
5752 void CMSBitMap::region_invariant(MemRegion mr)
5753 {
5754 assert_locked();
5755 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5756 assert(!mr.is_empty(), "unexpected empty region");
5757 assert(covers(mr), "mr should be covered by bit map");
5758 // convert address range into offset range
5759 size_t start_ofs = heapWordToOffset(mr.start());
5760 // Make sure that end() is appropriately aligned
5761 assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))),
5762 "Misaligned mr.end()");
5763 size_t end_ofs = heapWordToOffset(mr.end());
5764 assert(end_ofs > start_ofs, "Should mark at least one bit");
5765 }
5766
5767 #endif
5768
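// Reserve and commit backing store for a marking stack capable of
// holding 'size' oops; returns false if the reservation or the commit
// fails.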
5769 bool CMSMarkStack::allocate(size_t size) {
5770 // allocate a stack of the requisite depth
5771 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5772 size * sizeof(oop)));
5773 if (!rs.is_reserved()) {
5774 log_warning(gc)("CMSMarkStack allocation failure");
5775 return false;
5776 }
5777 if (!_virtual_space.initialize(rs, rs.size())) {
5778 log_warning(gc)("CMSMarkStack backing store failure");
5779 return false;
5780 }
5781 assert(_virtual_space.committed_size() == rs.size(),
5782 "didn't reserve backing store for all of CMS stack?");
5783 _base = (oop*)(_virtual_space.low());
5784 _index = 0;
5785 _capacity = size;
5786 NOT_PRODUCT(_max_depth = 0);
5787 return true;
5788 }
5789
5790 // XXX FIX ME !!! In the MT case we come in here holding a
5791 // leaf lock. For printing we need to take a further lock
5792 // which has lower rank. We need to recalibrate the two
5793 // lock-ranks involved in order to be able to print the
5794 // messages below. (Or defer the printing to the caller.
5795 // For now we take the expedient path of just disabling the
5796 // messages for the problematic case.)
5797 void CMSMarkStack::expand() {
5798 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5799 if (_capacity == MarkStackSizeMax) {
5800 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5801 // We print a warning message only once per CMS cycle.
5802 log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5803 }
5804 return;
5805 }
5806 // Double capacity if possible
5807 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5808 // Do not give up existing stack until we have managed to
5809 // get the double capacity that we desired.
5810 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5811 new_capacity * sizeof(oop)));
5812 if (rs.is_reserved()) {
5813 // Release the backing store associated with old stack
5814 _virtual_space.release();
5815 // Reinitialize virtual space for new stack
5816 if (!_virtual_space.initialize(rs, rs.size())) {
5817 fatal("Not enough swap for expanded marking stack");
5818 }
5819 _base = (oop*)(_virtual_space.low());
5820 _index = 0;
5821 _capacity = new_capacity;
5822 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5823 // Failed to double capacity, continue;
5824 // we print a detail message only once per CMS cycle.
5825 log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5826 _capacity / K, new_capacity / K);
5827 }
5828 }
5829
5830
5831 // Closures
5832 // XXX: there seems to be a lot of code duplication here;
5833 // should refactor and consolidate common code.
5834
5835 // This closure is used to mark refs into the CMS generation in
5836 // the CMS bit map. Called at the first checkpoint. This closure
5837 // assumes that we do not need to re-mark dirty cards; if the CMS
5838 // generation on which this is used is not an oldest
5839 // generation then this will lose younger_gen cards!
5840
5841 MarkRefsIntoClosure::MarkRefsIntoClosure(
5842 MemRegion span, CMSBitMap* bitMap):
5843 _span(span),
5844 _bitMap(bitMap)
5845 {
5846 assert(ref_discoverer() == NULL, "deliberately left NULL");
5847 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5848 }
5849
5850 void MarkRefsIntoClosure::do_oop(oop obj) {
5851 // if p points into _span, then mark corresponding bit in _markBitMap
5852 assert(oopDesc::is_oop(obj), "expected an oop");
5853 HeapWord* addr = (HeapWord*)obj;
5854 if (_span.contains(addr)) {
5855 // this should be made more efficient
5856 _bitMap->mark(addr);
5857 }
5858 }
5859
5860 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5861 MemRegion span, CMSBitMap* bitMap):
5862 _span(span),
5863 _bitMap(bitMap)
5864 {
5865 assert(ref_discoverer() == NULL, "deliberately left NULL");
5866 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5867 }
5868
5869 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5870 // if p points into _span, then mark corresponding bit in _markBitMap
5871 assert(oopDesc::is_oop(obj), "expected an oop");
5872 HeapWord* addr = (HeapWord*)obj;
5873 if (_span.contains(addr)) {
5874 // this should be made more efficient
5875 _bitMap->par_mark(addr);
5876 }
5877 }
5878
5879 // A variant of the above, used for CMS marking verification.
5880 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5881 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5882 _span(span),
5883 _verification_bm(verification_bm),
5884 _cms_bm(cms_bm)
5885 {
5886 assert(ref_discoverer() == NULL, "deliberately left NULL");
5887 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5888 }
5889
5890 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5891 // if p points into _span, then mark corresponding bit in _markBitMap
5892 assert(oopDesc::is_oop(obj), "expected an oop");
5893 HeapWord* addr = (HeapWord*)obj;
5894 if (_span.contains(addr)) {
5895 _verification_bm->mark(addr);
5896 if (!_cms_bm->isMarked(addr)) {
5897 Log(gc, verify) log;
5898 ResourceMark rm;
5899 LogStream ls(log.error());
5900 oop(addr)->print_on(&ls);
5901 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5902 fatal("... aborting");
5903 }
5904 }
5905 }
5906
5907 //////////////////////////////////////////////////
5908 // MarkRefsIntoAndScanClosure
5909 //////////////////////////////////////////////////
5910
5911 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5912 ReferenceDiscoverer* rd,
5913 CMSBitMap* bit_map,
5914 CMSBitMap* mod_union_table,
5915 CMSMarkStack* mark_stack,
5916 CMSCollector* collector,
5917 bool should_yield,
5918 bool concurrent_precleaning):
5919 _span(span),
5920 _bit_map(bit_map),
5921 _mark_stack(mark_stack),
5922 _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
5923 mark_stack, concurrent_precleaning),
5924 _collector(collector),
5925 _freelistLock(NULL),
5926 _yield(should_yield),
5927 _concurrent_precleaning(concurrent_precleaning)
5928 {
5929 // FIXME: Should initialize in base class constructor.
5930 assert(rd != NULL, "ref_discoverer shouldn't be NULL");
5931 set_ref_discoverer_internal(rd);
5932 }
5933
5934 // This closure is used to mark refs into the CMS generation at the
5935 // second (final) checkpoint, and to scan and transitively follow
5936 // the unmarked oops. It is also used during the concurrent precleaning
5937 // phase while scanning objects on dirty cards in the CMS generation.
5938 // The marks are made in the marking bit map and the marking stack is
5939 // used for keeping the (newly) grey objects during the scan.
5940 // The parallel version (Par_...) appears further below.
5941 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5942 if (obj != NULL) {
5943 assert(oopDesc::is_oop(obj), "expected an oop");
5944 HeapWord* addr = (HeapWord*)obj;
5945 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5946 assert(_collector->overflow_list_is_empty(),
5947 "overflow list should be empty");
5948 if (_span.contains(addr) &&
5949 !_bit_map->isMarked(addr)) {
5950 // mark bit map (object is now grey)
5951 _bit_map->mark(addr);
5952 // push on marking stack (stack should be empty), and drain the
5953 // stack by applying this closure to the oops in the oops popped
5954 // from the stack (i.e. blacken the grey objects)
5955 bool res = _mark_stack->push(obj);
5956 assert(res, "Should have space to push on empty stack");
5957 do {
5958 oop new_oop = _mark_stack->pop();
5959 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
5960 assert(_bit_map->isMarked((HeapWord*)new_oop),
5961 "only grey objects on this stack");
5962 // iterate over the oops in this oop, marking and pushing
5963 // the ones in CMS heap (i.e. in _span).
5964 new_oop->oop_iterate(&_pushAndMarkClosure);
5965 // check if it's time to yield
5966 do_yield_check();
5967 } while (!_mark_stack->isEmpty() ||
5968 (!_concurrent_precleaning && take_from_overflow_list()));
5969 // if marking stack is empty, and we are not doing this
5970 // during precleaning, then check the overflow list
5971 }
5972 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5973 assert(_collector->overflow_list_is_empty(),
5974 "overflow list was drained above");
5975
5976 assert(_collector->no_preserved_marks(),
5977 "All preserved marks should have been restored above");
5978 }
5979 }
5980
5981 void MarkRefsIntoAndScanClosure::do_yield_work() {
5982 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5983 "CMS thread should hold CMS token");
5984 assert_lock_strong(_freelistLock);
5985 assert_lock_strong(_bit_map->lock());
5986 // relinquish the free_list_lock and bitMapLock()
5987 _bit_map->lock()->unlock();
5988 _freelistLock->unlock();
5989 ConcurrentMarkSweepThread::desynchronize(true);
5990 _collector->stopTimer();
5991 _collector->incrementYields();
5992
5993 // See the comment in coordinator_yield()
5994 for (unsigned i = 0;
5995 i < CMSYieldSleepCount &&
5996 ConcurrentMarkSweepThread::should_yield() &&
5997 !CMSCollector::foregroundGCIsActive();
5998 ++i) {
5999 os::sleep(Thread::current(), 1, false);
6000 }
6001
6002 ConcurrentMarkSweepThread::synchronize(true);
6003 _freelistLock->lock_without_safepoint_check();
6004 _bit_map->lock()->lock_without_safepoint_check();
6005 _collector->startTimer();
6006 }
6007
6008 ///////////////////////////////////////////////////////////
6009 // ParMarkRefsIntoAndScanClosure: a parallel version of
6010 // MarkRefsIntoAndScanClosure
6011 ///////////////////////////////////////////////////////////
6012 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6013 CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
6014 CMSBitMap* bit_map, OopTaskQueue* work_queue):
6015 _span(span),
6016 _bit_map(bit_map),
6017 _work_queue(work_queue),
6018 _low_water_mark(MIN2((work_queue->max_elems()/4),
6019 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6020 _parPushAndMarkClosure(collector, span, rd, bit_map, work_queue)
6021 {
6022 // FIXME: Should initialize in base class constructor.
6023 assert(rd != NULL, "ref_discoverer shouldn't be NULL");
6024 set_ref_discoverer_internal(rd);
6025 }
6026
6027 // This closure is used to mark refs into the CMS generation at the
6028 // second (final) checkpoint, and to scan and transitively follow
6029 // the unmarked oops. The marks are made in the marking bit map and
6030 // the work_queue is used for keeping the (newly) grey objects during
6031 // the scan phase whence they are also available for stealing by parallel
6032 // threads. Since the marking bit map is shared, updates are
6033 // synchronized (via CAS).
6034 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6035 if (obj != NULL) {
6036 // Ignore mark word because this could be an already marked oop
6037 // that may be chained at the end of the overflow list.
6038 assert(oopDesc::is_oop(obj, true), "expected an oop");
6039 HeapWord* addr = (HeapWord*)obj;
6040 if (_span.contains(addr) &&
6041 !_bit_map->isMarked(addr)) {
6042 // mark bit map (object will become grey):
6043 // It is possible for several threads to be
6044 // trying to "claim" this object concurrently;
6045 // the unique thread that succeeds in marking the
6046 // object first will do the subsequent push on
6047 // to the work queue (or overflow list).
6048 if (_bit_map->par_mark(addr)) {
6049 // push on work_queue (which may not be empty), and trim the
6050 // queue to an appropriate length by applying this closure to
6051 // the oops in the oops popped from the stack (i.e. blacken the
6052 // grey objects)
6053 bool res = _work_queue->push(obj);
6054 assert(res, "Low water mark should be less than capacity?");
6055 trim_queue(_low_water_mark);
6056 } // Else, another thread claimed the object
6057 }
6058 }
6059 }
6060
6061 // This closure is used to rescan the marked objects on the dirty cards
6062 // in the mod union table and the card table proper.
6063 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6064 oop p, MemRegion mr) {
6065
6066 size_t size = 0;
6067 HeapWord* addr = (HeapWord*)p;
6068 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6069 assert(_span.contains(addr), "we are scanning the CMS generation");
6070 // check if it's time to yield
6071 if (do_yield_check()) {
6072 // We yielded for some foreground stop-world work,
6073 // and we have been asked to abort this ongoing preclean cycle.
6074 return 0;
6075 }
6076 if (_bitMap->isMarked(addr)) {
6077 // it's marked; is it potentially uninitialized?
6078 if (p->klass_or_null_acquire() != NULL) {
6079 // an initialized object; ignore mark word in verification below
6080 // since we are running concurrent with mutators
6081 assert(oopDesc::is_oop(p, true), "should be an oop");
6082 if (p->is_objArray()) {
6083 // objArrays are precisely marked; restrict scanning
6084 // to dirty cards only.
6085 size = CompactibleFreeListSpace::adjustObjectSize(
6086 p->oop_iterate_size(_scanningClosure, mr));
6087 } else {
6088 // A non-array may have been imprecisely marked; we need
6089 // to scan object in its entirety.
6090 size = CompactibleFreeListSpace::adjustObjectSize(
6091 p->oop_iterate_size(_scanningClosure));
6092 }
6093 #ifdef ASSERT
6094 size_t direct_size =
6095 CompactibleFreeListSpace::adjustObjectSize(p->size());
6096 assert(size == direct_size, "Inconsistency in size");
6097 assert(size >= 3, "Necessary for Printezis marks to work");
6098 HeapWord* start_pbit = addr + 1;
6099 HeapWord* end_pbit = addr + size - 1;
6100 assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6101 "inconsistent Printezis mark");
6102 // Verify inner mark bits (between Printezis bits) are clear,
6103 // but don't repeat if there are multiple dirty regions for
6104 // the same object, to avoid potential O(N^2) performance.
6105 if (addr != _last_scanned_object) {
6106 _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
6107 _last_scanned_object = addr;
6108 }
6109 #endif // ASSERT
6110 } else {
6111 // An uninitialized object.
6112 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6113 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6114 size = pointer_delta(nextOneAddr + 1, addr);
6115 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6116 "alignment problem");
6117 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6118 // will dirty the card when the klass pointer is installed in the
6119 // object (signaling the completion of initialization).
6120 }
6121 } else {
6122 // Either a not yet marked object or an uninitialized object
6123 if (p->klass_or_null_acquire() == NULL) {
6124 // An uninitialized object, skip to the next card, since
6125 // we may not be able to read its P-bits yet.
6126 assert(size == 0, "Initial value");
6127 } else {
6128 // An object not (yet) reached by marking: we merely need to
6129 // compute its size so as to go look at the next block.
6130 assert(oopDesc::is_oop(p, true), "should be an oop");
6131 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6132 }
6133 }
6134 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6135 return size;
6136 }
6137
6138 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6139 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6140 "CMS thread should hold CMS token");
6141 assert_lock_strong(_freelistLock);
6142 assert_lock_strong(_bitMap->lock());
6143 // relinquish the free_list_lock and bitMapLock()
6144 _bitMap->lock()->unlock();
6145 _freelistLock->unlock();
6146 ConcurrentMarkSweepThread::desynchronize(true);
6147 _collector->stopTimer();
6148 _collector->incrementYields();
6149
6150 // See the comment in coordinator_yield()
6151 for (unsigned i = 0; i < CMSYieldSleepCount &&
6152 ConcurrentMarkSweepThread::should_yield() &&
6153 !CMSCollector::foregroundGCIsActive(); ++i) {
6154 os::sleep(Thread::current(), 1, false);
6155 }
6156
6157 ConcurrentMarkSweepThread::synchronize(true);
6158 _freelistLock->lock_without_safepoint_check();
6159 _bitMap->lock()->lock_without_safepoint_check();
6160 _collector->startTimer();
6161 }
6162
6163
6164 //////////////////////////////////////////////////////////////////
6165 // SurvivorSpacePrecleanClosure
6166 //////////////////////////////////////////////////////////////////
6167 // This (single-threaded) closure is used to preclean the oops in
6168 // the survivor spaces.
6169 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6170
6171 HeapWord* addr = (HeapWord*)p;
6172 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6173 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6174 assert(p->klass_or_null() != NULL, "object should be initialized");
6175 // an initialized object; ignore mark word in verification below
6176 // since we are running concurrent with mutators
6177 assert(oopDesc::is_oop(p, true), "should be an oop");
6178 // Note that we do not yield while we iterate over
6179 // the interior oops of p, pushing the relevant ones
6180 // on our marking stack.
6181 size_t size = p->oop_iterate_size(_scanning_closure);
6182 do_yield_check();
6183 // Observe that below, we do not abandon the preclean
6184 // phase as soon as we should; rather we empty the
6185 // marking stack before returning. This is to satisfy
6186 // some existing assertions. In general, it may be a
6187 // good idea to abort immediately and complete the marking
6188 // from the grey objects at a later time.
6189 while (!_mark_stack->isEmpty()) {
6190 oop new_oop = _mark_stack->pop();
6191 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
6192 assert(_bit_map->isMarked((HeapWord*)new_oop),
6193 "only grey objects on this stack");
6194 // iterate over the oops in this oop, marking and pushing
6195 // the ones in CMS heap (i.e. in _span).
6196 new_oop->oop_iterate(_scanning_closure);
6197 // check if it's time to yield
6198 do_yield_check();
6199 }
6200 unsigned int after_count =
6201 CMSHeap::heap()->total_collections();
6202 bool abort = (_before_count != after_count) ||
6203 _collector->should_abort_preclean();
6204 return abort ? 0 : size;
6205 }
6206
6207 void SurvivorSpacePrecleanClosure::do_yield_work() {
6208 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6209 "CMS thread should hold CMS token");
6210 assert_lock_strong(_bit_map->lock());
6211 // Relinquish the bit map lock
6212 _bit_map->lock()->unlock();
6213 ConcurrentMarkSweepThread::desynchronize(true);
6214 _collector->stopTimer();
6215 _collector->incrementYields();
6216
6217 // See the comment in coordinator_yield()
6218 for (unsigned i = 0; i < CMSYieldSleepCount &&
6219 ConcurrentMarkSweepThread::should_yield() &&
6220 !CMSCollector::foregroundGCIsActive(); ++i) {
6221 os::sleep(Thread::current(), 1, false);
6222 }
6223
6224 ConcurrentMarkSweepThread::synchronize(true);
6225 _bit_map->lock()->lock_without_safepoint_check();
6226 _collector->startTimer();
6227 }
6228
6229 // This closure is used to rescan the marked objects on the dirty cards
6230 // in the mod union table and the card table proper. In the parallel
6231 // case, although the bitMap is shared, we do a single read so the
6232 // isMarked() query is "safe".
6233 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6234 // Ignore mark word because we are running concurrent with mutators
6235 assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6236 HeapWord* addr = (HeapWord*)p;
6237 assert(_span.contains(addr), "we are scanning the CMS generation");
6238 bool is_obj_array = false;
6239 #ifdef ASSERT
6240 if (!_parallel) {
6241 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6242 assert(_collector->overflow_list_is_empty(),
6243 "overflow list should be empty");
6244
6245 }
6246 #endif // ASSERT
6247 if (_bit_map->isMarked(addr)) {
6248 // Obj arrays are precisely marked, non-arrays are not;
6249 // so we scan objArrays precisely and non-arrays in their
6250 // entirety.
6251 if (p->is_objArray()) {
6252 is_obj_array = true;
6253 if (_parallel) {
6254 p->oop_iterate(_par_scan_closure, mr);
6255 } else {
6256 p->oop_iterate(_scan_closure, mr);
6257 }
6258 } else {
6259 if (_parallel) {
6260 p->oop_iterate(_par_scan_closure);
6261 } else {
6262 p->oop_iterate(_scan_closure);
6263 }
6264 }
6265 }
6266 #ifdef ASSERT
6267 if (!_parallel) {
6268 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6269 assert(_collector->overflow_list_is_empty(),
6270 "overflow list should be empty");
6271
6272 }
6273 #endif // ASSERT
6274 return is_obj_array;
6275 }
6276
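// MarkFromRootsClosure walks the mark bit map left-to-right over _span,
// tracing each marked object it encounters; newly discovered objects are
// pushed on _markStack and drained eagerly so the stack stays shallow.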
6277 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6278 MemRegion span,
6279 CMSBitMap* bitMap, CMSMarkStack* markStack,
6280 bool should_yield, bool verifying):
6281 _collector(collector),
6282 _span(span),
6283 _bitMap(bitMap),
6284 _mut(&collector->_modUnionTable),
6285 _markStack(markStack),
6286 _yield(should_yield),
6287 _skipBits(0)
6288 {
6289 assert(_markStack->isEmpty(), "stack should be empty");
6290 _finger = _bitMap->startWord();
6291 _threshold = _finger;
6292 assert(_collector->_restart_addr == NULL, "Sanity check");
6293 assert(_span.contains(_finger), "Out of bounds _finger?");
6294 DEBUG_ONLY(_verifying = verifying;)
6295 }
6296
6297 void MarkFromRootsClosure::reset(HeapWord* addr) {
6298 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6299 assert(_span.contains(addr), "Out of bounds _finger?");
6300 _finger = addr;
6301 _threshold = align_up(_finger, CardTable::card_size);
6302 }
6303
6304 // Should revisit to see if this should be restructured for
6305 // greater efficiency.
6306 bool MarkFromRootsClosure::do_bit(size_t offset) {
6307 if (_skipBits > 0) {
6308 _skipBits--;
6309 return true;
6310 }
6311 // convert offset into a HeapWord*
6312 HeapWord* addr = _bitMap->startWord() + offset;
6313 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6314 "address out of range");
6315 assert(_bitMap->isMarked(addr), "tautology");
6316 if (_bitMap->isMarked(addr+1)) {
6317 // this is an allocated but not yet initialized object
6318 assert(_skipBits == 0, "tautology");
6319 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6320 oop p = oop(addr);
6321 if (p->klass_or_null_acquire() == NULL) {
6322 DEBUG_ONLY(if (!_verifying) {)
6323 // We re-dirty the cards on which this object lies and increase
6324 // the _threshold so that we'll come back to scan this object
6325 // during the preclean or remark phase. (CMSCleanOnEnter)
6326 if (CMSCleanOnEnter) {
6327 size_t sz = _collector->block_size_using_printezis_bits(addr);
6328 HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
6329 MemRegion redirty_range = MemRegion(addr, end_card_addr);
6330 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6331 // Bump _threshold to end_card_addr; note that
6332 // _threshold cannot possibly exceed end_card_addr, anyhow.
6333 // This prevents future clearing of the card as the scan proceeds
6334 // to the right.
6335 assert(_threshold <= end_card_addr,
6336 "Because we are just scanning into this object");
6337 if (_threshold < end_card_addr) {
6338 _threshold = end_card_addr;
6339 }
6340 if (p->klass_or_null_acquire() != NULL) {
6341 // Redirty the range of cards...
6342 _mut->mark_range(redirty_range);
6343 } // ...else the setting of klass will dirty the card anyway.
6344 }
6345 DEBUG_ONLY(})
6346 return true;
6347 }
6348 }
6349 scanOopsInOop(addr);
6350 return true;
6351 }
6352
6353 // We take a break if we've been at this for a while,
6354 // so as to avoid monopolizing the locks involved.
6355 void MarkFromRootsClosure::do_yield_work() {
6356 // First give up the locks, then yield, then re-lock
6357 // We should probably use a constructor/destructor idiom to
6358 // do this unlock/lock or modify the MutexUnlocker class to
6359 // serve our purpose. XXX
6360 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6361 "CMS thread should hold CMS token");
6362 assert_lock_strong(_bitMap->lock());
6363 _bitMap->lock()->unlock();
6364 ConcurrentMarkSweepThread::desynchronize(true);
6365 _collector->stopTimer();
6366 _collector->incrementYields();
6367
6368 // See the comment in coordinator_yield()
6369 for (unsigned i = 0; i < CMSYieldSleepCount &&
6370 ConcurrentMarkSweepThread::should_yield() &&
6371 !CMSCollector::foregroundGCIsActive(); ++i) {
6372 os::sleep(Thread::current(), 1, false);
6373 }
6374
6375 ConcurrentMarkSweepThread::synchronize(true);
6376 _bitMap->lock()->lock_without_safepoint_check();
6377 _collector->startTimer();
6378 }
6379
6380 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6381 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6382 assert(_markStack->isEmpty(),
6383 "should drain stack to limit stack usage");
6384 // convert ptr to an oop preparatory to scanning
6385 oop obj = oop(ptr);
6386 // Ignore mark word in verification below, since we
6387 // may be running concurrent with mutators.
6388 assert(oopDesc::is_oop(obj, true), "should be an oop");
6389 assert(_finger <= ptr, "_finger runneth ahead");
6390 // advance the finger to right end of this object
6391 _finger = ptr + obj->size();
6392 assert(_finger > ptr, "we just incremented it above");
6393 // On large heaps, it may take us some time to get through
6394 // the marking phase. During
6395 // this time it's possible that a lot of mutations have
6396 // accumulated in the card table and the mod union table --
6397 // these mutation records are redundant until we have
6398 // actually traced into the corresponding card.
6399 // Here, we check whether advancing the finger would make
6400 // us cross into a new card, and if so clear corresponding
6401 // cards in the MUT (preclean them in the card-table in the
6402 // future).
6403
6404 DEBUG_ONLY(if (!_verifying) {)
6405 // The clean-on-enter optimization is disabled by default,
6406 // until we fix 6178663.
6407 if (CMSCleanOnEnter && (_finger > _threshold)) {
6408 // [_threshold, _finger) represents the interval
6409 // of cards to be cleared in MUT (or precleaned in card table).
6410 // The set of cards to be cleared is all those that overlap
6411 // with the interval [_threshold, _finger); note that
6412 // _threshold is always kept card-aligned but _finger isn't
6413 // always card-aligned.
6414 HeapWord* old_threshold = _threshold;
6415 assert(is_aligned(old_threshold, CardTable::card_size),
6416 "_threshold should always be card-aligned");
6417 _threshold = align_up(_finger, CardTable::card_size);
6418 MemRegion mr(old_threshold, _threshold);
6419 assert(!mr.is_empty(), "Control point invariant");
6420 assert(_span.contains(mr), "Should clear within span");
6421 _mut->clear_range(mr);
6422 }
6423 DEBUG_ONLY(})
6424 // Note: the finger doesn't advance while we drain
6425 // the stack below.
6426 PushOrMarkClosure pushOrMarkClosure(_collector,
6427 _span, _bitMap, _markStack,
6428 _finger, this);
6429 bool res = _markStack->push(obj);
6430 assert(res, "Empty non-zero size stack should have space for single push");
6431 while (!_markStack->isEmpty()) {
6432 oop new_oop = _markStack->pop();
6433 // Skip verifying header mark word below because we are
6434 // running concurrent with mutators.
6435 assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
6436 // now scan this oop's oops
6437 new_oop->oop_iterate(&pushOrMarkClosure);
6438 do_yield_check();
6439 }
6440 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6441 }
6442
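// Parallel counterpart of MarkFromRootsClosure: each worker traces the
// marked objects in its assigned span, keeping grey objects on its own
// work queue and falling back to the shared overflow stack when the
// queue fills up.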
6443 ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6444 CMSCollector* collector, MemRegion span,
6445 CMSBitMap* bit_map,
6446 OopTaskQueue* work_queue,
6447 CMSMarkStack* overflow_stack):
6448 _collector(collector),
6449 _whole_span(collector->_span),
6450 _span(span),
6451 _bit_map(bit_map),
6452 _mut(&collector->_modUnionTable),
6453 _work_queue(work_queue),
6454 _overflow_stack(overflow_stack),
6455 _skip_bits(0),
6456 _task(task)
6457 {
6458 assert(_work_queue->size() == 0, "work_queue should be empty");
6459 _finger = span.start();
6460 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
6461 assert(_span.contains(_finger), "Out of bounds _finger?");
6462 }
6463
6464 // Should revisit to see if this should be restructured for
6465 // greater efficiency.
6466 bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6467 if (_skip_bits > 0) {
6468 _skip_bits--;
6469 return true;
6470 }
6471 // convert offset into a HeapWord*
6472 HeapWord* addr = _bit_map->startWord() + offset;
6473 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6474 "address out of range");
6475 assert(_bit_map->isMarked(addr), "tautology");
6476 if (_bit_map->isMarked(addr+1)) {
6477 // this is an allocated object that might not yet be initialized
6478 assert(_skip_bits == 0, "tautology");
6479 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
6480 oop p = oop(addr);
6481 if (p->klass_or_null_acquire() == NULL) {
6482 // in the case of Clean-on-Enter optimization, redirty card
6483 // and avoid clearing card by increasing the threshold.
6484 return true;
6485 }
6486 }
6487 scan_oops_in_oop(addr);
6488 return true;
6489 }
6490
6491 void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6492 assert(_bit_map->isMarked(ptr), "expected bit to be set");
6493 // Should we assert that our work queue is empty or
6494 // below some drain limit?
6495 assert(_work_queue->size() == 0,
6496 "should drain stack to limit stack usage");
6497 // convert ptr to an oop preparatory to scanning
6498 oop obj = oop(ptr);
6499 // Ignore mark word in verification below, since we
6500 // may be running concurrent with mutators.
6501 assert(oopDesc::is_oop(obj, true), "should be an oop");
6502 assert(_finger <= ptr, "_finger runneth ahead");
6503 // advance the finger to right end of this object
6504 _finger = ptr + obj->size();
6505 assert(_finger > ptr, "we just incremented it above");
6506 // On large heaps, it may take us some time to get through
6507 // the marking phase. During
6508 // this time it's possible that a lot of mutations have
6509 // accumulated in the card table and the mod union table --
6510 // these mutation records are redundant until we have
6511 // actually traced into the corresponding card.
6512 // Here, we check whether advancing the finger would make
6513 // us cross into a new card, and if so clear corresponding
6514 // cards in the MUT (preclean them in the card-table in the
6515 // future).
6516
6517 // The clean-on-enter optimization is disabled by default,
6518 // until we fix 6178663.
6519 if (CMSCleanOnEnter && (_finger > _threshold)) {
6520 // [_threshold, _finger) represents the interval
6521 // of cards to be cleared in MUT (or precleaned in card table).
6522 // The set of cards to be cleared is all those that overlap
6523 // with the interval [_threshold, _finger); note that
6524 // _threshold is always kept card-aligned but _finger isn't
6525 // always card-aligned.
6526 HeapWord* old_threshold = _threshold;
6527 assert(is_aligned(old_threshold, CardTable::card_size),
6528 "_threshold should always be card-aligned");
6529 _threshold = align_up(_finger, CardTable::card_size);
6530 MemRegion mr(old_threshold, _threshold);
6531 assert(!mr.is_empty(), "Control point invariant");
6532 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6533 _mut->clear_range(mr);
6534 }
6535
6536 // Note: the local finger doesn't advance while we drain
6537 // the stack below, but the global finger sure can and will.
6538 HeapWord* volatile* gfa = _task->global_finger_addr();
6539 ParPushOrMarkClosure pushOrMarkClosure(_collector,
6540 _span, _bit_map,
6541 _work_queue,
6542 _overflow_stack,
6543 _finger,
6544 gfa, this);
6545 bool res = _work_queue->push(obj); // overflow could occur here
6546 assert(res, "Will hold once we use workqueues");
6547 while (true) {
6548 oop new_oop;
6549 if (!_work_queue->pop_local(new_oop)) {
6550 // We emptied our work_queue; check if there's stuff that can
6551 // be gotten from the overflow stack.
6552 if (CMSConcMarkingTask::get_work_from_overflow_stack(
6553 _overflow_stack, _work_queue)) {
6554 do_yield_check();
6555 continue;
6556 } else { // done
6557 break;
6558 }
6559 }
6560 // Skip verifying header mark word below because we are
6561 // running concurrent with mutators.
6562 assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
6563 // now scan this oop's oops
6564 new_oop->oop_iterate(&pushOrMarkClosure);
6565 do_yield_check();
6566 }
6567 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6568 }
6569
6570 // Yield in response to a request from VM Thread or
6571 // from mutators.
6572 void ParMarkFromRootsClosure::do_yield_work() {
6573 assert(_task != NULL, "sanity");
6574 _task->yield();
6575 }
6576
6577 // A variant of the above used for verifying CMS marking work.
6578 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6579 MemRegion span,
6580 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6581 CMSMarkStack* mark_stack):
6582 _collector(collector),
6583 _span(span),
6584 _verification_bm(verification_bm),
6585 _cms_bm(cms_bm),
6586 _mark_stack(mark_stack),
6587 _pam_verify_closure(collector, span, verification_bm, cms_bm,
6588 mark_stack)
6589 {
6590 assert(_mark_stack->isEmpty(), "stack should be empty");
6591 _finger = _verification_bm->startWord();
6592 assert(_collector->_restart_addr == NULL, "Sanity check");
6593 assert(_span.contains(_finger), "Out of bounds _finger?");
6594 }
6595
6596 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6597 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6598 assert(_span.contains(addr), "Out of bounds _finger?");
6599 _finger = addr;
6600 }
6601
6602 // Should revisit to see if this should be restructured for
6603 // greater efficiency.
6604 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6605 // convert offset into a HeapWord*
6606 HeapWord* addr = _verification_bm->startWord() + offset;
6607 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6608 "address out of range");
6609 assert(_verification_bm->isMarked(addr), "tautology");
6610 assert(_cms_bm->isMarked(addr), "tautology");
6611
6612 assert(_mark_stack->isEmpty(),
6613 "should drain stack to limit stack usage");
6614 // convert addr to an oop preparatory to scanning
6615 oop obj = oop(addr);
6616 assert(oopDesc::is_oop(obj), "should be an oop");
6617 assert(_finger <= addr, "_finger runneth ahead");
6618 // advance the finger to right end of this object
6619 _finger = addr + obj->size();
6620 assert(_finger > addr, "we just incremented it above");
6621 // Note: the finger doesn't advance while we drain
6622 // the stack below.
6623 bool res = _mark_stack->push(obj);
6624 assert(res, "Empty non-zero size stack should have space for single push");
6625 while (!_mark_stack->isEmpty()) {
6626 oop new_oop = _mark_stack->pop();
6627 assert(oopDesc::is_oop(new_oop), "Oops! expected to pop an oop");
6628 // now scan this oop's oops
6629 new_oop->oop_iterate(&_pam_verify_closure);
6630 }
6631 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6632 return true;
6633 }
6634
6635 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6636 CMSCollector* collector, MemRegion span,
6637 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6638 CMSMarkStack* mark_stack):
6639 MetadataVisitingOopIterateClosure(collector->ref_processor()),
6640 _collector(collector),
6641 _span(span),
6642 _verification_bm(verification_bm),
6643 _cms_bm(cms_bm),
6644 _mark_stack(mark_stack)
6645 { }
6646
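// Load the (possibly compressed) oop through the Access API and forward
// to the common do_oop(oop) below.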
6647 template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) {
6648 oop obj = RawAccess<>::oop_load(p);
6649 do_oop(obj);
6650 }
6651
6652 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6653 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6654
6655 // Upon stack overflow, we discard (part of) the stack,
6656 // remembering the least address amongst those discarded
6657 // in CMSCollector's _restart_address.
6658 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6659 // Remember the least grey address discarded
6660 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6661 _collector->lower_restart_addr(ra);
6662 _mark_stack->reset(); // discard stack contents
6663 _mark_stack->expand(); // expand the stack if possible
6664 }
6665
6666 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6667 assert(oopDesc::is_oop_or_null(obj), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6668 HeapWord* addr = (HeapWord*)obj;
6669 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6670 // Oop lies in _span and isn't yet grey or black
6671 _verification_bm->mark(addr); // now grey
6672 if (!_cms_bm->isMarked(addr)) {
6673 Log(gc, verify) log;
6674 ResourceMark rm;
6675 LogStream ls(log.error());
6676 oop(addr)->print_on(&ls);
6677 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6678 fatal("... aborting");
6679 }
6680
6681 if (!_mark_stack->push(obj)) { // stack overflow
6682 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6683 assert(_mark_stack->isFull(), "Else push should have succeeded");
6684 handle_stack_overflow(addr);
6685 }
6686 // anything including and to the right of _finger
6687 // will be scanned as we iterate over the remainder of the
6688 // bit map
6689 }
6690 }
6691
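// PushOrMarkClosure is applied to the interior oops of objects traced by
// MarkFromRootsClosure: it greys any unvisited object in _span and, if
// that object lies below the current finger, pushes it on the marking
// stack for later scanning.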
6692 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6693 MemRegion span,
6694 CMSBitMap* bitMap, CMSMarkStack* markStack,
6695 HeapWord* finger, MarkFromRootsClosure* parent) :
6696 MetadataVisitingOopIterateClosure(collector->ref_processor()),
6697 _collector(collector),
6698 _span(span),
6699 _bitMap(bitMap),
6700 _markStack(markStack),
6701 _finger(finger),
6702 _parent(parent)
6703 { }
6704
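// Parallel version of the above: it greys objects in the collector's
// whole span and pushes an object only if it is below the global finger
// and not ahead of the worker's local finger within its own chunk;
// overflow from the work queue spills to the shared overflow stack.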
6705 ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6706 MemRegion span,
6707 CMSBitMap* bit_map,
6708 OopTaskQueue* work_queue,
6709 CMSMarkStack* overflow_stack,
6710 HeapWord* finger,
6711 HeapWord* volatile* global_finger_addr,
6712 ParMarkFromRootsClosure* parent) :
6713 MetadataVisitingOopIterateClosure(collector->ref_processor()),
6714 _collector(collector),
6715 _whole_span(collector->_span),
6716 _span(span),
6717 _bit_map(bit_map),
6718 _work_queue(work_queue),
6719 _overflow_stack(overflow_stack),
6720 _finger(finger),
6721 _global_finger_addr(global_finger_addr),
6722 _parent(parent)
6723 { }
6724
6725 // Assumes thread-safe access by callers, who are
6726 // responsible for mutual exclusion.
6727 void CMSCollector::lower_restart_addr(HeapWord* low) {
6728 assert(_span.contains(low), "Out of bounds addr");
6729 if (_restart_addr == NULL) {
6730 _restart_addr = low;
6731 } else {
6732 _restart_addr = MIN2(_restart_addr, low);
6733 }
6734 }
6735
6736 // Upon stack overflow, we discard (part of) the stack,
6737 // remembering the least address amongst those discarded
6738 // in CMSCollector's _restart_address.
6739 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6740 // Remember the least grey address discarded
6741 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6742 _collector->lower_restart_addr(ra);
6743 _markStack->reset(); // discard stack contents
6744 _markStack->expand(); // expand the stack if possible
6745 }
6746
6747 // Upon stack overflow, we discard (part of) the stack,
6748 // remembering the least address amongst those discarded
6749 // in CMSCollector's _restart_address.
6750 void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6751 // We need to do this under a mutex to prevent other
6752 // workers from interfering with the work done below.
6753 MutexLocker ml(_overflow_stack->par_lock(),
6754 Mutex::_no_safepoint_check_flag);
6755 // Remember the least grey address discarded
6756 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6757 _collector->lower_restart_addr(ra);
6758 _overflow_stack->reset(); // discard stack contents
6759 _overflow_stack->expand(); // expand the stack if possible
6760 }
6761
6762 void PushOrMarkClosure::do_oop(oop obj) {
6763 // Ignore mark word because we are running concurrent with mutators.
6764 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6765 HeapWord* addr = (HeapWord*)obj;
6766 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6767 // Oop lies in _span and isn't yet grey or black
6768 _bitMap->mark(addr); // now grey
6769 if (addr < _finger) {
6770 // the bit map iteration has already either passed, or
6771 // sampled, this bit in the bit map; we'll need to
6772 // use the marking stack to scan this oop's oops.
6773 bool simulate_overflow = false;
6774 NOT_PRODUCT(
6775 if (CMSMarkStackOverflowALot &&
6776 _collector->simulate_overflow()) {
6777 // simulate a stack overflow
6778 simulate_overflow = true;
6779 }
6780 )
6781 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6782 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6783 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6784 handle_stack_overflow(addr);
6785 }
6786 }
6787 // anything including and to the right of _finger
6788 // will be scanned as we iterate over the remainder of the
6789 // bit map
6790 do_yield_check();
6791 }
6792 }
6793
6794 void ParPushOrMarkClosure::do_oop(oop obj) {
6795 // Ignore mark word because we are running concurrent with mutators.
6796 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6797 HeapWord* addr = (HeapWord*)obj;
6798 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6799 // Oop lies in _whole_span and isn't yet grey or black
6800 // We read the global_finger (volatile read) strictly after marking oop
6801 bool res = _bit_map->par_mark(addr); // now grey
6802 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6803 // Should we push this marked oop on our stack?
6804 // -- if someone else marked it, nothing to do
6805 // -- if target oop is above global finger nothing to do
6806 // -- if target oop is in chunk and above local finger
6807 // then nothing to do
6808 // -- else push on work queue
6809 if ( !res // someone else marked it, they will deal with it
6810 || (addr >= *gfa) // will be scanned in a later task
6811 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6812 return;
6813 }
6814 // the bit map iteration has already either passed, or
6815 // sampled, this bit in the bit map; we'll need to
6816 // use the marking stack to scan this oop's oops.
6817 bool simulate_overflow = false;
6818 NOT_PRODUCT(
6819 if (CMSMarkStackOverflowALot &&
6820 _collector->simulate_overflow()) {
6821 // simulate a stack overflow
6822 simulate_overflow = true;
6823 }
6824 )
6825 if (simulate_overflow ||
6826 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6827 // stack overflow
6828 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6829 // We cannot assert that the overflow stack is full because
6830 // it may have been emptied since.
6831 assert(simulate_overflow ||
6832 _work_queue->size() == _work_queue->max_elems(),
6833 "Else push should have succeeded");
6834 handle_stack_overflow(addr);
6835 }
6836 do_yield_check();
6837 }
6838 }
6839
6840 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6841 MemRegion span,
6842 ReferenceDiscoverer* rd,
6843 CMSBitMap* bit_map,
6844 CMSBitMap* mod_union_table,
6845 CMSMarkStack* mark_stack,
6846 bool concurrent_precleaning):
6847 MetadataVisitingOopIterateClosure(rd),
6848 _collector(collector),
6849 _span(span),
6850 _bit_map(bit_map),
6851 _mod_union_table(mod_union_table),
6852 _mark_stack(mark_stack),
6853 _concurrent_precleaning(concurrent_precleaning)
6854 {
6855 assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
6856 }
6857
6858 // Grey object rescan during pre-cleaning and second checkpoint phases --
6859 // the non-parallel version (the parallel version appears further below.)
6860 void PushAndMarkClosure::do_oop(oop obj) {
6861 // Ignore mark word verification. If during concurrent precleaning,
6862 // the object monitor may be locked. If during the checkpoint
6863 // phases, the object may already have been reached by a different
6864 // path and may be at the end of the global overflow list (so
6865 // the mark word may be NULL).
6866 assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
6867 "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6868 HeapWord* addr = (HeapWord*)obj;
6869 // Check if oop points into the CMS generation
6870 // and is not marked
6871 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6872 // a white object ...
6873 _bit_map->mark(addr); // ... now grey
6874 // push on the marking stack (grey set)
6875 bool simulate_overflow = false;
6876 NOT_PRODUCT(
6877 if (CMSMarkStackOverflowALot &&
6878 _collector->simulate_overflow()) {
6879 // simulate a stack overflow
6880 simulate_overflow = true;
6881 }
6882 )
6883 if (simulate_overflow || !_mark_stack->push(obj)) {
6884 if (_concurrent_precleaning) {
6885 // During precleaning we can just dirty the appropriate card(s)
6886 // in the mod union table, thus ensuring that the object remains
6887 // in the grey set, and then continue. In the case of object arrays
6888 // we need to dirty all of the cards that the object spans,
6889 // since the rescan of object arrays will be limited to the
6890 // dirty cards.
6891 // Note that no one can be interfering with us in this action
6892 // of dirtying the mod union table, so no locking or atomics
6893 // are required.
6894 if (obj->is_objArray()) {
6895 size_t sz = obj->size();
6896 HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
6897 MemRegion redirty_range = MemRegion(addr, end_card_addr);
6898 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6899 _mod_union_table->mark_range(redirty_range);
6900 } else {
6901 _mod_union_table->mark(addr);
6902 }
6903 _collector->_ser_pmc_preclean_ovflw++;
6904 } else {
6905 // During the remark phase, we need to remember this oop
6906 // in the overflow list.
6907 _collector->push_on_overflow_list(obj);
6908 _collector->_ser_pmc_remark_ovflw++;
6909 }
6910 }
6911 }
6912 }
6913
6914 ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6915 MemRegion span,
6916 ReferenceDiscoverer* rd,
6917 CMSBitMap* bit_map,
6918 OopTaskQueue* work_queue):
6919 MetadataVisitingOopIterateClosure(rd),
6920 _collector(collector),
6921 _span(span),
6922 _bit_map(bit_map),
6923 _work_queue(work_queue)
6924 {
6925 assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
6926 }
6927
6928 // Grey object rescan during second checkpoint phase --
6929 // the parallel version.
6930 void ParPushAndMarkClosure::do_oop(oop obj) {
6931 // In the assert below, we ignore the mark word because
6932 // this oop may point to an already visited object that is
6933 // on the overflow stack (in which case the mark word has
6934 // been hijacked for chaining into the overflow stack --
6935 // if this is the last object in the overflow stack then
6936 // its mark word will be NULL). Because this object may
6937 // have been subsequently popped off the global overflow
6938 // stack, and the mark word possibly restored to the prototypical
6939 // value, by the time we get to examine this failing assert in
6940 // the debugger, is_oop_or_null(false) may subsequently start
6941 // to hold.
6942 assert(oopDesc::is_oop_or_null(obj, true),
6943 "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6944 HeapWord* addr = (HeapWord*)obj;
6945 // Check if oop points into the CMS generation
6946 // and is not marked
6947 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6948 // a white object ...
6949 // If we manage to "claim" the object, by being the
6950 // first thread to mark it, then we push it on our
6951 // marking stack
6952 if (_bit_map->par_mark(addr)) { // ... now grey
6953 // push on work queue (grey set)
6954 bool simulate_overflow = false;
6955 NOT_PRODUCT(
6956 if (CMSMarkStackOverflowALot &&
6957 _collector->par_simulate_overflow()) {
6958 // simulate a stack overflow
6959 simulate_overflow = true;
6960 }
6961 )
6962 if (simulate_overflow || !_work_queue->push(obj)) {
6963 _collector->par_push_on_overflow_list(obj);
6964 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
6965 }
6966 } // Else, some other thread got there first
6967 }
6968 }
6969
6970 void CMSPrecleanRefsYieldClosure::do_yield_work() {
6971 Mutex* bml = _collector->bitMapLock();
6972 assert_lock_strong(bml);
6973 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6974 "CMS thread should hold CMS token");
6975
6976 bml->unlock();
6977 ConcurrentMarkSweepThread::desynchronize(true);
6978
6979 _collector->stopTimer();
6980 _collector->incrementYields();
6981
6982 // See the comment in coordinator_yield()
6983 for (unsigned i = 0; i < CMSYieldSleepCount &&
6984 ConcurrentMarkSweepThread::should_yield() &&
6985 !CMSCollector::foregroundGCIsActive(); ++i) {
6986 os::sleep(Thread::current(), 1, false);
6987 }
6988
6989 ConcurrentMarkSweepThread::synchronize(true);
6990 bml->lock();
6991
6992 _collector->startTimer();
6993 }
6994
6995 bool CMSPrecleanRefsYieldClosure::should_return() {
6996 if (ConcurrentMarkSweepThread::should_yield()) {
6997 do_yield_work();
6998 }
6999 return _collector->foregroundGCIsActive();
7000 }
7001
7002 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7003 assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
7004 "mr should be aligned to start at a card boundary");
7005 // We'd like to assert:
7006 // assert(mr.word_size()%CardTable::card_size_in_words == 0,
7007 // "mr should be a range of cards");
7008 // However, that would be too strong in one case -- the last
7009 // partition ends at _unallocated_block which, in general, can be
7010 // an arbitrary boundary, not necessarily card aligned.
7011 _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
7012 _space->object_iterate_mem(mr, &_scan_cl);
7013 }
7014
7015 SweepClosure::SweepClosure(CMSCollector* collector,
7016 ConcurrentMarkSweepGeneration* g,
7017 CMSBitMap* bitMap, bool should_yield) :
7018 _collector(collector),
7019 _g(g),
7020 _sp(g->cmsSpace()),
7021 _limit(_sp->sweep_limit()),
7022 _freelistLock(_sp->freelistLock()),
7023 _bitMap(bitMap),
7024 _inFreeRange(false), // No free range at beginning of sweep
7025 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7026 _lastFreeRangeCoalesced(false),
7027 _yield(should_yield),
7028 _freeFinger(g->used_region().start())
7029 {
7030 NOT_PRODUCT(
7031 _numObjectsFreed = 0;
7032 _numWordsFreed = 0;
7033 _numObjectsLive = 0;
7034 _numWordsLive = 0;
7035 _numObjectsAlreadyFree = 0;
7036 _numWordsAlreadyFree = 0;
7037 _last_fc = NULL;
7038
7039 _sp->initializeIndexedFreeListArrayReturnedBytes();
7040 _sp->dictionary()->initialize_dict_returned_bytes();
7041 )
7042 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7043 "sweep _limit out of bounds");
7044 log_develop_trace(gc, sweep)("====================");
7045 log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7046 }
7047
7048 void SweepClosure::print_on(outputStream* st) const {
7049 st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7050 p2i(_sp->bottom()), p2i(_sp->end()));
7051 st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7052 st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7053 NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7054 st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7055 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7056 }
7057
7058 #ifndef PRODUCT
7059 // Assertion checking only: no useful work in product mode --
7060 // however, if any of the flags below become product flags,
7061 // you may need to review this code to see if it needs to be
7062 // enabled in product mode.
7063 SweepClosure::~SweepClosure() {
7064 assert_lock_strong(_freelistLock);
7065 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7066 "sweep _limit out of bounds");
7067 if (inFreeRange()) {
7068 Log(gc, sweep) log;
7069 log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7070 ResourceMark rm;
7071 LogStream ls(log.error());
7072 print_on(&ls);
7073 ShouldNotReachHere();
7074 }
7075
7076 if (log_is_enabled(Debug, gc, sweep)) {
7077 log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7078 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7079 log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7080 _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7081 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7082 log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7083 }
7084
7085 if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7086 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7087 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7088 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7089 log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes Indexed List Returned " SIZE_FORMAT " bytes Dictionary Returned " SIZE_FORMAT " bytes",
7090 returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7091 }
7092 log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7093 log_develop_trace(gc, sweep)("================");
7094 }
7095 #endif // PRODUCT
7096
7097 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7098 bool freeRangeInFreeLists) {
7099 log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7100 p2i(freeFinger), freeRangeInFreeLists);
7101 assert(!inFreeRange(), "Trampling existing free range");
7102 set_inFreeRange(true);
7103 set_lastFreeRangeCoalesced(false);
7104
7105 set_freeFinger(freeFinger);
7106 set_freeRangeInFreeLists(freeRangeInFreeLists);
7107 if (CMSTestInFreeList) {
7108 if (freeRangeInFreeLists) {
7109 FreeChunk* fc = (FreeChunk*) freeFinger;
7110 assert(fc->is_free(), "A chunk on the free list should be free.");
7111 assert(fc->size() > 0, "Free range should have a size");
7112 assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7113 }
7114 }
7115 }
7116
7117 // Note that the sweeper runs concurrently with mutators. Thus,
7118 // it is possible for direct allocation in this generation to happen
7119 // in the middle of the sweep. Note that the sweeper also coalesces
7120 // contiguous free blocks. Thus, unless the sweeper and the allocator
7121 // synchronize appropriately, freshly allocated blocks may get swept up.
7122 // This is accomplished by the sweeper locking the free lists while
7123 // it is sweeping. Thus blocks that are determined to be free are
7124 // indeed free. There is however one additional complication:
7125 // blocks that have been allocated since the final checkpoint and
7126 // mark, will not have been marked and so would be treated as
7127 // unreachable and swept up. To prevent this, the allocator marks
7128 // the bit map when allocating during the sweep phase. This leads,
7129 // however, to a further complication -- objects may have been allocated
7130 // but not yet initialized -- in the sense that the header isn't yet
7131 // installed. The sweeper cannot then determine the size of the block
7132 // in order to skip over it. To deal with this case, we use a technique
7133 // (due to Printezis) to encode such uninitialized block sizes in the
7134 // bit map. Since the bit map uses one bit per HeapWord and the
7135 // CMS generation has a minimum object size of 3 HeapWords, it follows
7136 // that "normal marks" won't be adjacent in the bit map (there will
7137 // always be at least two 0 bits between successive 1 bits). We make use
7138 // of these "unused" bits to represent uninitialized blocks -- the bit
7139 // corresponding to the start of the uninitialized object and the next
7140 // bit are both set. Finally, a 1 bit marks the end of the object that
7141 // started with the two consecutive 1 bits to indicate its potentially
7142 // uninitialized state.
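// For example (hypothetical addresses): a 5-HeapWord uninitialized block
// starting at address A has the bits for A, A+1 and A+4 set. Seeing A+1
// marked tells the sweeper the block is uninitialized; the next set bit at
// or after A+2 is A+4, so the block size is (A+4) + 1 - A = 5 words. This
// is exactly the computation performed in do_live_chunk() below.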
7143
7144 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7145 FreeChunk* fc = (FreeChunk*)addr;
7146 size_t res;
7147
7148 // Check if we are done sweeping. Below we check "addr >= _limit" rather
7149 // than "addr == _limit" because although _limit was a block boundary when
7150 // we started the sweep, it may no longer be one because heap expansion
7151 // may have caused us to coalesce the block ending at the address _limit
7152 // with a newly expanded chunk (this happens when _limit was set to the
7153 // previous _end of the space), so we may have stepped past _limit:
7154 // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7155 if (addr >= _limit) { // we have swept up to or past the limit: finish up
7156 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7157 "sweep _limit out of bounds");
7158 assert(addr < _sp->end(), "addr out of bounds");
7159 // Flush any free range we might be holding as a single
7160 // coalesced chunk to the appropriate free list.
7161 if (inFreeRange()) {
7162 assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7163 "freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger()));
7164 flush_cur_free_chunk(freeFinger(),
7165 pointer_delta(addr, freeFinger()));
7166 log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7167 p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7168 lastFreeRangeCoalesced() ? 1 : 0);
7169 }
7170
7171 // help the iterator loop finish
7172 return pointer_delta(_sp->end(), addr);
7173 }
7174
7175 assert(addr < _limit, "sweep invariant");
7176 // check if we should yield
7177 do_yield_check(addr);
7178 if (fc->is_free()) {
7179 // Chunk that is already free
7180 res = fc->size();
7181 do_already_free_chunk(fc);
7182 debug_only(_sp->verifyFreeLists());
7183 // If we flush the chunk at hand in lookahead_and_flush()
7184 // and it's coalesced with a preceding chunk, then the
7185 // process of "mangling" the payload of the coalesced block
7186 // will cause erasure of the size information from the
7187 // (erstwhile) header of all the coalesced blocks but the
7188 // first, so the first disjunct in the assert will not hold
7189 // in that specific case (in which case the second disjunct
7190 // will hold).
7191 assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7192 "Otherwise the size info doesn't change at this step");
7193 NOT_PRODUCT(
7194 _numObjectsAlreadyFree++;
7195 _numWordsAlreadyFree += res;
7196 )
7197 NOT_PRODUCT(_last_fc = fc;)
7198 } else if (!_bitMap->isMarked(addr)) {
7199 // Chunk is fresh garbage
7200 res = do_garbage_chunk(fc);
7201 debug_only(_sp->verifyFreeLists());
7202 NOT_PRODUCT(
7203 _numObjectsFreed++;
7204 _numWordsFreed += res;
7205 )
7206 } else {
7207 // Chunk that is alive.
7208 res = do_live_chunk(fc);
7209 debug_only(_sp->verifyFreeLists());
7210 NOT_PRODUCT(
7211 _numObjectsLive++;
7212 _numWordsLive += res;
7213 )
7214 }
7215 return res;
7216 }
7217
7218 // For the smart allocation, record the following:
7219 // split deaths - a free chunk is removed from its free list because
7220 // it is being split into two or more chunks.
7221 // split birth - a free chunk is being added to its free list because
7222 // a larger free chunk has been split and resulted in this free chunk.
7223 // coal death - a free chunk is being removed from its free list because
7224 // it is being coalesced into a large free chunk.
7225 // coal birth - a free chunk is being added to its free list because
7226 // it was created when two or more free chunks were coalesced into
7227 // this free chunk.
7228 //
7229 // These statistics are used to determine the desired number of free
7230 // chunks of a given size. The desired number is chosen to be relative
7231 // to the end of a CMS sweep. The desired number at the end of a sweep
7232 // is the
7233 // count-at-end-of-previous-sweep (an amount that was enough)
7234 // - count-at-beginning-of-current-sweep (the excess)
7235 // + split-births (gains in this size during interval)
7236 // - split-deaths (demands on this size during interval)
7237 // where the interval is from the end of one sweep to the end of the
7238 // next.
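// For example (hypothetical counts): if a given size had 50 chunks at the
// end of the previous sweep, 20 at the beginning of this sweep, and saw
// 15 split births and 10 split deaths in the interval, the desired count
// at the end of this sweep is 50 - 20 + 15 - 10 = 35.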
7239 //
7240 // When sweeping, the sweeper maintains an accumulated chunk which is
7241 // the chunk that is made up of chunks that have been coalesced. That
7242 // will be termed the left-hand chunk. A new chunk of garbage that
7243 // is being considered for coalescing will be referred to as the
7244 // right-hand chunk.
7245 //
7246 // When making a decision on whether to coalesce a right-hand chunk with
7247 // the current left-hand chunk, the current count vs. the desired count
7248 // of the left-hand chunk is considered. Also if the right-hand chunk
7249 // is near the large chunk at the end of the heap (see
7250 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7251 // left-hand chunk is coalesced.
7252 //
7253 // When making a decision about whether to split a chunk, the desired count
7254 // vs. the current count of the candidate to be split is also considered.
7255 // If the candidate is underpopulated (currently fewer chunks than desired),
7256 // a chunk of an overpopulated (currently more chunks than desired) size may
7257 // be chosen. The "hint" associated with a free list, if non-null, points
7258 // to a free list which may be overpopulated.
7259 //
7260
7261 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7262 const size_t size = fc->size();
7263 // Chunks that cannot be coalesced are not in the
7264 // free lists.
7265 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7266 assert(_sp->verify_chunk_in_free_list(fc),
7267 "free chunk should be in free lists");
7268 }
7269 // A chunk that is already free should not have been
7270 // marked in the bit map
7271 HeapWord* const addr = (HeapWord*) fc;
7272 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7273 // Verify that the bit map has no bits marked between
7274 // addr and purported end of this block.
7275 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7276
7277 // Some chunks cannot be coalesced under any circumstances.
7278 // See the definition of cantCoalesce().
7279 if (!fc->cantCoalesce()) {
7280 // This chunk can potentially be coalesced.
7281 // All the work is done in do_post_free_or_garbage_chunk().
7282 do_post_free_or_garbage_chunk(fc, size);
7283 // Note that if the chunk is not coalescable (the else arm
7284 // below), we unconditionally flush, without needing to do
7285 // a "lookahead," as we do below.
7286 if (inFreeRange()) lookahead_and_flush(fc, size);
7287 } else {
7288 // Code path common to both original and adaptive free lists.
7289
7290 // can't coalesce with the previous block; this should be treated
7291 // as the end of a free run if any
7292 if (inFreeRange()) {
7293 // we kicked some butt; time to pick up the garbage
7294 assert(freeFinger() < addr, "freeFinger points too high");
7295 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7296 }
7297 // else, nothing to do, just continue
7298 }
7299 }
7300
7301 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7302 // This is a chunk of garbage. It is not in any free list.
7303 // Add it to a free list or let it possibly be coalesced into
7304 // a larger chunk.
7305 HeapWord* const addr = (HeapWord*) fc;
7306 const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7307
7308 // Verify that the bit map has no bits marked between
7309 // addr and purported end of just dead object.
7310 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7311 do_post_free_or_garbage_chunk(fc, size);
7312
7313 assert(_limit >= addr + size,
7314 "A freshly garbage chunk can't possibly straddle over _limit");
7315 if (inFreeRange()) lookahead_and_flush(fc, size);
7316 return size;
7317 }
7318
7319 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7320 HeapWord* addr = (HeapWord*) fc;
7321 // The sweeper has just found a live object. Return any accumulated
7322 // left hand chunk to the free lists.
7323 if (inFreeRange()) {
7324 assert(freeFinger() < addr, "freeFinger points too high");
7325 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7326 }
7327
7328 // This object is live: we'd normally expect this to be
7329 // an oop, and like to assert the following:
7330 // assert(oopDesc::is_oop(oop(addr)), "live block should be an oop");
7331 // However, as we commented above, this may be an object whose
7332 // header hasn't yet been initialized.
7333 size_t size;
7334 assert(_bitMap->isMarked(addr), "Tautology for this control point");
7335 if (_bitMap->isMarked(addr + 1)) {
7336 // Determine the size from the bit map, rather than trying to
7337 // compute it from the object header.
7338 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7339 size = pointer_delta(nextOneAddr + 1, addr);
7340 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7341 "alignment problem");
7342
7343 #ifdef ASSERT
7344 if (oop(addr)->klass_or_null_acquire() != NULL) {
7345 // Ignore mark word because we are running concurrent with mutators
7346 assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
7347 assert(size ==
7348 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7349 "P-mark and computed size do not agree");
7350 }
7351 #endif
7352
7353 } else {
7354 // This should be an initialized object that's alive.
7355 assert(oop(addr)->klass_or_null_acquire() != NULL,
7356 "Should be an initialized object");
7357 // Ignore mark word because we are running concurrent with mutators
7358 assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
7359 // Verify that the bit map has no bits marked between
7360 // addr and purported end of this block.
7361 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7362 assert(size >= 3, "Necessary for Printezis marks to work");
7363 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7364 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7365 }
7366 return size;
7367 }
7368
7369 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7370 size_t chunkSize) {
7371 // do_post_free_or_garbage_chunk() should only be called in the case
7372 // of the adaptive free list allocator.
7373 const bool fcInFreeLists = fc->is_free();
7374 assert((HeapWord*)fc <= _limit, "sweep invariant");
7375 if (CMSTestInFreeList && fcInFreeLists) {
7376 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7377 }
7378
7379 log_develop_trace(gc, sweep)(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7380
7381 HeapWord* const fc_addr = (HeapWord*) fc;
7382
7383 bool coalesce = false;
7384 const size_t left = pointer_delta(fc_addr, freeFinger());
7385 const size_t right = chunkSize;
7386 switch (FLSCoalescePolicy) {
7387 // the numeric value forms a coalescing aggressiveness metric
7388 case 0: { // never coalesce
7389 coalesce = false;
7390 break;
7391 }
7392 case 1: { // coalesce if left & right chunks on overpopulated lists
7393 coalesce = _sp->coalOverPopulated(left) &&
7394 _sp->coalOverPopulated(right);
7395 break;
7396 }
7397 case 2: { // coalesce if left chunk on overpopulated list (default)
7398 coalesce = _sp->coalOverPopulated(left);
7399 break;
7400 }
7401 case 3: { // coalesce if left OR right chunk on overpopulated list
7402 coalesce = _sp->coalOverPopulated(left) ||
7403 _sp->coalOverPopulated(right);
7404 break;
7405 }
7406 case 4: { // always coalesce
7407 coalesce = true;
7408 break;
7409 }
7410 default:
7411 ShouldNotReachHere();
7412 }
7413
7414 // Should the current free range be coalesced?
7415 // If the chunk is in a free range and either we decided to coalesce above
7416 // or the chunk is near the large block at the end of the heap
7417 // (isNearLargestChunk() returns true), then coalesce this chunk.
7418 const bool doCoalesce = inFreeRange()
7419 && (coalesce || _g->isNearLargestChunk(fc_addr));
7420 if (doCoalesce) {
7421 // Coalesce the current free range on the left with the new
7422 // chunk on the right. If either is on a free list,
7423 // it must be removed from the list and stashed in the closure.
7424 if (freeRangeInFreeLists()) {
7425 FreeChunk* const ffc = (FreeChunk*)freeFinger();
7426 assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7427 "Size of free range is inconsistent with chunk size.");
7428 if (CMSTestInFreeList) {
7429 assert(_sp->verify_chunk_in_free_list(ffc),
7430 "Chunk is not in free lists");
7431 }
7432 _sp->coalDeath(ffc->size());
7433 _sp->removeFreeChunkFromFreeLists(ffc);
7434 set_freeRangeInFreeLists(false);
7435 }
7436 if (fcInFreeLists) {
7437 _sp->coalDeath(chunkSize);
7438 assert(fc->size() == chunkSize,
7439 "The chunk has the wrong size or is not in the free lists");
7440 _sp->removeFreeChunkFromFreeLists(fc);
7441 }
7442 set_lastFreeRangeCoalesced(true);
7443 print_free_block_coalesced(fc);
7444 } else { // not in a free range and/or should not coalesce
7445 // Return the current free range and start a new one.
7446 if (inFreeRange()) {
7447 // In a free range but cannot coalesce with the right hand chunk.
7448 // Put the current free range into the free lists.
7449 flush_cur_free_chunk(freeFinger(),
7450 pointer_delta(fc_addr, freeFinger()));
7451 }
7452 // Set up for new free range. Pass along whether the right hand
7453 // chunk is in the free lists.
7454 initialize_free_range((HeapWord*)fc, fcInFreeLists);
7455 }
7456 }
7457
7458 // Lookahead flush:
7459 // If we are tracking a free range, and this is the last chunk that
7460 // we'll look at because its end crosses past _limit, we'll preemptively
7461 // flush it along with any free range we may be holding on to. Note that
7462 // this can be the case only for an already free or freshly garbage
7463 // chunk. If this block is an object, it can never straddle
7464 // over _limit. The "straddling" occurs when _limit is set at
7465 // the previous end of the space when this cycle started, and
7466 // a subsequent heap expansion caused the previously co-terminal
7467 // free block to be coalesced with the newly expanded portion,
7468 // thus rendering _limit a non-block-boundary, making it dangerous
7469 // for the sweeper to step over and examine.
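// Concretely: _limit was the end of the space when this sweep began; if the
// heap has since expanded and the formerly co-terminal free block has been
// coalesced with the newly added space, that block now ends beyond _limit,
// so we flush it (and any free range being tracked) here instead of
// stepping over _limit.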
7470 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7471 assert(inFreeRange(), "Should only be called if currently in a free range.");
7472 HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7473 assert(_sp->used_region().contains(eob - 1),
7474 "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7475 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7476 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7477 p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7478 if (eob >= _limit) {
7479 assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7480 log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7481 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7482 "[" PTR_FORMAT "," PTR_FORMAT ")",
7483 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7484 // Return the storage we are tracking back into the free lists.
7485 log_develop_trace(gc, sweep)("Flushing ... ");
7486 assert(freeFinger() < eob, "Error");
7487 flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7488 }
7489 }
7490
7491 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7492 assert(inFreeRange(), "Should only be called if currently in a free range.");
7493 assert(size > 0,
7494 "A zero sized chunk cannot be added to the free lists.");
7495 if (!freeRangeInFreeLists()) {
7496 if (CMSTestInFreeList) {
7497 FreeChunk* fc = (FreeChunk*) chunk;
7498 fc->set_size(size);
7499 assert(!_sp->verify_chunk_in_free_list(fc),
7500 "chunk should not be in free lists yet");
7501 }
7502 log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7503 // A new free range is going to be starting. The current
7504 // free range has not been added to the free lists yet or
7505 // was removed so add it back.
7506 // If the current free range was coalesced, then the death
7507 // of the free range was recorded. Record a birth now.
7508 if (lastFreeRangeCoalesced()) {
7509 _sp->coalBirth(size);
7510 }
7511 _sp->addChunkAndRepairOffsetTable(chunk, size,
7512 lastFreeRangeCoalesced());
7513 } else {
7514 log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7515 }
7516 set_inFreeRange(false);
7517 set_freeRangeInFreeLists(false);
7518 }
7519
7520 // We take a break if we've been at this for a while,
7521 // so as to avoid monopolizing the locks involved.
7522 void SweepClosure::do_yield_work(HeapWord* addr) {
7523 // Return current free chunk being used for coalescing (if any)
7524 // to the appropriate freelist. After yielding, the next
7525 // free block encountered will start a coalescing range of
7526 // free blocks. If the next free block is adjacent to the
7527 // chunk just flushed, they will need to wait for the next
7528 // sweep to be coalesced.
7529 if (inFreeRange()) {
7530 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7531 }
7532
7533 // First give up the locks, then yield, then re-lock.
7534 // We should probably use a constructor/destructor idiom to
7535 // do this unlock/lock or modify the MutexUnlocker class to
7536 // serve our purpose. XXX
7537 assert_lock_strong(_bitMap->lock());
7538 assert_lock_strong(_freelistLock);
7539 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7540 "CMS thread should hold CMS token");
7541 _bitMap->lock()->unlock();
7542 _freelistLock->unlock();
7543 ConcurrentMarkSweepThread::desynchronize(true);
7544 _collector->stopTimer();
7545 _collector->incrementYields();
7546
7547 // See the comment in coordinator_yield()
7548 for (unsigned i = 0; i < CMSYieldSleepCount &&
7549 ConcurrentMarkSweepThread::should_yield() &&
7550 !CMSCollector::foregroundGCIsActive(); ++i) {
7551 os::sleep(Thread::current(), 1, false);
7552 }
7553
7554 ConcurrentMarkSweepThread::synchronize(true);
7555 _freelistLock->lock_without_safepoint_check();
7556 _bitMap->lock()->lock_without_safepoint_check();
7557 _collector->startTimer();
7558 }
7559
7560 #ifndef PRODUCT
7561 // This is actually very useful in a product build if it can
7562 // be called from the debugger. Compile it into the product
7563 // as needed.
7564 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7565 return debug_cms_space->verify_chunk_in_free_list(fc);
7566 }
7567 #endif
7568
7569 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7570 log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7571 p2i(fc), fc->size());
7572 }
7573
7574 // CMSIsAliveClosure
7575 bool CMSIsAliveClosure::do_object_b(oop obj) {
7576 HeapWord* addr = (HeapWord*)obj;
7577 return addr != NULL &&
7578 (!_span.contains(addr) || _bit_map->isMarked(addr));
7579 }
7580
7581 CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7582 MemRegion span,
7583 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7584 bool cpc):
7585 _collector(collector),
7586 _span(span),
7587 _mark_stack(mark_stack),
7588 _bit_map(bit_map),
7589 _concurrent_precleaning(cpc) {
7590 assert(!_span.is_empty(), "Empty span could spell trouble");
7591 }
7592
7593
7594 // CMSKeepAliveClosure: the serial version
7595 void CMSKeepAliveClosure::do_oop(oop obj) {
7596 HeapWord* addr = (HeapWord*)obj;
7597 if (_span.contains(addr) &&
7598 !_bit_map->isMarked(addr)) {
7599 _bit_map->mark(addr);
7600 bool simulate_overflow = false;
7601 NOT_PRODUCT(
7602 if (CMSMarkStackOverflowALot &&
7603 _collector->simulate_overflow()) {
7604 // simulate a stack overflow
7605 simulate_overflow = true;
7606 }
7607 )
7608 if (simulate_overflow || !_mark_stack->push(obj)) {
7609 if (_concurrent_precleaning) {
7610 // We dirty the overflown object and let the remark
7611 // phase deal with it.
7612 assert(_collector->overflow_list_is_empty(), "Error");
7613 // In the case of object arrays, we need to dirty all of
7614 // the cards that the object spans. No locking or atomics
7615 // are needed since no one else can be mutating the mod union
7616 // table.
7617 if (obj->is_objArray()) {
7618 size_t sz = obj->size();
7619 HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
7620 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7621 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7622 _collector->_modUnionTable.mark_range(redirty_range);
7623 } else {
7624 _collector->_modUnionTable.mark(addr);
7625 }
7626 _collector->_ser_kac_preclean_ovflw++;
7627 } else {
7628 _collector->push_on_overflow_list(obj);
7629 _collector->_ser_kac_ovflw++;
7630 }
7631 }
7632 }
7633 }
7634
7635 // CMSParKeepAliveClosure: a parallel version of the above.
7636 // The work queues are private to each closure (thread),
7637 // but (may be) available for stealing by other threads.
7638 void CMSParKeepAliveClosure::do_oop(oop obj) {
7639 HeapWord* addr = (HeapWord*)obj;
7640 if (_span.contains(addr) &&
7641 !_bit_map->isMarked(addr)) {
7642 // In general, during recursive tracing, several threads
7643 // may be concurrently getting here; the first one to
7644 // "tag" it, claims it.
7645 if (_bit_map->par_mark(addr)) {
7646 bool res = _work_queue->push(obj);
7647 assert(res, "Low water mark should be much less than capacity");
7648 // Do a recursive trim in the hope that this will keep
7649 // stack usage lower, but leave some oops for potential stealers
7650 trim_queue(_low_water_mark);
7651 } // Else, another thread got there first
7652 }
7653 }
7654
7655 void CMSParKeepAliveClosure::trim_queue(uint max) {
7656 while (_work_queue->size() > max) {
7657 oop new_oop;
7658 if (_work_queue->pop_local(new_oop)) {
7659 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
7660 assert(_bit_map->isMarked((HeapWord*)new_oop),
7661 "no white objects on this stack!");
7662 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7663 // iterate over the oops in this oop, marking and pushing
7664 // the ones in CMS heap (i.e. in _span).
7665 new_oop->oop_iterate(&_mark_and_push);
7666 }
7667 }
7668 }
7669
7670 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7671 CMSCollector* collector,
7672 MemRegion span, CMSBitMap* bit_map,
7673 OopTaskQueue* work_queue):
7674 _collector(collector),
7675 _span(span),
7676 _work_queue(work_queue),
7677 _bit_map(bit_map) { }
7678
7679 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7680 HeapWord* addr = (HeapWord*)obj;
7681 if (_span.contains(addr) &&
7682 !_bit_map->isMarked(addr)) {
7683 if (_bit_map->par_mark(addr)) {
7684 bool simulate_overflow = false;
7685 NOT_PRODUCT(
7686 if (CMSMarkStackOverflowALot &&
7687 _collector->par_simulate_overflow()) {
7688 // simulate a stack overflow
7689 simulate_overflow = true;
7690 }
7691 )
7692 if (simulate_overflow || !_work_queue->push(obj)) {
7693 _collector->par_push_on_overflow_list(obj);
7694 _collector->_par_kac_ovflw++;
7695 }
7696 } // Else another thread got there already
7697 }
7698 }
7699
7700 //////////////////////////////////////////////////////////////////
7701 // CMSExpansionCause /////////////////////////////
7702 //////////////////////////////////////////////////////////////////
7703 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7704 switch (cause) {
7705 case _no_expansion:
7706 return "No expansion";
7707 case _satisfy_free_ratio:
7708 return "Free ratio";
7709 case _satisfy_promotion:
7710 return "Satisfy promotion";
7711 case _satisfy_allocation:
7712 return "allocation";
7713 case _allocate_par_lab:
7714 return "Par LAB";
7715 case _allocate_par_spooling_space:
7716 return "Par Spooling Space";
7717 case _adaptive_size_policy:
7718 return "Ergonomics";
7719 default:
7720 return "unknown";
7721 }
7722 }
7723
7724 void CMSDrainMarkingStackClosure::do_void() {
7725 // the max number to take from overflow list at a time
7726 const size_t num = _mark_stack->capacity()/4;
7727 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7728 "Overflow list should be NULL during concurrent phases");
7729 while (!_mark_stack->isEmpty() ||
7730 // if stack is empty, check the overflow list
7731 _collector->take_from_overflow_list(num, _mark_stack)) {
7732 oop obj = _mark_stack->pop();
7733 HeapWord* addr = (HeapWord*)obj;
7734 assert(_span.contains(addr), "Should be within span");
7735 assert(_bit_map->isMarked(addr), "Should be marked");
7736 assert(oopDesc::is_oop(obj), "Should be an oop");
7737 obj->oop_iterate(_keep_alive);
7738 }
7739 }
7740
7741 void CMSParDrainMarkingStackClosure::do_void() {
7742 // drain queue
7743 trim_queue(0);
7744 }
7745
7746 // Trim our work_queue so its length is below max at return
7747 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7748 while (_work_queue->size() > max) {
7749 oop new_oop;
7750 if (_work_queue->pop_local(new_oop)) {
7751 assert(oopDesc::is_oop(new_oop), "Expected an oop");
7752 assert(_bit_map->isMarked((HeapWord*)new_oop),
7753 "no white objects on this stack!");
7754 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7755 // iterate over the oops in this oop, marking and pushing
7756 // the ones in CMS heap (i.e. in _span).
7757 new_oop->oop_iterate(&_mark_and_push);
7758 }
7759 }
7760 }
7761
7762 ////////////////////////////////////////////////////////////////////
7763 // Support for Marking Stack Overflow list handling and related code
7764 ////////////////////////////////////////////////////////////////////
7765 // Much of the following code is similar in shape and spirit to the
7766 // code used in ParNewGC. We should try and share that code
7767 // as much as possible in the future.
7768
7769 #ifndef PRODUCT
7770 // Debugging support for CMSMarkStackOverflowALot
7771
7772 // It's OK to call this multi-threaded; the worst thing
7773 // that can happen is that we'll get a bunch of closely
7774 // spaced simulated overflows, but that's OK; in fact it's
7775 // probably good, as it would exercise the overflow code
7776 // under contention.
7777 bool CMSCollector::simulate_overflow() {
7778 if (_overflow_counter-- <= 0) { // just being defensive
7779 _overflow_counter = CMSMarkStackOverflowInterval;
7780 return true;
7781 } else {
7782 return false;
7783 }
7784 }
7785
7786 bool CMSCollector::par_simulate_overflow() {
7787 return simulate_overflow();
7788 }
7789 #endif
7790
7791 // Single-threaded
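// The overflow list is threaded through the mark words of its elements
// (see push_on_overflow_list() below); as each element is transferred to
// the marking stack, its mark word is restored to the prototype value.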
7792 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7793 assert(stack->isEmpty(), "Expected precondition");
7794 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
7795 size_t i = num;
7796 oop cur = _overflow_list;
7797 const markOop proto = markOopDesc::prototype();
7798 NOT_PRODUCT(ssize_t n = 0;)
7799 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7800 next = oop(cur->mark_raw());
7801 cur->set_mark_raw(proto); // until proven otherwise
7802 assert(oopDesc::is_oop(cur), "Should be an oop");
7803 bool res = stack->push(cur);
7804 assert(res, "Bit off more than can chew?");
7805 NOT_PRODUCT(n++;)
7806 }
7807 _overflow_list = cur;
7808 #ifndef PRODUCT
7809 assert(_num_par_pushes >= n, "Too many pops?");
7810 _num_par_pushes -= n;
7811 #endif
7812 return !stack->isEmpty();
7813 }
7814
7815 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
7816 // (MT-safe) Get a prefix of at most "num" from the list.
7817 // The overflow list is chained through the mark word of
7818 // each object in the list. We fetch the entire list,
7819 // break off a prefix of the right size and return the
7820 // remainder. If other threads try to take objects from
7821 // the overflow list at that time, they will wait for
7822 // some time to see if data becomes available. If (and
7823 // only if) another thread places one or more object(s)
7824 // on the global list before we have returned the suffix
7825 // to the global list, we will walk down our local list
7826 // to find its end and append the global list to
7827 // our suffix before returning it. This suffix walk can
7828 // prove to be expensive (quadratic in the amount of traffic)
7829 // when there are many objects in the overflow list and
7830 // there is much producer-consumer contention on the list.
7831 // *NOTE*: The overflow list manipulation code here and
7832 // in ParNewGeneration:: are very similar in shape,
7833 // except that in the ParNew case we use the old (from/eden)
7834 // copy of the object to thread the list via its klass word.
7835 // Because of the common code, if you make any changes in
7836 // the code below, please check the ParNew version to see if
7837 // similar changes might be needed.
7838 // CR 6797058 has been filed to consolidate the common code.
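// In outline: (1) atomically install the BUSY sentinel (a non-oop bit
// pattern) in _overflow_list to claim the entire list, spinning and
// sleeping briefly if another thread currently owns it; (2) walk off a
// prefix of at most "num" elements onto our work queue; (3) return the
// remaining suffix to the global list, splicing it ahead of any elements
// that other threads have pushed in the meantime.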
7839 bool CMSCollector::par_take_from_overflow_list(size_t num,
7840 OopTaskQueue* work_q,
7841 int no_of_gc_threads) {
7842 assert(work_q->size() == 0, "First empty local work queue");
7843 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7844 if (_overflow_list == NULL) {
7845 return false;
7846 }
7847 // Grab the entire list; we'll put back a suffix
7848 oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
7849 Thread* tid = Thread::current();
7850 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
7851 // set to ParallelGCThreads.
7852 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7853 size_t sleep_time_millis = MAX2((size_t)1, num/100);
7854 // If the list is busy, we spin for a short while,
7855 // sleeping between attempts to get the list.
7856 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7857 os::sleep(tid, sleep_time_millis, false);
7858 if (_overflow_list == NULL) {
7859 // Nothing left to take
7860 return false;
7861 } else if (_overflow_list != BUSY) {
7862 // Try and grab the prefix
7863 prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
7864 }
7865 }
7866 // If the list was found to be empty, or we spun long
7867 // enough, we give up and return empty-handed. If we leave
7868 // the list in the BUSY state below, it must be the case that
7869 // some other thread holds the overflow list and will set it
7870 // to a non-BUSY state in the future.
7871 if (prefix == NULL || prefix == BUSY) {
7872 // Nothing to take or waited long enough
7873 if (prefix == NULL) {
7874 // Write back the NULL in case we overwrote it with BUSY above
7875 // and it is still the same value.
7876 Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7877 }
7878 return false;
7879 }
7880 assert(prefix != NULL && prefix != BUSY, "Error");
7881 size_t i = num;
7882 oop cur = prefix;
7883 // Walk down the first "num" objects, unless we reach the end.
7884 for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
7885 if (cur->mark_raw() == NULL) {
7886 // We have "num" or fewer elements in the list, so there
7887 // is nothing to return to the global list.
7888 // Write back the NULL in lieu of the BUSY we wrote
7889 // above, if it is still the same value.
7890 if (_overflow_list == BUSY) {
7891 Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7892 }
7893 } else {
7894 // Chop off the suffix and return it to the global list.
7895 assert(cur->mark_raw() != BUSY, "Error");
7896 oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
7897 cur->set_mark_raw(NULL); // break off suffix
7898 // It's possible that the list is still in the empty (BUSY) state
7899 // we left it in a short while ago; in that case we may be
7900 // able to place back the suffix without incurring the cost
7901 // of a walk down the list.
7902 oop observed_overflow_list = _overflow_list;
7903 oop cur_overflow_list = observed_overflow_list;
7904 bool attached = false;
7905 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7906 observed_overflow_list =
7907 Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7908 if (cur_overflow_list == observed_overflow_list) {
7909 attached = true;
7910 break;
7911 } else cur_overflow_list = observed_overflow_list;
7912 }
7913 if (!attached) {
7914 // Too bad, someone else sneaked in (at least) an element; we'll need
7915 // to do a splice. Find tail of suffix so we can prepend suffix to global
7916 // list.
7917 for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
7918 oop suffix_tail = cur;
7919 assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
7920 "Tautology");
7921 observed_overflow_list = _overflow_list;
7922 do {
7923 cur_overflow_list = observed_overflow_list;
7924 if (cur_overflow_list != BUSY) {
7925 // Do the splice ...
7926 suffix_tail->set_mark_raw(markOop(cur_overflow_list));
7927 } else { // cur_overflow_list == BUSY
7928 suffix_tail->set_mark_raw(NULL);
7929 }
7930 // ... and try to place spliced list back on overflow_list ...
7931 observed_overflow_list =
7932 Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7933 } while (cur_overflow_list != observed_overflow_list);
7934 // ... until we have succeeded in doing so.
7935 }
7936 }
7937
7938 // Push the prefix elements on work_q
7939 assert(prefix != NULL, "control point invariant");
7940 const markOop proto = markOopDesc::prototype();
7941 oop next;
7942 NOT_PRODUCT(ssize_t n = 0;)
7943 for (cur = prefix; cur != NULL; cur = next) {
7944 next = oop(cur->mark_raw());
7945 cur->set_mark_raw(proto); // until proven otherwise
7946 assert(oopDesc::is_oop(cur), "Should be an oop");
7947 bool res = work_q->push(cur);
7948 assert(res, "Bit off more than we can chew?");
7949 NOT_PRODUCT(n++;)
7950 }
7951 #ifndef PRODUCT
7952 assert(_num_par_pushes >= n, "Too many pops?");
7953 Atomic::sub(n, &_num_par_pushes);
7954 #endif
7955 return true;
7956 }
7957
7958 // Single-threaded
7959 void CMSCollector::push_on_overflow_list(oop p) {
7960 NOT_PRODUCT(_num_par_pushes++;)
7961 assert(oopDesc::is_oop(p), "Not an oop");
7962 preserve_mark_if_necessary(p);
7963 p->set_mark_raw((markOop)_overflow_list);
7964 _overflow_list = p;
7965 }
7966
7967 // Multi-threaded; use CAS to prepend to overflow list
7968 void CMSCollector::par_push_on_overflow_list(oop p) {
7969 NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
7970 assert(oopDesc::is_oop(p), "Not an oop");
7971 par_preserve_mark_if_necessary(p);
7972 oop observed_overflow_list = _overflow_list;
7973 oop cur_overflow_list;
7974 do {
7975 cur_overflow_list = observed_overflow_list;
7976 if (cur_overflow_list != BUSY) {
7977 p->set_mark_raw(markOop(cur_overflow_list));
7978 } else {
7979 p->set_mark_raw(NULL);
7980 }
7981 observed_overflow_list =
7982 Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
7983 } while (cur_overflow_list != observed_overflow_list);
7984 }
7985 #undef BUSY
7986
7987 // Single threaded
7988 // General Note on GrowableArray: pushes may silently fail
7989 // because we are (temporarily) out of C-heap for expanding
7990 // the stack. The problem is quite ubiquitous and affects
7991 // a lot of code in the JVM. The prudent thing for GrowableArray
7992 // to do (for now) is to exit with an error. However, that may
7993 // be too draconian in some cases because the caller may be
7994 // able to recover without much harm. For such cases, we
7995 // should probably introduce a "soft_push" method which returns
7996 // an indication of success or failure with the assumption that
7997 // the caller may be able to recover from a failure; code in
7998 // the VM can then be changed, incrementally, to deal with such
7999 // failures where possible, thus incrementally hardening the VM
8000 // in such low resource situations.
8001 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8002 _preserved_oop_stack.push(p);
8003 _preserved_mark_stack.push(m);
8004 assert(m == p->mark_raw(), "Mark word changed");
8005 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8006 "bijection");
8007 }
8008
8009 // Single threaded
8010 void CMSCollector::preserve_mark_if_necessary(oop p) {
8011 markOop m = p->mark_raw();
8012 if (m->must_be_preserved(p)) {
8013 preserve_mark_work(p, m);
8014 }
8015 }
8016
8017 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8018 markOop m = p->mark_raw();
8019 if (m->must_be_preserved(p)) {
8020 MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8021 // Even though we read the mark word without holding
8022 // the lock, we are assured that it will not change
8023 // because we "own" this oop, so no other thread can
8024 // be trying to push it on the overflow list; see
8025 // the assertion in preserve_mark_work() that checks
8026 // that m == p->mark_raw().
8027 preserve_mark_work(p, m);
8028 }
8029 }
8030
8031 // We should be able to do this multi-threaded,
8032 // a chunk of stack being a task (this is
8033 // correct because each oop only ever appears
8034 // once in the overflow list). However, it's
8035 // not very easy to completely overlap this with
8036 // other operations, so will generally not be done
8037 // until all work's been completed. Because we
8038 // expect the preserved oop stack (set) to be small,
8039 // it's probably fine to do this single-threaded.
8040 // We can explore cleverer concurrent/overlapped/parallel
8041 // processing of preserved marks if we feel the
8042 // need for this in the future. Stack overflow should
8043 // be so rare in practice and, when it happens, its
8044 // effect on performance so great that this will
8045 // likely just be in the noise anyway.
8046 void CMSCollector::restore_preserved_marks_if_any() {
8047 assert(SafepointSynchronize::is_at_safepoint(),
8048 "world should be stopped");
8049 assert(Thread::current()->is_ConcurrentGC_thread() ||
8050 Thread::current()->is_VM_thread(),
8051 "should be single-threaded");
8052 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8053 "bijection");
8054
8055 while (!_preserved_oop_stack.is_empty()) {
8056 oop p = _preserved_oop_stack.pop();
8057 assert(oopDesc::is_oop(p), "Should be an oop");
8058 assert(_span.contains(p), "oop should be in _span");
8059 assert(p->mark_raw() == markOopDesc::prototype(),
8060 "Set when taken from overflow list");
8061 markOop m = _preserved_mark_stack.pop();
8062 p->set_mark_raw(m);
8063 }
8064 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8065 "stacks were cleared above");
8066 }
8067
8068 #ifndef PRODUCT
8069 bool CMSCollector::no_preserved_marks() const {
8070 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8071 }
8072 #endif
8073
8074 // Transfer some number of overflown objects to usual marking
8075 // stack. Return true if some objects were transferred.
8076 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
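// Take at most a quarter of the remaining stack capacity, further capped by
// ParGCDesiredObjsFromOverflowList -- presumably so that a single transfer
// cannot immediately re-fill (and re-overflow) the marking stack.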
8077 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8078 (size_t)ParGCDesiredObjsFromOverflowList);
8079
8080 bool res = _collector->take_from_overflow_list(num, _mark_stack);
8081 assert(_collector->overflow_list_is_empty() || res,
8082 "If list is not empty, we should have taken something");
8083 assert(!res || !_mark_stack->isEmpty(),
8084 "If we took something, it should now be on our stack");
8085 return res;
8086 }
8087
8088 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8089 size_t res = _sp->block_size_no_stall(addr, _collector);
8090 if (_sp->block_is_obj(addr)) {
8091 if (_live_bit_map->isMarked(addr)) {
8092 // It can't have been dead in a previous cycle
8093 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8094 } else {
8095 _dead_bit_map->mark(addr); // mark the dead object
8096 }
8097 }
8098 // Could be 0, if the block size could not be computed without stalling.
8099 return res;
8100 }
8101
8102 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8103 GCMemoryManager* manager = CMSHeap::heap()->old_manager();
8104 switch (phase) {
8105 case CMSCollector::InitialMarking:
8106 initialize(manager /* GC manager */ ,
8107 cause /* cause of the GC */,
8108 true /* allMemoryPoolsAffected */,
8109 true /* recordGCBeginTime */,
8110 true /* recordPreGCUsage */,
8111 false /* recordPeakUsage */,
8112 false /* recordPostGCusage */,
8113 true /* recordAccumulatedGCTime */,
8114 false /* recordGCEndTime */,
8115 false /* countCollection */ );
8116 break;
8117
8118 case CMSCollector::FinalMarking:
8119 initialize(manager /* GC manager */ ,
8120 cause /* cause of the GC */,
8121 true /* allMemoryPoolsAffected */,
8122 false /* recordGCBeginTime */,
8123 false /* recordPreGCUsage */,
8124 false /* recordPeakUsage */,
8125 false /* recordPostGCusage */,
8126 true /* recordAccumulatedGCTime */,
8127 false /* recordGCEndTime */,
8128 false /* countCollection */ );
8129 break;
8130
8131 case CMSCollector::Sweeping:
8132 initialize(manager /* GC manager */ ,
8133 cause /* cause of the GC */,
8134 true /* allMemoryPoolsAffected */,
8135 false /* recordGCBeginTime */,
8136 false /* recordPreGCUsage */,
8137 true /* recordPeakUsage */,
8138 true /* recordPostGCusage */,
8139 false /* recordAccumulatedGCTime */,
8140 true /* recordGCEndTime */,
8141 true /* countCollection */ );
8142 break;
8143
8144 default:
8145 ShouldNotReachHere();
8146 }
8147 }
8148