/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "memory/iterator.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation and, for simplicity in the first implementation,
// that it is a single compactible space. Neither of these restrictions
// appears essential; they will be relaxed in the future when more time
// is available to implement the greater generality (and there's a need
// for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
class ConcurrentGCTimer;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
class TenuredGeneration;
class SerialOldTracer;

// A generic CMS bit map. It is the basis for both the CMS marking bit map
// and the mod union table (in each case only a subset of the methods is
// used). This is essentially a wrapper around the BitMap class, with one
// bit per (1 << _shifter) HeapWords. (For the marking bit map we have
// _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
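// A minimal illustrative sketch (not product code) of the address<->bit
// arithmetic implied by _shifter, assuming `bmStartWord` is the base of
// the covered range; `pointer_delta` yields the difference in HeapWords:
//
//   size_t heap_word_to_offset(HeapWord* addr, HeapWord* bmStartWord,
//                              int shifter) {
//     return pointer_delta(addr, bmStartWord) >> shifter;
//   }
//   HeapWord* offset_to_heap_word(size_t offset, HeapWord* bmStartWord,
//                                 int shifter) {
//     return bmStartWord + (offset << shifter);
//   }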
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm;

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  void print_on_error(outputStream* st, const char* prefix) const;
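
  // Typical parallel-marking idiom built on par_mark() above (an
  // illustrative sketch only; `work_queue` and `obj` are hypothetical
  // locals):
  //
  //   if (bit_map->par_mark(addr)) {
  //     // This thread won the race to mark the object, so it alone
  //     // pushes the object for later scanning.
  //     work_queue->push(obj);
  //   }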
  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in || case.
  Mutex* par_lock() { return &_par_lock; }
};

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
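// Illustrative use (a sketch, not a prescribed protocol): a sampling
// thread records survivor-space chunk boundaries with record_sample(),
// and the parallel rescan later partitions work at those boundaries:
//
//   ChunkArray ca(storage, capacity);        // hypothetical setup
//   ca.record_sample(chunk_boundary, size);  // size is currently unused
//   for (size_t i = 0; i < ca.end(); i++) {
//     HeapWord* boundary = ca.nth(i);        // one rescan task per chunk
//   }
//   ca.reset();                              // warns if overflows occurred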
class ChunkArray: public CHeapObj<mtGC> {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;  // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;
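
  // A worked sketch of the exponential-average update described above,
  // assuming alpha is a percent weight in [0, 100] (illustrative only):
  //
  //   static double exp_avg(double avg, double cur_sample,
  //                         unsigned int alpha) {
  //     // With alpha == 100 the new sample gets the entire weight,
  //     // which is how the very first sample bootstraps the average.
  //     return ((100.0 - alpha) * avg + alpha * cur_sample) / 100.0;
  //   }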
  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.
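
  // How the estimates above relate to each other (an illustrative sketch;
  // the real implementations add bootstrapping and safety padding):
  //
  //   cms_consumption_rate()    ~= promotion_rate() + cms_allocation_rate()
  //   time_until_cms_gen_full() ~= cms_free_bytes / cms_consumption_rate()
  //
  // where cms_free_bytes stands for the free space in the cms generation.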
  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  bool do_object_b(oop obj);
};
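
// A sketch of the liveness test this closure embodies (illustrative; the
// authoritative version lives in the .cpp file): an object is treated as
// live if it lies outside the span this collector covers, or if its bit
// is set in the marking bit map:
//
//   bool CMSIsAliveClosure::do_object_b(oop obj) {
//     HeapWord* addr = (HeapWord*)obj;
//     return addr != NULL &&
//            (!_span.contains(addr) || _bit_map->isMarked(addr));
//   }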

// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParMarkTask;
  friend class CMSParInitialMarkTask;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced for accommodating overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop, mtGC>     _preserved_oop_stack;
  Stack<markOop, mtGC> _preserved_mark_stack;
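
  // A sketch of how a grey object is threaded onto the overflow list via
  // its mark word (illustrative; cf. push_on_overflow_list() declared
  // further below, whose sequential version follows this shape):
  //
  //   void push_on_overflow_list(oop p) {
  //     preserve_mark_if_necessary(p);        // save a non-trivial header
  //     p->set_mark((markOop)_overflow_list); // mark word now links the list
  //     _overflow_list = p;                   // new list head
  //   }
  //
  // The parallel variant performs the head update with a CAS.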

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  static GCCause::Cause _full_gc_cause;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;  }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  CMSTracer* _gc_tracer_cm;
  ConcurrentGCTimer* _gc_timer_cm;

  bool _cms_start_registered;

  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_foreground_gc_start(GCCause::Cause cause);
  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor*            _ref_processor;
  CMSIsAliveClosure              _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread*     _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
 protected:
  static CollectorState _collectorState;
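
  // Because of the numeric ordering chosen above, the two invariants just
  // stated reduce to simple comparisons (illustrative sketches):
  //
  //   bool post_sweep_and_pre_mark() {   // Resizing, Resetting or Idling
  //     return _collectorState <= Idling;
  //   }
  //   bool marking_or_precleaning() {    // strictly between the two
  //     return _collectorState > Idling && _collectorState < Sweeping;
  //   }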

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats      _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord*     _icms_start_limit;
  HeapWord*     _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()  { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;       // the younger gen
  HeapWord** _top_addr;         // ... Top of Eden
  HeapWord** _end_addr;         // ... End of Eden
  Mutex*     _eden_chunk_lock;
  HeapWord** _eden_chunk_array; // ... Eden partitioning array
  size_t     _eden_chunk_index; // ... top (exclusive) of array
  size_t     _eden_chunk_capacity;  // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // A bounded minimum size of PLABs, should not return too small values since
  // this will affect the size of the data structures used for parallel young gen rescan
  size_t plab_sample_minimum_size();

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)       // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics()           PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const     { return _sweep_count; }
  void   increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.
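
  // Typical shape of a concurrent-phase yield point (an illustrative
  // sketch; the real yield code also deals with the relevant locks):
  //
  //   stopTimer();        // exclude ceded time from phase timings
  //   incrementYields();
  //   icms_wait();        // honor the incremental-mode duty cycle
  //   startTimer();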

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  static void print_on_error(outputStream* st);

  // debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};

class CMSExpansionCause : public AllStatic {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type          = 0,
    MS_foreground_collection_type       = 1,
    MSC_foreground_collection_type      = 2,
    Unknown_collection_type             = 3
  };

  CollectionTypes _debug_collection_type;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);
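
  // A sketch of how the initializer above might combine its two inputs
  // (illustrative only; io is an occupancy percentage or -1 when unset,
  // tr a trigger ratio in [0, 100]):
  //
  //   if (io >= 0) {
  //     _initiating_occupancy = (double)io / 100.0;
  //   } else {
  //     // derive it from the free-ratio bound, scaled by the trigger ratio
  //     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
  //                              (double)(tr * MinHeapFreeRatio) / 100.0)
  //                             / 100.0;
  //   }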

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t used_stable() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()->update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
              CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  void shrink_by(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(ExtendedOopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify();
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to collector
    return collector()->get_data_recorder(thr_num);
  }
  void sample_eden_chunk() {
    // Delegate to collector
    return collector()->sample_eden_chunk();
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  // Resize the generation after a compacting GC.  The
  // generation can be treated as a contiguous space
  // after the compaction.
  virtual void compute_new_size();
  // Resize the generation after a non-compacting
  // collection.
  void compute_new_size_free_list();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy casted to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
                                  use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack* markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                           MemRegion span,
                           CMSBitMap* bit_map,
                           OopTaskQueue* work_queue,
                           CMSMarkStack* overflow_stack,
                           bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};

// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous() { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case),
// declared in genOopClosures.hpp, to accomplish some of its work.
// In the parallel case the bit map is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit map and, because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
 #ifdef ASSERT
  CMSCollector*   _collector;
  MemRegion       _span;
  union {
    CMSMarkStack* _mark_stack;
    OopTaskQueue* _work_queue;
  };
 #endif // ASSERT
  bool            _parallel;
  CMSBitMap*      _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack* mark_stack,
                                MarkRefsIntoAndScanClosure* cl):
 #ifdef ASSERT
    _collector(collector),
    _span(span),
    _mark_stack(mark_stack),
 #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                Par_MarkRefsIntoAndScanClosure* cl):
 #ifdef ASSERT
    _collector(collector),
    _span(span),
    _work_queue(work_queue),
 #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_bm(oop, MemRegion) form instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};
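// Illustrative note (not part of this interface): the anonymous unions in
// ScanMarkedObjectsAgainClosure make it a small tagged union -- exactly one
// of the serial or parallel members is live, with _parallel as the
// discriminant set by whichever constructor ran. A hypothetical
// do_object_bm() would therefore dispatch along the lines of:
//
//   if (_parallel) {
//     // scan via _par_scan_closure, pushing work on _work_queue
//   } else {
//     // scan via _scan_closure, using _mark_stack for overflow
//   }
//
// Reading the union member that was not initialized is undefined, so any
// new code touching these fields must test _parallel first.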
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*     _space;
  ScanMarkedObjectsAgainClosure _scan_cl;
  size_t                        _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             mark_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};
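// Illustrative sketch (not part of this interface): a hypothetical remark
// driver would aim MarkFromDirtyCardsClosure above at the space being
// rescanned and then walk the dirty ranges recorded in the mod union
// table, e.g.:
//
//   MarkFromDirtyCardsClosure dirty_cl(collector, span, cms_space,
//                                      bit_map, mark_stack, &scan_cl);
//   mod_union_table->dirty_range_iterate_clear(span, &dirty_cl);
//   size_t cards = dirty_cl.num_dirty_cards();
//
// The real driver lives in CMSCollector; mod_union_table and scan_cl are
// placeholder names for this sketch.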
// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*               _collector;
  MemRegion                   _span;
  bool                        _yield;
  Mutex*                      _freelistLock;
  CMSBitMap*                  _bitMap;
  CMSMarkStack*               _markStack;
  MarkRefsIntoAndScanClosure* _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack* markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};

class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*       _collector;
  MemRegion           _span;
  bool                _yield;
  CMSBitMap*          _bit_map;
  CMSMarkStack*       _mark_stack;
  PushAndMarkClosure* _scanning_closure;
  unsigned int        _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion span,
                               CMSBitMap* bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t do_object_careful(oop p);

  size_t do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced. The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
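// Illustrative walk-through (not part of this interface) of the flags
// above: when the sweep first meets a free or garbage chunk it opens an
// LHC (_inFreeRange = true, _freeFinger = chunk start); each coalescable
// RHC that follows extends the range (_lastFreeRangeCoalesced becomes true
// once more than one chunk has been merged); a live object closes the
// range, at which point the accumulated LHC is returned to the free lists
// and _inFreeRange is reset. Schematically:
//
//   [free][garbage][garbage][live ...]
//    ^ LHC opens   ^ coalesced into LHC ^ LHC flushed, range closed
//
// _freeRangeInFreeLists records whether the chunk at _freeFinger is still
// linked into the free lists, so closing the range can avoid inserting it
// twice.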
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector; // collector doing the work
  ConcurrentMarkSweepGeneration* _g;         // Generation being swept
  CompactibleFreeListSpace*      _sp;        // Space being swept
  HeapWord*                      _limit;     // the address at or above which the
                                             // sweep should stop because we do not
                                             // expect newly-garbage blocks eligible
                                             // for sweeping past that address.
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                // generation)
  bool                           _inFreeRange;  // Indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                        // Often, we have just found
                                        // a free chunk and started
                                        // a new free range; we do not
                                        // eagerly remove this chunk from
                                        // the free lists unless there is
                                        // a possibility of coalescing.
                                        // When true, this flag indicates
                                        // that the _freeFinger below
                                        // points to a potentially free chunk
                                        // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                        // free range contains chunks
                                        // coalesced
  bool                           _yield;
                                        // Whether sweeping should be
                                        // done with yields. For instance
                                        // when done by the foreground
                                        // collector we shouldn't yield.
  HeapWord*                      _freeFinger; // When _inFreeRange is set, the
                                              // pointer to the "left hand
                                              // chunk"
  size_t                         _freeRangeSize;
                                        // When _inFreeRange is set, this
                                        // indicates the accumulated size
                                        // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk* fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk* fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk* fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const            { return _freeFinger; }
  void set_freeFinger(HeapWord* v)        { _freeFinger = v; }
  bool inFreeRange() const                { return _inFreeRange; }
  void set_inFreeRange(bool v)            { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const     { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const       { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v)   { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t do_blk_careful(HeapWord* addr);
  void print() const { print_on(tty); }
  void print_on(outputStream* st) const;
};

// Closures related to weak reference processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                              CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                              CMSKeepAliveClosure* keep_alive,
                              bool cpc):
    _collector(collector),
    _span(span),
    _mark_stack(mark_stack),
    _bit_map(bit_map),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*                 _collector;
  MemRegion                     _span;
  OopTaskQueue*                 _work_queue;
  CMSBitMap*                    _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _work_queue(work_queue),
    _bit_map(bit_map),
    _mark_and_push(collector, span, bit_map, work_queue) { }

  void trim_queue(uint max);
  void do_void();
};
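// Illustrative sketch (not part of this interface): during reference
// processing the drain closure is paired with a keep-alive closure -- the
// keep-alive closure marks and pushes referents, and the drain closure
// then empties the mark stack to close the transitive set. A hypothetical
// call into the reference processor (the exact entry point and its
// arguments vary by JDK version) might look like:
//
//   CMSKeepAliveClosure keep_alive(/* collector, span, bit_map, ... */);
//   CMSDrainMarkingStackClosure drain(collector, span, bit_map,
//                                     mark_stack, &keep_alive,
//                                     false /* cpc */);
//   rp->process_discovered_references(&is_alive, &keep_alive, &drain, ...);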
// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks free list locks for a given CMS collector
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
 public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap* live_bit_map,
                         CMSBitMap* dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {

 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP