/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_GENERATION_HPP
#define SHARE_GC_SHARED_GENERATION_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation            - abstract base class
// - DefNewGeneration    - allocation area (copy collected)
// - CardGeneration      - abstract class adding offset array behavior
//   - TenuredGeneration - tenured (old object) space (markSweepCompact)
//
// The system configuration currently allowed is:
//
//   DefNewGeneration + TenuredGeneration
//

class DefNewGeneration;
class GCMemoryManager;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopClosure;
class GenCollectedHeap;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another. It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
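// An illustrative sketch (assumed consumer code, not part of this header):
// walking a list of contributed scratch blocks. The usable words of each
// block start at the block's own address, so the two header fields overlay
// the first two of the "num_words" words it describes.
//
//   for (ScratchBlock* b = list; b != NULL; b = b->next) {
//     HeapWord* base  = (HeapWord*)b;   // block starts at "this"
//     size_t    words = b->num_words;   // total, including the two header words
//     // ... use [base, base + words) as scratch space ...
//   }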
class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

  GCMemoryManager* _gc_manager;

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code. Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // ("Weak") Reference processing support
  SpanSubjectToDiscoveryClosure _span_based_discoverer;
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size);

 public:
  // The set of possible generation kinds.
  enum Name {
    DefNew,
    MarkSweepCompact,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };

  // Allocate and initialize ("weak") refs processing support.
  virtual void ref_processor_init();

  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt() const { return false; }

  // Space inquiries (results in bytes)
  size_t initial_size();
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion. (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;
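  // An illustrative sketch (assumed caller, not part of this interface): a
  // young-generation collector might consult the old generation before a
  // scavenge, falling back to a full collection if promotion looks unsafe:
  //
  //   if (!old_gen->promotion_attempt_is_safe(young_gen->used())) {
  //     // a scavenge could suffer promotion failure; prefer a full collection
  //   }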
  // Returns true if this generation cannot be expanded further
  // without a GC. Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region() { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size. Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocate and return a block of the requested size, or return "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (More precisely, this means the style of allocation that
  // increments *top_addr() with a CAS.) (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
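  // An illustrative sketch of that lock-free fast path (an assumed pattern,
  // not the actual emitted code); "Atomic::cmpxchg" stands in here for
  // whatever atomic compare-and-exchange primitive is available:
  //
  //   HeapWord* volatile* top = gen->top_addr();
  //   HeapWord**          end = gen->end_addr();
  //   for (;;) {
  //     HeapWord* old_top = *top;
  //     if (old_top + word_size > *end) return NULL;   // region exhausted
  //     if (Atomic::cmpxchg(top, old_top, old_top + word_size) == old_top) {
  //       return old_top;  // we own [old_top, old_top + word_size)
  //     }
  //   }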
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation. Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz". If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*). Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num, oop obj, markWord m, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset. Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset. Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
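  // An illustrative sketch (assumed caller, e.g. a parallel young-gen
  // scavenger; not part of this interface):
  //
  //   markWord m = obj->mark();  // save first: may be clobbered by forwarding
  //   oop copy = old_gen->par_promote(thread_num, obj, m, obj->size());
  //   if (copy == NULL) {
  //     // promotion failed; the caller must unwind and recover
  //   }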
  // Returns "true" iff collect() should subsequently be called on this
  // generation. See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block). If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection. The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again. The default is to do nothing.
  virtual void ensure_parsability() {}

  // Generations may keep statistics about collection. This method
  // updates those statistics. current_generation is the generation
  // that was most recently collected. This allows the generation to
  // decide what statistics are valid to collect. For example, the
  // generation can decide to gather the amount of promoted data if
  // the collection of the young generation has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(Generation* current_generation, bool full) {}

#if INCLUDE_SERIALGC
  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }
#endif
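  // An illustrative sketch (assumed driver code, not part of this interface)
  // of how the serial mark-sweep phases above line up; phase 1 (marking)
  // happens before any of these calls:
  //
  //   gen->prepare_for_compaction(&cp);  // phase 2: compute forwarding addresses
  //   gen->adjust_pointers();            // phase 3: update references to forwardees
  //   gen->compact();                    // phase 4: slide objects to their new homes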
  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections. For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks". That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. If
  // the target is not the requestor, no gc actions will be required
  // of the target. The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list. The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {}
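  // An illustrative sketch of the scratch protocol (assumed requestor side,
  // not part of this interface):
  //
  //   ScratchBlock* list = NULL;
  //   target_gen->contribute_scratch(list, this, max_alloc_words);
  //   // ... the collection uses whatever blocks "list" now holds ...
  //   target_gen->reset_scratch();  // let the target clean up afterwards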
  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopIterateClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Inform a generation that it no longer contains references to objects
  // in any younger generation. [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved. [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;

  void print_heap_change(size_t prev_used) const;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }

  GCMemoryManager* gc_manager() const {
    assert(_gc_manager != NULL, "not initialized yet");
    return _gc_manager;
  }

  void set_gc_manager(GCMemoryManager* gc_manager) {
    _gc_manager = gc_manager;
  }

};

#endif // SHARE_GC_SHARED_GENERATION_HPP