/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP

#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated

  void print_on(outputStream* st) const;
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
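  // Illustrative reading of the constants above (example sizes are
  // arbitrary): requests below 16 words qualify for the small linear
  // allocation block, a 100-word request is small enough for
  // _indexedFreeList[100], and a 1000-word request (>= SmallForDictionary)
  // must be satisfied from the dictionary of large blocks.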
  static size_t IndexSetStart;
  static size_t IndexSetStride;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int   _lockRank;

  // a lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
  AFLBinaryTreeDictionary* _dictionary;    // ptr to dictionary for large size blocks

  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
                                       // indexed array for small size blocks
  // allocation strategy
  bool       _fitStrategy;      // Use best fit strategy.
  bool       _adaptive_freelists; // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord*  _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Stable value of used().
  size_t _used_stable;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // a lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".)  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // indexed_free_lists.
  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
  // evenly splittable into "n" "word_sz" chunks.  Returns that
  // evenly splittable chunk.  May split a larger chunk to get the
  // evenly splittable chunk.
  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);

  // Used by par_get_chunk_of_blocks() for the chunks from the
  // dictionary.
  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available; return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  //  Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void       removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void       returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void       returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void       repairLinearAllocationBlocks();
  void       repairLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void       refillLinearAllocBlocksIfNeeded();

  void       verify_objects_initialized() const;

  // Statistics reporting helper functions
  void       reportFreeListStatistics() const;
  void       reportIndexedFreeListStatistics() const;
  size_t     maxChunkSizeInIndexedFreeLists() const;
  size_t     numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t     totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values
  static void set_cms_values();

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
         HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns monotonically increasing stable used space bytes for CMS.
  // This is required for jstat and other memory monitoring tools
  // that might otherwise see inconsistent used space values during a garbage
  // collection, promotion or allocation into compactibleFreeListSpace.
  // The value returned by this function might be smaller than the
  // actual value.
  size_t used_stable() const;
  // Recalculate and cache the current stable used() value. Only to be called
  // in places where we can be sure that the result is stable.
  void recalculate_used_stable();

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space with references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
 HeapWord* object_iterate_careful_m(MemRegion mr,
                                    ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
                             "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                             p2i(_sweep_limit), p2i(bottom()), p2i(end()));
    }
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop       promote(oop obj, size_t obj_size);
  void      gc_prologue();
  void      gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void      sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool       linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
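  // Note: padding every request up to at least MinChunkSize (as done above)
  // keeps every allocated block large enough to be described by a FreeChunk
  // header once it is freed and returned to the free lists.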
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
              bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify()                           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is in the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency()      const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

// Record (for each size):
//
//   split-births = #chunks added due to splits in (prev-sweep-end,
//      this-sweep-start)
//   split-deaths = #chunks removed for splits in (prev-sweep-end,
//      this-sweep-start)
//   num-curr     = #chunks at start of this sweep
//   num-prev     = #chunks at end of previous sweep
//
// The above are quantities that are measured. Now define:
//
//   num-desired := num-prev + split-births - split-deaths - num-curr
//
// Roughly, num-prev + split-births is the supply,
// split-deaths is demand due to other sizes
// and num-curr is what we have left.
//
// Thus, num-desired is roughly speaking the "legitimate demand"
// for blocks of this size and what we are striving to reach at the
// end of the current sweep.
//
// For a given list, let num-len be its current population.
// Define, for a free list of a given size:
//
//   coal-overpopulated := num-len >= num-desired * coal-surplus
// (coal-surplus is set to 1.05, i.e. we allow a little slop when
// coalescing -- we do not coalesce unless we think that the current
// supply has exceeded the estimated demand by more than 5%).
//
// For the set of sizes in the binary tree, which is neither dense nor
// closed, it may be the case that for a particular size we have never
// had, or do not now have, or did not have at the previous sweep,
// chunks of that size. We need to extend the definition of
// coal-overpopulated to such sizes as well:
//
//   For a chunk in/not in the binary tree, extend coal-overpopulated
//   defined above to include all sizes as follows:
//
//   . a size that is non-existent is coal-overpopulated
//   . a size that has a num-desired <= 0 as defined above is
//     coal-overpopulated.
//
// Also define, for a chunk heap-offset C and mountain heap-offset M:
//
//   close-to-mountain := C >= 0.99 * M
//
// Now, the coalescing strategy is:
//
//    Coalesce left-hand chunk with right-hand chunk if and
//    only if:
//
//      EITHER
//        . left-hand chunk is of a size that is coal-overpopulated
//      OR
//        . right-hand chunk is close-to-mountain
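//
// As a purely illustrative example of the quantities above (numbers not
// taken from any actual run): suppose that for 32-word chunks the previous
// sweep ended with num-prev = 100, that since then split-births = 20 and
// split-deaths = 50, and that this sweep starts with num-curr = 40.  Then
//
//   num-desired = 100 + 20 - 50 - 40 = 30
//
// and, with coal-surplus = 1.05, the 32-word list is considered
// coal-overpopulated once its population num-len reaches
// 30 * 1.05 = 31.5, i.e. at 32 or more chunks.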
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP